seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
70875903393 | from typing import Iterable
from minecraft_launch.modules.interface.iresource import IResource
from minecraft_launch.modules.models.launch.game_core import GameCore
from minecraft_launch.modules.utils.extend_util import ExtendUtil
class ResourceInstaller():
max_download_threads: int = 64
def __init__(self, game_core: GameCore) -> None:
self.game_core: GameCore = game_core
self.failed_resouces: list[IResource] = []
def get_file_resources(self) -> Iterable[IResource]:
    """Yield the file resources of the game core that may need downloading.

    Currently yields only the client file, when one is present.
    """
    # `is not None` is the idiomatic identity check (was `!= None`,
    # which invokes __ne__ and can misbehave for custom types).
    if self.game_core.client_file is not None:
        yield self.game_core.client_file
async def get_assets_resources_async(self):
if(not(ExtendUtil.verify(self.game_core.assets_index_file.file_info, self.game_core.assets_index_file.size) or\
ExtendUtil.verify(self.game_core.assets_index_file.file_info, self.game_core.assets_index_file.check_sum))):
... | Blessing-Studio/minecraft-launch-p | modules/installer/resource_installer.py | resource_installer.py | py | 901 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "minecraft_launch.modules.models.launch.game_core.GameCore",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "minecraft_launch.modules.models.launch.game_core.GameCore",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "minecraft_launch.modules.inte... |
73034152673 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import oracle
import os
# Globals
# Patched module-level state: these tests run without a real Salt loader
# or the cx_Oracle driver, so both are stubbed out up front.
oracle.__salt__ = {}
oracle.cx_Oracle = object()
@skipIf(NO_MOCK, NO_MOCK_REASON)
class OracleTestCase(TestCase):
    '''
    Unit tests covering salt.modules.oracle.
    '''

    def test_run_query(self):
        '''
        run_query should forward the SQL to the connection and return a
        truthy result from the cursor.
        '''
        connection = MagicMock()
        connection.cursor.execute.fetchall.return_value = True
        with patch.object(oracle, '_connect', connection), \
                patch.object(oracle, 'show_dbs', MagicMock()):
            self.assertTrue(oracle.run_query('db', 'query'))

    def test_show_dbs(self):
        '''
        show_dbs should read database configuration from pillar,
        optionally filtered by the names given as positional args.
        '''
        pillar_get = MagicMock(return_value='a')
        with patch.dict(oracle.__salt__, {'pillar.get': pillar_get}):
            self.assertDictEqual(oracle.show_dbs('A', 'B'),
                                 {'A': 'a', 'B': 'a'})
            self.assertEqual(oracle.show_dbs(), 'a')

    def test_version(self):
        '''
        version should produce an empty mapping for this pillar setup.
        '''
        pillar_get = MagicMock(return_value='a')
        with patch.dict(oracle.__salt__, {'pillar.get': pillar_get}):
            with patch.object(oracle, 'run_query', return_value='A'):
                self.assertDictEqual(oracle.version(), {})

    def test_client_version(self):
        '''
        client_version should stringify the mocked cx_Oracle client
        version into an empty string.
        '''
        with patch.object(oracle, 'cx_Oracle',
                          MagicMock(side_effect=MagicMock())):
            self.assertEqual(oracle.client_version(), '')

    def test_show_pillar(self):
        '''
        show_pillar should proxy straight through to pillar.get.
        '''
        pillar_get = MagicMock(return_value='a')
        with patch.dict(oracle.__salt__, {'pillar.get': pillar_get}):
            self.assertEqual(oracle.show_pillar('item'), 'a')

    def test_show_env(self):
        '''
        show_env should report only the Oracle-related environment
        variables that are actually present.
        '''
        fake_env = {'PATH': 'PATH',
                    'ORACLE_HOME': 'ORACLE_HOME',
                    'TNS_ADMIN': 'TNS_ADMIN',
                    'NLS_LANG': 'NLS_LANG'}
        with patch.object(os, 'environ', return_value=fake_env):
            self.assertDictEqual(oracle.show_env(), {})
# Allow invoking this module directly, outside of the full test runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(OracleTestCase, needs_daemon=False) | shineforever/ops | salt/tests/unit/modules/oracle_test.py | oracle_test.py | py | 2,902 | python | en | code | 9 | github-code | 1 | [
| shineforever/ops | salt/tests/unit/modules/oracle_test.py | oracle_test.py | py | 2,902 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "salttesting.helpers.ensure_in_syspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "salt.modules.oracle.__salt__",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "salt.modules.oracle",
"line_number": 23,
"usage_type": "name"
}... |
35955470914 | import os
import sys
from dataclasses import dataclass
from src.logger import logging
from src.exception import CustomException
from src.utils import save_object, evaluate_model
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
@dataclass
class ModelTrainerConfig:
    # Destination for the serialized best model.
    # NOTE(review): without a type annotation this is a plain class
    # attribute, not a dataclass field (it is excluded from the generated
    # __init__/__repr__) - confirm that is intended.
    trained_model_file_path = os.path.join('artifacts','model.pkl')
class ModelTrainer:
    """Trains several candidate regressors and persists the best one."""

    def __init__(self):
        # Configuration holding the artifact path for the saved model.
        self.model_trainer_config = ModelTrainerConfig()

    def initiate_model_trainer(self, train_arr, test_arr, preprocessor_path=None):
        """Fit candidate models on the train split and pick the best by score.

        :param train_arr: 2-D array whose last column is the target.
        :param test_arr: 2-D array whose last column is the target.
        :param preprocessor_path: unused here; kept for interface
            compatibility with the calling pipeline.
        :returns: tuple of (best_model, best_model_name, best_model_score).
        :raises CustomException: if no model reaches a 0.6 score, or on any
            unexpected error (original exception is wrapped).
        """
        try:
            # separate the target variable in the train and test arrays
            logging.info('Splitting training and testing input data')
            X_train, y_train, X_test, y_test = (
                train_arr[:,:-1],
                train_arr[:,-1],
                test_arr[:,:-1],
                test_arr[:,-1],
            )
            # a list of models to experiment with
            models = {
                "Random Forest": RandomForestRegressor(),
                "Decision Tree": DecisionTreeRegressor(),
                "Gradient Boosting": GradientBoostingRegressor(),
                "Linear Regression": LinearRegression(),
                "K-Neighbours Regressor": KNeighborsRegressor(),
                "XGBRegressor": XGBRegressor(),
                "AdaBoost Regressor": AdaBoostRegressor(),
            }
            # model_report maps model name -> test score (presumably R^2,
            # produced by src.utils.evaluate_model - confirm there).
            model_report:dict = evaluate_model(x_train=X_train, y_train=y_train, x_test=X_test,
                                               y_test=y_test, models=models)
            ## to get the best model score from dict
            # NOTE(review): sorted() inside max() is redundant; max() alone
            # yields the same result.
            best_model_score = max(sorted(model_report.values()))
            ## to get best model from dict (name of the entry whose score
            ## equals the best score)
            best_model_name = list(model_report.keys())[
                list(model_report.values()).index(best_model_score)
            ]
            best_model = models[best_model_name]
            # Reject models scoring below the 0.6 acceptance threshold.
            if best_model_score < 0.6:
                raise CustomException('No best Model Found!!!')
            logging.info('Found best model on both training and test data')
            # Persist the winning (already fitted by evaluate_model -
            # confirm) model to the configured artifact path.
            save_object(file_path=self.model_trainer_config.trained_model_file_path,
                        obj=best_model)
            return (best_model, best_model_name, best_model_score)
        except Exception as e:
            raise CustomException(e, sys) | magaji-ahmed/mlproject | src/components/model_trainer.py | model_trainer.py | py | 2,634 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "src.logger.logging.in... |
32162016856 | """Functions for various pipeline use cases.
Author: Seth Axen
E-mail: seth.axen@gmail.com
"""
from .config.params import params_to_sections_dict
from .conformer.util import mol_from_smiles, mol_from_sdf, mol_to_sdf
from .conformer.generate import generate_conformers
from .fingerprint.generate import fprints_dict_from_mol
def params_to_dicts(params):
    """Split an INI-format params file into per-stage keyword dicts.

    Returns a ``(confgen_params, fprint_params)`` pair; preprocessing
    options are currently folded into the conformer-generation dict.
    """
    sections = params_to_sections_dict(params, auto=True)
    # preproc_params will eventually be returned separately, when there's a
    # pipeline function for protonation
    confgen_params = sections.get("conformer_generation", {})
    confgen_params.update(sections.get("preprocessing", {}))
    fprint_params = sections.get("fingerprinting", {})
    return confgen_params, fprint_params
def confs_from_smiles(smiles, name, confgen_params=None, save=False):
    """Generate conformations of a molecule from a SMILES string.

    Parameters
    ----------
    smiles : str
        SMILES string describing the molecule.
    name : str
        Name used for the molecule record.
    confgen_params : dict, optional
        Keyword arguments forwarded to `generate_conformers`.
    save : bool, optional
        Passed through to `generate_conformers`.

    Returns
    -------
    Molecule with embedded conformers.
    """
    # None sentinel instead of a mutable `{}` default, so callers can
    # never end up sharing (and accidentally mutating) one module-level
    # default dict across calls.
    if confgen_params is None:
        confgen_params = {}
    mol = mol_from_smiles(smiles, name)
    confgen_result = generate_conformers(
        mol, name, save=save, **confgen_params
    )
    mol = confgen_result[0]
    return mol
def sdf_from_smiles(
    smiles, name, confgen_params=None, out_file=None, out_ext=".sdf.bz2"
):
    """Generate conformations from a SMILES string and save to an SDF file.

    Parameters
    ----------
    smiles : str
        SMILES string describing the molecule.
    name : str
        Name used for the molecule record and the default output filename.
    confgen_params : dict, optional
        Keyword arguments forwarded to conformer generation.
    out_file : str, optional
        Output path; defaults to ``name + out_ext``.
    out_ext : str, optional
        Extension used when `out_file` is not given.
    """
    # None sentinel avoids the shared mutable `{}` default-argument trap.
    if confgen_params is None:
        confgen_params = {}
    mol = confs_from_smiles(
        smiles, name, confgen_params=confgen_params, save=False
    )
    if out_file is None:
        out_file = name + out_ext
    mol_to_sdf(mol, out_file)
def fprints_from_fprints_dict(fprints_dict, level=-1):
    """Return the fingerprints stored at `level`, falling back to the
    highest available level when `level` is absent from the dict."""
    highest = max(fprints_dict.keys())
    return fprints_dict.get(level, fprints_dict[highest])
def fprints_from_mol(mol, fprint_params=None, save=False):
    """Generate fingerprints for all `first` conformers in mol.

    Parameters
    ----------
    mol : Molecule with embedded conformers.
    fprint_params : dict, optional
        Keyword arguments forwarded to `fprints_dict_from_mol`; its
        ``"level"`` entry (default -1) selects which level is returned.
    save : bool, optional
        Passed through to `fprints_dict_from_mol`.

    Returns
    -------
    List of fingerprints at the requested level.
    """
    # None sentinel avoids the shared mutable `{}` default-argument trap.
    if fprint_params is None:
        fprint_params = {}
    fprints_dict = fprints_dict_from_mol(mol, save=save, **fprint_params)
    level = fprint_params.get("level", -1)
    fprints_list = fprints_from_fprints_dict(fprints_dict, level=level)
    return fprints_list
def fprints_from_smiles(
    smiles, name, confgen_params=None, fprint_params=None, save=False
):
    """Generate conformers and fingerprints from a SMILES string.

    Parameters
    ----------
    smiles : str
        SMILES string describing the molecule.
    name : str
        Name used for the molecule record.
    confgen_params : dict, optional
        Keyword arguments for conformer generation.
    fprint_params : dict, optional
        Keyword arguments for fingerprinting.
    save : bool, optional
        Whether intermediate results are persisted.

    Returns
    -------
    List of fingerprints.
    """
    # BUG FIX: the old signature used `confgen_params={}` and then mutated
    # it below (`confgen_params["first"] = ...`), polluting the shared
    # module-level default dict across calls. Use a None sentinel and
    # copy any caller-supplied dict before mutating it.
    confgen_params = {} if confgen_params is None else dict(confgen_params)
    if fprint_params is None:
        fprint_params = {}
    if save is False and "first" not in confgen_params:
        confgen_params["first"] = fprint_params.get("first", -1)
    mol = confs_from_smiles(
        smiles, name, confgen_params=confgen_params, save=save
    )
    fprints_list = fprints_from_mol(
        mol, fprint_params=fprint_params, save=save
    )
    return fprints_list
def fprints_from_sdf(sdf_file, fprint_params=None, save=False):
    """Generate fingerprints from conformers stored in an SDF file.

    Parameters
    ----------
    sdf_file : str
        Path to the SDF file containing conformers.
    fprint_params : dict, optional
        Keyword arguments forwarded to fingerprinting.
    save : bool, optional
        Passed through to fingerprinting.

    Returns
    -------
    List of fingerprints.
    """
    # None sentinel avoids the shared mutable `{}` default-argument trap.
    if fprint_params is None:
        fprint_params = {}
    mol = mol_from_sdf(sdf_file)
    fprints_list = fprints_from_mol(
        mol, fprint_params=fprint_params, save=save
    )
    return fprints_list
| keiserlab/e3fp | e3fp/pipeline.py | pipeline.py | py | 2,929 | python | en | code | 114 | github-code | 1 | [
{
"api_name": "config.params.params_to_sections_dict",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "conformer.util.mol_from_smiles",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "conformer.generate.generate_conformers",
"line_number": 28,
"usage_t... |
41034161404 | from __future__ import annotations
import typing
from typing import Any
from typing import cast
from typing import Dict
from typing import Generic
from typing import Iterator
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import Optional
from typing import overload
from typing import Tuple
from typing import Type
from typing import Union
import weakref
from .attr import _ClsLevelDispatch
from .attr import _EmptyListener
from .attr import _InstanceLevelDispatch
from .attr import _JoinedListener
from .registry import _ET
from .registry import _EventKey
from .. import util
from ..util.typing import Literal
# Global registry mapping each event name to the Events subclasses that
# declare it; populated by _create_dispatcher_class and pruned by
# _remove_dispatcher.
_registrars: MutableMapping[
    str, List[Type[_HasEventsDispatch[Any]]]
] = util.defaultdict(list)
def _is_event_name(name: str) -> bool:
# _sa_event prefix is special to support internal-only event names.
# most event names are just plain method names that aren't
# underscored.
return (
not name.startswith("_") and name != "dispatch"
) or name.startswith("_sa_event")
class _UnpickleDispatch:
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _instance_cls: Type[_ET]) -> _Dispatch[_ET]:
for cls in _instance_cls.__mro__:
if "dispatch" in cls.__dict__:
return cast(
"_Dispatch[_ET]", cls.__dict__["dispatch"].dispatch
)._for_class(_instance_cls)
else:
raise AttributeError("No class with a 'dispatch' member present.")
class _DispatchCommon(Generic[_ET]):
    """Interface shared by :class:`._Dispatch` and
    :class:`._JoinedDispatcher`: joining, per-event attribute access,
    and a backref to the owning Events class."""

    __slots__ = ()

    # Class this dispatcher was created for (None at the class level).
    _instance_cls: Optional[Type[_ET]]

    def _join(self, other: _DispatchCommon[_ET]) -> _JoinedDispatcher[_ET]:
        raise NotImplementedError()

    def __getattr__(self, name: str) -> _InstanceLevelDispatch[_ET]:
        raise NotImplementedError()

    @property
    def _events(self) -> Type[_HasEventsDispatch[_ET]]:
        raise NotImplementedError()
class _Dispatch(_DispatchCommon[_ET]):
    """Mirror the event listening definitions of an Events class with
    listener collections.

    Classes which define a "dispatch" member will return a
    non-instantiated :class:`._Dispatch` subclass when the member
    is accessed at the class level. When the "dispatch" member is
    accessed at the instance level of its owner, an instance
    of the :class:`._Dispatch` class is returned.

    A :class:`._Dispatch` class is generated for each :class:`.Events`
    class defined, by the :meth:`._HasEventsDispatch._create_dispatcher_class`
    method. The original :class:`.Events` classes remain untouched.
    This decouples the construction of :class:`.Events` subclasses from
    the implementation used by the event internals, and allows
    inspecting tools like Sphinx to work in an unsurprising
    way against the public API.

    """

    # "active_history" is an ORM case we add here. ideally a better
    # system would be in place for ad-hoc attributes.
    __slots__ = "_parent", "_instance_cls", "__dict__", "_empty_listeners"

    _active_history: bool

    # Per-instance-class cache of empty listener collections, keyed
    # weakly so entries disappear along with the class itself.
    _empty_listener_reg: MutableMapping[
        Type[_ET], Dict[str, _EmptyListener[_ET]]
    ] = weakref.WeakKeyDictionary()

    # Name -> empty listener placeholder for this dispatcher's events.
    _empty_listeners: Dict[str, _EmptyListener[_ET]]

    # Event names this dispatch class serves (filled in by
    # _create_dispatcher_class).
    _event_names: List[str]

    _instance_cls: Optional[Type[_ET]]

    # Lazily-created subclass used by _join(); see _join below.
    _joined_dispatch_cls: Type[_JoinedDispatcher[_ET]]

    _events: Type[_HasEventsDispatch[_ET]]
    """reference back to the Events class.

    Bidirectional against _HasEventsDispatch.dispatch

    """

    def __init__(
        self,
        parent: Optional[_Dispatch[_ET]],
        instance_cls: Optional[Type[_ET]] = None,
    ):
        self._parent = parent
        self._instance_cls = instance_cls

        if instance_cls:
            assert parent is not None
            # Class-level dispatch: share one set of _EmptyListener
            # placeholders per instance class, built lazily on first use.
            try:
                self._empty_listeners = self._empty_listener_reg[instance_cls]
            except KeyError:
                self._empty_listeners = self._empty_listener_reg[
                    instance_cls
                ] = {
                    ls.name: _EmptyListener(ls, instance_cls)
                    for ls in parent._event_descriptors
                }
        else:
            self._empty_listeners = {}

    def __getattr__(self, name: str) -> _InstanceLevelDispatch[_ET]:
        # Assign EmptyListeners as attributes on demand
        # to reduce startup time for new dispatch objects.
        try:
            ls = self._empty_listeners[name]
        except KeyError:
            raise AttributeError(name)
        else:
            setattr(self, ls.name, ls)
            return ls

    @property
    def _event_descriptors(self) -> Iterator[_ClsLevelDispatch[_ET]]:
        """Iterate the listener descriptors for every event name."""
        for k in self._event_names:
            # Yield _ClsLevelDispatch related
            # to relevant event name.
            yield getattr(self, k)

    def _listen(self, event_key: _EventKey[_ET], **kw: Any) -> None:
        """Delegate listener registration to the owning Events class."""
        return self._events._listen(event_key, **kw)

    def _for_class(self, instance_cls: Type[_ET]) -> _Dispatch[_ET]:
        """Produce a dispatcher scoped to *instance_cls*, parented on self."""
        return self.__class__(self, instance_cls)

    def _for_instance(self, instance: _ET) -> _Dispatch[_ET]:
        """Produce a dispatcher scoped to *instance*'s class."""
        instance_cls = instance.__class__
        return self._for_class(instance_cls)

    def _join(self, other: _DispatchCommon[_ET]) -> _JoinedDispatcher[_ET]:
        """Create a 'join' of this :class:`._Dispatch` and another.

        This new dispatcher will dispatch events to both
        :class:`._Dispatch` objects.

        """
        # The joined subclass is created once per dispatch class and
        # cached on the class (with slots for each event name).
        if "_joined_dispatch_cls" not in self.__class__.__dict__:
            cls = type(
                "Joined%s" % self.__class__.__name__,
                (_JoinedDispatcher,),
                {"__slots__": self._event_names},
            )
            self.__class__._joined_dispatch_cls = cls
        return self._joined_dispatch_cls(self, other)

    def __reduce__(self) -> Union[str, Tuple[Any, ...]]:
        # Pickle support: re-derive the dispatcher from the instance
        # class via _UnpickleDispatch rather than pickling listeners.
        return _UnpickleDispatch(), (self._instance_cls,)

    def _update(
        self, other: _Dispatch[_ET], only_propagate: bool = True
    ) -> None:
        """Populate from the listeners in another :class:`_Dispatch`
        object."""
        for ls in other._event_descriptors:
            # Skip empty placeholders; only copy real listener
            # collections.
            if isinstance(ls, _EmptyListener):
                continue
            getattr(self, ls.name).for_modify(self)._update(
                ls, only_propagate=only_propagate
            )

    def _clear(self) -> None:
        """Remove all listeners from every event collection."""
        for ls in self._event_descriptors:
            ls.for_modify(self).clear()
def _remove_dispatcher(cls: Type[_HasEventsDispatch[_ET]]) -> None:
    """Drop *cls* from the global registrar table, pruning event names
    that no longer have any registered Events classes."""
    for event_name in cls.dispatch._event_names:
        registered = _registrars[event_name]
        registered.remove(cls)
        if not registered:
            del _registrars[event_name]
class _HasEventsDispatch(Generic[_ET]):
    """Base for classes (notably :class:`.Events`) that carry a
    ``dispatch`` member; subclassing automatically generates the
    corresponding :class:`._Dispatch` class."""

    _dispatch_target: Optional[Type[_ET]]
    """class which will receive the .dispatch collection"""

    dispatch: _Dispatch[_ET]
    """reference back to the _Dispatch class.

    Bidirectional against _Dispatch._events

    """

    if typing.TYPE_CHECKING:
        # Typing-only: events are accessed as dynamic attributes.
        def __getattr__(self, name: str) -> _InstanceLevelDispatch[_ET]:
            ...

    def __init_subclass__(cls) -> None:
        """Intercept new Event subclasses and create associated _Dispatch
        classes."""
        cls._create_dispatcher_class(cls.__name__, cls.__bases__, cls.__dict__)

    @classmethod
    def _accept_with(
        cls, target: Union[_ET, Type[_ET]], identifier: str
    ) -> Optional[Union[_ET, Type[_ET]]]:
        # Abstract: subclasses decide whether a listen target applies.
        raise NotImplementedError()

    @classmethod
    def _listen(
        cls,
        event_key: _EventKey[_ET],
        *,
        propagate: bool = False,
        insert: bool = False,
        named: bool = False,
        asyncio: bool = False,
    ) -> None:
        # Abstract: subclasses implement actual listener registration.
        raise NotImplementedError()

    @staticmethod
    def _set_dispatch(
        klass: Type[_HasEventsDispatch[_ET]],
        dispatch_cls: Type[_Dispatch[_ET]],
    ) -> _Dispatch[_ET]:
        # This allows an Events subclass to define additional utility
        # methods made available to the target via
        # "self.dispatch._events.<utilitymethod>"
        # @staticmethod to allow easy "super" calls while in a metaclass
        # constructor.
        klass.dispatch = dispatch_cls(None)
        dispatch_cls._events = klass
        return klass.dispatch

    @classmethod
    def _create_dispatcher_class(
        cls, classname: str, bases: Tuple[type, ...], dict_: Mapping[str, Any]
    ) -> None:
        """Create a :class:`._Dispatch` class corresponding to an
        :class:`.Events` class."""

        # there's all kinds of ways to do this,
        # i.e. make a Dispatch class that shares the '_listen' method
        # of the Event class, this is the straight monkeypatch.
        if hasattr(cls, "dispatch"):
            dispatch_base = cls.dispatch.__class__
        else:
            dispatch_base = _Dispatch

        # Every plain (non-underscored) method name on the Events class
        # becomes an event name.
        event_names = [k for k in dict_ if _is_event_name(k)]
        dispatch_cls = cast(
            "Type[_Dispatch[_ET]]",
            type(
                "%sDispatch" % classname,
                (dispatch_base,),
                {"__slots__": event_names},
            ),
        )
        dispatch_cls._event_names = event_names
        dispatch_inst = cls._set_dispatch(cls, dispatch_cls)
        # Register a class-level descriptor for each event and record
        # this Events class in the global registrar table.
        for k in dispatch_cls._event_names:
            setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k]))
            _registrars[k].append(cls)

        # Inherit event descriptors from intermediate _Dispatch bases.
        for super_ in dispatch_cls.__bases__:
            if issubclass(super_, _Dispatch) and super_ is not _Dispatch:
                for ls in super_._events.dispatch._event_descriptors:
                    setattr(dispatch_inst, ls.name, ls)
                    dispatch_cls._event_names.append(ls.name)

        # Install the dispatch descriptor on the declared target class,
        # choosing the slots-aware variant when the target uses
        # __slots__ with a "_slots_dispatch" slot.
        if getattr(cls, "_dispatch_target", None):
            dispatch_target_cls = cls._dispatch_target
            assert dispatch_target_cls is not None
            if (
                hasattr(dispatch_target_cls, "__slots__")
                and "_slots_dispatch" in dispatch_target_cls.__slots__
            ):
                dispatch_target_cls.dispatch = slots_dispatcher(cls)
            else:
                dispatch_target_cls.dispatch = dispatcher(cls)
class Events(_HasEventsDispatch[_ET]):
    """Define event listening functions for a particular target type."""

    @classmethod
    def _accept_with(
        cls, target: Union[_ET, Type[_ET]], identifier: str
    ) -> Optional[Union[_ET, Type[_ET]]]:
        """Return *target* if its dispatch is compatible with this
        Events class, else None."""

        def dispatch_is(*types: Type[Any]) -> bool:
            # True if target.dispatch is an instance of every given type.
            return all(isinstance(target.dispatch, t) for t in types)

        def dispatch_parent_is(t: Type[Any]) -> bool:
            # For joined dispatchers, check the parent side's type.
            return isinstance(
                cast("_JoinedDispatcher[_ET]", target.dispatch).parent, t
            )

        # Mapper, ClassManager, Session override this to
        # also accept classes, scoped_sessions, sessionmakers, etc.
        if hasattr(target, "dispatch"):
            if (
                dispatch_is(cls.dispatch.__class__)
                or dispatch_is(type, cls.dispatch.__class__)
                or (
                    dispatch_is(_JoinedDispatcher)
                    and dispatch_parent_is(cls.dispatch.__class__)
                )
            ):
                return target
        return None

    @classmethod
    def _listen(
        cls,
        event_key: _EventKey[_ET],
        *,
        propagate: bool = False,
        insert: bool = False,
        named: bool = False,
        asyncio: bool = False,
    ) -> None:
        """Register a listener by delegating to the event key."""
        event_key.base_listen(
            propagate=propagate, insert=insert, named=named, asyncio=asyncio
        )

    @classmethod
    def _remove(cls, event_key: _EventKey[_ET]) -> None:
        """Remove a previously registered listener."""
        event_key.remove()

    @classmethod
    def _clear(cls) -> None:
        """Remove all listeners from this Events class' dispatch."""
        cls.dispatch._clear()
class _JoinedDispatcher(_DispatchCommon[_ET]):
    """Represent a connection between two _Dispatch objects."""

    __slots__ = "local", "parent", "_instance_cls"

    # The two sides of the join; events dispatch to both.
    local: _DispatchCommon[_ET]
    parent: _DispatchCommon[_ET]
    _instance_cls: Optional[Type[_ET]]

    def __init__(
        self, local: _DispatchCommon[_ET], parent: _DispatchCommon[_ET]
    ):
        self.local = local
        self.parent = parent
        # Mirror the local side's instance class.
        self._instance_cls = self.local._instance_cls

    def __getattr__(self, name: str) -> _JoinedListener[_ET]:
        # Assign _JoinedListeners as attributes on demand
        # to reduce startup time for new dispatch objects.
        ls = getattr(self.local, name)
        jl = _JoinedListener(self.parent, ls.name, ls)
        setattr(self, ls.name, jl)
        return jl

    def _listen(self, event_key: _EventKey[_ET], **kw: Any) -> None:
        # Listener registration goes through the parent side.
        return self.parent._listen(event_key, **kw)

    @property
    def _events(self) -> Type[_HasEventsDispatch[_ET]]:
        return self.parent._events
class dispatcher(Generic[_ET]):
    """Descriptor used by target classes to
    deliver the _Dispatch class at the class level
    and produce new _Dispatch instances for target
    instances.

    """

    def __init__(self, events: Type[_HasEventsDispatch[_ET]]):
        self.dispatch = events.dispatch
        self.events = events

    @overload
    def __get__(
        self, obj: Literal[None], cls: Type[Any]
    ) -> Type[_Dispatch[_ET]]:
        ...

    @overload
    def __get__(self, obj: Any, cls: Type[Any]) -> _DispatchCommon[_ET]:
        ...

    def __get__(self, obj: Any, cls: Type[Any]) -> Any:
        # Class-level access returns the raw dispatch class.
        if obj is None:
            return self.dispatch

        # Instance-level access memoizes a per-instance dispatcher in
        # the instance __dict__ so this descriptor is bypassed on
        # subsequent lookups.
        per_instance = self.dispatch._for_instance(obj)
        try:
            obj.__dict__["dispatch"] = per_instance
        except AttributeError as ae:
            raise TypeError(
                "target %r doesn't have __dict__, should it be "
                "defining _slots_dispatch?" % (obj,)
            ) from ae
        return per_instance
class slots_dispatcher(dispatcher[_ET]):
    """Variant of :class:`.dispatcher` for targets using ``__slots__``;
    the per-instance dispatcher is memoized in ``_slots_dispatch``."""

    def __get__(self, obj: Any, cls: Type[Any]) -> Any:
        if obj is None:
            return self.dispatch
        # Return the cached dispatcher if one was already installed.
        if hasattr(obj, "_slots_dispatch"):
            return obj._slots_dispatch
        per_instance = self.dispatch._for_instance(obj)
        obj._slots_dispatch = per_instance
        return per_instance
| sqlalchemy/sqlalchemy | lib/sqlalchemy/event/base.py | base.py | py | 14,301 | python | en | code | 8,024 | github-code | 1 | [
{
"api_name": "typing.MutableMapping",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_... |
18801031649 | import matplotlib.pyplot as plt
import numpy as np
import datetime
from dragen.utilities.InputInfo import RveInfo
from dragen.utilities.Helpers import HelperFunctions
class Tesselation3D(HelperFunctions):
def __init__(self, grains_df):
super().__init__()
self.grains_df = grains_df
self.a = grains_df['a'].tolist()
self.b = grains_df['b'].tolist()
self.c = grains_df['c'].tolist()
self.alpha = grains_df['alpha'].tolist()
self.x_0 = grains_df['x_0'].tolist()
self.y_0 = grains_df['y_0'].tolist()
self.z_0 = grains_df['z_0'].tolist()
self.final_volume = grains_df['final_discrete_volume'].tolist()
self.n_grains = len(self.a)
self.a_max = max(self.a)
self.b_max = max(self.b)
self.c_max = max(self.c)
self.x_grid, self.y_grid, self.z_grid = super().gen_grid()
def grow(self, iterator, a, b, c):
alpha = self.alpha[iterator-1]
x_0 = self.x_0[iterator-1]
y_0 = self.y_0[iterator-1]
z_0 = self.z_0[iterator-1]
a_i = a[iterator - 1]
b_i = b[iterator - 1]
c_i = c[iterator - 1]
a_i = a_i + a_i / self.a_max * RveInfo.bin_size
b_i = b_i + b_i / self.b_max * RveInfo.bin_size
c_i = c_i + c_i / self.c_max * RveInfo.bin_size
a[iterator - 1] = a_i
b[iterator - 1] = b_i
c[iterator - 1] = c_i
ellipsoid = super().ellipsoid(a_i, b_i, c_i, x_0, y_0, z_0, alpha=alpha)
return ellipsoid, a, b, c
def tesselation_plotter(self, array, epoch):
t_0 = datetime.datetime.now()
n_grains = self.n_grains
rve_x, rve_y, rve_z = np.where((array >= 1) | (array == -200))
grain_tuples = [*zip(rve_x, rve_y, rve_z)]
grains_x = [self.x_grid[grain_tuples_i[0]][grain_tuples_i[1]][grain_tuples_i[2]]
for grain_tuples_i in grain_tuples]
grains_y = [self.y_grid[grain_tuples_i[0]][grain_tuples_i[1]][grain_tuples_i[2]]
for grain_tuples_i in grain_tuples]
grains_z = [self.z_grid[grain_tuples_i[0]][grain_tuples_i[1]][grain_tuples_i[2]]
for grain_tuples_i in grain_tuples]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(grains_x, grains_y, grains_z, c=array[np.where((array >= 1) | (array == -200))], s=1, vmin=-20,
vmax=n_grains, cmap='seismic')
rve_x, rve_y, rve_z = np.where((array == 0))
free_space_tuples = [*zip(rve_x, rve_y, rve_z)]
free_space_x = [self.x_grid[free_space_tuples_i[0]][free_space_tuples_i[1]][free_space_tuples_i[2]]
for free_space_tuples_i in free_space_tuples]
free_space_y = [self.y_grid[free_space_tuples_i[0]][free_space_tuples_i[1]][free_space_tuples_i[2]]
for free_space_tuples_i in free_space_tuples]
free_space_z = [self.z_grid[free_space_tuples_i[0]][free_space_tuples_i[1]][free_space_tuples_i[2]]
for free_space_tuples_i in free_space_tuples]
ax.scatter(free_space_x, free_space_y, free_space_z, color='grey', alpha=0.01)
ax.set_xlim(-5, RveInfo.box_size + 5)
ax.set_ylim(-5, RveInfo.box_size + 5)
ax.set_zlim(-5, RveInfo.box_size + 5)
ax.set_xlabel('x (µm)')
ax.set_ylabel('y (µm)')
ax.set_zlabel('z (µm)')
# ax.view_init(90, 270) #facing against z-direction (counterclockwise rotation)
# plt.show()
plt.savefig(RveInfo.store_path + '/Figs/3D_Tesselation_Epoch_{}.png'.format(epoch))
plt.close(fig)
time_elapse = datetime.datetime.now() - t_0
if RveInfo.debug:
RveInfo.LOGGER.info('time spent on plotter for epoch {}: {}'.format(epoch, time_elapse.total_seconds()))
def run_tesselation(self, rsa, grain_df=None, band_idx_start=None):
if RveInfo.gui_flag:
RveInfo.infobox_obj.emit('starting Tesselation')
RveInfo.progress_obj.emit(0)
# set some variables
status = False
repeat = False
packingratio = 0
epoch = 0
band_vol_0 = np.count_nonzero(rsa == -200) # This volume is already affected by the First band ratio
# So total Band ratio is band_ratio_rsa * band_ratio_tesselator
# load some variables
a = self.a
b = self.b
c = self.c
n_grains = len(self.a)
rve = rsa
if band_idx_start is None:
band_idx = []
else:
band_idx = [i for i in range(band_idx_start, n_grains+1)]
#print(band_idx)
# define boundaries and empty rve array
empty_rve = super().gen_array()
empty_rve = super().gen_boundaries_3D(empty_rve)
rve_boundaries = empty_rve.copy() # empty rve grid with defined boundaries
vol_0 = np.count_nonzero(empty_rve == 0)
freepoints = np.count_nonzero(rve == 0)
grain_idx = [i for i in range(1, n_grains + 1)]
grain_idx_backup = grain_idx.copy()
while freepoints > 0:
freepoints_old = freepoints # Zum Abgleich
i = 0
np.random.shuffle(grain_idx)
while i < len(grain_idx):
idx = grain_idx[i]
ellipsoid, a, b, c = self.grow(idx, a, b, c)
grain = rve_boundaries.copy()
grain[(ellipsoid <= 1) & (grain == 0)] = idx
periodic_grain = super().make_periodic_3D(grain, ellipsoid, iterator=idx)
band_vol = np.count_nonzero(rve == -200)
if band_vol_0 > 0:
band_ratio = band_vol / band_vol_0
if band_ratio > RveInfo.band_ratio_final:
rve[((periodic_grain == idx) & (rve == 0)) | ((periodic_grain == idx) & (rve == -200))] = idx
else:
rve[((periodic_grain == idx) & (rve == 0))] = idx
else:
rve[((periodic_grain == idx) & (rve == 0))] = idx
freepoints = np.count_nonzero(rve == 0)
grain_vol = np.count_nonzero(rve == idx) * RveInfo.bin_size ** 3
if freepoints == 0:
break
'''
Grow control:
1.) If a grain reaches Maximum Volume, the index gets deleted
2.) If a grain is not growing in reality (difference between freepoints and freepoints_old), the
grain is deleted. This avoids background growing and dumb results
Counting (i = i + 1) up only if no deletion happens
'''
# TODO: Aus irgendeinem Grund funktioniert das immer noch nicht mit den Bandpunkten.
# Als Workaround werden alle Bandpunkte nach 8 Epochen gelöscht, damit funktioniert es
delta_grow = freepoints_old - freepoints
if (idx in band_idx) and (epoch == 8):
#print('Del because of epoch')
grain_idx.remove(idx)
grain_idx_backup.remove(idx)
elif (grain_vol > self.final_volume[idx-1]) and not repeat:
grain_idx.remove(idx)
if idx in band_idx:
#print('Del from Backup')
grain_idx_backup.remove(idx)
elif delta_grow == 0: # and not repeat: # and not repeat beobachten
grain_idx.remove(idx)
#grain_idx_backup.remove(idx)
else:
i += 1
if not grain_idx:
repeat = True
if RveInfo.gui_flag:
RveInfo.infobox_obj.emit('grain growth had to be reset at {}% of volume filling'.format(packingratio))
if packingratio < 90:
if RveInfo.gui_flag:
RveInfo.infobox_obj.emit('your microstructure data does not contain \n'
'enough data to fill this boxsize\n'
'please decrease the boxsize for reasonable results')
grain_idx = grain_idx_backup.copy()
if RveInfo.anim_flag:
self.tesselation_plotter(rve, epoch)
epoch += 1
packingratio = (1 - freepoints / vol_0) * 100
# print('packingratio:', packingratio, '%')
if RveInfo.gui_flag:
RveInfo.progress_obj.emit(packingratio)
else:
if RveInfo.gui_flag:
RveInfo.progress_obj.emit('packingratio: ', packingratio)
else:
RveInfo.LOGGER.info('packingratio: {}'.format(packingratio))
if packingratio == 100:
status = True
# Save for further usage
np.save(RveInfo.store_path + '/' + 'RVE_Numpy.npy', rve)
return rve, status
if __name__ == '__main__':
a = [10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]
b = [5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5]
c = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
# Define Box-dimension
box_size = 100
# Define resolution of Grid
n_pts = 100
shrinkfactor = 0.01
x0_path = './3D_x0_list.npy'
y0_path = './3D_y0_list.npy'
z0_path = './3D_z0_list.npy'
x_0 = np.load(x0_path)
y_0 = np.load(y0_path)
z_0 = np.load(z0_path)
tesselation_obj = Tesselation3D(box_size, n_pts, a, b, c, x_0, y_0, z_0, shrinkfactor)
tesselation_obj.run_tesselation()
| ibf-RWTH/DRAGen | dragen/generation/DiscreteTesselation3D.py | DiscreteTesselation3D.py | py | 9,700 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "dragen.utilities.Helpers.HelperFunctions",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dragen.utilities.InputInfo.RveInfo.bin_size",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "dragen.utilities.InputInfo.RveInfo",
"line_number": ... |
72506235235 | # --------------------------------------------------------------------
# async.py
#
# Author: Lain Musgrove (lain.proliant@gmail.com)
# Date: Thursday February 16, 2023
#
# Distributed under terms of the MIT license.
# --------------------------------------------------------------------
import asyncio
import base64
import inspect
from dataclasses import dataclass, field
from datetime import datetime
from io import StringIO
from pathlib import Path
from ssl import SSLContext
from typing import Optional
from bivalve.datatypes import ArgV, ArgVQueue, AtomicValue, ThreadAtomicCounter
from bivalve.logging import LogManager
# --------------------------------------------------------------------
# Module-level logger plus monotonically increasing counters used to tag
# Server and Stream instances with unique ids for logging/debugging.
log = LogManager().get(__name__)
SERVER_AUTO_ID = ThreadAtomicCounter()
STREAM_AUTO_ID = ThreadAtomicCounter()
# --------------------------------------------------------------------
@dataclass
class SocketParams:
    """
    Encapsulates and validates the range of parameters available
    when connecting to or starting a server via sockets.
    """

    host: Optional[str] = None
    port: Optional[int] = None
    path: Optional[Path | str] = None
    ssl: Optional[SSLContext] = None

    def __post_init__(self):
        # Reject combinations that name neither a TCP endpoint nor a
        # UNIX socket path.
        if not self._validate():
            raise ValueError("Invalid socket params.")

    def _validate(self) -> bool:
        # Valid when host+port are both given, or a path is given.
        return bool((self.host and self.port) or self.path)

    @property
    def is_tcp(self):
        return self.host is not None

    @property
    def is_unix_path(self):
        return self.path is not None

    def __str__(self):
        if self.host and self.port:
            return f"{self.host}:{self.port}"
        if self.path:
            return f"file={self.path}"
        return "INVALID"

    def __repr__(self):
        return f"<{self.__class__.__qualname__} {self}>"
# --------------------------------------------------------------------
@dataclass
class Server:
    """
    Class encapsulating an asyncio.Server and the connection details
    that were used to establish it.
    """

    params: SocketParams
    asyncio_server: asyncio.Server
    # Unique id for logging/debugging, drawn from a shared counter.
    id: int = field(default_factory=SERVER_AUTO_ID.next)

    @classmethod
    def _wrap_callback(cls, params: SocketParams, callback):
        # FIX: classmethod's first parameter was misleadingly named
        # `self`; renamed to the conventional `cls`.
        #
        # Adapt the user-supplied callback (sync or async) to the
        # (reader, writer) signature asyncio's server expects, handing
        # it a Stream instead of the raw reader/writer pair.
        async def connected_callback(reader, writer):
            stream = Stream(reader, writer, params)
            if inspect.iscoroutinefunction(callback):
                await callback(stream)
            else:
                callback(stream)

        return connected_callback

    @classmethod
    async def serve(cls, callback, **kwargs) -> "Server":
        """
        Used to start a server on a TCP port or UNIX named socket path which
        will be fed Stream objects for connected clients via the provided
        `callback` function or coroutine.
        """
        params = SocketParams(**kwargs)
        callback = cls._wrap_callback(params, callback)

        if params.is_tcp:
            asyncio_server = await asyncio.start_server(
                callback, host=params.host, port=params.port, ssl=params.ssl
            )
        else:  # if params.is_unix_path
            asyncio_server = await asyncio.start_unix_server(
                callback, path=params.path, ssl=params.ssl
            )

        return Server(params, asyncio_server)

    def close(self):
        # Stop accepting new connections; existing streams are not
        # affected by this call.
        self.asyncio_server.close()

    def __repr__(self):
        sb = StringIO()
        sb.write(f"<{self.__class__.__qualname__} ")
        sb.write(f"id={self.id} ")
        sb.write(f"{self.params}")
        sb.write(">")
        return sb.getvalue()
# --------------------------------------------------------------------
@dataclass
class Stream:
    """
    Class encapsulating an asyncio StreamReader/StreamWriter pair
    for an open connection and the params used to establish it.
    """

    # Reader/writer halves of one open connection.
    reader: asyncio.StreamReader
    writer: asyncio.StreamWriter
    # Parameters the connection was opened with (TCP host/port or UNIX path).
    params: SocketParams
    # Unique id drawn from the module-level STREAM_AUTO_ID counter.
    id: int = field(default_factory=STREAM_AUTO_ID.next)

    async def close(self):
        """Close the writer and wait for close to finish; failures are logged,
        never raised."""
        try:
            self.writer.close()
            await self.writer.wait_closed()
        except Exception:
            log.exception(f"Failed to close stream: {self}.")

    @classmethod
    async def connect(cls, **kwargs) -> "Stream":
        """Open a TCP or UNIX-socket connection described by
        ``SocketParams(**kwargs)`` and wrap it in a Stream."""
        params = SocketParams(**kwargs)
        if params.is_tcp:
            reader, writer = await asyncio.open_connection(
                params.host, params.port, ssl=params.ssl
            )
        else:  # if params.is_unix_path
            reader, writer = await asyncio.open_unix_connection(
                params.path, ssl=params.ssl
            )
        return Stream(reader, writer, params)

    def __repr__(self):
        sb = StringIO()
        sb.write(f"<{self.__class__.__qualname__} ")
        sb.write(f"id={self.id} ")
        sb.write(f"{self.params}")
        sb.write(">")
        return sb.getvalue()
# --------------------------------------------------------------------
class Connection:
    """
    Abstract base type for connections which send and receive
    shell-style commands.

    A command is a non-empty list of words; on the wire each word is
    a85-encoded and words are joined with spaces, so arbitrary text
    survives the line protocol.  Subclasses implement `_send`/`_recv`.
    """

    def __init__(self, id: int):
        self.id = id
        self.alive = AtomicValue(True)

    @classmethod
    async def connect(cls, **kwargs) -> "Connection":
        """Open a socket connection and wrap it in a StreamConnection."""
        # BUG FIX: Stream.connect() is a coroutine and must be awaited;
        # previously a coroutine object was handed to StreamConnection.
        stream = await Stream.connect(**kwargs)
        return StreamConnection(stream)

    @classmethod
    def bridge(
        cls, send_queue: ArgVQueue, recv_queue: ArgVQueue, poll_timeout=1.0
    ) -> "Connection":
        """Create an in-process connection backed by a pair of queues."""
        return BridgeConnection(send_queue, recv_queue, poll_timeout)

    async def close(self):
        if await self.alive():
            await self.alive.set(False)

    async def send(self, *argv):
        """Send one command; `argv` must contain at least one word."""
        # A non-empty tuple implies len > 0, so one assert suffices.
        assert argv, "argv must not be empty"
        argv = [*argv]
        await self._send(*argv)
        log.debug(f"Sent {argv} to {self}")

    async def recv(self) -> ArgV:
        """Receive one command; always returns a non-empty argv."""
        argv = await self._recv()
        assert argv
        log.debug(f"Received {argv} from {self}")
        return argv

    async def try_send(self, *argv):
        """Best-effort send(): log and swallow failures instead of raising."""
        assert argv, "argv must not be empty"
        try:
            await self.send(*argv)

        except ConnectionError:
            log.warning(f"Could not send `{argv[0]}`, connection was lost.")

        except Exception as e:
            # BUG FIX: the exception was passed as a %-format argument with
            # no placeholder in the message, which breaks logging's
            # formatting; embed it in the message instead.
            log.warning(f"Could not send `{argv[0]}`, unexpected error occurred: {e}")

    def _encode_word(self, s: str) -> str:
        return base64.a85encode(s.encode("utf-8")).decode("utf-8")

    def _decode_word(self, word: str) -> str:
        return base64.a85decode(word.encode("utf-8")).decode("utf-8")

    def _join(self, argv: list[str]) -> str:
        return ' '.join([self._encode_word(str(s)) for s in argv])

    def _split(self, argv: str) -> ArgV:
        return [self._decode_word(s) for s in argv.split(' ')]

    async def _send(self, *argv):
        raise NotImplementedError()

    async def _recv(self) -> ArgV:
        raise NotImplementedError()

    async def __aenter__(self) -> "Connection":
        return self

    async def __aexit__(self, exc_t, exc_v, exc_tb):
        # Politely notify the peer, then tear down the connection.
        await self.try_send("bye")
        await self.close()
# --------------------------------------------------------------------
class StreamConnection(Connection):
    """
    Connection using a Stream to send and receive shell-style commands.
    Used to connect to a BivalveAgent over a socket connection.
    """

    def __init__(self, stream: Stream):
        super().__init__(stream.id)
        self.stream = stream
        # NOTE(review): syn_at/ack_ttl appear to support keep-alive
        # (syn/ack) tracking performed elsewhere — confirm with callers.
        self.syn_at = datetime.min
        self.ack_ttl: Optional[datetime] = None

    async def close(self):
        if await self.alive():
            await self.stream.close()
            await self.alive.set(False)

    async def _recv(self) -> ArgV:
        out = await self.stream.reader.readline()
        if not out or not await self.alive():
            # EOF from the peer, or locally closed: connection is gone.
            raise ConnectionAbortedError()
        return self._split(out.decode())

    async def _send(self, *argv):
        # One a85-encoded, space-joined, newline-terminated line per command.
        self.stream.writer.write((self._join([str(s) for s in argv]) + "\n").encode())
        await self.stream.writer.drain()

    def __repr__(self):
        # FIX: everything after this return was unreachable dead code
        # (a second, StringIO-based repr); it has been removed.
        return repr(self.stream)
# --------------------------------------------------------------------
class BridgeConnection(Connection):
    """
    Connection using queues to send and receive shell-style commands.
    Used to connect to a BivalveAgent in the same process, or to bridge a
    BivalveAgent connection across another medium.
    """

    def __init__(
        self,
        send_queue: ArgVQueue,
        recv_queue: ArgVQueue,
        poll_timeout: float = 1.0,
    ):
        super().__init__(STREAM_AUTO_ID.next())
        self.send_queue = send_queue
        self.recv_queue = recv_queue
        self.poll_timeout = poll_timeout

    async def _recv(self) -> ArgV:
        # A dead connection can never yield another message.
        if not await self.alive():
            raise ConnectionAbortedError()
        message = await self.recv_queue.get()
        self.recv_queue.task_done()
        return message

    async def _send(self, *argv):
        await self.send_queue.put(argv)
| lainproliant/bivalve | bivalve/aio.py | aio.py | py | 9,490 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "bivalve.logging.LogManager",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bivalve.datatypes.ThreadAtomicCounter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bivalve.datatypes.ThreadAtomicCounter",
"line_number": 27,
"usage_type":... |
12993456149 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from keras.datasets import mnist
# NOTE: labels and X_test are loaded but unused — RBM training here is
# purely unsupervised on the training images.
(X_train, _), (X_test, _) = mnist.load_data()
X_train = X_train.reshape(-1,784)  # flatten 28x28 images to 784-vectors
X_train = X_train/255  # scale pixels to [0, 1] as BernoulliRBM expects
# In[2]:
from sklearn.neural_network import BernoulliRBM
# 100 hidden units, fit on the full (normalized) training set.
rbm = BernoulliRBM(n_components=100, learning_rate=0.01, random_state=42, verbose=True)
rbm.fit(X_train)
# In[3]:
rbm.n_components  # number of hidden units (100)
# In[4]:
rbm.components_.shape  # learned weights: (n_components, n_features)
# In[5]:
rbm.intercept_hidden_.shape  # hidden-unit biases
# In[6]:
rbm.intercept_visible_.shape  # visible-unit biases
# In[7]:
X_train[:1].shape  # one sample, kept 2-D for transform()
# In[8]:
rbm.transform(X_train[:1])  # hidden-unit (latent) representation of the sample
# In[ ]:
| Arijit-Debnath111/RBM- | RBM .py | RBM .py | py | 625 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.datasets.mnist.load_data",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "keras.datasets.mnist",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sklearn.neural_network.BernoulliRBM",
"line_number": 20,
"usage_type": "call"
}
] |
5175296459 | import numpy as np
import rdkit.Chem as Chem
from rdkit import DataStructs
from rdkit.Chem import rdMolDescriptors
import copy
class CandidatePool():
    """Fixed-capacity pool of candidate molecules with age-based eviction.

    Pooled objects are expected to expose ``pool_life_time`` (int age),
    ``prior_flag`` (bool) and, for similarity lookups, ``smiles``.
    """

    def __init__(self,
                 candidate_pool_size=50):
        self.candidate_pool_size = candidate_pool_size  # maximum pool length
        self.pool = []

    def _calculate_softmax(self, pool_life_time):
        """Return the softmax of the given life times.

        BUG FIX: the original divided the *raw* values by the sum of
        exponentials, which is not a softmax and does not sum to 1.
        """
        exp_sum = 0
        for i in pool_life_time:
            exp_sum += np.exp(i)
        return [np.exp(i) / exp_sum for i in pool_life_time]

    def get_length(self):
        """Number of molecules currently pooled."""
        return len(self.pool)

    def add_molecule(self, molecule):
        """Add ``molecule``; when full, evict the non-prior entry with the
        smallest ``pool_life_time``.

        BUG FIX: the original computed the eviction index inside a list
        filtered by ``prior_flag`` but popped that index from the *full*
        pool, removing the wrong molecule whenever prior entries exist.
        """
        pool_length = len(self.pool)
        if pool_length < self.candidate_pool_size:
            self.pool.append(molecule)
        elif pool_length == self.candidate_pool_size:
            non_prior = [m for m in self.pool if m.prior_flag == False]
            victim = min(non_prior, key=lambda m: m.pool_life_time)
            self.pool.remove(victim)
            molecule.pool_life_time = 0
            self.pool.append(molecule)

    def _smilarity_between_two_smiles(self, smi1, smi2):
        """Tanimoto similarity of two SMILES via radius-4 Morgan fingerprints.

        (Name kept as-is, historical typo included, since it is part of the
        existing call surface.)
        """
        mol1, mol2 = Chem.MolFromSmiles(smi1), Chem.MolFromSmiles(smi2)
        vec1 = rdMolDescriptors.GetMorganFingerprintAsBitVect(mol1, 4, nBits=512)
        vec2 = rdMolDescriptors.GetMorganFingerprintAsBitVect(mol2, 4, nBits=512)
        tani = DataStructs.TanimotoSimilarity(vec1, vec2)
        return tani

    def _calculate_similarity(self, molecule):
        """Similarity of ``molecule`` to every pooled molecule, in pool order."""
        similarity = []
        for i in self.pool:
            similarity.append(self._smilarity_between_two_smiles(molecule.smiles, i.smiles))
        return similarity

    def extract_molecules(self, molecule_list):
        """For each query molecule, remove and return its most similar pool member.

        When more molecules are requested than are pooled, the whole pool
        list is returned (and, as before, left in place).
        """
        res_mols = []
        length = len(molecule_list)
        if length > len(self.pool):
            res_mols = self.pool
            return res_mols
        else:
            for mol in molecule_list:
                similarity = self._calculate_similarity(mol)
                max_index = np.argmax(similarity)
                self.pool[max_index].pool_life_time = 0
                res_mols.append(self.pool[max_index])
                self.pool.pop(max_index)
            return res_mols

    def unload_molecules(self, num_molecules):
        """Remove and return the ``num_molecules`` oldest molecules.

        BUG FIX: the original popped from a *sorted copy* of the pool, so
        the returned molecules were never actually removed from it.
        """
        ranked = sorted(self.pool, key=lambda x: x.pool_life_time, reverse=True)
        selected = ranked[:num_molecules]
        for molecule in selected:
            self.pool.remove(molecule)
        return selected

    def update_candidate_pool(self):
        """Age every pooled molecule by one step."""
        for molecule in self.pool:
            molecule.pool_life_time += 1
from a_molecule import *
if __name__ == '__main__':
    # Smoke test: fill the pool with straight-chain alkanes of growing
    # length, then extract the pooled molecule most similar to the longest.
    candidate_pool = CandidatePool()
    smiles_pool = ['C']
    for i in range(10):
        smiles_pool.append(smiles_pool[-1]+'C')
    for i in range(10):
        candidate_pool.add_molecule(Molecule(smiles_pool[i]))
    standard_mol = Molecule(smiles_pool[-1])
    print(len(candidate_pool.pool))
print(candidate_pool.extract_molecules([standard_mol])[0].smiles) | tong2shudong/calm | a_candidate_pool.py | a_candidate_pool.py | py | 3,126 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.exp",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem.MolFromSmiles",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem",
"li... |
11506319058 | #pylint: disable=no-member
import tcod
from random import randint
from game_messages import Message
class Brute:
    """Melee AI: close the distance to the target, then attack it."""

    def take_turn(self, target, fov_map, game_map, entities):
        """Act once; returns a list of combat result dicts (possibly empty)."""
        outcomes = []
        monster = self.owner
        # Only act while the monster is inside the player's field of view.
        if not tcod.map_is_in_fov(fov_map, monster.x, monster.y):
            return outcomes
        if monster.distance_to(target) >= 2:
            monster.move_astar(target, entities, game_map)
        elif target.combatant.health > 0:
            outcomes.extend(monster.combatant.attack_physical(target))
        return outcomes
class ConfusedLad:
    """Temporary AI: wander randomly for ``nof_turns`` turns, then restore
    the previous AI and emit a recovery message."""

    def __init__(self, prev_ai, nof_turns=5):
        self.prev_ai=prev_ai  # AI to restore once the confusion wears off
        self.nof_turns=nof_turns  # remaining confused turns
    def take_turn(self, target, fov_map, game_map, entities):
        """Take one confused turn; returns a list of result messages."""
        results=[]
        if self.nof_turns:
            # Pick a random neighbouring (or identical) coordinate.
            dx=self.owner.x+randint(-1, 1)
            dy=self.owner.y+randint(-1, 1)
            # NOTE(review): `and` means movement only happens when BOTH
            # coordinates changed (diagonal wandering only) — confirm this
            # is intended rather than `or`.
            if dx!=self.owner.x and dy!=self.owner.y:
                self.owner.move_towards(dx, dy, game_map, entities)
            self.nof_turns-=1
        else:
            # Confusion expired: restore the original AI and announce it.
            self.owner.ai=self.prev_ai
            results.append({'message': Message('The {0} regains self-control.'.format(self.owner.name), tcod.red)})
return results | propfeds/project-regular | components/ai.py | ai.py | py | 1,201 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tcod.map_is_in_fov",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "game_messages.Messag... |
73866758434 | from mmcv.runner import HOOKS, Hook
from mmcv.runner import EpochBasedRunner
from mpa.utils.logger import get_logger
logger = get_logger()
@HOOKS.register_module()
class CancelInterfaceHook(Hook):
    """MMCV hook exposing a cancel() entry point to stop training early.

    ``init_callback`` is invoked with this hook in before_run() so an
    external controller can keep a handle to it and request cancellation.
    """

    def __init__(self, init_callback: callable, interval=5):
        self.on_init_callback = init_callback
        self.runner = None  # set in before_run()
        self.interval = interval

    def cancel(self):
        """Request the runner to stop; a no-op before training starts or
        when a cancel was already requested."""
        logger.info('CancelInterfaceHook.cancel() is called.')
        if self.runner is None:
            logger.warning('runner is not configured yet. ignored this request.')
            return

        if self.runner.should_stop:
            logger.warning('cancel already requested.')
            return

        if isinstance(self.runner, EpochBasedRunner):
            epoch = self.runner.epoch
            self.runner._max_epochs = epoch  # Force runner to stop by pretending it has reached it's max_epoch
        self.runner.should_stop = True  # Set this flag to true to stop the current training epoch
        logger.info('requested stopping to the runner')

    def before_run(self, runner):
        # Capture the runner and hand this hook to the external controller.
        self.runner = runner
        self.on_init_callback(self)
self.on_init_callback(self)
| openvinotoolkit/model_preparation_algorithm | mpa/modules/hooks/cancel_interface_hook.py | cancel_interface_hook.py | py | 1,159 | python | en | code | 20 | github-code | 1 | [
{
"api_name": "mpa.utils.logger.get_logger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "mmcv.runner.Hook",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "mmcv.runner.EpochBasedRunner",
"line_number": 26,
"usage_type": "argument"
},
{
"api_... |
38786133703 | '''
# @ Author: Andrew Hossack
# @ Create Time: 2022-05-28 13:56:14
'''
import configparser
from typing import List, Union
def _get_config() -> configparser.ConfigParser:
    """
    Read ``config.ini`` from the current directory and return the parser.

    FIX: the return annotation claimed ``List[str]``; the function returns
    a ``ConfigParser``.  ``ConfigParser.read`` silently ignores a missing
    file, so an empty parser is returned when ``config.ini`` is absent.
    """
    config = configparser.ConfigParser()
    config.read('config.ini')
    return config
def get_config_value(key: str, section: str = "DEFAULT") -> Union[str, None]:
    """
    Return the value stored under ``key`` in ``section``.

    ``None`` is returned when the section or key is absent, or when the
    stored value is an empty string.
    """
    config = _get_config()
    try:
        value = config[section][key]
    except KeyError:
        return None
    return value if value else None
def set_config_value(key: str, value: str, section: str = "DEFAULT") -> None:
    """
    Set ``key`` to ``value`` in ``section`` and persist to ``config.ini``.

    FIX: the section is now created on demand; previously writing to a
    missing non-DEFAULT section raised ``configparser.NoSectionError``.
    """
    config = _get_config()
    if section != "DEFAULT" and not config.has_section(section):
        config.add_section(section)
    config.set(section, key, value)
    with open('config.ini', 'w') as configfile:
        config.write(configfile)
| andrew-hossack/dash-tools | src/dashtools/data/configUtils.py | configUtils.py | py | 912 | python | en | code | 79 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 19,
"usage_type": "name"
}
] |
24769331873 | from helper.help_functions import extract_list
def solve():
    """Solve both parts of Advent of Code 2022 day 4.

    Part 1 counts pairs where one range fully contains the other;
    part 2 counts pairs whose ranges overlap at all.
    """
    lines = extract_list("inputs/input_04")
    points = 0
    points2 = 0
    for line in lines:
        try:
            # Each line looks like "a-b,c-d".
            pairs = line.split(",")
            code_a = pairs[0].split("-")
            code_b = pairs[1].split("-")
            if contained(int(code_a[0]), int(code_a[1]), int(code_b[0]), int(code_b[1])):
                points += 1
            if overlap(int(code_a[0]), int(code_a[1]), int(code_b[0]), int(code_b[1])):
                points2 += 1
        except (ValueError, IndexError):
            # FIX: skip only malformed/blank lines; the previous bare
            # ``except`` swallowed every error, hiding real bugs.
            pass
    print(f"case 1: {points}")
    print(f"case 2: {points2}")
def contained(a_first: int, a_second: int, b_first: int, b_second: int) -> bool:
    """Return True when one range fully contains the other."""
    a_covers_b = a_first <= b_first and a_second >= b_second
    b_covers_a = b_first <= a_first and b_second >= a_second
    return a_covers_b or b_covers_a
def overlap(a_first: int, a_second: int, b_first: int, b_second: int) -> bool:
    """Return True when the two ranges share at least one endpoint-bounded value."""
    return (
        a_first <= b_first <= a_second
        or b_first <= a_first <= b_second
        or b_first <= a_second <= b_second
        or a_first <= b_second <= a_second
    )
# Runs at import time; the script has no __main__ guard.
solve()
| Koell/AdventOfCode | 2022/04.py | 04.py | py | 1,185 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "helper.help_functions.extract_list",
"line_number": 5,
"usage_type": "call"
}
] |
1165332467 | from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn import metrics
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV, HalvingGridSearchCV
from functions import *
import numpy as np
#%%
# Load the precomputed feature matrix and ground-truth labels.
# NOTE(review): pickle, read_truth_data, truthimport, normalize and
# split_data are presumably provided by `from functions import *` — confirm.
with open('feature_matrix.pkl', 'rb') as f:
    feat_matrix = pickle.load(f)
rawTruth = read_truth_data()
truths = truthimport(rawTruth)
labels = truths['same']
results = []
#%%
#deal with Nan values, normalize data #split into train and test sets
cv = RepeatedStratifiedKFold(n_splits=5)
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
input_matrix = normalize(imputer.fit_transform(feat_matrix))
X_train, X_test, y_train, y_test = split_data(input_matrix, labels)
#%%
# Logistic regression (tuned hyperparameters; default version kept below).
lg = LogisticRegression(C=0.01, penalty='l2', solver='newton-cg', max_iter=1000)
#lg = LogisticRegression()
lg.fit(X_train, y_train)
lg_pred = lg.predict(X_test)
lg_acc = metrics.accuracy_score(y_test, lg_pred)
lg_auc = metrics.roc_auc_score(y_test, lg_pred)
lg_rep = metrics.classification_report(y_test, lg_pred)
#%%
# Support vector classifier with RBF kernel.
svm = SVC(C=5, gamma=0.01, kernel='rbf')
#svm = SVC()
svm.fit(X_train, y_train)
svm_pred = svm.predict(X_test)
svm_acc = metrics.accuracy_score(y_test, svm_pred)
svm_auc = metrics.roc_auc_score(y_test, svm_pred)
svm_rep = metrics.classification_report(y_test, svm_pred)
#%%
# Random forest.
rf = RandomForestClassifier(max_depth=90, max_features=2, min_samples_leaf=3, min_samples_split=10, n_estimators=200)
#rf = RandomForestClassifier()
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
rf_acc = metrics.accuracy_score(y_test, rf_pred)
rf_auc = metrics.roc_auc_score(y_test, rf_pred)
rf_rep = metrics.classification_report(y_test, rf_pred)
#%%
# Multi-layer perceptron.
mlp = MLPClassifier(activation='relu', alpha=0.0001, hidden_layer_sizes=(100,), learning_rate='constant', solver='adam', max_iter=1000)
#mlp = MLPClassifier(max_iter=1000)
mlp.fit(X_train, y_train)
mlp_pred = mlp.predict(X_test)
mlp_acc = metrics.accuracy_score(y_test, mlp_pred)
mlp_auc = metrics.roc_auc_score(y_test, mlp_pred)
mlp_rep = metrics.classification_report(y_test, mlp_pred)
#%%#Leave-one_out method to see which features contribute
# acc_scores = []
# for i in range(len(feat_matrix.T)):
#     print(i)
#     lg_i = LogisticRegression()
#     X_train_i = np.delete(X_train, obj = i, axis = 1 )
#     X_test_i = np.delete(X_test, obj = i, axis = 1 )
#     lg_i.fit(X_train_i, y_train)
#     lg_pred_i = lg_i.predict(X_test_i)
#     lg_acc_i = metrics.accuracy_score(y_test, lg_pred_i)
#     acc_scores.append(lg_acc_i)
| goodPointP/AuthorshipAttribution | model_evaluation.py | model_evaluation.py | py | 2,788 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.model_selection.RepeatedStratifiedKFold",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.impute.SimpleImputer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 26,
"usage_type": "attribute"
}... |
17360221735 | """hotjar_tracking
Revision ID: 5cdf7f1bbd6f
Revises: d788fb44fa0e
Create Date: 2022-04-01 11:02:46.044381
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5cdf7f1bbd6f'
down_revision = 'd788fb44fa0e'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``hotjar_tracking`` table (forward migration)."""
    op.create_table('hotjar_tracking',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('nr_id', sa.Integer(), nullable=True),
    sa.Column('hotjar_user', sa.VARCHAR(length=20), nullable=True),
    sa.Column('last_update_dt', sa.DateTime(timezone=True), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``hotjar_tracking`` table (reverse migration)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('hotjar_tracking')
| bcgov/namex | api/migrations/versions/5cdf7f1bbd6f_hotjar_tracking.py | 5cdf7f1bbd6f_hotjar_tracking.py | py | 821 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
23650238154 | from bs4 import BeautifulSoup
import requests
import xlsxwriter
base_trade_url = 'https://www.realmeye.com/offers-by/'
# RealmEye item ids for the tradeable stat potions.
pots = {
    2793: "Life Potion",
    2592: "Defense Potion",
    2591: "Attack Potion",
    2593: "Speed Potion",
    2636: "Dexterity Potion",
    2613: "Wisdom Potion",
    2612: "Vitality Potion",
    2794: "Mana Potion"
}
rev_pots = {v: k for k, v in pots.items()}  # potion name -> item id
# In terms of Speed Pots ETA
pot_values = {
    "Life Potion": 8,
    "Defense Potion": 3,
    "Attack Potion": 3,
    "Speed Potion": 1,
    "Dexterity Potion": 1,
    "Wisdom Potion": 1.2,
    "Vitality Potion": 2,
    "Mana Potion": 5,
}
class Item:
    """One traded item parsed from a RealmEye offer row."""

    def __init__(self, item_id, qty):
        self.item_id = item_id
        # Quantity strings carry a one-character prefix (e.g. "x12").
        self.qty = int(qty[1:])
        known = item_id in pots
        self.name = pots[item_id] if known else 'OTHER_ITEM'
        # Worth is measured in Speed-Potion equivalents; unknown items
        # contribute nothing.
        self.worth = self.qty * pot_values[self.name] if known else 0

    def __str__(self):
        return f'{self.qty} {self.name}'
class Trade:
    """One buy/sell offer row plus its potion-equivalent worth totals."""

    def __init__(self, selling_items, buying_items, qty, added, author, seen, server):
        self.selling_items = selling_items
        self.buying_items = buying_items
        self.qty = qty
        self.added = added
        self.author = author
        self.seen = seen
        self.server = server
        # Aggregate worth of each side of the offer.
        self.selling_worth = sum(item.worth for item in selling_items)
        self.buying_worth = sum(item.worth for item in buying_items)
def getBuyingList(id):
    """Scrape RealmEye for offers buying item ``id`` with pots; returns a
    list of Trade objects parsed from the offers table."""
    buying_spd_pots_url = f'https://www.realmeye.com/offers-to/buy/{id}/pots'
    # RealmEye rejects requests without a browser-like User-Agent.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}
    r = requests.get(buying_spd_pots_url, headers=headers)
    soup = BeautifulSoup(r.content, 'html.parser')
    # The offers grid is the table with id="g"; one <tr> per offer.
    tbl = soup.find("table", id="g").find("tbody")
    raw_trades = tbl.findAll("tr")
    trades_list = []
    for trade in raw_trades:
        trade_attributes = trade.findAll('td')
        # Column 0 = items offered, column 1 = items wanted.
        selling_list = trade_attributes[0].findAll('span', {'class': 'item-static'})
        buying_list = trade_attributes[1].findAll('span', {'class': 'item-static'})
        selling_itms = []
        for itm in selling_list:
            data_id = int(itm.find('span', {'class': 'item'})['data-item'])
            data_qt = itm.find('span', {'class': 'item-quantity-static'}).text
            temp_item = Item(data_id, data_qt)
            selling_itms.append(temp_item)
        buying_itms = []
        for itm in buying_list:
            data_id = int(itm.find('span', {'class': 'item'})['data-item'])
            data_qt = itm.find('span', {'class': 'item-quantity-static'}).text
            buying_itms.append(Item(data_id, data_qt))
        # Remaining columns: quantity, added-time, (skipped), author,
        # last-seen, server (absent for offline players).
        trade_qty = trade_attributes[2].find('span')
        trade_added = trade_attributes[3].find('span').text
        trade_author = trade_attributes[5].find('a').text
        trade_seen = trade_attributes[6].find('span')
        trade_server = trade_attributes[7].find('abbr')
        if trade_server is not None:
            trade_server = trade_server.text
        trades_list.append(Trade(selling_itms, buying_itms, trade_qty, trade_added, trade_author, trade_seen, trade_server))
    return trades_list
# Excel table column layout.
col_headers = [
    {'header': 'USER'},
    {'header': 'SERVER'},
    {'header': 'SELLING'},
    {'header': 'BUYING'},
    {'header': 'SELLING WORTH'},
    {'header': 'BUYING WORTH'},
    {'header': 'DIFFERENCE'},
]
trades_file = open('trades.txt', 'w')
workbook = xlsxwriter.Workbook('Trades.xlsx')
worksheet = workbook.add_worksheet()
data = []
# Scrape one offers page per potion type and flatten into table rows.
for pot_type in pots.values():
    trades_list = getBuyingList(rev_pots[pot_type])
    for trade in trades_list:
        # Build human-readable item lists, skipping non-potion items.
        buying_items_string = ''
        for item in trade.buying_items:
            if item.name == 'OTHER_ITEM':
                continue
            buying_items_string += f'{item}, '
        selling_items_string = ''
        for item in trade.selling_items:
            if item.name == 'OTHER_ITEM':
                continue
            selling_items_string += f'{item}, '
        user_url = f'{base_trade_url}{trade.author}'
        # Clickable author cell in the spreadsheet.
        excel_hyperlink = f'=HYPERLINK(\"{user_url}\",\"{trade.author}\")'
        data.append([excel_hyperlink, trade.server, selling_items_string, buying_items_string, trade.selling_worth,
                     trade.buying_worth, trade.selling_worth - trade.buying_worth])
        printing_string = f'{trade.author}({trade.server}) has {selling_items_string}and wants {buying_items_string}'
        print(printing_string)
        trades_file.write(printing_string + '\n')
worksheet.add_table(0, 0, len(data), len(col_headers) - 1, {'data': data, 'columns': col_headers})
trades_file.close()
workbook.close()
| mm1013g/Realmeye-Potion-Trade-Scraper | rotmgtrader.py | rotmgtrader.py | py | 4,784 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "xlsxwriter.Workbook",
"line_number": 113,
"usage_type": "call"
}
] |
8702115205 | import streamlit as st
import altair as alt
import inspect
from vega_datasets import data
@st.experimental_memo
def get_chart_19705(use_container_width: bool):
    """Build the cumulative Wikimedia donations line chart and render it in
    two tabs (Streamlit theme vs. native Altair theme).

    Fixes: the Order channel referenced the nonexistent column ``data``
    (typo for ``date``), and ``use_container_width`` was accepted but
    ignored in favour of a hard-coded ``True``.
    """
    import altair as alt

    source = "https://frdata.wikimedia.org/donationdata-vs-day.csv"

    chart = alt.Chart(source).mark_line().encode(
        alt.X('monthdate(date):T', title='Month', axis=alt.Axis(format='%B')),
        alt.Y('max(ytdsum):Q', title='Cumulative Donations', stack=None),
        alt.Color('year(date):O', legend=alt.Legend(title='Year')),
        alt.Order('year(date):O')  # was 'year(data)' — typo for the date column
    )

    tab1, tab2 = st.tabs(["Streamlit theme (default)", "Altair native theme"])

    with tab1:
        st.altair_chart(chart, theme="streamlit", use_container_width=use_container_width)
    with tab2:
        st.altair_chart(chart, theme=None, use_container_width=use_container_width)
# Show the chart source and render it, surfacing any failure in the app
# via st.exception instead of crashing the page.
try:
    st.expander("See code").code(inspect.getsource(get_chart_19705))
    get_chart_19705(use_container_width=True)
except Exception as e:
    st.exception(e)
| streamlit/release-demos | 1.16.0/demo_app_altair/pages/109_Cumulative_Wiki_Donations.py | 109_Cumulative_Wiki_Donations.py | py | 1,004 | python | en | code | 78 | github-code | 1 | [
{
"api_name": "altair.Chart",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "altair.X",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "altair.Axis",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "altair.Y",
"line_number": 14,
... |
36284428225 | import sys
from collections import deque
#sys.stdin = open('input.txt', 'r')
def eat(p, depth):
    """BFS from position ``p``; move the shark to the nearest edible fish.

    A fish is edible when its cell value is in [1, size-1] (strictly
    smaller than the shark); cells with value <= size are passable.
    Ties at the minimum distance break by smallest row, then column.
    Returns the distance travelled, or 0 when nothing is reachable.
    Mutates the module-level ``arr``, ``fish`` and ``position``.
    """
    visited = [[0 for _ in range(N)] for _ in range(N)]
    q = deque([[p[0], p[1], depth]])
    compare = []  # all edible cells found at the minimum distance
    mind = 401  # sentinel larger than any distance (assumes N <= 20 — TODO confirm)
    visited[p[0]][p[1]] = 1
    while len(q) > 0:
        r, c, d = q.popleft()
        # Everything still queued is at least this deep; once past the
        # best depth no better candidate can appear.
        if mind < d:
            break
        if 0 < arr[r][c] <= size-1:
            # First find fixes the minimum distance; later finds at the
            # same depth are collected for the tie-break below.
            if len(compare) == 0:
                mind = d
            compare.append([r, c])
        for dr, dc in move:
            if 0 <= r+dr < N and 0 <= c+dc < N:
                if arr[r+dr][c+dc] <= size and not visited[r+dr][c+dc]:
                    visited[r+dr][c+dc] = 1
                    q.append([r+dr, c+dc, d+1])
    if len(compare) == 0:
        return 0
    # Tie-break: topmost, then leftmost candidate.
    compare.sort(key=lambda x: (x[0], x[1]))
    fish[arr[compare[0][0]][compare[0][1]]] -= 1
    position[0], position[1] = compare[0][0], compare[0][1]
    arr[compare[0][0]][compare[0][1]] = 0
    return mind
def shark():
    """Repeatedly eat the nearest fish until none is reachable.

    Accumulates travel time into ``answer``; after eating ``size`` fish
    the shark grows by one and its hunger counter resets.
    """
    global answer
    global hungry
    global size
    # Loop while any fish strictly smaller than the shark remains
    # (fish sizes are capped at 6, hence the min(size, 7)).
    while sum(fish[:min(size, 7)]) > 0:
        distance = eat(position, 0)
        if distance == 0:
            break  # fish remain but none is reachable
        answer += distance
        hungry += 1
        if hungry == size:
            hungry = 0
            size += 1
move = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # 4-neighbour deltas (up/left/down/right)
fish = [0 for i in range(7)]  # count of remaining fish per size 1..6 (index 0 unused)
position = [-1, -1]  # shark's current (row, col)
size = 2  # shark size; it eats fish strictly smaller than this
hungry = 0  # fish eaten since the last growth
answer = 0  # total time (distance) spent
arr = []
N = int(input().rstrip())
# Read the N x N grid; 9 marks the shark's start, other non-zero cells are fish.
for i in range(N):
    temp = list(map(int, input().split()))
    arr.append(temp)
    for idx, j in enumerate(temp):
        if j != 0 and j != 9:
            fish[j] += 1
        if j == 9:
            position[0], position[1] = i, idx
arr[position[0]][position[1]] = 0  # remove the shark marker from the grid
shark()
print(answer)
| kyeong8/Algorithm | 백준/Gold/16236. 아기 상어/아기 상어.py | 아기 상어.py | py | 1,809 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
5305293579 | import random
import torch
import numpy as np
import argparse
import torch.nn as nn
import torch.optim as optim
import numpy as np
import os
from torchvision import transforms
from torchvision.utils import save_image
#import sys
from utils.dataframe import UCIDatasets
from utils.experiment import exp_imputation
from model.MIWAE import MIWAE
from model.imputer import imputer
from utils.dataframe import dataframe, UCIDatasets
from utils.trainer import VAE_trainer, GAN_trainer
from utils.experiment import *
"""
Use the MIWAE and not-MIWAE on UCI data
Find a data from here
https://archive.ics.uci.edu/ml/datasets.php
"""
# Command-line interface.
# FIX: the help strings for --batch-size and --epochs claimed defaults
# (128 and 10) that contradicted the actual defaults (16 and 100000).
parser = argparse.ArgumentParser(description='VAE Example')
parser.add_argument('--batch-size', type=int, default=16, metavar='N',
                    help='input batch size for training (default: 16)')
parser.add_argument('--epochs', type=int, default=100000, metavar='N',
                    help='number of epochs to train (default: 100000)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--data', type=str, default='whitewine', metavar='N',
                    help='which dataset from UCI would you like to use?')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed every RNG for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# Model/training hyperparameters.
name = args.data
n_hidden = 128
n_samples = 20
max_iter = args.epochs
batch_size = args.batch_size
impute_sample = 10000
### the missing model ###
# mprocess = 'linear'
# mprocess = 'selfmasking'
mprocess = 'selfmasking_known'
# ---- number of runs
runs = 1
RMSE_result = dict()
methods = ['miwae','notmiwae','mean','mice','RF']
for method in methods:
    RMSE_result[method] = []
"""
load data: white wine
"""
data = UCIDatasets(name=name)
N, D = data.N, data.D
dl = D - 1  # latent dimension: one less than the data dimension
optim_kwargs = {'lr': 0.0001, 'betas': (0.9, 0.999), 'eps': 1e-08 }
MIWAE_kwargs = {
    'data_dim': D, 'z_dim': dl, 'h_dim': n_hidden, 'n_samples': n_samples
}
notMIWAE_kwargs = {
    'data_dim': D, 'z_dim': dl, 'h_dim': n_hidden, 'n_samples': n_samples, 'missing_process': mprocess
}
data_kwargs = {
    'batch_size': batch_size
}
imputer_par = {
    'missing_values': np.nan, 'max_iter': 10, 'random_state': 0, 'n_estimators': 100, 'n_neighbors': 3, 'metric': 'nan_euclidean'
}
exp_kwargs = {
    'dataset':name, 'runs':runs, 'seed': args.seed,
}
# Bundle everything for exp_imputation().
config = {
    'exp_kwargs': exp_kwargs, 'optim_kwargs': optim_kwargs,
    'MIWAE_kwargs': MIWAE_kwargs, 'notMIWAE_kwargs': notMIWAE_kwargs,
    'data_kwargs': data_kwargs, 'imputer_par': imputer_par,
}
def main():
    """Run the imputation experiment and print RMSE per method."""
    # NOTE(review): this local shadows the module-level RMSE_result and
    # assumes exp_imputation also fills the 'mean', 'mice' and 'RF'
    # baseline entries — confirm against utils.experiment.
    RMSE_result = exp_imputation( 'exp_imputation', model_list = ['miwae', 'notmiwae'], config = config, num_of_epoch = max_iter)
    print("RMSE_miwae   = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['miwae']), np.std(RMSE_result['miwae'])))
    print("RMSE_notmiwae = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['notmiwae']), np.std(RMSE_result['notmiwae'])))
    print("RMSE_mean  = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['mean']), np.std(RMSE_result['mean'])))
    print("RMSE_mice  = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['mice']), np.std(RMSE_result['mice'])))
    print("RMSE_missForest  = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['RF']), np.std(RMSE_result['RF'])))
if __name__ == "__main__":
main() | bravo583771/Variational-inference-and-Missing-not-at-random-imputation | main.py | main.py | py | 3,881 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch... |
71547040355 | #!/usr/bin/python3
"""API for Users"""
from tasks.users import User
from tasks import storage
from api.v1.views import app_views
from flask import jsonify, abort, request, make_response
from flasgger.utils import swag_from
@app_views.route('/users', methods=['GET'], strict_slashes=False)
@swag_from('documentation/users/list_users.yml', methods=['GET'])
def list_users():
    """Return every stored User object as a JSON array."""
    return jsonify([user.to_dict() for user in storage.all(User)])
@app_views.route('/users/by_id/<id>', methods=['GET'], strict_slashes=False)
@swag_from('documentation/users/get_user_id.yml', methods=['GET'])
def get_user_id(id):
    """Return the user(s) matching ``id`` as a JSON list, or 404."""
    # storage returns an iterable of matches; empty/None means not found.
    users = storage.get_user_by_id(User, id)
    if not users:
        abort(404)
    return jsonify([user.to_dict() for user in users])
@app_views.route('/users/<email_address>',
                 methods=['GET'], strict_slashes=False)
@swag_from('documentation/users/get_user_email.yml', methods=['GET'])
def get_user_email(email_address):
    """Return the user with the given email address (as a one-element
    JSON list), or 404 when no such user exists."""
    # Unlike get_user_by_id, this helper returns a single user object.
    users = storage.get_user_by_email(User, email=email_address)
    if not users:
        abort(404)
    return jsonify([users.to_dict()])
@app_views.route('/users/<email_address>',
                 methods=['DELETE'], strict_slashes=False)
@swag_from('documentation/users/delete_user.yml', methods=['DELETE'])
def delete_user(email_address):
    """Delete the user with the given email address.

    Returns an empty JSON object with 200 on success, 404 otherwise.
    """
    user = storage.get_user_by_email(User, email=email_address)
    if not user:
        abort(404)
    storage.delete(user)
    # NOTE(review): no storage.save() follows the delete — confirm that
    # storage.delete() persists the change on its own.
    return make_response(jsonify({}), 200)
@app_views.route('/users/by_id/<id>',
                 methods=['PUT'], strict_slashes=False)
@swag_from('documentation/users/update_user.yml', methods=['PUT'])
def update_user(id):
    """Update every user matching *id* with the JSON request body.

    Immutable fields (class tag, id, timestamps) are ignored.  Returns the
    first updated user as JSON with status 200; 404 for an unknown id and
    400 (``Not a JSON``) when the body is missing or not valid JSON.
    """
    users = storage.get_user_by_id(User, id=id)
    if not users:
        abort(404)
    # Parse the body exactly once; silent=True lets us emit the intended
    # "Not a JSON" message instead of Flask's generic parse error.
    data = request.get_json(silent=True)
    if not data:
        abort(400, description="Not a JSON")
    ignore = ('__class__', 'id', 'created_at', 'updated_at')
    for user in users:
        for key, value in data.items():
            if key not in ignore:
                setattr(user, key, value)
    storage.save()
    # The objects were mutated in place, so no re-fetch is needed.
    for user in users:
        return make_response(jsonify(user.to_dict()), 200)
@app_views.route('/users',
                 methods=['POST'], strict_slashes=False)
@swag_from('documentation/users/create_user.yml', methods=['POST'])
def create_user():
    """Create a new user from the JSON request body.

    Requires ``email_address``, ``first_name`` and ``last_name``; responds
    201 with the created record, or 400 with a descriptive message when
    the body is not JSON or a required field is absent.
    """
    # Parse the body once instead of once per validation check.
    data = request.get_json(silent=True)
    if not data:
        abort(400, description="Not a JSON")
    # Field key -> human-readable label used in the error messages.
    required = (('email_address', 'email address'),
                ('first_name', 'first name'),
                ('last_name', 'last name'))
    for field, label in required:
        if field not in data:
            abort(400, description="Missing {}".format(label))
    new_user = User(**data)
    new_user.save()
    return make_response(jsonify(new_user.to_dict()), 201)
| stepholo/RESTful-API-BASED-TASK-MANAGEMENT-SYSTEM | api/v1/views/user.py | user.py | py | 3,178 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tasks.storage.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tasks.users.User",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "tasks.storage",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
... |
264717597 | from django.shortcuts import render
import json
from django.http import HttpResponse
from django.http import JsonResponse
from .models import ServerCategorys,ServerPosts,Keywords,Aquestions,Attachments,Centers
from school.models import Schools
from major.models import Majors
# Create your views here.
#服务中心文章列表展示
def serverpostsList(request):
    """List every active (is_status=1) service-centre article."""
    posts = ServerPosts.objects.filter(is_status=1)
    return render(request, 'server/server_posts_list.html',
                  context={'server_post': posts})
#查看服务文章详情
def detailserPos(request):
ser_pos_id = request.POST.get('ser_pos_id',None)
ser_posts = ServerPosts.objects.get(id=ser_pos_id)
return render(request,'server/detail_ser_pos.html',context={'ser_posts':ser_posts})
#添加新服务文章:获取新页面
def addserPos(request):
ques_school = Schools.objects.filter(is_status=1)
cateid=ServerCategorys.objects.filter(is_status=1)
keywords=Keywords.objects.all()
context = {
'ques_school': ques_school,
'cateid': cateid,
'keywords':keywords
}
return render(request, 'server/add_ser_pos.html',context=context)
#添加新服务文章,上传数据,除了标题外,都可不填
def addssserPos(request):
    """Create a new service article from the AJAX-posted JSON payload.

    Rejects the request when an active article with the same title already
    exists; otherwise creates the record, substituting defaults for blank
    optional fields, and returns a JSON status envelope.
    """
    # The payload arrives as a JSON string in the 'data' form field.
    data = request.POST.get('data', None)
    data = json.loads(data)
    # print(data['post_school'])
    # Duplicate check: an active article with this exact title?
    info = ServerPosts.objects.filter(post_title=data['post_title'], is_status=1).exists()
    if info:
        return JsonResponse({
            'status': 'fail',
            'message': '该服务文章已存在!',
            'info': ''
        })
    ServerPosts.objects.create(
        post_title=data['post_title'],
        # Blank source/source_link fall back to the site's own branding.
        source='舟炬教育' if data['source']=='' else data['source'],
        source_link='http://www.zhoujuedu.com' if data['source_link']=='' else data['source_link'],
        # Foreign-key ids would error if left unset, so each gets a
        # hard-coded default id.  NOTE(review): these compare against a
        # single-space sentinel ' ' while source/source_link compare
        # against '' — confirm the front end really sends ' ' for
        # unselected dropdowns.
        cateid_id='1' if data['cateid']==' ' else data['cateid'],
        keywords_id='8' if data['keywords']==' ' else data['keywords'],
        server_post_school_id='11' if data['server_post_school']==' ' else data['server_post_school'],
        post_content=data['post_content'],
    )
    return JsonResponse({
        'status': 'success',
        'message': '创建成功',
        'info': ''
    })
#删除服务文章,其实是把status变为0
def delserPos(request):
    """Soft-delete a service article by flipping its is_status flag to 0."""
    post_id = request.GET.get('ser_pos_id')
    post = ServerPosts.objects.get(id=post_id)
    post.is_status = 0
    post.save()
    return render(request, 'server/server_posts_list.html',
                  context={'ser_posts': post})
#编辑服务文章:接收数据
def editserPos(request):
ser_pos_id = request.GET.get('ser_pos_id',None)
ser_posts = ServerPosts.objects.get(id=ser_pos_id)
ser_school = Schools.objects.filter(is_status=1).exclude(name=ser_posts.server_post_school.name)
cateid = ServerCategorys.objects.filter(is_status=1).exclude(server_name=ser_posts.cateid.server_name)
keywords = Keywords.objects.exclude(key_name=ser_posts.keywords.key_name)
context = {
'ser_posts':ser_posts,
'ser_school':ser_school,
'cateid':cateid,
'keywords':keywords
}
return render(request,'server/edit_ser_pos.html',context=context)
#编辑服务文章:上传数据
def updateserPos(request):
# 接收数据
ser_pos_id = request.POST.get("ser_pos_id",None)
'''处理数据'''
data = request.POST.get('data',None)
data = json.loads(data)
'''对数据进行处理'''
pos = ServerPosts.objects.filter(id=ser_pos_id)
pos.update(**data)
return HttpResponse(123)
#服务中心分类展示
def serverCates(request):
server_cates =ServerCategorys .objects.filter(is_status=1)
context ={'server_cates':server_cates}
return render(request,'server/server_cates.html',context=context)
#添加服务中心分类:获取新页面
def addserCates(request):
return render(request, 'server/add_ser_cates.html')
#添加新服务分类:上传数据,名称不允许为空,不允许重复
def addssserCates(request):
server_name = request.POST.get('server_name')
# 判断是否存在
info = ServerCategorys.objects.filter(server_name=server_name).exists()
if info: # 如果为true,控制台返回值
return JsonResponse({
'status': 'fail',
'message': '当前分类已存在',#在html写了框,没有用到这个
'tagid': 'server_name'
})
# 如果不存在就增加数据
ServerCategorys.objects.create(
server_name=server_name,
)
return JsonResponse({
'status': 'success',
'message': '创建成功',
'info': ''
})
#删除服务分类,其实是把status变为0
def delserCates(request):
ser_cates_id=request.GET.get('ser_cates_id')
ser_cates = ServerCategorys.objects.get(id=ser_cates_id)
context={'ser_cates':ser_cates}
is_status = 0
ser_cates.is_status = is_status
ser_cates.save()
return render(request, 'server/server_cates.html', context=context)
#编辑服务分类:接收数据
def editserCates(request):
ser_cates_id = request.GET.get('ser_cates_id',None)
ser_cates = ServerCategorys.objects.get(id=ser_cates_id)
context = {
'ser_cates':ser_cates,
}
return render(request,'server/edit_ser_cates.html',context=context)
#编辑服务分类:上传数据
def updateserCates(request):
    """Persist an edited service-category name posted as a JSON payload."""
    payload = json.loads(request.POST.get('data', None))
    cate_id = request.POST.get("ser_cateid", None)
    category = ServerCategorys.objects.get(id=cate_id)
    # An empty name means "leave unchanged".
    if payload['server_name'] != '':
        category.server_name = payload['server_name']
    category.save()
    return JsonResponse({
        "status": "success",
        "message": "更改成功",
        "info": ""
    })
#教学中心列表展示
def servercenterList(request):
center =Centers .objects.filter(is_status=1)
context ={'center':center}
return render(request,'server/server_center_list.html',context=context)
#查看教学中心详情
def detailserCenter(request):
ser_cen_id = request.POST.get('ser_cen_id',None)
ser_center = Centers.objects.get(id=ser_cen_id)
return render(request,'server/detail_ser_center.html',context={'ser_center':ser_center})
#添加教学中心:获取新页面
def addserCenter(request):
return render(request, 'server/add_ser_center.html')
#添加教学中心:上传数据,名称和编号不能为空
def addssserCenter(request):
    """Create a teaching centre; name and number are required and unique.

    Returns a JSON status envelope; fails when an active centre with the
    same name or number already exists.
    """
    cen_name = request.POST.get('cen_name')
    cen_num=request.POST.get('cen_num')
    address = request.POST.get('address',None)
    phone = request.POST.get('phone',None)
    is_direct=request.POST.get('is_direct',None)
    # Does an active centre with this name or this number already exist?
    info = Centers.objects.filter(cen_name=cen_name,is_status=1).exists()
    info2 = Centers.objects.filter(cen_num=cen_num,is_status=1).exists()
    # print(info)
    if info or info2:
        return JsonResponse({
            'status': 'fail',
            'message': '教学中心或编号已存在!',# the HTML has its own message box; this text is unused there
            'info': ''
        })
    Centers.objects.create(
        cen_name=cen_name,
        cen_num=cen_num, # integer column: an empty value here would raise
        address=address,
        phone=phone,
        is_direct=is_direct,
    )
    return JsonResponse({
        'status': 'success',
        'message': '创建成功',
        'info': ''
    })
#删除教学中心,其实是把status变为0
def delserCenter(request):
ser_center_id=request.GET.get('ser_center_id')
ser_center = Centers.objects.get(id=ser_center_id)
context={'ser_center':ser_center}
is_status = 0
ser_center.is_status = is_status
ser_center.save()
return render(request, 'server/server_center_list.html', context=context)
#编辑教学中心:接收数据
def editserCenter(request):
ser_center_id = request.GET.get('ser_center_id',None)
ser_center = Centers.objects.get(id=ser_center_id)
context = {
'ser_center':ser_center,
}
return render(request,'server/edit_ser_center.html',context=context)
#编辑教学中心:上传数据
def updateserCenter(request):
# 接收数据
ser_center_id = request.POST.get("ser_center_id",None)
'''处理数据'''
data = request.POST.get('data',None)
data = json.loads(data)
'''对数据进行处理'''
cen = Centers.objects.filter(id=ser_center_id)
cen.update(**data)
return HttpResponse(123)
#附件表展示
def serverAtta(request):
atta =Attachments .objects.filter(is_status=1)
context ={'atta':atta}
return render(request,'server/server_atta.html',context=context)
#查看附件详情
def detailserAtta(request):
ser_atta_id = request.POST.get('ser_atta_id',None)
ser_atta = Attachments.objects.get(id=ser_atta_id)
return render(request,'server/detail_ser_atta.html',context={'ser_atta':ser_atta})
#添加附件:获取新页面
def addserAtta(request):
return render(request, 'server/add_ser_atta.html')
#添加附件:上传数据,都不能为空
def addssserAtta(request):
atta_name = request.POST.get('atta_name')
filename=request.POST.get('filename')
#判断是否存在
info = Attachments.objects.filter(atta_name=atta_name,filename=filename).exists()
# print(info)
if info:
return JsonResponse({
'status': 'fail',
'message': '附件已存在!',#在html写了框,没有用到这个
'info': ''
})
Attachments.objects.create(
atta_name=atta_name,
filename=filename,
)
return JsonResponse({
'status': 'success',
'message': '创建成功',
'info': ''
})
#删除附件,其实是把status变为0
def delserAtta(request):
ser_atta_id=request.GET.get('ser_atta_id')
ser_atta = Attachments.objects.get(id=ser_atta_id)
context={'ser_atta':ser_atta}
is_status = 0
ser_atta.is_status = is_status
ser_atta.save()
return render(request, 'server/server_atta.html', context=context)
#编辑附件:接收数据
def editserAtta(request):
ser_atta_id = request.GET.get('ser_atta_id',None)
ser_atta = Attachments.objects.get(id=ser_atta_id)
context = {
'ser_atta':ser_atta,
}
return render(request,'server/edit_ser_atta.html',context=context)
#编辑附件:上传数据
def updateserAtta(request):
# 接收数据
data = request.POST.get('data', None)
ser_attaid = request.POST.get("ser_attaid",None)
data = json.loads(data)
ser_atta = Attachments.objects.get(id=ser_attaid)
if data['atta_name'] != '':
ser_atta.atta_name = data['atta_name']
if data['filename'] != '':
ser_atta.filename = data['filename']
ser_atta.save()
return JsonResponse({
"status": "success",
"message": "更改成功",
"info": ""
})
#模拟题表展示
def serverAques(request):
aques =Aquestions .objects.filter(is_status=1)
context ={'aques':aques}
return render(request,'server/server_aques.html',context=context)
#查看模拟题附件详情
def detailserAqu(request):
ser_aqu_id = request.POST.get('ser_aqu_id',None)
ser_aqu = Aquestions.objects.get(id=ser_aqu_id)
return render(request,'server/detail_ser_aqu.html',context={'ser_aqu':ser_aqu})
#添加新模拟题:获取新页面,五项都不允许为空
def addserAqu(request):
que_school = Schools.objects.filter(is_status=1)
context = {
'ques_school': que_school,
}
return render(request, 'server/add_ser_aqu.html',context=context)
#获取院校id,反向获取到专业信息表的所有院校对应专业id(无重复名称的)
def get_maj(request):
    """AJAX endpoint: majors available for a school, de-duplicated by name.

    Reads ``schid`` from the POST form and returns
    ``{'data': [[major_id, major_name], ...]}`` where names keep their
    first-seen order and each id is the first Majors row bearing that name.
    NOTE(review): the id lookup filters by major_name only, not by school —
    looks intentional (ids feed get_lev, which re-filters by school), but
    confirm.
    """
    schid = request.POST.get('schid', None)
    majors = Majors.objects.filter(school_id=schid, is_status=1)
    # De-duplicate names while preserving first-seen order.
    names = []
    for major in majors:
        if major.major_name not in names:
            names.append(major.major_name)
    pairs = [[Majors.objects.filter(major_name=name).first().id, name]
             for name in names]
    return JsonResponse({'data': pairs})
#通过专业列表的id,得到专业名,再得到专业名和院校id匹配到的所有层次
def get_lev(request):
    """AJAX endpoint: study levels offered for a major at a school.

    Reads ``majid`` (id of any Majors row carrying the wanted name) and
    ``school`` from the POST form, then returns
    ``{'data': [[level_code, level_label], ...]}`` for every active row of
    that major name at that school.
    """
    majid = request.POST.get('majid', None)
    school = request.POST.get('school', None)
    major = Majors.objects.get(id=majid)
    variants = Majors.objects.filter(major_name=major.major_name,
                                     school_id=school, is_status=1)
    # Numeric level codes mapped to their display labels; anything else
    # falls back to '' exactly as the old if/elif chain did.  (The old
    # code also shadowed the builtin `list` — fixed.)
    labels = {0: '高起专', 1: '高起本', 2: '专升本'}
    levels = [[item.level, labels.get(item.level, '')] for item in variants]
    return JsonResponse({'data': levels})
#添加模拟题:上传数据,都不能为空
def addssserAqu(request):
ques_name = request.POST.get('ques_name')
ques_filename=request.POST.get('ques_filename')
ques_school = request.POST.get('ques_school')
ques_major = request.POST.get('ques_major')
ques_level = request.POST.get('ques_level') #实际取到的值是0/1/2,并不能传入
#判断是否存在
info = Aquestions.objects.filter(ques_name=ques_name,ques_filename=ques_filename,is_status=1).exists()
# print(info)
if info or ques_school==' ' or ques_major==' ' or ques_level==' ':
return JsonResponse({
'status': 'fail',
'message': '模拟题已存在或必填项未填写!',#在html写了框,没有用到这个
'info': ''
})
# 专业id获取名称,通过名称和输入框传来的层次值(精确些还需要筛选order_school这个条件),查找到专业的id及其层次id
mjid = Majors.objects.get(id=ques_major)
qu_level=Majors.objects.get(major_name=mjid.major_name,level=ques_level,school_id=ques_school)
# print(qu_level.id,qu_level.major_name)
Aquestions.objects.create(
ques_name=ques_name,
ques_filename=ques_filename,
ques_school_id=ques_school,
ques_major_id=ques_major,
ques_level_id=qu_level.id
)
return JsonResponse({
'status': 'success',
'message': '创建成功',
'info': ''
})
#删除模拟题,其实是把status变为0
def delserAqu(request):
ser_aqu_id=request.GET.get('ser_aqu_id')
ser_aqu = Aquestions.objects.get(id=ser_aqu_id)
context={'ser_aqu':ser_aqu}
is_status = 0
ser_aqu.is_status = is_status
ser_aqu.save()
return render(request, 'server/server_aques.html', context=context)
#编辑模拟题附件:接收数据
#相同学校,相同专业名,不同层次,使之只显示一次专业名
def editserAqu(request):
ser_aqu_id = request.GET.get('ser_aqu_id',None)
ser_aqu = Aquestions.objects.get(id=ser_aqu_id)
#下拉出的学校和专业列表,不包含is_status=0的,和当前这条数据的学校名和专业名
ques_school = Schools.objects.filter(is_status=1).exclude(name=ser_aqu.ques_school.name)
ques_major = Majors.objects.filter(is_status=1).exclude(major_name =ser_aqu.ques_major.major_name)
majors = Majors.objects.filter(major_name=ser_aqu.ques_major.major_name,school_id=ser_aqu.ques_school.id,is_status=1).exclude(level=ser_aqu.ques_level.level)
q_mj = Majors.objects.values('major_name').distinct().filter(is_status=1).exclude(major_name =ser_aqu.ques_major.major_name)
q_mj_lis = []
for i in q_mj:
# print(i['major_name']) #再通过名字和school_id取到符合条件的第一个
q_mj2 = Majors.objects.filter(major_name=i['major_name'],school_id=ser_aqu.ques_school.id).first()
if q_mj2 !=None:
q_mj_lis.append(q_mj2)
# print(q_mj_lis)
context = {
'ser_aqu':ser_aqu,
'ques_school':ques_school,
'ques_major':ques_major,
'q_mj_lis':q_mj_lis,
'majors':majors,
}
return render(request,'server/edit_ser_aqu.html',context=context)
#编辑模拟题附件:上传数据
def updateserAqu(request):
# 获取数据
data = request.POST.get('data', None)
ser_aquid = request.POST.get('ser_aquid', None)
data = json.loads(data)
aqu = Aquestions.objects.get(id=ser_aquid)
if data['ques_school'] != '':
aqu.ques_school = Schools.objects.get(id=data['ques_school'])
if data['ques_major'] != '':
aqu.ques_major = Majors.objects.get(id=data['ques_major'])
if data['ques_level'] !='':
#data['ques_level']得到的值其实是0/1/2
# print(mj.major_name)
# print(data['ques_level'])
mj =Majors.objects.get(id=data['ques_major'])
aqu.ques_level = Majors.objects.get(major_name=mj.major_name,level=data['ques_level'],school_id=data['ques_school'])
# print(aqu.ques_level)
if data['ques_name'] != '':
aqu.ques_name = data['ques_name']
if data['ques_filename'] != '':
aqu.ques_filename = data['ques_filename']
aqu.save()
return JsonResponse({
"status": "success",
"message": "信息更改成功",
"info": ""
})
#已删除服务中心文章列表展示
def deldserPos(request):
server_post =ServerPosts .objects.filter(is_status=0)
context ={'server_post':server_post}
return render(request,'server/deld_ser_pos.html',context=context)
#已删除服务中心分类展示
def deldserCates(request):
server_cates =ServerCategorys .objects.filter(is_status=0)
context ={'server_cates':server_cates}
return render(request,'server/deld_ser_cates.html',context=context)
#已删除教学中心列表展示
def deldserCenter(request):
center =Centers .objects.filter(is_status=0)
context ={'center':center}
return render(request,'server/deld_ser_center.html',context=context)
#已删除附件表展示
def deldserAtta(request):
atta =Attachments .objects.filter(is_status=0)
context ={'atta':atta}
return render(request,'server/deld_ser_atta.html',context=context)
#已删除模拟题表展示
def deldserAqu(request):
aques =Aquestions .objects.filter(is_status=0)
context ={'aques':aques}
return render(request,'server/deld_ser_aqu.html',context=context)
#还原服务文章,其实是把status变为1
def posserPos(request):
ser_pos_id=request.GET.get('ser_pos_id')
ser_posts = ServerPosts.objects.get(id=ser_pos_id)
context={'ser_posts':ser_posts}
is_status = 1
ser_posts.is_status = is_status
ser_posts.save()
return render(request, 'server/deld_ser_pos.html', context=context)
#还原服务分类,其实是把status变为1
def catesserCates(request):
ser_cates_id=request.GET.get('ser_cates_id')
ser_cates = ServerCategorys.objects.get(id=ser_cates_id)
context={'ser_cates':ser_cates}
is_status = 1
ser_cates.is_status = is_status
ser_cates.save()
return render(request, 'server/deld_ser_cates.html', context=context)
#还原教学中心,其实是把status变为1
def centerserCenter(request):
ser_center_id=request.GET.get('ser_center_id')
ser_center = Centers.objects.get(id=ser_center_id)
context={'ser_center':ser_center}
is_status = 1
ser_center.is_status = is_status
ser_center.save()
return render(request, 'server/deld_ser_center.html', context=context)
#还原附件,其实是把status变为1
def attaserAtta(request):
ser_atta_id=request.GET.get('ser_atta_id')
ser_atta = Attachments.objects.get(id=ser_atta_id)
context={'ser_atta':ser_atta}
is_status = 1
ser_atta.is_status = is_status
ser_atta.save()
return render(request, 'server/deld_ser_atta.html', context=context)
#还原模拟题,其实是把status变为1
def aquserAqu(request):
ser_aqu_id=request.GET.get('ser_aqu_id')
ser_aqu = Aquestions.objects.get(id=ser_aqu_id)
context={'ser_aqu':ser_aqu}
is_status = 1
ser_aqu.is_status = is_status
ser_aqu.save()
return render(request, 'server/deld_ser_aqu.html', context=context)
#永久删除服务文章,删除存在数据库的信息
def delesserPos(request):
#获取id
ser_pos_id = request.GET.get('ser_pos_id')
ser_posts = ServerPosts.objects.get(id=ser_pos_id)
context = {'ser_posts': ser_posts}
ServerPosts.objects.get(id=ser_pos_id).delete()
return render(request,'server/deld_ser_pos.html',context=context)
#永久删除服务分类,删除存在数据库的信息
def delesserCates(request):
#获取id
ser_cates_id = request.GET.get('ser_cates_id')
ser_cates = ServerCategorys.objects.get(id=ser_cates_id)
context = {'ser_cates': ser_cates}
ServerCategorys.objects.get(id=ser_cates_id).delete()
return render(request,'server/deld_ser_cates.html',context=context)
#永久删除教学中心,删除存在数据库的信息
def delesserCenter(request):
#获取id
ser_center_id = request.GET.get('ser_center_id')
ser_center = Centers.objects.get(id=ser_center_id)
context = {'ser_center': ser_center}
Centers.objects.get(id=ser_center_id).delete()
return render(request,'server/deld_ser_center.html',context=context)
#永久删除附件,删除存在数据库的信息
def delesserAtta(request):
#获取id
ser_atta_id = request.GET.get('ser_atta_id')
ser_atta = Attachments.objects.get(id=ser_atta_id)
context = {'ser_atta': ser_atta}
Attachments.objects.get(id=ser_atta_id).delete()
return render(request,'server/deld_ser_atta.html',context=context)
#永久删除模拟题附件,删除存在数据库的信息
def delesserAqu(request):
#获取id
ser_aqu_id = request.GET.get('ser_aqu_id')
ser_aqu = Aquestions.objects.get(id=ser_aqu_id)
context = {'ser_aqu': ser_aqu}
Aquestions.objects.get(id=ser_aqu_id).delete()
return render(request,'server/deld_ser_aqu.html',context=context)
| zhouf1234/django_obj | server/views.py | views.py | py | 22,643 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.ServerPosts.objects.filter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.ServerPosts.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.ServerPosts",
"line_number": 12,
"usage_type": "name"
},
{
... |
35656410164 | # Import Package
import matplotlib.pyplot as plt
import time
# membuat fungsi integral
def funcSingle(x):
    """Evaluate the 1-D integrand f(x) = 5x^7 - 9x^4 + 4x - 2."""
    value = (5*x**7) - (9*x**4) + (4*x) - 2
    return value
def funcDouble(x, y):
    """Evaluate the 2-D integrand f(x, y) = 5x^7 - 9y^4 + 4x - 2."""
    value = (5*x**7) - (9*y**4) + (4*x) - 2
    return value
def funcTriple(x, y, z):
    """Evaluate the 3-D integrand f(x, y, z) = 5x^7 - 9y^4 + 4z - 2."""
    value = (5*x**7) - (9*y**4) + (4*z) - 2
    return value
# Nilai Exact
lipat1 = 234880.40
lipat2 = 328064.80
lipat3 = 656401.60
# mendeklarasikan rumus perhitungan
def trapezoidSingle(a, b, n):
    """Composite trapezoidal rule for funcSingle over [a, b] with n panels.

    Standard weighting: h * (f(a)/2 + f(a+h) + ... + f(b-h) + f(b)/2).
    Bug fix: the original interior loop ran ``for i in range(n)``, so the
    i = 0 iteration added a second, full-weight f(a) on top of the half
    weight already in the endpoint term; the sum now starts at i = 1.
    """
    h = float(b - a) / n
    result = 0.5*funcSingle(a) + 0.5*funcSingle(b)
    for i in range(1, n):  # interior nodes only
        result += funcSingle(a + i*h)
    return result * h
def trapezoidDouble(a, b, c, d, n):
    """Grid-sum approximation of the double integral of funcDouble over
    [a, b] x [c, d] using an n x n mesh.

    NOTE(review): this is not the standard 2-D composite trapezoid rule —
    only the (a, c) and (b, d) corners get an extra term and every grid
    node (including edges) carries full weight, so the result differs from
    the textbook formula; confirm this weighting is intended.
    """
    dx = (b - a) / n
    dy = (d - c) / n
    hA = dx * dy  # area of one grid cell
    I = funcDouble(a, c) + funcDouble(b, d)
    for i in range(n):
        for j in range(n):
            xx = a + i * dx
            yy = c + j * dy
            I += funcDouble(xx, yy)
    I *= hA
    return I
def trapezoidTriple(a, b, c, d, g, h, n):
    """Grid-sum approximation of the triple integral of funcTriple over
    [a, b] x [c, d] x [g, h] using an n x n x n mesh.

    NOTE(review): same caveat as trapezoidDouble — only two opposite
    corners get an extra term and all nodes carry full weight, which is
    not the textbook 3-D composite trapezoid weighting; confirm intended.
    """
    dx = (b - a) / n
    dy = (d - c) / n
    dz = (h - g) / n
    hA = dx * dy * dz  # volume of one grid cell
    I = funcTriple(a, c, g) + funcTriple(b, d, h)
    for i in range(n):
        for j in range(n):
            for k in range(n):
                xx = a + i * dx
                yy = c + j * dy
                zz = g + k * dz
                I += funcTriple(xx, yy, zz)
    I *= hA
    return I
# Memanggil fungsi
# Time each integrator, report value / wall time / relative error, then
# bar-chart the three timings.
mulaiSingleTrapezoid = time.time()
numericalSingleTrapezoid = trapezoidSingle(3, 5, 100)
stopSingleTrapezoid = time.time()
waktuSingleTrapezoid = stopSingleTrapezoid - mulaiSingleTrapezoid
mulaiDoubleTrapezoid = time.time()
numericalDoubleTrapezoid = trapezoidDouble(3, 5, 7, 9, 100)
stopDoubleTrapezoid = time.time()
waktuDoubleTrapezoid = stopDoubleTrapezoid - mulaiDoubleTrapezoid
mulaiTripleTrapezoid = time.time()
numericalTripleTrapezoid = trapezoidTriple(3, 5, 7, 9, 11, 13, 100)
stopTripleTrapezoid = time.time()
waktuTripleTrapezoid = stopTripleTrapezoid - mulaiTripleTrapezoid
# Labels below were copy-pasted in the original ("Error Single Trapezoid"
# and "Double Trapezoid Integration" repeated for the double/triple
# results); each line now names the quantity it actually prints.
# NOTE(review): relative error divides by the numerical value rather than
# the exact value (lipatN) — confirm that convention is intended.
print('\nSingle Trapezoid Integration :', numericalSingleTrapezoid)
print('Waktu Komputasi Single Trapezoid :', waktuSingleTrapezoid)
print('Error Single Trapezoid :', ((lipat1 - numericalSingleTrapezoid)/numericalSingleTrapezoid))
print('\nDouble Trapezoid Integration :', numericalDoubleTrapezoid)
print('Waktu Komputasi Double Trapezoid :', waktuDoubleTrapezoid)
print('Error Double Trapezoid :', ((lipat2 - numericalDoubleTrapezoid)/numericalDoubleTrapezoid))
print('\nTriple Trapezoid Integration :', numericalTripleTrapezoid)
print('Waktu Komputasi Triple Trapezoid :', waktuTripleTrapezoid)
print('Error Triple Trapezoid :', ((lipat3 - numericalTripleTrapezoid)/numericalTripleTrapezoid))
# Plot computation time versus integral dimensionality.
data = {'Lipat-1':waktuSingleTrapezoid, 'Lipat-2':waktuDoubleTrapezoid, 'Lipat-3':waktuTripleTrapezoid}
parameter = list(data.keys())
nilai = list(data.values())
plt.bar(parameter, nilai, color='green', width=0.3)
plt.title('Grafik Waktu Komputasi Integrasi Metode Trapezoid')
plt.xlabel('Integal Lipat')
plt.ylabel('Waktu Komputasi')
plt.show()
| ilhamaziz45/Integration | Trapezoid.py | Trapezoid.py | py | 3,050 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 67,
... |
43694490704 | from itertools import product
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
class Dataset:
    """PBM (protein-binding microarray) dataset wrapper.

    Holds the probe sequences and their measured intensities for one
    protein, augments them with the complementary strand, and computes the
    feature matrices requested in ``chosen_features``.
    """
    def __init__(self, protein, fitxer, model_target='tetramers', randomize_fce=False,
                 chosen_features=('full_fce', 'avg'), score='Median_intensity',
                 selected_tetramers=(0, 1, 2, 3, 4, 5, 6, 7, 8), weighted=False):
        """Build the dataset and immediately compute all features.

        :param protein: str, protein identifier (kept for reporting only)
        :param fitxer: pandas.DataFrame with at least ID_REF and VALUE
                       columns (plus WEIGHT when ``weighted`` is True)
        :param model_target: 'tetramers' or 'octamers' — what the scores
                             property yields targets for
        :param randomize_fce: shuffle the fce tetramer->vector mapping
                              (negative control, removes physical info)
        :param chosen_features: subset of the feature names handled in
                                :meth:`featurize`
        :param score: 'Median_intensity' selects the VALUE column,
                      anything else selects "Z-score"
        :param selected_tetramers: start offsets of the tetramers used
                                   per sequence
        :param weighted: keep the WEIGHT column alongside ID_REF/VALUE
        """
        self.protein = protein
        self.pbm35 = fitxer
        if weighted:
            self.pbm35 = self.pbm35[["ID_REF", "VALUE", 'WEIGHT']]
            self.pbm35.columns = ["ID_REF", "VALUE", 'WEIGHT']
        else:
            self.pbm35 = self.pbm35[["ID_REF", "VALUE"]]
            self.pbm35.columns = ["ID_REF", "VALUE"]
        self.pbm35 = self.pbm35.dropna()
        # Double the data with the reverse-complement of every probe,
        # keeping the same measured value.
        inv_seq_data = self.pbm35.copy()
        inv_seq_data["ID_REF"] = inv_seq_data["ID_REF"].apply(self.inv_seq)
        self.pbm35 = pd.concat([self.pbm35, inv_seq_data])
        self.pbm35.reset_index(inplace=True)
        # Feature attributes are filled in by featurize() below.
        self.electrostatic, self.tetra_avg, self.tetra_fce, self.tetra_fce_reduced, self.onehot_1mer, self.integer, \
            self.presence_tetra, self.mgw = 8 * [None]
        self.feats = chosen_features
        self.score = score
        self.selected_tetramers = selected_tetramers
        self.randomize = randomize_fce
        self.model_target = model_target  # TODO include inverse sequences as well
        # Get list of all kmers for indices purposes
        self.sequences = list(self.pbm35["ID_REF"])
        # All probes are assumed to share the length of the first one.
        self.p = len(self.sequences[0])
        # Get list of all tetramers per each octamer in order to get features
        self.featurize()

    def __str__(self):
        """Human-readable one-line summary of this dataset."""
        return f"{self.model_target}-based dataset for protein {self.protein} using features {self.feats}"

    def featurize(self):
        """
        Computing all features as specified in the constructor,
        and setting them as the respective attributes
        :return: None
        """
        # fce vectors are always loaded: both 'full_fce' and 'diagonal_fce'
        # derive from this file.
        tetra_fce = {line.split()[0]: np.array([float(x) for x in line.split()[1:]])
                     for line in open('fce_tetramer.dat')
                     if 'SHIFT' not in line}
        if 'avg' in self.feats:
            tetra_avg = {line.split()[0]: np.array([float(x) for x in line.split()[1:]])
                         for line in open('avg_tetramer.dat')
                         if 'SHIFT' not in line}
            self.tetra_avg = np.array([np.concatenate([tetra_avg[otmer[i:i+4]] for i in self.selected_tetramers])
                                       for otmer in self.sequences])
        if 'full_fce' in self.feats or 'diagonal_fce' in self.feats:
            if self.randomize:
                keys = list(tetra_fce.keys())
                permut_keys = np.random.permutation(keys)
                tetra_fce = {key: tetra_fce[val] for key, val in zip(keys, permut_keys)}
            self.tetra_fce = np.array([np.concatenate([tetra_fce[otmer[i:i+4]] for i in self.selected_tetramers])
                                       for otmer in self.sequences])
        # We might want to scramble the matchings to do 'negative control',
        # i.e., remove all physical information from the dataset
        # Let's also keep track of the reduced matrix
        if 'diagonal_fce' in self.feats:
            # Indices 0, 7, 14, ... pick the diagonal of the flattened
            # 6x6 fce matrix (presumably — TODO confirm the 36-long layout).
            tetra_fce_reduced = {tt: tetra_fce[tt][list(range(0, 36, 7))] for tt in tetra_fce.keys()}
            self.tetra_fce_reduced = np.array([np.concatenate([tetra_fce_reduced[otmer[i:i+4]]
                                                               for i in self.selected_tetramers])
                                               for otmer in self.sequences])
        if 'onehot_1mer' in self.feats:
            # self.onehot_1mer = np.array([self.onehot_encoding(otmer) for otmer in self.sequences]).astype(np.int8)
            self.onehot_1mer = np.array([self.onehot_encoding(otmer) for otmer in self.sequences])
        if 'integer' in self.feats:
            # define encoding input values
            nucleotides = product('ACGT', repeat=4)
            char_to_int = dict((''.join(c), i) for i, c in enumerate(nucleotides))
            self.integer = np.array([[char_to_int[otmer[i:i+4]] for i in self.selected_tetramers]
                                     for otmer in self.sequences]).astype(int)
        if 'presence_tetramer' in self.feats:
            self.presence_tetra = np.array([self.presence(otmer, 4) for otmer in self.sequences]).astype(np.int8)
        #####
        if 'mgw' in self.feats:
            # Minor-groove-width vectors; the dict is transient and is
            # replaced by the per-sequence feature matrix right below.
            self.mgw = {line.split()[0]: [float(x) for x in line.split()[1:]]
                        for line in open('mgw_rohs.txt')
                        if 'SHIFT' not in line}
            self.mgw = np.array([np.concatenate([self.mgw[otmer[i:i+4]] for i in self.selected_tetramers])
                                 for otmer in self.sequences])
        if 'electrostatic' in self.feats:
            self.electrostatic = {line.split()[0]: [x for x in line.split()[1:]]
                                  for line in open('electrostatic.txt')
                                  if 'SHIFT' not in line}
            self.electrostatic = np.array([np.concatenate([self.electrostatic[otmer[i:i+4]]
                                                           for i in self.selected_tetramers])
                                           for otmer in self.sequences])

    @staticmethod
    def onehot_encoding(sequence):
        """
        Converts a sequence to a one-hot-encoded binary vector
        :param sequence: str, the DNA sequence to encode
        :return: np.array
        """
        # define encoding input values
        nucleotides = 'ACGT'
        # define mapping of chars to integers and viceversa
        char_to_int = dict((c, i) for i, c in enumerate(nucleotides))
        # integer encode input data
        integer_encoded = [char_to_int[char] for char in sequence]
        # one hot encode
        onehot_encoded = list()
        for value in integer_encoded:
            letter = [0 for _ in range(len(nucleotides))]
            letter[value] = 1
            onehot_encoded.extend(letter)
        return np.array(onehot_encoded)

    @staticmethod
    def presence(sequence, k):
        """
        Converts a sequence to a vector of "presence" features that specifies the count of
        a given k-mer occurrences in the sequence; the output vector has length 4**k
        :param sequence: str, the DNA sequence to encode
        :param k: int, length of the k-mers that will be counted
        :return: np.array
        """
        kmers = np.zeros(4**k)
        positions = {''.join(x): n for n, x in enumerate(list(product('ACTG', repeat=k)))}
        for i in range(len(sequence)-k+1):
            kmers[positions[sequence[i:i+k]]] += 1
        return kmers

    @property
    def scores(self):
        """
        Yields the selected target values/affinity scores, normalized to numbers between 0 and 1
        :return: np.array
        """
        keyw = "VALUE" if self.score == 'Median_intensity' else "Z-score"
        if self.model_target == "octamers":
            vals = self.pbm35[keyw].values
            return MinMaxScaler().fit_transform(vals.reshape(-1, 1))
        elif self.model_target == "tetramers":
            # Per-position mean score of each tetramer, scaled column-wise.
            mean_scores = self.mean_score()
            vals = np.array([[mean_scores[i][otmer[i:i+4]] for i in self.selected_tetramers]
                             for otmer in self.sequences])
            return MinMaxScaler().fit_transform(vals)

    @staticmethod
    def inv_seq(seq):
        """
        Yields the complementary DNA sequence in the same 5'->3' direction
        :param seq: str, the DNA sequence
        :return: str
        """
        complementary = {"A": "T", "T": "A", "C": "G", "G": "C"}
        return ''.join([complementary[x] for x in seq[::-1]])

    @property
    def features(self):
        """
        Returns the combined matrix of features, pasted from individual pre-calculated attributes
        :return: np.array
        """
        # Map each feature name to the attribute featurize() filled in;
        # order follows self.feats.
        avail = {'avg': 'tetra_avg', 'full_fce': 'tetra_fce', 'diagonal_fce': 'tetra_fce_reduced',
                 'onehot_1mer': 'onehot_1mer', 'integer': 'integer', 'presence_tetramer': 'presence_tetra',
                 'mgw': 'mgw', 'electrostatic': 'electrostatic'}
        return np.hstack([self.__getattribute__(avail[feat]) for feat in self.feats])

    def mean_score(self):
        """
        Generates a list of dictionaries, each containing the position-wise
        score per tetramer
        :return: list of dicts with {str: float} mappings
        """
        keyw = "VALUE" if self.score == 'Median_intensity' else "Z-score"
        if self.model_target == "tetramers":
            expt_scores = self.pbm35[keyw].values
            from collections import defaultdict
            # Accumulate sum and count per tetramer at each of the
            # p - 3 sliding-window positions.
            position_scores = [defaultdict(lambda: 0) for _ in range(self.p - 3)]
            position_counts = [defaultdict(lambda: 0) for _ in range(self.p - 3)]
            for seq, score in zip(self.sequences, expt_scores):
                for i in range(self.p - 3):
                    position_scores[i][seq[i:i+4]] += score
                    position_counts[i][seq[i:i+4]] += 1
        else:
            raise NotImplementedError
        return [{ttmer: position_scores[i][ttmer]/position_counts[i][ttmer]
                 for ttmer in position_scores[i].keys()} for i in range(self.p - 3)]
| Jalbiti/DNAffinity | dataset.py | dataset.py | py | 9,421 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.concat",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
29228421218 | import sys
from typing import Any
# I cant help but wonder how much overhead is added by including QObject, pyqtSignal, and QWidget solely for type hinting
from PyQt5.QtCore import QObject, Qt, pyqtSignal
from PyQt5.QtWidgets import QAction, QLayout, QStyleFactory, QWidget, QApplication, QMainWindow
from utilities.Common import wrapper
def addAllActionsToObject(parent, obj):
    """ Find all QActions stored as attributes of ``parent`` and call ``obj.addAction()`` for each of them. """
    # isinstance() replaces the exact-type check so QAction subclasses are
    # picked up too, and the side effect no longer hides inside a
    # conditional expression.
    for member in vars(parent).values():
        if isinstance(member, QAction):
            obj.addAction(member)
# TODO: stop using such a shoddy method of doing this. Hopefully much of this can be replaced with lambdas
def funcLink(func: Any):
    """Normalize *func* into a plain zero-argument callable.

    A ``wrapper`` instance is adapted into a closure that invokes its
    ``call()`` method; any other callable passes through unchanged; a
    non-callable yields ``None``.
    """
    if isinstance(func, wrapper):
        def invoke():
            func.call()
        return invoke
    if callable(func):
        return func
    return None
def setTriggerResponse(obj: QObject, func: Any, errMsgHead: str = None):
    """ Set a ``QObject``'s response to being triggered.\\\n
    :param obj: The ``QObject`` for which to set the trigger response
    :param func: The function to call when the ``QObject`` is triggered
    :param errMsgHead: If ``func`` is None, this string will be used for an error message that will print to the console.
    """
    linked = funcLink(func)
    if linked is None:
        # funcLink() could not derive a callable from ``func`` -- report instead of connecting.
        if errMsgHead is None:
            print('A functionless object has been triggered.')
        else:
            print(errMsgHead + ' has no function')
    else:
        obj.triggered.connect(linked)
def setClickedResponse(obj: QObject, func: Any, errMsgHead: str = None):
    """ Set a ``QObject``'s response to being clicked.\\\n
    :param obj: The ``QObject`` for which to set the click response
    :param func: The function to call when the ``QObject`` is clicked
    :param errMsgHead: If ``func`` is None, this string will be used for an error message that will print to the console.
    """
    linked = funcLink(func)
    if linked is None:
        # funcLink() could not derive a callable from ``func`` -- report instead of connecting.
        if errMsgHead is None:
            print('A functionless object has been clicked.')
        else:
            print(errMsgHead + ' has no function')
    else:
        obj.clicked.connect(linked)
def setStateChangedResponse(obj: QObject, func: Any, errMsgHead: str = None):
    """ Set a ``QObject``'s response to a change in its state.\\\n
    :param obj: The ``QObject`` for which to set the response
    :param func: The function to call when the ``QObject`` experiences a state change
    :param errMsgHead: If ``func`` is None, this string will be used for an error message that will print to the console.
    """
    linked = funcLink(func)
    if linked is None:
        # funcLink() could not derive a callable from ``func`` -- report instead of connecting.
        if errMsgHead is None:
            print('A functionless object has changed states.')
        else:
            print(errMsgHead + ' has no function')
    else:
        obj.stateChanged.connect(linked)
def setSignalResponse(obj: QObject, signal: pyqtSignal, func: Any, errMsgHead: str = None):
    """ Set a response to the given bound signal.\\\n
    :param obj: The ``QObject`` whose behaviour is being configured (kept for API compatibility)
    :param signal: The bound signal to attach the response to
    :param func: The function to call when the signal fires
    :param errMsgHead: If ``func`` is None, this string will be used for an error message that will print to the console.
    """
    _func = funcLink(func)
    if _func is not None:
        # Bug fix: connect the *passed-in* signal. The previous code did
        # ``obj.signal.connect(_func)``, which looked up an attribute literally
        # named "signal" on ``obj`` and ignored the ``signal`` parameter
        # entirely (it was suppressed with "noinspection PyUnusedLocal").
        signal.connect(_func)
    else:
        if errMsgHead is None:
            print('A functionless object has been signaled.')
        else:
            print(errMsgHead + ' has no function')
# Lazily yield the QLayoutItems currently held by ``layout``.
def layoutContents(layout: QLayout): return (layout.itemAt(i) for i in range(layout.count()))
def setStyleFromString(widget: QWidget, styleName: str):
    """Look up the named style via QStyleFactory and apply it to this widget only."""
    widget.setStyle(QStyleFactory.create(styleName))
def setAppStyleFromString(styleName: str):
    """Apply the named QStyle application-wide; print to stderr if no QApplication exists."""
    print('styleName: ', styleName)
    try:
        # QApplication.instance() returns None when no app has been created,
        # making the .setStyle() access raise the AttributeError caught below.
        QApplication.instance().setStyle(QStyleFactory.create(styleName))
    except AttributeError:
        print('No QApplication object exists.', file = sys.stderr)
# def stdMainSetup(appName: str, widgetType: QWidget or None, layout: QLayout) -> (QApplication, QWidget):
# import sys
# app = QApplication(sys.argv)
# app.setApplicationName(appName)
#
# display = QWidget() if widgetType is None else widgetType # .__call__()
#
# actQuit = QAction('&Quit', display)
# actQuit.setShortcut('Ctrl+q')
# actQuit.triggered.connect(sys.exit)
# display.addAction(actQuit)
#
# display.setLayout(layout)
# display.show()
# return (app, display)
# NOTE(review): the annotation ``QWidget or None`` evaluates to just ``QWidget``
# at runtime; ``Optional[QWidget]`` was probably intended -- confirm before changing.
def stdMainSetup(appName: str, widgetType: QWidget or None) -> QMainWindow or QWidget: # (QApplication, QWidget):
    """Name the running QApplication, build the main window (a fresh QMainWindow
    unless ``widgetType`` is an existing widget), and install a Ctrl+q quit action.
    Returns the window; the caller is expected to show() it.
    """
    QApplication.instance().setApplicationName(appName)
    display = QMainWindow() if widgetType is None else widgetType # .__call__()
    # Standard quit shortcut wired straight to sys.exit.
    actQuit = QAction('&Quit', display)
    actQuit.setShortcut('Ctrl+q')
    actQuit.triggered.connect(sys.exit)
    display.addAction(actQuit)
    # display.setWindowFlags(Qt.Window)
    #It's unfortunate that none of these seem to account for the space taken up by the Windows taskbar when it's not set to hidden/autohide
    # from PyQt5.QtGui import QGuiApplication
    # print(QGuiApplication.primaryScreen().availableVirtualSize())
    # print(QGuiApplication.primaryScreen().availableSize())
    # print(QGuiApplication.primaryScreen().availableVirtualGeometry())
    # print(QGuiApplication.primaryScreen().availableGeometry())
    # display.setMaximumSize()
    # display.setLayout(layout)
    # display.show()
    # return (_app, display)
return display | StaticPH/Split_Hub | utilities/QtHelpers.py | QtHelpers.py | py | 5,466 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "utilities.Common.wrapper",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "PyQt5... |
264569740 | import os
import requests
SHEETY_API = "https://api.sheety.co/d2dd5f6e5713c07f78a8b0452cbb68a8/flightDeals/users/"
SHEETY_TOKEN = os.environ["SHEETY_PRICES_TOKEN"]
class User:
    """Thin client for the Sheety "users" sheet (flight-deal subscribers)."""

    def add_user(self) -> None:
        """Prompt for name/phone on stdin and POST a new row to the sheet."""
        bearer_headers = {
            "Authorization": f'Bearer {SHEETY_TOKEN}'
        }
        params = {
            "user": {
                "firstName" : str(input("Welcome to Flight Club. \nWe find the best flight deals and text you. \nWhat is your first name?")),
                "lastName": str(input("What is your last name?")),
                # int() strips leading zeros and raises ValueError on non-digits;
                # assumes purely numeric phone input -- TODO confirm acceptable.
                "phoneNumber": int(input("What is your phone number?"))
            }
        }
        # NOTE(review): only a 200 counts as success here; if the API answers
        # 201 Created this prints "Failed" -- verify against Sheety's behaviour.
        response = requests.post(f'{SHEETY_API}', headers=bearer_headers, json=params)
        if response.status_code == 200:
            print("You're in the club!")
        else:
            print(f'Failed {response.json()}')

    def users_info(self) -> list:
        """Fetch all rows from the sheet; returns the list of user dicts, or [] on error."""
        bearer_headers = {
            "Authorization": f'Bearer {SHEETY_TOKEN}'
        }
        response = requests.get(SHEETY_API, headers=bearer_headers)
        if response.status_code == 200:
            print("Successfully fetched the data")
            sheety_users = response.json()
            users_info = sheety_users["users"]
            print(users_info)
            return users_info
        else:
            print(f'Failed {response.status_code}')
            return []
if __name__ == "__main__":
cos4 = User()
cos4.add_user()
cos4.users_info() | hollymartiniosos/100dayspython | 39. Flight deal finder/users.py | users.py | py | 1,534 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "requests.post",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
}
] |
18779847895 | import numpy as np
from collections import namedtuple
Coordinate = namedtuple('Coordinate', ['latitude', 'longitude'])
def sin_d(angle):
    """Sine of *angle*, where the angle is given in degrees."""
    radians = np.deg2rad(angle)
    return np.sin(radians)
def cos_d(angle):
    """Cosine of *angle*, where the angle is given in degrees."""
    radians = np.deg2rad(angle)
    return np.cos(radians)
def distance_in_km(coordinate_a, coordinate_b):
    """Great-circle (haversine) distance between two coordinates, in km,
    on a sphere of radius 6372.8 km."""
    half_dlat = (coordinate_b.latitude - coordinate_a.latitude)/2
    half_dlon = (coordinate_b.longitude - coordinate_a.longitude)/2
    haversine = sin_d(half_dlat) ** 2 + cos_d(coordinate_a.latitude) * cos_d(coordinate_b.latitude) * sin_d(half_dlon) ** 2
    return 2 * 6372.8 * np.arcsin(np.sqrt(haversine))
def azimuth(coordinate_a, coordinate_b):
    """Initial azimuth from ``coordinate_a`` to ``coordinate_b`` in degrees,
    normalized to [0, 360) by the mod-2*pi step below."""
    numerator_cosine = cos_d(coordinate_b.latitude)
    numerator_sine = sin_d(coordinate_b.longitude - coordinate_a.longitude)
    denominator_a = sin_d(coordinate_b.latitude) * cos_d(coordinate_a.latitude)
    denominator_b = cos_d(coordinate_b.latitude) * sin_d(coordinate_a.latitude)
    denominator_c = cos_d(coordinate_b.longitude - coordinate_a.longitude)
    numerator = numerator_cosine * numerator_sine
    denominator = denominator_a - denominator_b * denominator_c
    # arctan2 yields (-pi, pi]; shift+mod wraps the angle into [0, 2*pi).
    argument = (np.arctan2(numerator, denominator) + 2 * np.pi) % (2 * np.pi)
    return np.rad2deg(argument)
def bearing(coordinate_a, coordinate_b):
    """Initial bearing from ``coordinate_a`` to ``coordinate_b`` in degrees [0, 360).

    NOTE(review): this appears to compute the same forward-bearing formula as
    ``azimuth()`` above, differing only in how the angle is normalized --
    confirm whether both are needed.
    """
    delta_lambda = coordinate_b.longitude - coordinate_a.longitude
    start_latitude = coordinate_a.latitude
    end_latitude = coordinate_b.latitude
    start_longitude = coordinate_a.longitude
    argument_num = sin_d(delta_lambda) * cos_d(end_latitude)
    argument_denom_1 = cos_d(start_latitude) * sin_d(end_latitude)
    argument_denom_2 = sin_d(start_latitude) * cos_d(end_latitude) * cos_d(delta_lambda)
    result = np.arctan2(argument_num, argument_denom_1 - argument_denom_2)
    # arctan2 yields degrees in (-180, 180] after rad2deg; +360 then mod wraps to [0, 360).
    return (np.rad2deg(result) + 360) % 360
def coordinate_at_distance(start, distance, bearing):
    """Destination coordinate reached by travelling ``distance`` km from ``start``
    along the given ``bearing`` (degrees).

    Note: the parameter ``bearing`` shadows the module-level ``bearing()`` function.
    """
    # Angular distance on a sphere of radius 6372.8 km (radians).
    rad_distance = distance / 6372.8
    arg_1 = sin_d(start.latitude) * np.cos(rad_distance)
    arg_2 = cos_d(start.latitude) * np.sin(rad_distance) * cos_d(bearing)
    # target_latitude is in radians until the rad2deg conversion in the return.
    target_latitude = np.arcsin(arg_1 + arg_2)
    numer = sin_d(bearing) * np.sin(rad_distance) * cos_d(start.latitude)
    denom = np.cos(rad_distance) - sin_d(start.latitude) * np.sin(target_latitude)
    # (x + 540) % 360 - 180 normalizes the longitude offset into (-180, 180].
    target_longitude = start.longitude + ((np.rad2deg(np.arctan2(numer, denom)) + 540) % 360 - 180)
    return Coordinate(latitude = np.rad2deg(target_latitude), longitude = target_longitude)
| coproduto/mobile_location | mobile_localization/geo.py | geo.py | py | 2,369 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_num... |
13944543228 | from collections import deque
# Josephus permutation: n people in a circle, every k-th person is removed.
n, k= map(int, input().split())
L=deque()
for i in range(1,n+1):
    L.append(i)
ans=[]
while L:
    # Rotate k-1 people from the front to the back, then remove the k-th.
    for _ in range(k-1):
        q= L.popleft()
        L.append(q)
    q= L.popleft()
    ans.append(str(q))
# Output format: <a, b, c, ...> (closing '>' printed on the following line).
print('<',end='')
print(', '.join(ans),end='')
print('>',end='') | Taein2/PythonAlgorithmStudyWithBOJ | Minjae/2021-03-09/1159_Josephus.py | 1159_Josephus.py | py | 303 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 3,
"usage_type": "call"
}
] |
22347796562 | import requests
import unittest
token_ya = 'Место для вашего токена'
api_base_url = 'https://cloud-api.yandex.net/'
headers = {
'accept': 'application/json',
'authorization': f'OAuth {token_ya}'
}
class TestDocuments(unittest.TestCase):
    """Live integration tests against the Yandex.Disk REST API.

    These tests hit the real network and are order-dependent: unittest runs
    methods in alphabetical order, so test_create_folder1 (expects 201 Created)
    runs before test_create_folder2 (expects 409 Conflict for the now-existing
    folder). Re-running without deleting the 'Test' folder makes folder1 fail.
    """
    def test_create_folder1(self):
        # First creation of the folder should succeed with 201 Created.
        response = requests.put(api_base_url + 'v1/disk/resources', params={'path': 'Test'}, headers=headers)
        assert response.status_code == 201
    def test_create_folder2(self):
        # Creating the same folder again should conflict (409).
        response = requests.put(api_base_url + 'v1/disk/resources', params={'path': 'Test'}, headers=headers)
        assert response.status_code == 409
    def test_get_folder(self):
        # The folder created above should be retrievable.
        response = requests.get(api_base_url + 'v1/disk/resources', params={'path': 'Test'}, headers=headers)
        assert response.status_code == 200
    def test_get_folder2(self):
        # A path that was never created should yield 404.
        response = requests.get(api_base_url + 'v1/disk/resources', params={'path': 'Нет такой папки'}, headers=headers)
        assert response.status_code == 404
if __name__ == '__main__':
unittest.main()
| ZlayaZayaZ/unittest | unit_tests2.py | unit_tests2.py | py | 1,116 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "requests.put",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"... |
34265319700 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file registers pre-defined datasets at hard-coded paths, and their metadata.
We hard-code metadata for common datasets. This will enable:
1. Consistency check when loading the datasets
2. Use models on these standard datasets directly and run demos,
without having to download the dataset annotations
We hard-code some paths to the dataset that's assumed to
exist in "./datasets/".
Users SHOULD NOT use this file to create new dataset / metadata for new dataset.
To add new dataset, refer to the tutorial "docs/DATASETS.md".
"""
import os
from detectron2.data import MetadataCatalog, DatasetCatalog
from .register_coco import register_coco_instances, register_coco_panoptic_separated, \
register_coco_amodal_cls_instances
from .register_cocoa import register_cocoa_instances
from .lvis import register_lvis_instances, get_lvis_instances_meta
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .pascal_voc import register_pascal_voc
from .register_d2sa import register_d2sa_instances
from .register_kins import register_kins_instances
from .register_sailvos import register_sailvos_instances
from .builtin_meta import _get_builtin_metadata
# ==== Predefined datasets and splits for COCO ==========
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
"coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
"coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
"coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
"coco_2014_minival_100": ("coco/val2014", "coco/annotations/instances_minival2014_100.json"),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
"coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
"coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
"coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
"coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
}
_PREDEFINED_SPLITS_COCO["coco_person"] = {
"keypoints_coco_2014_train": (
"coco/train2014",
"coco/annotations/person_keypoints_train2014.json",
),
"keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
"keypoints_coco_2014_minival": (
"coco/val2014",
"coco/annotations/person_keypoints_minival2014.json",
),
"keypoints_coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/person_keypoints_valminusminival2014.json",
),
"keypoints_coco_2014_minival_100": (
"coco/val2014",
"coco/annotations/person_keypoints_minival2014_100.json",
),
"keypoints_coco_2017_train": (
"coco/train2017",
"coco/annotations/person_keypoints_train2017.json",
),
"keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
"keypoints_coco_2017_val_100": (
"coco/val2017",
"coco/annotations/person_keypoints_val2017_100.json",
),
}
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
"coco_2017_train_panoptic": (
# This is the original panoptic annotation directory
"coco/panoptic_train2017",
"coco/annotations/panoptic_train2017.json",
# This directory contains semantic annotations that are
# converted from panoptic annotations.
# It is used by PanopticFPN.
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
# to create these directories.
"coco/panoptic_stuff_train2017",
),
"coco_2017_val_panoptic": (
"coco/panoptic_val2017",
"coco/annotations/panoptic_val2017.json",
"coco/panoptic_stuff_val2017",
),
"coco_2017_val_100_panoptic": (
"coco/panoptic_val2017_100",
"coco/annotations/panoptic_val2017_100.json",
"coco/panoptic_stuff_val2017_100",
),
}
_PREDEFINED_SPLITS_COCO_AMODAL = {
"coco_2014_amodal_train": ("coco/train2014", "coco/amodal_annotations/COCO_amodal_train2014.json"),
"coco_2014_amodal_val": ("coco/val2014", "coco/amodal_annotations/COCO_amodal_val2014.json"),
"coco_2014_amodal_test": ("coco/test2014", "coco/amodal_annotations/COCO_amodal_test2014.json"),
"cocoa_nostuff_train": ("coco/train2014", "coco/amodal_cls_annotations/COCO_amodal_train2014_detectron_no_stuff.json"),
"cocoa_nostuff_train_visible": ("coco/train2014", "coco/amodal_cls_annotations/COCO_amodal_train2014_detectron_no_stuff.json"),
"cocoa_nostuff_val": ("coco/val2014", "coco/amodal_cls_annotations/COCO_amodal_val2014_detectron_no_stuff.json"),
"cocoa_nostuff_val_visible": ("coco/val2014", "coco/amodal_cls_annotations/COCO_amodal_val2014_detectron_no_stuff.json"),
}
def register_all_coco(root="datasets"):
    """Register all predefined COCO instance splits, then the panoptic-separated
    splits (which reuse image_root/json_file from the matching instance split)."""
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            # Assume pre-defined datasets live in `./datasets`.
            # json_file may already be a full URI ("://"); only join local paths.
            register_coco_instances(
                key,
                _get_builtin_metadata(dataset_name),
                os.path.join(root, json_file) if "://" not in json_file else json_file,
                os.path.join(root, image_root),
            )
    for (
        prefix,
        (panoptic_root, panoptic_json, semantic_root),
    ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
        # Strip the "_panoptic" suffix to find the already-registered instance
        # split whose metadata supplies image_root and the instances json.
        prefix_instances = prefix[: -len("_panoptic")]
        instances_meta = MetadataCatalog.get(prefix_instances)
        image_root, instances_json = instances_meta.image_root, instances_meta.json_file
        register_coco_panoptic_separated(
            prefix,
            _get_builtin_metadata("coco_panoptic_separated"),
            image_root,
            os.path.join(root, panoptic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, semantic_root),
            instances_json,
        )
def register_all_cocoa(root="datasets"):
    """Register the predefined COCO-Amodal (COCOA) splits listed in
    _PREDEFINED_SPLITS_COCO_AMODAL."""
    for key, (image_root, json_file) in _PREDEFINED_SPLITS_COCO_AMODAL.items():
        # Assume pre-defined datasets live in `./datasets`.
        # json_file may already be a full URI ("://"); only join local paths.
        register_cocoa_instances(
            key,
            _get_builtin_metadata("cocoa"),
            os.path.join(root, json_file) if "://" not in json_file else json_file,
            os.path.join(root, image_root),
        )
# ==== Predefined datasets and splits for COCOA cls ==========
_PREDEFINED_SPLITS_COCO_AMODAL_CLS = {
"cocoa_cls_train": ("coco/train2014", "coco/amodal_cls_annotations/COCO_amodal_train2014_with_classes.json"),
"cocoa_cls_train_visible": ("coco/train2014", "coco/amodal_cls_annotations/COCO_amodal_train2014_with_classes.json"),
"cocoa_cls_val": ("coco/val2014", "coco/amodal_cls_annotations/COCO_amodal_val2014_with_classes.json"),
"cocoa_cls_val_visible": ("coco/val2014", "coco/amodal_cls_annotations/COCO_amodal_val2014_with_classes.json"),
}
def register_all_coco_amodal_cls(root="datasets"):
    """Register the COCOA splits that carry class labels
    (_PREDEFINED_SPLITS_COCO_AMODAL_CLS)."""
    for key, (image_root, json_file) in _PREDEFINED_SPLITS_COCO_AMODAL_CLS.items():
        # Assume pre-defined datasets live in `./datasets`.
        # json_file may already be a full URI ("://"); only join local paths.
        register_coco_amodal_cls_instances(
            key,
            _get_builtin_metadata("coco_amodal_cls"),
            os.path.join(root, json_file) if "://" not in json_file else json_file,
            os.path.join(root, image_root),
        )
# ==== Predefined datasets and splits for LVIS ==========
_PREDEFINED_SPLITS_LVIS = {
"lvis_v0.5": {
"lvis_v0.5_train": ("coco/train2017", "lvis/lvis_v0.5_train.json"),
"lvis_v0.5_val": ("coco/val2017", "lvis/lvis_v0.5_val.json"),
"lvis_v0.5_val_rand_100": ("coco/val2017", "lvis/lvis_v0.5_val_rand_100.json"),
"lvis_v0.5_test": ("coco/test2017", "lvis/lvis_v0.5_image_info_test.json"),
}
}
def register_all_lvis(root="datasets"):
    """Register the predefined LVIS splits; metadata is built per dataset
    version via get_lvis_instances_meta."""
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            # Assume pre-defined datasets live in `./datasets`.
            # json_file may already be a full URI ("://"); only join local paths.
            register_lvis_instances(
                key,
                get_lvis_instances_meta(dataset_name),
                os.path.join(root, json_file) if "://" not in json_file else json_file,
                os.path.join(root, image_root),
            )
# ==== Predefined splits for raw cityscapes images ===========
_RAW_CITYSCAPES_SPLITS = {
"cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train", "cityscapes/gtFine/train"),
"cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val", "cityscapes/gtFine/val"),
"cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test", "cityscapes/gtFine/test"),
}
def register_all_cityscapes(root="datasets"):
    """Register instance-segmentation and semantic-segmentation variants of
    each raw Cityscapes split."""
    for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
        meta = _get_builtin_metadata("cityscapes")
        image_dir = os.path.join(root, image_dir)
        gt_dir = os.path.join(root, gt_dir)
        inst_key = key.format(task="instance_seg")
        # The x=image_dir, y=gt_dir defaults bind the *current* loop values into
        # the lambda; without them every lambda would see the last iteration's dirs.
        DatasetCatalog.register(
            inst_key,
            lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
                x, y, from_json=True, to_polygons=True
            ),
        )
        MetadataCatalog.get(inst_key).set(
            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes", **meta
        )
        sem_key = key.format(task="sem_seg")
        DatasetCatalog.register(
            sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
        )
        MetadataCatalog.get(sem_key).set(
            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="sem_seg", **meta
        )
# ==== Predefined splits for PASCAL VOC ===========
def register_all_pascal_voc(root="datasets"):
    """Register the PASCAL VOC 2007/2012 splits; the dataset year is inferred
    from the split name."""
    SPLITS = [
        ("voc_2007_trainval", "VOC2007", "trainval"),
        ("voc_2007_train", "VOC2007", "train"),
        ("voc_2007_val", "VOC2007", "val"),
        ("voc_2007_test", "VOC2007", "test"),
        ("voc_2012_trainval", "VOC2012", "trainval"),
        ("voc_2012_train", "VOC2012", "train"),
        ("voc_2012_val", "VOC2012", "val"),
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc(name, os.path.join(root, dirname), split, year)
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"
_PREDEFINED_SPLITS_D2SA = {
"d2sa_train": ("D2SA/images", "D2SA/annotations/D2S_amodal_training_rot0.json"),
"d2sa_train_visible": ("D2SA/images", "D2SA/annotations/D2S_amodal_training_rot0.json"),
"d2sa_train_aug": ("D2SA/images", "D2SA/annotations/D2S_amodal_augmented.json"),
"d2sa_train_aug_visible": ("D2SA/images", "D2SA/annotations/D2S_amodal_augmented.json"),
"d2sa_val": ("D2SA/images", "D2SA/annotations/D2S_amodal_validation.json"),
"d2sa_val_visible": ("D2SA/images", "D2SA/annotations/D2S_amodal_validation.json"),
"d2sa_test": ("D2SA/images", "D2SA/annotations/D2S_amodal_test_info.json"),
}
def register_all_d2sa(root="/root/detectron2/datasets"):
    """Register the D2SA amodal splits.

    Note: unlike the other register_all_* helpers, the default ``root`` here is
    a hard-coded absolute path rather than the relative "datasets".
    """
    for key, (image_root, json_file) in _PREDEFINED_SPLITS_D2SA.items():
        # Assume pre-defined datasets live in `./datasets`.
        # json_file may already be a full URI ("://"); only join local paths.
        register_d2sa_instances(
            key,
            _get_builtin_metadata("d2sa"),
            os.path.join(root, json_file) if "://" not in json_file else json_file,
            os.path.join(root, image_root),
        )
_PREDEFINED_SPLITS_KINS = {
"kins_train": ("KINS/training/image_2", "KINS/instances_train.json"),
"kins_train_visible": ("KINS/training/image_2", "KINS/instances_train.json"),
"kins_val": ("KINS/testing/image_2", "KINS/instances_val.json"),
"kins_val_visible": ("KINS/testing/image_2", "KINS/instances_val.json"),
}
def register_all_kins(root="datasets"):
    """Register the KINS amodal splits listed in _PREDEFINED_SPLITS_KINS."""
    for key, (image_root, json_file) in _PREDEFINED_SPLITS_KINS.items():
        # Assume pre-defined datasets live in `./datasets`.
        # json_file may already be a full URI ("://"); only join local paths.
        register_kins_instances(
            key,
            _get_builtin_metadata("kins"),
            os.path.join(root, json_file) if "://" not in json_file else json_file,
            os.path.join(root, image_root),
        )
# Register them all under "./datasets"
register_all_coco()
register_all_cocoa()
register_all_coco_amodal_cls()
register_all_kins()
register_all_lvis()
register_all_cityscapes()
register_all_pascal_voc()
register_all_d2sa()
| YutingXiao/Amodal-Segmentation-Based-on-Visible-Region-Segmentation-and-Shape-Prior | detectron2/data/datasets/builtin.py | builtin.py | py | 13,024 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "register_coco.register_coco_instances",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "builtin_meta._get_builtin_metadata",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 127,
"usage_type": "call"
},
... |
25256431774 | """
DM decay spectra and associated functions.
Everything is provided and output in natural units!
"""
import os, sys
from scipy.special import erf
from scipy.interpolate import interp1d
import numpy as np
import pandas as pd
from units import *
from scipy import integrate
class Particle:
    """DM particle whose photon decay spectrum is interpolated from the
    PPPC4DMID 'AtProduction_gammas.dat' tables. All quantities in natural units."""

    def __init__(self, channel='b', m_chi=100*GeV, data_dir = '/tigress/smsharma/Fermi-SmoothGalHalo/DMFiles/'):
        """ Initialize the parameters to generate a sample of subhalos.
        :param channel: Decay channel -- 'b', 'W' and the like
        :param m_chi: Particle DM mass in natural units
        :param data_dir: Where the annihilation spectra are stored
        """
        self.data_dir = data_dir
        self.m_chi = m_chi
        self.channel = channel
        self.dNdLogx_df=pd.read_csv(self.data_dir+'AtProduction_gammas.dat', delim_whitespace=True)
        self.dNdE() # Interpolate spectra

    def dNdE(self):
        """ Make interpolated decay spectra for given mass and channel
        """
        self.m_chi_interp = self.m_chi/2. # Note: PPPC4DMID gives 2->2 spectra; the correct 1->2 spectra for mchi correspond to 2->2 spectra for mchi/2
        # Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int() is the documented drop-in replacement here.
        dNdLogx_ann_df = self.dNdLogx_df.query('mDM == ' + (str(int(float(self.m_chi_interp)/GeV))))[['Log[10,x]',self.channel]]
        self.Egamma = np.array(self.m_chi_interp*(10**dNdLogx_ann_df['Log[10,x]']))
        self.dNdEgamma = np.array(dNdLogx_ann_df[self.channel]/(self.Egamma*np.log(10)))
        self.dNdE_interp = interp1d(self.Egamma, self.dNdEgamma)

    def Phi_decay(self, tau, Emin, Emax):
        """ Integrated flux for a given energy range from dNdE
        Everything in natural units!
        :param tau: Decay time constant
        :param Emin: Energy to integrate from
        :param Emax: Energy to integrate up to
        """
        # Integrate the spectrum up to min(Emax, m_chi/2); above the kinematic
        # endpoint the spectrum is zero, so N stays 0 when Emin is past it.
        N = 0
        if Emin < self.m_chi_interp:
            if Emax < self.m_chi_interp:
                N = integrate.quad(lambda x: self.dNdE_interp(x), Emin, Emax)[0]
            else:
                N = integrate.quad(lambda x: self.dNdE_interp(x), Emin, self.m_chi_interp)[0]
        return 1/(4*np.pi*tau*self.m_chi)*np.array(N)#/(Emax-Emin)
| laurajchang/IG-NPTF | DMFiles/particle_decay.py | particle_decay.py | py | 2,236 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
21022710543 | import hashlib
import json
import requests
import time
import salt_gen
time_start = time.perf_counter()
print(time_start)
salt = salt_gen.generator() # 首次启动获取盐值
def w_rid(): # generate the w_rid signing parameter for each request
    """Compute the MD5 'w_rid' signature for the bilibili wbi API, using the
    module-level ``uid`` (set at startup) and the daily-refreshed ``salt``."""
    global time_start, salt
    if (time.perf_counter() - time_start) > 24 * 60 * 60: # refresh the salt once per day
        time_start = time.perf_counter()
        salt = salt_gen.generator() # trailing salt, derived by shuffling imgKey/subKey
    wts = str(int(time.time())) # unix timestamp
    b = "mid=" + uid + "&platform=web&token=&web_location=1550101"
    a = b + "&wts=" + wts + salt # mid + platform + token + web_location + timestamp wts + the fixed salt suffix
    return hashlib.md5(a.encode(encoding='utf-8')).hexdigest()
def get():
    """Query bilibili's wbi account-info API for the live-room status of the
    module-level ``uid``, print the result, and persist it to liveStatus.json."""
    DEFAULT_HEADERS = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        "Referer": "https://www.bilibili.com/" + uid,
        "Content-Type": "application/json",
        "Accept": "*/*",
        "Origin": "https://space.bilibili.com",
    }
    API = {
        "url": "https://api.bilibili.com/x/space/wbi/acc/info",
        "params": {
            "mid": uid,
            "token": '',
            "platform": "web",
            "web_location": 1550101,
            # w_rid/wts form the wbi request signature (see w_rid above).
            "w_rid": w_rid(),
            "wts": str(int(time.time()))
        }
    }
    t = time.localtime()
    req = requests.request("GET", **API, headers=DEFAULT_HEADERS)
    if req.ok:
        con = req.json()
        # code == 0 means the API call itself succeeded.
        if con["code"] == 0:
            title = con['data']['live_room']['title']
            liveStatus = con['data']['live_room']['liveStatus']
            name = con['data']['name']
            # liveStatus 1 == streaming ("直播中"); anything else == offline ("未开播").
            if liveStatus == 1:
                print(str(time.strftime("%Y-%m-%d %H:%M:%S", t)) + ' ' + name + ' 直播中, 标题:' + title)
            else:
                print(str(time.strftime("%Y-%m-%d %H:%M:%S", t)) + ' ' + name + ' 未开播')
            # Persist the latest status for external consumers.
            with open('liveStatus.json', 'w') as f:
                json.dump({
                    'title': title,
                    'liveStatus': liveStatus
                }, f)
        else:
            print('B站接口返回了错误:')
            print(con)
    else:
        print(f'网络错误, 错误码: {req.status_code}')
uid = input('请输入监控直播的主播UID:')
freq = int(input('请输入检测频率(秒每次,不建议小于60):'))
while freq:
get()
time.sleep(freq)
| velvetflame/liveStatusCheck | main.py | main.py | py | 2,496 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "time.perf_counter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "salt_gen.generator",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.perf_counte... |
21547836374 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import logging
from srm.file_operations import get_full_path
def setup_console_logger(str_format="%(levelname)s: %(message)s", level=logging.INFO):
    """Attach a stream (console) handler to the root logger.

    :param str_format: format string for emitted log records
    :param level: level applied to both the new handler and the root logger
    """
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(str_format))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(level)
def setup_file_logger(filename="", str_format="%(levelname)s: %(message)s", level=logging.INFO):
    """Attach a file handler (path resolved via get_full_path) to the root logger.

    :param filename: log file path, expanded by ``get_full_path``
    :param str_format: format string for emitted log records
    :param level: level applied to both the new handler and the root logger
    """
    handler = logging.FileHandler(get_full_path(filename))
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(str_format))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(level)
| oderiver/project1 | srm/logger_tools.py | logger_tools.py | py | 912 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.StreamH... |
73062296675 | from app import app
from flask import Flask, jsonify, make_response, request
# In-memory seed data for the REST endpoints below (no database).
records = [
    {
        'id':1,
        'title':u'aaa',
        # Bug fix: this key was misspelled 'descrption', which made any handler
        # reading record['description'] raise KeyError for this record.
        'description':u'bbb',
        'done':False
    },
    {
        'id':2,
        'title':u'ccc',
        'description':u'ddd',
        'done':True
    }
]
@app.route('/')
def index():
    # Root endpoint: static HTML greeting.
    return "<h1>----hello flask!----</h1>"
@app.route('/records', methods=['GET'])
def getRec():
    # List every record, wrapped under a top-level "records" key.
    return jsonify({'records': records})
@app.route('/records', methods=['POST'])
def createRec():
    """Create a record from the JSON body; 'title' is required, 'description'
    defaults to "". Returns the new record with 201."""
    if not request.json or not 'title' in request.json:
        return make_response(jsonify({'-ERROR-': 'NEED TITLE'}), 400)
    task = {
        # Next id is derived from the last record; NOTE(review): this raises
        # IndexError if records ever becomes empty -- confirm acceptable.
        'id':records[-1]['id'] + 1,
        'title':request.json['title'],
        'description': request.json.get('description', ""),
        'done':False
    }
    records.append(task)
    return jsonify({'records': task}), 201
@app.route('/records/alltasks', methods=['GET'])
def getTtl():
    # Return just the titles of all records as a JSON array.
    response = [item['title'] for item in records]
    return jsonify(response)
@app.route('/records/<ttlName>', methods=['DELETE'])
def deleteTask(ttlName):
    """Delete the first record whose title equals ``ttlName``; 404 if none match."""
    matching = [item for item in records if item['title'] == ttlName]
    if len(matching) == 0:
        return make_response(jsonify({'-ERROR-': 'NOT FOUND'}), 404)
    records.remove(matching[0])
    # Bug fix: the original returned jsonify({'sucess', True}) -- a *set*
    # literal, which is not JSON-serializable (jsonify raised TypeError) --
    # and misspelled "success". Return a proper dict instead.
    return jsonify({'success': True})
@app.route('/records/<ttlName>', methods=['PUT'])
def updateTask(ttlName):
    """Update title/description/done of the first record matching ``ttlName``
    from the JSON body; absent fields keep their current values."""
    matching = [item for item in records if item['title'] == ttlName]
    if len(matching) == 0:
        return make_response(jsonify({'-ERROR-': 'NOT FOUND'}), 404)
    if not request.json:
        return make_response(jsonify({'-ERROR-': 'FORMAT ERR'}), 400)
    matching[0]['title'] = request.json.get('title', matching[0]['title'])
    # NOTE(review): the default lookup matching[0]['description'] raises
    # KeyError if a stored record lacks a 'description' key -- verify the data.
    matching[0]['description'] = request.json.get('description', matching[0]['description'])
    matching[0]['done'] = request.json.get('done', matching[0]['done'])
    return jsonify({'task':matching[0]})
@app.errorhandler(404)
def not_found(error):
    # App-wide 404 handler: respond with JSON instead of Flask's HTML error page.
    return make_response(jsonify({'-ERROR-': 'NOT FOUND'}), 404)
| ineqwij/CloudComputing | app/views.py | views.py | py | 2,081 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.app.route",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "app.app.route",
"line_number"... |
2715600905 | # start
# annotation
import os
import glob
import copy
from Bio import SeqIO
from Bio.Seq import Seq
import argparse
############################################ Arguments and declarations ##############################################
# Command-line interface: one required input folder of fasta files plus
# optional output locations, search parameters, and external-tool paths.
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument("-i",
                      help="a folder of all input fasta for annotation",
                      type=str, default='.',
                      metavar='input/')
optional.add_argument("-fa",
                      help="file extension of fasta to annotate",
                      type=str, default='fasta',
                      metavar='fasta')
optional.add_argument("-seq",
                      help="input seq format: aa or dna",
                      type=str, default='aa',choices=['aa','dna'],
                      metavar='aa or dna')
# optional output setup
optional.add_argument("-s",
                      help="a folder to store all scripts",
                      type=str, default='.',
                      metavar='scripts/')
optional.add_argument("-o",
                      help="a folder to store all output",
                      type=str, default='annotation/',
                      metavar='annotation/')
# optional search parameters
optional.add_argument('-t',
                      help="Optional: set the thread number assigned for running XXX (default 1)",
                      metavar="1 or more", action='store', default=40, type=int)
optional.add_argument('-job',
                      help="Optional: command to submit jobs",
                      metavar="nohup or customized",
                      action='store', default='jobmit', type=str)
# requirement for software calling
optional.add_argument('-bw', '--bowtie',
                          help="Optional: complete path to bowtie if not in PATH",
                          metavar="/usr/local/bin/bowtie",
                          action='store', default='bowtie', type=str)
optional.add_argument('-sp', '--spades',
                          help="Optional: complete path to spades if not in PATH",
                          metavar="/usr/local/bin/spades",
                          action='store', default='spades', type=str)
optional.add_argument('-pro', '--prodigal',
                      help="Optional: complete path to prodigal if not in PATH, None for no prodigal (default)",
                      metavar="/usr/local/bin/prodigal",
                      action='store', default='None', type=str)
optional.add_argument('-bcf', '--bcftools',
                      help="Optional: complete path to bcftools if not in PATH",
                      metavar="/usr/local/bin/bcftools",
                      action='store', default='bcftools', type=str)
optional.add_argument('-sam', '--samtools',
                      help="Optional: complete path to bwa if not in PATH",
                      metavar="/usr/local/bin/samtools",
                      action='store', default='samtools', type=str)
optional.add_argument('-mini', '--minimap2',
                      help="Optional: complete path to minimap2 if not in PATH",
                      metavar="/usr/local/bin/minimap2",
                      action='store', default='minimap2', type=str)
# NOTE: dest resolves to args.u (first long option name).
optional.add_argument('--u','--usearch',
                      help="Optional: cluster genes with SNPs",
                      metavar="usearch",
                      action='store', default='usearch', type=str)
################################################## Definition ########################################################
args = parser.parse_args()
input_script_sub = args.s +'/annotate'
input_script = args.s
output_dir = args.o
# Create the script/output folders; IOError (== OSError in Python 3)
# covers the already-exists case so reruns are harmless.
try:
    os.mkdir(input_script_sub)
except IOError:
    pass
try:
    os.mkdir(output_dir)
except IOError:
    pass
# All fasta files to annotate, matched by the -fa extension suffix.
all_fasta = glob.glob(os.path.join(args.i, '*' + args.fa))
# functions
def annotation(all_filter_gene_fasta_file):
    """Generate annotation shell scripts for one fasta file.

    Optionally translates DNA to protein with prodigal, clusters the
    sequences with usearch, then writes one .sh script per database
    (metacyc, eggnog x3, kegg, and several custom databases) into
    input_script_sub for later batch submission.
    NOTE(review): database paths are hard-coded for a specific cluster —
    confirm they exist before running elsewhere.
    """
    tag = os.path.split(all_filter_gene_fasta_file)[-1]
    # run prodigal
    if args.seq != 'aa':
        AAfile = all_filter_gene_fasta_file.replace('.'+args.fa.replace('.',''),'.aa')
        # Only run prodigal if the .aa translation does not exist yet.
        try:
            f1 = open(AAfile, 'r')
        except FileNotFoundError:
            os.system('prodigal -q -i %s -a %s' % (all_filter_gene_fasta_file, AAfile))
        all_filter_gene_fasta_file = AAfile
        print(AAfile)
    # run cluster (usearch at 70% identity; runs synchronously, unlike the
    # database searches below which are only written to scripts)
    cutoff = 0.7
    cmd_cluster = ('%s -sort length -cluster_fast %s -id %s -centroids %s.cluster.aa -uc %s.uc -threads %s\n'
                   % (args.u, all_filter_gene_fasta_file, cutoff, all_filter_gene_fasta_file,
                      all_filter_gene_fasta_file, 40))
    os.system(cmd_cluster)
    all_filter_gene_fasta_file = all_filter_gene_fasta_file + '.cluster.aa'
    # run metacyc (diamond blastp, 50% identity / 80% query cover)
    cutoff = 50
    cutoff2 = 80
    database = '/scratch/users/mit_alm/database/metacyc/protseq.fsa'
    cmds = ("diamond blastp --query %s --db %s.dmnd --out %s.metacyc.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
            %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    f1 = open(os.path.join(input_script_sub, tag + '.metacyc.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
    # run eggnog (hmmsearch over three database shards, E-value 0.01)
    cutoff = 0.01
    database = '/scratch/users/mit_alm/database/eggnog/xaa.hmm'
    cmds = ('hmmsearch --tblout %s.eggnog.1.txt --cpu 40 -E %s %s %s\n') %(all_filter_gene_fasta_file,cutoff,database,all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, tag + '.eggnog.1.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
    database = '/scratch/users/mit_alm/database/eggnog/xab.hmm'
    cmds = ('hmmsearch --tblout %s.eggnog.2.txt --cpu 40 -E %s %s %s\n') % (
        all_filter_gene_fasta_file, cutoff, database, all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, tag + '.eggnog.2.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s' % (cmds))
    f1.close()
    database = '/scratch/users/mit_alm/database/eggnog/xac.hmm'
    cmds = ('hmmsearch --tblout %s.eggnog.3.txt --cpu 40 -E %s %s %s\n') % (
        all_filter_gene_fasta_file, cutoff, database, all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, tag + '.eggnog.3.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s' % (cmds))
    f1.close()
    # run kegg (hmmsearch against the prokaryote kofam profiles)
    cutoff = 0.01
    database = '/scratch/users/mit_alm/database/kegg/kofam/profiles/prokaryote/prokaryote.hmm'
    cmds = ('hmmsearch --tblout %s.kegg.txt --cpu 40 -E %s %s %s\n') %(all_filter_gene_fasta_file,cutoff,database,all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, tag + '.kegg.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
    # run customed database (SARG, AHR, butyrate, integron, SRB, NR) —
    # all searches concatenated into a single .customed.sh script
    cutoff = 80
    cutoff2 = 80
    cmds = ''
    database = '/scratch/users/anniz44/scripts/database/SARG.db.fasta'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.SARG.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
            %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 50
    cutoff2 = 50
    database = '/scratch/users/anniz44/scripts/database/AHR.aa.db'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.AHR.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
            %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 60
    cutoff2 = 80
    database = '/scratch/users/anniz44/scripts/database/Butyrate.pro.aa'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.buty.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
            %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 50
    cutoff2 = 80
    database = '/scratch/users/anniz44/scripts/database/IntI1_database.fasta'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.int.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
            %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 50
    cutoff2 = 80
    database = '/scratch/users/anniz44/scripts/database/SRB.AA'
    cmds += ("diamond blastp --query %s --db %s.dmnd --out %s.SRB.txt --id %s --query-cover %s --outfmt 6 --max-target-seqs 2 --evalue 1e-1 --threads 40\n"
            %(all_filter_gene_fasta_file,database,all_filter_gene_fasta_file,cutoff,cutoff2))
    cutoff = 0.01
    database = '/scratch/users/anniz44/scripts/database/NR.hmm'
    cmds += ('hmmsearch --tblout %s.NR.txt --cpu 40 -E %s %s %s\n') %(all_filter_gene_fasta_file,cutoff,database,all_filter_gene_fasta_file)
    f1 = open(os.path.join(input_script_sub, tag + '.customed.sh'), 'w')
    f1.write('#!/bin/bash\nsource ~/.bashrc\n%s'%(cmds))
    f1.close()
# run clustering
# Generate annotation scripts for every input fasta (clustering runs here).
for files in all_fasta:
    annotation(files)
# all scripts
# Write a master script that submits every generated .sh, either via the
# custom job submitter (-job jobmit) or as backgrounded nohup processes.
f1 = open(os.path.join(input_script, 'allannotate.sh'), 'w')
f1.write('#!/bin/bash\nsource ~/.bashrc\n')
for sub_scripts in glob.glob(os.path.join(input_script_sub, '*.sh')):
    if 'jobmit' in args.job:
        f1.write('jobmit %s %s\n' % (sub_scripts, os.path.split(sub_scripts)[-1]))
    else:
        f1.write('nohup sh %s > %s.out &\n' % (sub_scripts, os.path.split(sub_scripts)[-1]))
f1.close()
################################################### END ########################################################
| caozhichongchong/snp_finder | snp_finder/scripts/annotate.py | annotate.py | py | 9,730 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 78,
"usage_type": "call"
},
{
"api_na... |
35476188136 | import requests
import threading
import sys
import re
import time
handle = str(input("please enter the cf handle : "))
roundID = int(input("please enter the contestID : "))
# Codeforces standings API; showUnofficial=true also includes virtual and
# out-of-competition participants in the response.
url = "https://codeforces.com/api/contest.standings?contestId={0}&handles={1}&showUnofficial=true".format(roundID, handle)
file = open("cf_logger.txt", "w")
def fn():
    """Poll the standings API forever, logging 'minutes-elapsed : rank'."""
    while 1:
        t = time.time()
        with requests.get(url) as r:
            # Bug fix: ranks can have more than one digit — the original
            # pattern (\d) captured only the first digit of the rank.
            match = re.search(r"\"rank\":(\d+),", r.text)
            s = str(int((t-t_start)/60)) + " : "
            if match:
                file.write(s + str(match.group(1)))
                file.write('\n')
                # Flush so logged lines survive if the process is killed
                # before the main thread reaches file.close().
                file.flush()
        time.sleep(8)
x = threading.Thread(target = fn, daemon = True)
t_start = time.time()
x.start()
# Block until the user types "y"; the daemon thread dies with the process.
while str(input("press \"y\" to end the process : ")) != "y":
    pass
file.close()
sys.exit(0) | lazymon4d/cf_rank_logger | cf.py | cf.py | py | 770 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 23,
... |
11815448576 | import threading
from urllib import parse
from urllib.request import urlopen
from django.contrib import admin
from django.contrib import messages
from django.urls import reverse
from django.utils.safestring import mark_safe
from mysite import settings
from vodmanagement.models import Vod
from epg.models import Channel, Program
from epg.utils import download_m3u8_files
@admin.register(Channel)
class ChannelModelAdmin(admin.ModelAdmin):
    """Admin list view for TV channels.

    The id column links to the change page; name and RTMP URL are
    editable directly in the changelist.
    """
    list_display = ['channel_id', 'channel_name', 'rtmp_url']
    list_display_links = ['channel_id']  # image_tag
    list_editable = ['channel_name', 'rtmp_url']
    search_fields = ['channel_id', 'channel_name']
@admin.register(Program)
class ProgramModelAdmin(admin.ModelAdmin):
    """
    Program admin site view: lists finished EPG programs and offers a
    bulk action that converts their recorded m3u8 streams into VODs.
    """
    list_display = ['channel', 'title', 'start_time', 'end_time', 'url']
    list_display_links = ['channel']
    list_filter = ['finished', 'channel']
    search_fields = ['title']
    actions = ['record']
    def get_queryset(self, request):
        # Only finished programs can be converted, so hide the rest.
        return super(ProgramModelAdmin, self).get_queryset(request).filter(finished=1)
    def record(self, request, queryset):
        """Admin action: turn each selected program's m3u8 into a Vod.

        Skips (with an error message) programs whose URL is unreachable;
        for the rest, creates a Vod row and downloads the m3u8 segments
        on a background thread, then reports progress via a message.
        """
        legal_program_cnt = 0
        for program in queryset:
            try:
                m3u8_file_path = parse.urlparse(program.url).path  # /CCTV1/20180124/123456.m3u8
                # Probe the URL first so unreachable recordings are skipped.
                urlopen(program.url, timeout=5)
                print(m3u8_file_path)
            except Exception as e:
                self.message_user(request, '%s 转点播失败 请检查录播的网址是否可以访问'%(program.title), messages.ERROR)
                continue
            new_record = Vod(
                title=program.title,
                video=settings.RECORD_MEDIA_FOLDER + m3u8_file_path
            )
            new_record.save()
            # Download runs in a plain thread so the admin request returns
            # immediately; progress is visible on the Vod changelist.
            p = threading.Thread(target=download_m3u8_files, args=(new_record.id, program.url, settings.RECORD_MEDIA_ROOT))
            p.start()
            legal_program_cnt += 1
            print('start downloading m3u8 files', program.url)
        record_url = reverse('admin:vodmanagement_vod_changelist')
        print(record_url)
        self.message_user(request, mark_safe('%s/%s 个节目正在转成点播,转换进度请到<a href="%s">录制节目</a>处查看。'%(legal_program_cnt,queryset.count(),record_url))
                          , messages.SUCCESS)
    record.short_description = '转为点播'
| xahhy/Django-vod | epg/admin.py | admin.py | py | 2,411 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 15,
"usage_type": "call"
},
... |
40806840179 | from datetime import date
from selenium import webdriver
from selenium.webdriver.common.by import By
# Local chromedriver; path is machine-specific.
driver = webdriver.Chrome(executable_path="C:\\Users\\mukunth\\PycharmProjects\\pythonProject\\mypackage\\chromedriver.exe")
#def datastream(value):
#    send1 = driver.get("http://demo.automationtesting.in/Alerts.html")
#    send2 = driver.find_element(By.CSS_SELECTOR, datastream1 ).click()
#    driver.implicitly_wait(5)
#    alert1 = driver.switch_to_alert
#    print(alert1.text)
#    alert1.accept()
#datastream1 = "button.btn.btn-danger"
#datastream(datastream1)
#driver.implicitly_wait(20)
#driver.quit()
##second selectform
#def keyvalue():
#    send1 = driver.get("http://demo.automationtesting.in/Frames.html")
#    send2 = driver.find_element(By.CSS_SELECTOR, "a.analystic").text
#    print(send2)
#
#    driver.switch_to.default_content()
#    driver.switch_to.parent_frame()
#keyvalue()
def keyvalue2():
    """Open online2pdf's PDF-to-Word page and submit a local file."""
    send1 = driver.get("https://online2pdf.com/pdf2word")
    # Bug fix: the original locator "button[@type='button']" is a relative
    # XPath evaluated from the document root and matches nothing; a
    # document-wide search needs the leading '//'.
    # NOTE(review): send_keys() with a file path normally targets the
    # <input type="file"> element rather than a button — confirm the
    # intended element on this page.
    send2 = driver.find_element(By.XPATH, "//button[@type='button']")
    send2.send_keys("C:\\Users\\mukunth\\execution3.json")
    send2.click()
keyvalue2()
### some cases if you have router there the username and password are pop up before enter into config menu in that case do below steps
#https://admin:admin@"desired url"
| Mukunth-arya/selenium | data1.py | data1.py | py | 1,315 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 37,
"usage_type": "attribute"
},
{
... |
20853184226 | """empty message
Revision ID: 405b9e06626f
Revises:
Create Date: 2022-05-20 13:44:53.601560
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '405b9e06626f'  # this migration's unique id
down_revision = None  # None: first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'entry' and 'comment' tables.

    'entry' must be created first because 'comment.entry_id' carries a
    foreign key to 'entry.id'.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('entry',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=80), nullable=False),
    sa.Column('body', sa.Text(), nullable=False),
    sa.Column('pub_date', sa.DateTime(), nullable=False),
    sa.Column('is_published', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('comment',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('body', sa.Text(), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('is_published', sa.Boolean(), nullable=True),
    sa.Column('entry_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['entry_id'], ['entry.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Index comment.created to speed ordering/filtering by creation time.
    op.create_index(op.f('ix_comment_created'), 'comment', ['created'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop both tables in reverse dependency order (comment first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_comment_created'), table_name='comment')
    op.drop_table('comment')
    op.drop_table('entry')
    # ### end Alembic commands ###
| PeJot86/blog | migrations/versions/405b9e06626f_.py | 405b9e06626f_.py | py | 1,476 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
4490056792 | import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
# Load MNIST, normalize to [0, 1], and one-hot encode the labels.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape) # (60000, 28, 28), (60000,)
print(x_test.shape, y_test.shape) # (10000, 28, 28), (10000,)
print(x_train[0])
print(y_train[0])
print(x_train[0].shape) # (28, 28)
x_train = x_train.reshape(60000,28,28,1)/255.
x_test = x_test.reshape(10000,28,28,1)/255.
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
# Three Conv/Pool/Dropout stages followed by a small dense classifier head.
model = Sequential()
model.add(Conv2D(filters=256, kernel_size=(2,2), padding='same',
                 strides=1, input_shape=(28,28,1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(64, (2,2), padding='same', strides=1))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(64, (2,2), padding='same', strides=1))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
# ModelCheckPoint
import datetime
date_now = datetime.datetime.now()
print(date_now) # the clock time configured on this machine
date_time = date_now.strftime('%m%d_%H%M')
print(date_time)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
filepath='../data/modelcheckpoint/'
filename='_{epoch:02d}-{val_loss:.4f}.hdf5'
# '{timer}' is a placeholder filled in at save time by MyModelCheckpoint.
modelpath = "".join([filepath, "k45_", '{timer}', filename])
# Subclass ModelCheckpoint below to patch the method that builds the
# checkpoint file name (so it can inject the current timestamp).
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.distribute import distributed_file_utils
@keras_export('keras.callbacks.ModelCheckpoint')
class MyModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    # Override of the private upstream hook: identical to TF's version
    # except that it also fills a '{timer}' placeholder in the filepath
    # with the current MMDD_HHMM timestamp.
    # NOTE(review): relies on TF-internal APIs (_get_file_path,
    # distributed_file_utils) — may break across TF versions.
    def _get_file_path(self, epoch, logs):
        """Returns the file path for checkpoint."""
        # pylint: disable=protected-access
        try:
            # `filepath` may contain placeholders such as `{epoch:02d}` and
            # `{mape:.2f}`. A mismatch between logged metrics and the path's
            # placeholders can cause formatting to fail.
            file_path = self.filepath.format(epoch=epoch + 1, timer=datetime.datetime.now().strftime('%m%d_%H%M'), **logs)
        except KeyError as e:
            raise KeyError('Failed to format this callback filepath: "{}". '
                           'Reason: {}'.format(self.filepath, e))
        self._write_filepath = distributed_file_utils.write_filepath(
            file_path, self.model.distribute_strategy)
        return self._write_filepath
# Train with early stopping plus the timestamped checkpoint callback,
# then report test-set loss/accuracy.
cp = MyModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
es = EarlyStopping(monitor='val_loss', patience=5)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
hist = model.fit(x_train, y_train, epochs=50, batch_size=32, validation_split=0.2, verbose=2, callbacks=[es, cp])
print(hist)
result = model.evaluate(x_test, y_test)
print('loss :', result[0])
print('acc :', result[1])
'''
# 시각화
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
font_path = './NanumGothic.ttf'
fontprop = fm.FontProperties(fname=font_path, size=8)
plt.figure(figsize=(10, 6))
plt.subplot(2,1,1) # 2행 1열 중 첫번째
plt.plot(hist.history['loss'], marker='.', c='red', label='loss')
plt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss')
plt.grid()
plt.title('손실비용', fontproperties=fontprop)
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(loc='upper right')
plt.subplot(2,1,2) # 2행 2열 중 두번째
plt.plot(hist.history['acc'], marker='.', c='red', label='acc')
plt.plot(hist.history['val_acc'], marker='.', c='blue', label='val_acc')
plt.grid()
plt.title('정확도', fontproperties=fontprop)
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(loc='upper right')
plt.show()
''' | Taerimmm/ML | keras/keras45_ModelCheckPoint2_datatime.py | keras45_ModelCheckPoint2_datatime.py | py | 4,189 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "tensorflow.keras.datasets.mnist.load_data",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.datasets.mnist",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.utils.to_categorical",
"line_number": 20,
"usage... |
18934358632 | #---------------------------------------------------
# 21/05/2020 - начато
# программа по переименованию .jpg
# под нужды проги База Эльф
#
#---------------------------------------------------
#
import os, configparser, fnmatch
from shutil import copyfile
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from main_os_form import Ui_MainWindow # главное окно программы
class Main_window(QMainWindow):
    """Main application window: collects form input and delegates the
    rename/copy logic to CopyScans. (Russian UI strings are intentional.)"""
    def __init__(self):
        super(Main_window, self).__init__()  # standard QMainWindow init
        print('------class Main_window(QMainWindow) создан------')
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.cs = CopyScans() # resolves scan paths - where to copy from and where to drop
        self.init_ui_dop_config()
    def closeEvent(self, QCloseEvent):
        '''Actions performed when the application is closed.'''
        # rewrite the cfg file if the path settings were changed
        if self.need_to_save_cfg:
            self.cs.save_cfg()
    def init_ui_dop_config(self): # set widgets to default position
        self.ui.pushB_close.clicked.connect(self.close_win)
        self.ui.pushB_new.clicked.connect(self.new_jpg)
        self.ui.pushB_open_jpg.clicked.connect(self.open_jpg)
        self.ui.pushB_rename.clicked.connect(self.new_date_rename)
        self.ui.path_glob.triggered.connect(self.set_path_glob)
        self.ui.path_local.triggered.connect(self.set_path_local)
        self.ui.instruction_pr.triggered.connect(self.instruction_pr)
        self.ui.about_pr.triggered.connect(self.win_about)
        day_1 = QDate.currentDate()
        self.ui.dateE_dost.setDateTime(QDateTime(day_1, QTime(0, 0, 0)))
        self.ui.dateE_dost_new.setDateTime(QDateTime(day_1, QTime(0, 0, 0)))
        self.ui.checkB_north.toggled.connect(self.checkB_north_changed)
        self.ui.checkB_got.toggled.connect(self.checkB_got_changed)
        self.ui.dateE_got.setEnabled(False)
        self.set_label_path_glob()
        self.set_label_path_local()
        self.need_to_save_cfg = False
        self.ui.lineE_number.setInputMask('0000/d') # number input field (restricted by mask)
        # MASK: 0 - optional numeric characters [0-9] / d - optional digit [1-9]
    def read_data_from_win(self):
        ''' read data from win forms and translate to logical class CopyScans\n
        date_dost = QDateTime.dateTime() from the ui.DateEdit form\n
        date_got = QDateTime.dateTime() from the ui.DateEdit form\n
        '''
        number = self.format_number(self.ui.lineE_number.text())
        fio = self.ui.lineE_fio.text()
        date_dost = self.ui.dateE_dost.dateTime()
        date_got = None
        if self.ui.checkB_got.checkState(): date_got=self.ui.dateE_got.dateTime()
        north = self.ui.checkB_north.checkState()
        self.cs.data_set(north, date_dost, number, fio, date_got)
    def open_jpg(self):
        '''open file.jpg with number !OR! fio from server/scans'''
        print('зашел в опен джпег')
        # collect the input data from the window...
        if not self.cs.path_to_global_scans:
            QMessageBox.critical(self, 'Ошибка!', 'Не задан путь до сервера!')
            return
        self.read_data_from_win()
        # determine the file name from the data and open it...
        file_name_full = self.cs.find_file_name()
        # Windows-only: 'start' opens the file with its default handler.
        os.system(f'start "" "{file_name_full}"')
    def win_about(self): # show info about the program
        '''info about prog'''
        QMessageBox.about(self, 'о программе', 'Версия 1.0 (06.2020)\nmatveykenya@gmail.com')
    def instruction_pr(self):
        '''win with instuction and help for work with prog'''
        QMessageBox.about(self, 'справка', 'в разработке')
    def set_label_path_glob(self):
        self.ui.label_path_glob.setText('Путь к серверу сканов '+self.cs.path_to_global_scans)
    def set_label_path_local(self):
        self.ui.label_path_local.setText('Путь к локальной папке '+self.cs.path_to_local_scans)
    def set_path_glob(self):
        # Let the user pick the scan-server folder; remember to save cfg.
        path_to_global_scans = QFileDialog.getExistingDirectory(self, caption='Выберите файлы jpg для переноса')
        if path_to_global_scans:
            self.cs.path_to_global_scans = path_to_global_scans
            self.set_label_path_glob()
            self.need_to_save_cfg = True
    def set_path_local(self):
        # Let the user pick the local scans folder; remember to save cfg.
        path_to_local_scans= QFileDialog.getExistingDirectory(self, caption='Выберите файлы jpg для переноса')
        if path_to_local_scans:
            self.cs.path_to_local_scans = path_to_local_scans
            self.set_label_path_local()
            self.need_to_save_cfg = True
    def close_win(self):
        self.close()
    def new_jpg(self):
        '''Rename newly scanned jpg files and copy them from path_local to path_global.'''
        # collect the input data from the window...
        if self.cs.path_to_global_scans:
            self.read_data_from_win()
            if self.cs.number and self.cs.fio:
                list_files = QFileDialog.getOpenFileNames(self, caption='Выберите файлы jpg для переноса',
                                dir=self.cs.path_to_local_scans, filter='*.jpg')
                files_jpg = list_files[0]
                if files_jpg:
                    # ... and the actual move-and-rename work itself
                    w = self.cs.new_rename_and_remove(files_jpg)
                    if w[0]: QMessageBox.information(self, 'Успешно', w[1])
                    elif w[1]: QMessageBox.critical(self, 'Копирование отменено!', w[1])# if the message w[1]=='' is empty, no dialog is shown
            else: QMessageBox.critical(self, 'Ошибка!', 'Нет номера или ФИО заказчика')
        else: QMessageBox.critical(self, 'Ошибка!', 'Не задан путь до сервера!')
    def new_date_rename(self):
        '''Rename jpg files and move them under a new date WITHIN path_global. (Work in progress.)'''
        # read the data and call self.cs.remove_with_new_date_dost
        #
        # collect the input data from the window...
        if not self.cs.path_to_global_scans:
            QMessageBox.critical(self, 'Ошибка!', 'Не задан путь до сервера!')
            return
        self.read_data_from_win()
        # determine the file names from the data
        files_jpg = self.cs.find_file_name()
        buttonYesNO = QMessageBox.question(self, 'Найдено', 'Столько-то файлов.\n Преносим?')
        if (buttonYesNO == QMessageBox.StandardButton.Yes):
            print('перемещаем файлы...')
        print('the end')
        # NOTE(review): the actual move is not implemented yet.
    def format_number(self, number: str) -> str:
        '''returns number for name file.jpg in the desired format'''
        # 'NNNN/d' -> 'NNNN', 'NNNN доз' or 'NNNN дозN' depending on d.
        razdel = number.find('/')
        num = number[:razdel]
        if num == '': return ''
        doz = number[razdel+1:]
        if doz == '': doz = ''
        elif doz == '1': doz = ' доз'
        else: doz = ' доз' + doz
        print('возвращаемый номер', f'{num}{doz}')
        return f'{num}{doz}'
    def checkB_north_changed(self):
        # "north" shipments always have a "got" date, so force-check it.
        if self.ui.checkB_north.isChecked():
            self.ui.checkB_got.setChecked(True)
            self.ui.checkB_got.setEnabled(False)
        else:
            self.ui.checkB_got.setChecked(False)
            self.ui.checkB_got.setEnabled(True)
    def checkB_got_changed(self):
        # Default the "got" date to one week before the delivery date.
        if self.ui.checkB_got.isChecked():
            self.ui.dateE_got.setEnabled(True)
            dateTime_1 = self.ui.dateE_dost.dateTime().addDays(-7)
            #print(day_1)
            self.ui.dateE_got.setDateTime(dateTime_1)
        else: self.ui.dateE_got.setEnabled(False)
class CopyScans(): # the logic lives here, while class Main_window handles the UI and gathers data from the widgets
    '''copy scans *.jpg from folder1 to folder2 and Rename its'''
    def __init__(self):
        # path_to_global_scans - the scan server root; path_to_local_scans -
        # the local folder with fresh scans. Both restored from config.ini.
        self.path_to_global_scans = ''
        self.path_to_local_scans = ''
        self.file_cfg = self.file_config_name()
        self.open_cfg()
        self.data_set()
    def data_set(self, north=False, date_dost=None, number='', fio='', date_got=None):
        ''' set parameters: north flag, delivery date (QDateTime), order
        number, customer name, and optional received date (QDateTime) '''
        self.north=north
        self.date_dost=date_dost
        self.number=number
        self.fio=fio
        self.date_got=date_got
    def file_config_name(self) -> str:
        '''return full path to config.ini in curent dir with prog'''
        file_config_name = 'config.ini'
        base_path = os.path.dirname(os.path.abspath(__file__))
        config_path = os.path.join(base_path, file_config_name)
        return config_path
    def save_cfg(self):
        '''save config.ini in dir with prog'''
        print('просто зашел')
        # Only persist when both configured paths actually exist.
        if os.path.isdir(self.path_to_global_scans) and os.path.isdir(self.path_to_local_scans):
            config = configparser.ConfigParser()
            config.add_section("Settings")
            config.set('Settings', 'path_to_server_with_scans', self.path_to_global_scans)
            config.set('Settings', 'path_to_local_scans', self.path_to_local_scans)
            print('зашел в def save_cfg(self)')
            with open(self.file_cfg, "w") as f:
                config.write(f)
            print('файл cfg создан/перезаписан')
    def open_cfg(self):
        '''Get paths from config.ini'''
        if os.path.isfile(self.file_cfg):
            config = configparser.ConfigParser()
            config.read(self.file_cfg)
            self.path_to_global_scans = config.get('Settings', 'path_to_server_with_scans')
            self.path_to_local_scans = config.get('Settings', 'path_to_local_scans')
            # Drop stale paths that no longer exist on disk.
            if not os.path.isdir(self.path_to_global_scans): self.path_to_global_scans = ''
            if not os.path.isdir(self.path_to_local_scans): self.path_to_local_scans = ''
    def new_rename_and_remove(self, files_jpg=list(), saveDuplicates=True):
        '''rename a group of files.jpg and copy them to the server path_global\n
        name format: dd.MM NNN fio-1, -2, -3 etc. (date number customer)\n
        files_jpg - the list of selected files with full paths\n
        data validity must be checked BEFORE calling this method!
        saveDuplicates=True - keep renamed copies of the files in the source folder.
        Returns (ok: bool, message: str).
        '''
        # new file name (without ".jpg") and the path on the server
        new_file_name = self.file_name()
        path_to_server = self.path_to_server()
        # path to the local folder containing the files
        path_to_file = os.path.dirname(files_jpg[0])
        textMessage = f'Файлы jpg будут перенесены по пути:\n{path_to_server}\n\n{new_file_name} - {len(files_jpg)} шт'
        if saveDuplicates: textMessage += f'\n\nДубликаты сохранены в {path_to_file}'
        buttonYesNO = QMessageBox.question(application, 'Проверь правильность данных', textMessage)
        if (buttonYesNO == QMessageBox.StandardButton.Yes):
            if not os.path.isdir(path_to_server):
                try: os.makedirs(path_to_server)
                except: return False, 'Создание папки на сервер не удалось!\nКопирование отменилось'
            for i in range(len(files_jpg)):
                # two-digit suffix: -01, -02, ...
                ii_str = str(i+1)
                if len(ii_str)<2: ii_str = '0'+ii_str
                # foolproofing - make sure previously copied files are never overwritten
                file_name_to_sever = path_to_server +'/'+ new_file_name +'-'+ ii_str + '.jpg'
                if os.path.isfile(file_name_to_sever):
                    return False, f'файл {file_name_to_sever} уже есть на сервере'
                file_name = path_to_file +'/'+ new_file_name +'-'+ ii_str + '.jpg'
                if os.path.isfile(file_name):
                    return False, f'файл {file_name} уже есть здесь'
                try:
                    copyfile(files_jpg[i], file_name_to_sever) # copy the file to the server
                except: return False, 'Копирование файлов на сервер не удалось!'
                if saveDuplicates:
                    try:
                        os.rename(files_jpg[i], file_name) # rename within the current directory
                    except: return False, 'Переименование файлов в локал дир не удалось!'
                else:
                    try:
                        os.remove(files_jpg[i]) # delete the source files in the current directory
                    except: return False, 'Удаление исходных файлов в локал дир не удалось!'
            return True, f'Перенесено на сервер {path_to_server}\n\n{len(files_jpg)} файлов'
        return False, f''
    def file_name(self) -> str:
        '''local name(s) file.jpg on server (no dir path and without-".jpg")'''
        if self.north:
            return self.date_got.toString('dd.MM') + ' (c) ' + self.number + ' ' + self.fio
        else:
            return self.date_dost.toString('dd.MM') +' '+ self.number +' '+ self.fio
    def file_name_mask(self) -> str:
        ''' returns a glob-style file-name mask built from partial data '''
        return '*' + self.number + '*' + self.fio+ '*'
    def path_to_server(self) -> str:
        '''dir path to scan server for curent file(s)'''
        year = self.date_dost.toString('yyyy')
        month = self.date_dost.toString('MM')
        day_month = self.date_dost.toString('dd.MM')
        if self.north:
            return self.path_to_global_scans + '/' + year +\
                   '/СЕВЕР/(' + month + ') отгруз ' + day_month
        else:
            day = self.date_dost.toString('dd')
            return self.path_to_global_scans+'/'+year+'/'+self.name_month(month)+'/'+day
    def remove_with_new_date_dost(self):
        '''within path_global, move .jpg files from folder 1 to folder 2, renaming the date in the file name (not implemented yet)'''
    def name_month(self, mm='00'):
        # Map 'MM' to the server's '(MM) <russian month name>' folder names.
        if mm=='01': mm='(01) январь'
        if mm=='02': mm='(02) февраль'
        if mm=='03': mm='(03) март'
        if mm=='04': mm='(04) апрель'
        if mm=='05': mm='(05) май'
        if mm=='06': mm='(06) июнь'
        if mm=='07': mm='(07) июль'
        if mm=='08': mm='(08) август'
        if mm=='09': mm='(09) сентябрь'
        if mm=='10': mm='(10) октябрь'
        if mm=='11': mm='(11) ноябрь'
        if mm=='12': mm='(12) декабрь'
        return mm
    def find_file_name(self) -> str:
        ''' Returns the full file name on the server for the given data.
        Checks it, and if the file cannot be found, returns
        the path to the folder for date_dost instead.
        '''
        file_path = self.path_to_server()
        if not(self.number or self.fio): return file_path # no number and no name - open the folder itself
        file_name = self.file_name_mask()
        file_name_full = os.path.join(file_path, file_name)
        print(file_name_full)
        if os.path.isdir(file_path):
            for file in os.listdir(file_path):
                if fnmatch.fnmatch(file, file_name):
                    print (os.path.join(file_path, file))
                    return os.path.join(file_path, file)
            return file_path
        else: return ''
if __name__=='__main__':
    # Entry point: start the Qt event loop and show the main window.
    app = QApplication([])
    application = Main_window()
    application.show()
    app.exec_()
| MatveyKenya/jpg_RenameAndCopy_Elf | main_my_os.pyw | main_my_os.pyw | pyw | 17,687 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "main_os_form.Ui_MainWindow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "os.path",
"... |
11787865317 | from collections import Counter
import transformers
import os
import torch
import numpy as np
import pyterrier as pt
if not pt.started():
pt.init()
import pandas as pd
from more_itertools import chunked
import deepct
def _subword_weight_to_word_weight(tokens, logits, smoothing="none", m=100, keep_all_terms=False):
import numpy as np
fulltokens = []
weights = []
for token, weight in zip(tokens, logits):
if token.startswith('##'):
fulltokens[-1] += token[2:]
else:
fulltokens.append(token)
weights.append(weight)
fulltokens_filtered, weights_filtered = [], []
selected_tokens = {}
for token, w in zip(fulltokens, weights):
if token == '[CLS]' or token == '[SEP]' or token == '[PAD]':
continue
if w < 0: w = 0
if smoothing == "sqrt":
tf = int(np.round(m * np.sqrt(w)))
else:
tf = int(np.round(m * w))
if tf < 1:
if not keep_all_terms: continue
else: tf = 1
selected_tokens[token] = max(tf, selected_tokens.get(token, 0))
return selected_tokens
def dict_tf2text(tfdict):
    """Expand a {term: tf} dict into a pseudo-document where each term is
    repeated tf times, each occurrence followed by a single space."""
    # join is linear in output size instead of quadratic repeated concatenation
    return ''.join((t + ' ') * tfdict[t] for t in tfdict)
class DeepCTTransformer(pt.Transformer):
    """PyTerrier transformer wrapping the original TF1 DeepCT model.

    transform() replaces each document's text with its DeepCT expansion:
    every term repeated proportionally to its predicted importance.
    Requires the TF1 `deepct` package and a trained checkpoint.
    """
    #bert_config="/users/tr.craigm/projects/pyterrier/DeepCT/bert-base-uncased/bert_config.json"
    #checkpoint="/users/tr.craigm/projects/pyterrier/DeepCT/outputs/marco/model.ckpt-65816"
    def __init__(self, bert_config, checkpoint, vocab_file="bert-base-uncased/vocab.txt", max_seq_length=128):
        """bert_config/checkpoint/vocab_file are filesystem paths to the TF1 BERT assets."""
        import os
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow C++ log spam
        try:
            import deepct
        except ImportError:
            raise ImportError('deepct package not found\n - pip install git+https://github.com/cmacdonald/DeepCT.git@tf1#egg=DeepCT')
        from deepct import modeling
        from deepct import run_deepct
        model_fn = run_deepct.model_fn_builder(
            bert_config=modeling.BertConfig.from_json_file(bert_config),
            init_checkpoint=checkpoint,
            learning_rate=5e5,
            num_train_steps=None,
            num_warmup_steps=None,
            use_tpu=False,
            use_one_hot_embeddings=False,
            use_all_layers=False,
        )
        from deepct import tokenization
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_file, do_lower_case=True)
        import tensorflow as tf
        # TPUEstimator is used purely in CPU/GPU mode (use_tpu=False below)
        is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
        run_config = tf.contrib.tpu.RunConfig(
            cluster=None,
            master=None,
            tpu_config=tf.contrib.tpu.TPUConfig(
                iterations_per_loop=1000,
                num_shards=8,
                per_host_input_for_training=is_per_host))
        self.estimator = tf.contrib.tpu.TPUEstimator(
            use_tpu=False,
            model_fn=model_fn,
            config=run_config,
            predict_batch_size=16)
        self.max_seq_length = max_seq_length
    def transform(self, docs):
        """Rewrite docs['text'] via DeepCT; expects columns 'docno' and 'text'."""
        def gen():
            from deepct.run_deepct import InputExample
            for row in docs.itertuples():
                yield InputExample(row.docno, row.text, {})
        from deepct import run_deepct
        features = run_deepct.convert_examples_to_features(gen(), None, self.max_seq_length, self.tokenizer)
        input_fn = run_deepct.input_fn_builder(features, self.max_seq_length, False, False)
        result = self.estimator.predict(input_fn=input_fn)
        newdocs = []
        for (i, prediction) in enumerate(result):
            targets = prediction["target_weights"]  # NOTE(review): unused
            logits = prediction["logits"]
            tokens = self.tokenizer.convert_ids_to_tokens(prediction["token_ids"])
            term2tf = _subword_weight_to_word_weight(tokens, logits)
            newdocs.append(dict_tf2text(term2tf))
            # NOTE(review): this guard fires *after* appending, so a surplus
            # prediction would still be appended -- confirm the estimator
            # always yields exactly len(docs) predictions.
            if i >= len(docs):
                break
        rtr = pd.DataFrame()
        rtr["docno"] = docs["docno"]
        rtr["text"] = newdocs
        return rtr
def _subword_weight_to_dict(tokens, logits):
fulltokens = []
weights = []
for token, weight in zip(tokens, logits.tolist()):
if token.startswith('##'):
fulltokens[-1] += token[2:]
else:
fulltokens.append(token)
weights.append(weight)
selected_tokens = {}
for token, tf in zip(fulltokens, weights):
if token in ('[CLS]', '[SEP]', '[PAD]') or tf <= 0:
continue
selected_tokens[token] = max(tf, selected_tokens.get(token, 0))
return selected_tokens
class DeepCT(pt.Transformer):
    """HuggingFace-based DeepCT transformer.

    transform() runs a token-classification model over the 'text' column,
    scales the per-token logits, merges WordPieces back into words, and adds
    the result as a 'toks' column of {term: weight} dicts.
    """
    def __init__(self, model='macavaney/deepct', batch_size=64, scale=100, device=None, round=True):
        # 'round' and 'model' intentionally shadow builtins/params only locally
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(device)
        self.model = transformers.AutoModelForTokenClassification.from_pretrained(model).eval().to(self.device)
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(model)
        self.batch_size = batch_size
        self.scale = scale  # multiplier applied to the raw logits
        self.round = round  # when True, scaled weights are rounded to int32
    def transform(self, inp):
        """Adds a 'toks' column; expects a 'text' column in inp."""
        res = []
        with torch.no_grad():
            for texts in chunked(inp['text'], self.batch_size):
                texts = list(texts)
                toks = self.tokenizer(texts, return_tensors='pt', padding=True, truncation=True)
                batch_tok_scores = self.model(**{k: v.to(self.device) for k, v in toks.items()})['logits']
                # squeeze the trailing label dim -- assumes the model emits one
                # logit per token (num_labels == 1); TODO confirm for custom models
                batch_tok_scores = batch_tok_scores.squeeze(2).cpu().numpy()
                batch_tok_scores = self.scale * batch_tok_scores
                if self.round:
                    batch_tok_scores = np.round(batch_tok_scores).astype(np.int32)
                for i in range(batch_tok_scores.shape[0]):
                    toks_txt = self.tokenizer.convert_ids_to_tokens(toks['input_ids'][i])
                    toks_dict = _subword_weight_to_dict(toks_txt, batch_tok_scores[i])
                    res.append(toks_dict)
        return inp.assign(toks=res)
class Toks2Text(pt.Transformer):
    """Converts a 'toks' column ({term: tf} dicts) back into a 'text' column
    where each term is repeated tf times."""
    def transform(self, inp):
        texts = inp['toks'].apply(self.toks2text)
        return inp.assign(text=texts)
    def toks2text(self, toks):
        counted = Counter(toks)
        return ' '.join(counted.elements())
| terrierteam/pyterrier_deepct | pyterrier_deepct/__init__.py | __init__.py | py | 6,447 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "pyterrier.started",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyterrier.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_num... |
45871652881 | from .models import Course,Registration
from rest_framework import serializers
from course.models import Course
from users.serializer import UserSerializer
class CourseSerializer(serializers.ModelSerializer):
    """Read serializer for Course; the teacher is rendered as a nested user
    object rather than a primary key."""
    c_teacher = UserSerializer()
    class Meta:
        model = Course
        fields = (
            'c_id',
            'c_code',
            'c_name',
            'c_teacher',
            'cnt_sign'
        )
class RegistrationSerializer(serializers.ModelSerializer):
    """Registration with both the student and the course fully nested."""
    user = UserSerializer()
    course = CourseSerializer()
    class Meta:
        model = Registration
        fields = (
            'user',
            'course',
            'cnt_abcense',
        )
class RegistrationSerializerByCourse(serializers.ModelSerializer):
    """Registration as listed under a known course: only the student is
    nested, the course itself is omitted."""
    user = UserSerializer()
    # course = CourseSerializer()
    class Meta:
        model = Registration
        fields = (
            'user',
            # 'course',
            'cnt_abcense',
        )
class RegistrationSerializerByUser(serializers.ModelSerializer):
    """Registration as listed under a known user: only the course is nested,
    the user itself is omitted."""
    #user = UserSerializer()
    course = CourseSerializer()
    class Meta:
        model = Registration
        fields = (
            #'user',
            'course',
            'cnt_abcense',
        )
class RegistrationSerializer1(serializers.ModelSerializer):
    """Course together with the full list of its registrations, exposed as
    the computed 'student' field."""
    c_teacher = UserSerializer()
    student= serializers.SerializerMethodField()
    def get_student(self, obj):
        """All registrations of course `obj` (one extra query per course)."""
        result_set = Registration.objects.filter(course=obj)
        return RegistrationSerializer(result_set, many=True).data
    class Meta:
        model = Course
        fields = (
            'c_id',
            'c_name',
            'c_code',
            'c_teacher',
            'cnt_sign',
            'student',
        )
| iris19990802/HDUSamaritan-backend | course/serializer.py | serializer.py | py | 1,746 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "users.serializer.UserSerializer",
"line_number": 7,
"usage_type"... |
39011961976 | # -*- coding: utf-8 -*-
"""
Created on Thr Jan 10 09:13:24 2018
@author: Takashi Tokuda
Keigan Inc.
"""
import argparse
import sys
import pathlib
import serial
import msvcrt
import serial.tools.list_ports
from time import sleep
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.insert(0, str(current_dir) + '/../../') # give 1st priority to the directory where pykeigan exists
from pykeigan import usbcontroller
from pykeigan import utils
"""
----------------------
Change Baud Rate
You can check new baud rate by "baudrate_run.py" after executing this sample
Set the current baud rate to current_baud
ボーレートの変更
本サンプル実行後、"baudrate_run.py" で動作確認が可能
以下の current_baud に現在のボーレートを入れること
----------------------
"""
current_baud = 115200
def select_port():
    """Interactively pick a serial port and return its device name.

    Exits the program when no port is available. Re-prompts on non-numeric
    or out-of-range input instead of silently returning None (which used to
    crash the caller inside the serial driver).
    """
    print('Available COM ports list')
    portlist = serial.tools.list_ports.comports()
    if not portlist:
        print('No available port')
        sys.exit()
    print('i : name')
    print('--------')
    for i, port in enumerate(portlist):
        print(i, ':', port.device)
    while True:
        print('- Enter the port number (0~)')
        try:
            portnum = int(input())
        except ValueError:
            print('Invalid value!')
            continue
        if 0 <= portnum < len(portlist):
            break
        print('Invalid value!')
    portdev = portlist[portnum].device
    print('Conncted to', portdev)
    return portdev
def baud_rate_setting():
    """Show the baud-rate menu and return the selected index (0-5)."""
    labels = ['115200', '230400', '250000', '460800', '921600', '1000000 (1M)']
    print('Select baud rate to set')
    print('--------')
    for idx, label in enumerate(labels):
        print('{}: {}'.format(idx, label))
    print('--------')
    num = int(input())
    while num < 0 or num > 5:
        print('Invalid value!')
        num = int(input())
    return num
# Connect at the *current* baud rate, switch to the newly selected one,
# persist it, and reboot the motor so the change takes effect.
dev=usbcontroller.USBController(select_port(),baud=current_baud) # Set the current baudrate to communicate
dev.set_baud_rate(baud_rate_setting()) #5: 1Mbps
dev.save_all_registers()  # persist the new setting to the motor's registers
sleep(1)  # give the motor time to finish writing before rebooting
dev.reboot()
| keigan-motor/pykeigan_motor | examples/windows_examples/baudrate_change.py | baudrate_change.py | py | 1,977 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "serial.tools.list_ports.co... |
8445031902 | from pyspark.sql import SparkSession
import numpy as np
import pandas as pd
import gc
from pyspark.ml.feature import StringIndexer, VectorIndexer, VectorAssembler
from pyspark.sql.functions import col
from pyspark.sql.types import StringType,BooleanType,DateType,DoubleType
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
def init_spark():
    """Create (or reuse) the SparkSession for this job.

    Returns a (SparkSession, SparkContext) pair.
    """
    session = SparkSession.builder \
        .appName("hdfs_ICU-stays-pred_LR") \
        .getOrCreate()
    return session, session.sparkContext
# The data come from the physionet.org 2012 challenge, but in a different
# format, so there is normally a first reshaping phase. That preprocessing is
# skipped here by using the already-preprocessed Kaggle dataset. If needed,
# the full preprocessing is described at
# https://www.kaggle.com/code/msafi04/predict-icu-mortality-shap/notebook
# (or do a first pass in pandas and then convert to a Spark DataFrame).
file = "hdfs://namenode:9000/user/root/input/Applications/Data/icu_mortality_train.csv"
sql,sc = init_spark()
# work in pandas for the moment
df = sql.read.load(file,format = "csv", inferSchema="true", sep=",", header="true").toPandas()
df = df.replace([-1.0, np.inf, -np.inf], np.nan)
df['age_group'] = pd.cut(df['Age'], bins = 9, labels = ['<20', '20s', '30s', '40s', '50s', '60s', '70s', '80s', '90s'])
df['Height'] = df['Height'].fillna(df['Height'].median())
df['Weight'] = df['Weight'].fillna(df['Weight'].median())
df['bmi'] = df.apply(lambda x: round((x['Weight'] / (x['Height'] ** 2)) * 10000, 2), axis = 1)
df['bmi_group'] = pd.cut(df['bmi'], bins = [df['bmi'].min(), 18.5, 24.9, 29.9, df['bmi'].max()], labels = ['Underweight', 'Healthy', 'Overweight', 'Obesity'])
del df['bmi']
gc.collect()
cat_features = ['Gender', 'ICUType', 'age_group', 'bmi_group']
num_features = [c for c in df.columns if c not in cat_features]
num_features = [c for c in num_features if c not in ['RecordID', 'In-hospital_death']]
cat_features, num_features  # NOTE(review): no-op expression (notebook leftover)
# work on pandas DataFrames per class, then move to a Spark DataFrame
df_0 = df[df['In-hospital_death'] == 0].copy()
df_1 = df[df['In-hospital_death'] == 1].copy()
#Impute Numerical Features with mean value
df_0[num_features] = df_0[num_features].fillna(df_0[num_features].mean())
df_1[num_features] = df_1[num_features].fillna(df_1[num_features].mean())
#Impute Categorical Features with most frequent value
# NOTE(review): this loop rebinds the name 'col' imported from
# pyspark.sql.functions -- any later use of functions.col would break.
for col in cat_features:
    df_0[col] = df_0[col].fillna(df_0[col].value_counts().index[0])
    df_1[col] = df_1[col].fillna(df_1[col].value_counts().index[0])
#concat both df, shuffle and reset index
df = pd.concat([df_0, df_1], axis = 0).sample(frac = 1).reset_index(drop = True)
#print(cat_features, num_features)
data = sql.createDataFrame(df)
# instead of keeping the label column, cast the indexed string values to
# integer, then run the pipeline below
indexer = StringIndexer(inputCol="age_group", outputCol="age_label")
indexer.fit(data).transform(data)  # NOTE(review): result discarded; next line refits anyway
temp_sdf = indexer.fit(data).transform(data)
data = temp_sdf.withColumn("age_group", temp_sdf["age_label"].cast("integer"))
indexer = StringIndexer(inputCol="bmi_group", outputCol="bmi_label")
indexer.fit(data).transform(data)  # NOTE(review): result discarded; next line refits anyway
temp_sdf = indexer.fit(data).transform(data)
data = temp_sdf.withColumn("bmi_group", temp_sdf["bmi_label"].cast("integer"))
feature = VectorAssembler(inputCols = data.drop('In-hospital_death').columns, outputCol='features')
feature_vector = feature.transform(data)
feat_lab_df = feature_vector.select(['features', 'In-hospital_death'])
train, test = feat_lab_df.randomSplit([0.8, 0.2])
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit
from pyspark.mllib.evaluation import BinaryClassificationMetrics as metric
lr = LogisticRegression(labelCol='In-hospital_death')
# hyper-parameter grid: two candidate values for each tuned parameter
paramGrid = ParamGridBuilder().addGrid(lr. regParam, (0.01, 0.1))\
.addGrid(lr.maxIter, (5, 10))\
.addGrid(lr.tol, (1e-4, 1e-5))\
.addGrid(lr.elasticNetParam, (0.25, 0.75))\
.build()
tvs = TrainValidationSplit(estimator=lr,
                           estimatorParamMaps=paramGrid,
                           evaluator=MulticlassClassificationEvaluator(labelCol='In-hospital_death'),
                           trainRatio=0.8)
lr_model = tvs.fit(train)
lr_model_pred = lr_model.transform(test)
lr_model_pred.show(3)
lr_model_pred.printSchema()
results = lr_model_pred.select(['probability', 'In-hospital_death'])
results.show(10)
results_collect = results.collect()
# pair P(class 0) with the inverted label so BinaryClassificationMetrics
# scores the negative class consistently
results_list = [(float(i[0][0]), 1.0-float(i[1])) for i in results_collect]
scoreAndLabels = sc.parallelize(results_list)
metrics = metric(scoreAndLabels)
lr_acc = round(MulticlassClassificationEvaluator(labelCol='In-hospital_death', metricName='accuracy').evaluate(lr_model_pred), 4)
lr_prec = round(MulticlassClassificationEvaluator(labelCol='In-hospital_death', metricName='weightedPrecision').evaluate(lr_model_pred), 4)
lr_roc = round(metrics.areaUnderROC, 4)
lr_dict = {'Accuracy': lr_acc, 'Precision': lr_prec, 'ROC Score': lr_roc}
print(lr_dict)
| TommasoD/SEASHELL | MLicu_lr_hdfs.py | MLicu_lr_hdfs.py | py | 5,353 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 14,
"usage_type"... |
20652999633 | from template2.database.Mongoconnection import Mongoconnection
from bson.objectid import ObjectId
import pymongo
class PolicyDao(Mongoconnection):
    """Data-access object for the 'policies' Mongo collection.

    NOTE(review): documents use inconsistent customer keys across methods
    ('customer_id', 'cust_id', 'customerId') -- verify against the schema.
    """
    print("insidepolicydao")  # NOTE(review): runs at class-definition time
    def __init__(self):
        super(PolicyDao, self).__init__()
        print("inside_init")
        # binds self.collection to the 'policies' collection
        self.get_collection("policies")
    def getCountOfTotalEntries(self, customerId):
        """Number of policy documents belonging to customerId."""
        return self.collection.count_documents({"customerId": customerId})
    '''def getPolicies(self, filterQuery, sortQuery, skip, limit):
        cursor = self.collection.find(filterQuery).collation({"locale": "en"}).sort(sortQuery).skip(skip).limit(limit)
        policies = list(cursor)
        totalResults = cursor.count()
        return policies, totalResults'''
    '''def isPolicyExist(self, policyId, customerId):
        result = self.collection.find_one({"_id": ObjectId(policyId), 'customerId': customerId})
        if result:
            return True
        return False'''
    def getpolicy(self,custid):
        """Merge every policy document of custid into one flat dict.

        '_id' fields are dropped; later documents overwrite earlier keys.
        """
        cursor = self.collection.find({"customer_id" : custid})
        lists = list(cursor)
        #cursor = self.collection.find_one({"cust_id" : customerid} )
        res = {}
        print(lists)
        for i in lists:
            for j, k in i.items():
                if j!="_id":
                    res[j]=k
        print(res)
        return res
    def create(self, policy,custid):
        """Split the policy dict into three documents (items 1-4, 5-7, 8-10
        in insertion order), tag each with customer_id, and insert them.

        NOTE(review): items beyond the tenth are silently dropped.
        """
        count=0
        list1 = {}
        list2 = {}
        list3 = {}
        list1["customer_id"] = custid
        list2["customer_id"] = custid
        list3["customer_id"] = custid
        for i, j in policy.items():
            count=count+1
            if count>=1 and count<=4:
                list1[i]=j
            elif count>=5 and count<=7:
                list2[i]=j
            elif count>=8 and count<=10:
                list3[i]=j
        insertOneResult = self.collection.insert_one(list1)
        print("self id",insertOneResult.inserted_id)
        insertOneResult = self.collection.insert_one(list2)
        print("self id",insertOneResult.inserted_id)
        insertOneResult = self.collection.insert_one(list3)
        print("self id",insertOneResult.inserted_id)
    def updatepolicy(self, policy, customerid):
        """Replace the first document keyed by cust_id with the first four
        items of `policy` (plus cust_id).

        NOTE(review): Collection.update() was removed in PyMongo 4 -- this
        only works on PyMongo 3.x; prefer replace_one/update_one.
        """
        # Remove the '_id' as it's immutable and results in WriteError
        res = self.collection.find_one({"cust_id": customerid})
        print("policy customerid",policy,customerid,res)
        print(res["_id"])
        res1 = {}
        count=0
        for i, j in policy.items():
            print(i, ":", j)
            count=count+1
            res1[i]=j
            if count==4 :
                break
        res1["cust_id"] = customerid
        updateResult = self.collection.update({'_id': ObjectId(res["_id"])}, res1)
        #updateResult = self.collection.update({'cust_id': customerid}, policy)
        #print("update",updateresult)
    def getPolicy(self, policyId, customerId):
        """Fetch one policy by ObjectId and owning customer."""
        return self.collection.find_one({'_id': ObjectId(policyId), 'customerId': customerId})
    def replace(self, policyName, customerId, policy):
        """Upsert the policy named policyName for customerId; returns True
        when an existing document was updated, False when one was created."""
        # True flag makes upsert = True i.e. when filter result not found it will create entry
        updateResult = self.collection.replace_one(
            {'name': policyName, "customerId": customerId}, policy, True)
        return updateResult.raw_result['updatedExisting']
{
"api_name": "template2.database.Mongoconnection.Mongoconnection",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "bson.objectid.ObjectId",
"line_number": 93,
"usage_type": "call"... |
38252725617 | from rest_framework import viewsets, status, generics
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.authtoken.models import Token
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser, IsAuthenticatedOrReadOnly
from django.contrib.auth.models import User
from .serializers import NodeSerializer, AuthorSerializer, PostSerializer, CommentSerializer, LikeSerializer, LikesSerializer, InboxSerializer, Author_neat_Serializer
from .models import Node, Author, Post, Like, Comment, FriendRequest, Likes, Inbox
from .serializers import FriendRequestSerializer
from django.http import JsonResponse, HttpResponse
from .permissions import IsOwnerOrReadOnly
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from django.shortcuts import get_list_or_404, get_object_or_404
import uuid
from itertools import chain
import numpy as np
from django.db.models import F
# =====================================================================================================================================
# Node
# =====================================================================================================================================
class NodeViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for federated Nodes; open to any caller (AllowAny)."""
    queryset = Node.objects.all()
    serializer_class = NodeSerializer
    permission_classes = (AllowAny, )
# =====================================================================================================================================
# Author
# =====================================================================================================================================
# URL: ://service/author/{AUTHOR_ID}/
class AuthorViewSet(viewsets.ModelViewSet):
    """Author CRUD + login endpoints; author ids are full URLs of the form
    https://nofun.herokuapp.com/author/{uuid}.

    NOTE(review): passwords are stored and compared in plain text -- use
    Django's password hashing (make_password/check_password) before shipping.
    """
    queryset = Author.objects.all()
    serializer_class = AuthorSerializer
    # permission_classes = (AllowAny, )
    #static_author_id = ''
    def all_users(self, request, *args, **kwargs):
        """GET: list every author on the node."""
        queryset = Author.objects.all()
        serializer = AuthorSerializer(queryset, many=True)
        return Response(serializer.data)
    def retrive(self, request, author_uid=None, *args, **kwargs):
        """GET: a single author by uuid (method name typo is part of the
        routed interface -- do not rename without updating the URL conf)."""
        #request_str = str(request)
        #author_id = request_str.split("/")[2]
        # print(author_id)
        #self.static_author_id = author_id
        # give global to use
        host = 'https://nofun.herokuapp.com'
        author_id = f'{host}/author/{author_uid}'
        queryset = Author.objects.get(id=author_id)
        serializer = Author_neat_Serializer(queryset)
        return Response(serializer.data)  # t5
    def create_1(self, request, *args, **kwargs):
        """POST: create an author with a freshly minted uuid-based URL id."""
        display_name = request.data.get('displayName')
        github = request.data.get('github')
        author_uid = str(uuid.uuid4().hex)
        host = 'https://nofun.herokuapp.com'
        author_id = f'{host}/author/{author_uid}'
        url = author_id
        email = request.data.get('email')
        username = request.data.get('username')
        password = request.data.get('password')
        is_approved = request.data.get('is_approved')
        Author.objects.create(
            id=author_id,
            host=host,
            url=url,
            email=email,
            username=username,
            password=password,
            displayName=display_name,
            github=github,
        )
        # it will return true false and change true false in database and return it out to json
        if is_approved != None:
            if is_approved == 'true':
                Author.objects.filter(pk=author_id).update(is_approved=True)
        else:
            is_approved = Author._meta.get_field('is_approved').get_default()
        print(is_approved)
        author_data = {'id': author_id, 'host': host, 'url': url,
                       'displayName': display_name, 'github': github, 'email': email, 'username': username, 'password': password, 'is_approved': is_approved}
        # authentication:
        return JsonResponse(author_data)
    def author_login(self, request, *args, **kwargs):
        """POST: credential check. Returns the author on success, False when
        the account is not yet approved, None on bad credentials."""
        username = request.data.get('username')
        password = request.data.get('password')
        try:
            author = Author.objects.get(username=username)
            if password == author.password:
                if author.is_approved:
                    serializer = AuthorSerializer(author)
                    return Response(serializer.data)
                else:
                    return Response(False)
            else:
                return Response(None)
        except:
            # NOTE(review): bare except also swallows programming errors
            return Response(None)
    def update(self, request, author_uid=None, *args, **kwargs):
        """POST: partial profile update; only supplied, truthy fields change.

        NOTE(review): `author` is fetched before the updates, so the response
        may echo the pre-update values.
        """
        host = 'https://nofun.herokuapp.com'
        author_id = f'{host}/author/{author_uid}'
        author = Author.objects.get(id=author_id)
        # print(author.id)
        name = request.data.get('displayName', None)
        email = request.data.get('email', None)
        password = request.data.get('password', None)
        github = request.data.get('github', None)
        if name:
            Author.objects.filter(pk=author_id).update(
                displayName=name
            )
        if github:
            Author.objects.filter(pk=author_id).update(
                github=github
            )
        if email:
            Author.objects.filter(pk=author_id).update(
                email=email
            )
        if password:
            Author.objects.filter(pk=author_id).update(
                password=password
            )
        # return frontend need data
        serializer = AuthorSerializer(author)
        return Response(serializer.data)
    # havent used
    # def create(self, request):
    #     try:
    #         author = Author.objects.get(username=request.data['username'])
    #         token = Token.objects.get(user=author)
    #         response = {'id': author.id, 'username': author.username,
    #                     'password': author.password, 'token': token.key}
    #         return JsonResponse(response)
    #     except:
    #         author = Author.objects.create(
    #             username=request.data['username'],
    #             password=request.data['password'],
    #         )
    #         token = Token.objects.create(user=author)
    #         response = {'id': author.id, 'username': author.username,
    #                     'password': author.password, 'token': token.key}
    #         return JsonResponse(response)
# =====================================================================================================================================
# Post
# =====================================================================================================================================
# URL: ://service/author/{AUTHOR_ID}/posts/{POST_ID}
class PostViewSet(viewsets.ModelViewSet):
    """Post CRUD plus feed endpoints.

    Post ids are full URLs ({host}/author/{uuid}/posts/{uuid}); new posts
    are fanned out into the Inbox of every follower/friend.
    """
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    #authentication_classes = (TokenAuthentication, )
    # permission_classes = (AllowAny, )
    def all_posts(self, request, *args, **kwargs):
        """GET: every post on the node."""
        queryset = Post.objects.all()
        serializer = PostSerializer(queryset, many=True)
        return Response(serializer.data)
    def all_friends_posts(self, request, *args, **kwargs):
        """GET: posts by this author's accepted friends (both directions of
        an accepted FriendRequest). Author uuid is parsed from the URL path."""
        # get list of friend only
        request = str(request)
        author_uuid = request.split("/")[2]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        friends = []
        # follower_list = {"type": "followers", "items": []}
        for item in FriendRequest.objects.filter(object=author_id, status='A').values():
            friends.append(item["actor"])
        for item in FriendRequest.objects.filter(actor=author_id, status='A').values():
            friends.append(item["object"])
        posts = []
        for author_id in friends:
            post = Post.objects.filter(author=Author.objects.get(id=author_id))
            serializer = PostSerializer(post, many=True)
            posts.append(serializer.data)
        return Response(posts)
    def post_list(self, request, author_uid=None, *args, **kwargs):
        """GET: all posts belonging to one author."""
        host = 'https://nofun.herokuapp.com'
        author_id = f'{host}/author/{author_uid}'
        post = Post.objects.filter(author=Author.objects.get(id=author_id))
        serializer = PostSerializer(post, many=True)
        print(serializer)
        # serializer = PostSerializer(post)
        return Response(serializer.data)
        # return Response(post)
        # return Response(Post.objects.filter(author=author_id).values())
    def post_list_id(self, request, author_uid=None, post_id=None, *args, **kwargs):
        """GET: one post by its author uuid + post uuid."""
        host = 'https://nofun.herokuapp.com'
        post_id = f'{host}/author/{author_uid}/posts/{post_id}'
        #author_id= f'{host}/author/{author_uid}'
        post = Post.objects.get(id=post_id)
        data = PostSerializer(post)
        return Response(data.data)
    # DELETE a single post using post_id
    # URL: ://service/author/{AUTHOR_ID}/posts/{POST_ID}
    def delete(self, request, author_uid=None, post_id=None, *args, **kwargs):
        """DELETE: remove a post and any inbox entries referencing it."""
        host = 'https://nofun.herokuapp.com'
        author_id = f'{host}/author/{author_uid}'
        post_id = f'{host}/author/{author_uid}/posts/{post_id}'
        post = get_object_or_404(Post, id=post_id)
        try:
            author_items = Inbox.objects.filter(author=author_id).values()
            for item in author_items:
                if item["items"]["object"] == post_id:
                    Inbox.objects.get(id=item["id"]).delete()
            post.delete()
        except ValueError:
            return Response("No such a post. Deletion fails.", 500)
        return Response("Delete successful")
    def create_1(self, request, author_uid=None, *args, **kwargs):
        """POST: create a post with a fresh uuid and fan it out to the
        inboxes of all followers and accepted friends."""
        post_uid = str(uuid.uuid4().hex)
        host = 'https://nofun.herokuapp.com'
        #author_id = f'{host}/author/{author_id}'
        post_id = f'{host}/author/{author_uid}/posts/{post_uid}'
        author_id = f'{host}/author/{author_uid}'
        comment = f'{host}/author/{author_uid}/posts/{post_uid}/comments'
        title = request.data.get('title')
        if request.data.get('source'):
            source = request.data.get('source')
        else:
            source = post_id
        if request.data.get('origin'):
            origin = request.data.get('origin')
        else:
            origin = post_id
        description = request.data.get('description')
        contentType = request.data.get('contentType')
        content = request.data.get('content')
        categories = request.data.get('categories')
        published = request.data.get('published')
        comment = comment
        visibility = request.data.get('visibility')
        unlisted = request.data.get('unlisted')
        size = Post._meta.get_field('size').get_default()
        count = Post._meta.get_field('count').get_default()
        Post.objects.create(
            id=post_id,
            title=title,
            source=source, # fix this
            origin=origin, # fix this
            description=description,
            contentType=contentType,
            content=content,
            count=count,
            size=size,
            categories=categories,
            comment=comment,
            visibility=visibility,
            published=published,
            unlisted=unlisted,
            author=Author.objects.get(id=author_id),
            # image = img
        )
        # return response
        post_data = {'title': title, 'source': source,
                     'origin': origin, 'description': description, 'contentType': contentType,
                     'content': content, 'author': author_id, 'categories': categories,
                     'count': count, 'size': size, 'comment': comment,
                     'visibility': visibility, 'unlisted': unlisted, 'id': post_id}
        followers = []
        # NOTE(review): the loop variable below shadows the `request`
        # parameter (harmless here since request is not used afterwards,
        # but fragile -- rename on next touch).
        followers_request_1 = FriendRequest.objects.filter(
            object=author_id).values()
        for request in followers_request_1:
            followers.append(request["actor"])
        followers_request_2 = FriendRequest.objects.filter(
            actor=author_id, status='A').values()
        for request in followers_request_2:
            followers.append(request["object"])
        for follower in followers:
            Inbox.objects.create(
                type="post",
                author=follower,
                items={
                    "actor": author_id,
                    "object": post_id,
                    "title": title,
                    "source": source,
                    "origin": origin,
                    "content": content,
                    "description": description,
                    "contentType": contentType,
                    "categories": categories,
                    "count": count,
                    "size": size,
                    "comment": comment,
                    "visibility": visibility,
                    "unlisted": unlisted
                }
            )
        return Response(post_data)
    def edit(self, request, author_uid=None, post_id=None, *args, **kwargs):
        """POST: partially update a post (title/description/content/etc.)."""
        host = 'https://nofun.herokuapp.com'
        comments_id = f'{host}/author/{author_uid}/posts/{post_id}/comments'
        post_id = f'{host}/author/{author_uid}/posts/{post_id}'
        author_id = f'{host}/author/{author_uid}'
        #post = Post.objects.get(id=post_id)
        post = get_object_or_404(Post, id=post_id)
        # print('correct',post.title)
        title = request.data.get('title')
        # print(title)
        source = request.data.get('source')
        origin = request.data.get('origin')
        description = request.data.get('description')
        contentType = request.data.get('contentType')
        content = request.data.get('content')
        categories = request.data.get('categories')
        count = request.data.get('count')
        size = request.data.get('size')
        comment = comments_id
        visibility = request.data.get('visibility')
        unlisted = post.unlisted
        post_data = {'title': title, 'source': source,
                     'origin': origin, 'description': description, 'contentType': contentType,
                     'content': content, 'author': author_id, 'categories': categories,
                     'count': count, 'size': size, 'comment': comment,
                     'visibility': visibility, 'unlisted': unlisted, 'id': post_id}
        Post.objects.filter(pk=post_id).update(
            title=title
        )
        # NOTE(review): the two attribute assignments below are never
        # persisted (no post.save()) -- source/origin stay unchanged in DB.
        post.source = post_id # fix this
        post.origin = post_id # fix this
        Post.objects.filter(pk=post_id).update(
            description=description
        )
        if contentType:
            Post.objects.filter(pk=post_id).update(
                contentType=contentType
            )
        Post.objects.filter(pk=post_id).update(
            content=content
        )
        if count:
            Post.objects.filter(pk=post_id).update(
                count=count
            )
        if size:
            Post.objects.filter(pk=post_id).update(
                size=size
            )
        Post.objects.filter(pk=post_id).update(
            categories=categories
        )
        if visibility:
            Post.objects.filter(pk=post_id).update(
                visibility=visibility
            )
        #post.author = Author.objects.get(id=author_id)
        return Response(post_data)
        # return Response('Author updated successfully', 204)
    def create_2(self, request, author_uid=None, post_id=None, *args, **kwargs):
        """PUT: replace a post at a caller-supplied post uuid (full update)."""
        host = 'https://nofun.herokuapp.com'
        comments_id = f'{host}/author/{author_uid}/posts/{post_id}/comments'
        #author_id = f'{host}/author/{author_id}'
        post_id = f'{host}/author/{author_uid}/posts/{post_id}'
        author_id = f'{host}/author/{author_uid}'
        title = request.data.get('title')
        source = request.data.get('source')
        origin = request.data.get('origin')
        description = request.data.get('description')
        contentType = request.data.get('contentType')
        content = request.data.get('content')
        categories = request.data.get('categories')
        count = request.data.get('count')
        published = request.data.get('published')
        size = request.data.get('size')
        comment = comments_id
        visibility = request.data.get('visibility')
        unlisted = request.data.get('unlisted')
        Post.objects.filter(pk=post_id).update(
            title=title,
            source=post_id, # fix this
            origin=post_id, # fix this
            description=description,
            contentType=contentType,
            content=content,
            count=count,
            size=size,
            categories=categories,
            comment=comment,
            visibility=visibility,
            published=published,
            unlisted=unlisted,
            author=Author.objects.get(id=author_id),
        )
        # return response
        post_data = {'title': title, 'source': source,
                     'origin': origin, 'description': description, 'contentType': contentType,
                     'content': content, 'author': author_id, 'categories': categories,
                     'count': count, 'size': size, 'comment': comment,
                     'visibility': visibility, 'unlisted': unlisted, 'id': post_id}
        return Response(post_data)
# =====================================================================================================================================
# Comment
# =====================================================================================================================================
class CommentViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for comments attached to posts.

    Ids are fully-qualified URLs; the author/post UUIDs are recovered by
    splitting the string form of the request path.
    """
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer

    def post_new_comment(self, request, *args, **kwargs):
        """Create a comment, bump the post's counters and notify the owner.

        Side effects, in order: create the Comment row, update the parent
        Post's size/count, and insert a "comment" item into the post
        owner's inbox.
        """
        request_str = str(request)
        # currently the author_id is the UUID
        author_id = request_str.split("/")[2]
        post_id = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        real_author_id = host + "author/" + author_id
        real_post_id = real_author_id + "/posts/" + post_id
        post_id = real_post_id
        # Mint a fresh comment id under the post's comments collection.
        comment_uuid = uuid.uuid4().hex
        comment_id = post_id + "/comments/" + comment_uuid
        author = request.data.get('author')
        comment = request.data.get('comment')
        contentType = request.data.get('contentType')
        comment_data = {'type': 'comment', 'author': author, 'post': post_id,
                        'comment': comment, 'contentType': contentType, 'id': comment_id}
        Comment.objects.create(author=author, post=post_id,
                               comment=comment, contentType=contentType, id=comment_id)
        post = Post.objects.get(pk=post_id)
        # NOTE(review): size is overwritten with the length of *this*
        # comment, not a running total — confirm intended semantics.
        post.size = len(comment)
        post.count += 1
        post.save()
        # add this comment to the post's owner's inbox
        receiver_id = host + "author/" + author_id
        Inbox.objects.create(
            type="comment",
            author=receiver_id,
            items={
                "actor": author,
                "object": post_id,
                "comment": comment,
                "contentType": contentType
            }
        )
        return Response(comment_data)

    def get_comment_list(self, request, *args, **kwargs):
        """Return all comments of a post, or an empty list if none."""
        request_str = str(request)
        # currently the author_id is the UUID
        author_id = request_str.split("/")[2]
        post_id = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        real_author_id = host + "author/" + author_id
        real_post_id = real_author_id + "/posts/" + post_id
        queryset = Comment.objects.filter(post=real_post_id)
        if queryset.exists():
            return Response(list(queryset.values()))
        else:
            return Response([])

    def retrive_a_comment(self, request, *args, **kwargs):
        """Return a single comment by its reconstructed URL id."""
        request_str = str(request)
        # currently the author_id is the pure UUID
        author_id = request_str.split("/")[2]
        post_id = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        real_author_id = host + "author/" + author_id
        real_post_id = real_author_id + "/posts/" + post_id
        comment_id = request_str.split("/")[6]
        real_comment_id = real_post_id + "/comments/" + comment_id
        if Comment.objects.filter(id=real_comment_id).exists():
            return Response(Comment.objects.filter(id=real_comment_id).values())
        else:
            return Response("This comment does not exist.")
# =====================================================================================================================================
# Friend/Follower
# =====================================================================================================================================
class FriendRequestViewSet(viewsets.ModelViewSet):
    """Follower / friend-request state machine.

    A FriendRequest row stores ``actor`` (sender), ``object`` (receiver)
    and a status flag: 'R' requested, 'A' accepted, 'D' declined.
    Friendship is symmetric once a request is accepted; "follower" means
    the actor of a row pointing at the given author.
    """
    serializer_class = FriendRequestSerializer
    queryset = FriendRequest.objects.all()

    def get_permissions(self):
        # Friend-request endpoints are open to everyone (no auth).
        self.permission_classes = [AllowAny]
        return super(FriendRequestViewSet, self).get_permissions()

    def create(self, request, *args, **kwargs):
        """Send a friend request from request.data['actor'] to ['object'].

        Handles every combination of an existing row in either direction:
        duplicate requests and existing friendships are rejected, declined
        requests are re-opened, and a pending reverse request is
        auto-accepted (mutual interest => friends).
        """
        # create friend request
        # from_user_id = Author.objects.get(id=request.data["actor"])
        from_user_id = request.data["actor"]
        to_user_id = request.data["object"]
        if FriendRequest.objects.filter(actor=from_user_id, object=to_user_id, status="R").exists():
            # Check if the request already exists and status is "requested".
            return Response("Unable to send friend request because the friend request alreay exists!")
        elif FriendRequest.objects.filter(actor=from_user_id, object=to_user_id, status="A").exists():
            # Check if the request exists and status is "A"
            return Response("Unable to send friend request because you have already become friends!")
        elif FriendRequest.objects.filter(actor=from_user_id, object=to_user_id, status="D").exists():
            # If your request was declined, sending again re-opens it.
            FriendRequest.objects.filter(actor=from_user_id, object=to_user_id, status="D").update(
                actor=from_user_id, object=to_user_id, status='R')
            return Response("Successfully create the friend request!")
        elif FriendRequest.objects.filter(actor=to_user_id, object=from_user_id, status="R").exists():
            # If he already sent the request to you and status is R, then
            # you become friends automatically.
            FriendRequest.objects.filter(actor=to_user_id, object=from_user_id, status="R").update(
                actor=to_user_id, object=from_user_id, status='A')
            return Response("He/She had sent the request to you and you become friend automatically!")
        elif FriendRequest.objects.filter(actor=to_user_id, object=from_user_id, status="A").exists():
            return Response("Unable to send friend request because you have already become friends!")
        elif FriendRequest.objects.filter(actor=to_user_id, object=from_user_id, status="D").exists():
            # A declined reverse request is re-opened in *their* direction.
            FriendRequest.objects.filter(actor=to_user_id, object=from_user_id, status="D").update(
                actor=to_user_id, object=from_user_id, status='R')
            return Response("Successfully create the friend request!")
        else:
            # No prior row in either direction: create a fresh request.
            friend_request = FriendRequest.objects.create(
                actor=from_user_id, object=to_user_id, status='R')
            return Response("Successfully create the friend request!")

    def accept_incoming_request(self, request, *args, **kwargs):
        """Accept a pending ('R') incoming request; reject other states."""
        # accept incoming friend request
        # request_from_user_id = Author.objects.get(id=request.data["actor"])
        request_from_user_id = request.data["actor"]
        current_user_id = request.data["object"]
        if FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='A').exists():
            # Check if the request has already been accepted
            return Response("Unable to accept, because you had already accepted it!")
        elif FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='D').exists():
            # Check if the request has already been declined
            return Response("Unable to accept, because you had already declined it!")
        elif FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='R').exists():
            # If request exists and status is Requested, then able to accept:
            FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='R').update(
                actor=request_from_user_id, object=current_user_id, status='A')
            return Response("Successfully accept the friend request!")
        else:
            return Response("Unable to accept because this request does not exist.")

    def decline_incoming_request(self, request, *args, **kwargs):
        """Decline a pending ('R') incoming request; reject other states."""
        # decline incoming friend request
        # request_from_user_id = Author.objects.get(id=request.data["actor"])
        request_from_user_id = request.data["actor"]
        current_user_id = request.data["object"]
        if FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='A').exists():
            # Check if the request has already been accepted
            return Response("Unable to decline because you had already accepted it!")
        elif FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='D').exists():
            # Check if the request has already been declined
            return Response("Unable to decline because you had already declined it!")
        elif FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='R').exists():
            # Successfully decline this friend request
            FriendRequest.objects.filter(actor=request_from_user_id, object=current_user_id, status='R').update(
                actor=request_from_user_id, object=current_user_id, status='D')
            return Response("Successfully decline this friend request!")
        else:
            # Request does not exist
            return Response("Unable to decline because this request does not exist.")

    def delete(self, request, *args, **kwargs):
        """Remove an accepted friendship (either direction of the row)."""
        # delete friend (only available when the status of request is 'Accepted')
        # user_1 = Author.objects.get(id=request.data["actor"])
        user_1 = request.data["actor"]
        user_2 = request.data["object"]
        if FriendRequest.objects.filter(actor=user_1, object=user_2, status='A').exists():
            # user1 created the friend request and user1 deletes
            FriendRequest.objects.filter(
                actor=user_1, object=user_2, status='A').delete()
            return Response("Successfully delete this friend!")
        elif FriendRequest.objects.filter(actor=user_2, object=user_1, status='A').exists():
            # user2 created the friend request and user1 deletes
            FriendRequest.objects.filter(
                actor=user_2, object=user_1, status='A').delete()
            return Response("Successfully delete this friend!")
        else:
            return Response("Unable to delete because you are not friends.")

    def get_follower_list(self, request, *args, **kwargs):
        """Return everyone pointing at this author, plus accepted friends.

        NOTE(review): actors of *declined* ('D') requests are included in
        the followers list as well — confirm that is intended.
        """
        # get list of followers and friends
        request = str(request)
        author_uuid = request.split("/")[2]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        # print('22222',author_id)
        #current_user = Author.objects.get(id=author_id)
        # print('1111',current_user)
        items = []
        if FriendRequest.objects.filter(object=author_id, status='R').exists():
            for item in FriendRequest.objects.filter(object=author_id, status='R').values():
                # print(item)
                items.append(item["actor"])
        # print('11111',follower_id)
        if FriendRequest.objects.filter(object=author_id, status='A').exists():
            for item in FriendRequest.objects.filter(object=author_id, status='A').values():
                items.append(item["actor"])
        if FriendRequest.objects.filter(object=author_id, status='D').exists():
            for item in FriendRequest.objects.filter(object=author_id, status='D').values():
                items.append(item["actor"])
        if FriendRequest.objects.filter(actor=author_id, status='A').exists():
            for item in FriendRequest.objects.filter(actor=author_id, status='A').values():
                items.append(item["object"])
        return Response({
            'type': 'followers',
            'items': items
        })

    def get_friend_list(self, request, *args, **kwargs):
        """Return accepted ('A') friendships in both directions."""
        # get list of friends only
        request = str(request)
        author_uuid = request.split("/")[2]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        items = []
        # follower_list = {"type": "followers", "items": []}
        for item in FriendRequest.objects.filter(object=author_id, status='A').values():
            items.append(item["actor"])
        for item in FriendRequest.objects.filter(actor=author_id, status='A').values():
            items.append(item["object"])
        return Response({
            'type': 'friends',
            'items': items
        })

    def is_follower(self, request, *args, **kwargs):
        """Report whether author_2 follows author_1 and the row's status.

        Scans all FriendRequest rows and matches the two UUIDs by
        substring (rows store full URL ids).  For an accepted friendship
        created by author_1, actor/object are swapped in the response so
        author_2 is still presented as the follower.
        """
        # check if author2 is author1's follower
        request = str(request)
        author_1_uuid = request.split("/")[2]
        author_2_uuid = request.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_1_id = host + "author/" + author_1_uuid
        author_2_id = host + "author/" + author_2_uuid
        # current_user = Author.objects.get(id=author_1_id)
        # foreign_user = Author.objects.get(id=author_2_id)
        for item in FriendRequest.objects.all().values():
            if author_1_uuid in item["object"] and author_2_uuid in item["actor"] and item["status"] == 'R':
                return Response({
                    'is_follower': True,
                    'actor': item["actor"],
                    'object': item["object"],
                    'status': 'R'
                })
            elif author_1_uuid in item["object"] and author_2_uuid in item["actor"] and item["status"] == 'A':
                return Response({
                    'is_follower': True,
                    'actor': item["actor"],
                    'object': item["object"],
                    'status': 'A'
                })
            elif author_2_uuid in item["object"] and author_1_uuid in item["actor"] and item["status"] == 'A':
                return Response({
                    'is_follower': True,
                    'actor': item["object"],
                    'object': item["actor"],
                    'status': 'A'
                })
            elif author_1_uuid in item["object"] and author_2_uuid in item["actor"] and item["status"] == 'D':
                return Response({
                    'is_follower': True,
                    'actor': item["actor"],
                    'object': item["object"],
                    'status': 'D'
                })
        return Response({'is_follower': False})
        # if FriendRequest.objects.filter(object=author_1_id, actor=author_2_id, status='R').exists():
        #     return Response({
        #         'is_follower': True,
        #         'actor': author_2_id,
        #         'object': author_1_id,
        #         'status': 'R'
        #     })
        # elif FriendRequest.objects.filter(object=author_1_id, actor=author_2_id, status='A').exists():
        #     return Response({
        #         'is_follower': True,
        #         'actor': author_2_id,
        #         'object': author_1_id,
        #         'status': 'A'
        #     })
        # elif FriendRequest.objects.filter(object=author_2_id, actor=author_1_id, status='A').exists():
        #     return Response({
        #         'is_follower': True,
        #         'actor': author_1_id,
        #         'object': author_2_id,
        #         'status': 'A'
        #     })
        # elif FriendRequest.objects.filter(object=author_1_id, actor=author_2_id, status='D').exists():
        #     return Response({
        #         'is_follower': True,
        #         'actor': author_1_id,
        #         'object': author_2_id,
        #         'status': 'D'
        #     })
        # else:
        #     return Response({'is_follower': False})

    def put_follower(self, request, *args, **kwargs):
        """Add author_2 as a follower ('R' row) of author_1 if not pending."""
        # check if author2 is author1's follower
        request = str(request)
        author_1_uuid = request.split("/")[2]
        author_2_uuid = request.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_1_id = host + "author/" + author_1_uuid
        author_2_id = host + "author/" + author_2_uuid
        # current_user = Author.objects.get(id=author_1_id)
        # foreign_user = Author.objects.get(id=author_2_id)
        if not FriendRequest.objects.filter(actor=author_2_id, object=author_1_id, status='R').exists():
            FriendRequest.objects.create(
                actor=author_2_id, object=author_1_id, status='R')
            return Response("Successfully add this follower.")
        else:
            # Already pending: nothing to do.
            return Response("")

    def remove_follower(self, request, *args, **kwargs):
        """Delete every follower/friend row between the two authors."""
        request = str(request)
        author_1_uuid = request.split("/")[2]
        author_2_uuid = request.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_1_id = host + "author/" + author_1_uuid
        author_2_id = host + "author/" + author_2_uuid
        # current_user = Author.objects.get(id=author_1_id)
        # foreign_user = Author.objects.get(id=author_2_id)
        FriendRequest.objects.filter(
            actor=author_1_id, object=author_2_id, status='A').delete()
        FriendRequest.objects.filter(
            actor=author_2_id, object=author_1_id, status='A').delete()
        FriendRequest.objects.filter(
            actor=author_2_id, object=author_1_id, status='R').delete()
        return Response("Successfully removed this follower.")
# =====================================================================================================================================
# Like/Likes/Liked
# =====================================================================================================================================
class LikesViewSet(viewsets.ModelViewSet):
    """Likes on posts and comments, plus inbox notifications."""
    serializer_class = LikesSerializer
    queryset = Likes.objects.all()

    # create like for the comment/post
    def create_likes(self, request, *args, **kwargs):
        """Create a Like for the target post or comment in the URL.

        If the path contains '/comments/' the like targets a comment,
        otherwise the post.  In both cases a Likes row is created and a
        "Like" item is inserted into the recipient's inbox.
        """
        request_str = str(request)
        author_uuid = request_str.split("/")[2]
        post_uuid = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        post_id = author_id + "/posts/" + post_uuid
        is_comments = False
        if '/comments/' in request_str:
            is_comments = True
            comment_uuid = request_str.split("/")[6]
            comment_id = post_id + "/comments/" + comment_uuid
            # Resolve the comment's author so they receive the inbox item.
            comment_author = Comment.objects.get(id=comment_id)
            comment_author_id = comment_author.author
            print(comment_author_id)
        context = ''
        actor = request.data.get('author', None)  # author ID
        if is_comments:
            # actor.displayname or some name
            summary = str(actor) + ' liked your comment. '
            likes_data = {'type': 'Like', 'summary': summary,
                          'author': actor, 'object': comment_id, 'context': context}
            # create author who is an actor
            Likes.objects.create(summary=summary, author=actor,
                                 object=comment_id, context=context)
            # add to object author's inbox
            receiver_id = comment_author_id
            Inbox.objects.create(
                type="Like",
                author=receiver_id,
                items={
                    "actor": actor,
                    "object": post_id,
                    "comment": comment_id
                }
            )
            return Response({
                'type': 'Like',
                'summary': summary,
                'author': actor,
                'object': comment_id,
                'context': context
            })
        else:
            summary = str(actor) + ' liked your post. '
            likes_data = {'type': 'Like', 'summary': summary,
                          'author': actor, 'object': post_id, 'context': context}
            Likes.objects.create(summary=summary, author=actor,
                                 object=post_id, context=context)
            # add to object author's inbox
            receiver_id = author_id
            Inbox.objects.create(
                type="Like",
                author=receiver_id,
                items={
                    "actor": actor,
                    "object": post_id
                }
            )
            return Response({
                'type': 'Like',
                'summary': summary,
                'author': actor,
                'object': post_id,
                'context': context
            })

    # get a list of likes for this post
    def get_postLike_list(self, request, *args, **kwargs):
        """Return all Likes whose object is the post in the URL."""
        request_str = str(request)
        author_uuid = request_str.split("/")[2]
        post_uuid = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        post_id = author_id + "/posts/" + post_uuid
        # response_body = []
        item = Likes.objects.filter(object=post_id).values()
        # response_body.append(item)
        # return Response(response_body)
        return Response(item)

    # get a list of likes for this comment
    def get_commentLike_list(self, request, *args, **kwargs):
        """Return all Likes whose object is the comment in the URL."""
        request_str = str(request)
        author_uuid = request_str.split("/")[2]
        post_uuid = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        post_id = author_id + "/posts/" + post_uuid
        is_comments = False
        if '/comments/' in request_str:
            is_comments = True
            comment_uuid = request_str.split("/")[6]
            comment_id = post_id + "/comments/" + comment_uuid
        # response_body = []
        # NOTE(review): comment_id is only bound when '/comments/' is in
        # the path; routed otherwise this raises NameError — confirm the
        # URL conf guarantees the comments form.
        item = Likes.objects.filter(object=comment_id).values()
        # response_body.append(item)
        return Response(item)
@api_view(['GET'])
def likedList(request, *args, **kwargs):
    """Return every Like authored by the author identified in the URL."""
    request_str = str(request)
    author_uuid = request_str.split("/")[2]
    host = "https://nofun.herokuapp.com/"
    author_id = host + "author/" + author_uuid
    item = Likes.objects.filter(author=author_id).values()
    return Response(item)
# =====================================================================================================================================
# Inbox
# =====================================================================================================================================
class InboxViewSet(viewsets.ModelViewSet):
    """Per-author inbox: posts, likes, comments and friend requests."""
    serializer_class = InboxSerializer

    def send_into_inbox(self, request, *args, **kwargs):
        """Store the raw request body as an inbox item for the author."""
        request_str = str(request)
        author_uuid = request_str.split("/")[2]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        request_body_data = request.data
        Inbox.objects.create(type=request.data['type'], author=author_id, items=request_body_data)
        return Response({
            "type": "inbox",
            "author": author_id,
            "items": request_body_data
        })

    def all_info_list(self, request, *args, **kwargs):
        """Return pending friend requests plus all stored inbox items."""
        request_str = str(request)
        author_uuid = request_str.split("/")[2]
        post_uuid = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        post_id = author_id + "/posts/" + post_uuid
        is_comments = False
        if '/comments/' in request_str:
            is_comments = True
            comment_uuid = request_str.split("/")[6]
            comment_id = post_id + "/comments/" + comment_uuid
        all_info_list = []
        # add friends requests info
        request_list = []
        if FriendRequest.objects.filter(object=author_id, status="R").exists():
            request_list = FriendRequest.objects.filter(
                object=author_id, status="R").values()
        # TODO get likes, comment, posts info from inbox
        item_list = Inbox.objects.filter(author=author_id).values()
        # return all info with chain(queryset1, queryset2, ...)
        return Response({
            'type': 'Inbox',
            'author': author_id,
            'items': chain(request_list, item_list)
        })

    def current_user_requests(self, request, *args, **kwargs):
        """Return only the pending ('R') friend requests you received."""
        # the requests you received
        request_str = str(request)
        author_uuid = request_str.split("/")[2]
        post_uuid = request_str.split("/")[4]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        post_id = author_id + "/posts/" + post_uuid
        is_comments = False
        if '/comments/' in request_str:
            is_comments = True
            comment_uuid = request_str.split("/")[6]
            comment_id = post_id + "/comments/" + comment_uuid
        # add friends requests info
        request_list = None
        if FriendRequest.objects.filter(object=author_id, status="R").exists():
            request_list = FriendRequest.objects.filter(
                object=author_id, status="R").values()
        # return request list
        return Response({
            'type': 'Inbox',
            'author': author_id,
            'items': request_list
        })

    def clear(self, request, *args, **kwargs):
        """Empty the author's inbox and decline all pending requests."""
        # clear the inbox database and decline all the requests
        request_str = str(request)
        author_uuid = request_str.split("/")[2]
        host = "https://nofun.herokuapp.com/"
        author_id = host + "author/" + author_uuid
        Inbox.objects.filter(author=author_id).delete()
        FriendRequest.objects.filter(
            object=author_id, status='R').update(status='D')
        return Response('Successfully clear the inbox.')
# =====================================================================================================================================
# =====================================================================================================================================
| UAACC/404-project | backend/api/views.py | views.py | py | 43,464 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "models.Node.objects.all",
"line_number": 27,
"usage_type": "call"
},
... |
29161122794 | import pyautogui
import time
#-Fail Safe Activated-#
# Moving the mouse to a screen corner raises pyautogui.FailSafeException,
# which aborts the loop below.
pyautogui.FAILSAFE = True
currentMouseX, currentMouseY = pyautogui.position()
print("X Cordinate is: ", currentMouseX)
print("Y Cordinate is: ", currentMouseY)
#-Set your Y-start point here
yStartPoint = 566
#---INPUT HOW MANY OF YOUR ODERS HERE---#
HowMany = 130
#--------------------------------------#


def autoBot():
    """Click-approve `HowMany` orders via fixed screen coordinates.

    Each iteration: click the order title, click approve, click confirm.
    Aborts early on any exception (including the fail-safe corner and
    Ctrl-C) and reports total elapsed time.
    """
    t1 = time.time()
    i = 0
    # NOTE(review): `global` is declared but yStartPoint is never
    # reassigned — the decrement below is commented out.
    global yStartPoint
    while i < HowMany:
        try:
            pyautogui.moveTo(904, yStartPoint, duration=2)  # Move to title
            pyautogui.click(904, yStartPoint, 1, 0.1, 'left')  # Click at title
            pyautogui.moveTo(1471, 155, duration=2)  # Move at the approve button
            time.sleep(1.5)  # Wait seconds
            pyautogui.click(1471, 155, 1, 0.1, 'left')  # click at the approve button
            pyautogui.moveTo(905, 519, duration=1)  # Move to the confirmed button
            pyautogui.click(905, 519, 1, 0.1, 'left')  # click at the confirmed button
            time.sleep(1.5)  # wait seconds
            print(f'The order no.{i+1} has been approved.')
            #yStartPoint=yStartPoint -24
            i += 1
        # BaseException (not Exception) so the fail-safe corner and
        # KeyboardInterrupt both stop the sequence cleanly.
        except BaseException:
            print('Sequence suspended ')
            break
    t2 = time.time()
    print(f'Done, Usage time: {(t2-t1):.2f}s.')


autoBot()
| CuiMoo/POSCO_PC | AutoHR/AutoHr.py | AutoHr.py | py | 1,500 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyautogui.FAILSAFE",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyautogui.position",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pyautogui.moveTo",... |
23112442511 | import pandas as pd
import numpy as np
from multiprocessing import cpu_count, Pool
# Number of worker processes: one per logical CPU.
cores = cpu_count()


def parallelize(df, func):
    """Apply ``func`` to ``df`` chunk-wise in parallel and reassemble.

    The frame is split along axis 0 into ``cores`` chunks; each chunk is
    mapped through ``func`` in a worker process and the per-chunk results
    are concatenated in the original chunk order.

    :param df: pandas DataFrame (anything ``np.array_split`` accepts).
    :param func: picklable callable mapping one chunk to a DataFrame.
    :return: concatenation of the per-chunk results.
    """
    data_split = np.array_split(df, cores)
    # BUGFIX: use the pool as a context manager so worker processes are
    # always cleaned up; the original close()/join() pair leaked workers
    # when func raised inside pool.map().
    with Pool(cores) as pool:
        data = pd.concat(pool.map(func, data_split))
    return data
# Smoke test: report how many worker processes parallelize() will use.
if __name__ == '__main__':
    print(cores)
| minlik/TextSummarization | utils/multi_proc_utils.py | multi_proc_utils.py | py | 343 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "multiprocessing.cpu_count",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.array_split",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas... |
9707196048 | from botocore.exceptions import ClientError
import services
from .service import Service
from services.response import ServicesApiResponse
from typing import Dict
from boto3.dynamodb.conditions import Key
from services import const
import services.utils as utils
class DynamoDbService(Service):
    """
    This class provides higher-level integration with DynamoDB to facilitate common operations.

    All methods return a ServicesApiResponse; when the underlying service
    is not ready, a standard "unavailable" response is returned instead of
    raising.
    """

    def __init__(self, service_name: str = 'dynamodb', region: str = const.default_region, aws_service_key: str = const.AWS_ACCESS_KEY_ID,
                 aws_secret_key: str = const.AWS_SECRET_ACCESS_KEY):
        """
        Initializes the Service.
        :param service_name: boto3 service name (defaults to 'dynamodb').
        :param region: The region in which the aws service resides.
        :param aws_service_key: Specifies the AWS access key used as part of the credentials to authenticate the user.
        :param aws_secret_key: credentials to authenticate the AWS user.
        manager.
        """
        super().__init__(service_name, region, aws_service_key, aws_secret_key)

    def scan_table(self, table_name: str, filter_name: str, filter_value: str) -> ServicesApiResponse:
        """
        Scan DynamoDB table.
        :param table_name: The DynamoDB table in question.
        :param filter_name: Filter Expression Name.
        :param filter_value: Filter Expression Value.
        https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GettingStarted.Python.04.html
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/dynamodb.html#boto3.dynamodb.conditions.Key
        :return: A ServicesApiResponse containing the following attributes:
                 items - A list of all the matched items from DynamoDB table in Dict format. If
                 there is no match then this value will contain an empty list.
        """
        ddb_table = self.service_resource.Table(table_name)
        if not self.is_ready():
            response = Service.build_unavailable_response(**{'item': None})
        else:
            try:
                if table_name is None:
                    item_from_table = None
                else:
                    # Equality filter applied server-side during the scan.
                    filtering_exp = Key(filter_name).eq(filter_value)
                    item_from_table = \
                        ddb_table.scan(FilterExpression=filtering_exp)['Items']
                response = \
                    Service.build_ok_response(**{'items': item_from_table})
            except ClientError as e:
                response = Service.build_error_response(e)
        return response

    def put_item_in_table(self, table_name: str, item_data: Dict) -> ServicesApiResponse:
        """
        Inserts items in Dynamodb tables
        :param table_name: The DynamoDB table in question.
        :param item_data: data in the form of dictionary to be inserted in table
        :return: A ServicesApiResponse containing the following attributes:
                 Inserts the values from data provided as input in table.
                 Returns exception if data already exists in table
        """
        ddb_table = self.service_resource.Table(table_name)
        if not self.is_ready():
            response = Service.build_unavailable_response(**{'tables': None})
        else:
            try:
                if table_name is None:
                    item_in_table = None
                else:
                    # NOTE(review): `Expected` is the legacy conditional
                    # syntax; boto3 recommends ConditionExpression — the
                    # intent here is "fail if an item with this id exists".
                    item_in_table = ddb_table.put_item(Item=item_data, Expected={'id': {'Exists': False}})
                response = Service.build_ok_response(**{'item': item_in_table})
            except ClientError as e:
                response = Service.build_error_response(e)
                if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
                    services.logger.info("Entry already exists")
                else:
                    services.logger.info(e.response['Error']['Message'])
        return response

    def get_item_from_table(self, table_name: str, search_key: Dict) -> ServicesApiResponse:
        """
        Gets item values for given dynamodb table based on primary key and sort key.
        :param table_name: The DynamoDB table in question.
        :param search_key: dict representing the primary key of the item to retrieve.
        :return: A ServicesApiResponse containing the following attributes:
                 item - the matching item dict, or None when the table name
                 is missing or no item matched.
        """
        ddb_table = self.service_resource.Table(table_name)
        search_item = None
        if not self.is_ready():
            response = Service.build_unavailable_response(**{'tables': None, 'item': None})
        else:
            try:
                if table_name is None:
                    search_item = None
                else:
                    item_from_table = ddb_table.get_item(Key=search_key)
                    # get_item omits 'Item' entirely when nothing matched.
                    if 'Item' in item_from_table:
                        search_item = item_from_table['Item']
                response = Service.build_ok_response(**{'item': search_item})
            except ClientError as e:
                response = Service.build_error_response(e)
        return response

    def update_item(self, table_name: str, key: Dict, update_values: dict) -> ServicesApiResponse:
        """
        Update an existing attributes or adds a new attributes to the table if it does not already exist.
        :param table_name: The DynamoDB table in question.
        :param key: dict representing the primary key of the item to retrieve.
        :param update_values: The values to be updated in the mentioned table.
        :return: A ServicesApiResponse containing the following attributes:
                 item - Returns the updated attributes for the mentioned key.
        """
        ddb_table = self.service_resource.Table(table_name)
        updated_items = None
        if not self.is_ready():
            response = Service.build_unavailable_response(**{'tables': None, 'item': None})
        else:
            try:
                if table_name is None:
                    updated_items = None
                else:
                    # Build "SET a = :a, ..." expression + value map from
                    # the plain dict of updates.
                    update_expression, expression_value = utils.generate_expression(update_values)
                    item_from_table = ddb_table.update_item(Key=key, UpdateExpression=update_expression,
                                                            ExpressionAttributeValues=expression_value, ReturnValues="UPDATED_NEW")
                    if 'Attributes' in item_from_table:
                        updated_items = item_from_table['Attributes']
                response = Service.build_ok_response(**{'item': updated_items})
            except ClientError as e:
                response = Service.build_error_response(e)
        return response
| CianGrimnir/JourneySharing-Backend | services/dynamodb.py | dynamodb.py | py | 6,736 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "service.Service",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "services.const.default_region",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "services.const",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "ser... |
72374240994 | #!/usr/bin/python3
import asyncio
import random
import time
import threading
from quart import Quart, request, jsonify
from servo import Servo
from wheel import Wheel
from broadcast import VideoBroadcastThread
###################################################
# start a thread to broadcast video only
# Feature flag: start a dedicated video-broadcast thread at import time.
start_video = False


def broadcast_video():
    """Run a private asyncio loop that drives the video broadcast."""
    video_loop = asyncio.new_event_loop()
    # NOTE(review): `broadcast` here resolves to the route handler defined
    # below, which takes no positional arguments — this call would raise
    # TypeError if start_video were True.  Presumably a different
    # broadcaster was intended; confirm before enabling.
    video_loop.create_task(broadcast(video_loop))
    video_loop.run_forever()


if start_video:
    threading.Thread(target=broadcast_video).start()
###################################################
# Application singletons: web app, camera servo, drive wheels, and the
# (lazily created) video broadcast thread.
app = Quart(__name__)
svo = Servo()
drv = Wheel()
video_broadcast = None


@app.route('/', methods=['GET'])
async def index():
    """Health-check endpoint; wiggles the camera servo to a random angle."""
    svo.angle(0, random.randint(0, 180))
    await asyncio.sleep(0)
    return ('', 200)
@app.route('/direction', methods=['POST'])
async def direction():
    """Drive the wheels for a requested duration.

    JSON body: duration (seconds), direction (int, 0 = stop), throttle.
    When direction is non-zero the handler sleeps for `duration` before
    responding, so the drive keeps running for that long.
    """
    content = await request.get_json()
    if content is not None:
        seconds = float(content['duration'])
        direction = int(content['direction'])
        throttle = int(content['throttle'])
        drv.drive(direction, throttle)  # start driving
        if direction != 0:
            await asyncio.sleep(seconds)  # driving time
    return ('', 200)
@app.route('/camera', methods=['POST'])
async def camera():
    """Point the camera servo at the requested angle (JSON: angle)."""
    content = await request.get_json()
    if content is not None:
        angle = int(content['angle'])
        svo.angle(0, angle)
    return ('', 200)
@app.route('/broadcast', methods=['POST'])
async def broadcast():
    """Start or stop the video broadcast thread (JSON: action).

    'start' lazily creates and starts a VideoBroadcastThread; 'stop'
    signals it to stop and drops the reference (the thread is not joined).
    """
    global video_broadcast
    content = await request.get_json()
    if content is not None:
        action = content['action']
        if action == 'start':
            if video_broadcast is None:
                video_broadcast = VideoBroadcastThread()
                video_broadcast.start()
        elif action == 'stop':
            print('stop video')
            if video_broadcast is not None:
                video_broadcast.stop()
                print('exception')
                #video_broadcast.join()
                video_broadcast = None
    return ('', 200)


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
| home9464/selfdrivingcar | rest_main.py | rest_main.py | py | 2,172 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "asyncio.new_event_loop",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "quart.Quart",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "servo.Servo",
... |
73184185314 | """
Defines User resource's endpoints.
"""
from application import api, db, app
from application.models import User, Student, Course, BadSignature, TeamProjectGrade, StudentMilestoneGrade, StudentQuizGrade, Submission
from flask.ext.restful import Resource, abort, marshal, marshal_with
from fields import user_fields, course_fields, grades_fields, team_project_grade_fields, student_milestone_grade_fields, student_quiz_grade_fields, submission_fields
from parsers import user_parser
from decorators import login_required
from flask import g, request
from flanker.addresslib import address
import datetime
class UsersResource(Resource):
    """User collection."""
    def post(self):
        """
        Creates a new user.
        Decides if teacher based on email host name.
        """
        arguments = user_parser.parse_args()
        guc_id = arguments['guc_id']
        password = arguments['password']
        email = arguments['email']
        name = arguments['name']
        # Only GUC addresses may register; anything else is a 400.
        parsed_email = address.parse(email)
        if parsed_email is None or not parsed_email.hostname.endswith('guc.edu.eg'):
            abort(400)
        if User.objects(email__iexact=email).count() != 0:
            abort(422) # Duplicate found
        # student.guc.edu.eg addresses become Student documents; every
        # other GUC address is treated as a teacher (plain User).
        if parsed_email.hostname.startswith('student'):
            user = Student(guc_id=guc_id, email=email, name=name)
        else:
            user = User(email=email, name=name)
        user.password = password
        user.active = False  # inactive until the activation mail is confirmed
        user.save()
        if app.config['ENABLE_EMAIL_ACTIVATION']:
            user.send_activation_mail()
        return marshal(user.to_dict(), user_fields), 201
    @marshal_with(user_fields)
    @login_required
    def get(self):
        # Any logged-in user may list all users; profiles hold no private data.
        return [user.to_dict() for user in User.objects]
class UserResource(Resource):
    """Singular resource."""
    method_decorators = [login_required]
    @marshal_with(user_fields)
    def put(self, id):
        """
        Updates the fields of the resource.
        email updates are not allowed, but will not trigger a fail. Instead
        they are silently ignored.
        """
        # Users may only edit their own profile.
        if str(g.user.id) == id:
            arguments = user_parser.parse_args()
            # Only Students carry a guc_id; reject attempts to set it otherwise.
            if "guc_id" in arguments and arguments['guc_id'] != '' and not isinstance(g.user, Student):
                abort(400)
            # Build MongoEngine atomic `set__<field>` kwargs for the
            # whitelisted, non-empty fields only.
            args = {
                "set__{0}".format(key): val for key, val in arguments.items()
                if val is not None and val != '' and key in ['name', 'guc_id']
            }
            if len(args) > 0:
                g.user.update(**args)
            if arguments['password'] is not None and arguments['password'] != '':
                g.user.password = arguments['password']
                g.user.save()
            g.user.reload()
            return g.user.to_dict()
        else:
            abort(403)
    @marshal_with(user_fields)
    def get(self, id):
        """
        Returns a single user.
        While a login is required, No special user privileges are given to
        specific users. As there is no private data in the profiles that isn't
        shared anyway amongst TAs and Students.
        """
        return User.objects.get_or_404(id=id).to_dict()
class UserPassReset(Resource):
    def get(self):
        """
        Verifies a password-reset token and sends a new password.
        """
        token = request.args.get('token')
        if token is not None:
            try:
                user = User.verify_pass_reset_token(token)
                user.reset_pass()
                return {}, 204
            except (BadSignature):
                abort(404, message="Bad Token")
        else:
            abort(400, message="Missing activation token.")
    def post(self):
        """
        Sends reset email.
        """
        json = request.get_json()
        if json and json['email']:
            user = User.objects.get_or_404(email__iexact=json['email'])
            # Throttle: at most one reset mail per 12 hours per account.
            if (user.reset_sent_at is None
                or user.reset_sent_at < datetime.datetime.utcnow() - datetime.timedelta(hours=12)):
                user.send_password_reset_mail()
                return {}, 204
            else:
                abort(412, message='Stay calm and check your spam folder.')
        else:
            abort(400, message="Missing email field.")
class UserActivation(Resource):
    def get(self):
        """
        Attempts to activate user.
        """
        token = request.args.get('token')
        if token is not None:
            try:
                user = User.verify_activation_token(token)
                user.active = True
                user.save()
                return {}, 204
            except (BadSignature):
                abort(404, message="Bad Token")
        else:
            abort(400, message="Missing activation token.")
    def post(self):
        """
        Resends activation email.
        """
        json = request.get_json(silent=True)
        if json and 'email' in json:
            user = User.objects.get_or_404(email__iexact=json['email'])
            if user.active:
                abort(422, message="Account is already active.")
            elif user.activation_sent_at is None:
                user.send_activation_mail()
                return {}, 204
            # Throttle: at most one activation mail per hour per account.
            elif (user.activation_sent_at <
                    datetime.datetime.utcnow() - datetime.timedelta(hours=1)):
                user.send_activation_mail()
                return {}, 204
            else:
                abort(412, message='Stay calm and check your spam folder.')
        else:
            abort(400, message="Missing email field")
class UserDashboard(Resource):
    """Courses relevant to the current user (enrolled in or taught)."""
    method_decorators = [login_required]
    @marshal_with(course_fields)
    def get(self):
        """Return the courses the logged-in user belongs to."""
        role_filter = (
            {'students': g.user} if isinstance(g.user, Student)
            else {'teachers': g.user}
        )
        return [course.to_dict() for course in Course.objects(**role_filter)]
class UserSubmissions(Resource):
    method_decorators = [login_required]
    @marshal_with(submission_fields)
    def get(self, id):
        """
        Lists all submissions made by the user, newest first.
        """
        user = User.objects.get_or_404(id=id)
        return [sub.to_dict() for sub
                in Submission.objects(submitter=user).order_by('-created_at')]
class UserGrades(Resource):
    method_decorators = [login_required]
    @marshal_with(grades_fields)
    def get(self):
        """
        Lists all grades related to the user
        (team project, quiz and milestone grades).
        """
        # Only Students have grades; teachers get a 403.
        if isinstance(g.user, Student):
            team_grades = [
                grade.to_dict() for grade
                in TeamProjectGrade.objects(team_id=g.user.team_id)]
            quiz_grades = [
                grade.to_dict() for grade
                in StudentQuizGrade.objects(student=g.user)]
            milestone_grades = [
                grade.to_dict() for grade
                in StudentMilestoneGrade.objects(student=g.user)]
            return {
                "team_grades": team_grades,
                "quiz_grades": quiz_grades,
                "milestone_grades": milestone_grades,
            }
        else:
            abort(403, message="Must be a student to view grades")
class UserTeamProjectGrades(Resource):
    method_decorators = [login_required]
    @marshal_with(team_project_grade_fields)
    def get(self):
        """
        Lists the team-project grades of the user's team.
        """
        if isinstance(g.user, Student):
            team_grades = [
                grade.to_dict() for grade
                in TeamProjectGrade.objects(team_id=g.user.team_id)]
            return team_grades
        else:
            abort(403, message="Must be a student to view grades")
class UserMilestoneGrades(Resource):
    method_decorators = [login_required]
    @marshal_with(student_milestone_grade_fields)
    def get(self):
        """
        Lists the milestone grades of the user.
        """
        if isinstance(g.user, Student):
            milestone_grades = [
                grade.to_dict() for grade
                in StudentMilestoneGrade.objects(student=g.user)]
            return milestone_grades
        else:
            abort(403, message="Must be a student to view grades")
class UserQuizGrades(Resource):
    method_decorators = [login_required]
    @marshal_with(student_quiz_grade_fields)
    def get(self):
        """
        Lists the quiz grades of the user.
        """
        if isinstance(g.user, Student):
            quiz_grades = [
                grade.to_dict() for grade
                in StudentQuizGrade.objects(student=g.user)]
            return quiz_grades
        else:
            abort(403, message="Must be a student to view grades")
# Route registration: bind each resource class to its URL and endpoint name.
api.add_resource(UsersResource, '/users', endpoint='users_ep')
api.add_resource(UserResource, '/user/<string:id>', endpoint='user_ep')
api.add_resource(UserSubmissions, '/user/<string:id>/submissions', endpoint='user_submissions_ep')
api.add_resource(UserGrades, '/user/grades', endpoint='user_grades_ep')
api.add_resource(UserTeamProjectGrades, '/user/team_grades', endpoint='user_team_grades_ep')
api.add_resource(UserQuizGrades, '/user/quiz_grades', endpoint='user_quiz_grades_ep')
api.add_resource(UserMilestoneGrades, '/user/milestone_grades', endpoint='user_milestone_grades_ep')
api.add_resource(UserActivation, '/activate', endpoint='activation_ep')
api.add_resource(UserDashboard, '/user/dashboard', endpoint='dashboard')
api.add_resource(UserPassReset, '/user/reset', endpoint='pass_reset_ep')
| amrdraz/java-project-runner | application/resources/user.py | user.py | py | 9,566 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.ext.restful.Resource",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "parsers.user_parser.parse_args",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "parsers.user_parser",
"line_number": 24,
"usage_type": "name"
},
{
"api... |
6651127391 | import os
import json
import zipfile
def zip_files_and_folders(file_paths, zip_name):
    """Create *zip_name* (deflate-compressed) from files and directories.

    Directories are walked recursively and archived relative to their
    parent directory; plain files are stored under their base name.
    """
    with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as archive:
        for path in file_paths:
            if not os.path.isdir(path):
                archive.write(path, os.path.basename(path))
                continue
            base = os.path.dirname(path)
            for root, _subdirs, names in os.walk(path):
                for name in names:
                    full = os.path.join(root, name)
                    archive.write(full, os.path.relpath(full, base))
def list_files_and_subdirectories(directory, output_dict):
    """Walk *directory* and bucket every file into *output_dict*.

    Paths ending in ``png``/``gif`` go to ``imgFileList``; everything else
    to ``additionFile``. Entries are recorded as forward-slash relative
    paths prefixed with ``img/``.
    """
    for root, _subdirs, names in os.walk(directory):
        for name in names:
            rel = os.path.relpath(os.path.join(root, name), directory)
            entry = 'img/' + rel.replace("\\", "/")
            bucket = "imgFileList" if rel.endswith(('png', 'gif')) else "additionFile"
            output_dict[bucket].append(entry)
def generate_files_with_text(folder_path, file_list):
    """Create every file named in *file_list* inside *folder_path*,
    each initialised with an empty JSON array ('[' newline ']')."""
    for name in file_list:
        target = os.path.join(folder_path, name)
        with open(target, 'w', encoding='utf-8') as handle:
            handle.write('[\n]')
# First run: scaffold the clothes/ folder with one empty JSON list per slot.
if not os.path.exists('clothes'):
    type_list = ['face.json', 'feet.json', 'genitals.json', 'hands.json', 'head.json',
                 'legs.json', 'neck.json', 'upper.json', 'lower.json', 'under_upper.json',
                 'under_lower.json', 'over_upper.json', 'over_lower.json', 'over_head.json']
    os.makedirs('clothes', exist_ok=True)
    generate_files_with_text('clothes', type_list)
os.makedirs('img', exist_ok=True)
# Build the boot.json manifest: name/version are asked interactively.
output_dict = {}
output_dict['name'] = input('请输入模组名称:')
output_dict['version'] = input('请输入类似于1.0.0的模组版本号:')
print(f'模组生成中请稍等...')
output_dict['styleFileList'] = []
output_dict['scriptFileList'] = []
output_dict['tweeFileList'] = []
output_dict['additionFile'] = []
output_dict['imgFileList'] = []
# Classify everything under img/ into image vs. addition file lists.
list_files_and_subdirectories('img', output_dict)
# Addon declarations: one clothes slot per file scaffolded above, plus the
# image loader hook.
output_dict['addonPlugin'] = [
    {
        "modName": "ModdedClothesAddon",
        "addonName": "ModdedClothesAddon",
        "modVersion": "^1.1.0",
        "params": {
            "clothes": [
                {
                    "key": "face",
                    "filePath": "clothes/face.json"
                },
                {
                    "key": "feet",
                    "filePath": "clothes/feet.json"
                },
                {
                    "key": "genitals",
                    "filePath": "clothes/genitals.json"
                },
                {
                    "key": "hands",
                    "filePath": "clothes/hands.json"
                },
                {
                    "key": "head",
                    "filePath": "clothes/head.json"
                },
                {
                    "key": "legs",
                    "filePath": "clothes/legs.json"
                },
                {
                    "key": "neck",
                    "filePath": "clothes/neck.json"
                },
                {
                    "key": "upper",
                    "filePath": "clothes/upper.json"
                },
                {
                    "key": "lower",
                    "filePath": "clothes/lower.json"
                },
                {
                    "key": "under_upper",
                    "filePath": "clothes/under_upper.json"
                },
                {
                    "key": "under_lower",
                    "filePath": "clothes/under_lower.json"
                },
                {
                    "key": "over_upper",
                    "filePath": "clothes/over_upper.json"
                },
                {
                    "key": "over_lower",
                    "filePath": "clothes/over_lower.json"
                },
                {
                    "key": "over_head",
                    "filePath": "clothes/over_head.json"
                }
            ]
        }
    },
    {
        "modName": "ModLoader DoL ImageLoaderHook",
        "addonName": "ImageLoaderAddon",
        "modVersion": "^2.3.0",
        "params": [
        ]
    }
]
output_dict['dependenceInfo'] = [
    {
        "modName": "ModdedClothesAddon",
        "version": "^1.1.0"
    },
    {
        "modName": "ModLoader DoL ImageLoaderHook",
        "version": "^2.3.0"
    }
]
# Write the manifest to boot.json
with open('boot.json', 'w', encoding='utf-8') as file:
    json.dump(output_dict, file, indent=2, ensure_ascii=False)
# List of file and folder paths to compress
file_paths = ['img', 'boot.json', "clothes"]
# Name of the resulting archive
zip_name = output_dict['name'] + '.zip'
zip_files_and_folders(file_paths, zip_name)
os.remove('boot.json')
print(f'模组生成完成: {zip_name}')
| cphxj123/Dol-BJX-Mods | 自制衣服模组生成器/dol衣服美化模组自动生成器.py | dol衣服美化模组自动生成器.py | py | 4,526 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "zipfile.ZIP_DEFLATED",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
32917234276 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.animation as ani
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
def NACA4Camber_line(x, max_camber, pos_camber):
    """Camber line z(x) for a NACA 4-digit series airfoil.

    Chordwise stations with x < pos_camber use the forward polynomial, the
    remainder the aft polynomial; results are concatenated fore section
    first, preserving the input order within each section.
    """
    m, p = max_camber, pos_camber
    fore = x[x < p]
    aft = x[x >= p]
    z_fore = m / p**2 * (2 * p * fore - fore**2)
    z_aft = m / (1 - p)**2 * (1 - 2 * p + 2 * p * aft - aft**2)
    return np.concatenate((z_fore, z_aft))
def Generic_single_1D_plot(x, y, xlabel, ylabel, save_plots, name, line_label = None, linewidth = 0.75, ax = None, marker = '-x', marker_size = 5, alpha = 1, xscale = 'linear', yscale = 'linear'):
    """Plot a single 1-D curve in the project's house style.

    Creates a new figure/axes when *ax* is None, draws ``y`` against ``x``
    with major and minor grids, labels the axes, adds a legend when
    *line_label* is given, and saves to ``./Figures/<name>.png`` when
    *save_plots* is truthy.

    Returns the axes so further curves can be layered onto the same plot.
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    ax.plot(x, y, marker, label = line_label, mfc = 'none', linewidth = linewidth, markersize = marker_size, alpha = alpha)
    ax.set_xscale(xscale)
    ax.set_yscale(yscale)
    ax.grid(True)
    ax.minorticks_on()
    # Pass the visibility flag positionally: the `b=` keyword used here
    # before was deprecated in Matplotlib 3.4 and removed in 3.6, where it
    # raised TypeError.
    ax.grid(True, which = 'minor', color = '#999999', linestyle = '-', alpha = 0.2)
    ax.set_xlabel('%s'%(xlabel), fontsize = 13)
    ax.set_ylabel('%s'%(ylabel), fontsize = 13)
    if line_label is not None:
        plt.legend(loc = 'best')
    if (save_plots):
        plt.savefig('./Figures/{}.png'.format(name), dpi = 300, bbox_inches = 'tight')
    return ax
def Animate_Solution(Solution, save_anim):
    # Animate the full time history of the flow field (one frame per time
    # step); optionally save the animation as a GIF.
    idx = 0;
    fig = plt.figure(figsize = (12, 8));
    ax = fig.add_subplot(111);
    # Draw the first frame and keep its artists for in-place updates.
    im, quv, line, time = Solution.Plot_Flow_Field(idx, fig = fig, ax = ax);
    anim = ani.FuncAnimation(fig, Solution.Animate_flow_field, Solution.Ndt, interval = 50, fargs = (im, quv, line, time));
    if save_anim:
        anim.save('./Flow_field.gif');
    plt.show();
    return 0;
def Genetrate_Steady_Plots(Solution, alpha_qury, save_plots, flap = False, delta = 0):
    # Produce steady-state flow-field and lift-polar plots for the queried
    # angles of attack (alpha_qury, in degrees).
    alp_idxs = np.where(np.in1d(np.degrees(Solution.alpha_steady), alpha_qury))[0];
    for idx in alp_idxs:
        Solution.Plot_Flow_Field(idx, unsteady = False, save_name = '%s_Flow_Field_steady_aoa_%0.1f_delta_%0.2f'%(Solution.mesh.airfoil, np.degrees(Solution.alpha_steady[idx]), np.degrees(delta)), save_plot = save_plots);
        Solution.Plot_Flow_Field(idx, unsteady = False, plot_pressure = True, save_name = '%s_Flow_Field_steady_pressure_aoa_%0.1f_delta_%0.2f'%(Solution.mesh.airfoil, np.degrees(Solution.alpha_steady[idx]), np.degrees(delta)), save_plot = save_plots);
    # Without a flap deflection, also compare the lift polar against thin
    # airfoil theory, XFoil data and experimental data read from disk.
    if flap == False:
        polar_data = np.genfromtxt('NACA%s.dat'%(Solution.mesh.airfoil));
        exp_data = np.genfromtxt('NACA0012_Exp.csv', delimiter = ',');
        ax = Generic_single_1D_plot(np.degrees(Solution.alpha_steady), Solution.lift_steady, '', '', False, '', marker = '-o', line_label = 'VPM Results', marker_size = 5);
        Generic_single_1D_plot(np.degrees(Solution.alpha_steady), 2*np.pi*Solution.alpha_steady, r'$\alpha \: [^\circ]$', r'$C_l$ [-]', save_plots, '%s_Cl_vs_alpha_steady_lone'%(Solution.mesh.airfoil), marker = '-x', line_label = 'Thin airfoil theory', ax = ax, marker_size = 5);
        ax = Generic_single_1D_plot(np.degrees(Solution.alpha_steady), Solution.lift_steady, '', '', False, '', marker = '-o', line_label = 'VPM Results', marker_size = 5);
        Generic_single_1D_plot(polar_data[:, 0], polar_data[:, 1], '', '', False, '', ax = ax, marker = '-*', line_label = 'XFoil Results', marker_size = 2.5);
        Generic_single_1D_plot(exp_data[:, 0], exp_data[:, 1], r'$\alpha \: [^\circ]$', r'$C_l$ [-]', save_plots, '%s_Cl_vs_alpha_steady'%(Solution.mesh.airfoil), ax = ax, marker = '-^', line_label = 'Experimental Results', marker_size = 5);
    return 0;
def Generate_Unsteady_Plots(Solution, k, save_plots):
    # Plots for an unsteady (pitching) case at reduced frequency k:
    # final flow field, lift hysteresis loop vs alpha, kinematics and
    # lift/angle time histories.
    Solution.Plot_Flow_Field(Solution.Ndt - 1, unsteady = True, save_name = '%s_Flow_Field_unsteady_k_%0.3f'%(Solution.mesh.airfoil, k), save_plot = save_plots);
    # Lift vs angle of attack: unsteady, quasi-steady and steady curves,
    # with start/end markers for the two time-dependent solutions.
    ax = Generic_single_1D_plot(Solution.alpha*180/np.pi, Solution.lift, '', '', False, '', alpha = 0.75, marker = '--r', marker_size = 4, line_label = 'Unsteady solution', linewidth = 0.85);
    Generic_single_1D_plot(Solution.alpha*180/np.pi, 2*np.pi*(Solution.alpha + Solution.omega/2), '', '', False, '', alpha = 0.75, ax = ax, marker = '--g', marker_size = 5, line_label = 'Quasi-Steady solution', linewidth = 0.85);
    Generic_single_1D_plot(Solution.alpha_steady[2:-2]*180/np.pi, Solution.lift_steady[2:-2], '', '', False, '', ax = ax, marker = '-k^', marker_size = 5, line_label = 'Steady solution', linewidth = 0.85);
    Generic_single_1D_plot(Solution.alpha[0]*180/np.pi, 2*np.pi*(Solution.alpha + Solution.omega/2)[0], '', '', False, '', ax = ax, marker = '^g', marker_size = 8, line_label = 'Start Quasi-Steady');
    Generic_single_1D_plot(Solution.alpha[-1]*180/np.pi, 2*np.pi*(Solution.alpha + Solution.omega/2)[-1], '', '', False, '', ax = ax, marker = 'og', marker_size = 8, line_label = 'End Quasi-Steady');
    Generic_single_1D_plot(Solution.alpha[0]*180/np.pi, Solution.lift[0], '', '', False, '', marker = '^r', marker_size = 8, ax = ax, line_label = 'Start Unsteady');
    Generic_single_1D_plot(Solution.alpha[-1]*180/np.pi, Solution.lift[-1], r'$\alpha \: [^{\circ}]$', r'$C_l$ [-]', save_plots, '%s_Cl_vs_alpha_t_k_%0.3f'%(Solution.mesh.airfoil, k), marker = 'or', marker_size = 8, ax = ax, line_label = 'End Unsteady');
    # Pitch kinematics and lift coefficient time histories.
    ax = Generic_single_1D_plot(Solution.t, Solution.theta, '', '', False, '', line_label = r'$\theta(t)$', marker_size = 3);
    Generic_single_1D_plot(Solution.t, Solution.omega, r'$t$ [s]', r'$\theta$ [rad], $\Omega$ [rad/s]', save_plots, '%s_Omega_theta_vs_t_k_%0.3f'%(Solution.mesh.airfoil, k), ax = ax, marker = '-o', marker_size = 3.5, line_label = r'$\Omega(t)$');
    ax = Generic_single_1D_plot(Solution.t, Solution.lift, r'$t$ [s]', r'$C_l$ [-]', save_plots, '%s_Cl_vs_t_k_%0.3f'%(Solution.mesh.airfoil, k), marker_size = 3.5);
    # Compare steady, quasi-steady and equivalent unsteady angles in time.
    ax = Generic_single_1D_plot(Solution.t, np.degrees(Solution.alpha), '', '', False, '', line_label = r'$\alpha_{s}$', marker_size = 1);
    Generic_single_1D_plot(Solution.t, np.degrees(Solution.alpha + Solution.omega/2), '', '', False, '', ax = ax, line_label = r'$\alpha_{qs}$', marker = '-o', marker_size = 1);
    Generic_single_1D_plot(Solution.t, np.degrees(Solution.alpha_unsteady), r'$t$ [s]', r'$\alpha \: [^{\circ}]$', save_plots, 'Alpha_sd_unsd_k_%0.3f'%(k), ax = ax, line_label = r'$\alpha_{eq}$', marker = '-^', marker_size = 1);
    return 0;
def Generate_Gust_Plots(Solution, gust_amp, gust_delay, save_plots):
    # Lift response to a gust starting at t = gust_delay, compared against
    # a closed-form two-exponential response (NOTE(review): this looks like
    # the exponential approximation of the Kussner function -- confirm).
    ax = Generic_single_1D_plot(Solution.t, Solution.lift, '', '', False, '', marker_size = 3.5, line_label = 'Numerical Solution');
    Generic_single_1D_plot(Solution.t, (Solution.t > gust_delay)*2*np.pi*np.sin(gust_amp)*(1 - 0.5*(np.exp(-0.13*2*(Solution.t - gust_delay)) + np.exp(-2*(Solution.t - gust_delay)))), r'$t$ [s]', r'$C_l$ [-]', save_plots, '%s_gust_responce_amp_%0.3f_delay_%0.3f'%(Solution.mesh.airfoil, gust_amp, gust_delay), ax = ax, marker = '-o', marker_size = 3.5, line_label = 'Analytical Solution');
    # Flow-field snapshots: just after gust onset and at the start.
    t_idx = np.where(Solution.t > gust_delay)[0];
    Solution.Plot_Flow_Field(t_idx[0] + 2, unsteady = True, save_name = '%s_Flow_Field_gust_amp_%0.3f_delay_%0.3f'%(Solution.mesh.airfoil, gust_amp, gust_delay), save_plot = save_plots);
    Solution.Plot_Flow_Field(1, unsteady = True, save_name = '%s_Flow_Field_gust_t0_amp_%0.3f_delay_%0.3f'%(Solution.mesh.airfoil, gust_amp, gust_delay), save_plot = save_plots);
    return 0;
| MazenZohiry/Unsteady-Vortex-Panel-Method | Program/Generic_Functions.py | Generic_Functions.py | py | 7,469 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.whe... |
37964376441 | import os
from pathlib import Path
def return_config(factor, mail, private_plugin, uid, tid):
    """Build the per-task analysis configuration and create its output dirs.

    The base path is ``<project root>/static/upload/<uid>/<tid>`` (project
    root = three levels above this module). ``factor`` is the pixel-to-
    length scale; the area scale is its square. An empty *mail* falls back
    to the maintainer address.

    Returns a dict with the resolved paths, scale ratios, mail address and
    the stringified uid/tid/plugin ids.
    """
    base_path = Path(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "static/upload",
                     str(uid), str(tid)).as_posix()
    initial_path = base_path + "/initial/"
    predict_out_path = base_path + "/predictout/"
    out_path = base_path + "/output/"
    length_ratio = factor
    area_ratio = factor * factor
    if mail == "":
        mail = "2020801253@stu.njau.edu.cn"
    # makedirs (not mkdir): creates missing parents and, with exist_ok,
    # does not crash when the task is re-run on existing directories.
    os.makedirs(predict_out_path, exist_ok=True)
    os.makedirs(out_path, exist_ok=True)
    data = {
        'base_path': base_path,
        'initial_path': initial_path,
        'predict_out_path': predict_out_path,
        'out_path': out_path,
        'length_ratio': length_ratio,
        'area_ratio': area_ratio,
        'mail': mail,
        'uid': str(uid),
        'tid': str(tid),
        'pid': str(private_plugin)
    }
    return data
| Eric-1986/faCRSA | facrsa_code/library/analysis/config.py | config.py | py | 947 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_nu... |
39286320369 | """
Modified from https://github.com/pytorch/vision.git
"""
import math
import torch.nn as nn
import torch.nn.init as init
# fmt: off
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
# fmt: on
class VGG(nn.Module):
    """
    VGG model.

    Fully-connected head sized for small (CIFAR-like) inputs: `features`
    must flatten to 512 values per sample.
    """

    def __init__(self, features, num_classes):
        super(VGG, self).__init__()
        self.features = features
        self.num_classes = num_classes
        # When True, forward() also returns the penultimate representation.
        self.penultimate_active = False
        if self.num_classes == 1000:
            # The original code called an undefined `logger` here, which
            # raised NameError for num_classes == 1000; warn via the
            # stdlib instead.
            import warnings
            warnings.warn(
                "This open source implementation is only suitable for small datasets like CIFAR. For Imagenet we recommend to use Resnet based models"
            )
        self.classifier_penultimate = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 512),
            nn.ReLU(True),
        )
        self.final_classifier = nn.Linear(512, self.num_classes)
        # Describe model with source code link
        self.description = "Open Source Implementation of VGG16 adapted from chengyangfu/pytorch-vgg-cifar10 repository"
        self.source_link = (
            "https://github.com/chengyangfu/pytorch-vgg-cifar10/blob/master/vgg.py"
        )
        # Initialize conv weights (Kaiming He initialization).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                m.bias.data.zero_()

    def forward(self, x):
        # Feature extraction, flatten, then the two-layer FC head.
        x = self.features(x)
        x = x.view(x.size(0), -1)
        z = self.classifier_penultimate(x)
        x = self.final_classifier(z)
        if self.penultimate_active:
            return z, x
        return x
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an nn.Sequential feature stack.

    Numeric entries add a 3x3 same-padding conv (optionally followed by
    BatchNorm) and a ReLU; the string 'M' adds a 2x2 max-pool. The input
    is assumed to have 3 channels.
    """
    modules = []
    channels = 3
    for spec in cfg:
        if spec == "M":
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(spec))
        modules.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*modules)
# fmt: off
# Layer configurations: numbers are conv output channels, 'M' is a max-pool.
# A = VGG11, B = VGG13, D = VGG16, E = VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
          512, 512, 512, 512, 'M'],
}
# fmt:on
def vgg11(num_classes=10):
    """VGG 11-layer model (configuration "A").

    `VGG.__init__` requires `num_classes`, so the previous zero-argument
    factories raised TypeError; all factories now accept it with a default
    of 10 (CIFAR scale), keeping existing call sites working.
    """
    return VGG(make_layers(cfg["A"]), num_classes)


def vgg11_bn(num_classes=10):
    """VGG 11-layer model (configuration "A") with batch normalization"""
    return VGG(make_layers(cfg["A"], batch_norm=True), num_classes)


def vgg13(num_classes=10):
    """VGG 13-layer model (configuration "B")"""
    return VGG(make_layers(cfg["B"]), num_classes)


def vgg13_bn(num_classes=10):
    """VGG 13-layer model (configuration "B") with batch normalization"""
    return VGG(make_layers(cfg["B"], batch_norm=True), num_classes)


def vgg16(num_classes=10):
    """VGG 16-layer model (configuration "D")"""
    return VGG(make_layers(cfg["D"]), num_classes)


def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    return VGG(make_layers(cfg["D"], batch_norm=True), **kwargs)


def vgg19(num_classes=10):
    """VGG 19-layer model (configuration "E")"""
    return VGG(make_layers(cfg["E"]), num_classes)


def vgg19_bn(num_classes=10):
    """VGG 19-layer model (configuration 'E') with batch normalization"""
    return VGG(make_layers(cfg["E"], batch_norm=True), num_classes)
| PrateekMunjal/TorchAL | pycls/models/vgg_style/vgg_2.py | vgg_2.py | py | 3,872 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
27192970669 | import json
import requests
import time
from apscheduler.schedulers.blocking import BlockingScheduler # 引入后台
import os
from utils.file import FileUtil
# from utils.requst import postres
# from detect import run,main,parse_opt
from detect import run,main,parse_opt
SPATH = './testfile/input'
TPATH = './testfile/output'
Source = './testfile/input/test1.mp4'
Weight = './runs/train/exp3/weights/best.pt'
def postres(time, dsc, site, img_name, img_path):
    """POST a detection event (timestamp, description, site) plus the
    annotated image to the drone-info endpoint.

    NOTE(review): the `time` parameter shadows the module-level `time`
    import inside this function; name kept for interface compatibility.
    """
    print("准备提交数据")
    url = 'http://113.250.49.15:8084/smartPlant/portal/drone/video/droneInfo'
    data = {"order_time": time, "dsc": dsc, "site": site}
    # Open the image in a `with` block so the handle is closed even when
    # the request fails (the original leaked the open file object).
    with open(img_path, 'rb') as img:
        files = {"files": (img_name, img, "image/png", {})}
        print(data)
        print(files)
        res = requests.request("POST", url, data=data, files=files)
    print(res.text)
def autodetect(srcpath = SPATH, tarpath = TPATH):
    """Poll *srcpath* for a newly arrived video; when one is found, run
    the detector on it, report the result over HTTP, then move the
    processed file to *tarpath*."""
    futi = FileUtil(srcpath,tarpath)
    fname = futi.containfile()
    print("fname is ",fname)
    if fname!= "":
        print('检测到新视频')
        print('执行检测算法')
        # invoke the detection algorithm
        filepath = os.path.join(srcpath,fname)
        # fetch the detection result and print it
        res = main(Source = filepath,Weight =Weight)
        # {'time': '2022-10-18 13:30:54.342617', 'path': 'runs/tmp/exp10', 'class': 'water_leakage', 'frame': '0'}
        if res:
            print()
            print('检测完成,检测结果为', res)
            order_time = res['time']
            desc = res['desc']
            site = res['site']
            img_name = res['img_name']
            img_path = os.path.join(res['path'], res['img_name'])
            # send the HTTP request
            # (time, dsc, site, img_name, img_path)
            print("准备发送请求")
            postres(time=order_time, dsc=desc, site=site, img_name=img_name, img_path=img_path)
            print("发送请求成功")
            # clean up the folder
            # move the file once detection has finished
            print()
            print("检测完成后,开始移动文件")
            futi.mvfile()
        else:
            # Nothing detected: still archive the processed video.
            print('检测完成,检测结果为', res)
            print("检测完成后,开始移动文件")
            futi.mvfile()
    else:
        print('没有新视频')
# Run one detection pass immediately, then poll the input folder every
# 60 seconds (at most a single concurrent instance); start() blocks.
sched = BlockingScheduler()
autodetect()
sched.add_job(autodetect, 'interval', seconds=60, max_instances=1)
sched.start()
{
"api_name": "requests.request",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.file.FileUtil",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line... |
31831184436 | from dotenv import load_dotenv
import json
from utils import *
from db import *
import jwt
import time
load_dotenv()
def handle_post(c, request: HttpRequest):
    """Receive a file upload over socket *c* and store it.

    The query string carries ``?file=<name>&type=<mime>``; part of the
    body may already be in ``request.data`` and the rest is drained from
    the socket until Content-Length bytes have arrived. Responds with a
    JSON body holding the new file's id.
    """
    filename_type = request.path[len("/api/files/?file=") :].split("&type=")
    filename = filename_type[0]
    file_type = filename_type[1]
    size = int(request.headers["Content-Length"])
    cur = len(request.data)
    print(cur, len(request.data), size)
    data = request.data
    while cur < size:
        left = c.recv(1000000)
        if not left:
            # Peer closed the connection before the full body arrived;
            # bail out instead of spinning forever on empty reads.
            break
        cur += len(left)
        data += left
        print(cur, len(left), len(data), size)
    print(size, len(data))
    print(file_type)
    file_meta = create_file(filename, data, file_type, size)
    return wrap_response(
        request.version,
        200,
        {
            "Content-Type": "application/json",
            "Connection": "close",
            "Access-Control-Allow-Origin": "https://cnfinal2022.herokuapp.com",
            "Access-Control-Allow-Methods": "POST, GET, OPTIONS",
            "Access-Control-Request-Headers": "Access-Control-Allow-Headers, Content-Type, X-Requested-With, content-type, Origin, Accept, Access-Control-Request-Method, Access-Control-Request-Headers",
            "Access-Control-Allow-Credentials": "true",
        },
        json.dumps({"id": file_meta}),
    )
def handle_get(request: HttpRequest):
    """Serve a stored file: the request path is ``/api/files/<id>``.

    Returns a 200 response carrying the file bytes, or a 404 JSON
    response when the id is unknown.
    """
    file_id = request.path[len("/api/files/") :]
    file_type, size, data = get_file(file_id)
    # Guard the debug print: len(None) raised TypeError before the
    # not-found branch below could ever run.
    print(len(data) if data is not None else None, size)
    if data is not None:
        return wrap_response(
            request.version,
            200,
            {
                "Content-Type": file_type,
                "Content-Length": size,
                "Connection": "close",
                "Access-Control-Allow-Origin": "https://cnfinal2022.herokuapp.com",
                "Access-Control-Allow-Methods": "POST, GET, OPTIONS",
                "Access-Control-Request-Headers": "Access-Control-Allow-Headers, Content-Type, X-Requested-With, content-type, Origin, Accept, Access-Control-Request-Method, Access-Control-Request-Headers",
                "Access-Control-Allow-Credentials": "true",
            },
            data,
        )
    else:
        return wrap_response(
            request.version,
            404,
            {
                "Content-Type": "application/json",
                "Connection": "close",
                "Access-Control-Allow-Origin": "https://cnfinal2022.herokuapp.com",
                "Access-Control-Allow-Methods": "POST, GET, OPTIONS",
                "Access-Control-Request-Headers": "Access-Control-Allow-Headers, Content-Type, X-Requested-With, content-type, Origin, Accept, Access-Control-Request-Method, Access-Control-Request-Headers",
                "Access-Control-Allow-Credentials": "true",
            },
        )
| jeff-901/CN2022FallFinal | handle_file.py | handle_file.py | py | 2,816 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 39,
"usage_type": "call"
}
] |
33410335386 | import csv, json
from geojson import Feature, FeatureCollection, Point
# Convert squirrel sightings from CSV into a GeoJSON FeatureCollection.
features = []
with open('../data2.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    # Skip the header row (None avoids StopIteration on an empty file).
    next(reader,None)
    for row in reader:
        # print(row[0])
        # Assumes column 0 is longitude (X) and column 1 is latitude (Y)
        # -- TODO confirm against data2.csv's header.
        latitude, longitude = map(float, (row[1], row[0]))
        features.append(
            Feature(
                # GeoJSON positions are (longitude, latitude) order.
                geometry = Point((longitude, latitude)),
                properties = {
                    'Unique_Squirrel_ID': row[2],
                }
            )
        )
collection = FeatureCollection(features)
with open("../map/static/map/GeoObs3.geojson", "w") as f:
    f.write('%s' % collection)
| evo0522/squirrelTracker | utilities/create_geojson.py | create_geojson.py | py | 669 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "geojson.Feature",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "geojson.Point",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "geojson.FeatureCollection",
... |
23008791513 | import requests
from autopr.models.rail_objects import PullRequestDescription
import structlog
log = structlog.get_logger()
class PublishService:
    """Abstract interface for publishing a pull request description."""
    def publish(self, pr: PullRequestDescription):
        # Create the pull request (or update an existing one).
        raise NotImplementedError
    def update(self, pr: PullRequestDescription):
        # Update an already-open pull request's title/body.
        raise NotImplementedError
class GithubPublishService(PublishService):
    """PublishService backed by the GitHub REST API (create/update PRs
    from `head_branch` into `base_branch` of owner/repo)."""
    def __init__(self, token: str, owner: str, repo_name: str, head_branch: str, base_branch: str):
        self.token = token
        self.owner = owner
        self.repo = repo_name
        self.head_branch = head_branch
        self.base_branch = base_branch
    def _get_headers(self):
        # Standard GitHub REST v3 headers with bearer auth.
        return {
            'Authorization': f'Bearer {self.token}',
            'Accept': 'application/vnd.github+json',
            'X-GitHub-Api-Version': '2022-11-28',
        }
    def publish(self, pr: PullRequestDescription):
        # Upsert: update the open PR for this branch pair if one exists,
        # otherwise create a new one.
        existing_pr = self._find_existing_pr()
        if existing_pr:
            self.update(pr)
        else:
            self._create_pr(pr)
    def _create_pr(self, pr: PullRequestDescription):
        url = f'https://api.github.com/repos/{self.owner}/{self.repo}/pulls'
        headers = self._get_headers()
        data = {
            'head': self.head_branch,
            'base': self.base_branch,
            'title': pr.title,
            'body': pr.body,
        }
        response = requests.post(url, json=data, headers=headers)
        # Failures are logged only; callers get no exception.
        if response.status_code == 201:
            log.debug('Pull request created successfully', response=response.json())
        else:
            log.debug('Failed to create pull request', response_text=response.text)
    def update(self, pr: PullRequestDescription):
        existing_pr = self._find_existing_pr()
        if not existing_pr:
            log.debug("No existing pull request found to update")
            return
        url = f'https://api.github.com/repos/{self.owner}/{self.repo}/pulls/{existing_pr["number"]}'
        headers = self._get_headers()
        data = {
            'title': pr.title,
            'body': pr.body,
        }
        response = requests.patch(url, json=data, headers=headers)
        if response.status_code == 200:
            log.debug('Pull request updated successfully', response=response.json())
        else:
            log.debug('Failed to update pull request', response_text=response.text)
    def _find_existing_pr(self):
        # Look for an open PR from owner:head_branch into base_branch;
        # returns the first match or None.
        url = f'https://api.github.com/repos/{self.owner}/{self.repo}/pulls'
        headers = self._get_headers()
        params = {'state': 'open', 'head': f'{self.owner}:{self.head_branch}', 'base': self.base_branch}
        response = requests.get(url, headers=headers, params=params)
        if response.status_code == 200:
            prs = response.json()
            if prs:
                return prs[0] # Return the first pull request found
        else:
            log.debug('Failed to get pull requests', response_text=response.text)
        return None
| chikib89/AutoPR | autopr/services/publish_service.py | publish_service.py | py | 2,993 | python | en | code | null | github-code | 1 | [
{
"api_name": "structlog.get_logger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "autopr.models.rail_objects.PullRequestDescription",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "autopr.models.rail_objects.PullRequestDescription",
"line_number": 13,
... |
25339418278 | from fastapi import APIRouter, Depends, HTTPException
router = APIRouter(
prefix="/companies",
tags=["Companies"],
responses={
404: {"description": "Company or companies not found"},
403: {"description": "Operation not allowed"}
}
)
fake_companies_db = [
{
"id": 1,
"id_company_group": 1,
"name": "COMPANY A"
},
{
"id": 2,
"id_company_group": 1,
"name": "COMPANY B"
},
{
"id": 3,
"id_company_group": 1,
"name": "COMPANY C"
}
]
@router.get("/")
async def read_companies():
return fake_companies_db
@router.get("/{id}")
async def read_company(id: int):
for company in fake_companies_db:
if company["id"] == id:
return company
return HTTPException(404) | MikelMC96byte/digital-inventory-service | app/routers/companies.py | companies.py | py | 813 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 39,
"usage_type": "call"
}
] |
16017099364 | from django.urls import path
app_label='api'
from .views import *
urlpatterns=[
path('books',BookApiViewAll.as_view(),name='book-list'),
path('books/<slug:slug>',BookApiView.as_view(),name='book-detail'),
path('books/book/create',BookCreateApiView.as_view(),name='book-create'),
path('books/<slug:slug>',BookUpdateApiView.as_view(),name='book-update'),
path('books/<slug:slug>/delete',BookDestroyApiView.as_view(),name='book-delete'),
path('categories',CategoryApiViewAll.as_view(),name='category-list'),
path('categories/<slug:slug>',CategoryApiView.as_view(),name='category-detail'),
path('categories/category/create',CategoryCreateApiView.as_view(),name='category-create'),
path('categories/<slug:slug>',CategoryUpdateApiView.as_view(),name='category-update'),
path('categories/<slug:slug>/delete',CategoryDestroyApiView.as_view(),name='category-delete'),
]
| devGauravTiwari/Library-Python | apibook/urls.py | urls.py | py | 902 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
72887707553 | from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
from django.db import models
class User(AbstractUser):
POSITIONS = (
('data', 'Дата-Квантум'),
('it', 'IT-Квантум'),
('robot', 'Робо-Квантум'),
('hightech', 'Хайтек'),
('geo', 'Гео-Квантум'),
('shah', 'Шахматы'),
('other', 'Другое'),
('kvant', 'Кванториум'),
)
middle_name = models.CharField(max_length=30, verbose_name="Отчество",
default="", blank=True)
position = models.CharField(max_length=30, choices=POSITIONS,
verbose_name="Должность", default="", blank=True)
phoneRegex = RegexValidator(regex=r"^\+?1?\d{8,15}$")
phone = models.CharField(validators=[phoneRegex], max_length=12, unique=True,
verbose_name="Номер телефона (Через +7)", blank=True)
password = models.CharField(max_length=2000, verbose_name="Пароль",
default="pbkdf2_sha256$390000$jNQoFll3T4NGf109vDxvYX$wzXUUE3zThacoBzb0BE4x89F4N6qtUtpF1z2vYFpg/c=")
PublicKey = models.FileField(verbose_name='Публичный ключ', blank=True)
image = models.ImageField(verbose_name='Фото', upload_to='users/', blank=True)
class Meta:
verbose_name_plural = "Пользователи"
| nastya-mishina/kvantorium-dms | users/models.py | models.py | py | 1,535 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
... |
73703380194 | import socket
import struct
import matplotlib.pyplot as plt
import threading
import queue
from queue import Queue
bone_map = [
"Hips", # 0
"Spine", # 1
None, # 2
"Chest", # 3
None, # 4
"UpperChest", # 5
None, # 6
"-", # 7
"-", # 8
"Neck", # 9
"Head", # 10
"RightShoulder", # 11
"RightUpperArm", # 12
"RightLowerArm", # 13
"RightHand", # 14
"LeftShoulder", # 15
"LeftUpperArm", # 16
"LeftLowerArm", # 17
"LeftHand", # 18
"RightUpperLeg", # 19
"RightLowerLeg", # 20
"RightFoot", # 21
"RightToes", # 22
"LeftUpperLeg", # 23
"LeftLowerLeg", # 24
"LeftFoot", # 25
"LeftToes" # 26
]
# Add the following imports for plot animation
from matplotlib.animation import FuncAnimation
# Uncomment the following line if using Matplotlib with a non-default backend
# import matplotlib
# matplotlib.use('Qt5Agg')
def is_field(name):
return name.isalpha()
def _deserialize(data, index, length, is_list=False):
result = [] if is_list else {}
end_pos = index + length
while end_pos - index > 8 and is_field(data[index + 4: index + 8]):
size = struct.unpack("@i", data[index: index + 4])[0]
index += 4
field = data[index: index + 4]
index += 4
value, index2 = _deserialize(data, index, size, field in [b"btrs", b"bons"])
index = index2
if is_list:
result.append(value)
else:
result[field.decode()] = value
if len(result) == 0:
body = data[index: index + length]
return body, index + len(body)
else:
return result, index
def _process_packet(message):
data = _deserialize(message, 0, len(message), False)[0]
data["head"]["ftyp"] = data["head"]["ftyp"].decode()
data["head"]["vrsn"] = ord(data["head"]["vrsn"])
data["sndf"]["ipad"] = struct.unpack("@BBBBBBBB", data["sndf"]["ipad"])
data["sndf"]["rcvp"] = struct.unpack("@H", data["sndf"]["rcvp"])[0]
if "skdf" in data:
for item in data["skdf"]["bons"]:
item["bnid"] = struct.unpack("@H", item["bnid"])[0]
item["pbid"] = struct.unpack("@H", item["pbid"])[0]
item["tran"] = struct.unpack("@fffffff", item["tran"])
elif "fram" in data:
data["fram"]["fnum"] = struct.unpack("@I", data["fram"]["fnum"])[0]
data["fram"]["time"] = struct.unpack("@I", data["fram"]["time"])[0]
for item in data["fram"]["btrs"]:
item["bnid"] = struct.unpack("@H", item["bnid"])[0]
item["tran"] = struct.unpack("@fffffff", item["tran"])
return data
class Receiver:
def __init__(self, addr="10.18.80.194", port=12351):
self.addr = addr
self.port = port
self.figure = None
self.axes = None
self.lines = []
self.queue = Queue()
self.stop_event = threading.Event()
def plot_human_frame(self, data):
# Process the human frame data and update the plot
# Modify this function according to your data structure and plotting requirements
# Example:
frame_data = data["fram"]
fnum = frame_data["fnum"]
time = frame_data["time"]
btrs = frame_data["btrs"]
# Create the plot if it doesn't exist
if self.figure is None:
self.figure = plt.figure()
self.axes = self.figure.add_subplot(111)
self.lines = []
for item in btrs:
line, = self.axes.plot([], [], label=f"Bone {item['bnid']}")
self.lines.append(line)
self.axes.legend()
# Update the plot data
for i, item in enumerate(btrs):
bnid = item["bnid"]
tran = item["tran"]
line = self.lines[i]
if not line.get_data():
line.set_data(range(len(tran)), tran)
else:
line.set_ydata(tran)
# Set appropriate plot limits if needed
# You can modify this based on your requirements
self.axes.set_xlim(0, len(tran) - 1)
self.axes.set_ylim(-1.0, 1.0) # Modify the y-axis limits if needed
self.figure.canvas.draw()
def animate(self, frame_data):
# Plot the human frame
self.plot_human_frame(frame_data)
def receive_data(self):
while not self.stop_event.is_set():
try:
# Receive data from the queue
data = self.queue.get(timeout=1)
# Check if the expected keys are present in the data dictionary
if "fram" in data:
self.plot_human_frame(data["fram"])
else:
print("Key 'fram' not found in data:", data)
except Queue.empty: # Update to use Queue.Empty
# Ignore empty queue exceptions and continue
pass
def plot_animation(self):
# Create the plot animation
ani = FuncAnimation(self.figure, self.animate, interval=200)
# Show the plot
plt.show()
def start(self):
# Start the data receiving thread
receive_thread = threading.Thread(target=self.receive_data)
receive_thread.start()
# Create the plot animation
self.figure = plt.figure()
self.axes = self.figure.add_subplot(111)
self.lines = []
def init_animation():
for item in self.lines:
item.set_data([], [])
return self.lines
def update_animation(frame):
self.plot_human_frame(frame)
return self.lines
def frame_generator():
while not self.stop_event.is_set():
yield self.queue.get()
ani = FuncAnimation(
self.figure,
update_animation,
init_func=init_animation,
frames=frame_generator, # Use the frame generator function
interval=200
)
# Show the plot
plt.show()
try:
# Wait for a keyboard interrupt to stop the thread
while True:
pass
except KeyboardInterrupt:
# Set the stop event to terminate the thread
self.stop_event.set()
# Wait for the threads to finish
receive_thread.join()
# Usage
receiver = Receiver()
receiver.start()
| gangadhara691/mocopi_read | mcp_receiver/receiver1.py | receiver1.py | py | 6,416 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "struct.unpack",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_n... |
14771653745 | # Importing the Agent class from the mesa module.
# The above code is importing the random and math modules.
import random
import math
from mesa import Agent
# ----------------------------------------------------------
# Actividad Integradora agents.py
#
# Date: 21-Nov-2022
# Authors:
# Sergio Manuel Gonzalez Vargas - A01745446
# Gilberto André García Gaytán - A01753176
# Fernando Ortiz Saldaña - A01376737
# Ricardo Ramírez Condado - A01379299
# ----------------------------------------------------------
# The Caja class is a subclass of the Agent class. It has a constructor that
# initializes the agent's type, color, next color, not_move, agent_details,
# next_state, picked_up, and moves attributes. It also has a recogida() method
# that returns True if the agent's position is in the list of initial
# boxes, otherwise it returns False. It has an agarrar() method that deletes
# the box from the initial_boxes dictionary and adds it to the picked_boxes
# dictionary. It also changes the agent's next_state to the box's next_state.
# It has a levantada() method that sets the object's picked_up attribute to
# False and sets the next_state attribute to the object's current position.
# It has a mueve() method that sets the
# agent's picked_up attribute to True, increments the moves attribute by
# 1, and sets the next_state attribute to the agent's current position. It
class Caja(Agent):
def __init__(self, unique_id, model):
"""
The function takes in a unique_id and a model, and then sets the type
of the agent to box, the color of the agent to 0, the next color of
the agent to 0,
the not_move variable to False, the
agent_details to None, the next_state to None, the picked_up variable
to False, and the moves
variable to 0.
:param unique_id: The unique identifier for the agent
:param model: The model that the agent is in
"""
super().__init__(unique_id, model)
self.type = 'box'
self.color = 0
self.next_color = 0
self.not_move = False
self.agent_details = None
self.next_state = None
self.picked_up = False
self.moves = 0
def recogida(self):
"""
If the position of the agent is in the list of initial boxes,
then return True, otherwise return False
:return: The position of the agent.
"""
if self.pos in self.model.initial_boxes:
return True
else:
return False
def agarrar(self):
"""
The function agarrar() is called when the agent is in the same
position as a box. The function deletes the box from the
initial_boxes dictionary and adds it to the picked_boxes dictionary.
The function also changes the agent's next_state to the box's
next_state
"""
self.agent_details = self.model.initial_boxes[self.pos]
del self.model.initial_boxes[self.pos]
self.model.picked_boxes[self.agent_details[0]] = \
self.agent_details[-1]
self.next_state = self.agent_details[-1]
self.picked_up = True
def levantada(self):
"""
The function is called when the player picks up the object.
It sets the object's picked_up attribute to False and sets the
next_state attribute to the object's current position.
"""
self.picked_up = False
self.next_state = self.pos
def mueve(self):
"""
The function mueve() is called when the agent is picked up by the
robot. It sets the agent's picked_up attribute to True, increments
the moves attribute by 1, and sets the next_state attribute
to the agent's current position
"""
self.picked_up = True
self.moves += 1
self.next_state = self.model.picked_boxes[
self.agent_details[0]]
def step(self):
"""
If the agent is not moving, it checks if it has an agent_details
object, if it does, it moves, if it doesn't, it checks if it can pick
up an object, if it can, it picks it up, if it can't, it does
nothing
"""
if not self.not_move:
if self.agent_details is None:
if self.recogida():
self.agarrar()
else:
self.levantada()
else:
self.mueve()
if self.next_state in self.model.pallets:
self.picked_up = False
self.not_move = True
else:
self.picked_up = False
self.next_state = self.pos
if self.picked_up:
self.next_color = 3
else:
self.next_color = 0
def advance(self):
"""
The function takes the current state of the agent and the next state
of the agent and moves the agent from the current state to the next
state
"""
self.color = self.next_color
self.model.grid.move_agent(self, self.next_state)
# The class Robot_Agent is a
# subclass of the Agent class. It has a constructor that initializes the
# attributes of the class Robot_Agent. It has a function neighbors() that
# returns a list of lists, each of which contains the type of object in a cell
# and the cell's coordinates. It has a function puede_soltar() that returns the
# position of the pallet if it is possible to drop the pallet, otherwise it
# returns False. It has a function soltar_caja() that is called when the agent
# has a box and is in the same cell as the dropoff point. It has a function
# tarima_cercana() that takes a list of coordinates (neighbor) and returns the
# closest pallet to the last coordinate in the list. It has a function
# mueve_caja() that returns
class Robot_Agent(Agent):
def __init__(self, unique_id, model):
"""
The function __init__ is a constructor that initializes the attributes
of the class RobotAgent :param unique_id: The unique ID of the agent
:param model: The model that the agent is in
"""
super().__init__(unique_id, model)
self.type = 'robotagent'
self.color = 1
self.next_color = None
self.has_box = False
self.next_state = None
self.objective_box = None
self.moves = 0
def neighbors(self):
"""
It returns a list of lists, each of which contains the type of object
in a cell and the cell's coordinates :return: A list of lists.
Each list contains the type of object in the cell, the object itself,
and the position of the cell.
"""
contenido = []
neighbors = self.model.grid.get_neighborhood(
self.pos,
moore=False,
include_center=False)
for neighbor in neighbors:
if neighbor in self.model.pallets:
contenido.append(['pallet', self.model.pallets[neighbor],
neighbor])
else:
content = self.model.grid.get_cell_list_contents(neighbor)
if content:
robot, box = False, False
for object in content:
if object.type == 'robot':
robot = True
elif object.type == 'box':
box = True
if robot and box:
contenido.append(['robot-with-box', neighbor])
elif box:
contenido.append(['box', neighbor])
else:
contenido.append(['robot', neighbor])
else:
contenido.append(['empty', neighbor])
return contenido
def puede_soltar(self, neighbors_content):
"""
If there is a pallet in the neighborhood, and it has less than 5 items
on it, then return the pallet's id. Otherwise, return False
:param neighbors_content: a list of tuples, each tuple is a neighbor
of the agent. The tuple contains the type of the neighbor, the number
of items in the neighbor, and the id of the neighbor :return:
the position of the pallet if it is possible
to drop the pallet, otherwise it returns False.
"""
for neighbor in neighbors_content:
if neighbor[0] == 'pallet':
if neighbor[1] < 5:
return neighbor[-1]
return False
def soltar_caja(self, coordinar):
"""
The function soltar_caja() is called when the agent has a box and is in
the same cell as the dropoff point. The function adds one to the
pallet in the dropoff point, sets the next state to the current state,
sets the agent's has_box attribute to False,
and adds the agent's unique_id to the model's picked_boxes dictionary
:param coordinar: the coordinate of the pallet
"""
self.model.pallets[coordinar] += 1
self.next_state = self.pos
self.has_box = False
self.model.picked_boxes[self.unique_id] = coordinar
def tarima_cercana(self, neighbor):
"""
It takes a list of coordinates (neighbor) and returns the closest
pallet to the last coordinate in
the list :param neighbor: a list of coordinates that represent
the path taken by the agent:return: the closest pallet to the
last position of the neighbor.
"""
x1, y1 = neighbor[-1]
min_distance = float('inf')
tarima_cercana = 0
for key in self.model.pallets:
if self.model.pallets[key] < 5:
distance = math.sqrt(((key[0] - x1)**2) + ((key[1] - y1)**2))
if distance < min_distance:
tarima_cercana = [key, neighbor[-1], distance]
min_distance = distance
if tarima_cercana:
return tarima_cercana
else:
return False
def mueve_caja(self, neighbors_content):
"""
The function returns the position of the closest empty cell to
the agent, if there is one, and if it is not reserved by another agent
:param neighbors_content: list of tuples, each tuple contains
the content of a cell and its position :return:
The next state of the agent.
"""
min_distance = float('inf')
graph = []
for neighbor in neighbors_content:
if neighbor[0] == 'empty' and neighbor[-1] not in \
self.model.reserved_cells:
distance = self.tarima_cercana(neighbor)
if distance:
if distance[-1] < min_distance:
min_distance = distance[-1]
graph = distance
if graph:
self.model.reserved_cells.append(graph[1])
self.next_state = graph[1]
self.model.picked_boxes[self.unique_id] = self.next_state
return graph[1]
else:
self.next_state = self.pos
self.model.picked_boxes[self.unique_id] = self.next_state
return self.pos
def hay_caja(self, neighbors_content):
"""
If there is a box in the neighborhood, then add it to the list of
reserved boxes and return the box. Otherwise, return False
:param neighbors_content: list of tuples, each tuple is a cell content
:return: the box that is in the same cell as the agent.
"""
for neighbor in neighbors_content:
if neighbor[0] == 'box' and neighbor[-1] not \
in self.model.reserved_boxes:
self.model.reserved_boxes.append(neighbor[-1])
return neighbor
return False
def mover(self, neighbors_content):
"""
If there is an empty cell in the neighborhood, move to it
:param neighbors_content: a list of tuples,
each tuple contains the content of a cell and its position
:return: a boolean value.
"""
random.shuffle(neighbors_content)
for neighbor in neighbors_content:
if neighbor[0] == 'empty' and neighbor[-1] not in \
self.model.reserved_cells:
self.model.reserved_cells.append(neighbor[-1])
self.next_state = neighbor[-1]
self.moves += 1
return True
self.next_state = self.pos
return False
def recoger(self, box):
"""
The agent picks up a box and adds it to the list of picked up boxes
:param box: the box that the agent is picking up
"""
self.model.picked_objective_boxes.append(box[-1])
self.model.initial_boxes[box[-1]] = [self.unique_id, self.pos]
self.next_state = self.pos
self.has_box = True
def comunicar(self, box_position):
"""
The function comunicar() is called when a box is added to
the objective_boxes list :param box_position: the position of
the box that the agent is currently on
"""
if box_position not in self.model.objective_boxes_added:
self.model.objective_boxes.append(box_position)
self.model.objective_boxes_added.append(box_position)
def caja_cerca(self, neighbor):
"""
It calculates the distance between the objective box and the last box
in the neighbor list.
:param neighbor: a list of tuples, each tuple is a coordinate
:return: The distance between the last point in the neighbor list
and the objective box.
"""
x1, y1 = neighbor[-1]
distancia = math.sqrt(((self.objective_box[0] - x1) ** 2) +
((self.objective_box[1] - y1) ** 2))
return distancia
def mover_a_caja(self, neighbors_content):
"""
If there is an empty cell next to the agent, and that cell is not
already reserved, then the agent
will move to that cell
:param neighbors_content: a list of tuples, each tuple contains the
content of a cell and its
position
:return: The next state of the agent.
"""
min_distance = float('inf')
shortest_path = self.pos
for neighbor in neighbors_content:
if neighbor[0] == 'empty' and neighbor[-1] not in \
self.model.reserved_cells:
distance = self.caja_cerca(neighbor)
if distance < min_distance:
min_distance = distance
shortest_path = neighbor[-1]
if shortest_path:
self.moves += 1
self.model.reserved_cells.append(shortest_path)
self.next_state = shortest_path
return shortest_path
else:
self.next_state = self.pos
return self.pos
def step(self):
"""
If the agent has a box, it will try to drop it, if it can't, it will
try to move it, if it can't, it
will try to communicate with other agents. If the agent doesn't have a
box, it will try to pick one
up, if it can't, it will try to move to the box, if it can't, it will
try to move
"""
if self.objective_box in self.model.picked_objective_boxes:
self.objective_box = None
neighbors_content = self.neighbors()
is_there_a_box = self.hay_caja(neighbors_content)
if self.has_box:
if is_there_a_box:
self.comunicar(is_there_a_box[-1])
can_drop_it = self.puede_soltar(neighbors_content)
if can_drop_it:
self.soltar_caja(can_drop_it)
else:
self.mueve_caja(neighbors_content)
else:
if is_there_a_box:
if self.objective_box:
if self.objective_box not in \
self.model.picked_objective_boxes:
self.model.objective_boxes.append(self.objective_box)
self.objective_box = None
self.recoger(is_there_a_box)
else:
if self.model.objective_boxes and not self.objective_box:
self.objective_box = self.model.objective_boxes.pop()
self.mover_a_caja(neighbors_content)
elif self.objective_box:
self.mover_a_caja(neighbors_content)
else:
self.mover(neighbors_content)
if self.has_box:
self.next_color = 2
else:
self.next_color = 1
def advance(self):
"""
The function takes the current state of the agent, and the next state
of the agent, and moves the
agent from the current state to the next state
"""
self.model.reserved_cells = []
self.model.reserved_boxes = []
self.color = self.next_color
self.model.grid.move_agent(self, self.next_state)
def llenar_stack(model):
"""
If any of the values in the pallets dictionary are less than 5, return
False. Otherwise, return True
:param model: the model object
:return: a boolean value.
"""
for value in model.pallets.values():
if value < 5:
return False
return True
| SergioGonzalez24/Movilidad-Urbana-MSMGC-GPO-302 | ActividadIntegradora/Server/agents.py | agents.py | py | 17,573 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "mesa.Agent",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "mesa.Agent",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number":... |
43498491928 | from IPython import display
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from gym.envs.mujoco import *
from envs.hopper_env import HopperModEnv
from envs.cheetah_env import CheetahModEnv
import numpy as np
import copy
import gym
from scipy.io import loadmat
from scipy.io import savemat
import moviepy.editor as mpy
from simulators import *
from rot_utils import *
import seaborn as sns
sns.set_style('darkgrid')
import warnings
warnings.filterwarnings('ignore')
from part_a import lqr_infinite_horizon
# implement linearization about a point
def linearize_dynamics(f, x_ref, u_ref, dt, my_eps, x_ref_tplus1=None):
"""
f : dynamics simulator
my_eps : delta for forward and backward differences you'll need
NOTE: please use centered finite differences!
x(:,t+1) - x_ref approximately = A*( x(:,t)-x_ref ) + B* ( u(:,t) - u_ref ) + c
If we pick x_ref and u_ref to constitute a fixed point, then c == 0
For part (b), you do not need to use the optional argument (nor c).
For part (d), you'll have to revisit and modify this function
--at this point, you'll want to use the optional argument and the resulting c.
return: A, B, c
"""
if x_ref_tplus1 is not None:
x_ref_next = x_ref_tplus1
else:
x_ref_next = x_ref
dx, du = x_ref.shape[0], u_ref.shape[0]
A, B = np.zeros((dx, dx)), np.zeros((dx, du))
"""YOUR CODE HERE"""
for i in range(dx):
delta_x = np.zeros((dx,))
delta_x[i] = my_eps
f_x_forward = f(x_ref + delta_x, u_ref, dt)
f_x_backward = f(x_ref - delta_x, u_ref, dt)
f_derivatives = (f_x_forward - f_x_backward) / (2 * my_eps)
A[:, i] = f_derivatives
for j in range(du):
delta_u = np.zeros((du,))
delta_u[j] = my_eps
f_x_forward = f(x_ref, u_ref + delta_u, dt)
f_x_backward = f(x_ref, u_ref - delta_u, dt)
f_derivatives = (f_x_forward - f_x_backward) / (2 * my_eps)
B[:, j] = f_derivatives
"""YOUR CODE ENDS HERE"""
c = f(x_ref, u_ref, dt) - x_ref_next
if x_ref_tplus1 is not None:
A = np.hstack([A, c.reshape(-1,1)])
A = np.vstack([A, np.zeros(A.shape[1])])
A[-1,-1] = 1
B = np.vstack([B, np.zeros(B.shape[1])])
if len(B.shape) == 1:
return A, B.reshape(-1, 1), c
return A, B, c
# take an environment and find the infinite horizon controller for the linearized system
def lqr_nonlinear(config):
env = config['env']
f = config['f']
dt = 0.1 # we work with discrete time
my_eps = 0.01 # finite difference for numerical differentiation
# load in our reference points
x_ref, u_ref = config['x_ref'], config['u_ref']
# linearize
A, B, c = linearize_dynamics(f, x_ref, u_ref, dt, my_eps)
print('A shape: ', A.shape, 'B shape: ', B.shape)
dx, du = A.shape[0], B.shape[1]
Q, R = np.eye(dx), np.eye(du) * 2
# solve for the linearized system
K_inf, P_inf = lqr_infinite_horizon(A, B, Q, R) # you implemented in part (a)
# recognize the simulation code from part (a)? modify it to use your controller at each timestep
def simulate(K_inf, f, x_ref, u_ref, dt, n_starting_states, T, noise=None):
for s in np.arange(n_starting_states):
x, u = np.zeros((K_inf.shape[1], T + 1)), np.zeros((K_inf.shape[0], T + 1))
x[:, 0] = starting_states[:, s]
for t in np.arange(T):
"""YOUR CODE HERE"""
u[:, t] = u_ref + K_inf @ (x[:, t] - x_ref)
"""YOUR CODE ENDS HERE"""
x[:, t + 1] = f(x[:, t], u[:, t], dt)
if "p_val" in config.keys():
perturbation_values = config["p_val"]
perturb = perturbation_values[t // (T // len(perturbation_values))]
x[:, t + 1] = f(x[:, t], u[:, t], dt, rollout=True, perturb=perturb)
if env is not None:
if t % 5 == 0:
plt.clf()
plt.axis('off')
plt.grid(b=None)
plt.imshow(env.render(mode='rgb_array', width=256, height=256))
plt.title("Perturbation Magnitude {}".format(perturb))
display.clear_output(wait=True)
display.display(plt.gcf())
if noise is not None:
x[:, t + 1] += noise[:, t]
if env is not None:
plt.clf()
plt.plot(x.T[:-1], linewidth=.6)
plt.plot(np.squeeze(u.T[:-1]) / 10.0, linewidth=.7, linestyle='--') # scaling for clarity
if 'legend' in config.keys():
config['legend'].append('u')
plt.legend(config['legend'])
else:
legend_elements = [Line2D([0], [0], label='x'), Line2D([0], [0], linestyle='--', label='u')]
plt.legend(handles=legend_elements)
plt.xlabel('time')
plt.title(config["exp_name"])
plt.show()
# now let's simulate and see what happens for a few different starting states
starting_states = config['ss']
n_starting_states = starting_states.shape[1]
T = config['steps'] # simulating for T steps
simulate(K_inf, f, x_ref, u_ref, dt, n_starting_states, T)
if 'noise' in config.keys():
# and now in the presence of noise
noise_id = config['noise']
noise_loaded = loadmat("mats/" + noise_id + ".mat")[noise_id]
simulate(K_inf, f, x_ref, u_ref, dt, n_starting_states, noise_loaded.shape[1], noise=noise_loaded)
if __name__ == "__main__":
runCartPole = 1
runHelicopter = 0
runHopper = 0
if runCartPole:
# Find the infinite horizon controller for the linearized version of the cartpole balancing problem
cartpole_config = {
'f': sim_cartpole,
'exp_name': "Cartpole-Balancing",
'env': None,
'steps': 500,
'x_ref': np.array([0, np.pi, 0, 0]),
'u_ref': np.array([0]),
'legend':['x', 'theta', 'xdot', 'thetadot'],
'ss': np.array([[0, 0, 0, 10, 50],
[9*np.pi/10, 3*np.pi/4, np.pi/2, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]), #ss = starting states
'noise': 'p_b_w',
}
lqr_nonlinear(cartpole_config)
if runHelicopter:
# Find the infinite horizon controller for the linearized version of the hovering copter
# Just run the cell below to generate plots using the code you wrote for cartpole!
x_ref, u_ref = np.zeros(12), np.zeros(4)
x_ref[9] = np.arcsin(3.0 / (5 * 9.81))
u_ref[3] = 9.81 * 5 * np.cos(x_ref[9]) / 137.5
heli_config = {
'f': sim_heli,
'env': None,
'exp_name': "Helicopter-Hovering",
'steps': 200,
'x_ref': x_ref,
'u_ref': u_ref,
'ss': loadmat("mats/p_c_heli_starting_states.mat")["heli_starting_states"], # ss = starting states
'noise': 'p_c_w',
}
lqr_nonlinear(heli_config)
if runHopper:
env = HopperModEnv()
x_ref, u_ref = np.zeros(11), np.zeros(env.action_space.sample().shape[0])
hopper_config = {
'env': env,
'f': env.f_sim,
'exp_name': "Perturbed Hopper",
'steps': 500,
'x_ref': x_ref,
'u_ref': u_ref,
'ss': np.array([[np.concatenate([env.init_qpos[1:], env.init_qvel])]]),
'p_val': [0, .1, 1, 10]
}
lqr_nonlinear(hopper_config) | tpvt99/robotics | cs287hw2/part_b.py | part_b.py | py | 7,732 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "seaborn.set_style",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
... |
19686894291 | #Author: Gentry Atkinson
#Organization: Texas University
#Data:11 May, 2021
#Create segmentations of the raw data file using 3 methds
#Method 1: regular breaks every 150 samples
#Method 2: 150 samples centered on a PIP
#Method 3: Divide segments at PIPs, resample each segment to 150
from scipy.signal import resample
from fastpip import pip
import numpy as np
import pandas
import os
files = [
'dog1_walk1.csv',
'dog1_walk2.csv',
'human1_walk1.csv',
'human1_walk2.csv',
'human1_walk3.csv'
]
if __name__ == "__main__":
    # Start from clean output files.
    # NOTE(review): os.system('rm ...') is POSIX-only; os.remove would be portable.
    os.system('rm seg_by_reg_division_rw.csv')
    os.system('rm seg_cen_on_pip_rw.csv')
    os.system('rm seg_and_resamp_from_pips_rw.csv')
    # One output file per segmentation method; rows are written as
    # '<label>, <150 comma-separated samples>'.
    # NOTE(review): these handles are not closed on error; a 'with' block
    # would be safer.
    m1_file = open('seg_by_reg_division_rw.csv', 'a+')
    m2_file = open('seg_cen_on_pip_rw.csv', 'a+')
    m3_file = open('seg_and_resamp_from_pips_rw.csv', 'a+')
    for file in files:
        print("Reading file: ", file)
        instances = pandas.read_csv('raw_data/'+file)
        print('Keys: ', instances.keys())
        print('D types: ', instances.dtypes)
        print('Number of samples: ', len(instances['time']))
        # Only the 'ax' acceleration channel is segmented.
        instance = instances['ax']
        print('Still number of sample: ', len(instance))
        # Label: '0' for dog recordings, '1' for human recordings.
        l = '0' if 'dog' in file else '1'
        print('Label of this instance: ', l)
        #Write segmentations using method 1
        # Method 1: fixed-size windows of 150 samples, no overlap.
        for j in range(0, len(instance)-150, 150):
            m1_file.write(l + ', ' + ', '.join([str(x) for x in instance[j:j+150]]) + '\n')
            #print(l)
        num_segments = len(instance)//150
        pips = pip([(i, j) for i,j in enumerate(instance)], num_segments+1, distance='vertical') #n+1 pips create n segments
        trim_pips = pip([(i, j) for i,j in enumerate(instance)], num_segments+2, distance='vertical')
        trim_pips = trim_pips[1:-1] #remove start and end points
        #Write segmentations using method 2
        # Method 2: 150-sample windows centered on each perceptually
        # important point (PIP), clamped at the signal boundaries.
        for p in trim_pips:
            if p[0]<75:
                m2_file.write(l + ', ' + ', '.join(str(i) for i in instance[:150]) + '\n')
            elif len(instance) - p[0] < 76:
                m2_file.write(l + ', ' + ', '.join(str(i) for i in instance[-150:]) + '\n')
            else:
                m2_file.write(l + ', ' + ', '.join(str(i) for i in instance[p[0]-75:p[0]+75]) + '\n')
            #print(l)
        #Write segmentations using method 3
        # Method 3: segments delimited by consecutive PIPs, each resampled
        # to a fixed length of 150.
        for j in range(len(pips)-1):
            m3_file.write(l + ', ' + ', '.join([str(t) for t in resample(instance[pips[j][0]:pips[j+1][0]], 150)]) + '\n')
            #print(l)
        print('done')
    m1_file.close()
    m2_file.close()
    m3_file.close()
| gentry-atkinson/pip_test | create_segmentations_rw.py | create_segmentations_rw.py | py | 2,615 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.system",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33... |
70270417955 | from django.db import models
from edc_base.model.models import BaseUuidModel
from .panel import Panel
class PanelMapping(BaseUuidModel):
    """Maps the text name of a panel in an external system onto the local
    Panel definition."""

    # Free-text panel name as it appears in the external source.
    panel_text = models.CharField(
        max_length=50,
        help_text='text name of external panel',
    )

    panel = models.ForeignKey(Panel, null=True, help_text="local panel definition")

    def __unicode__(self):
        # BUGFIX: __unicode__ must return a string; the original returned
        # the related Panel instance itself, which raises a TypeError when
        # Django coerces the model to text.
        return u'{0}'.format(self.panel)

    class Meta:
        app_label = "lab_clinic_api"
| botswana-harvard/edc-lab | old/lab_clinic_api/models/panel_mapping.py | panel_mapping.py | py | 448 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "edc_base.model.models.BaseUuidModel",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"ap... |
42390106843 | import numpy as np
from scipy.interpolate import RectBivariateSpline
def LucasKanade_new(It0, It1, rect, p0 = np.zeros(2)):
    """Lucas-Kanade translational tracker.

    Estimates the translation p = [dx, dy] aligning the template window
    ``rect`` of frame ``It0`` with frame ``It1`` via Gauss-Newton
    iterations on bilinear-spline-interpolated images.

    :param It0: template frame (2D array)
    :param It1: current frame (same shape as It0)
    :param rect: [x1, y1, x2, y2] template window corners
    :param p0: initial translation estimate
    :return: estimated translation vector p
    """
    x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
    p, threshold = p0, 0.1
    H, W = It1.shape

    # Spline interpolators so windows can be sampled at sub-pixel coords.
    x = np.linspace(0, H, H)
    y = np.linspace(0, W, W)
    It0_BiRect = RectBivariateSpline(x, y, It0)
    It1_BiRect = RectBivariateSpline(x, y, It1)

    # BUGFIX: linspace's ``num`` must be an integer; np.ceil returns a
    # float, which modern numpy rejects with a TypeError.
    n_x = int(np.ceil(x2) - x1)
    n_y = int(np.ceil(y2) - y1)
    [X, Y] = np.meshgrid(np.linspace(x1, np.ceil(x2), n_x),
                         np.linspace(y1, np.ceil(y2), n_y))
    template = It0_BiRect.ev(X, Y)

    # Pre-computed Gauss-Newton system: template gradients and the
    # Hessian approximation A^T A.  Renamed from ``H`` so the image
    # height variable is not shadowed.
    [Fx, Fy] = np.gradient(template)
    A = np.stack((Fx.flatten(), Fy.flatten()), axis=1)
    hess = np.matmul(A.T, A)

    while True:
        # Same grid shifted by the current estimate.  BUGFIX: use the same
        # point counts as the template grid; the original used
        # ``x2 - x1`` here, which breaks for non-integer rect corners.
        [X1, Y1] = np.meshgrid(np.linspace(x1 + p[0], np.ceil(x2 + p[0]), n_x),
                               np.linspace(y1 + p[1], np.ceil(y2 + p[1]), n_y))
        errorImg = It1_BiRect.ev(X1, Y1) - template
        delta = np.linalg.lstsq(hess, np.matmul(A.T, errorImg.flatten()), rcond=None)[0]
        p = p - delta
        if np.linalg.norm(delta) < threshold:
            break
    return p
| danenigma/Traditional-Computer-Vision | LK-Tracking/code/testing/LucasKanade_new.py | LucasKanade_new.py | py | 1,024 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.RectBivari... |
22932207040 | #!/bin/env python3
# Authors:
## Alexandre Santos 80106
## Leonardo Costa 80162
from ass1_classes import CorpusReader, SimpleTokenizer, ImprovedTokenizer, Indexer, results
import time
import sys
import tracemalloc
import json
#Start time
time1 = time.time()
data = CorpusReader.read('all_sources_metadata_2020-03-13.csv')
# Tokenizer selection: '-i' on the command line picks the improved
# (stopword-aware) tokenizer, anything else the simple one.
if len(sys.argv) == 2 and sys.argv[1] == '-i':
    tokenizer = ImprovedTokenizer('english', 'snowball_stopwords_EN.txt')
    tokenizer_type = 'Improved'
else:
    tokenizer = SimpleTokenizer()
    tokenizer_type = 'Simple'
#Tokenization Step: tokenize the text field (index 1) of every document.
files_tokens1 = [tokenizer.process(data_from_doc[1]) for data_from_doc in data]
#Indexing Step: build the inverted index from the per-document tokens.
inverted_index1 = Indexer.process(files_tokens1)
#end timer (processing time); writing the index is timed separately below.
time2 = time.time()
#memory, peak = tracemalloc.get_traced_memory()
#tracemalloc.stop()
# BUGFIX: use a context manager so the output file is flushed and closed
# (the original left ``fout`` open).
with open(tokenizer_type + "_results.json", "w") as fout:
    fout.write(json.dumps(inverted_index1))
time3 = time.time()
#show results: tokenizer used, indexing time, writing time, index stats.
results(tokenizer_type, time2 - time1, time3 - time2, inverted_index1, data)
| tuxPT/RI_Assignment1 | RI_ass1.py | RI_ass1.py | py | 1,088 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ass1_classes.CorpusReader.read",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ass1_classes.CorpusReader",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "sy... |
73420122275 | import sys
import xbmc
import xbmcvfs
import xbmcgui
import json
import hashlib
import xml.etree.ElementTree as ET
from contextlib import contextmanager
# Standard XML declaration prepended to every generated skin include file.
XML_HEADER = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'
@contextmanager
def isactive_winprop(name, value='True', windowid=10000):
    """Context manager that sets window property ``name`` to ``value`` on
    entry and always clears it on exit, even if the body raises."""
    xbmcgui.Window(windowid).setProperty(name, value)
    try:
        yield
    finally:
        xbmcgui.Window(windowid).clearProperty(name)
@contextmanager
def busy_dialog():
    """Context manager showing Kodi's non-cancellable busy dialog for the
    duration of the body; the dialog is closed even on error."""
    xbmc.executebuiltin('ActivateWindow(busydialognocancel)')
    try:
        yield
    finally:
        xbmc.executebuiltin('Dialog.Close(busydialognocancel)')
def get_localized(text):
    """Resolve a '$LOCALIZE[<id>]' token via Kodi's localisation table;
    any other text is returned unchanged."""
    if text.startswith('$LOCALIZE'):
        # NOTE(review): str.strip removes any of the characters
        # '$LOCALIZE[]' from both ends, so this only behaves correctly for
        # purely numeric ids -- confirm non-numeric tokens never occur.
        text = text.strip('$LOCALIZE[]')
        if try_parse_int(text):
            text = xbmc.getLocalizedString(try_parse_int(text))
    return text
def get_jsonrpc(method=None, params=None):
    """Execute a Kodi JSON-RPC call and return the decoded response dict.

    Returns an empty dict when method/params are missing, or when the call
    or JSON decode fails (the failure is logged at NOTICE level).
    """
    if not method or not params:
        return {}
    query = {
        "jsonrpc": "2.0",
        "params": params,
        "method": method,
        "id": 1}
    try:
        jrpc = xbmc.executeJSONRPC(json.dumps(query))
        response = json.loads(try_decode_string(jrpc, errors='ignore'))
    except Exception as exc:
        kodi_log(u'SkinVariables - JSONRPC Error:\n{}'.format(exc), 1)
        response = {}
    return response
def make_xml_itertxt(xmltree, indent=1, indent_spaces=4, p_dialog=None):
    """
    Render a list of element dicts to indented XML text (recursively).

    xmltree = [{'tag': '', 'attrib': {'attrib-name': 'attrib-value'}, 'content': '' or []}]
    <{tag} {attrib-name}="{attrib-value}">{content}</{name}>

    ``p_dialog``, when given, is a Kodi progress dialog updated per element.
    NOTE(review): the recursive call does not forward ``indent_spaces`` or
    ``p_dialog``, so nested levels always use the defaults -- confirm this
    is intended.
    """
    txt = []
    indent_str = ' ' * indent_spaces * indent
    p_total = len(xmltree) if p_dialog else 0
    p_dialog_txt = ''
    for p_count, i in enumerate(xmltree):
        if not i.get('tag', ''):
            continue  # No tag name so ignore
        txt += ['\n', indent_str, '<{}'.format(i.get('tag'))]  # Start our tag
        for k, v in i.get('attrib', {}).items():
            if not k:
                continue
            txt.append(' {}=\"{}\"'.format(k, v))  # Add tag attributes
            p_dialog_txt = v
        if not i.get('content'):
            txt.append('/>')
            continue  # No content so close tag and move onto next line
        txt.append('>')
        if p_dialog:
            p_dialog.update((p_count * 100) // p_total, message=u'{}'.format(p_dialog_txt))
        if isinstance(i.get('content'), list):
            # Nested children: recurse one indent level deeper.
            txt.append(make_xml_itertxt(i.get('content'), indent=indent + 1))
            txt += ['\n', indent_str]  # Need to indent before closing tag
        else:
            txt.append(i.get('content'))
        txt.append('</{}>'.format(i.get('tag')))  # Finish
    return ''.join(txt)
def make_xml_includes(lines=[], p_dialog=None):
    """Wrap rendered include elements in an <includes> document prefixed
    with the standard XML declaration."""
    parts = [XML_HEADER, '<includes>']
    parts.append(make_xml_itertxt(lines, p_dialog=p_dialog))
    parts.append('</includes>')
    return '\n'.join(parts)
def merge_dicts(org, upd, skipempty=False):
    """Return a copy of ``org`` recursively updated with ``upd``.

    Falsy keys are ignored; with ``skipempty`` falsy values are ignored
    too.  Nested dicts are merged rather than replaced.
    """
    merged = dict(org)
    for key, val in upd.items():
        if not key or (skipempty and not val):
            continue
        if isinstance(val, dict):
            base = merged.get(key)
            if not isinstance(base, dict):
                base = {}
            merged[key] = merge_dicts(base, val, skipempty=skipempty)
        else:
            merged[key] = val
    return merged
def del_empty_keys(d, values=()):
    """Return a copy of ``d`` without entries whose value is falsy or
    listed in ``values``.

    :param d: source dict (never modified)
    :param values: extra values to treat as deletable
    """
    # A tuple default avoids the shared-mutable-default pitfall of the
    # original ``values=[]``.
    return {k: v for k, v in d.items() if v and v not in values}
def get_skinfolders():
    """
    Get the various xml folders for skin as defined in addon.xml
    e.g. 21x9 1080i xml etc
    """
    folders = []
    # BUGFIX: open the file before the try block; previously a failed
    # xbmcvfs.File() left ``addonfile`` unbound and the finally clause
    # raised NameError instead of the real error.
    addonfile = xbmcvfs.File('special://skin/addon.xml')
    try:
        addoncontent = addonfile.read()
    finally:
        addonfile.close()
    xmltree = ET.ElementTree(ET.fromstring(addoncontent))
    # The skin's xml folders are the 'res' children of the
    # 'xbmc.gui.skin' extension point.
    for child in xmltree.getroot():
        if child.attrib.get('point') == 'xbmc.gui.skin':
            for grandchild in child:
                if grandchild.tag == 'res' and grandchild.attrib.get('folder'):
                    folders.append(grandchild.attrib.get('folder'))
    return folders
def make_hash(content):
    """Return the hex MD5 digest of ``content`` (used for change
    detection of generated files, not for security)."""
    digest = hashlib.md5(content.encode('utf-8'))
    return digest.hexdigest()
def check_hash(hashname, hashvalue=None):
    """Return ``hashvalue`` when it differs from the skin string stored
    under ``hashname`` (or when nothing is stored yet); return None when
    the stored value matches, i.e. nothing needs rebuilding."""
    last_version = xbmc.getInfoLabel('Skin.String({})'.format(hashname))
    if not last_version:
        return hashvalue
    if hashvalue != last_version:
        return hashvalue
def write_file(filepath=None, content=None):
    """Write ``content`` to ``filepath`` via the Kodi VFS; no-op when no
    path is given.  Content is encoded for Python 2 compatibility."""
    if not filepath:
        return
    f = xbmcvfs.File(filepath, 'w')
    f.write(try_encode_string(content))
    f.close()
def write_skinfile(filename=None, folders=None, content=None, hashvalue=None, hashname=None, reloadskin=True, checksum=None):
    """Write ``content`` to ``filename`` in every skin resolution folder.

    Optionally stores ``hashvalue`` under the ``hashname`` skin string and
    the content's MD5 under ``checksum``, then reloads the skin so the new
    includes take effect.  No-op unless all of filename/folders/content
    are provided.
    """
    if not filename or not folders or not content:
        return
    for folder in folders:
        write_file(filepath='special://skin/{}/{}'.format(folder, filename), content=content)
    if hashvalue and hashname:
        xbmc.executebuiltin('Skin.SetString({},{})'.format(hashname, hashvalue))
    if checksum:
        xbmc.executebuiltin('Skin.SetString({},{})'.format(checksum, make_hash(content)))
    if reloadskin:
        xbmc.executebuiltin('ReloadSkin()')
def join_conditions(org='', new='', operator=' | '):
    """Append ``new`` to ``org`` separated by ``operator``; return ``new``
    alone when ``org`` is empty."""
    if not org:
        return new
    return '{}{}{}'.format(org, operator, new)
def kodi_log(value, level=0):
    """Write ``value`` to the Kodi log, prefixed with the add-on id.

    level 1 logs at NOTICE, anything else at DEBUG.  Handles bytes input
    and Python 2 unicode encoding; failures inside the logger are logged
    rather than raised.
    """
    try:
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        logvalue = u'{0}{1}'.format('[script.skinvariables]\n', value)
        # Python 2's xbmc.log needs encoded bytes.
        if sys.version_info < (3, 0):
            logvalue = logvalue.encode('utf-8', 'ignore')
        if level == 1:
            xbmc.log(logvalue, level=xbmc.LOGNOTICE)
        else:
            xbmc.log(logvalue, level=xbmc.LOGDEBUG)
    except Exception as exc:
        xbmc.log(u'Logging Error: {}'.format(exc), level=xbmc.LOGNOTICE)
def load_filecontent(filename=None):
    """Read and return the contents of ``filename`` via the Kodi VFS."""
    # BUGFIX: open before the try block; previously a failed open left
    # ``vfs_file`` unbound and the finally clause raised NameError
    # instead of the real error.
    vfs_file = xbmcvfs.File(filename)
    try:
        content = vfs_file.read()
    finally:
        vfs_file.close()
    return content
def try_parse_int(string):
    """Parse ``string`` as an int, returning 0 for empty or malformed
    input instead of raising."""
    try:
        value = int(string)
    except Exception:
        value = 0
    return value
def try_decode_string(string, encoding='utf-8', errors=None):
    """Best-effort decode for Python 2; Python 3 strings pass through
    untouched.  ``errors``, when given, is forwarded to ``decode``."""
    if sys.version_info.major != 3:
        try:
            args = (encoding, errors) if errors else (encoding,)
            string = string.decode(*args)
        except Exception:
            pass
    return string
def try_encode_string(string, encoding='utf-8'):
    """Best-effort encode for Python 2; Python 3 strings pass through
    untouched."""
    if sys.version_info.major != 3:
        try:
            string = string.encode(encoding)
        except Exception:
            pass
    return string
| Atrion/Kodi_18_Repo | script.skinvariables/resources/lib/utils.py | utils.py | py | 7,084 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "xbmcgui.Window",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "xbmcgui.Window",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "xbmc.executeb... |
7974408338 | import os
import numpy as np
from matplotlib import pyplot as plt
from time import perf_counter_ns
import python_impl
import numba_impl
import ray_impl
import opencl_impl
# Bounds passed to np.random.randint for test-matrix entries
# (low inclusive, high exclusive per numpy's randint contract).
MATRIX_MIN_VALUE = -10
MATRIX_MAX_VALUE = 10
def test_correctness(method, max_size):
    """Compare ``method`` against numpy's determinant on random integer
    matrices of sizes 3..max_size-1; raises AssertionError on mismatch."""
    for size in range(3, max_size):
        matrix = np.random.randint(MATRIX_MIN_VALUE, MATRIX_MAX_VALUE, size=(size, size))
        expected = round(np.linalg.det(matrix))
        actual = round(method(matrix))
        assert expected == actual
        print(f'Run {size + 1}/{max_size} completed')
def measure_time(method, max_size):
    """Time ``method`` on random square matrices of sizes 3..max_size-1
    and return the per-call durations in nanoseconds."""
    durations = []
    for size in range(3, max_size):
        matrix = np.random.randint(MATRIX_MIN_VALUE, MATRIX_MAX_VALUE, size=(size, size))
        started = perf_counter_ns()
        method(matrix)
        ended = perf_counter_ns()
        durations.append(ended - started)
    return durations
def make_graph(name, times, max_size):
    """Plot per-size timing results and save them to plots/<name>.png.

    BUGFIX: the x axis now covers sizes 3..max_size-1, matching the values
    produced by measure_time; the original plotted range(1, max_size + 1),
    whose length never matched ``times`` and crashed matplotlib.
    """
    plt.cla()
    plt.title(f"Time to count a matrix determinant using '{name}'")
    plt.plot(range(3, max_size), times)
    plt.xlabel("Size of the square matrix")
    plt.ylabel("Time [ns]")
    plt.grid()
    plt.savefig(f'plots/{name}.png')
def make_combined_graph(names, all_times, max_size):
    """Plot timing results of every method on a single labelled graph and
    save it to plots/combined.png.

    BUGFIX: the x axis now covers sizes 3..max_size-1, matching the values
    produced by measure_time; the original plotted range(1, max_size + 1),
    whose length never matched each ``times`` list and crashed matplotlib.
    """
    plt.cla()
    plt.title("Time to count a matrix determinant using different methods")
    for (times, name) in zip(all_times, names):
        plt.plot(range(3, max_size), times, label=name)
    plt.xlabel("Size of the square matrix")
    plt.ylabel("Time [ns]")
    plt.grid()
    plt.legend()
    plt.savefig('plots/combined.png')
def dump_times(name, times):
    """Write timing results to plots/<name>.csv as '<index>;<time>' rows
    (index is 1-based)."""
    rows = [f'{idx};{value}\n' for idx, value in enumerate(times, start=1)]
    with open(f'plots/{name}.csv', "wt") as out:
        out.writelines(rows)
if __name__ == "__main__":
if not os.path.exists('plots'):
os.mkdir('plots')
PERF_MAX_SIZE = 10
TIME_MAX_SIZE = 15
methods = [ opencl_impl.det]
names = ["open_cl"]
all_times = []
for (method, name) in zip(methods, names):
print(name, ":")
# test_correctness(method, PERF_MAX_SIZE)
times = measure_time(method, TIME_MAX_SIZE)
all_times.append(times)
dump_times(name, times)
make_graph(name, times, TIME_MAX_SIZE)
make_combined_graph(names, all_times, TIME_MAX_SIZE)
# print(numpy_scores)
# print(our_det_scores)
# plt.title("Time of counting determinant according to matrix size ")
# plt.plot(range(1 ,11), our_det_scores, label="our implementation" )
# plt.plot(range(1 ,11), numpy_scores, label="numpy det")
# plt.xlabel("Size XnX of matrix")
# plt.ylabel("Time passed to count determinant (ns)")
# plt.grid()
# plt.legend()
# plt.savefig("example.png")
| DocentSzachista/akceleracja | main.py | main.py | py | 2,693 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.det",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.linalg"... |
11946989578 | from django.db.models import Sum
from debts.models import Debt
def get_users(debts):
    """Return every distinct creditor and debtor appearing in ``debts``,
    in first-seen order (creditor before debtor within each record)."""
    seen = []
    for record in debts:
        for user in (record.creditor, record.debtor):
            if user not in seen:
                seen.append(user)
    return seen
def get_creditors(debts):
    """Return {user name: total credit} for every user appearing in the
    ``debts`` queryset; users with no credit entries get 0."""
    totals = {}
    for user in get_users(debts):
        aggregated = debts.filter(creditor=user).aggregate(total_credit=Sum('charge'))
        amount = aggregated['total_credit']
        totals[user.name] = float(amount) if amount else 0
    return totals
def get_debtors(debts):
    """Return {user name: total debt} for every user appearing in the
    ``debts`` queryset; users with no debt entries get 0."""
    totals = {}
    for user in get_users(debts):
        aggregated = debts.filter(debtor=user).aggregate(total_debt=Sum('charge'))
        amount = aggregated['total_debt']
        totals[user.name] = float(amount) if amount else 0
    return totals
def make_debt_calculateon(debts):
    """Net each user's total debts against their total credits.

    Returns {'creditors': {name: net_credit}, 'debtors': {name: net_debt}};
    users whose credits and debts cancel exactly appear in neither dict.
    """
    creditors = get_creditors(debts)
    debtors = get_debtors(debts)
    results = {'creditors': {}, 'debtors': {}}
    for name in creditors:
        # Positive balance -> net debtor, negative -> net creditor.
        balance = debtors[name] - creditors[name]
        if balance > 0:
            results['debtors'][name] = balance
        elif balance < 0:
            results['creditors'][name] = abs(balance)
        # BUGFIX: the original ended with ``else: next`` -- a no-op
        # expression statement that merely evaluated the builtin ``next``.
        # A zero balance is simply skipped.
    return results
def get_opt_debts_payers(debts_data):
    """Greedily match net debtors to net creditors and return
    {debtor: {creditor: amount}} payment instructions.

    When no payments are needed, every Debt's charge is reset to 0 in the
    database as a side effect.
    NOTE(review): max() on the dicts selects by user *name*
    (lexicographically), not by amount -- confirm this is intended.
    """
    debts = make_debt_calculateon(debts_data)
    result_dict = {debtor: {} for debtor in debts['debtors']}
    # Repeatedly pair one creditor with one debtor, settle the smaller of
    # the two balances, and push any remainder back for the next round.
    while debts['creditors'] and debts['debtors']:
        cur_creditor = max(debts['creditors'])
        cur_credit = debts['creditors'].pop(cur_creditor)
        cur_debtor = max(debts['debtors'])
        cur_debt = debts['debtors'].pop(cur_debtor)
        if cur_credit == cur_debt:
            try:
                result_dict[cur_debtor][cur_creditor] += cur_debt
            except KeyError:
                result_dict[cur_debtor][cur_creditor] = cur_debt
        elif cur_credit > cur_debt:
            try:
                result_dict[cur_debtor][cur_creditor] += cur_debt
            except KeyError:
                result_dict[cur_debtor][cur_creditor] = cur_debt
            # Creditor still owed the difference.
            debts['creditors'][cur_creditor] = round(abs(cur_credit - cur_debt), 2)
        else:
            try:
                result_dict[cur_debtor][cur_creditor] += cur_credit
            except KeyError:
                result_dict[cur_debtor][cur_creditor] = cur_credit
            # Debtor still owes the difference.
            debts['debtors'][cur_debtor] = round(abs(cur_credit - cur_debt), 2)
    if len(result_dict) == 0:
        # No outstanding debtors: zero out every stored Debt charge.
        debts = Debt.objects.all()
        for debt in debts:
            debt.charge = 0
            debt.save()
    return result_dict
| Rven721/my_crm | debts/busines_logic/debt_calc.py | debt_calc.py | py | 2,986 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "debts.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "debts.models",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "debts.models.filter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "debts.models",
"... |
15479925188 | # -*- coding: utf-8 -*-
"""
Filename: hsb_trade_tracker.py
Date created: Fri Aug 21 12:17:57 2020
@author: Julio Hong
Purpose: Tracks all the ongoing trades in the bazaar of Hypixel Skyblock. Focus on lapis lazuli for now.
Track how the amount per price changes over time for buy/sell orders.
Maybe also track instant buy/sell
Steps:
Find the lapis_info
Create a df for each price. Then each row represents a timestamp, and the data represents the amount transacted.
Create a dict of dfs? But updating them would be annoying. Maybe multi-index? Timestamp -> prices
But if a price is absent? Still have to add a timestamp row to show zero value.
It'll be wasteful I guess, but if timestamp -> price, some prices will be absent within timestamps.
    Price -> Timestamp. Timestamps are columns because they are easier to add to the df
If there is a new price, then add a new row to the outer index
Store the lapis_info in an external document, maybe a spreadsheet.
Refresh the scraping of lapis_info
Change to melons because lapis is full of speculation and price depression.
Graph the info
Notes: https://hypixel.net/threads/skyblock-cant-find-what-item-is-that.3064833/
Cowtipper said:
Hypixel SkyBlock uses the pre-Minecraft 1.13 damage/data values for some items:
INK_SACK:0 is the normal ink sack (= a dye with damage value zero).
INK_SACK:3 is a dye with damage value 3: Cocoa Beans.
INK_SACK:4 is Lapis Lazuli respectively.
You can see the full list of all item>damage values on the Minecraft Wiki.
It works the same way for logs and fish: for example RAW_FISH:0 is the normal fish, while RAW_FISH:3 is pufferfish.
Check for the largest spread for bazaar flipping?
"""
import requests
import datetime as dt
from time import sleep
import pandas as pd
import os
from openpyxl import load_workbook
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = [10, 5]
# To adjust the dataframe appearance
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 200)
# sold_over_time_df = pd.DataFrame(index=)
# Unique to each player and can be updated
# SECURITY NOTE(review): this Hypixel API key is hard-coded and committed;
# it should be rotated and loaded from the environment or a config file.
api_key = '4aa4f936-c170-4a8e-849a-4418f76c8fe5'
payload = {'key': api_key}
# Per-good price histories: rows are prices, columns are scrape timestamps.
sold_over_time_dfs = {}
bought_over_time_dfs = {}
# Buy/sell spread per good (rows) per scrape timestamp (columns).
spread_over_time_df = pd.DataFrame([])
# Add the selected keys into a list
# goods_of_interest = ['MELON', 'IRON_INGOT', 'DIAMOND', 'WHEAT', 'OBSIDIAN']
# goods_of_interest = ['DIAMOND', 'INK_SACK:4']
goods_of_interest = 'ALL GOODS'
def export_create_or_update(filename, input_df, good=''):
    """Append ``input_df`` as a new sheet to ``filename``, creating the
    workbook if it does not exist yet.

    ``good``, when given, is only used to tailor the progress messages.
    NOTE(review): the sheet name comes from the module-level ``timestamp``
    variable, which is only assigned inside the run loop -- calling this
    before a scrape session raises NameError.
    NOTE(review): ``txn_type`` is unbound when ``filename`` contains
    neither 'sold' nor 'bought' -- confirm all callers follow that naming.
    """
    # Export the results
    # Use goods_of_interest to generate filenames?
    # Open the existing file and add a new sheet based on datetime
    # Might convert this into a function in future
    if good == '':
        # Check if the files already exist
        if os.path.exists(filename):
            print('Update existing spreadsheet')
            book = load_workbook(filename)
            writer = pd.ExcelWriter(filename, engine='openpyxl')
            writer.book = book
            writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
        else:
            print('Create new spreadsheet')
            writer = pd.ExcelWriter(filename, engine='xlsxwriter')
    else:
        # Determine if buy or sell sheet is being updated
        if 'sold' in filename:
            txn_type = 'sell'
        elif 'bought' in filename:
            txn_type = 'buy'
        # Check if the files already exist
        if os.path.exists(filename):
            print('Update existing ' + txn_type + '_spreadsheet for ' + good)
            # sold_over_time
            book = load_workbook(filename)
            writer = pd.ExcelWriter(filename, engine='openpyxl')
            writer.book = book
            writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
        else:
            print('Create new ' + txn_type + '_spreadsheet for ' + good)
            writer = pd.ExcelWriter(filename, engine='xlsxwriter')
    # Put all the common code after the 'else' statement
    input_df.to_excel(writer, sheet_name=timestamp)
    writer.save()
    writer.close()
def find_best_spread(bought_over_time_dfs, spread_over_time_df, interval=50):
    """Rank goods by a 'weighted spread' bazaar-flipping score.

    The score multiplies the mean buy/sell spread over the last
    ``interval`` scrapes by a trade-frequency multiplier (derived from
    column-to-column changes in the buy histories) and by the profit
    percentage relative to the sell price.  Returns a dataframe indexed
    by good with columns mean_spread, trade_freq_multiplier, sellPrice,
    profit_pct and weighted_spread, rounded to 1 decimal place.
    NOTE(review): also reads the module-level ``sold_over_time_dfs`` dict
    in addition to its parameters.
    """
    # Need to track number of transactions for each item also
    # See how popular they are and the spread as well or else I try to flip slow-moving items
    # If the delta is non-zero for a certain period of time, then mark it on the spread_df
    # This is kinda nasty. Just take the average within the latest interval and see if there are any transactions
    # See what is the best interval value to select? Run a few values
    # Then calculate a weight based on number of transactions within the interval. Price doesn't matter just the number of orders.
    # I removed the orders up earlier. Uh oh. I guess it's not that needed.
    def calc_timestamp_deltas(bought_over_time_dfs, interval):
        # Average absolute change of amounts between scrapes, per item.
        trade_freq = pd.Series(index=bought_over_time_dfs.keys())
        for item in bought_over_time_dfs.keys():
            # Track the delta between columns.
            deltas_over_time = bought_over_time_dfs[item].diff(axis=1)
            delta_average = deltas_over_time.sum().sum() / interval
            trade_freq.loc[item] = abs(delta_average)
        return trade_freq
    # I was going to take the latest value but average is less susceptible to random error
    latest_spreads = pd.DataFrame(index=spread_over_time_df.index)
    latest_spreads['mean_spread'] = spread_over_time_df.iloc[:,-interval:].mean(axis=1)
    # latest_spreads = spread_over_time_df.iloc[:,-interval:].mean(axis=1)
    latest_spreads['trade_freq_multiplier'] = calc_timestamp_deltas(bought_over_time_dfs, interval)
    # latest_spreads['buy_trades'] = calc_timestamp_deltas(bought_over_time_dfs, interval)
    # latest_spreads['sell_trades'] = calc_timestamp_deltas(sold_over_time_dfs, interval)
    # Take the min of the buying trades and selling trades, which might represent what is being satisfied?
    # latest_spreads['trade_freq_multiplier'] = latest_spreads[['buy_trades', 'sell_trades']].max(axis=1)
    # No, how to distinguish between trades being satisfied and trades being removed?
    # Find the sellPrice (bazaar sells)
    # Brutal way of doing this
    # for key in json_products.keys():
    #     # I'm not sure how this value is calculated, but it sometimes doesn't match the bazaar sell values
    #     latest_spreads.loc[key, 'sellPrice'] = json_products[key]['quick_status']['sellPrice']
    for key in sold_over_time_dfs.keys():
        # Find the lowest sell price within the interval
        # print(key)
        latest_means = sold_over_time_dfs[key].iloc[:, -interval:].mean(axis=1)
        # print(latest_means)
        non_nan_means = latest_means.dropna()
        try:
            latest_spreads.loc[key, 'sellPrice'] = max(list(non_nan_means.index))
        except ValueError:
            # Make this very high so it won't show up under best options
            latest_spreads.loc[key, 'sellPrice'] = -1
    # Calculate profit% from the spread
    latest_spreads['profit_pct'] = latest_spreads['mean_spread'] / latest_spreads['sellPrice']
    # Multiply it by the spread.
    # latest_spreads['weighted_spread'] = latest_spreads['mean_spread'] * latest_spreads['trade_freq_multiplier']
    latest_spreads['weighted_spread'] = latest_spreads['mean_spread'] * latest_spreads['trade_freq_multiplier'] * latest_spreads['profit_pct']
    # Suppress scientific notation in pandas
    # latest_spreads.apply(lambda x: '%.5f' % x)
    latest_spreads = latest_spreads.round(1)
    return latest_spreads
def load_goods_records(path):
    """Reload previously exported per-good workbooks from ``path`` into
    the module-level sold/bought history dicts.

    File names are expected as '<GOOD>_sold_...xlsx' or
    '<GOOD>_bought_...xlsx'; each workbook's sheets (one per scrape
    timestamp) are concatenated into one dataframe per good.
    NOTE(review): goods whose names contain '_' (e.g. INK_SACK_4) split
    into the wrong parts here -- confirm the file-naming convention.
    """
    recording_spreadsheets = [file for file in os.listdir(path) if file.endswith(".xlsx")]
    for file in recording_spreadsheets:
        filename_list = file.split('_')
        good = filename_list[0]
        # Combines all the sheets into a single dataframe.
        # BUGFIX: read from the directory that was listed -- the original
        # passed the bare file name, which only worked when ``path`` was
        # the current working directory.
        read_df = pd.concat(pd.read_excel(os.path.join(path, file), sheet_name=None, index_col=0), axis=0)
        # Removes the highest level of the multi-index columns
        read_df.columns = read_df.columns.droplevel()
        # Sort by good
        # Sort sell or buy
        if filename_list[1] == 'sold':
            sold_over_time_dfs[good] = read_df
        elif filename_list[1] == 'bought':
            bought_over_time_dfs[good] = read_df
        else:
            return 'How did we get here?'
#=========== RUN CODE ===========
# Outer loop: run 15-minute scrape sessions forever, exporting results to
# Excel after each session.  Ctrl-C during a session exports whatever was
# collected so far and stops.
while True:
    try:
        start_time = dt.datetime.now()
        runtime_elapsed = dt.timedelta(minutes=1)
        runtime_cap = dt.timedelta(minutes=15)
        # Run this in a loop every interval
        # # 1) Testing mode
        # count = 0
        # while count < 4:
        #     count += 1
        # 2) Run mode
        # while True:
        # Run a while loop for every interval of 15min.
        while runtime_elapsed < runtime_cap:
            try:
                time_of_scrape = dt.datetime.now()
                # print('Hypixel Skyblock Bazaar trading for ' + str(goods_of_interest) + ' running at ' + str(time_of_scrape))
                print('Hypixel Skyblock Bazaar trading for ALL GOODS running at ' + str(time_of_scrape))
                r = requests.get('https://api.hypixel.net/skyblock/bazaar', params=payload)
                #r = requests.get('https://api.hypixel.net/skyblock/product?', params=payload)
                json_data = (r.json())
                json_products = json_data['products']
                # Get everything inside
                goods_of_interest = list(json_products.keys())
                sell_dfs = {}
                buy_dfs = {}
                # I'm going to generalise this. I don't know if that's wise
                for good in json_products.keys():
                    # Convert sell_summary and buy_summary into df
                    # Sell and buy are reversed for some reason
                    sell_dfs[good] = pd.DataFrame(json_products[good]['sell_summary'])
                    buy_dfs[good] = pd.DataFrame(json_products[good]['buy_summary'])
                    # If there are no transactions available at the time
                    if sell_dfs[good].empty or buy_dfs[good].empty:
                        print(good + ' has no transactions')
                        sell_dfs[good]['orders'] = -1
                        sell_dfs[good][time_of_scrape] = -1
                        buy_dfs[good]['orders'] = -1
                        buy_dfs[good][time_of_scrape] = -1
                        spread_over_time_df.loc[good, time_of_scrape] = -1
                    else:
                        # Change price to index
                        sell_dfs[good].set_index('pricePerUnit', inplace=True)
                        buy_dfs[good].set_index('pricePerUnit', inplace=True)
                        # Change amount to timestamp
                        sell_dfs[good].rename(columns={'amount':time_of_scrape}, inplace=True)
                        buy_dfs[good].rename(columns={'amount':time_of_scrape}, inplace=True)
                        # print(good + ' being scraped')
                        # Set a threshold volume
                        threshold_vol = 2000
                        # Find the highest buy price and lowest sell price above the threshold
                        highest_sell = max(sell_dfs[good].index)
                        lowest_buy = min(buy_dfs[good].index)
                        # Find delta and store in a df for each good against time
                        spread_over_time_df.loc[good, time_of_scrape] = lowest_buy - highest_sell
                        # If first, then set the main df as equal to this timestamp
                        # Check if this variable exists first, then concat with the new column
                        # print(("sold_over_time_dfs['" + good + "']"))
                        # if ("sold_over_time_dfs['" + good + "']") in locals():
                        # Change to check if the value exists in keys()
                        # Only if this is a GOI, then record in a df
                        if good in goods_of_interest:
                            if good in sold_over_time_dfs.keys():
                                sold_over_time_dfs[good] = pd.concat([sold_over_time_dfs[good], sell_dfs[good][time_of_scrape]], axis=1)
                                bought_over_time_dfs[good] = pd.concat([bought_over_time_dfs[good], buy_dfs[good][time_of_scrape]], axis=1)
                            # Else set the current df as the template
                            else:
                                print('Initialising over_time_dfs for ' + good)
                                sold_over_time_dfs[good] = sell_dfs[good].copy()
                                bought_over_time_dfs[good] = buy_dfs[good].copy()
                                # Temporary until I'm sure the amount is being tabulated properly
                                sold_over_time_dfs[good].drop(columns='orders', inplace=True)
                                bought_over_time_dfs[good].drop(columns='orders', inplace=True)
                print('Scrape completed at ' + str(dt.datetime.now()))
                sleep(10)
                # Measure how long the loop has been running
                runtime_elapsed = dt.datetime.now() - start_time
            # Adds a way to break the loop with user input
            except KeyboardInterrupt:
                break
        # Moved out of the except clause because I want this to also run after the while-loop ends
        # Put this here so I don't keep making new sheets every time I run the export section
        # Excel can't accept ":" in sheet name
        timestamp = str(dt.datetime.now()).replace(':', '_')
        print('End scraping')
        # Graph the results
        # Can't really make sense of it though...
        # bought_over_time_dfs['DIAMOND'].T.plot()
        # Save data
        folder_path = r"C:\Users\Julio Hong\Documents\GitHub\Minecraft\Hypixel Skyblock\\"
        print('Exporting results to Excel')
        sell_spreadsheets = {}
        buy_spreadsheets = {}
        goods_of_interest = ['DIAMOND', 'INK_SACK:4']
        for good in goods_of_interest:
            # ':' is not allowed in Windows file names.
            if ':' in good:
                good_name = good.replace(':', '_')
            else:
                good_name = good
            sell_spreadsheets[good] = os.path.join(folder_path, good_name + "_sold_over_time.xlsx")
            buy_spreadsheets[good] = os.path.join(folder_path, good_name + "_bought_over_time.xlsx")
            export_create_or_update(sell_spreadsheets[good], sold_over_time_dfs[good], good)
            export_create_or_update(buy_spreadsheets[good], bought_over_time_dfs[good], good)
        # If the loop time is less than the interval time OR If before the halfway mark, revert back to last spread_data
        # Elif after the halfway mark, calculate a new truncated spread data (and mark it)
        # if runtime_elapsed < dt.timedelta(seconds=interval * 15) / 2:
        interval = 50
        if runtime_elapsed < dt.timedelta(seconds=interval * 15):
            interval = len(spread_over_time_df.columns)
        more_spreads = find_best_spread(bought_over_time_dfs, spread_over_time_df, interval)
        print(more_spreads.sort_values(by=['weighted_spread']))
        # Export the spreads
        txn_spread_file = os.path.join(folder_path, "txn_spread_over_time.xlsx")
        export_create_or_update(txn_spread_file, more_spreads.sort_values(by=['weighted_spread'], ascending=False))
        print('Results successfully exported')
    except KeyboardInterrupt:
        # Interrupted mid-session: still compute and export the spreads
        # from whatever data was collected, then stop the outer loop.
        interval = 50
        # If the loop time is less than the interval time OR If before the halfway mark, revert back to last spread_data
        # Elif after the halfway mark, calculate a new truncated spread data (and mark it)
        # if runtime_elapsed < dt.timedelta(seconds=interval * 15) / 2:
        if runtime_elapsed < dt.timedelta(seconds=interval * 15):
            interval = len(spread_over_time_df.columns)
        more_spreads = find_best_spread(bought_over_time_dfs, spread_over_time_df, interval)
        print(more_spreads.sort_values(by=['weighted_spread']))
        # Export the spreads
        txn_spread_file = os.path.join(folder_path, "txn_spread_over_time.xlsx")
        export_create_or_update(txn_spread_file, more_spreads.sort_values(by=['weighted_spread'], ascending=False))
        print('Results successfully exported')
        break

# Run this entire code until I want it to stop
# Loop it every 10-15 min
print('BEFORE SHARING REMOVE YOUR API KEY')
| LioHong/Hypixel-Skyblock | hsb_trade_tracker.py | hsb_trade_tracker.py | py | 17,030 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "pandas.set_option",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "p... |
40884002735 | import os
import glob
import json
import datetime
from collections import defaultdict
import cv2
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree
from PySide6.QtCore import QObject, Signal
from ..utils.common import get_immediate_subdirectories, to_celsius
from ..utils.geojson import load_geojson, save_geojson, coords_wgs84_to_ltp
def load_modules(file):
    """Load refined module geolocations from a GeoJSON file.

    Returns:
        (df, df_corners, df_centers): the raw WGS84 dataframe indexed by
        track_id, plus the local-tangent-plane corner polygons and center
        points split out of the converted dataframe.
    """
    # Use a context manager so the file handle is closed (it was leaked before).
    with open(file, "r") as f:
        df = load_geojson(f).set_index("track_id")
    df_ltp = coords_wgs84_to_ltp(df)
    df_corners = df_ltp.loc[df_ltp["geometry_type"] == "Polygon"]
    df_centers = df_ltp.loc[df_ltp["geometry_type"] == "Point"]
    return df, df_corners, df_centers
def truncate_patch(patch, margin=0.05):
    """Truncate module edges by `margin` (fraction of width) to remove the module frame.

    Args:
        patch: 2-D (or higher) array-like image; cropping applies to the first two axes.
        margin: fraction of the patch width to cut from every side.

    Returns:
        The cropped patch. When the margin rounds down to zero pixels the
        patch is returned unchanged — the original slice ``[0:-0]`` produced
        an empty array for narrow patches.
    """
    width = patch.shape[1]
    margin_px = int(margin * width)
    if margin_px == 0:
        # [0:-0] would slice to an empty array, so skip cropping entirely.
        return patch
    return patch[margin_px:-margin_px, margin_px:-margin_px]
def remove_patches_with_sun_reflection(patch_files, sun_reflections):
    """Return a copy of patch_files without the patches whose base name
    (without extension) is listed in sun_reflections."""
    return [
        patch_file
        for patch_file in patch_files
        if os.path.splitext(os.path.basename(patch_file))[0] not in sun_reflections
    ]
def get_patch_temps(patch_files, margin, to_celsius_gain, to_celsius_offset):
    """Return min/max/mean/median temperatures (deg C) for every readable patch of a module."""
    temps = defaultdict(list)
    reducers = (("min", np.min), ("max", np.max), ("mean", np.mean), ("median", np.median))
    for path in patch_files:
        raw = cv2.imread(path, cv2.IMREAD_ANYDEPTH)
        if raw is None:
            continue  # unreadable file -> skip, same as the original behaviour
        cropped = truncate_patch(raw, margin)
        for stat_name, reduce_fn in reducers:
            temps[stat_name].append(to_celsius(reduce_fn(cropped), to_celsius_gain, to_celsius_offset))
    return temps
def mean_over_patches(dataframe, temps):
    """Attach '<agg>_temp' columns: the mean over all patch statistics per module.

    Mutates `dataframe` in place; `temps` maps track_id -> dict of lists as
    produced by get_patch_temps.
    """
    for agg in ["min", "max", "mean", "median"]:
        per_module = {track_id: np.mean(module_temps[agg]) for track_id, module_temps in temps.items()}
        dataframe["{}_temp".format(agg)] = pd.Series(per_module)
class AnalysisModuleTemperaturesWorker(QObject):
    """Qt worker that computes per-module temperature statistics for a dataset.

    For every tracked PV module it aggregates the temperatures of all its
    radiometric patches, subtracts the median temperature of neighbouring
    modules ("*_corrected" columns) and writes results.geojson plus a
    meta.json describing the run. Progress and cancellation are reported
    through Qt signals; run() is the thread entry point.
    """
    finished = Signal()                  # emitted once, on completion or cancellation
    progress = Signal(float, bool, str)  # (fraction 0..1, cancelled?, status message)
    def __init__(self, dataset_dir, dataset_version, name, to_celsius_gain, to_celsius_offset,
            border_margin, neighbour_radius, ignore_sun_reflections, sun_reflections):
        """Store the analysis configuration (border_margin arrives in percent)."""
        super().__init__()
        self.is_cancelled = False
        self.timestamp = datetime.datetime.utcnow().isoformat()
        self.dataset_dir = dataset_dir
        self.dataset_version = dataset_version
        self.name = name
        self.to_celsius_gain = to_celsius_gain
        self.to_celsius_offset = to_celsius_offset
        self.border_margin = 0.01 * border_margin  # percent -> fraction
        self.neighbour_radius = neighbour_radius
        self.ignore_sun_reflections = ignore_sun_reflections
        self.sun_reflections = sun_reflections
        self.progress_last_step = 0.0  # progress fraction reached by the previous phase
    def get_neighbours_median_temp(self, df_centers, neighbour_radius=7, column="mean_of_max_temps"):
        """Returns a list of mean temperatures of the neighbours of each module in `df_centers`.
        The `neighbour_radius` defines the circle radius in which to look for neighbouring modules.
        The `column` specifies which temperature column to use.
        Returns None when the worker was cancelled mid-way (callers must check)."""
        centers = np.array([[d["coordinates"][0], d["coordinates"][1]] for d in df_centers["geometry"]])
        tree = KDTree(centers)
        neighbor_idxs = tree.query_radius(centers, r=neighbour_radius)
        # get mean temperature of neighbors
        neighbour_mean_temps = []
        for row_idx, neighbor_idx in enumerate(neighbor_idxs):
            # this phase accounts for 1/5 of the total progress bar
            progress = self.progress_last_step + (row_idx / len(neighbor_idxs)) / 5
            if self.is_cancelled:
                self.progress.emit(progress, True, "Cancelled")
                self.finished.emit()
                return
            neighbor_idx = np.delete(neighbor_idx, np.nonzero(neighbor_idx == row_idx)) # remove the current module from list of neighbors
            mean_temp = df_centers.iloc[neighbor_idx][column].median()
            neighbour_mean_temps.append(mean_temp)
            self.progress.emit(progress, False, "Computing corrected {}...".format(" ".join(column.split("_"))))
            self.progress_last_step = progress
        return neighbour_mean_temps
    def run(self):
        """Thread entry point: aggregate patch temperatures, compute corrected
        columns and persist the results under <dataset_dir>/analyses/<name>."""
        # Patch directory layout differs between dataset versions.
        # NOTE(review): patches_dir stays undefined for any other version string.
        if self.dataset_version == "v1":
            patches_dir = os.path.join(self.dataset_dir, "patches_final", "radiometric")
        elif self.dataset_version == "v2":
            patches_dir = os.path.join(self.dataset_dir, "patches", "radiometric")
        file = os.path.join(self.dataset_dir, "mapping", "module_geolocations_refined.geojson")
        df, df_corners, df_centers = load_modules(file)
        temps = {}
        track_ids = sorted(get_immediate_subdirectories(patches_dir))
        for i, track_id in enumerate(track_ids):
            progress = (i / len(track_ids)) / 5
            if self.is_cancelled:
                self.progress.emit(progress, True, "Cancelled")
                self.finished.emit()
                return
            patch_files = sorted(glob.glob(os.path.join(patches_dir, track_id, "*")))
            if self.ignore_sun_reflections and self.sun_reflections is not None:
                patch_files = remove_patches_with_sun_reflection(patch_files, self.sun_reflections[track_id])
            temps[track_id] = get_patch_temps(patch_files, self.border_margin, self.to_celsius_gain, self.to_celsius_offset)
            self.progress.emit(progress, False, "Computing temperature distribution...")
            self.progress_last_step = progress
        mean_over_patches(df_corners, temps)
        mean_over_patches(df_centers, temps)
        # For each aggregate, subtract the median temperature of the neighbours.
        for patch_area_agg in ["min", "max", "mean", "median"]:
            column = "{}_temp".format(patch_area_agg)
            neighbour_mean_temps = self.get_neighbours_median_temp(df_centers, neighbour_radius=self.neighbour_radius, column=column)
            if neighbour_mean_temps is None: # cancelled
                return
            df_corners["{}_corrected".format(column)] = df_corners.loc[:, column] - neighbour_mean_temps
            df_centers["{}_corrected".format(column)] = df_centers.loc[:, column] - neighbour_mean_temps
        # merge back into single geodataframe
        # NOTE(review): DataFrame.append was removed in pandas 2.x; pd.concat is the replacement.
        df_merged = df_corners.append(df_centers)
        # reuse WGS84 coordinates of original dataframe
        df_merged = df_merged.reset_index()
        df_merged = df_merged.set_index(["track_id", "geometry_type"])
        df = df.reset_index()
        df = df.set_index(["track_id", "geometry_type"])
        df_merged.update(df.loc[:, "geometry"])
        # write results to disk
        self.progress.emit(1, False, "Saving analysis results...")
        save_path = os.path.join(self.dataset_dir, "analyses", self.name)
        save_file = os.path.join(save_path, "results.geojson")
        print("Saving module temperature results in {}".format(save_file))
        os.makedirs(save_path, exist_ok=True)
        save_geojson(df_merged, open(save_file, "w"))
        print("Saving meta json in {}".format(os.path.join(save_path, "meta.json")))
        meta = {
            "type": "module_temperatures",
            "timestamp": self.timestamp,
            "dataset_dir": self.dataset_dir,
            "hyperparameters": {
                "border_margin": self.border_margin,
                "neighbour_radius": self.neighbour_radius,
                "ignore_sun_reflections": self.ignore_sun_reflections
            }
        }
        if self.ignore_sun_reflections and self.sun_reflections is not None:
            meta["hyperparameters"]["sun_reflections"] = self.sun_reflections
        json.dump(meta, open(os.path.join(save_path, "meta.json"), "w"))
        self.progress.emit(1, False, "Done")
        self.finished.emit()
| LukasBommes/PV-Hawk-Viewer | src/analysis/temperatures.py | temperatures.py | py | 8,344 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "utils.geojson.load_geojson",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utils.geojson.coords_wgs84_to_ltp",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 37,
"usage_type": "call"
},
{
"api... |
21255341912 | from sys import stdin
from collections import defaultdict
stdin = open("input.txt", "r")
memory = defaultdict(int)
def apply_mask(val):
    # OR-in the '1' bits of the most recently parsed mask.
    # Relies on the module-level `ones_mask` set while parsing 'mask = ...' lines.
    return val | ones_mask
def set_bit(val, idx, bit):
    """Return val with bit `idx` set (when bit == 1) or cleared (otherwise),
    keeping the result within 64 bits."""
    mask = 1 << idx
    if bit == 1:
        return val | mask
    return val & ((2 ** 64 - 1) ^ mask)
def gen_floating(base_address, idx, cur_address, floating):
    """Yield every address produced by expanding the floating bit positions.

    `floating` must be sorted ascending; the recursion walks bit positions
    from `idx` upward and emits the accumulated address once past the
    highest floating bit.
    """
    if not floating or idx > floating[-1]:
        yield cur_address
        return
    if idx in floating:
        # branch on both values of the floating bit
        for branch in (0, 1):
            yield from gen_floating(base_address, idx + 1, set_bit(cur_address, idx, branch), floating)
    else:
        yield from gen_floating(base_address, idx + 1, cur_address, floating)
# Main program: execute each line of the docking initialization program.
for line in stdin:
    command, value = line.split(' = ')
    if command[1] == 'e':
        # 'mem[<addr>] = <value>'  (second char 'e' distinguishes it from 'mask')
        address = int(command.split('[')[1][:-1])
        value = int(value)
        address = apply_mask(address)
        # Write the value to every address generated by the floating bits.
        for cur_address in gen_floating(address, 0, address, floating):
            memory[cur_address] = value
    else:
        # 'mask = ...': record positions of 'X' (floating) and '1' bits,
        # scanning from the least-significant bit (hence the reversal).
        ones_mask = 0
        floating = []
        for idx, ch in enumerate(value[::-1].strip()):
            if ch == 'X':
                floating.append(idx)
            elif ch == '1':
                ones_mask |= 2 ** idx
# Puzzle answer: sum of all values left in memory.
print(sum(memory.values()))
| mmehas/advent_of_code_2020 | src/14_hard.py | 14_hard.py | py | 1,346 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 28,
"usage_type": "name"
}
] |
32704990583 | from itertools import permutations
def isPrime(n):
    """Return True iff n is a prime number (trial division up to sqrt(n))."""
    if n < 2:
        return False
    return all(n % divisor for divisor in range(2, int(n ** 0.5) + 1))
def solution(numbers):
    """Count the distinct primes that can be formed from digit permutations
    of the digit string `numbers` (leading zeros collapse via int())."""
    digits = list(numbers)
    candidates = set()
    # collect every number formed by permutations of 1..len(digits) digits
    for length in range(1, len(digits) + 1):
        for combo in permutations(digits, length):
            candidates.add(int("".join(combo)))
    # count how many distinct candidates are prime
    return len([value for value in candidates if isPrime(value)])
| Min-su-Jeong/Algorithm_Study | 프로그래머스/lv2/42839. 소수 찾기/소수 찾기.py | 소수 찾기.py | py | 645 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "itertools.permutations",
"line_number": 18,
"usage_type": "call"
}
] |
30768822737 | import tkinter as tk
from PIL import ImageTk, Image
import io
class PlantWidget(tk.Frame):  # tk.Frame
    """Card widget for a single plant: photo, details, 'Add to Pot' and 'Delete'."""

    def delete_widget_and_data(self):
        """Delete the plant from persistent storage and remove this card."""
        self.my_plant_service.delete_user_plant(self.values["_id"])
        self.destroy()

    def add_plant_to_pot(self):
        """Mark the plant as planted, refresh the pot view and remove this card."""
        self.my_plant_service.handle_user_plant(self.values["_id"], True)
        self.load_planted_plants()
        self.destroy()

    def __init__(
        self, parent, plant, my_plant_service, load_planted_plants, update_pie_chart
    ):
        """Build the card for `plant` — a dict with keys "_id", "name", "type",
        "watering", "desc" and optional raw "image_data" bytes from MongoDB."""
        super().__init__(parent, borderwidth=2, relief="groove")
        self.values = plant
        self.my_plant_service = my_plant_service
        self.load_planted_plants = load_planted_plants
        self.update_pie_chart = update_pie_chart
        self.grid_columnconfigure(0, weight=1)
        # retrieve image data from MongoDB
        image_label = None
        image_data = self.values.get("image_data")
        if image_data is not None:
            # convert image data to PIL Image object
            image = Image.open(io.BytesIO(image_data))
            # keep a reference on self so tkinter does not garbage-collect the photo
            self.image_photo = ImageTk.PhotoImage(image.resize((150, 170)))
            # create label with image
            image_label = tk.Label(self, image=self.image_photo, height=170, width=150)
        name = tk.Label(
            self,
            justify="left",
            text="Name: \t\t{}".format(self.values["name"]),
        )
        # renamed from 'type' to avoid shadowing the builtin
        plant_type = tk.Label(
            self,
            justify="left",
            text="Type: \t\t{}".format(self.values["type"]),
        )
        watering = tk.Label(
            self,
            justify="left",
            text="Watering: \t{}".format(self.values["watering"]),
        )
        description = tk.Label(
            self,
            justify="left",
            text="Description: \t{}".format(self.values["desc"]),
            wraplength=720,
        )
        self.delete_button = tk.Button(
            self,
            text="Delete",
            command=lambda: (self.delete_widget_and_data(), self.update_pie_chart()),
            width=20,
            padx=5,
            pady=5,
        )
        self.add_to_pot_button = tk.Button(
            self,
            text="Add to Pot",
            command=lambda: (self.add_plant_to_pot(), self.update_pie_chart()),
            width=20,
            padx=5,
            pady=5,
        )
        self.add_to_pot_button.grid(row=0, column=1, padx=5, pady=5, sticky="nw")
        name.grid(row=0, column=0, padx=10, pady=10, sticky="nw")
        plant_type.grid(row=1, column=0, padx=10, pady=10, sticky="nw")
        watering.grid(row=2, column=0, padx=10, pady=10, sticky="nw")
        description.grid(row=3, column=0, padx=10, pady=10, sticky="nw")
        self.delete_button.grid(row=3, column=1, padx=5, pady=5, sticky="nw")
        # Bug fix: only grid the image label when an image actually exists;
        # previously this line raised NameError for plants without image_data.
        if image_label is not None:
            image_label.grid(row=0, column=1, padx=5, pady=5, sticky="nw", rowspan=3)
| JuleZg/pyfloraapp | model/plant_widget.py | plant_widget.py | py | 2,925 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Frame",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_num... |
33458341126 | """
http://stackoverflow.com/questions/3612094/better-way-to-zip-files-in-python-zip-a-whole-directory-with-a-single-command?lq=1
http://stackoverflow.com/questions/10060069/safely-extract-zip-or-tar-using-python
"""
import os
import string
import zipfile
from .log import warning
def zipdir(target_dir, dest_file, compression=zipfile.ZIP_DEFLATED):
    """Zip the contents of target_dir into dest_file.

    Member names are stored relative to target_dir, so the archive carries
    no absolute path prefix.
    """
    rootlen = len(target_dir) + 1
    # Context manager ensures the archive is flushed and closed
    # (the original ZipFile handle was never closed).
    with zipfile.ZipFile(dest_file, "w", compression=compression) as z:
        for base, _, filenames in os.walk(target_dir):
            for n in filenames:
                p = os.path.join(base, n)
                z.write(p, p[rootlen:])
def unzipdir(zip_name, dest, overwrite=False):
    """Extract zip_name into dest, refusing unsafe member names.

    Raises:
        Exception: if dest already exists and overwrite is False.
    """
    if not overwrite and os.path.exists(dest):
        raise Exception("%s already exists!" % dest)
    # Context manager closes the archive (the original handle was leaked).
    with zipfile.ZipFile(zip_name) as zfile:
        for name in zfile.namelist():
            # This precaution is necessary with Python < 2.7.4: reject path
            # traversal, drive letters and names not starting with a letter
            # or underscore.
            if (
                ".." not in name
                and ":" not in name
                and name[0] in string.ascii_letters + "_"
            ):
                zfile.extract(name, dest)
            else:
                warning("unzipdir: didn't extract %s", name)
if __name__ == "__main__":
    # Manual smoke test: zip the test suite, unzip it, and exercise the
    # overwrite guard. Requires user interaction (input()).
    import shutil
    zf = "tmp_zipdirtest.zip"
    if os.path.exists(zf):
        os.remove(zf)
    zipdir("../tests", zf)
    dest = "tmp_tests"
    if os.path.exists(dest):
        shutil.rmtree(dest)
    unzipdir(zf, dest)
    input(f"Check {zf} and {dest}, then press Enter to continue the test.")
    # Extracting again without overwrite must raise.
    try:
        unzipdir(zf, dest)
    except:
        pass
    else:
        assert False, "An exception should be raised when the destination exists!"
    # overwrite=True must succeed even though dest exists.
    unzipdir("tmp_zipdirtest.zip", dest, overwrite=True)
    os.remove(zf)
    shutil.rmtree(dest)
| soundmud/soundrts | soundrts/lib/zipdir.py | zipdir.py | py | 1,722 | python | en | code | 37 | github-code | 1 | [
{
"api_name": "zipfile.ZIP_DEFLATED",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
... |
6150757713 | # coding: utf-8
import os
import random
import typing as t
from PIL.Image import new as createImage, Image, QUAD, BILINEAR
from PIL.ImageDraw import Draw, ImageDraw
from PIL.ImageFilter import SMOOTH
from PIL.ImageFont import FreeTypeFont, truetype
from io import BytesIO
import time
ColorTuple = t.Union[t.Tuple[int, int, int], t.Tuple[int, int, int, int]]
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
DEFAULT_FONTS = [os.path.join(DATA_DIR, 'DroidSansMono.ttf')]
class Captcha:
    """Generator for distorted text CAPTCHA images.

    Renders a random code with warped, rotated characters plus noise dots
    and a noise curve. The RNG is seeded from `key` (or the current time)
    in __init__, so instances with the same key reproduce the same images.
    """
    # Maps greyscale values to alpha for the paste mask (~1.97x brightening).
    lookup_table: t.List[int] = [int(i * 1.97) for i in range(256)]
    def __init__(self, width: int = 160, height: int = 60, key: int = None, length: int = 4,
            fonts: t.Optional[t.List[str]] = None, font_sizes: t.Optional[t.Tuple[int]] = None):
        """Configure image size, code length, fonts and the RNG seed."""
        self._width = width
        self._height = height
        self._length = length
        self._key = (key or int(time.time())) + random.randint(1,100)
        self._fonts = fonts or DEFAULT_FONTS
        self._font_sizes = font_sizes or (42, 50, 56)
        self._truefonts: t.List[FreeTypeFont] = []
        random.seed(self._key)
    @property
    def truefonts(self) -> t.List[FreeTypeFont]:
        """Lazily load and cache one FreeType font per (font, size) pair."""
        if self._truefonts:
            return self._truefonts
        self._truefonts = [
            truetype(n, s)
            for n in self._fonts
            for s in self._font_sizes
        ]
        return self._truefonts
    @staticmethod
    def create_noise_curve(image: Image, color: ColorTuple) -> Image:
        """Draw one random arc across the image (in place) and return it."""
        w, h = image.size
        x1 = random.randint(0, int(w / 5))
        x2 = random.randint(w - int(w / 5), w)
        y1 = random.randint(int(h / 5), h - int(h / 5))
        y2 = random.randint(y1, h - int(h / 5))
        points = [x1, y1, x2, y2]
        end = random.randint(160, 200)
        start = random.randint(0, 20)
        Draw(image).arc(points, start, end, fill=color)
        return image
    @staticmethod
    def create_noise_dots(image: Image, color: ColorTuple, width: int = 3, number: int = 30) -> Image:
        """Scatter `number` short noise strokes over the image (in place)."""
        draw = Draw(image)
        w, h = image.size
        while number:
            x1 = random.randint(0, w)
            y1 = random.randint(0, h)
            draw.line(((x1, y1), (x1 - 1, y1 - 1)), fill=color, width=width)
            number -= 1
        return image
    def _draw_character(self, c: str, draw: ImageDraw, color: ColorTuple) -> Image:
        """Render one character onto its own RGBA tile, rotated and warped."""
        font = random.choice(self.truefonts)
        left, top, right, bottom = draw.textbbox((0, 0), c, font=font)
        # scale the text box up and guard against zero-size tiles
        w = int((right - left)*1.7) or 1
        h = int((bottom - top)*1.7) or 1
        dx1 = random.randint(0, 4)
        dy1 = random.randint(0, 6)
        im = createImage('RGBA', (w + dx1, h + dy1))
        Draw(im).text((dx1, dy1), c, font=font, fill=color)
        # rotate
        im = im.crop(im.getbbox())
        im = im.rotate(random.uniform(-30, 30), BILINEAR, expand=True)
        # warp
        dx2 = w * random.uniform(0.1, 0.3)
        dy2 = h * random.uniform(0.2, 0.3)
        x1 = int(random.uniform(-dx2, dx2))
        y1 = int(random.uniform(-dy2, dy2))
        x2 = int(random.uniform(-dx2, dx2))
        y2 = int(random.uniform(-dy2, dy2))
        w2 = w + abs(x1) + abs(x2)
        h2 = h + abs(y1) + abs(y2)
        # quadrilateral source coordinates for the perspective transform
        data = (
            x1, y1,
            -x1, h2 - y2,
            w2 + x2, h2 + y2,
            w2 - x2, -y1,
        )
        im = im.resize((w2, h2))
        im = im.transform((w, h), QUAD, data)
        return im
    def create_captcha_image(self, chars: str, color: ColorTuple, background: ColorTuple) -> Image:
        """Compose the character tiles (with random spacing) onto the background."""
        image = createImage('RGB', (self._width, self._height), background)
        draw = Draw(image)
        images: t.List[Image] = []
        for c in chars:
            # randomly interleave blank tiles for irregular spacing
            if random.random() > 0.5:
                images.append(self._draw_character(" ", draw, color))
            images.append(self._draw_character(c, draw, color))
        text_width = sum([im.size[0] for im in images])
        width = max(text_width, self._width)
        image = image.resize((width, self._height))
        average = int(text_width / len(chars))
        rand = int(0.25 * average)
        offset = int(average * 0.1)
        for im in images:
            w, h = im.size
            # greyscale-derived alpha mask keeps the warped glyph edges soft
            mask = im.convert('L').point(self.lookup_table)
            image.paste(im, (offset, int((self._height - h) / 2)), mask)
            offset = offset + w + random.randint(-rand, 0)
        if width > self._width:
            image = image.resize((self._width, self._height))
        return image
    def generate_image(self, chars: str) -> Image:
        """Render `chars` with random colours, add noise and smooth the result."""
        background = random_color(238, 255)
        color = random_color(10, 200, random.randint(220, 255))
        im = self.create_captcha_image(chars, color, background)
        self.create_noise_dots(im, color)
        self.create_noise_curve(im, color)
        im = im.filter(SMOOTH)
        return im
    # NOTE(review): '(BytesIO, str)' is a tuple expression, not a typing
    # annotation — t.Tuple[BytesIO, str] would be the correct form.
    def generate(self, format: str = 'png') -> (BytesIO,str):
        """Return (in-memory image stream, code) for a fresh random code."""
        code = generate_code(self._length)
        im = self.generate_image(code)
        out = BytesIO()
        im.save(out, format=format)
        out.seek(0)
        return out, code
    def write(self, output: str, format: str = 'png') -> (Image, str):
        """Save a fresh captcha to `output` and return (image, code)."""
        code = generate_code(self._length)
        im = self.generate_image(code)
        im.save(output, format=format)
        return im, code
def random_color(start: int, end: int, opacity: t.Optional[int] = None) -> "ColorTuple":
    """Draw a random colour; each RGB channel is uniform on [start, end].

    Returns an (r, g, b) tuple, or (r, g, b, opacity) when opacity is given.
    """
    rgb = tuple(random.randint(start, end) for _ in range(3))
    if opacity is None:
        return rgb
    return rgb + (opacity,)
def generate_code(length: int = 4):
characters = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(characters) for _ in range(length)) | Ya0h4cker/MyCTFproblems | ACTF 2023/story/story/utils/captcha.py | captcha.py | py | 5,858 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
3367828967 | #!/usr/bin/env python3
import json
import sys
import http.client as http
import subprocess
import re
import os
###
# COLOR CONSTANTS
###
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
###
# CONFIGURABLE
###
JIRA_HOST = 'jira.atlassian.com'
JIRA_API_VERSION = '2'
GITLAB_HOST = 'gitlab.com'
GITLAB_API_VERSION = 'v4'
TARGET_BRANCH = 'develop' #TODO replace with repo default branch
TASK_NAME_REGEX='TCRM-[0-9]*'
SQUASH = True
REMOVE_SOURCE_BRANCH = True
# Fail fast when the required API tokens are missing from the environment.
if 'JIRA_TOKEN' not in os.environ:
    print(f'{RED}There is no "JIRA_TOKEN" in ENV variables! Exiting...{NC}')
    exit(1)
JIRA_TOKEN = os.environ['JIRA_TOKEN']
if 'GITLAB_TOKEN' not in os.environ:
    print(f'{RED}There is no "GITLAB_TOKEN" in ENV variables! Exiting...{NC}')
    exit(1)
GITLAB_TOKEN = os.environ['GITLAB_TOKEN']
def get_current_branch():
    """Return the name of the branch currently checked out in this repository."""
    completed = subprocess.run(['git', 'branch', '--show-current'], capture_output=True, text=True)
    return completed.stdout.strip('\n')
def send_https_request(method, host, path, headers, body = '', expected_status=200):
    """Issue an HTTPS request and return the parsed JSON response body.

    Raises an Exception with full request/response details when the status
    code differs from expected_status. The connection is always closed.
    """
    conn = http.HTTPSConnection(host)
    try:
        conn.request(method, path, body, headers = headers)
        resp = conn.getresponse()
        if resp.status != expected_status:
            raise Exception(
                f'{RED}Request failed{NC}\n' \
                f'request: {method} https://{host}{path}\n{body}\n\n' \
                f'response status: {str(resp.status)}\n' \
                f'response body: {resp.read().decode("utf-8")}'
            )
        return json.load(resp)
    finally:
        conn.close()
def get_project_id():
    """Resolve the GitLab project id of the repository in the current directory.

    Matches the 'origin' remote URL against the projects the token can
    access; raises Exception when no project with that URL is found.
    """
    remote_url = subprocess.run(['git', 'remote', 'get-url', 'origin'], capture_output=True, text=True).stdout.strip('\n')
    project_name = re.search('/([a-zA-Z0-9\-]*)\.git', remote_url).group(1)
    min_access_level = '30' # Developer
    max_per_page = '100' # Gitlab API max. FIXME: because of max size per page there is some chance of not finding the project in large repos
    response = send_https_request(
        'GET',
        GITLAB_HOST,
        f'/api/{GITLAB_API_VERSION}/projects?' \
        'archived=false' \
        f'&min_access_level={min_access_level}' \
        '&simple=true' \
        '&with_merge_requests_enabled=true' \
        '&pagination=keyset' \
        f'&per_page={max_per_page}' \
        '&sort=desc' \
        '&order_by=id' \
        f'&search={project_name}',
        headers = {'PRIVATE-TOKEN': GITLAB_TOKEN},
    )
    # Compare against both URLs since the remote may use SSH or HTTP.
    for project in response:
        if remote_url in [ project['ssh_url_to_repo'], project['http_url_to_repo'] ]:
            return project['id']
    raise Exception(f'{RED}Cannot find project whith url:{NC} {remote_url}')
def get_user_id():
    """Return the GitLab user id belonging to GITLAB_TOKEN."""
    user = send_https_request(
        'GET',
        GITLAB_HOST,
        f'/api/{GITLAB_API_VERSION}/user',
        headers = {'PRIVATE-TOKEN': GITLAB_TOKEN}
    )
    return user['id']
def get_already_exist_mr_link(project_id, current_branch, target_branch):
    """Return the web URL of an already-open MR for this branch pair, or None."""
    open_mrs = send_https_request(
        'GET',
        GITLAB_HOST,
        f'/api/{GITLAB_API_VERSION}/projects/{project_id}/merge_requests?' \
        'state=opened'\
        f'&source_branch={current_branch}' \
        f'&target_branch={target_branch}',
        headers = {'PRIVATE-TOKEN': GITLAB_TOKEN, 'Content-Type': 'application/json'}
    )
    return open_mrs[0]['web_url'] if open_mrs else None
def create_mr_request_body(title, current_branch, target_branch, user_id, squash=True, remove_source_branch=True):
    """Serialize the GitLab merge-request creation payload as a JSON string."""
    payload = dict(
        title=title,
        source_branch=current_branch,
        target_branch=target_branch,
        assignee_id=user_id,
        squash=squash,
        remove_source_branch=remove_source_branch,
    )
    return json.dumps(payload)
def create_mr(project_id, title, current_branch, target_branch, user_id, squash=True, remove_source_branch=True):
    """Open a merge request on GitLab and return its web URL."""
    payload = create_mr_request_body(title, current_branch, target_branch, user_id, squash, remove_source_branch)
    created = send_https_request(
        'POST',
        GITLAB_HOST,
        f'/api/{GITLAB_API_VERSION}/projects/{project_id}/merge_requests',
        body = payload,
        headers = {'PRIVATE-TOKEN': GITLAB_TOKEN, 'Content-Type': 'application/json'},
        expected_status = 201
    )
    return created['web_url']
def get_task_title(task):
    """Return the Jira issue summary for the given task key."""
    issue = send_https_request(
        'GET',
        JIRA_HOST,
        f'/rest/api/{JIRA_API_VERSION}/issue/{task}',
        headers = {'Authorization': f'Bearer {JIRA_TOKEN}'}
    )
    return issue['fields']['summary']
def get_service_name_from(mr_link):
    """Extract the GitLab project (service) name from a merge-request web URL."""
    match = re.search('\/([a-zA-Z\-]*)\/-\/merge_requests', mr_link)
    return match.group(1)
def is_issue_link_already_exist(task, mr_link):
    """Return True when the Jira issue already has a remote link pointing at mr_link."""
    remote_links = send_https_request(
        'GET',
        JIRA_HOST,
        f'/rest/api/{JIRA_API_VERSION}/issue/{task}/remotelink',
        headers = {'Authorization': f'Bearer {JIRA_TOKEN}'}
    )
    return any(issue['object']['url'] == mr_link for issue in remote_links)
def create_jira_issue_link_request_body(mr_link):
    """Serialize the Jira remote-link payload pointing at the merge request."""
    link_object = {
        'url': mr_link,
        'title': get_service_name_from(mr_link),
        'icon': {
            'url16x16': f'https://{GITLAB_HOST}/favicon.ico'
        },
    }
    return json.dumps({'object': link_object})
def add_jira_issue_link(task, mr_link):
    """Attach the MR URL to the Jira issue as a remote link."""
    body = create_jira_issue_link_request_body(mr_link)
    return send_https_request(
        'POST',
        JIRA_HOST,
        f'/rest/api/{JIRA_API_VERSION}/issue/{task}/remotelink',
        body = body,
        headers = {'Authorization': f'Bearer {JIRA_TOKEN}', 'Content-Type': 'application/json'},
        expected_status = 201
    )
def main():
    """Open (or find) a GitLab MR for the current branch and link it in Jira.

    Exits with status 1 when run on the target branch or on a branch whose
    name does not match TASK_NAME_REGEX.
    """
    print('>>> Geting current branch name...')
    current_branch = get_current_branch()
    print(current_branch)
    if current_branch == TARGET_BRANCH:
        print(f'{RED}Cannot open MR:{NC} current branch is target ({TARGET_BRANCH})')
        exit(1)
    print('>>> Extracting task name from branch name...')
    match = re.match(TASK_NAME_REGEX, current_branch) # Validate branch name format
    if not match:
        # Bug fix: the message previously interpolated TARGET_BRANCH instead
        # of the offending current branch name.
        print(f"{RED}Cannot open MR:{NC} current branch name ({current_branch}) doesn't start with 'TCRM-[0-9]*'")
        exit(1)
    task = match.group()
    print(task)
    print('>>> Getting GitLab project id...')
    project_id = get_project_id()
    print(project_id)
    print('>>> Check if MR is already oppened...')
    mr_link = get_already_exist_mr_link(project_id, current_branch, TARGET_BRANCH)
    if mr_link is not None:
        print(f'MR already opened:\n{BLUE}{mr_link}{NC}')
    else:
        print('No open MR was found. A new one will be created')
        print('>>> Getting task title...')
        title = task + get_task_title(task)
        print(title)
        print('>>> Creating MR...')
        mr_link = create_mr(
            project_id,
            title,
            current_branch,
            TARGET_BRANCH,
            get_user_id(),
            SQUASH,
            REMOVE_SOURCE_BRANCH
        )
        print(f'{GREEN}MR opened:{NC} {current_branch}')
        print(f'{BLUE}{mr_link}{NC}')
    print('>>> Check if MR link in task is already exist...')
    if is_issue_link_already_exist(task, mr_link):
        print('MR link already added to task')
    else:
        print('MR link not found')
        print('>>> Adding MR link to task...')
        add_jira_issue_link(task, mr_link)
        print(f'{GREEN}MR link added to task{NC}')
        print(f'{BLUE}https://{JIRA_HOST}/browse/{task}{NC}')
if __name__ == "__main__":
    main()
| Xez99/openmr | open-mr.py | open-mr.py | py | 7,676 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"lin... |
14281799287 | from q2generator import *
import math
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
import pdb
import random
def make_deepcopy(Switches, Graph_sz):
    """Create an independent Graph_sz x Graph_sz copy of a switch-setting grid."""
    copied = []
    for y in range(Graph_sz):
        copied.append([Switches[y][x] for x in range(Graph_sz)])
    return copied
def mh_w_gibbs(o, G, num_iter, error_prob=0.1):
    """Metropolis-within-Gibbs sampler for the switch graph.

    Per iteration the start position is Gibbs-sampled (categorical over all
    lattice nodes given the current switch settings) and every switch is then
    updated with a single Metropolis-Hastings proposal drawn uniformly from
    {1, 2, 3}.

    Args:
        o: observation sequence.
        G: lattice graph.
        num_iter: number of sampling iterations.
        error_prob: observation error probability forwarded to cond_L.

    Returns:
        (s, X): sampled start-position nodes and switch-setting matrices,
        each of length num_iter + 1 (including the initial state).
    """
    s = []  # start-position samples
    X = []  # switch-setting samples
    accepted = 0
    proposals = 0
    X.append(sample_switch_states(G.lattice_size))  # initial switch state
    s.append(sample_start_pos(G))                   # initial start position
    for n in range(num_iter):
        # --- Gibbs step for the start position ---
        log_likelihoods = []
        for r in range(G.lattice_size):
            for c in range(G.lattice_size):
                switches_prev_it = X[-1]
                log_likelihoods.append(cond_L(o, G, G.get_node(r, c), switches_prev_it, error_prob))
        # 'Un-log' with the max subtracted for numerical stability, normalize,
        # then draw a categorical sample for the new start position.
        likelihoods = np.exp(log_likelihoods - np.max(log_likelihoods))
        probabilities = likelihoods / np.sum(likelihoods)
        s_new = np.argmax(np.random.multinomial(1, probabilities))
        s.append(extract_start_pos(len(X[0]), s_new, G))
        # --- Metropolis-Hastings sweep over all switches ---
        X_previous = make_deepcopy(X[-1], G.lattice_size)
        # accepted settings of this sweep (generalized from the hard-coded 3x3 grid)
        x_new = [[0] * G.lattice_size for _ in range(G.lattice_size)]
        X_proposal = make_deepcopy(X[-1], G.lattice_size)
        for row in range(G.lattice_size):
            for col in range(G.lattice_size):
                s_last_it = s[-1]  # condition on this iteration's start position
                x_proposal = np.random.randint(1, 4)  # uniform proposal over {1,2,3}
                X_proposal[row][col] = x_proposal
                x_previous = X_previous[row][col]  # kept in case the proposal is rejected
                logL_previous = cond_L(o, G, s_last_it, X_previous, error_prob)
                logL_proposal = cond_L(o, G, s_last_it, X_proposal, error_prob)
                # min(exp(d), 1) == exp(min(d, 0)); clamping the exponent first
                # avoids OverflowError for large positive log-likelihood ratios.
                acceptance_prob = math.exp(min(logL_proposal - logL_previous, 0.0))
                if np.random.rand() < acceptance_prob:
                    accepted += 1
                    X_previous = make_deepcopy(X_proposal, G.lattice_size)
                    x_new[row][col] = x_proposal
                else:
                    X_proposal = make_deepcopy(X_previous, G.lattice_size)
                    x_new[row][col] = x_previous
                proposals += 1
        X.append(x_new)
    if proposals:  # avoid ZeroDivisionError when num_iter == 0
        print('Acceptance rate ', accepted / proposals)
    return s, X
def extract_start_pos(sz, new_s1, G):
    """Map a flat categorical index back to the graph node at (row, col).

    Args:
        sz: lattice size (number of columns per row).
        new_s1: flat index of the sampled start position.
        G: the lattice graph.

    Returns:
        The node object G.get_node(row, col).
    """
    # divmod uses exact integer arithmetic; the original (int)(new_s1 / sz)
    # relied on float division, which can lose precision for large indices.
    r, c = divmod(new_s1, sz)
    return G.get_node(r, c)
def gibbs(o, G, num_iter, prob_err=0.1):
    """Plain Gibbs sampler for the switch graph.

    Alternates between (a) categorically sampling the start position over all
    lattice nodes and (b) resampling each switch from its full conditional
    over the settings {1, 2, 3} with a uniform (1/3) prior.

    Args:
        o: observation sequence.
        G: lattice graph.
        num_iter: number of sampling iterations.
        prob_err: observation error probability forwarded to cond_L.

    Returns:
        (s, X): sampled start-position nodes and switch-setting matrices.
    """
    # Dead locals from the original (acceptancerate, proposals, temp) removed:
    # a Gibbs sampler has no accept/reject step.
    s = []  # start-position samples
    X = []  # switch-setting samples
    graph_sz = len(G.G)
    X.append(sample_switch_states(G.lattice_size))  # initial switch state
    s.append(sample_start_pos(G))                   # initial start position
    for n in range(num_iter):
        # --- sample the start position given the current switch settings ---
        log_likelihoods = []
        for row in range(graph_sz):
            for col in range(graph_sz):
                start_pos = G.get_node(row, col)
                last_switchsetting = X[-1]
                log_likelihoods.append(cond_L(o, G, start_pos, last_switchsetting, prob_err))
        # un-log with the max subtracted for numerical stability, then normalize
        Likelihoods = np.exp(log_likelihoods - np.max(log_likelihoods))
        probabilities = Likelihoods / np.sum(Likelihoods)
        samples = np.random.multinomial(1, probabilities)
        new_s1 = np.argmax(samples)
        s.append(extract_start_pos(len(X[0]), new_s1, G))
        # --- resample every switch conditioned on all the others ---
        X_last_sample = make_deepcopy(X[-1], G.lattice_size)
        for row in range(graph_sz):
            for col in range(graph_sz):
                log_likelihoods = []
                for switch_setting in range(1, 4):
                    X_last_sample[row][col] = switch_setting
                    last_start_pos = s[-1]
                    # uniform prior of 1/3 over the three switch settings
                    logL = cond_L(o, G, last_start_pos, X_last_sample, prob_err) + math.log(1 / 3)
                    log_likelihoods.append(logL)
                unlogedL = np.exp(log_likelihoods - np.max(log_likelihoods))
                normL = unlogedL / np.sum(unlogedL)
                samples = np.random.multinomial(1, normL)
                x_val_new = np.argmax(samples)
                X_last_sample[row][col] = 1 + x_val_new  # back to {1,2,3}
        X.append(X_last_sample)
    return s, X
def convert_array_to_matrix(array_list):
    """Reshape a flat 9-element list into a 3x3 nested-list matrix."""
    return [array_list[i:i + 3] for i in range(0, 9, 3)]
def convert_matrix_to_array(matrix):
    """Flatten a nested-list matrix into a single row-major list."""
    flat = []
    for row in matrix:
        flat.extend(row)
    return flat
def block_gibbs(o, G, num_iter, error_prob=0.1):
    """Blocked Gibbs sampler for the switch graph.

    The start position is Gibbs-sampled over all lattice nodes; the nine
    switches are then resampled jointly in three blocks of three switches
    (3**3 = 27 candidate settings per block).
    """
    s = [] # store samples for the start positions
    X = [] # store switch states
    X.append(sample_switch_states(G.lattice_size)) # generate initial switch state
    s.append(sample_start_pos(G)) # set the initial start position as the one at G[0][0]
    for n in range(num_iter):
        log_likelihoods=[]
        for row in range( G.lattice_size ):
            for col in range( G.lattice_size ):
                last_switch_setting = X[-1]
                # We go through every possible value of the start pos
                s1=G.get_node( row, col )
                logL = cond_L( o, G, s1, last_switch_setting ,error_prob)
                log_likelihoods.append( logL )
        Likleihood = np.exp( log_likelihoods - np.max( log_likelihoods ))
        #normalize the likelihoods
        probs = Likleihood / np.sum(Likleihood)
        # categorical sampling of new startposition
        s_new = np.argmax(np.random.multinomial(1,probs))
        # we extract a node for the index for the sampled start position and add it to our list of samples
        s.append( extract_start_pos( len(X[0]), s_new, G))
        # A list with the indicies of the 3 blocks as suggested by Seyong during
        # the help session
        block_1 = [0,2,4]
        block_2 = [3,5,7]
        block_3 = [1,6,8]
        block_indicies = [block_1, block_2, block_3]
        #block_indicies = [[0,2,4], [3,5,7], [3,5,7]]
        # We create an array of the switches in the last sampled switch setting(s) X
        last_switches=X[-1]
        X_tmp=make_deepcopy( last_switches, G.lattice_size )
        X_array = convert_matrix_to_array(X_tmp )
        for block in block_indicies:
            log_likelihoods = []
            # For each block we enumerate all possible switch settings of its
            # three positions; three nested loops cover all 27 combinations.
            for sw_b1 in range(1, 4):
                for sw_b2 in range(1,4):
                    for sw_b3 in range(1,4):
                        ind_b1 = block[0]
                        X_array[ind_b1] = sw_b1
                        ind_b2 = block[1]
                        X_array[ind_b2] = sw_b2
                        ind_b3 = block[2]
                        X_array[ind_b3] = sw_b3
                        # We convert the array into a matrix again so that we can send it into cond_L()
                        X_array =convert_array_to_matrix( X_array )
                        prev_start_pos = s[-1]
                        # uniform 1/3 prior per switch
                        logL = cond_L( o, G, prev_start_pos, X_array, error_prob)+math.log(1/3)
                        log_likelihoods.append(logL)
                        X_array = convert_matrix_to_array(X_array)
            unLogL =np.exp(log_likelihoods-np.max(log_likelihoods)) # unlogging
            probs = unLogL / np.sum(unLogL) #normalization
            # Categorical resampling
            x_new = np.argmax(np.random.multinomial( 1, probs ))
            # Decode the flat index x_new into this block's switch settings.
            # NOTE(review): after the loops sw_b2 == sw_b3 == 3, so extracter
            # effectively decodes x_new in base 3 — confirm this is intended.
            X_NEW=extracter(x_new,sw_b2,sw_b3,X_array,block)
            X_new =convert_array_to_matrix( X_NEW )
        X.append(X_new)
    return s, X
def extracter(x, sw_b2, sw_b3, X, b):
    """Decode the flat categorical index *x* into three switch settings.

    Writes the base-(sw_b2, sw_b3) digits of *x* (each offset by 1) into
    the positions of X listed in block *b*, and returns X.
    """
    quotient, remainder = divmod(x, sw_b3)
    X[b[0]] = x // (sw_b2 * sw_b3) + 1
    X[b[1]] = quotient % sw_b2 + 1
    X[b[2]] = remainder + 1
    return X
def cond_L(o,G,start,X,p):
    """Computes the conditional log-likelihood log p(o | start, X, p).

    Follows the deterministic path through graph G from *start* under
    switch settings X; at each step the "correct" signal is 0 when the
    previous exit direction was non-zero, otherwise the switch setting
    of the node that was entered through direction 0.  Each observation
    o[t] contributes log(1-p) when it matches and log(p) otherwise.
    """
    logL=0
    O_len=len(o)
    #Initialisation: We start by getting the next node/state from the starting node
    st_new=G.get_next_node(start,0, X)[0]
    curr_node=G.get_node(start.row,start.col)
    prev_dir=G.get_entry_direction(curr_node,st_new)
    for t in range (1,O_len):
        if prev_dir!=0:
            # if the previous direction was not 0, then we know that the next correct
            # observation should be zero
            if o[t]==0:
                # Therefore we add log(1-p) i.e 0.9 if the next observation is 0. Since
                # we did not enter through zero and now have to exit through another switch
                prob=1-p
                logP=math.log(prob)
                logL+=logP
            else:
                # if the observation is not zero it is incorrect. Therefore we add log(p)
                prob=p
                logP=math.log(prob)
                logL+=logP
        else:
            # We know that the previous observation was zero. Therefore if the switch setting is correct
            # we should exit through the switch setting
            true_switch_setting=X[st_new.row][st_new.col]
            if o[t]==true_switch_setting:
                prob=1-p
                logP=math.log(prob)
                logL+=logP
            else:
                prob=p
                logP=math.log(prob)
                logL+=logP
        # advance one node along the track
        start, st_new, prev_dir= next_state(st_new,G,start,X)
    return logL
def next_state(st_n, G, start, X):
    """Advance one step along the track.

    Returns (previous_node, successor_node, exit_direction), i.e. the
    node we just left, the node reached from it under switch settings X,
    and the direction used to enter the successor.
    Assumes the G lookups are side-effect free.
    """
    direction_in = G.get_entry_direction(start, st_n)
    successor = G.get_next_node(st_n, direction_in, X)[0]
    prev_node = G.get_node(st_n.row, st_n.col)
    succ_node = G.get_node(successor.row, successor.col)
    direction_out = G.get_entry_direction(prev_node, succ_node)
    return st_n, successor, direction_out
def calc_acc(burn_in, lag, s, s_truth):
    """Print the accuracy of a sampled start-position sequence.

    The samples are thinned (burn-in removed, then every *lag*-th kept),
    the most frequent start position is compared against the true one,
    and its relative frequency is reported as the accuracy.
    """
    s_b = s[burn_in:-1]
    s_lag = s_b[0::lag]
    s_str = convert_node_to_string(s_lag)
    cnt = Counter(s_str)
    # BUG FIX: the occurrence count is most_common(1)[0][1]; the previous
    # tmp1[0][0][1] indexed into the label *string* (e.g. the ' ' in "0 1"),
    # so int(...) on it would raise ValueError.
    most_common_label, most_common_count = cnt.most_common(1)[0]
    s1_truth_str_rep = str(convert_node_to_string([s_truth])[0])
    # only claim correctness when the most common sample matches the truth
    if most_common_label == s1_truth_str_rep:
        print('The most common sample was correct! It was: ', s1_truth_str_rep)
    else:
        print('The most common sample was NOT correct. It was: ', most_common_label)
    print('Accuracy: ', most_common_count / len(s_lag))
    print()
def convert_node_to_string(sequence):
    """Turn each node into its "row col" string representation."""
    return ["{} {}".format(node.row, node.col) for node in sequence]
def convergence_histogram_plotter(burn_in, lag, s, s_truth):
    """Computes histograms for 3 chains for all algorithms for s1.

    *s* holds three start-position sample lists in the order
    [MH-within-Gibbs, Gibbs, Blocked Gibbs]; each is thinned (burn-in
    removed, every *lag*-th sample kept) and plotted as a bar chart of
    start-position occurrences against the true s1.
    """
    print()
    true_s1_str = str(convert_node_to_string([s_truth])[0])
    s_mhg = s[0][burn_in:-1][0::lag]
    sg = s[1][burn_in:-1][0::lag]
    sbg = s[2][burn_in:-1][0::lag]
    # the three plot sections were identical except for data and title
    _plot_start_position_histogram(convert_node_to_string(sbg), 'Blocked Gibbs', true_s1_str)
    _plot_start_position_histogram(convert_node_to_string(sg), 'Gibbs', true_s1_str)
    _plot_start_position_histogram(convert_node_to_string(s_mhg), 'MH within Gibbs', true_s1_str)
def _plot_start_position_histogram(str_rep, algorithm_name, true_s1_str):
    """Render one bar chart of start-position occurrences for one algorithm."""
    labels = ['0 0', '0 1', '0 2', '1 0', '1 1', '1 2', '2 0', '2 1', '2 2']
    bars = ['(0,0)', '(0,1)', '(0,2)', '(1,0)', '(1,1)', '(1,2)', '(2,0)', '(2,1)', '(2,2)']
    heights = [str_rep.count(label) for label in labels]
    y_pos = np.arange(len(bars))
    plt.bar(y_pos, heights, color='r')
    plt.title('Start Positions - {} - last half of samples - True s1={}'.format(algorithm_name, true_s1_str))
    plt.xlabel('Start positions')
    plt.ylabel('Occurances')
    plt.xticks(y_pos, bars)
    plt.show()
def main():
    """Generate synthetic data, run the three samplers, and plot histograms."""
    data_seed = 9 #A seed to generate data
    n_lattice = 3
    T = 100
    p = 0.1
    G, X_truth, s_truth, o = generate_data(data_seed, n_lattice, T, p)
    num_iter = 1000
    seeds_chains=[225]#,11,2222] # Different seeds used to run different chains
    for seed in seeds_chains:
        np.random.seed(seed)
        #Running the samplers
        sbg, X=block_gibbs(o, G, num_iter)
        smhg, X = mh_w_gibbs(o, G, num_iter, p)
        sg, X = gibbs(o, G, num_iter, p)
        s_list=[smhg,sg,sbg]
        s_list_check_convergence=[smhg[0:200],sg[0:200],sbg[0:200]]
        s_list_check_convergence_second_half=[smhg[500:-1],sg[500:-1],sbg[500:-1]]
        burn_in=100
        lag=5
        # Computing accuracy for the algorithms
        # for s_s in s_list:
        #     calc_acc(burn_in,lag,s_s,s_truth[0])
        # Creating histograms for the startpositions
        #convergence_histogram_plotter(burn_in,lag,s_list,s_truth[0])
        #convergence_histogram_plotter(burn_in,lag,s_list_check_convergence,s_truth[0])
        convergence_histogram_plotter(0,lag,s_list_check_convergence_second_half,s_truth[0])
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
| sebastianstaahl/Statistical-Methods-in-Applied-Computer-Science | q2.py | q2.py | py | 14,249 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.exp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 37,
... |
28987698317 | from enum import IntEnum
import numpy as np
from libcbm.storage.series import Series
import numba
class SpinupState(IntEnum):
    """The possible spinup states for stands during spinup"""
    AnnualProcesses = 1  # run annual processes for the current rotation
    HistoricalEvent = 2  # apply the historical disturbance, then resume annual processes
    LastPassEvent = 3  # apply the final (last-pass) disturbance
    GrowToFinalAge = 4  # grow the stand up to its final (inventory) age
    Delay = 5  # run the post-last-pass delay steps
    End = 6  # spinup is finished for this stand
@numba.njit()
def _small_slow_diff(
    last_rotation_slow: np.ndarray, this_rotation_slow: np.ndarray
) -> np.ndarray:
    # Elementwise convergence test: is the slow-pool C difference between
    # successive rotations negligible?
    # NOTE(review): as written this computes |(a - b) / (a + b) / 2.0|,
    # i.e. |a - b| / (2 * (a + b)).  A mean-based relative difference
    # would be |a - b| / ((a + b) / 2), a factor of 4 larger.  This may
    # be deliberate for CBM-CFS3 parity - confirm before changing.
    return (
        abs(
            (last_rotation_slow - this_rotation_slow)
            / (last_rotation_slow + this_rotation_slow)
            / 2.0
        )
        < 0.001
    )
def advance_spinup_state(
    spinup_state: Series,
    age: Series,
    delay_step: Series,
    final_age: Series,
    delay: Series,
    return_interval: Series,
    rotation_num: Series,
    min_rotations: Series,
    max_rotations: Series,
    last_rotation_slow: Series,
    this_rotation_slow: Series,
    enabled: Series,
) -> np.ndarray:
    """Run the vectorized spinup finite state machine.  Passed values are
    not modified by this function.

    Args:
        spinup_state (Series): the current :py:class:`SpinupState` values
        age (Series): the age
        delay_step (Series): the spinup delay step, when in the Delay state
        final_age (Series): the final (inventory) age at the end of spinup
        delay (Series): the number of delay steps to perform in spinup
        return_interval (Series): years between historical disturbance
            rotations
        rotation_num (Series): the number of rotations already performed
        min_rotations (Series): the minimum number of rotations to perform
        max_rotations (Series): the maximum number of rotations to perform
        last_rotation_slow (Series): sum of slow C pools at the end of the
            last rotation, prior to historical disturbance
        this_rotation_slow (Series): the current sum of slow C pools
        enabled (Series): flag: 0 when a stand has finished spinup,
            1 while spinup is ongoing

    Returns:
        np.ndarray: the array of updated SpinupState.
    """
    result = spinup_state.copy().to_numpy()
    series_args = (
        spinup_state,
        age,
        delay_step,
        final_age,
        delay,
        return_interval,
        rotation_num,
        min_rotations,
        max_rotations,
        last_rotation_slow,
        this_rotation_slow,
        enabled,
    )
    _advance_spinup_state(
        age.length,
        *(series.to_numpy() for series in series_args),
        result,
    )
    return result
@numba.njit()
def _advance_spinup_state(
    n_stands: int,
    spinup_state: np.ndarray,
    age: np.ndarray,
    delay_step: np.ndarray,
    final_age: np.ndarray,
    delay: np.ndarray,
    return_interval: np.ndarray,
    rotation_num: np.ndarray,
    min_rotations: np.ndarray,
    max_rotations: np.ndarray,
    last_rotation_slow: np.ndarray,
    this_rotation_slow: np.ndarray,
    enabled: np.ndarray,
    out_state: np.ndarray,
) -> np.ndarray:
    """Numba kernel for advance_spinup_state: writes the next SpinupState
    for each of the n_stands stands into out_state and returns it.
    """
    for i in range(0, n_stands):
        state = spinup_state[i]
        # disabled stands are finished regardless of their current state
        if not enabled[i]:
            out_state[i] = SpinupState.End
            continue
        if state == SpinupState.AnnualProcesses:
            if age[i] >= (return_interval[i]):
                # convergence test on slow C; guarded so that 0/0 is
                # never evaluated when both sums are zero
                small_slow_diff = (
                    _small_slow_diff(
                        last_rotation_slow[i], this_rotation_slow[i]
                    )
                    if (last_rotation_slow[i] > 0)
                    | (this_rotation_slow[i] > 0)
                    else False
                )
                # finish rotations when converged past the minimum count,
                # or when the maximum rotation count is reached
                if ((rotation_num[i] > min_rotations[i]) & small_slow_diff) | (
                    rotation_num[i] >= max_rotations[i]
                ):
                    out_state[i] = SpinupState.LastPassEvent
                else:
                    out_state[i] = SpinupState.HistoricalEvent
            else:
                out_state[i] = SpinupState.AnnualProcesses
        elif state == SpinupState.HistoricalEvent:
            # after a historical disturbance, resume annual processes
            out_state[i] = SpinupState.AnnualProcesses
        elif state == SpinupState.LastPassEvent:
            if age[i] < final_age[i]:
                out_state[i] = SpinupState.GrowToFinalAge
            elif age[i] >= final_age[i]:
                if delay[i] > 0:
                    out_state[i] = SpinupState.Delay
                else:
                    out_state[i] = SpinupState.End
        elif state == SpinupState.Delay:
            # remain in Delay until the configured delay steps elapse
            if delay_step[i] < delay[i]:
                out_state[i] = SpinupState.Delay
            else:
                out_state[i] = SpinupState.End
        elif state == SpinupState.GrowToFinalAge:
            if age[i] < final_age[i]:
                out_state[i] = SpinupState.GrowToFinalAge
            else:
                if delay[i] > 0:
                    out_state[i] = SpinupState.Delay
                else:
                    out_state[i] = SpinupState.End
    return out_state
| cat-cfs/libcbm_py | libcbm/model/model_definition/spinup_engine.py | spinup_engine.py | py | 5,178 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "enum.IntEnum",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numba.njit",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_n... |
27800830147 | # -*- coding: utf-8 -*-
import numpy as np
from glob import glob
import math
import matplotlib.pyplot as plt
from collections import defaultdict
from collections import OrderedDict
from collections import namedtuple
import pickle
import re
import os.path
from scipy import stats
# used for label sorting
#import operator
# Configuration:
#-----------------
# PATHS to look for .jtl files
PATHS_API = ["/Users/chrig/benchmark-results/v3_api/"]
PATHS_BURST = ["/Users/chrig/benchmark-results/v3_api_burst/"]
PATHS_NON_BURST = ["/Users/chrig/benchmark-results/v3_api_non_burst/"]
PATHS_GOOGLE = ["/Users/chrig/benchmark-results/v3_api_google/"]
PATHS = PATHS_API+PATHS_BURST+PATHS_NON_BURST+PATHS_GOOGLE
#PATHS = PATHS_API+PATHS_BURST+PATHS_GOOGLE
#PATHS = PATHS_API+PATHS_BURST+PATHS_NON_BURST
#PATHS = PATHS_GOOGLE
#PATHS = PATHS_NON_BURST
#PATHS = PATHS_BURST
PATHS = ["/Users/chrig/benchmark-results/predict_burst_aws/"]
# Keys for grouping or selecting single instances
KEYS_GOOGLE = ["g1_g1", "s1_s1","s2_s1","s4_s1", "h2_s1", "h2_h2", "h4_s1"]
KEYS_AWS = ["c4l_s","c4l_m3m","m4l_s","m4l_m3m","c4xl_s","c4xl_m","m4xl_s"]
KEYS_BURST = ["mi_mi_burst", "s_mi_burst", "s_s_burst", "m_mi_burst" ,"m_s_burst"]
KEYS_NON_BURST = ["mi_mi", "s_mi","m_mi"]
KEYS_CONFIG_BURST = ["s_mi_burst", "s_s_burst"]+["m_mi_burst", "m_s_burst"]
KEYS_CONFIG_NON_BIRST = ["c4l_s","c4l_m3m","m4l_s","m4l_m3m"]
KEYS_BURST_NON_BURST = ["mi_mi_burst", "mi_mi", "s_mi_burst","s_mi", "m_mi_burst","m_mi"]
KEYS = KEYS_BURST+KEYS_NON_BURST+KEYS_AWS+KEYS_GOOGLE
#KEYS = KEYS_CONFIG_NON_BIRST
#KEYS = KEYS_CONFIG_BURST
#KEYS = KEYS_BURST_NON_BURST
#KEYS = KEYS_GOOGLE
#KEYS = KEYS_BURST
#KEYS = KEYS_BURST+KEYS_AWS+KEYS_GOOGLE
#KEYS = KEYS_NON_BURST
#KEYS= ["m_mi"]
#KEYS= ["322"]
FIG_NAME = "predict_burst_aws"
FIG_WIDTH = 10
FIG_HEIGHT = 7.5
ERROR_EVERY = 20
#enable data_file_overwrite
OVERWRITE_DATA_FILE = False
#Print results
PLOT_LINES = True
PLOT_SINGLE = True
PLOT_AVERAGE = False
AVERGAE_INDICATOR = False
SINGLE_INDICATOR = False
PLOT_BOXPLOT = True
ENABLE_FILE_INFO = False
SAMPLE_LENGTH = 120
PADDING = 0
GAP_LENGTH = 30
WILCOXONU = False
DROP_TIME = False
PRINT_LENGTH = 300
PRINT_START = 100
# What to compute: 1 = started reqeuest, 2 = finished, else = open requests
MODE = 2
class BenchmarkInstance(object):
def __init__(self,filename,rps,metrics):
self.filename = filename
self.rps = rps
self.metrics = metrics
class AverageInstance(object):
    """Aggregated statistics over all benchmark runs of one instance-type key."""
    def __init__(self, itk,averages,list_with_means, list_with_medians,mean_mean,mean_mean_std,mean_median,mean_median_std,median_req95,median_req99, count,metrics,mean_rsd):
        # itk: the instance-type key these aggregates belong to
        self.itk = itk
        # averages: per-second [mean, lower_error, upper_error] values
        self.averages = averages
        self.list_with_means = list_with_means
        self.list_with_medians = list_with_medians
        self.mean_mean = mean_mean
        self.mean_mean_std = mean_mean_std
        self.mean_median = mean_median
        self.mean_median_std = mean_median_std
        # required sample sizes for 95%/99% confidence (5% margin of error)
        self.median_req95=median_req95
        self.median_req99=median_req99
        # count: number of benchmark runs aggregated
        self.count = count
        self.metrics = metrics
        self.mean_rsd = mean_rsd
class Filename:
    """Parses a structured benchmark result file name into its components.

    Expected dash-separated shape, e.g.:
    j_exid435-morphia_distributed_api_NoSlaves_debian-...-<types_short>-<types_long>-j0-...
    """
    def __init__(self, filename):
        self.filename = filename
        self.splits = self.filename.split("-")
        # execution id: first segment without its leading two characters
        self.j_exid = self.splits[0][2:]
        # benchmark name: middle parts of the second, underscore-separated segment
        self.benchmark = '-'.join(self.splits[1].split("_")[1:-1])
        self.types_short = self.splits[3]
        self.types_long = self.splits[4]
        self.number_of_slaves = self.get_flag('-j')
        self.execution_time = self.get_flag('-dur')
        self.ramp_up_time = self.get_flag('-rt')
        self.number_of_threads = self.get_flag('-thr')
        self.iteration_count = self.get_flag('-iter')
        self.timestamp = self.get_flag('-tst')
        self.os = self.get_flag('-os')
    def get_flag(self,flag):
        """Return the alphanumeric value following *flag* in the file name,
        or the string "pattern not found" when the flag is absent."""
        # lookbehind keeps only the value characters after the flag itself
        pattern = '(?<={})[a-zA-Z0-9]*'.format(flag)
        tmp = re.search(pattern, self.filename)
        if tmp:
            return tmp.group(0)
        else:
            return "pattern not found"
#-------Methods----------
def save_object(obj, filename):
    """Serialize *obj* to *filename* with the highest pickle protocol."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
def load_object(filename):
    """Load and return the pickled object stored at *filename*."""
    with open(filename, 'rb') as source:
        return pickle.load(source)
def compute_started_requests_per_second(data):
    """Count successful requests per second of their start time.

    Args:
        data: iterable of request records with "timeStamp" (seconds
            since test start) and "success" fields.

    Returns:
        OrderedDict mapping second (ceiled start time) to the number of
        successful requests started in that second, sorted by second.
    """
    rps = defaultdict(int)
    for request in data:
        # failed requests are excluded from the throughput series
        if request["success"]:
            rps[math.ceil(request["timeStamp"])] += 1
    return OrderedDict(sorted(rps.items()))
def compute_finished_requests_per_second(data):
    """Count successful requests per second of their completion time.

    Completion time is the start time plus the elapsed duration
    (milliseconds, hence the /1000), ceiled to a whole second.

    Returns:
        OrderedDict mapping second to the number of successful requests
        finished in that second, sorted by second.
    """
    rps = defaultdict(int)
    for request in data:
        # failed requests are excluded from the throughput series
        if request["success"]:
            finish_second = math.ceil(request["timeStamp"] + (request["elapsed"] / 1000))
            rps[finish_second] += 1
    return OrderedDict(sorted(rps.items()))
def compute_open_requests_per_second(data):
    """Compute the number of in-flight (started but not finished)
    successful requests for every second of the test.

    Builds per-second started and finished counts, then accumulates
    their running difference.
    """
    started_requests = defaultdict(list)
    finished_requests = defaultdict(list)
    for request in data:
        if(request["success"]) == True:
            s = math.ceil(request["timeStamp"])
            started_requests[s].append(request)
            f = math.ceil(request["timeStamp"]+(request["elapsed"]/1000))
            finished_requests[f].append(request)
    # collapse the request lists into plain counts (keys are unchanged,
    # so mutating values while iterating items() is safe here)
    for skey, svalue in started_requests.items():
        started_requests[skey] = len(svalue)
    for fkey, fvalue in finished_requests.items():
        finished_requests[fkey] = len(fvalue)
    open_requests_per_second = defaultdict(list)
    last_count = 0
    # NOTE(review): range(max_finished_second) excludes the final second
    # itself, so the last data point (where the count returns to its
    # equilibrium) is never emitted - confirm whether that is intended.
    for j in range(sorted(list(finished_requests.keys()))[-1]):
        current_count = last_count+(started_requests.get(j,0) - finished_requests.get(j,0))
        open_requests_per_second[j] = current_count
        last_count = current_count
    return OrderedDict(sorted(open_requests_per_second.items()))
def remove_path_from_file(file):
    """Strip the configured results-directory prefix (one of PATHS) from
    *file*, returning the bare file name; *file* is returned unchanged
    when no configured path matches.
    """
    # later entries in PATHS take precedence when several of them match
    for prefix in reversed(PATHS):
        candidate = file.replace(prefix[:-1], "")
        if candidate != file:
            return candidate[1:]
    return file
def get_name(file):
    """Build the short display name "<exid> <benchmark> <types_short>".

    Example input (after stripping the path):
    j_exid435-morphia_distributed_api_NoSlaves_debian-s1_s1-n1_standard_1_n1_standard_1-j0-t2500-s600-rt0_0_2016-07-25_07-21-34
    """
    parsed = Filename(remove_path_from_file(file))
    return " ".join((parsed.j_exid, parsed.benchmark, parsed.types_short))
def get_uid(filename):
    """Extract the unique execution id found between 'j_' and the next
    '-'; prints a warning and returns None when no id is present."""
    match = re.search('j_(.+?)-', filename)
    if match:
        return match.group(1)
    print("No id found for: %s"%(filename))
def compute_sample_metrics(data_dict):
    """Find the last gap-free window of the per-second series and compute
    its statistics.

    Scans the ordered {second: count} mapping for a contiguous run of at
    least SAMPLE_LENGTH - PADDING seconds; the end of the last such run
    (the "drop time", optionally shifted by PADDING or overridden by
    DROP_TIME) anchors the sampling window.

    Returns:
        sample_metric(mean, median, std, drop_time, sample_length, rsd)
        computed over the window of values ending at the drop time.

    Raises:
        Exception: when no gap-free run of the required length exists.
    """
    sample_length = SAMPLE_LENGTH-PADDING
    drop_time = 0
    last_request = 0
    nongap_duration = 0
    sample_size_reached = False
    drops = []
    for time, count in data_dict.items():
        if time == last_request+1:
            # consecutive second: the gap-free run continues
            nongap_duration = nongap_duration+1
            last_request = time
            if nongap_duration >= sample_length:
                sample_size_reached = True
        else:
            # after a gap: remember where the previous run ended
            if sample_size_reached:
                drop_time = last_request
                drops.append(last_request)
            if last_request + GAP_LENGTH <= time:
                # gap long enough to reset the run entirely
                nongap_duration = 0
                sample_size_reached = False
            last_request = time
    if sample_size_reached == True:
        drop_time = last_request
    if drop_time == 0:
        drop_time = last_request
        # print("Attention: includes gaps! Set to end:{}".format(last_request))
        raise Exception("No GAP-free part found!")
    print(drops)
    values = list(data_dict.values())
    dict_keys = list(data_dict.keys())
    # adjusting: shift the window end left by PADDING seconds
    if PADDING and drop_time-PADDING in dict_keys:
        drop_time_index = dict_keys.index(drop_time-PADDING)
        drop_time = drop_time-PADDING
    else:
        # NOTE(review): with PADDING == 0 this branch still steps the
        # window end back by one key - confirm that is intended.
        print("droptime: {}".format(drop_time))
        drop_time_index = dict_keys.index(drop_time)
        while dict_keys[drop_time_index] >= drop_time-PADDING:
            drop_time_index = drop_time_index-1
            print("finding next smaller:{}>{}".format(dict_keys[drop_time_index],drop_time-PADDING))
        drop_time = dict_keys[drop_time_index]
    if DROP_TIME and DROP_TIME<dict_keys[-1]:
        # global override of the window end, when configured
        drop_time = DROP_TIME
        drop_time_index = dict_keys.index(drop_time)
    mean = np.mean(values[drop_time_index-sample_length:drop_time_index])
    median = np.median(values[drop_time_index-sample_length:drop_time_index])
    std = np.std(values[drop_time_index-sample_length:drop_time_index])
    rsd = 100*(std/mean)
    return sample_metric(mean, median, std, drop_time, sample_length,rsd)
def print_sample_indicator(requests_dict, metric,drop_time,sample_length):
    """Overlay dots at height *metric* across the sampling window on the
    module-level axes *ax*."""
    keys = list(requests_dict.keys())
    index_drop_time = keys.index(drop_time)
    ax.plot(keys[index_drop_time-sample_length:index_drop_time], [metric for i in range(sample_length)],'k.',markersize=6)
def print_sample(filename, requests_dict):
    """Plot one run's requests-per-second line (from PRINT_START on) on *ax*."""
    label = get_label_for_key(filename.split(" ")[2])
    ax.plot(list(requests_dict.keys())[PRINT_START:], list(requests_dict.values())[PRINT_START:],'-',markersize=6,label=label)
def print_averages(itk, averages):
    """Plot the per-second mean line of one instance-type's aggregates."""
    label = "mean: {}".format(itk)
    keys = list(averages.keys())
    values = [i[0] for i in list(averages.values())]
    ax.plot(keys, values,'-',markersize=6,label=label)
def print_line_with_error_indicators(itk, averages):
    """Plot the mean line with lower/upper error bars (averages holds
    [mean, lower_error, upper_error] per second), one bar every
    ERROR_EVERY points."""
    label = "mean: {}".format(get_label_for_key(itk))
    errorevery = ERROR_EVERY
    keys = list(averages.keys())
    values = [i[0] for i in list(averages.values())]
    lowers = [i[1] for i in list(averages.values())]
    uppers = [i[2] for i in list(averages.values())]
    ax.errorbar(keys, values, yerr=[lowers, uppers],errorevery=errorevery, ecolor='k',capthick=2, label=label)
def print_stats_single(benchmark_instance):
    """Print one run's summary statistics to stdout."""
    print("name: %s"%(benchmark_instance.filename))
    print("mean: %f"%(benchmark_instance.mean))
    print("median: %f"%(benchmark_instance.median))
    print("standard-deviation: %f"%(benchmark_instance.std))
    print("------------------")
def get_instance_type_key(file):
    """Return the first configured KEY contained in *file*'s name, or
    None when no key matches."""
    filename = remove_path_from_file(file)
    for key in KEYS:
        if key in filename:
            return key
    return None
def get_data_file_path_name(file):
    """Return the cache file path ".data/<uid>.pkl" for a result file."""
    return ".data/{}.pkl".format(get_uid(file))
# Display labels for every configured deployment key; keys absent here
# (e.g. the retired "h2_s1") intentionally fall back to the key itself.
_KEY_LABELS = {
    # AWS bursting / non-bursting
    "mi_mi": "A_nb1m",
    "mi_mi_burst": "A_b1m",
    "s_mi": "A_nb1s",
    "s_mi_burst": "A_b1s_1",
    "s_s_burst": "A_b1s_2",
    "m_mi": "A_nb2",
    "m_mi_burst": "A_b2_1",
    "m_s_burst": "A_b2_2",
    # AWS general-purpose / compute-optimized
    "m4l_s": "A_gp2_1",
    "m4l_m3m": "A_gp2_2",
    "m4xl_s": "A_gp4",
    "c4l_s": "A_co2_1",
    "c4l_m3m": "A_co2_2",
    "c4xl_s": "A_co4",
    # Google
    "g1_g1": "G_b1",
    "s1_s1": "G_gp1",
    "s2_s1": "G_gp2",
    "s4_s1": "G_gp4",
    "h2_h2": "G_co2",
    "h4_s1": "G_co4",
}
def get_label_for_key(key):
    """Return the human-readable plot label for a deployment key.

    Unknown keys are reported on stdout and returned unchanged, matching
    the behaviour of the previous if/elif chain.
    """
    label = _KEY_LABELS.get(key)
    if label is None:
        print ("no label found for key:{}".format(key))
        return key
    return label
sample_metric = namedtuple('SampleMetric','mean, median, std, drop_time, sample_length, rsd')
config = namedtuple('SampleMetric','KEYS SAMPLE_LENGTH PADDING GAP_LENGTH WILCOXONU DROP_TIME PRINT_LENGTH FIG_NAME')
#---init script -----------------------------------------------------------------------------------------------------
#NON_BURSTING_CONFIG = config(["c4l_s","c4l_m3m","m4l_s","m4l_m3m"],240, 0,100,True,False,360,"deployment_options_non_bursting")
#BURSTING_CONFIG = config(["s_mi_burst", "s_s_burst", "m_mi_burst", "m_s_burst"],150,0,100,True, False,250,"deployment_options_bursting")
#
#config = BURSTING_CONFIG
#KEYS=config.KEYS
#SAMPLE_LENGTH = config.SAMPLE_LENGTH
#PADDING = config.PADDING
#GAP_LENGTH = config.GAP_LENGTH
#WILCOXONU = config.WILCOXONU
#DROP_TIME = config.DROP_TIME
#PRINT_LENGTH = config.PRINT_LENGTH
#FIG_NAME = config.FIG_NAME
#---init script -----------------------------------------------------------------------------------------------------
files = []
for path in PATHS:
files.extend(glob(path+'*.jtl'))
for idx,file in enumerate(files):
# check if there is a key in the filename
instance_type_key = get_instance_type_key(file)
if instance_type_key == None:
print("No KEY found in filename and therefore ignored: %s"%(file))
continue
filename = get_name(file)
if os.path.exists(get_data_file_path_name(file)) and not OVERWRITE_DATA_FILE:
continue
print("%s of %s: %s"%(idx+1, len(files),filename))
# read the raw file
data = np.genfromtxt(file, delimiter=',', skip_header=0, names=True, usecols=("timeStamp", "elapsed", "success"), dtype=[("timeStamp", float), ( "elapsed", float), ("success", bool)])
#transform times from ms to s based on numpy array operations
time_in_ms_from_start = data['timeStamp']-data['timeStamp'][0]
time_in_s_from_start = time_in_ms_from_start/(1000)
data['timeStamp'] = time_in_s_from_start
if MODE == 1:
rps = compute_started_requests_per_second(data)
elif MODE == 2:
rps= compute_finished_requests_per_second(data)
else:
rps = compute_open_requests_per_second(data)
benchmark_instance = BenchmarkInstance(filename,rps, None)
save_object(benchmark_instance,get_data_file_path_name(file))
del(benchmark_instance)
benchmarks_per_type = defaultdict(list)
for idx,file in enumerate(files):
if ENABLE_FILE_INFO:
print("Check File %s of %s"%(idx+1, len(files)))
instance_type_key = get_instance_type_key(file)
if instance_type_key in KEYS:
benchmark_instance = load_object(get_data_file_path_name(file))
print("Load File %s"%(benchmark_instance.filename))
if PRINT_LENGTH and max(list(benchmark_instance.rps.keys()))>PRINT_LENGTH:
benchmark_instance.rps = {k:v for k,v in benchmark_instance.rps.items() if k < PRINT_LENGTH}
if DROP_TIME and DROP_TIME not in benchmark_instance.rps.keys():
print("{} exluded!".format(benchmark_instance.filename))
continue
sample_metrics = compute_sample_metrics(benchmark_instance.rps)
benchmark_instance.metrics = sample_metrics
benchmark_instance.mean = sample_metrics.mean
benchmark_instance.median = sample_metrics.median
benchmark_instance.std = sample_metrics.std
benchmark_instance.drop_time = sample_metrics.drop_time
benchmark_instance.sample_length = sample_metrics.sample_length
benchmarks_per_type[instance_type_key].append(benchmark_instance)
averages_per_type = {}
for itk, benchmarks in benchmarks_per_type.items():
# print(itk)
aggregated_counts = defaultdict(list)
#collect for every second the count form each individual benchmark
list_with_means = []
list_with_medians = []
list_with_rsds = []
for benchmark in benchmarks:
list_with_means.append(benchmark.mean)
list_with_medians.append(benchmark.median)
list_with_rsds.append(benchmark.metrics.rsd)
for timestamp, count in benchmark.rps.items():
aggregated_counts[timestamp].append(count)
averages = defaultdict(list)
for i in range(max(list(aggregated_counts.keys()))+1):#we read the biggest number and take it as index, therefore +1
if len(aggregated_counts[i]) > 0:
item_mean = np.mean(aggregated_counts[i])
item_lower_error= item_mean-np.percentile(aggregated_counts[i],25)
item_upper_error = np.percentile(aggregated_counts[i],75)-item_mean
averages[i] = [item_mean,item_lower_error,item_upper_error]
only_averages = defaultdict(int)
for key, value in averages.items():
only_averages[key] = value[0]
mean_rsd = np.mean(list_with_rsds)
# mean_mean = np.mean(list_with_means)
# mean_mean_std = np.std(list_with_means)
# mean_median = np.mean(list_with_medians)
# mean_median_std = np.std(list_with_medians, ddof=1)
metrics = compute_sample_metrics(only_averages)
median = metrics.median
median_std = metrics.std
mean = metrics.mean
mean_std = metrics.std
median_req95 = ((1.96*mean_std)/(mean*0.05))**2
median_req99 = ((2.576*mean_std)/(mean*0.05))**2
count = len(benchmarks)
#round tp next int
median_req95 = math.ceil(median_req95)
median_req99 = math.ceil(median_req99)
averages_per_type[itk] = AverageInstance(itk,averages,list_with_means, list_with_medians,mean,mean_std,median,median_std,median_req95,median_req99, count,metrics, mean_rsd)
print("=== SINGLE ===")
for itk, benchmarks in sorted(benchmarks_per_type.items(), key=lambda tup: tup[0]):
for benchmark in benchmarks:
print_stats_single(benchmark)
print("=== TYPES ===")
file_stats = open("{}_stats.csv".format(FIG_NAME), "w")
file_stats.write("sample length: {}, padding: {},gap: {},nett lenngth: {}\n".format(SAMPLE_LENGTH, PADDING, GAP_LENGTH, SAMPLE_LENGTH-PADDING))
file_stats.write("Instance Type,Count,95%CL (5%MOE),99%CL (5%MOE),Mean,Median,Mean Std,RSD,Mean RSD\n")
for key in KEYS:
if key in averages_per_type.keys():
average_instance = averages_per_type[key]
file_stats.write("{},{},{},{},{},{},{},{},{}\n".format(get_label_for_key(average_instance.itk),average_instance.count,average_instance.median_req95,average_instance.median_req99,average_instance.mean_mean,average_instance.mean_median,average_instance.mean_median_std,average_instance.metrics.rsd, average_instance.mean_rsd))
print("---- {}----\ncount: {}\n95%CL (5%MOE):{}\n99%CL (5%MOE): {}\nmedian: {}\nstd: {}".format(average_instance.itk,average_instance.count,average_instance.median_req95,average_instance.median_req99,average_instance.mean_median,average_instance.mean_median_std))
file_stats.close()
if WILCOXONU:
file_wilcoxon = open("{}_wilcoxon.csv".format(FIG_NAME), "w")
file_wilcoxon.write("c1,count_c1,mean1,std1 ,c2, count_c2,mean_c2,std_c2,p-Value, U-value\n ")
print("Webapp-DB, Webapp-DB, p-Value, U-value")
temp = KEYS.copy()
for key in KEYS:
temp.remove(key)
for temp_key in temp:
wa1 = key.split("_")[0]
db1 = key.split("_")[1]
wa2 = temp_key.split("_")[0]
db2 = temp_key.split("_")[1]
key_mean = averages_per_type[key].mean_mean
key_std = averages_per_type[key].mean_mean_std
temp_mean = averages_per_type[temp_key].mean_mean
temp_std = averages_per_type[temp_key].mean_mean_std
wil = stats.mannwhitneyu(averages_per_type[key].list_with_means,averages_per_type[temp_key].list_with_means,alternative='two-sided')
print("{}-{}, {}-{}, {}, {}".format(wa1, db1, wa2, db2,wil.pvalue, wil.statistic))
# file_wilcoxon.write("{}, {}, {}, {}, {}\n".format(wa1, db1, wa2, db2,wil.pvalue))
file_wilcoxon.write("{}, {},{},{},{},{},{}, {},{},{}\n".format(get_label_for_key(key),len(averages_per_type[key].list_with_means),key_mean,key_std, get_label_for_key(temp_key), len(averages_per_type[temp_key].list_with_means),temp_mean,temp_std,wil.pvalue, wil.statistic))
file_wilcoxon.close()
if PLOT_LINES:
fig = plt.figure(figsize=(FIG_WIDTH, FIG_HEIGHT))
# fig.suptitle('Request Count Evolution', fontsize=12, fontweight='bold')
ax = fig.add_subplot(111)
ax.set_xlabel('duration in seconds')
ax.set_ylabel('successfull requests per second')
if PLOT_SINGLE:
for itk, benchmarks in sorted(benchmarks_per_type.items(), key=lambda tup: tup[0]):
for benchmark in benchmarks:
print_sample(benchmark.filename, benchmark.rps)
if SINGLE_INDICATOR:
print_sample_indicator(benchmark.rps, benchmark.mean,benchmark.drop_time,benchmark.sample_length)
if PLOT_AVERAGE:
for itk, average_instance in sorted(averages_per_type.items(), key=lambda tup: tup[0]):
# print_averages(itk, average_instance.averages)
print_line_with_error_indicators(itk, average_instance.averages)
metrics = average_instance.metrics
if AVERGAE_INDICATOR:
print_sample_indicator(average_instance.averages, metrics.mean,metrics.drop_time,metrics.sample_length)
#Handles sorting for legend
#handles, labels = ax.get_legend_handles_labels()
#hl = sorted(zip(handles, labels),key=operator.itemgetter(1))
#h, l = zip(*hl)
#lgd = ax.legend(h, l, loc='upper center', bbox_to_anchor=(0.5,-0.1), ncol=2, markerscale=2, title="Files")
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='upper center', bbox_to_anchor=(0.5,-0.1), ncol=2, markerscale=2)#, title="Files")
plt.grid()
plt.savefig('{}_lines.eps'.format(FIG_NAME), bbox_extra_artists=(lgd,), bbox_inches='tight')
if PLOT_BOXPLOT:
fig = plt.figure(figsize=(FIG_WIDTH, FIG_HEIGHT))
# fig.suptitle('Request Rate Boxplot', fontsize=12, fontweight='bold')
ax = fig.add_subplot(111)
ax.set_xlabel('configuration')
ax.set_ylabel('successfull requests per second')
boxplot_data = []
boxplot_labels = []
for key in KEYS:
if key in averages_per_type.keys():
boxplot_data.append(averages_per_type[key].list_with_medians)
label = get_label_for_key(key)+"\n{}".format(len(averages_per_type[key].list_with_medians))
boxplot_labels.append(label)
plt.boxplot(boxplot_data, labels=boxplot_labels)
plt.savefig('{}_boxplot.eps'.format(FIG_NAME),bbox_inches='tight')
if PLOT_LINES or PLOT_BOXPLOT:
plt.show
| sealuzh/benchmarking_online_appendix | scripts/requests_per_second_v2.py | requests_per_second_v2.py | py | 22,852 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
... |
25312889494 | from django.urls import path
from .views import blog, blogDetail, TagView, CategoryDetailView , CreateBlog ,Categorylist, UpdateBlog , DeleteBlog, Privacy
urlpatterns = [
    # Blog index page.
    path('', blog, name="blog"),
    # Single post detail, looked up by slug.
    path('detail/<slug:slug_name>', blogDetail, name="detail"),
    # Posts filtered by tag slug.
    path('tags/<slug:slug_tag>',TagView, name = 'tags'),
    # Posts belonging to one category (class-based view).
    path('category/<slug:slug>', CategoryDetailView.as_view(), name='category_detail'),
    # Post creation form.
    path('CreateBlog/',CreateBlog.as_view(),name='create'),
    # List of all categories.
    path('Category/',Categorylist.as_view(), name = 'Category'),
    # Edit / delete an existing post by slug.
    path('Update/<slug:slug>',UpdateBlog.as_view(),name = 'update'),
    path('delete/<slug:slug>',DeleteBlog.as_view(),name = 'delete'),
    # Privacy page (function-based view).
    path('Xavfsizlik',Privacy,name = 'privacy'),
] | AnvarNarzullayev/blog | blog/urls.py | urls.py | py | 727 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "views.blog",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "views.blogDetail",
... |
27508929995 | from django.urls import reverse
from django.utils.html import format_html
from wagtail.contrib.modeladmin.helpers import PageAdminURLHelper, PageButtonHelper
from wagtail.contrib.modeladmin.mixins import ThumbnailMixin
from wagtail.contrib.modeladmin.options import (
ModelAdmin,
ModelAdminGroup,
modeladmin_register,
)
from .models import ArchiveIssue, MagazineDepartment, MagazineIssue
class MagazineIssueAdminURLHelper(PageAdminURLHelper):
    """URL helper mapping the custom "add-child" action to Wagtail's
    native add-subpage view, so an article can be created under an issue."""

    def get_action_url(
        self,
        action,
        *args,
        **kwargs,
    ):  # pragma: no cover
        if action == "add-child":
            # Route the custom action to the built-in "add subpage" view.
            url_name = "wagtailadmin_pages:add_subpage"
            target_url = reverse(url_name, args=args, kwargs=kwargs)
            return target_url

        # for every other case - just call the parent method
        return super().get_action_url(action, *args, **kwargs)
class MagazineIssueButtonHelperClass(PageButtonHelper):
    """Button helper that adds an "Add Article" button to each issue row."""

    add_child_button_classnames = ["add-child"]

    def add_child_button(
        self,
        pk,
        classnames_add=None,
        classnames_exclude=None,
    ):  # pragma: no cover
        """Build the button definition dict for the "Add Article" action."""
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []
        classnames = self.add_child_button_classnames + classnames_add
        final_classnames = self.finalise_classname(classnames, classnames_exclude)
        return {
            "url": self.url_helper.get_action_url("add-child", pk),
            "label": "Add Article",
            "classname": final_classnames,
            "title": f"Add article under this {self.verbose_name}",
        }

    def get_buttons_for_obj(
        self,
        obj,
        exclude=None,
        classnames_add=None,
        classnames_exclude=None,
    ):  # pragma: no cover
        """Return the default buttons plus an "Add Article" button.

        Bug fix: the None defaults were previously replaced with the
        one-element tuple ``([],)`` instead of an empty list, so the
        parent class received a tuple-of-list for ``exclude`` and
        ``classnames_add`` rather than the flat list it expects.
        """
        if exclude is None:
            exclude = []
        if classnames_add is None:
            classnames_add = []
        if classnames_exclude is None:
            classnames_exclude = []

        # call the parent class method to get the default set of buttons
        buttons = super().get_buttons_for_obj(
            obj,
            exclude,
            classnames_add,
            classnames_exclude,
        )

        # set up some variables to do user checks and also get the primary key (id)
        permission_helper = self.permission_helper
        user = self.request.user
        pk = getattr(obj, self.opts.pk.attname)

        # many existing permission helpers are already available
        # - see wagtail/contrib/modeladmin/helpers/permission.py
        if "add-child" not in exclude and permission_helper.user_can_create(user):
            add_child_button = self.add_child_button(
                pk,
                classnames_add,
                classnames_exclude,
            )
            buttons.append(add_child_button)

        return buttons
class MagazineIssueModelAdmin(ThumbnailMixin, ModelAdmin):
    """Wagtail modeladmin listing for magazine issues.

    Shows a cover thumbnail per row plus two custom row actions that
    link into the page explorer ("View Articles") and the add-subpage
    view ("Add Article").
    """

    model = MagazineIssue
    menu_icon = "doc-full-inverse"
    menu_label = "Issues"
    list_per_page = 10
    # Newest issue first.
    ordering = [
        "-publication_date",
    ]
    list_display = (
        "admin_thumb",
        "title",
        "publication_date",
        "live",
        "view_articles",
        "add_article",
    )
    list_display_add_buttons = "title"
    # ThumbnailMixin config: render `cover_image` at height 333 px.
    thumb_image_field_name = "cover_image"
    thumb_image_filter_spec = "height-333"
    thumb_col_header_text = "Cover"
    thumb_default = "https://lorempixel.com/100/100"
    list_filter = ("publication_date",)
    empty_value_display = "-"
    search_fields = ("title",)
    button_helper_class = (
        MagazineIssueButtonHelperClass  # added to enable custom button generation
    )
    url_helper_class = (
        MagazineIssueAdminURLHelper  # added to enable custom url generation
    )

    def add_article(
        self,
        obj,
    ):  # pragma: no cover
        # Per-row link to Wagtail's add-subpage view for this issue.
        url_name = "wagtailadmin_pages:add_subpage"
        url = reverse(url_name, args=[obj.id])
        return format_html(
            f'<a href="{url}" class="button button-small button-secondary">Add Article</a>',  # noqa: E501
        )

    def view_articles(
        self,
        obj,
    ):  # pragma: no cover
        # Per-row link to the page explorer scoped to this issue.
        url_name = "wagtailadmin_explore"
        url = reverse(url_name, args=[obj.id])
        return format_html(
            f'<a href="{url}" class="button button-small button-secondary">View Articles</a>',  # noqa: E501
        )
class ArchiveIssueModelAdmin(ModelAdmin):
    """Modeladmin listing for ArchiveIssue pages, searchable by title
    and internet_archive_identifier."""

    model = ArchiveIssue
    menu_icon = "doc-full"
    menu_label = "Archive Issues"
    list_per_page = 10
    # Oldest first (note: the live-issues listing sorts newest first).
    ordering = [
        "publication_date",
    ]
    list_display = (
        "title",
        "publication_date",
        "internet_archive_identifier",
    )
    empty_value_display = "-"
    search_fields = (
        "title",
        "internet_archive_identifier",
    )
class MagazineDepartmentModelAdmin(ModelAdmin):
    """Simple title-only listing for magazine departments."""

    model = MagazineDepartment
    menu_icon = "tag"
    menu_label = "Departments"
    menu_order = 200
    add_to_settings_menu = False
    exclude_from_explorer = False
    list_per_page = 10
    list_display = ("title",)
    search_fields = ("title",)
class MagazineGroup(ModelAdminGroup):
    """Groups the three magazine modeladmins under one "Magazine" menu."""

    menu_label = "Magazine"
    menu_icon = "tablet-alt"
    menu_order = 100
    items = (
        MagazineIssueModelAdmin,
        ArchiveIssueModelAdmin,
        MagazineDepartmentModelAdmin,
    )

# Register the whole group with the Wagtail admin.
modeladmin_register(MagazineGroup)
| WesternFriend/WF-website | magazine/wagtail_hooks.py | wagtail_hooks.py | py | 5,501 | python | en | code | 46 | github-code | 1 | [
{
"api_name": "wagtail.contrib.modeladmin.helpers.PageAdminURLHelper",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "wagtail.contrib.modeladmin.helpers.PageButtonHelper",
"line_number"... |
18709079200 | # Append Dictionary Keys and Values ( In order ) in dictionary
# Input : test_dict = {“Gfg” : 1, “is” : 2, “Best” : 3}
# Output : [‘Gfg’, ‘is’, ‘Best’, 1, 2, 3]
# Explanation : All the keys before all the values in list.
# Demonstrates three ways to build [all keys..., all values...] from a dict.
test_dict = {"Gfg": 1, "is": 3, "Best": 2}

print("======= 1) Naive Method ======")
lst = []
for k in test_dict:              # keys first, in insertion order
    lst.append(k)
for v in test_dict.values():     # then the values
    lst.append(v)
print(lst)

print('\n====== 2) concanating two list =====')
# Unpack both views straight into one list literal.
concat_list = [*test_dict.keys(), *test_dict.values()]
print(concat_list)

print('\n===== 3) chain + keys + values ======')
from itertools import chain
# chain() lazily walks keys then values; list() materializes it.
chain_list = [*chain(test_dict.keys(), test_dict.values())]
print(chain_list)
print(chain_list) | dilipksahu/Python-Programming-Example | Dictionary Programs/appendDictKeysValues.py | appendDictKeysValues.py | py | 753 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.chain",
"line_number": 27,
"usage_type": "call"
}
] |
41690429860 | # The file_date function creates a new file in the current working directory, checks the file's modification date, and returns only the date portion of the timestamp in yyyy-mm-dd format. Fill in the blanks to create a file named "newfile.txt" and check its modification date.
import os
import datetime
def file_date(filename):
    """Create (or truncate) `filename` and return its modification date.

    The file is opened for writing and closed immediately, so its
    mtime is "now"; the returned string is formatted yyyy-mm-dd.
    """
    # Use a context manager so the handle is always closed
    # (the original opened the file and never closed it — a leak).
    with open(filename, 'w'):
        pass
    timestamp = os.path.getmtime(filename)
    # Convert the POSIX timestamp into a local datetime, keep only the date.
    modified = datetime.datetime.fromtimestamp(timestamp)
    return modified.strftime("%Y-%m-%d")

print(file_date("newfile.txt"))
# Should be today's date in the format of yyyy-mm-dd
| pers5not/my_rep | Google/Using_Python_to_Interact_with_the_Operating_System/week_2/ex_02_5.py | ex_02_5.py | py | 976 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "os.path.getmtime",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.... |
14661828942 | from fastapi import FastAPI, Depends
from app.routers import security, users, images, services
# < Development:
from fastapi.middleware.cors import CORSMiddleware
# >
from app.data.database import database
from app.data.io_files import create_folders
from app.security.methods import (
create_admin, create_sample_user, get_current_active_user
)
app = FastAPI()

# < Development:
# CORS middleware is only needed while the frontend dev server runs on a
# separate origin (localhost:3000); tighten or remove for production.
origins = [
    "http://localhost",
    "http://localhost:3000",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# >

# The security router (token/login) stays public; every other router
# requires an authenticated, active user via the shared dependency.
app.include_router(security.router)
app.include_router(
    users.router,
    prefix='/users',
    dependencies=[Depends(get_current_active_user)]
)
app.include_router(
    images.router,
    prefix='/images',
    dependencies=[Depends(get_current_active_user)]
)
app.include_router(
    services.router,
    prefix='/services',
    dependencies=[Depends(get_current_active_user)]
)
@app.on_event("startup")
async def startup():
    # Open the DB connection, then make sure the default accounts and
    # the on-disk folders exist before the first request is served.
    await database.connect()
    await create_admin()
    await create_sample_user()
    await create_folders()
@app.on_event("shutdown")
async def shutdown():
    # Release the database connection on server shutdown.
    await database.disconnect()
@app.get("/ping")
async def ping():
    # Unauthenticated liveness probe.
    return {"ping": True}
| sonarom-org/ariavt-backend | app/main.py | main.py | py | 1,298 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.routers",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app.routers.add_middleware",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "fastapi.middle... |
73184183074 | """
Pagination helpers.
"""
import itertools
import math
def paginate_iterable(iterable, page, per_page):
    """Slice one page out of an arbitrary iterable.

    Pages are 1-based; returns an iterator yielding at most
    ``per_page`` items.
    """
    offset = (page - 1) * per_page
    return itertools.islice(iterable, offset, offset + per_page)
def mongo_paginate_to_dict(pages, items_key, convert_items=True, **nestedkwargs):
    """Turn a mongoengine pagination page into a plain dict for marshalling.

    pages: the mongoengine page object (has .page, .pages, .items).
    items_key: key under which the page items are stored in the result.
    convert_items: when True, each item is serialized via its
    ``to_dict(**nestedkwargs)`` method; otherwise items are kept as-is.
    """
    if convert_items:
        entries = [entry.to_dict(**nestedkwargs) for entry in pages.items]
    else:
        entries = pages.items
    return {
        'page': pages.page,
        'pages': pages.pages,
        items_key: entries,
    }
def custom_paginate_to_dict(iterable, items_key, page, total, per_page, convert_items, **nestedkwargs):
    """Build a marshalling dict for a page produced by paginate_iterable.

    total / per_page determine the overall page count; convert_items
    controls whether each item is serialized via to_dict(**nestedkwargs).
    """
    page_count = int(math.ceil(total / float(per_page)))
    if convert_items:
        contents = [entry.to_dict(**nestedkwargs) for entry in iterable]
    else:
        contents = list(iterable)
    result = {
        "page": page,
        "pages": page_count,
    }
    result[items_key] = contents
    return result
| amrdraz/java-project-runner | application/resources/pagination.py | pagination.py | py | 1,336 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.islice",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 39,
"usage_type": "call"
}
] |
41381149885 | import json
filename = "File-System/json/favourite_num.json"

# One-time setup (intentionally commented out): prompt for a number and
# persist it as JSON.
# number = input("What's your favourite number? ")
# with open(filename, "w") as f_object:
#     json.dump(number, f_object)
#     print("Thanks! I'll remember that.")

# Read the stored value back.  json.load returns it exactly as dumped —
# a str, since input() produces strings — so plain + concatenation works.
with open(filename, "r") as f:
    content = json.load(f)
    print("I know your favourite number is " + content)
| meenphilip/Python-Basics | File-System/json/favourite_number.py | favourite_number.py | py | 344 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
}
] |
11510531692 | # Released under the MIT License. See LICENSE for details.
#
"""Implements a flag used for marking bases, capture-the-flag games, etc."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from bascenev1lib.gameutils import SharedObjects
import bascenev1 as bs
if TYPE_CHECKING:
from typing import Any, Sequence
class FlagFactory:
    """Wraps up media and other resources used by `Flag`s.

    Category: **Gameplay Classes**

    A single instance of this is shared between all flags
    and can be retrieved via FlagFactory.get().
    """

    flagmaterial: bs.Material
    """The bs.Material applied to all `Flag`s."""

    impact_sound: bs.Sound
    """The bs.Sound used when a `Flag` hits the ground."""

    skid_sound: bs.Sound
    """The bs.Sound used when a `Flag` skids along the ground."""

    no_hit_material: bs.Material
    """A bs.Material that prevents contact with most objects;
    applied to 'non-touchable' flags."""

    flag_texture: bs.Texture
    """The bs.Texture for flags."""

    # Per-activity storage key used by get() to cache the shared instance.
    _STORENAME = bs.storagename()

    def __init__(self) -> None:
        """Instantiate a `FlagFactory`.

        You shouldn't need to do this; call FlagFactory.get() to
        get a shared instance.
        """
        shared = SharedObjects.get()
        self.flagmaterial = bs.Material()
        # Ignore collisions with other objects for the first 100 ms of
        # the flag's life.
        self.flagmaterial.add_actions(
            conditions=(
                ('we_are_younger_than', 100),
                'and',
                ('they_have_material', shared.object_material),
            ),
            actions=('modify_node_collision', 'collide', False),
        )
        # Tell the flag node when it gains/loses footing contacts.
        self.flagmaterial.add_actions(
            conditions=(
                'they_have_material',
                shared.footing_material,
            ),
            actions=(
                ('message', 'our_node', 'at_connect', 'footing', 1),
                ('message', 'our_node', 'at_disconnect', 'footing', -1),
            ),
        )
        self.impact_sound = bs.getsound('metalHit')
        self.skid_sound = bs.getsound('metalSkid')
        # Play impact/skid sounds when hitting or sliding along footing.
        self.flagmaterial.add_actions(
            conditions=(
                'they_have_material',
                shared.footing_material,
            ),
            actions=(
                ('impact_sound', self.impact_sound, 2, 5),
                ('skid_sound', self.skid_sound, 2, 5),
            ),
        )
        self.no_hit_material = bs.Material()
        # Non-touchable flags can't be picked up or attacked.
        self.no_hit_material.add_actions(
            conditions=(
                ('they_have_material', shared.pickup_material),
                'or',
                ('they_have_material', shared.attack_material),
            ),
            actions=('modify_part_collision', 'collide', False),
        )

        # We also don't want anything moving it.
        self.no_hit_material.add_actions(
            conditions=(
                ('they_have_material', shared.object_material),
                'or',
                ('they_dont_have_material', shared.footing_material),
            ),
            actions=(
                ('modify_part_collision', 'collide', False),
                ('modify_part_collision', 'physical', False),
            ),
        )
        self.flag_texture = bs.gettexture('flagColor')

    @classmethod
    def get(cls) -> FlagFactory:
        """Get/create a shared `FlagFactory` instance."""
        # Cache the factory in the current activity's customdata so all
        # flags within that activity share one set of materials/sounds.
        activity = bs.getactivity()
        factory = activity.customdata.get(cls._STORENAME)
        if factory is None:
            factory = FlagFactory()
            activity.customdata[cls._STORENAME] = factory
        assert isinstance(factory, FlagFactory)
        return factory
@dataclass
class FlagPickedUpMessage:
    """A message saying a `Flag` has been picked up.

    Category: **Message Classes**

    Sent to the activity by Flag.handlemessage() when the flag node
    receives a bs.PickedUpMessage.
    """

    flag: Flag
    """The `Flag` that has been picked up."""

    node: bs.Node
    """The bs.Node doing the picking up."""
@dataclass
class FlagDiedMessage:
    """A message saying a `Flag` has died.

    Category: **Message Classes**

    Sent to the activity by Flag.handlemessage() on a non-immediate
    bs.DieMessage (including when the dropped-timeout countdown expires).
    """

    flag: Flag
    """The `Flag` that died."""
@dataclass
class FlagDroppedMessage:
    """A message saying a `Flag` has been dropped.

    Category: **Message Classes**

    Sent to the activity by Flag.handlemessage() when the flag node
    receives a bs.DroppedMessage.
    """

    flag: Flag
    """The `Flag` that was dropped."""

    node: bs.Node
    """The bs.Node that was holding it."""
class Flag(bs.Actor):
    """A flag; used in games such as capture-the-flag or king-of-the-hill.

    Category: **Gameplay Classes**

    Can be stationary or carry-able by players.
    """

    def __init__(
        self,
        position: Sequence[float] = (0.0, 1.0, 0.0),
        color: Sequence[float] = (1.0, 1.0, 1.0),
        materials: Sequence[bs.Material] | None = None,
        touchable: bool = True,
        dropped_timeout: int | None = None,
    ):
        """Instantiate a flag.

        If 'touchable' is False, the flag will only touch terrain;
        useful for things like king-of-the-hill where players should
        not be moving the flag around.

        'materials' can be a list of extra `bs.Material`s to apply to the flag.

        If 'dropped_timeout' is provided (in seconds), the flag will die
        after remaining untouched for that long once it has been moved
        from its initial position.
        """
        super().__init__()

        self._initial_position: Sequence[float] | None = None
        self._has_moved = False
        shared = SharedObjects.get()
        factory = FlagFactory.get()
        if materials is None:
            materials = []
        elif not isinstance(materials, list):
            # In case they passed a tuple or whatnot.
            materials = list(materials)
        if not touchable:
            # no_hit_material must come first so it overrides pickup/attack.
            materials = [factory.no_hit_material] + materials

        finalmaterials = [
            shared.object_material,
            factory.flagmaterial,
        ] + materials
        self.node = bs.newnode(
            'flag',
            attrs={
                'position': (position[0], position[1] + 0.75, position[2]),
                'color_texture': factory.flag_texture,
                'color': color,
                'materials': finalmaterials,
            },
            delegate=self,
        )

        if dropped_timeout is not None:
            dropped_timeout = int(dropped_timeout)
        self._dropped_timeout = dropped_timeout
        self._counter: bs.Node | None
        if self._dropped_timeout is not None:
            self._count = self._dropped_timeout
            # Once-per-second tick drives the dropped-timeout countdown.
            self._tick_timer = bs.Timer(
                1.0, call=bs.WeakCall(self._tick), repeat=True
            )
            # In-world text node that displays the countdown digits.
            self._counter = bs.newnode(
                'text',
                owner=self.node,
                attrs={
                    'in_world': True,
                    'color': (1, 1, 1, 0.7),
                    'scale': 0.015,
                    'shadow': 0.5,
                    'flatness': 1.0,
                    'h_align': 'center',
                },
            )
        else:
            self._counter = None

        # Number of nodes currently holding us (see handlemessage).
        self._held_count = 0
        self._score_text: bs.Node | None = None
        self._score_text_hide_timer: bs.Timer | None = None

    def _tick(self) -> None:
        # Runs once per second while a dropped_timeout is configured.
        if self.node:
            # Grab our initial position after one tick (in case we fall).
            if self._initial_position is None:
                self._initial_position = self.node.position

            # Keep track of when we first move; we don't count down
            # until then.
            if not self._has_moved:
                nodepos = self.node.position
                if (
                    max(
                        abs(nodepos[i] - self._initial_position[i])
                        for i in list(range(3))
                    )
                    > 1.0
                ):
                    self._has_moved = True

            # Reset the countdown while held or still at the start spot.
            if self._held_count > 0 or not self._has_moved:
                assert self._dropped_timeout is not None
                assert self._counter
                self._count = self._dropped_timeout
                self._counter.text = ''
            else:
                self._count -= 1
                # Only show the digits for the final 10 seconds.
                if self._count <= 10:
                    nodepos = self.node.position
                    assert self._counter
                    self._counter.position = (
                        nodepos[0],
                        nodepos[1] + 1.3,
                        nodepos[2],
                    )
                    self._counter.text = str(self._count)
                    if self._count < 1:
                        self.handlemessage(bs.DieMessage())
                else:
                    assert self._counter
                    self._counter.text = ''

    def _hide_score_text(self) -> None:
        # Shrink the score text to zero scale (leaves the node in place
        # for reuse by the next set_score_text call).
        assert self._score_text is not None
        assert isinstance(self._score_text.scale, float)
        bs.animate(
            self._score_text, 'scale', {0: self._score_text.scale, 0.2: 0}
        )

    def set_score_text(self, text: str) -> None:
        """Show a message over the flag; handy for scores."""
        if not self.node:
            return
        if not self._score_text:
            start_scale = 0.0
            # 'math' node keeps the text positioned 1.4 units above the flag.
            math = bs.newnode(
                'math',
                owner=self.node,
                attrs={'input1': (0, 1.4, 0), 'operation': 'add'},
            )
            self.node.connectattr('position', math, 'input2')
            self._score_text = bs.newnode(
                'text',
                owner=self.node,
                attrs={
                    'text': text,
                    'in_world': True,
                    'scale': 0.02,
                    'shadow': 0.5,
                    'flatness': 1.0,
                    'h_align': 'center',
                },
            )
            math.connectattr('output', self._score_text, 'position')
        else:
            # Reuse the existing node; animate from its current scale.
            assert isinstance(self._score_text.scale, float)
            start_scale = self._score_text.scale
            self._score_text.text = text
        self._score_text.color = bs.safecolor(self.node.color)
        bs.animate(self._score_text, 'scale', {0: start_scale, 0.2: 0.02})
        # Auto-hide after one second.
        self._score_text_hide_timer = bs.Timer(
            1.0, bs.WeakCall(self._hide_score_text)
        )

    def handlemessage(self, msg: Any) -> Any:
        assert not self.expired
        if isinstance(msg, bs.DieMessage):
            if self.node:
                self.node.delete()
                if not msg.immediate:
                    self.activity.handlemessage(FlagDiedMessage(self))
        elif isinstance(msg, bs.HitMessage):
            # Forward physical hits to the node as an impulse.
            assert self.node
            assert msg.force_direction is not None
            self.node.handlemessage(
                'impulse',
                msg.pos[0],
                msg.pos[1],
                msg.pos[2],
                msg.velocity[0],
                msg.velocity[1],
                msg.velocity[2],
                msg.magnitude,
                msg.velocity_magnitude,
                msg.radius,
                0,
                msg.force_direction[0],
                msg.force_direction[1],
                msg.force_direction[2],
            )
        elif isinstance(msg, bs.PickedUpMessage):
            self._held_count += 1
            # Clear the countdown digits while we're being carried.
            if self._held_count == 1 and self._counter is not None:
                self._counter.text = ''
            self.activity.handlemessage(FlagPickedUpMessage(self, msg.node))
        elif isinstance(msg, bs.DroppedMessage):
            self._held_count -= 1
            if self._held_count < 0:
                print('Flag held count < 0.')
                self._held_count = 0
            self.activity.handlemessage(FlagDroppedMessage(self, msg.node))
        else:
            super().handlemessage(msg)

    @staticmethod
    def project_stand(pos: Sequence[float]) -> None:
        """Project a flag-stand onto the ground at the given position.

        Useful for games such as capture-the-flag to show where a
        movable flag originated from.
        """
        assert len(pos) == 3
        bs.emitfx(position=pos, emit_type='flag_stand')
| efroemling/ballistica | src/assets/ba_data/python/bascenev1lib/actor/flag.py | flag.py | py | 12,189 | python | en | code | 468 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "bascenev1.Material",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "bascenev1.Sound",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "bas... |
12450613973 | import boto3
from datetime import datetime
import time
TABLE_NAME = 'VisitorData'
# Module-level resource so a warm Lambda execution context reuses the
# same DynamoDB connection across invocations.
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(TABLE_NAME)
def get_unix_time():
    """Return the current unix timestamp (whole seconds) as a string.

    Bug fix: the original paired datetime.utcnow() with time.mktime();
    mktime() interprets its struct_time argument as *local* time, so
    the result was skewed by the machine's UTC offset.  Pairing a
    local datetime with mktime() keeps the two consistent and yields
    the true epoch time, preserving the original '<seconds>.0' format.
    """
    d = datetime.now()
    return str(time.mktime(d.timetuple()))
def add_visit(user_agent):
    """Store one visit record, keyed by the current unix-time string."""
    visit_id = get_unix_time()
    item = {
        'id': visit_id,
        'user_agent': user_agent
    }
    # NOTE(review): whole-second ids mean two visits within the same
    # second overwrite each other — confirm that is acceptable.
    table.put_item(Item=item)
def get_total_count():
    """Return the total number of items in the visits table.

    Bug fix: a single scan() only covers the first page (up to 1 MB of
    data), so its 'Count' under-reports on large tables; follow
    LastEvaluatedKey to accumulate the count across all pages.
    Select='COUNT' avoids transferring item bodies.
    """
    response = table.scan(Select='COUNT')
    count = response['Count']
    while 'LastEvaluatedKey' in response:
        response = table.scan(
            Select='COUNT',
            ExclusiveStartKey=response['LastEvaluatedKey'],
        )
        count += response['Count']
    return count
def lambda_handler(event, context):
    """API Gateway entry point: record the visit, return the total as HTML."""
    # userAgent may be absent from the identity block; .get() tolerates that.
    add_visit(event['requestContext']['identity'].get('userAgent'))
    total_count = get_total_count()
    return {
        'statusCode': 200,
        'headers': {
            'content-type': 'text/html'
        },
        'body': f'<h1>Visited {total_count} times</h1>'
    }
| mgochoa/sam-udea-demo | src/index.py | index.py | py | 829 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "boto3.resource",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "time.mktime"... |
36648565632 | from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
from selenium import webdriver
def init_browser():
    """Launch a maximized, notification-free Chrome driven by splinter."""
    # Expects chromedriver.exe next to the script (Windows layout).
    executable_path = {'executable_path': 'chromedriver.exe'}
    options = webdriver.ChromeOptions()
    options.add_argument("--start-maximized")
    options.add_argument("--disable-notifications")
    return Browser('chrome', **executable_path, headless=False, options = options)
def scrape():
    """Scrape Mars news, featured image, weather, facts and hemisphere data.

    Drives a Chrome browser through several sites and collects everything
    into one dict.  NOTE(review): all selectors/xpaths are tied to the
    pages' markup at the time of writing and will break if it changes.
    """
    browser = init_browser()

    # Visit webpage and get NASA Mars News
    url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
    browser.visit(url)

    #Scrape the NASA Mars News Site and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.
    news_title = browser.find_by_css('div[class="content_title"] a', wait_time=2).text
    news_p = browser.find_by_css('div[class="article_teaser_body"]').text

    # Visit JPL Space Images
    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(url)

    # Click image to maximize
    browser.find_by_id('full_image').click()

    # Get featured image url
    featured_image_url = browser.find_by_id('fancybox-lock').first.find_by_tag('img')['src']

    #Visit Twitter for Mars Weather
    url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url)

    # Get Mars Weather Data
    # NOTE(review): the hard-coded index [4] and the generated CSS class
    # chain are especially brittle.
    mars_weather = browser.find_by_tag('article').find_by_css('span[class="css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0"]')[4].text

    # Visit Space Facts and read table facts to html table
    url = "https://space-facts.com/mars/"
    tables = pd.read_html(url)
    df = tables[0]
    df.columns = ['Description', 'Value']
    df.set_index('Description', inplace=True)
    html_table_df = df.to_html()
    html_table = html_table_df.replace('\n', '')

    # # Vist webpage to get images for Mars Hemispheres
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)

    # # Find Hemisphere title containing the hemisphere name.
    title1 = browser.find_by_css('div[class="item"]')[0].find_by_tag('h3').text
    title2 = browser.find_by_css('div[class="item"]')[1].find_by_tag('h3').text
    title3 = browser.find_by_css('div[class="item"]')[2].find_by_tag('h3').text
    title4 = browser.find_by_css('div[class="item"]')[3].find_by_tag('h3').text

    # # Browse through webpage to get image urls
    # For each hemisphere: open its page, click the enlarge link, grab the
    # first download href, then navigate back.  (click()/back() return
    # None; the link*/open*/back* names are unused.)
    link1 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/section/div/div[2]/div[1]/div/a/h3').click()
    open1 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/a').click()
    img1 = browser.find_by_css('div[class="downloads"]').find_by_tag('a')['href']
    back1 = browser.back()
    link2 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/section/div/div[2]/div[2]/div/a/h3').click()
    open2 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/a').click()
    img2 = browser.find_by_css('div[class="downloads"]').find_by_tag('a')['href']
    back2 = browser.back()
    link3 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/section/div/div[2]/div[3]/div/a/h3').click()
    open3 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/a').click()
    img3 = browser.find_by_css('div[class="downloads"]').find_by_tag('a')['href']
    back3 = browser.back()
    link4 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/section/div/div[2]/div[4]/div/a/h3').click()
    open4 = browser.find_by_xpath('/html/body/div[1]/div[1]/div[2]/a').click()
    img4 = browser.find_by_css('div[class="downloads"]').find_by_tag('a')['href']

    # Set titles and images to a list of dictionaries
    hemisphere_image_urls = [
        {"title": title1, "img_url": img1},
        {"title": title2, "img_url": img2},
        {"title": title3, "img_url": img3},
        {"title": title4, "img_url": img4}
    ]

    mars_data = {
        "news_title": news_title,
        "news_text": news_p,
        "featured_img": featured_image_url,
        "mars_weather": mars_weather,
        "mars_facts" : html_table,
        "hemisphere_images": hemisphere_image_urls
    }

    browser.quit()
return mars_data | alexrayperry/Web-Scraping-Challenge | mission-to-mars/scrape_mars.py | scrape_mars.py | py | 4,276 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "splinter.Browser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pa... |
17844225463 | # -*- test-case-name: imaginary.test -*-
from twisted.trial import unittest
from axiom import store
from imaginary import eimaginary, objects
class ContainerTestCase(unittest.TestCase):
    """Tests for imaginary's Container: add, remove, capacity and closed."""

    def setUp(self):
        # One container Thing (capacity 1) and one loose object per test.
        self.store = store.Store()
        self.containmentCore = objects.Thing(store=self.store, name=u"container")
        self.container = objects.Container.createFor(self.containmentCore, capacity=1)
        self.object = objects.Thing(store=self.store, name=u"object")

    def testAdd(self):
        """
        Test that successfully adding an object to a container properly adjusts
        the world graph - in particular, the contents list of the container and
        the location of the object.
        """
        self.container.add(self.object)
        self.assertEquals(list(self.container.getContents()), [self.object])
        self.assertIdentical(self.object.location, self.containmentCore)

    def testRemove(self):
        """
        Test that successfully removing an object from a container properly
        adjusts the world graph - in particular, the contents list of the
        container and the location of the object.
        """
        # Reuse testAdd to reach the "object contained" state first.
        self.testAdd()
        self.container.remove(self.object)
        self.assertEquals(list(self.container.getContents()), [])
        self.assertIdentical(self.object.location, None)

    def testOverflowing(self):
        """
        Test the capacity feature of the container implementation as it
        interacts with the weight feature of the object implementation.
        """
        self.container.capacity = 1
        self.object.weight = 2
        # Too heavy to fit: add must fail and leave the graph untouched.
        self.assertRaises(eimaginary.DoesntFit, self.container.add, self.object)
        self.assertEquals(list(self.container.getContents()), [])
        self.assertIdentical(self.object.location, None)

    def testClosed(self):
        """
        Test the closed feature of the container implementation.
        """
        # A closed container rejects additions...
        self.container.closed = True
        self.assertRaises(eimaginary.Closed, self.container.add, self.object)
        self.assertEquals(list(self.container.getContents()), [])
        self.assertIdentical(self.object.location, None)
        # ...and, with something inside, rejects removals too.
        self.container.closed = False
        self.container.add(self.object)
        self.container.closed = True
        self.assertRaises(eimaginary.Closed, self.container.remove, self.object)
        self.assertEquals(list(self.container.getContents()), [self.object])
        self.assertIdentical(self.object.location, self.containmentCore)
| rcarmo/divmod.org | Imaginary/imaginary/test/test_container.py | test_container.py | py | 2,536 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "twisted.trial.unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "twisted.trial.unittest",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "axiom.store.Store",
"line_number": 11,
"usage_type": "call"
},
{
"api_n... |
41977926388 | #!/usr/bin/python
import os
import sys
sys.path.append(os.path.join(os.getcwd(), '../'))
import pytest
import blackjack.card as card
import random
import string
def test_functional():
    # NOTE: Python 2 code (uses xrange).  Exhaustively checks every
    # suit/number combination: ace handling, face-card values/names,
    # accessors and the string representation.
    for suit in ('hearts', 'diamonds', 'spades', 'clubs'):
        for number in xrange(1, 14):
            newCard = card.Card(number, suit);
            if (1 == number):
                # Aces count as 1 or 11.
                assert (newCard.is_ace())
                value = [1, 11]
                numberString = 'Ace'
            elif (number > 10):
                # Face cards are all worth 10.
                assert (False == newCard.is_ace())
                value = [10]
                if (11 == number):
                    numberString = 'Jack'
                elif (12 == number):
                    numberString = 'Queen'
                elif (13 == number):
                    numberString = 'King'
            else:
                assert (False == newCard.is_ace())
                value = [number]
                numberString = number
            assert (value == newCard.get_value())
            assert (number == newCard.get_number())
            assert (suit == newCard.get_suit())
            newCardString = "%s of %s" % (numberString, suit.title())
            assert (newCardString == newCard.__str__())
def test_disallowed():
    # NOTE: Python 2 code (xrange, string.letters).  Verifies that
    # out-of-range numbers and unknown suits raise ValueError.
    def __randomSuit(length=5):
        # Random 5-letter string; vanishingly unlikely to be a valid suit.
        return ''.join(random.choice(string.letters) for i in range(length))
    # disallowed number: random values outside 1..13 must be rejected
    for number in (random.randint(-50, 49) for _ in xrange(random.randint(0,49))):
        if ((number >= 1) and (number <= 13)):
            continue
        with pytest.raises(ValueError):
            newCard = card.Card(number, 'hearts')
    # disallowed suit: random garbage suit strings must be rejected
    for i in xrange(random.randint(0,49)):
        with pytest.raises(ValueError):
            newCard = card.Card(random.randint(0,13), __randomSuit())
    # near-miss misspellings of real suits must also be rejected
    for suit in ('HEARTSS', 'DIAMOONNDSA', 'SSPADDES', 'CCLLUUBBSS'):
        with pytest.raises(ValueError):
            newCard = card.Card(random.randint(0,13), suit)
| suhasgaddam/blackjack-python | blackjack/test/test_card.py | test_card.py | py | 1,989 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
35151983955 | """
This module defines a degradable lunar-lander environment derived from OpenAI gym.
"""
import numpy as np
from sklearn.base import BaseEstimator
from gym.envs.box2d import lunar_lander
from gym.envs.box2d import LunarLander as OGLunarLander
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class LunarLanderEnv(OGLunarLander):
params = ('MAIN_ENGINE_POWER', 'SIDE_ENGINE_POWER')
def __init__(self, seed=None):
super().__init__()
self.default_params = {p: getattr(lunar_lander, p) for p in LunarLanderEnv.params}
self._seed = seed
self.seed(seed)
def set_parameters(self, MAIN_ENGINE_POWER: float=13., SIDE_ENGINE_POWER: float=0.6):
lunar_lander.MAIN_ENGINE_POWER = MAIN_ENGINE_POWER
lunar_lander.SIDE_ENGINE_POWER = SIDE_ENGINE_POWER
def set_state(self, state: np.ndarray):
raise NotImplementedError
# pylint: disable=no-member
def randomize(self):
return random_lunarlander(self.np_random, env=self)
# pylint: disable=no-member
def random_lunarlander(random: np.random.RandomState=None, env=None):
env = LunarLanderEnv() if env is None else env
feature_size = len(LunarLanderEnv.params)
feature_min = np.asarray([10., 0.5])
feature_max = np.asarray([16., 0.7])
feature_min_abs = np.asarray([10., 0.5])
if isinstance(random, np.random.RandomState):
features = random.randn(feature_size)
elif isinstance(random, np.ndarray):
features = random
elif isinstance(random, (int, float)):
random = np.random.RandomState(random)
features = random.rand(feature_size)
features = np.clip(features, feature_min, feature_max)
features = np.where(np.abs(features) < feature_min_abs,
np.sign(features) * feature_min_abs,
features)
params = {k:v for k, v in zip(env.params, features)}
env.set_parameters(**params)
return env
def plot_lunarlander(env, agent):
pass | hazrmard/AirplaneFaultTolerance | systems/lunarlander.py | lunarlander.py | py | 2,017 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.envs.box2d.LunarLander",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "gym.envs.box2d.lunar_lander",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "gym.envs.box2d.lunar_lander.MAIN_ENGINE_POWER",
"line_number": 25,
"usage_type... |
15643038724 | import csv
import re
from utils import get_star_elements, get_soup, is_last_page, get_key, get_item_from_star_element
import os
from print import print_yellow, print_blue, print_red
hash = {}
page_number = 1
file_name = './data.csv'
does_file_exists = bool(os.path.isfile(file_name))
if does_file_exists:
with open(file_name, "r") as file:
reader = csv.reader(file, delimiter='|')
existing_items = list(reader)
for item in existing_items:
title, image, *_ = item
key = get_key(title, image)
if(key not in hash):
hash[key] = item
else:
print_red('The file is corrupt! It contains duplicate elements! Delete the file and re-run the script!')
exit()
with open(file_name, "a") as file:
writer = csv.writer(file, delimiter='|')
# write header, since the file is new.
if not does_file_exists:
print_blue("File doesn't exist, created a new file!")
writer.writerow(['title', 'image', 'rating', 'number of reviews'])
while True:
page = get_soup(page_number)
elements = get_star_elements(page)
print_yellow(f"Found {len(elements)} products in page {page_number}.")
for index, element in enumerate(elements):
item = get_item_from_star_element(element)
image = item.find('img', class_='s-image')
if(image):
image = image['src']
title = item.find('span', class_='a-size-base-plus a-color-base a-text-normal')
if(title):
title = title.text
else:
title = item.find('span', class_="a-size-medium a-color-base a-text-normal")
title = title.text
rating = item.find('span', class_='a-icon-alt').text
rating = re.search(r'(.+?) out', rating).group(1).strip()
number_of_reviews = item.find('span', class_="a-size-base s-underline-text").text
key = get_key(title, image)
if(key not in hash):
print_blue(f"Found new item, {title}.")
hash[key] = [ title, image, rating, number_of_reviews]
writer.writerow([ title, image, rating, number_of_reviews])
if(is_last_page(page)):
break
page_number += 1
| shibisuriya/indian-e-commerce-scaper | amazon/main.py | main.py | py | 2,363 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "os.path.isfile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "utils.get_key",
"line_numb... |
21473481999 | from pattern.text.en import singularize
from PySide6.QtWidgets import QMainWindow
from translatepy.translators.google import GoogleTranslate
from hitori_srs.text import clear_sentence, clear_word
from hitori_srs.views.definitions_dialog import DefinitionsDialog
from hitori_srs.views.ui.main_window import Ui_MainWindow
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, language, sentence) -> None:
super().__init__()
self.setupUi(self)
self.language = language
self.google = GoogleTranslate()
self.text_sentence.setText(clear_sentence(sentence))
self.translate()
self.action_add_word.triggered.connect(self.add_word)
def translate(self):
sentence = clear_sentence(self.text_sentence.toPlainText())
translated_sentence = self.google.translate(sentence, self.language).result
self.text_translated_sentence.setText(translated_sentence)
def highlight_word(self, words):
sentence = clear_sentence(self.text_sentence.toPlainText())
sentence = " ".join(sentence.split())
for word in words:
sentence = sentence.replace(word, f'<span style="color: rgb(0, 0, 255);">{word}</span>')
self.text_sentence.setText(sentence)
def add_word(self):
word = self.text_sentence.textCursor().selectedText()
if word:
words_for_highlighting = {word}
word = word.lower()
words_for_highlighting.add(word)
word = singularize(word)
words_for_highlighting.add(word)
self.highlight_word(words_for_highlighting)
self.text_word.setText(word)
dd = DefinitionsDialog(word)
if dd.exec():
word, definition = dd.get_selected()
self.text_word.setText(word)
self.text_definition.setText(definition)
# translated_word = self.google.translate(word, self.language).result
# self.edit_translated_word.setText(translated_word)
| nikohonu/hitori-srs | hitori_srs/views/main_window.py | main_window.py | py | 2,040 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PySide6.QtWidgets.QMainWindow",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "hitori_srs.views.ui.main_window.Ui_MainWindow",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "translatepy.translators.google.GoogleTranslate",
"line_number": 1... |
17342271468 | from tkinter import *
import tkinter.messagebox
import PIL.Image
import PIL.ImageTk
import pickle
import winsound
# main (root) GUI menu
class MainMenu:
def __init__(self, master):
self.master = master
self.master.title('Welcome Menu')
self.top_frame = tkinter.Frame(self.master)
self.bottom_frame = tkinter.Frame(self.master)
# create the label buttons
im = PIL.Image.open("Darkest_Dungeon_Logo.png")
photo = PIL.ImageTk.PhotoImage(im)
ddl = Label(master, image=photo)
ddl.image = photo
# https://stackoverflow.com/questions/27599311/tkinter-photoimage-doesnt-not-support-png-image
self.intro1 = tkinter.Label(self.top_frame, text='You are an exile, unwanted and alone. You have heard rumors about an nearly forgotten '
'tomb filled with endless treasure and unknown horrors.')
self.intro2 = tkinter.Label(self.top_frame, text='After months of searching you have finally found it. '
'Do you dare enter?')
# pack the intro
ddl.pack()
self.intro1.pack(side='top', anchor='n', padx=20)
self.intro2.pack(side='top', anchor='n', padx=20)
# create ok and quit buttons
self.ok_button = tkinter.Button(self.bottom_frame, text='Enter the dungeon', command=self.enter)
self.quit_button = tkinter.Button(self.bottom_frame, text='quit', command=self.master.destroy)
# pack the buttons
self.ok_button.pack(side='left')
self.quit_button.pack(side='left')
# pack the frames
self.top_frame.pack()
self.bottom_frame.pack()
def enter(self):
Outside(self.master)
# this hides the main window
class Outside:
def __init__(self, master):
# tkinter.Toplevel() is like tkinter.Frame() but it opens in a new window
self.window = tkinter.Toplevel(master)
self.window.title('Outside the dungeon...')
# create Frames for this Toplevel window
self.top_frame = tkinter.Frame(self.window, padx=10)
self.bottom_frame = tkinter.Frame(self.window, padx=10)
# widgets for top frame - label and entry box for name
self.old_man1_label = tkinter.Label(self.top_frame, text='You aproach the ancient stone doors and notice an decrepit old man etching symbols into stone slabs.')
self.old_man2_label = tkinter.Label(self.top_frame, text='Once you get close enough to the door his head snaps up in your direction. One of his eyes are covered in')
self.old_man3_label = tkinter.Label(self.top_frame, text=' dirty bandages and his clothes look like tattered scribes robes.')
self.old_man4_label = tkinter.Label(self.top_frame, text='')
im = PIL.Image.open("old man.png")
photo = PIL.ImageTk.PhotoImage(im)
old_man_pic = Label(self.top_frame, image=photo)
old_man_pic.image = photo
self.old_man5_label = tkinter.Label(self.top_frame, text='Old man: Ahhh another victim of this tomb of eldritch horrors searching for the treasures so many others')
self.old_man6_label = tkinter.Label(self.top_frame, text='have sought and died for. well if you must go in can I ask to know a little about you? I make sure no ')
self.old_man7_label = tkinter.Label(self.top_frame, text='poor soul is forgotten when they enter and die in this foul place. So tell me about yourself.')
# pack top frame
self.old_man1_label.pack(side='top', anchor='w')
self.old_man2_label.pack(side='top', anchor='w')
self.old_man3_label.pack(side='top', anchor='w')
self.old_man4_label.pack(side='top', anchor='w')
old_man_pic.pack()
self.old_man5_label.pack(side='top', anchor='w')
self.old_man6_label.pack(side='top', anchor='w')
self.old_man7_label.pack(side='top', anchor='w')
# buttons for bottom frame
self.con_button = tkinter.Button(self.bottom_frame, text='Continue', command=self.cont)
# pack bottom frame
self.con_button.pack(anchor='n')
# pack frames
self.top_frame.pack()
self.bottom_frame.pack()
def cont(self):
self.top_frame.destroy()
self.bottom_frame.destroy()
Questions(master=self.window)
class Questions:
def __init__(self, master):
# tkinter.Toplevel() is like tkinter.Frame() but it opens in a new window
self.ques = master
self.ques.title('Outside the dungeon...')
# create Frames for this Toplevel window
self.top_frame = tkinter.Frame(self.ques, padx=40)
self.bottom_frame = tkinter.Frame(self.ques, padx=40)
self.radio_var1 = tkinter.IntVar()
self.radio_var1.set(1)
self.radio_var2 = tkinter.IntVar()
self.radio_var2.set(1)
# widgets for top frame - label and entry box for name
self.quest_1_label = tkinter.Label(self.top_frame, text='What is your name?')
self.quest_1_entry = tkinter.Entry(self.top_frame, width=20)
self.quest_2_label = tkinter.Label(self.top_frame, text='What are you?')
self.quest_2_button1 = tkinter.Radiobutton(self.top_frame, text='Man',
variable=self.radio_var1, value=1)
self.quest_2_button2 = tkinter.Radiobutton(self.top_frame, text='Woman',
variable=self.radio_var1, value=2)
self.quest_3_label = tkinter.Label(self.top_frame, text='What weapon do you use?')
self.quest_3_button1 = tkinter.Radiobutton(self.top_frame, text='Sword and Shield',
variable=self.radio_var2, value=1)
self.quest_3_button2 = tkinter.Radiobutton(self.top_frame, text='Dagger and Bow',
variable=self.radio_var2, value=2)
# pack top frame
self.quest_1_label.pack(side='top', anchor='n', padx=20)
self.quest_1_entry.pack(side='top', anchor='n', padx=20)
self.quest_2_label.pack(side='top', anchor='n', padx=20)
self.quest_2_button1.pack(side='top', anchor='n', padx=20)
self.quest_2_button2.pack(side='top', anchor='n', padx=20)
self.quest_3_label.pack(side='top', anchor='n', padx=20)
self.quest_3_button1.pack(side='top', anchor='n', padx=20)
self.quest_3_button2.pack(side='top', anchor='n', padx=20)
# buttons for bottom frame
self.con_button = tkinter.Button(self.bottom_frame, text='continue', command=self.con)
# pack bottom frame
self.con_button.pack(side='top')
# pack frames
self.top_frame.pack()
self.bottom_frame.pack()
self.sex = 0
self.weapon = 0
def con(self):
if self.radio_var1.get() == 1:
self.sex = 1
else:
self.sex = 2
if self.radio_var2.get() == 1:
self.weapon = 1
else:
self.weapon = 2
character = {'name': self.quest_1_entry.get(), 'sex': self.sex}
weapon = {'weapon_combo': self.weapon}
with open('character.txt', 'wb') as char:
pickle.dump(character, char)
with open('weapon.txt', 'wb') as weap:
pickle.dump(weapon, weap)
# https://stackoverflow.com/questions/36965507/writing-a-dictionary-to-a-text-file
self.top_frame.destroy()
self.bottom_frame.destroy()
Answers(master=self.ques)
class Answers:
def __init__(self, master):
# tkinter.Toplevel() is like tkinter.Frame() but it opens in a new window
self.ans = master
self.ans.title('Outside the dungeon...')
# create Frames for this Toplevel window
self.top_frame = tkinter.Frame(self.ans)
self.bottom_frame = tkinter.Frame(self.ans)
with open("character.txt", "rb") as char:
self.char = pickle.load(char)
with open("weapon.txt", "rb") as weap:
self.weap = pickle.load(weap)
# https://stackoverflow.com/questions/36965507/writing-a-dictionary-to-a-text-file
# widgets for top frame -
if self.char['sex'] == 1:
im = PIL.Image.open("male character.png")
photo = PIL.ImageTk.PhotoImage(im)
main_character_pic = Label(self.top_frame, image=photo)
main_character_pic.image = photo
else:
im = PIL.Image.open("female character.png")
photo = PIL.ImageTk.PhotoImage(im)
main_character_pic = Label(self.top_frame, image=photo)
main_character_pic.image = photo
if self.weap['weapon_combo'] == 1:
self.weap['weapon_combo'] = 'Sword and Shield'
else:
self.weap['weapon_combo'] = 'Dagger and Bow'
self.name = tkinter.Label(self.top_frame, text='My name is ' + self.char['name'] + '.')
self.weapon = tkinter.Label(self.top_frame, text='I use a ' + self.weap['weapon_combo'] + '.')
# pack top frame
main_character_pic.pack(side='top', anchor='n')
self.name.pack(side='top', anchor='n')
self.weapon.pack(side='top', anchor='n')
# buttons for bottom frame
self.con_button = tkinter.Button(self.bottom_frame, text='continue', command=self.con)
# pack bottom frame
self.con_button.pack(side='top')
# pack frames
self.top_frame.pack()
self.bottom_frame.pack()
def con(self):
self.top_frame.destroy()
self.bottom_frame.destroy()
Response(master=self.ans)
class Response:
def __init__(self, master):
# tkinter.Toplevel() is like tkinter.Frame() but it opens in a new window
self.res = master
self.res.title('Outside the dungeon...')
# create Frames for this Toplevel window
self.top_frame = tkinter.Frame(self.res)
self.bottom_frame = tkinter.Frame(self.res)
# widgets for top frame
with open("character.txt", "rb") as myFile:
self.char = pickle.load(myFile)
im = PIL.Image.open("old man.png")
photo = PIL.ImageTk.PhotoImage(im)
old_man_pic = Label(self.top_frame, image=photo)
old_man_pic.image = photo
self.old_man_label_1 = tkinter.Label(self.top_frame, text='Very well, I hope you have a more prosperous '
'delve into this tomb them the others before you ' + self.char['name'] + '.')
self.old_man_label_2 = tkinter.Label(self.top_frame, text='May the gods have mercy on your soul.')
# pack top frame
old_man_pic.pack(side='top', anchor='n')
self.old_man_label_1.pack(side='top', anchor='w')
self.old_man_label_2.pack(side='top', anchor='n')
# buttons for bottom frame
self.con_button = tkinter.Button(self.bottom_frame, text='continue', command=self.con)
# pack bottom frame
self.con_button.pack(side='top')
# pack frames
self.top_frame.pack()
self.bottom_frame.pack()
def con(self):
self.top_frame.destroy()
self.bottom_frame.destroy()
Door(master=self.res)
class Door:
def __init__(self, master):
# tkinter.Toplevel() is like tkinter.Frame() but it opens in a new window
self.door = master
self.door.title('Outside the dungeon...')
# create Frames for this Toplevel window
self.top_frame = tkinter.Frame(self.door)
self.bottom_frame = tkinter.Frame(self.door)
# widgets for top frame
self.door_1 = tkinter.Label(self.top_frame, text='You turn away from the old man without another word')
self.door_2 = tkinter.Label(self.top_frame, text='and stare at the ancient door thatstands before you.')
self.door_3 = tkinter.Label(self.top_frame, text='You know damn well that you could never come back out,')
self.door_4 = tkinter.Label(self.top_frame, text='but the tresure promised is to tempting and your greed')
self.door_5 = tkinter.Label(self.top_frame, text='begins to grow as you put your hands on the door.')
im = PIL.Image.open("download.png")
photo = PIL.ImageTk.PhotoImage(im)
door = Label(self.top_frame, image=photo)
door.image = photo # keep a reference!
# pack top frame
door.pack()
self.door_1.pack(side='top', anchor='w')
self.door_2.pack(side='top', anchor='w')
self.door_3.pack(side='top', anchor='w')
self.door_4.pack(side='top', anchor='w')
self.door_5.pack(side='top', anchor='w')
# buttons for bottom frame
self.con_button = tkinter.Button(self.bottom_frame, text='open the door', command=self.con)
# pack bottom frame
self.con_button.pack(side='top')
# pack frames
self.top_frame.pack()
self.bottom_frame.pack()
def con(self):
winsound.PlaySound('door sound.wav', winsound.SND_FILENAME)
# https://stackoverflow.com/questions/307305/play-a-sound-with-python
self.top_frame.destroy()
self.bottom_frame.destroy()
End(master=self.door)
class End:
def __init__(self, master):
# tkinter.Toplevel() is like tkinter.Frame() but it opens in a new window
self.end = master
self.end.title('Outside the dungeon...')
# create Frames for this Toplevel window
self.top_frame = tkinter.Frame(self.end)
self.bottom_frame = tkinter.Frame(self.end)
# widgets for top frame
self.end1 = tkinter.Label(self.top_frame, text='And this is the end of the demo!')
self.end2 = tkinter.Label(self.top_frame, text='I hope you enjoyed! this was an experience')
self.end3 = tkinter.Label(self.top_frame, text='to create, it was fun despite the ups and downs.')
self.end4 = tkinter.Label(self.top_frame, text='And I truly thank you for the class.')
# pack top frame
self.end1.pack()
self.end2.pack()
self.end3.pack()
self.end4.pack()
# buttons for bottom frame
self.end_button = tkinter.Button(self.bottom_frame, text='main menu', command=self.endit)
# pack bottom frame
self.end_button.pack(side='top')
# pack frames
self.top_frame.pack()
self.bottom_frame.pack()
def endit(self):
self.end.destroy()
def main():
# create a window
root = tkinter.Tk()
# call the GUI and send it the root menu
MainMenu(root)
# control the mainloop from main instead of the class
root.mainloop()
main()
| JamesNowak/final_project | final.py | final.py | py | 15,107 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Frame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
... |
27962348329 | from google.appengine.api import taskqueue
from google.appengine.api import background_thread
import json
import random
from util import *
import sys
import urllib2
def transpose(ind1, ind2):
"""
Adds a subroute to the best location of the other individual
"""
rfrom = random.randint(0, len(ind1) - 1)
rto = random.randint(rfrom + 1, len(ind1))
subroute = ind1[rfrom:rto]
new = ind2[:]
for i in ind2:
if i in subroute:
new.remove(i)
whereto = new[0]
l = len(subroute)
mindistance = distance(new[0][1], new[0][2], subroute[l-1][1], subroute[l-1][2])
for i in new:
newdistance = distance(i[1], i[2], subroute[l-1][1], subroute[l-1][2])
if newdistance < mindistance:
whereto = i
mindistance = newdistance
new = new[0: new.index(whereto)] + subroute + new[new.index(whereto):len(new)]
return new
def swap(old):
"""
Exchanges 2 clients
"""
ind = old[:]
client1 = random.randint(0, len(ind) - 1)
client2 = random.randint(0, len(ind) - 1)
tmp = ind[client1]
ind[client1] = ind[client2]
ind[client2] = tmp
return ind
def mutate():
"""
Function responsible for the mutation
"""
queue = taskqueue.Queue('pull-queue')
# we take one task
tasks = queue.lease_tasks(3600, 1)
#print "Mutate", len(tasks)
#print >>sys.stderr, len(tasks)
#if any task was taken
if len(tasks) > 0:
old = json.loads(tasks[0].payload)
new = swap(old)
newtasks = []
if fitness(old) < fitness(new):
payload_str = json.dumps(old)
else:
payload_str = json.dumps(new)
newfit = fitness(new)
#print "Mutation", fitness(new)
save_to_cache(newfit)
# we can't save it to the database
#db.run_in_transaction(txn, newfit)
newtasks.append(taskqueue.Task(payload=payload_str, method='PULL'))
queue.delete_tasks(tasks)
queue.add(newtasks)
def cross():
"""
Function responsible for the cross-over
"""
queue = taskqueue.Queue('pull-queue')
# we take one task
tasks = queue.lease_tasks(3600, 2)
#print "Cross",len(tasks)
if len(tasks) == 2:
ind1 = json.loads(tasks[0].payload)
ind2 = json.loads(tasks[1].payload)
child1 = transpose(ind1, ind2)
child2 = transpose(ind2, ind1)
# we choose the 2 best
possible = [ind1, ind2, child1, child2]
fits = [fitness(ind1), fitness(ind2), fitness(child1), fitness(child2)]
best = min(fits)
ret1 = possible[fits.index(best)]
possible.remove(ret1)
fits.remove(best)
best = min(fits)
ret2 = possible[fits.index(best)]
possible.remove(ret2)
fits.remove(best)
newfit1 = fitness(ret1)
newfit2 = fitness(ret2)
if newfit1 < newfit2:
save_to_cache(newfit1)
else:
save_to_cache(newfit2)
newtasks = []
#print "Crossover", fitness(ret1), fitness(ret2)
newtasks.append(taskqueue.Task(payload=json.dumps(ret1), method='PULL'))
newtasks.append(taskqueue.Task(payload=json.dumps(ret2), method='PULL'))
queue.delete_tasks(tasks)
queue.add(newtasks)
elif len(tasks) == 1:
# if only one then we cannot crossover
queue.delete_tasks(tasks)
queue.add([taskqueue.Task(payload=tasks[0].payload, method='PULL')])
def f():
while True:
steps = get_and_update()
if steps is not None and steps > 0:
if random.random() > 0.5:
mutate()
else:
cross()
# starts the background thread that randomly mutates or crossovers
t = background_thread.BackgroundThread(target=f)
t.start()
| domjullier/gae_tsp | worker.py | worker.py | py | 3,848 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "random.randint",
"li... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.