Dataset Viewer
Auto-converted to Parquet Duplicate
index
int64
0
1,000k
blob_id
stringlengths
40
40
code
stringlengths
7
10.4M
0
aff1a9263e183610f403a4d6a7f27b45eacb7ff2
name='valentina ' print(name*1000)
1
eabf06481509962652812af67ad59da5cfe30fae
""" mupub module. """ __all__ = ( '__title__', '__summary__', '__version__', '__author__', '__license__', '__copyright__', ) __title__ = 'mupub' __summary__ = 'Musical score publishing utility for the Mutopia Project' """Versioning: This utility follows a MAJOR . MINOR . EDIT format. Upon a major release, the MAJOR number is incremented and the MINOR is zeroed. During development of an upcoming release, the MINOR number may be incremented. """ __version__ = '1.0.8' __author__ = 'Glen Larsen, Chris Sawer' __author_email__= 'glenl.glx@gmail.com' __uri__ = 'http://mutopiaproject.org/' __license__ = 'MIT' __copyright__ = 'Copyright 2018 The Mutopia Project' from .assets import collect_assets from .commands.build import build from .commands.check import check from .commands.init import init from .commands.tag import tag from .commands.clean import clean from .config import CONFIG_DICT, CONFIG_DIR, getDBPath from .config import test_config, saveConfig from .core import MUTOPIA_BASE, FTP_BASE, URL_BASE from .core import id_from_footer from .exceptions import BadConfiguration, IncompleteBuild, TagProcessException from .header import Loader, LYLoader, VersionLoader from .header import RawLoader, Header, REQUIRED_FIELDS from .header import find_header from .lily import LyLocator, LyVersion from .validate import Validator, DBValidator, in_repository from .tagedit import tag_header, tag_file from .rdfu import NS, MuRDF from .utils import resolve_input,resolve_lysfile
2
54f0ed5f705d5ada28721301f297b2b0058773ad
"""Module for the bot""" from copy import deepcopy from time import sleep import mcpi.minecraft as minecraft from mcpi.vec3 import Vec3 import mcpi.block as block from search import SearchProblem, astar, bfs from singleton import singleton _AIR = block.AIR.id _WATER = block.WATER.id _LAVA = block.LAVA.id _BEDROCK = block.BEDROCK.id _DROP = 2 # It can drop at most this many _DROP_PLUS_1 = _DROP + 1 _DELAY = 1 class _Vec3(Vec3): """A Vec3 that is hashable. Everything in this program should use this class.""" def __hash__(self): """Return the hash.""" return hash((self.x, self.y, self.z)) def clone(self): """Return a clone.""" return _Vec3(self.x, self.y, self.z) class _GenericBot: """A generic bot.""" def __init__(self, pos, inventory=None): """Initialize with an empty inventory. inventory is a dictionary. If None, an empty one will be used.""" if inventory is None: self._inventory = {} else: self._inventory = deepcopy(inventory) self._pos = deepcopy(pos) def take_action(self, action): """Take the action (acquired from _get_legal_actions).""" getattr(self, action['func'])( *action.get('args', ()), **action.get('kwargs', {}) ) def take_actions(self, actions, seconds=None): """Take these actions. If seconds is not None, sleep 'seconds' seconds. """ if not actions: return self.take_action(actions[0]) for action in actions[1:]: if seconds is not None: sleep(seconds) self.take_action(action) def get_pos(self): """Return the position.""" return deepcopy(self._pos) def get_legal_actions(self, block_=None): """Return a list of legal actions. If block_ is None, return all legal actions. 
Otherwise, return all legal actions that don't involve placing the block.""" return self._get_move_actions(block_) + self._get_mine_actions() + \ self._get_placement_actions(block_) def contains(self, block_): """Return whether or not the bot contains the block id.""" return block_ in self._inventory def _get_block(self, pos): """Get the block at the position.""" raise NotImplementedError def _place(self, loc, exclude=None, block_=None): """Place a block from the inventory only. If exclude is not None, place a block that is not 'exclude'. If block is not None, place that block only. """ if not self._inventory: raise Exception('Inventory empty') if block_ is None: for key in self._inventory: if key != exclude: block_ = key break else: raise Exception(( 'You requested not to place %s, but it is the only ' 'block in the inventory.' % exclude )) if block_ not in self._inventory: raise Exception('Block %s is not in the inventory' % block_) if self._inventory[block_] == 1: del self._inventory[block_] else: self._inventory[block_] -= 1 self._set_block(loc, block_) def _move_down(self): """Move and mine the block below.""" new_pos = self._pos + _Vec3(0, -1, 0) block_ = self._get_block(new_pos) if block_ != _WATER: self._add_to_inv(block_) self._move(new_pos) def _add_to_inv(self, block_): """Add the block to the inventory.""" if block_ in self._inventory: self._inventory[block_] += 1 else: self._inventory[block_] = 1 def _move_up(self, exclude=None): """Move and place a block below. If exclude is not None, place a block that is not 'exclude'. """ self._move(self._pos + _Vec3(0, 1, 0)) self._place(self._pos + _Vec3(0, -1, 0), exclude) def _mine(self, loc): """Mine the block.""" block_ = self._get_block(loc) self._add_to_inv(block_) self._set_block(loc, _AIR) def _get_move_actions(self, exclude=None): """Return a list of legal movement actions. exclude is the block to exclude. 
""" rtn = [] # Check for moving up can_move_up = self._get_block(self._pos + _Vec3(0, 2, 0)) in {_AIR, _WATER} if can_move_up: if self._surrounded(): rtn.append({ 'func': '_move', 'args': (self._pos + _Vec3(0, 1, 0),) }) else: rtn.append({ 'func': '_move_up', 'args': (exclude,) }) # Check for moving down hidden_block = self._get_block(self._pos + _Vec3(0, -2, 0)) if hidden_block == _WATER or hidden_block not in {_AIR, _LAVA}: rtn.append({'func': '_move_down'}) # Check for side moves for dir_ in _adj_dirs(): rtn.extend(self._side_moves(dir_, can_move_up)) return rtn def _side_moves(self, dir_, can_move_up): """Return the list of side moves. dir_ is an adjacent direction. can_move_up is a boolean for whether or not the bot can move up. """ rtn = [] base_pos = self._pos + dir_ base_block = self._get_block(base_pos) empty_blocks = {_AIR, _WATER} # Check if it can move up if can_move_up and base_block not in {_AIR, _LAVA, _WATER}: for vert_dir in [_Vec3(0, 1, 0), _Vec3(0, 2, 0)]: if self._get_block(base_pos + vert_dir) not in empty_blocks: break else: rtn.append({ 'func': '_move', 'args': (base_pos + _Vec3(0, 1, 0),) }) # Check if it can move in that direction for vert_dir in [_Vec3(), _Vec3(0, 1, 0)]: if self._get_block(base_pos + vert_dir) not in empty_blocks: break # Fall else: pos = base_pos + _Vec3(0, -1, 0) for _ in xrange(_DROP_PLUS_1): block_ = self._get_block(pos) if block_ != _AIR: if block_ != _LAVA: rtn.append({ 'func': '_move', 'args': (pos + _Vec3(0, 1, 0),) }) break pos.y -= 1 def _surrounded(self): """Return whether or not the bot is surrounded by water.""" for dir_ in _adj_dirs(): if self._get_block(self._pos + dir_) != _WATER: return False return True def _get_mine_actions(self): """Return a list of legal mining actions (that only involve mining and not moving).""" rtn = [] dont_mine = {_AIR, _WATER, _LAVA} # Mine above. 
pos_above = self._pos + _Vec3(0, 2, 0) if self._get_block(pos_above) not in dont_mine: rtn.append({ 'func': '_mine', 'args': (pos_above,) }) for dir_ in _adj_dirs(): pos = self._pos + dir_ for _ in xrange(2): if self._get_block(pos) not in dont_mine: rtn.append({ 'func': '_mine', 'args': (pos,) }) pos = pos + _Vec3(0, 1, 0) return rtn def _get_placement_actions(self, exclude=None): """Return a list of legal actions that only involve placing a block from the inventory. exclude is a block id. It is the block that should not be placed. If None, any block can be placed.""" if not self._has_blocks_to_place(exclude=exclude): return [] dirs = [_Vec3(0, 2, 0)] for dir_ in _adj_dirs(): dirs.extend([dir_, dir_ + _Vec3(0, 1, 0)]) if self._get_block(self._pos + dir_) in [_AIR, _WATER]: dirs.append(dir_ + _Vec3(0, -1, 0)) rtn = [] for dir_ in dirs: pos = self._pos + dir_ if self._can_place(pos): rtn.append({ 'func': '_place', 'args': (pos,), 'kwargs': {'exclude': exclude} }) return rtn def _can_place(self, loc): """Return whether or not the bot can place a block at that location independent of what it has in its inventory.""" non_blocks = [_AIR, _WATER, _LAVA] player = [self._pos, self._pos + _Vec3(0, 1, 0)] for dir_ in _adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]: new_loc = loc + dir_ if new_loc not in player and self._get_block(new_loc) \ not in non_blocks: return True return False def _has_blocks_to_place(self, exclude=None): """Return whether or not the bot can place a block from the inventory. If exclude is None, any block can be placed.""" for block_ in self._inventory: if block_ != exclude: return True return False def _set_block(self, pos, block_): """Set a block. 
block_ is the block id.""" raise NotImplementedError def _move(self, pos): """Move there only.""" self._pos = deepcopy(pos) class _ImaginaryBot(_GenericBot): """A bot used for finding paths that doesn't actually change blocks in the world.""" def __init__(self, pos, inventory=None): """Create a new bot.""" _GenericBot.__init__(self, pos, inventory) self._changes = {} # Changes to the world def _set_block(self, pos, block_): """Set a block. block_ is the block id.""" self._changes[deepcopy(pos)] = block def _get_block(self, pos): """Get the block at the position.""" if pos in self._changes: return self._changes[pos] else: return _get_mc().getBlock(pos) def get_block(self, pos): """The public version.""" return self._get_block(pos) def __hash__(self): """Return the hash.""" return hash(frozenset([self._pos] + \ _key_vals(self._inventory) + \ _key_vals(self._changes) )) class Bot(_GenericBot): """The real bot. All vector arguments are Vec3s.""" _BOT_BLOCK = block.IRON_BLOCK.id def __init__(self): """Create a bot next to the player.""" pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0) pos = _Vec3(pos.x, pos.y, pos.z) _GenericBot.__init__(self, pos) self._pos = pos self._move(self._pos) @staticmethod def destroy_all(): """Destroy all bots within a small distance (in case I forget to destroy one).""" player_loc = _player_loc() minec = _get_mc() rad = 10 for x in xrange(player_loc.x - rad, player_loc.x + rad): for y in xrange(player_loc.y - rad, player_loc.y + rad): for z in xrange(player_loc.z - rad, player_loc.z + rad): if minec.getBlock(x, y, z) == Bot._BOT_BLOCK: minec.setBlock(x, y, z, _AIR) def destroy(self): """Set itself to air.""" self._set_block(self._pos, _AIR) self._set_block(self._pos + _Vec3(0, 1, 0), _AIR) def fetch(self, block_name): """Mine and return a block to the player.""" imag_bot = _ImaginaryBot(self._pos, self._inventory) block_id = getattr(block, block_name).id block_loc = self._get_block_loc(block_id) mine_prob = _MineProblem(imag_bot, 
block_loc, block_id) mine_actions = astar(mine_prob, _mine_heuristic) self.take_actions(mine_actions, _DELAY) imag_bot = _ImaginaryBot(self._pos, self._inventory) player_loc = _player_loc() return_prob = _ReturnProblem(imag_bot, block_id, player_loc) return_actions = astar(return_prob, _return_heuristic) imag_bot.take_actions(return_actions) return_actions.append({ 'func': '_place', 'args': (imag_bot.get_pos() + player_loc) / 2, 'kwargs': {'block': block_id} }) self.take_actions(return_actions, _DELAY) def _get_block_loc(self, block_id): """Return the location of the block.""" find_prob = FindProblem(self._pos, block_id) dirs = bfs(find_prob) return self._pos + sum(dirs) def _set_block(self, pos, block_): """Place an actual block in the world. block is a block id.""" _get_mc().setBlock(pos, block_) def _get_block(self, pos): """Get the block at the position.""" return _get_mc().getBlock(pos) def _move(self, pos): """Move there, and set the appropriate blocks.""" self._set_block(self._pos, _AIR) self._set_block(self._pos + _Vec3(0, 1, 0), _AIR) self._set_block(pos, self._BOT_BLOCK) self._set_block(pos + _Vec3(0, 1, 0), self._BOT_BLOCK) self._pos = pos class FindProblem(SearchProblem): """Problem for finding the location of a block in the world. A state in this problem is a location. 
""" def __init__(self, start_loc, block_id): """Initialize.""" self._start_loc = deepcopy(start_loc) self._block_id = block_id def getStartState(self): """Return the starting location.""" return self._start_loc def isGoalState(self, state): return _get_mc().getBlock(state) == self._block_id def getSuccessors(self, state): """Return the successors.""" rtn = [] for dir_ in _all_dirs(): successor = state + dir_ if successor.y <= _get_mc().getHeight(successor.x, successor.z) \ and _get_mc().getBlock(successor) != _BEDROCK: rtn.append((successor, dir_, 1)) return rtn class _MineProblem(SearchProblem): """The problem of finding the block and mining it (not returning it).""" def __init__(self, imag_bot, block_loc, block_id): """Initialize the problem with an _ImaginaryBot. block_loc is a Vec3. """ self._bot = imag_bot self._block_loc = deepcopy(block_loc) self._block_id = block_id def get_block_loc(self): """Return the block location.""" return deepcopy(self._block_loc) def get_block_id(self): """Return the block it's trying to mine.""" return self._block_id def getStartState(self): """Return the bot passed in.""" return self._bot def isGoalState(self, state): """Return whether or not the bot has the block.""" return state.contains(self._block_id) def getSuccessors(self, state): """Return the successors.""" rtn = [] for action in state.get_legal_actions(): successor = deepcopy(state) successor.take_action(action) rtn.append((successor, action, 1)) return rtn class _ReturnProblem(SearchProblem): """The problem of returning to the player. This does not place the block next to the player.""" def __init__(self, imag_bot, block_, player_loc): """Initialized the problem with an _ImaginaryBot. 
block is a block id.""" self._bot = imag_bot self._block = block_ self._player_loc = player_loc def get_player_loc(self): """Return the player location.""" return deepcopy(self._player_loc) def getStartState(self): """Return the bot passed in.""" return self._bot def isGoalState(self, state): """Return whether or not the bot is next to the player.""" diff = state.get_pos() - self._player_loc return diff.y == 0 and (diff.x == 0 or diff.z == 0) and \ abs(diff.x) + abs(diff.z) == 2 and \ state.get_block(self._player_loc + diff/2 + _Vec3(0, -1, 0)) not in \ (_AIR, _LAVA, _WATER) def getSuccessors(self, state): """Return the successors.""" rtn = [] for action in state.get_legal_actions(self._block): successor = deepcopy(state) successor.take_action(action) rtn.append((successor, action, 1)) return rtn def _mine_heuristic(bot, problem): """Return the mining heuristic. bot is an _ImaginaryBot. """ if bot.contains(problem.get_block_id()): return 0 bot_pos = bot.get_pos() dest_pos = problem.get_block_loc() # If man == dy: return man + 1 # If man > dy: return man # If man < dy: return dy? man_dist = _manhattan((bot_pos.x, bot_pos.z), (dest_pos.x, dest_pos.z)) y_diff = bot_pos.y - dest_pos.y if y_diff < 0: y_diff += 1 if y_diff == 0: return man_dist # Transform so that it's only dropping drop = _DROP if y_diff > 0 else 1 y_diff = abs(y_diff) drops = _drops(y_diff, drop) if man_dist > drops: return man_dist if man_dist == drops: return man_dist + 1 if drop == 1: return drops if y_diff % drop == 1: return drops return drops + 1 def _drops(dist, drop): """Return the number of times it takes to drop a distance dist. drop is the length of one drop. Both are assumed positive.""" rtn = dist / drop if dist % drop != 0: rtn += 1 return rtn def _return_heuristic(bot, problem): """Return the return heuristic. bot is an _ImaginaryBot. 
""" bot_pos = bot.get_pos() player_pos = problem.get_player_loc() bot_plane_pos = (bot.x, bot.z) y_diff = bot_pos.y - player_pos.y drop = _DROP if y_diff > 0 else 1 y_diff = abs(y_diff) drops = _drops(y_diff, drop) min_man = float('inf') for dir_ in _adj_dirs(): loc = player_pos + 2 * dir_ man_dist = _manhattan(bot_plane_pos, (loc.x, loc.z)) if man_dist < min_man: min_man = man_dist if man_dist < drops: return drops return min_man def _to_my_vec3(vec): """Return the _Vec3 alternative of the Vec3.""" return _Vec3(vec.x, vec.y, vec.z) def _player_loc(): """Return the player's location.""" return _to_my_vec3(_get_mc().player.getTilePos()) def _adj_dirs(): """Return the adjacent directions.""" return [_Vec3(1, 0, 0), _Vec3(-1, 0, 0), _Vec3(0, 0, 1), _Vec3(0, 0, -1)] def _all_dirs(): """Return all adjacent directions.""" return _adj_dirs() + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)] def _manhattan(pos1, pos2): """Return the manhattan distance. pos1 and pos2 should be iterable.""" return sum(abs(val1 - val2) for val1, val2 in zip(pos1, pos2)) @singleton def _get_mc(): """Return the Minecraft instance.""" return minecraft.Minecraft.create() def _key_vals(dict_): """Return a list of key-val tuples.""" return [(key, val) for key, val in dict_.iteritems()]
3
45969b346d6d5cbdef2f5d2f74270cf12024072d
# Generated by Django 4.1.9 on 2023-06-29 16:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("search", "0003_auto_20230209_1441"), ] operations = [ migrations.CreateModel( name="SearchSettings", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ], options={ "permissions": ( ("change_boost", "Edit boost settings for search components"), ("view_explore", "View the global search explore page"), ), "managed": False, "default_permissions": (), }, ), ]
4
3fbf1768a2fe78df591c49490dfce5fb374e7fc2
from functools import wraps import os def restoring_chdir(fn): #XXX:dc: This would be better off in a neutral module @wraps(fn) def decorator(*args, **kw): try: path = os.getcwd() return fn(*args, **kw) finally: os.chdir(path) return decorator class BaseBuilder(object): """ The Base for all Builders. Defines the API for subclasses. All workflow steps need to return true, otherwise it is assumed something went wrong and the Builder will stop """ workflow = ['clean', 'build', 'move'] def __init__(self, version): self.version = version def run(self): for step in self.workflow: fn = getattr(self, step) result = fn() assert result @restoring_chdir def force(self): """ An optional step to force a build even when nothing has changed. """ print "Forcing a build by touching files" os.chdir(self.version.project.conf_dir(self.version.slug)) os.system('touch * && touch */*') def clean(self): """ Clean up the version so it's ready for usage. This is used to add RTD specific stuff to Sphinx, and to implement whitelists on projects as well. It is guaranteed to be called before your project is built. """ raise NotImplementedError def build(self): """ Do the actual building of the documentation. """ raise NotImplementedError def move(self): """ Move the documentation from it's generated place to its final home. This needs to understand both a single server dev environment, as well as a multi-server environment. """ raise NotImplementedError @property def changed(self): """ Says whether the documentation has changed, and requires further action. This is mainly used to short-circuit more expensive builds of other output formats if the project docs didn't change on an update. Subclasses are recommended to override for more efficient builds. Defaults to `True` """ return True
5
67b967b688aeac1270eee836e0f6e6b3555b933e
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ This program is run at regular intervals to check the battery charge status of the uninterruptible power supply. In our case, it is a LiPo battery with a nominal voltage of 3.7 volts. By setting the voltage for the Raspberry PI shutdown procedure at 3.7 V,we ensure that the processor has enough time to make a clean shutdown. This program must be launched at regular intervals (5 inute in our case) by the Raspberry PI OS cron task scheduler. The crontab -e command in the home directory opens the cron file and the command line would for example be for a trigger every 5 minutes: 5 * * * * sudo /usr/bin/python3 /home/pi/dev_python/amod/pidcmes_bbu.py """ import time import datetime as dt from subprocess import call from pidcmes_lib import Pidcmes # class for 'pidcmes' procedures pidcmes = Pidcmes() # initialize pidcmese class u_bat_min = 3.7 # minumum battery voltage n_moy = 20 # averaging to reduce glitches stop_run = False # to control the execution (run/stop) u_avg = pidcmes.get_tension(n_moy) # read the value in volts if u_avg < u_bat_min:# or i > 10: print("proper shut down of the machine due to low battery") # time.sleep(5) # call("sudo shutdown -h now", shell=True) # shutdown the RASPI else: print("tout va bien dormez braves gens")
6
c59707ba07c1659d94684c54cdd7bb2658cba935
from __future__ import division, print_function, absolute_import import numbers import warnings from abc import ABCMeta, abstractmethod import numpy as np from .base import check_frame from skutil.base import overrides from sklearn.externals import six from sklearn.base import _pprint from sklearn.utils.fixes import signature, bincount from sklearn.utils import check_random_state from math import ceil, floor try: from h2o import H2OEstimator except ImportError: from h2o.estimators.estimator_base import H2OEstimator try: from sklearn.model_selection import KFold SK18 = True except ImportError: from sklearn.cross_validation import KFold SK18 = False __all__ = [ 'check_cv', 'h2o_train_test_split', 'H2OKFold', 'H2OShuffleSplit', 'H2OStratifiedKFold', 'H2OStratifiedShuffleSplit' ] def _build_repr(self): # XXX This is copied from sklearn.BaseEstimator's get_params cls = self.__class__ init = getattr(cls.__init__, 'deprecated_original', cls.__init__) init_signature = signature(init) if init is object.__init__: args = [] else: args = sorted([p.name for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD]) class_name = self.__class__.__name__ params = dict() for key in args: warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: continue finally: warnings.filters.pop(0) params[key] = value return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name))) def check_cv(cv=3): """Checks the ``cv`` parameter to determine whether it's a valid int or H2OBaseCrossValidator. Parameters ---------- cv : int or H2OBaseCrossValidator, optional (default=3) The number of folds or the H2OBaseCrossValidator instance. 
Returns ------- cv : H2OBaseCrossValidator The instance of H2OBaseCrossValidator """ if cv is None: cv = 3 if isinstance(cv, numbers.Integral): return H2OKFold(cv) if not isinstance(cv, H2OBaseCrossValidator): raise ValueError('expected int or instance of ' 'H2OBaseCrossValidator but got %s' % type(cv)) return cv def h2o_train_test_split(frame, test_size=None, train_size=None, random_state=None, stratify=None): """Splits an H2OFrame into random train and test subsets Parameters ---------- frame : H2OFrame The h2o frame to split test_size : float, int, or None (default=None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25 train_size : float, int, or None (default=None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. stratify : str or None (default=None) The name of the target on which to stratify the sampling Returns ------- out : tuple, shape=(2,) training_frame : H2OFrame The training fold split testing_frame : H2OFrame The testing fold split """ frame = check_frame(frame, copy=False) if test_size is None and train_size is None: test_size = 0.25 if stratify is not None: CVClass = H2OStratifiedShuffleSplit else: CVClass = H2OShuffleSplit cv = CVClass(n_splits=2, test_size=test_size, train_size=train_size, random_state=random_state) # for the h2o one, we only need iter 0 tr_te_tuples = [(tr, te) for tr, te in cv.split(frame, stratify)][0] # h2o "doesn't reorder rows" so we need to keep these sorted... 
train, test = sorted(list(tr_te_tuples[0])), sorted(list(tr_te_tuples[1])) out = ( frame[train, :], frame[test, :] ) return out # Avoid a pb with nosetests... h2o_train_test_split.__test__ = False def _val_y(y): if isinstance(y, six.string_types): return str(y) elif y is None: return y raise TypeError('y must be a string. Got %s' % y) class H2OBaseCrossValidator(six.with_metaclass(ABCMeta)): """Base class for H2O cross validation operations. All implementing subclasses should override ``get_n_splits`` and ``_iter_test_indices``. """ def __init__(self): pass def split(self, frame, y=None): """Generate indices to split data into training and test. Parameters ---------- frame : ``H2OFrame`` The h2o frame to split y : str, optional (default=None) The name of the column to stratify, if applicable. Returns ------- train : ndarray The training set indices for the split test : ndarray The testing set indices for that split """ frame = check_frame(frame, copy=False) indices = np.arange(frame.shape[0]) for test_index in self._iter_test_masks(frame, y): train_index = indices[np.logical_not(test_index)] test_index = indices[test_index] # h2o can't handle anything but lists... yield list(train_index), list(test_index) def _iter_test_masks(self, frame, y=None): """Generates boolean masks corresponding to the tests set. Parameters ---------- frame : H2OFrame The h2o frame to split y : string, optional (default=None) The column to stratify. Returns ------- test_mask : np.ndarray, shape=(n_samples,) The indices for the test split """ for test_index in self._iter_test_indices(frame, y): test_mask = np.zeros(frame.shape[0], dtype=np.bool) test_mask[test_index] = True yield test_mask def _iter_test_indices(self, frame, y=None): raise NotImplementedError('this method must be implemented by a subclass') @abstractmethod def get_n_splits(self): """Get the number of splits or folds for this instance of the cross validator. 
""" pass def __repr__(self): return _build_repr(self) def _validate_shuffle_split_init(test_size, train_size): """Validation helper to check the test_size and train_size at init""" if test_size is None and train_size is None: raise ValueError('test_size and train_size can not both be None') if test_size is not None: if np.asarray(test_size).dtype.kind == 'f': if test_size >= 1.: raise ValueError( 'test_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif np.asarray(test_size).dtype.kind != 'i': raise ValueError('Invalid value for test_size: %r' % test_size) if train_size is not None: if np.asarray(train_size).dtype.kind == 'f': if train_size >= 1.: raise ValueError( 'train_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif (np.asarray(test_size).dtype.kind == 'f' and (train_size + test_size) > 1.): raise ValueError('The sum of test_size and train_size = %f' 'should be smaller than 1.0. Reduce test_size ' 'and/or train_size.' % (train_size + test_size)) elif np.asarray(train_size).dtype.kind != 'i': raise ValueError('Invalid value for train_size: %r' % train_size) def _validate_shuffle_split(n_samples, test_size, train_size): if test_size is not None and np.asarray(test_size).dtype.kind == 'i' and test_size >= n_samples: raise ValueError('test_size=%d should be smaller ' 'than the number of samples %d' % (test_size, n_samples)) if train_size is not None and np.asarray(train_size).dtype.kind == 'i' and train_size >= n_samples: raise ValueError('train_size=%d should be smaller ' 'than the number of samples %d' % (train_size, n_samples)) if np.asarray(test_size).dtype.kind == 'f': n_test = ceil(test_size * n_samples) elif np.asarray(test_size).dtype.kind == 'i': n_test = float(test_size) if train_size is None: n_train = n_samples - n_test elif np.asarray(train_size).dtype.kind == 'f': n_train = floor(train_size * n_samples) else: n_train = float(train_size) if test_size is None: n_test = n_samples - n_train if n_train + 
n_test > n_samples: raise ValueError('The sum of train_size and test_size=%d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n_samples)) return int(n_train), int(n_test) class H2OBaseShuffleSplit(six.with_metaclass(ABCMeta)): """Base class for H2OShuffleSplit and H2OStratifiedShuffleSplit. This is used for ``h2o_train_test_split`` in strategic train/test splits of H2OFrames. Implementing subclasses should override ``_iter_indices``. Parameters ---------- n_splits : int, optional (default=2) The number of folds or splits in the split test_size : float or int, optional (default=0.1) The ratio of observations for the test fold train_size : float or int, optional (default=None) The ratio of observations for the train fold random_state : int or RandomState, optional (default=None) The random state for duplicative purposes. """ def __init__(self, n_splits=2, test_size=0.1, train_size=None, random_state=None): _validate_shuffle_split_init(test_size, train_size) self.n_splits = n_splits self.test_size = test_size self.train_size = train_size self.random_state = random_state def split(self, frame, y=None): """Split the frame. Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. """ for train, test in self._iter_indices(frame, y): yield train, test @abstractmethod def _iter_indices(self, frame, y): """Abstract method for iterating the indices. Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. """ pass def get_n_splits(self): """Get the number of splits or folds for this instance of the shuffle split. """ return self.n_splits def __repr__(self): return _build_repr(self) class H2OShuffleSplit(H2OBaseShuffleSplit): """Default shuffle splitter used for ``h2o_train_test_split``. 
This shuffle split class will not perform any stratification, and will simply shuffle indices and split into the number of specified sub-frames. """ def _iter_indices(self, frame, y=None): """Iterate the indices. Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. Since this class does not perform stratification, ``y`` is unused. Returns ------- ind_train : np.ndarray, shape=(n_samples,) The train indices ind_test : np.ndarray, shape=(n_samples,) The test indices """ n_samples = frame.shape[0] n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size) rng = check_random_state(self.random_state) for i in range(self.n_splits): permutation = rng.permutation(n_samples) ind_test = permutation[:n_test] ind_train = permutation[n_test:(n_test + n_train)] yield ind_train, ind_test class H2OStratifiedShuffleSplit(H2OBaseShuffleSplit): """Shuffle splitter used for ``h2o_train_test_split`` when stratified option is specified. This shuffle split class will perform stratification. """ def _iter_indices(self, frame, y): """Iterate the indices with stratification. Parameters ---------- frame : H2OFrame The frame to split y : string The column to stratify. Returns ------- train : np.ndarray, shape=(n_samples,) The train indices test : np.ndarray, shape=(n_samples,) The test indices """ n_samples = frame.shape[0] n_train, n_test = _validate_shuffle_split(n_samples, self.test_size, self.train_size) # need to validate y... y = _val_y(y) target = np.asarray(frame[y].as_data_frame(use_pandas=True)[y].tolist()) classes, y_indices = np.unique(target, return_inverse=True) n_classes = classes.shape[0] class_counts = bincount(y_indices) if np.min(class_counts) < 2: raise ValueError('The least populated class in y has only 1 ' 'member, which is too few. 
The minimum number of labels ' 'for any class cannot be less than 2.') if n_train < n_classes: raise ValueError('The train_size=%d should be greater than or ' 'equal to the number of classes=%d' % (n_train, n_classes)) if n_test < n_classes: raise ValueError('The test_size=%d should be greater than or ' 'equal to the number of classes=%d' % (n_test, n_classes)) rng = check_random_state(self.random_state) p_i = class_counts / float(n_samples) n_i = np.round(n_train * p_i).astype(int) t_i = np.minimum(class_counts - n_i, np.round(n_test * p_i).astype(int)) for _ in range(self.n_splits): train = [] test = [] for i, class_i in enumerate(classes): permutation = rng.permutation(class_counts[i]) perm_indices_class_i = np.where((target == class_i))[0][permutation] train.extend(perm_indices_class_i[:n_i[i]]) test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]]) # Might end up here with less samples in train and test than we asked # for, due to rounding errors. if len(train) + len(test) < n_train + n_test: missing_indices = np.where(bincount(train + test, minlength=len(target)) == 0)[0] missing_indices = rng.permutation(missing_indices) n_missing_train = n_train - len(train) n_missing_test = n_test - len(test) if n_missing_train > 0: train.extend(missing_indices[:n_missing_train]) if n_missing_test > 0: test.extend(missing_indices[-n_missing_test:]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def split(self, frame, y): """Split the frame with stratification. Parameters ---------- frame : H2OFrame The frame to split y : string The column to stratify. """ return super(H2OStratifiedShuffleSplit, self).split(frame, y) class _H2OBaseKFold(six.with_metaclass(ABCMeta, H2OBaseCrossValidator)): """Base class for KFold and Stratified KFold. 
Parameters ---------- n_folds : int The number of splits shuffle : bool Whether to shuffle indices random_state : int or RandomState The random state for the split """ @abstractmethod def __init__(self, n_folds, shuffle, random_state): if not isinstance(n_folds, numbers.Integral): raise ValueError('n_folds must be of Integral type. ' '%s of type %s was passed' % (n_folds, type(n_folds))) n_folds = int(n_folds) if n_folds <= 1: raise ValueError('k-fold cross-validation requires at least one ' 'train/test split by setting n_folds=2 or more') if shuffle not in [True, False]: raise TypeError('shuffle must be True or False. Got %s (type=%s)' % (str(shuffle), type(shuffle))) self.n_folds = n_folds self.shuffle = shuffle self.random_state = random_state @overrides(H2OBaseCrossValidator) def split(self, frame, y=None): """Split the frame. Parameters ---------- frame : H2OFrame The frame to split y : string, optional (default=None) The column to stratify. """ frame = check_frame(frame, copy=False) n_obs = frame.shape[0] if self.n_folds > n_obs: raise ValueError('Cannot have n_folds greater than n_obs') for train, test in super(_H2OBaseKFold, self).split(frame, y): yield train, test @overrides(H2OBaseCrossValidator) def get_n_splits(self): """Get the number of splits or folds. Returns ------- n_folds : int The number of folds """ return self.n_folds class H2OKFold(_H2OBaseKFold): """K-folds cross-validator for an H2OFrame. 
Parameters ---------- n_folds : int, optional (default=3) The number of splits shuffle : bool, optional (default=False) Whether to shuffle indices random_state : int or RandomState, optional (default=None) The random state for the split """ def __init__(self, n_folds=3, shuffle=False, random_state=None): super(H2OKFold, self).__init__(n_folds, shuffle, random_state) @overrides(_H2OBaseKFold) def _iter_test_indices(self, frame, y=None): n_obs = frame.shape[0] indices = np.arange(n_obs) if self.shuffle: check_random_state(self.random_state).shuffle(indices) n_folds = self.n_folds fold_sizes = (n_obs // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n_obs % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield indices[start:stop] current = stop class H2OStratifiedKFold(_H2OBaseKFold): """K-folds cross-validator for an H2OFrame with stratified splits. Parameters ---------- n_folds : int, optional (default=3) The number of splits shuffle : bool, optional (default=False) Whether to shuffle indices random_state : int or RandomState, optional (default=None) The random state for the split """ def __init__(self, n_folds=3, shuffle=False, random_state=None): super(H2OStratifiedKFold, self).__init__(n_folds, shuffle, random_state) def split(self, frame, y): """Split the frame with stratification. Parameters ---------- frame : H2OFrame The frame to split y : string The column to stratify. 
""" return super(H2OStratifiedKFold, self).split(frame, y) def _iter_test_masks(self, frame, y): test_folds = self._make_test_folds(frame, y) for i in range(self.n_folds): yield test_folds == i def _make_test_folds(self, frame, y): if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state # validate that it's a string y = _val_y(y) # gets a string back or None if y is None: raise ValueError('H2OStratifiedKFold requires a target name (got None)') target = frame[y].as_data_frame(use_pandas=True)[y].values n_samples = target.shape[0] unique_y, y_inversed = np.unique(target, return_inverse=True) y_counts = bincount(y_inversed) min_labels = np.min(y_counts) if np.all(self.n_folds > y_counts): raise ValueError(('All the n_labels for individual classes' ' are less than %d folds.' % self.n_folds), Warning) if self.n_folds > min_labels: warnings.warn(('The least populated class in y has only %d' ' members, which is too few. The minimum' ' number of labels for any class cannot' ' be less than n_folds=%d.' % (min_labels, self.n_folds)), Warning) # NOTE FROM SKLEARN: # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each class so as to respect the balance of # classes # NOTE: Passing the data corresponding to ith class say X[y==class_i] # will break when the data is not 100% stratifiable for all classes. # So we pass np.zeroes(max(c, n_folds)) as data to the KFold. # Remember, however that we might be using the old-fold KFold which doesn't # have a split method... 
if SK18: per_cls_cvs = [ KFold(self.n_folds, # using sklearn's KFold here shuffle=self.shuffle, random_state=rng).split(np.zeros(max(count, self.n_folds))) for count in y_counts ] else: per_cls_cvs = [ KFold(max(count, self.n_folds), # using sklearn's KFold here self.n_folds, shuffle=self.shuffle, random_state=rng) for count in y_counts ] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)): for cls, (_, test_split) in zip(unique_y, per_cls_splits): cls_test_folds = test_folds[target == cls] # the test split can be too big because we used # KFold(...).split(X[:max(c, n_folds)]) when data is not 100% # stratifiable for all the classes # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(cls_test_folds)] cls_test_folds[test_split] = test_fold_indices test_folds[target == cls] = cls_test_folds return test_folds
7
41cfd558824b6561114a48a694b1e6e6a7cb8c05
import streamlit as st
from streamlit.components.v1 import components
from streamlit.report_thread import get_report_ctx
from util.session import *
from multipage import MultiPage
from pages import register


def app(page):
    """Render the registration form and, on submit, log the user in and
    forward to ``page``.

    page: a MultiPage-style object exposing ``app()``; rendered after a
    successful registration.

    NOTE(review): ``login_status()`` and ``login()`` come from the
    ``util.session`` star import — presumably session-state helpers;
    confirm against that module.
    """
    # Only show the form when no user is logged in yet.
    if not login_status():
        # st.empty() placeholders so each widget can be cleared individually
        # after a successful registration.
        title_container = st.empty()
        remail_input_container = st.empty()
        rpw_input_container = st.empty()
        rregister_button_container = st.empty()
        # title_container.write("Register")
        email = remail_input_container.text_input("Email ")
        password = rpw_input_container.text_input("Password ", type="password")
        rregister_button = rregister_button_container.button('Register')
        if rregister_button:
            # Clear the form widgets, mark the session as logged in,
            # render the target page, then force a rerun so the UI reflects
            # the new login state.
            title_container.empty()
            remail_input_container.empty()
            rpw_input_container.empty()
            rregister_button_container.empty()
            login()
            page.app()
            st.experimental_rerun()
8
f2bb44600f011a205c71985ad94c18f7e058634f
import os
import requests
from PIL import Image
from io import BytesIO
import csv

from typing import Iterable, List, Tuple, Dict, Callable, Union, Collection


# pull the image from the api endpoint and save it if we don't have it, else load it from disk
def get_img_from_file_or_url(img_format: str = 'JPEG') -> Callable[[str, str], Image.Image]:
    """Return a loader that reads an image from ``filepath`` if present,
    otherwise downloads it from ``url`` and caches it at ``filepath``.
    """
    def _apply(filepath: str, url: str) -> Image.Image:
        img = from_file(filepath)
        if img is None:
            img = from_url(url)
            img.save(filepath, img_format)
        return img.convert('RGB')  # convert to rgb if not already (eg if grayscale)
    return _apply


def from_url(url: str) -> Image.Image:
    """Fetch ``url`` and decode the response body as an image."""
    api_response = requests.get(url).content
    response_bytes = BytesIO(api_response)
    return Image.open(response_bytes)


def from_file(path: str) -> Union[Image.Image, None]:
    """Open an image from disk, or return None if ``path`` does not exist."""
    if os.path.exists(path):
        return Image.open(path)
    else:
        return None


def load_metadata(path: str, cols: Iterable[int], class_cols: Collection[int] = tuple(),
                  valid_only: bool = True, **reader_args)\
        -> Tuple[List, int, List, List[Dict[str, int]], List[Dict[int, str]], int]:
    """Load a CSV metadata file, indexing the values of the class columns.

    Parameters
    ----------
    path : str
        Path of the CSV file to read.
    cols : Iterable[int]
        Column indices to keep for each row.
    class_cols : Collection[int]
        Column indices holding class labels; each gets its own
        value<->index mapping.
    valid_only : bool
        If True, skip rows with any empty kept column.
    reader_args
        Extra keyword arguments forwarded to ``csv.reader``.

    Returns
    -------
    (metadata, len_metadata, headers, class_to_index, index_to_class, num_classes)
        ``num_classes`` counts the classes of the LAST class column only
        (original behavior, preserved).
    """
    metadata = []

    # One dict PER class column.
    # Bug fix: the original used ``[{}] * len(class_cols)``, which repeats
    # the SAME dict object, so every class column silently shared one
    # mapping. Fresh dicts per column are required.
    class_to_index: List[Dict[str, int]] = [{} for _ in class_cols]
    index_to_class: List[Dict[int, str]] = [{} for _ in class_cols]
    next_indices = [0] * len(class_cols)  # next index for a new class value

    with open(path, 'r', newline='', encoding="utf8") as metadata_file:
        reader = csv.reader(metadata_file, **reader_args)

        headers = next(reader)  # split off the headers

        for row in reader:
            if len(row) != 0:
                metadatum = [row[c] for c in cols]

                # for all class cols, add their vals to the class_to_index and
                # index_to_class dicts if not there already
                for c, class_col in enumerate(class_cols):
                    if not row[class_col] in class_to_index[c]:
                        class_to_index[c][row[class_col]] = next_indices[c]
                        index_to_class[c][next_indices[c]] = row[class_col]
                        next_indices[c] += 1

                if valid_only and '' in metadatum:
                    continue

                metadata.append(metadatum)

    len_metadata = len(metadata)
    num_classes = 0 if len(next_indices) == 0 else next_indices[-1]

    return metadata, len_metadata, headers, class_to_index, index_to_class, num_classes
9
302605d8bb45b1529742bf9441d476f0276085b9
import sys
from PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame, QSplitter, QStyleFactory,
                             QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
import myLoadData
from UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\
    showJudgeWidgets, chooseJudgeDataSetWidget
from MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement


class MyMainWindow(QMainWindow):
    """Main window for a missing-value neural-network workbench.

    The window is split into two mirrored panels: the top drives the
    "combine-CNN" model (state keyed 'New'), the bottom a traditional NN
    baseline (state keyed 'Tra'). Each panel offers data loading, loss
    simulation, model parameter setup, training, save/load, and result /
    evaluation views, delegating to dialogs/widgets from ``UIPack`` and
    models from ``MyCombCNNPack``.
    """

    def __init__(self):
        super().__init__()
        # Fixed initial window size.
        self.windowLength = 1250
        self.windowHigh = 900
        # Per-panel state dictionaries, keyed 'New' (combine-CNN) / 'Tra' (traditional NN).
        self.fname = dict()             # chosen data file path per panel
        self.fname['New'] = None
        self.fname['Tra'] = None
        self.dataLossRate = dict()      # simulated missing-data rate per panel
        self.dataSetLossValue = dict()  # value used to stand in for missing entries
        self.dataFor = dict()           # loaded dataset object per panel (myLoadData.loadData)
        self.dataFor['New'] = None
        self.dataLossRate['New'] = 0.
        self.dataSetLossValue['New'] = 0.
        self.dataFor['Tra'] = None
        self.dataLossRate['Tra'] = 0.
        self.dataSetLossValue['Tra'] = 0.
        # True while a training widget for the panel is active.
        self.traingWidgetOnFlag = dict()
        self.traingWidgetOnFlag['New'] = False
        self.traingWidgetOnFlag['Tra'] = False
        # combine-CNN hyper-parameters (editable via the parameters dialog).
        self.combineNumConv = 2          # combination (conv kernel) size of the conv layer
        self.convCoreNum = 5             # number of conv kernels
        self.combineNumPooling = 4       # combination (pooling kernel) size of the pooling layer
        self.fullConnectOutInRate = 0.5  # fully-connected out/in ratio
        # Model instances and training widgets (created lazily).
        self.mcbcnn = None     # combine-CNN model
        self.trann = None      # traditional NN model
        self.trainingW = None  # combine-CNN training widget
        self.trainingWT = None # traditional NN training widget
        self.initUI()
        self.initConnect()

    def initUI(self):
        """Build both panels' widgets and assemble the overall layout."""
        self.statusBar().showMessage('Ready')

        # ----- combine-CNN data module -----
        dataModule = QVBoxLayout()
        self.dataFileChooseButton = QPushButton('选择数据')
        self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
        self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
        self.dataShowButton = QPushButton('展示数据')
        self.dataShowButton.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Data:')
        label.setFont(QFont('微软雅黑', 16))
        self.presentDataName = QLabel('None')
        self.presentDataName.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentDataName)
        dataModule.addStretch(1)
        dataModule.addLayout(labelbox)
        dataModule.addStretch(1)
        dataModule.addWidget(self.dataFileChooseButton)
        dataModule.addStretch(1)
        dataModule.addWidget(self.dataLossSimulateSettingButton)
        dataModule.addStretch(1)
        dataModule.addWidget(self.dataShowButton)
        dataModule.addStretch(1)

        # ----- combine-CNN training module -----
        trainingModule = QVBoxLayout()
        self.setModelParametersButton = QPushButton('Model Parameters')
        self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
        # self.setTrainingParametersButton = QPushButton('Trainning Parameters')
        # self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16))
        self.trainingButton = QPushButton('Training')
        self.trainingButton.setFont(QFont('微软雅黑', 16))
        self.saveModelButton = QPushButton('Save Model')
        self.saveModelButton.setFont(QFont('微软雅黑', 16))
        self.loadModelButton = QPushButton('Load Model')
        self.loadModelButton.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Model:')
        label.setFont(QFont('微软雅黑', 16))
        self.presentModelName = QLabel('None')
        self.presentModelName.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentModelName)
        trainingModule.addStretch(1)
        trainingModule.addLayout(labelbox)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.setModelParametersButton)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.trainingButton)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.saveModelButton)
        trainingModule.addStretch(1)
        trainingModule.addWidget(self.loadModelButton)
        trainingModule.addStretch(1)

        # ----- combine-CNN result display -----
        resultShowModule = QVBoxLayout()
        self.showResultButton = QPushButton('分类结果展示')
        self.showResultButton.setFont(QFont('微软雅黑', 16))
        self.judgeResultButton = QPushButton('分类结果评估')
        self.judgeResultButton.setFont(QFont('微软雅黑', 16))
        resultShowModule.addWidget(self.showResultButton)
        resultShowModule.addWidget(self.judgeResultButton)

        # ----- top row: combine-CNN panel -----
        hboxTop = QHBoxLayout()
        hboxTop.addStretch(1)
        mcnnLabel = QLabel('Combine-CNN:')
        mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
        hboxTop.addWidget(mcnnLabel)
        hboxTop.addStretch(1)
        hboxTop.addLayout(dataModule)
        hboxTop.addStretch(1)
        hboxTop.addLayout(trainingModule)
        hboxTop.addStretch(1)
        hboxTop.addLayout(resultShowModule)
        hboxTop.addStretch(1)

        # ----- traditional NN data module -----
        dataModuleT = QVBoxLayout()
        self.dataFileChooseButtonT = QPushButton('选择数据')
        self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
        self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
        self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
        self.dataPreProcessButtonT = QPushButton('数据预处理')
        self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
        self.dataShowButtonT = QPushButton('展示数据')
        self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Data:')
        label.setFont(QFont('微软雅黑', 16))
        self.presentDataNameT = QLabel('None')
        self.presentDataNameT.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentDataNameT)
        dataModuleT.addStretch(1)
        dataModuleT.addLayout(labelbox)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataFileChooseButtonT)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataPreProcessButtonT)
        dataModuleT.addStretch(1)
        dataModuleT.addWidget(self.dataShowButtonT)
        dataModuleT.addStretch(1)

        # ----- traditional NN training module -----
        trainingModuleT = QVBoxLayout()
        self.setModelParametersButtonT = QPushButton('Model Parameters')
        self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
        self.trainingButtonT = QPushButton('Training')
        self.trainingButtonT.setFont(QFont('微软雅黑', 16))
        self.saveModelButtonT = QPushButton('Save Model')
        self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
        self.loadModelButtonT = QPushButton('Load Model')
        self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
        label = QLabel('Present Model:')
        label.setFont(QFont('微软雅黑', 16))
        self.presentModelNameT = QLabel('None')
        self.presentModelNameT.setFont(QFont('微软雅黑', 16))
        labelbox = QVBoxLayout()
        labelbox.addWidget(label)
        labelbox.addWidget(self.presentModelNameT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addLayout(labelbox)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.setModelParametersButtonT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.trainingButtonT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.saveModelButtonT)
        trainingModuleT.addStretch(1)
        trainingModuleT.addWidget(self.loadModelButtonT)
        trainingModuleT.addStretch(1)

        # ----- traditional NN result display -----
        resultShowModuleT = QVBoxLayout()
        self.showResultButtonT = QPushButton('分类结果展示')
        self.showResultButtonT.setFont(QFont('微软雅黑', 16))
        self.judgeResultButtonT = QPushButton('分类结果评估')
        self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
        resultShowModuleT.addWidget(self.showResultButtonT)
        resultShowModuleT.addWidget(self.judgeResultButtonT)

        # ----- bottom row: traditional NN panel -----
        hboxBottom = QHBoxLayout(self)
        hboxBottom.addStretch(1)
        traditionNNLabel = QLabel('Traditional NN:')
        traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
        hboxBottom.addWidget(traditionNNLabel)
        hboxBottom.addStretch(1)
        hboxBottom.addLayout(dataModuleT)
        hboxBottom.addStretch(1)
        hboxBottom.addLayout(trainingModuleT)
        hboxBottom.addStretch(1)
        hboxBottom.addLayout(resultShowModuleT)
        hboxBottom.addStretch(1)

        # ----- whole-frame layout: thin black label as a separator line -----
        splitterLine = QLabel(self)
        splitterLine.setFont(QFont('Times', 1))
        col = QColor(0, 0, 0)
        splitterLine.setStyleSheet("QWidget { background-color: %s }" % col.name())
        splitterLine.resize(splitterLine.sizeHint())
        vbox = QVBoxLayout()
        vbox.addLayout(hboxTop)
        # vbox.addWidget(QLabel(str('_'*int(self.width()/3))))
        vbox.addWidget(splitterLine)
        vbox.addLayout(hboxBottom)
        mainWidget = QWidget()
        mainWidget.setLayout(vbox)
        self.setCentralWidget(mainWidget)
        self.setGeometry(350, 100, self.windowLength, self.windowHigh)
        self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
        self.show()

    def initConnect(self):
        """Wire every button of both panels to its shared handler.

        Handlers inspect ``self.sender()`` to tell which panel triggered them.
        """
        self.dataFileChooseButton.clicked.connect(self.chooseData)
        self.dataFileChooseButtonT.clicked.connect(self.chooseData)
        self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)
        self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)
        self.dataShowButton.clicked.connect(self.showData)
        self.dataShowButtonT.clicked.connect(self.showData)
        self.dataPreProcessButtonT.clicked.connect(self.preProcess)
        self.setModelParametersButton.clicked.connect(self.setModelParameters)
        self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
        self.trainingButton.clicked.connect(self.training)
        self.trainingButtonT.clicked.connect(self.training)
        self.saveModelButton.clicked.connect(self.saveModel)
        self.saveModelButtonT.clicked.connect(self.saveModel)
        self.loadModelButton.clicked.connect(self.loadModel)
        self.loadModelButtonT.clicked.connect(self.loadModel)
        self.showResultButton.clicked.connect(self.showResult)
        self.showResultButtonT.clicked.connect(self.showResult)
        self.judgeResultButton.clicked.connect(self.showJudge)
        self.judgeResultButtonT.clicked.connect(self.showJudge)

    # ---------- data load module ----------
    def chooseData(self):
        """Ask for a data file for the triggering panel, then load it.

        NOTE(review): the second return value of getOpenFileName is the
        selected filter string; it is truthy when a file was chosen.
        """
        if self.sender() is self.dataFileChooseButton:
            self.fname['New'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
            if ok:
                # dataname = self.fname['New'].split('/')[-1].split('.')[0]
                # # print(dataname)
                # self.presentDataName.setText(dataname)
                # self.presentDataName.resize(self.presentDataName.sizeHint())
                self.loadData()
        elif self.sender() is self.dataFileChooseButtonT:
            self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
            if ok:
                # dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
                # # print(dataname)
                # self.presentDataNameT.setText(dataname)
                # self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
                self.loadData()
        return

    def loadData(self):
        """Load the chosen file via myLoadData with the panel's loss settings.

        Shows a message box on missing file or format errors; on success the
        panel's "Present Data" label is updated with the file's base name.
        """
        if self.sender() is self.dataFileChooseButton:
            try:
                self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'],
                                                          self.dataSetLossValue['New'])
                # print(self.dataFor['New'].DataTrainX, '\n', self.dataFor['New'].DataTrainY)
            except FileNotFoundError as e:
                reply = QMessageBox.information(self, 'Message', "Data file not exist",
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            except Exception:
                reply = QMessageBox.information(self, 'Message', "Data file format error",
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            dataname = self.fname['New'].split('/')[-1].split('.')[0]
            # print(dataname)
            self.presentDataName.setText(dataname)
            self.presentDataName.resize(self.presentDataName.sizeHint())
        elif self.sender() is self.dataFileChooseButtonT:
            try:
                self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'],
                                                          self.dataSetLossValue['Tra'])
                # print(self.dataFor['Tra'].DataTrainX, '\n', self.dataFor['Tra'].DataTrainY)
            except FileNotFoundError as e:
                reply = QMessageBox.information(self, 'Message', "Data file not exist",
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            except Exception:
                reply = QMessageBox.information(self, 'Message', "Data file format error",
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
            # print(dataname)
            self.presentDataNameT.setText(dataname)
            self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
        return

    def setLossParameter(self):
        """Open the missing-data parameter dialog for the triggering panel."""
        if self.sender() is self.dataLossSimulateSettingButton:
            self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New')
        elif self.sender() is self.dataLossSimulateSettingButtonT:
            self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra')
        # print(self.dataLossRate)
        # print(self.dataSetLossValue)
        return

    def showData(self):
        """Open the data-display widget for the triggering panel."""
        if self.sender() is self.dataShowButton:
            # print(1)
            self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New')
        elif self.sender() is self.dataShowButtonT:
            # print(1)
            self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')
        return

    def preProcess(self):
        """Mean-impute the traditional-NN dataset (bottom panel only)."""
        if self.dataFor['Tra'] is None:
            reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
                                            QMessageBox.Yes, QMessageBox.Yes)
        else:
            self.dataFor['Tra'].MeanPreProcess()
            reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!',
                                            QMessageBox.Yes, QMessageBox.Yes)
        return

    # ---------- training module ----------
    def setModelParameters(self):
        """Open the model-parameter dialog for the triggering panel."""
        if self.sender() is self.setModelParametersButton:
            # print(1)
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New')
        elif self.sender() is self.setModelParametersButtonT:
            self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra')

    def training(self):
        """Validate preconditions and launch the training widget for the panel.

        If the panel already has a training widget it is re-shown. Training in
        one panel is blocked while the other panel's widget exists.
        NOTE(review): the 'New' branch returns after re-showing an existing
        widget but the 'Tra' branch does not, and the on-flag is set to False
        (not True) after launching — both look unintentional; confirm.
        """
        if self.sender() is self.trainingButton:
            if self.trainingW is not None:
                self.trainingW.hide()
                # print(self.trainingW)
                self.trainingW.show()
                return
            senderName = 'New'
        elif self.sender() is self.trainingButtonT:
            if self.trainingWT is not None:
                self.trainingWT.hide()
                self.trainingWT.show()
            senderName = 'Tra'
        if self.dataFor[senderName] is None:
            reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
                                            QMessageBox.Yes, QMessageBox.Yes)
            return
        elif senderName == 'New':
            # Conv kernel must not exceed the feature count.
            if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:
                reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            # Pooling kernel must not exceed the conv layer's output dimension.
            if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\
                    < self.combineNumPooling:
                reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            # print(self.trainingW)
            if self.trainingWT is not None:
                reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName)
            self.traingWidgetOnFlag[senderName] = False
        elif senderName == 'Tra':
            if self.trainingW is not None:
                reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)
            self.traingWidgetOnFlag[senderName] = False
        return

    def saveModel(self):
        """Serialize the panel's model to a JSON file chosen by the user."""
        if self.sender() is self.saveModelButton:
            if self.mcbcnn is None:
                reply = QMessageBox.information(self, '模型错误', '模型不存在',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            else:
                fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\myCombineCNN.cbcnn.json',
                                                        'Combine-CNN json files (*.cbcnn.json)')
                if ok:
                    succeed = self.mcbcnn.saveModel(fname)
                    if succeed:
                        reply = QMessageBox.information(self, '保存结果', '模型保存成功',
                                                        QMessageBox.Yes, QMessageBox.Yes)
                    else:
                        reply = QMessageBox.information(self, '保存结果', '模型保存失败',
                                                        QMessageBox.Yes, QMessageBox.Yes)
                else:
                    reply = QMessageBox.information(self, '保存结果', '模型保存失败',
                                                    QMessageBox.Yes, QMessageBox.Yes)
        elif self.sender() is self.saveModelButtonT:
            if self.trann is None:
                reply = QMessageBox.information(self, '模型错误', '模型不存在',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            else:
                fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\traditionalNN.trann.json',
                                                        'Traditional NN json files (*.trann.json)')
                if ok:
                    succeed = self.trann.saveModel(fname)
                    if succeed:
                        reply = QMessageBox.information(self, '保存结果', '模型保存成功',
                                                        QMessageBox.Yes, QMessageBox.Yes)
                    else:
                        reply = QMessageBox.information(self, '保存结果', '模型保存失败',
                                                        QMessageBox.Yes, QMessageBox.Yes)
                else:
                    reply = QMessageBox.information(self, '保存结果', '模型保存失败',
                                                    QMessageBox.Yes, QMessageBox.Yes)

    def loadModel(self):
        """Load a serialized model for the panel, creating the model lazily."""
        if self.sender() is self.loadModelButton:
            fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
                                                    'Combine-CNN json files (*.cbcnn.json)')
            if ok:
                if self.mcbcnn is None:
                    # Lazily construct an empty combine-CNN to receive the weights.
                    self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv,
                                                            self.convCoreNum, self.combineNumPooling)
                succeed = self.mcbcnn.setModel(fname)
                if succeed:
                    modelName = fname.split('/')[-1].split('.')[0]
                    self.presentModelName.setText(modelName)
                    reply = QMessageBox.information(self, '设置结果', '模型设置成功',
                                                    QMessageBox.Yes, QMessageBox.Yes)
                else:
                    reply = QMessageBox.information(self, '设置结果', '模型设置失败',
                                                    QMessageBox.Yes, QMessageBox.Yes)
            else:
                reply = QMessageBox.information(self, '设置结果', '模型设置失败',
                                                QMessageBox.Yes, QMessageBox.Yes)
        elif self.sender() is self.loadModelButtonT:
            fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
                                                    'Traditional NN json files (*.trann.json)')
            if ok:
                if self.trann is None:
                    self.trann = traditionalNN.traditionalNN(None)
                succeed = self.trann.setModel(fname)
                if succeed:
                    modelName = fname.split('/')[-1].split('.')[0]
                    self.presentModelNameT.setText(modelName)
                    reply = QMessageBox.information(self, '设置结果', '模型设置成功',
                                                    QMessageBox.Yes, QMessageBox.Yes)
                else:
                    reply = QMessageBox.information(self, '设置结果', '模型设置失败',
                                                    QMessageBox.Yes, QMessageBox.Yes)
            else:
                reply = QMessageBox.information(self, '设置结果', '模型设置失败',
                                                QMessageBox.Yes, QMessageBox.Yes)
        return

    def showResult(self):
        """Open the prediction-result widget, unless training is in progress."""
        if self.sender() is self.showResultButton:
            if self.traingWidgetOnFlag['New']:
                reply = QMessageBox.information(self, '提示', '训练正在进行',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.showResultW = showResultWidget.ShowResultWidget('combine-CNN预测结果展示', self, 'New')
        elif self.sender() is self.showResultButtonT:
            if self.traingWidgetOnFlag['Tra']:
                reply = QMessageBox.information(self, '提示', '训练正在进行',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.showResultW = showResultWidget.ShowResultWidget('traditional NN预测结果展示', self, 'Tra')
        return

    def showJudge(self):
        """Open the evaluation-dataset chooser, unless training is in progress."""
        if self.sender() is self.judgeResultButton:
            if self.traingWidgetOnFlag['New']:
                reply = QMessageBox.information(self, '提示', '训练正在进行',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'New')
        elif self.sender() is self.judgeResultButtonT:
            if self.traingWidgetOnFlag['Tra']:
                reply = QMessageBox.information(self, '提示', '训练正在进行',
                                                QMessageBox.Yes, QMessageBox.Yes)
                return
            self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget(
                'Choose Judgement-based-on Data Set', self, 'Tra')
        # self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train')
        # self.mcbcnn.runCNN('Test', self.dataFor['New'])
        # drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1))
        # drawCM.plotConfuseMatrix()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    myMainWindow = MyMainWindow()
    sys.exit(app.exec_())
10
5d9c8e235385ff53c7510994826ff3a04e4a5888
""" @file : 001-rnn+lstm+crf.py @author: xiaolu @time : 2019-09-06 """ import re import numpy as np import tensorflow as tf from sklearn.metrics import classification_report class Model: def __init__(self, dim_word, dim_char, dropout, learning_rate, hidden_size_char, hidden_size_word, num_layers): ''' :param dim_word: 词的维度 :param dim_char: 字符维度 :param dropout: dropout :param learning_rate: 学习率 :param hidden_size_char: 字符隐层输出维度 :param hidden_size_word: 词隐层输出维度 :param num_layers: 几层 ''' def cells(size, reuse=False): return tf.contrib.rnn.DropoutWrapper( tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse), output_keep_prob=dropout ) # 1. define input self.word_ids = tf.placeholder(tf.int32, shape=[None, None]) self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None]) self.labels = tf.placeholder(tf.int32, shape=[None, None]) self.maxlen = tf.shape(self.word_ids)[1] self.lengths = tf.count_nonzero(self.word_ids, 1) # 2. embedding self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word))) self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char))) word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids) char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids) s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size) char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char]) for n in range(num_layers): (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn( cell_fw=cells(hidden_size_char), cell_bw=cells(hidden_size_char), inputs=char_embedded, dtype=tf.float32, scope='bidirectional_rnn_char_%d' % n ) char_embedded = tf.concat((out_fw, out_bw), 2) output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char]) word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接 for n in range(num_layers): 
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn( cell_fw=cells(hidden_size_word), cell_bw=cells(hidden_size_word), inputs=word_embedded, dtype=tf.float32, scope='bidirectional_rnn_word_%d' % n ) word_embedded = tf.concat((out_fw, out_bw), 2) logits = tf.layers.Dense(word_embedded, len(idx2tag)) y_t = self.labels log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood( logits, y_t, self.lengths ) self.cost = tf.reduce_mean(-log_likelihood) self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost) mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen) self.tags_seq, tags_score = tf.contrib.crf.crf_decode( logits, transition_params, self.lengths ) self.tags_seq = tf.identity(self.tags_seq, name='logits') y_t = tf.cast(y_t, tf.int32) self.prediction = tf.boolean_mask(self.tags_seq, mask) mask_label = tf.boolean_mask(y_t, mask) correct_pred = tf.equal(self.prediction, mask_label) correct_index = tf.cast(correct_pred, tf.float32) self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) def parse(file): ''' 加载文件并且解析 :param file: 文件名 :return: 词<->词性 ''' with open(file) as fopen: texts = fopen.read().split('\n') left, right = [], [] for text in texts: if '-DOCSTART' in text or not len(text): continue splitted = text.split() left.append(splitted[0]) right.append(splitted[-1]) return left, right def process_string(string): ''' :param string: :return: ''' string= re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split() return ' '.join([to_title(y.strip()) for y in string]) def to_title(string): if string.isupper(): string = string.title() return string def parse_XY(texts, labels): ''' 整理词性表  词表  字符表  并将文本转为对应的数字序列 :param texts: 文本 词的一个列表 :param labels: 词性的一个列表 :return: 词转为id的序列 词性转为id的序列 ''' global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx X, Y = [], [] for no, text in enumerate(texts): text = text.lower() # 当前这个单词转小写 tag = labels[no] # 取出对应的词性 for c in text: # 字符表 if c not in char2idx: 
char2idx[c] = char_idx char_idx += 1 if tag not in tag2idx: # 词性表 tag2idx[tag] = tag_idx tag_idx += 1 Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值 if text not in word2idx: # 词表 word2idx[text] = word_idx word_idx += 1 X.append(word2idx[text]) # 将词转为id的标号 return X, np.array(Y) def iter_seq(x): return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)]) def to_train_seq(*args): ''' :param args: 词转为的id的序列   词性转为id的序列 :return: ''' return [iter_seq(x) for x in args] def generate_char_seq(batch): ''' 传进来是50一个块 总共有多少块 然后将每块的单词转为字符序列 :param batch: :return: ''' x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度 maxlen = max([j for i in x for j in i]) # 最大长度 temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32) for i in range(batch.shape[0]): for k in range(batch.shape[1]): for no, c in enumerate(idx2word[batch[i, k]]): temp[i, k, -1-no] = char2idx[c] return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)] def pred2label(pred): # 将预测结果转为标签 out = [] for pred_i in pred: out_i = [] for p in pred_i: out_i.append(idx2tag[p]) out.append(out_i) return out if __name__ == '__main__': left_train, right_train = parse('./data/eng.train') left_test, right_test = parse('./data/eng.testa') # print(left_train[:10]) # print(right_train[:10]) word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表 tag2idx = {'PAD': 0} # 词性表 char2idx = {'PAD': 0} word_idx = 3 tag_idx = 1 char_idx = 1 train_X, train_Y = parse_XY(left_train, right_train) test_X, test_Y = parse_XY(left_test, right_test) # print(train_X[:20]) # print(train_Y[:20]) idx2word = {idx: tag for tag, idx in word2idx.items()} idx2tag = {i: w for w, i in tag2idx.items()} seq_len = 50 X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落 X_char_seq = generate_char_seq(X_seq) print(X_seq.shape) # (203571, 50) print(X_char_seq.shape) # (203571, 50, 61) X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y) X_char_seq_test = generate_char_seq(X_seq_test) print(X_seq_test.shape) # (51312, 50) print(X_char_seq_test.shape) 
# (51312, 50, 27) train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test tf.reset_default_graph() sess = tf.Session() dim_word = 64 dim_char = 128 dropout = 0.8 learning_rate = 1e-3 hidden_size_char = 128 hidden_size_word = 128 num_layers = 2 batch_size = 32 model = Model(dim_word, dim_char, dropout, learning_rate, hidden_size_char, hidden_size_word, num_layers) sess.run(tf.global_variables_initializer()) for e in range(3): train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0 for i in range(0, len(train_X), batch_size): batch_x = train_X[i: min(i + batch_size, train_X.shape[0])] batch_char = train_char[i: min(i + batch_size, train_X.shape[0])] batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])] acc, cost, _ = sess.run( [model.accuracy, model.cost, model.optimizer], feed_dict={ model.word_ids: batch_x, model.char_ids: batch_char, model.labels: batch_y }, ) train_loss += cost train_acc += acc print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc)) for i in range(0, len(test_X), batch_size): batch_x = test_X[i: min(i + batch_size, test_X.shape[0])] batch_char = test_char[i: min(i + batch_size, test_X.shape[0])] batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])] acc, cost = sess.run( [model.accuracy, model.cost], feed_dict={ model.word_ids: batch_x, model.char_ids: batch_char, model.labels: batch_y }, ) test_loss += cost test_acc += acc print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc)) train_loss /= len(train_X) / batch_size train_acc /= len(train_X) / batch_size test_loss /= len(test_X) / batch_size test_acc /= len(test_X) / batch_size print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n' % (e, train_loss, train_acc, test_loss, test_acc)) real_Y, predict_Y = [], [] for i in range(0, len(test_X), batch_size): batch_x = test_X[i: min(i + batch_size, 
test_X.shape[0])] batch_char = test_char[i: min(i + batch_size, test_X.shape[0])] batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])] predicted = pred2label( sess.run(model.tags_seq, feed_dict={ model.word_ids: batch_x, model.char_ids: batch_char, }, ) ) real = pred2label(batch_y) predict_Y.extend(predicted) real_Y.extend(real) print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))
11
54e04d740ef46fca04cf4169d2e7c05083414bd8
import random import math import time import pygame pygame.init() scr = pygame.display.set_mode((700,700)) enemies = [] #music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3') #pygame.mixer.music.play(-1) hit = [] class Player: def __init__(self): self.x = 275 self.y = 275 self.image = pygame.image.load('player.jpg') self.image1 = pygame.image.load('hearts.png') self.lives = 5 def draw(self): scr.blit(self.image,(self.x,self.y)) def rotate(self, x, y): oppos = math.fabs(y - self.y) adjac = math.fabs(x - self.x) hypot = math.hypot(oppos,adjac) sin = oppos/hypot radians = math.asin(sin) angle = radians * (180/3.14) if x > self.x: if y > self.y: angle -= angle + angle if x < self.x: angle = 180 + (angle - (angle + angle)) if y > self.y: angle -= angle + angle return angle - 90 class Bullet: def __init__(self, color): self.x = 0 self.y = 0 self.angle = 0 self.color = color def draw(self): pygame.draw.rect(scr,self.color,pygame.Rect(self.x,self.y,5,5)) class Gun: def __init__(self): self.x = 0 self.y = 0 self.bullets = [] self.bullets2 = [] def shoot1(self,x,y,angle): self.bullets.append(Bullet((0,255,255))) self.bullets[-1].x = x self.bullets[-1].y = y self.bullets[-1].angle = angle def shoot2(self,x,y,angle): self.bullets2.append(Bullet((255,255,0))) self.bullets2[-1].x = x self.bullets2[-1].y = y self.bullets2[-1].angle = angle class Enemy: def __init__(self): self.x = 100 self.y = 100 self.speed = 2 self.hearts = 3 self.image = pygame.image.load('enemy.png') def draw(self): scr.blit(self.image,(self.x,self.y)) def rotate(self, x, y): oppos = math.fabs(y - self.y) adjac = math.fabs(x - self.x) hypot = math.hypot(oppos,adjac) sin = oppos/hypot radians = math.asin(sin) angle = radians * (180/3.14) if x > self.x: if y > self.y: angle -= angle + angle if x < self.x: angle = 180 + (angle - (angle + angle)) if y > self.y: angle -= angle + angle return angle - 90 def distance(self,x,y): oppos = math.fabs(y - self.y) adjac = math.fabs(x - self.x) hypot 
= math.hypot(oppos,adjac) return hypot def spawn(self): enemies.append(Enemy()) enemies[-1].x = random.randint(0,600) enemies[-1].y = random.randint(0,600) cmd = Enemy() gun = Gun() player = Player() cmd.spawn() cmd.spawn() last = 0 frames = 0 fro = 1 while True: frames += 1 scr.fill((0,0,0)) for event in pygame.event.get(): key = pygame.key.get_pressed() Mpos = pygame.mouse.get_pos() if event.type == 5: gun.shoot1(player.x + 12.5,player.y + 12.5,angle) for i in range(0,player.lives): scr.blit(player.image1,(i*35,1)) for i in range(len(gun.bullets)): try: gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90)) gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90)) if gun.bullets[i].x > 600: del gun.bullets[i] if gun.bullets[i].x < 0: del gun.bullets[i] if gun.bullets[i].y > 600: del gun.bullets[i] if gun.bullets[i].y < 0: del gun.bullets[i] gun.bullets[i].draw() except IndexError: pass for i in range(len(gun.bullets2)): try: gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.radians(gun.bullets2[i].angle + 90)) gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90)) if gun.bullets2[i].x > 600: del gun.bullets2[i] if gun.bullets2[i].x < 0: del gun.bullets2[i] if gun.bullets2[i].y > 600: del gun.bullets2[i] if gun.bullets2[i].y < 0: del gun.bullets2[i] gun.bullets2[i].draw() except IndexError: pass for i in range(len(enemies)): if enemies[i].distance(player.x,player.y) > 100: enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90)) enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90)) enemies[i].image = pygame.image.load("enemy.png").convert() enemies[i].image = enemies[i].image.copy() enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y)) angle2 = enemies[i].rotate(player.x,player.y) if 
frames % 100 == 0: gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2) enemies[i].draw() for j in range(len(gun.bullets)): for i in range(len(gun.bullets)): try: if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25: del enemies[i] except IndexError: pass for j in range(len(gun.bullets2)): for i in range(len(gun.bullets2)): try: if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25: for i in range(len(hit)-1): if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y): del hit[i] if hit.count(gun.bullets2[j]) == 0: hit.append(gun.bullets2[j]) player.lives = 5 - len(hit) except IndexError: pass if key[pygame.K_a]: player.x -= 3 if key[pygame.K_d]: player.x += 3 if key[pygame.K_w]: player.y -= 3 if key[pygame.K_s]: player.y += 3 if frames % 150 == 0: cmd.spawn() if player.lives < 1: pygame.quit() break player.image = pygame.image.load("player.jpg").convert() player.image = player.image.copy() player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1])) angle = player.rotate(Mpos[0],Mpos[1]) player.draw() pygame.display.update() time.sleep(0.005) quit()
12
0a7ffc027511d5fbec0076f6b25a6e3bc3dfdd9b
'''
Given a sorted array and a target value, return the index if the target is
found. If not, return the index where it would be if it were inserted in
order. You may assume no duplicates in the array.

Here are few examples.
[1,3,5,6], 5 -> 2
[1,3,5,6], 2 -> 1
[1,3,5,6], 7 -> 4
[1,3,5,6], 0 -> 0
'''


class Solution(object):
    def searchInsert(self, nums, target):
        """Return the index of target in sorted nums, or its insertion point.

        Classic lower-bound binary search: O(log n) time, O(1) space.
        nums is assumed non-empty and duplicate-free (per problem statement).
        """
        # Fast paths: target falls entirely outside the array.
        if target < nums[0]:
            return 0
        if target > nums[-1]:
            return len(nums)
        l_idx, h_idx = 0, len(nums) - 1
        # Invariant: the answer index always lies within [l_idx, h_idx].
        while l_idx < h_idx:
            # Floor division avoids the float round-trip of int((l+h)/2).
            m_idx = (l_idx + h_idx) // 2
            if target > nums[m_idx]:
                l_idx = m_idx + 1
            else:
                h_idx = m_idx
        return l_idx


sol = Solution()
# print(expr) is valid in both Python 2 and Python 3; the original bare
# "print expr" statements were Python 2 only.
print(sol.searchInsert([1, 3, 5, 6], 5))
print(sol.searchInsert([1, 3, 5, 6], 2))
print(sol.searchInsert([1, 3, 5, 6], 4))
print(sol.searchInsert([1, 3, 5, 6], 0))
13
2cbce618d1ec617d1c7dc0e9792b6a49361ec5a4
def mais_populoso(dic):
    """Return the key of *dic* whose nested population values sum highest.

    *dic* maps a name (e.g. a state) to an inner dict whose values are
    population counts.  Ties keep the first key encountered; an empty
    outer dict returns None.

    Bug fixed: the running total ``p`` was never reset between outer
    entries, so each candidate accumulated every previous population and
    a later key could win with a smaller real total (the winner ``x``
    was also unbound when the dict was empty).
    """
    best_key = None
    best_total = 0
    for key, populations in dic.items():
        total = sum(populations.values())  # this entry's population alone
        if total > best_total:
            best_total = total
            best_key = key
    return best_key
14
2092ead8b8f268a22711b8af8052241c1ac00c15
# Hourly wage in dollars.
wage=5
# Earnings after 1 and 5 hours, in dollars (%d formats integers).
print("%d시간에 %d%s 벌었습니다." %(1, wage*1, "달러"))
print("%d시간에 %d%s 벌었습니다." %(5, wage*5, "달러"))
# Same earnings converted to Korean won (%.1f keeps one decimal place).
# NOTE(review): 5710.8 looks like a hard-coded exchange-rate product — confirm.
print("%d시간에 %.1f%s 벌었습니다" %(1,5710.8,"원"))
print("%d시간에 %.1f%s 벌었습니다" %(5, 28554.0, "원"))
15
b5cbb73c152dd60e9063d5a19f6182e2264fec6d
#!/usr/bin/python # coding=UTF-8 import sys import subprocess import os def printReportTail(reportHtmlFile): reportHtmlFile.write(""" </body> </html> """) def printReportHead(reportHtmlFile): reportHtmlFile.write("""<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Document</title> </head> <body> """) def printTitle(reportHtmlFile, title): reportHtmlFile.write("<h2>" + title + "</h2>\n") def printText(reportHtmlFile, text): reportHtmlFile.write("<h4>" + text + "</h4>\n") def printSVG(reportHtmlFile, svgPath): reportHtmlFile.write('<embed src="') reportHtmlFile.write(svgPath) reportHtmlFile.write('" type="image/svg+xml" />') def ParseStack(currentPath, ndkPath, stackFile, architecture, symbolsDir): print "currentPath: " + currentPath # 查找addr2line文件 print "architecture is " + architecture if architecture == "arm64-v8a": addr2line = ndkPath + "/toolchains/aarch64-linux-android-4.9/prebuilt/darwin-x86_64/bin/aarch64-linux-android-addr2line" elif architecture == "armeabi" or architecture == "armeabi-v7a": addr2line = ndkPath + "/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-addr2line" else: print "do not support architecture type for " + architecture print "only support armeabi/armeabi-v7a/arm64-v8a" return print "addr2line path: " + addr2line if not os.path.exists(addr2line): print "can not find " + architecture + " addr2line" else: print "find " + architecture + " addr2line" reportHtmlPath = os.path.split(stackFile)[0] + "/leakReport.html" if os.path.exists(reportHtmlPath): os.unlink(reportHtmlPath) reportHtmlFile = open(reportHtmlPath, "a") printReportHead(reportHtmlFile) # 处理stack文件 for line in open(stackFile): if line.startswith("libName:"): libName = line.replace("libName:", "").replace('\n', '').replace('\r', '') printTitle(reportHtmlFile, libName) libAbsolutePath = os.path.split(stackFile)[0] + "/" + libName if not 
os.path.exists(libAbsolutePath): os.makedirs(libAbsolutePath) flStackFilePath = libAbsolutePath + "/fl_stack.txt" flameGraphFile = open(flStackFilePath, "w") print "find lib: " + libName elif line.startswith("leakSize:"): leakSize = line.replace("leakSize:", "").replace('\n', '').replace('\r', '') leakMsg = "leak size: " + leakSize + "\n" printText(reportHtmlFile, leakMsg) print leakMsg elif line.startswith("stack:"): stack = line.replace("stack:", "").replace('\n', '').replace('\r', '') # print "stack: " for stackElement in stack.split("^"): if stackElement == "": continue dlinfo = stackElement.split("|") pc = dlinfo[0] libPath = dlinfo[1] symbol = dlinfo[2] # print "pc " + pc + " " + libPath + " " + symbol symbolFile = symbolsDir + "/" + os.path.split(libPath)[1] if os.path.exists(symbolFile): # print "---------" parseCommend = addr2line + " -Ce " + symbolFile + " -f " + pc # print parseCommend # os.system(parseCommend) result = os.popen(parseCommend) res = result.read() retraces = res.splitlines() if len(retraces) != 2 or "?" in retraces[0] or "?" 
in retraces[1]: if symbol != "": method = symbol codeLine = -1 else: method = pc codeLine = -1 else: method = retraces[0] codeLine = retraces[1] # print method # print codeLine elif symbol != "": method = symbol codeLine = -1 else: method = pc codeLine = -1 flameGraphFile.write(method + ";") flameGraphFile.write(" 1\n") elif line.replace('\n', '').replace('\r', '') == "libSplit!!!": # 结束了一个lib的输出 print "finish lib " + libName + " parse" plExePath = os.path.split(currentPath)[0] + "/flamegraph.pl" svgPath = libAbsolutePath + "/" + libName + ".svg" commend = plExePath + " " + flStackFilePath + " > " + svgPath os.system(commend) printSVG(reportHtmlFile, svgPath.replace(os.path.split(libAbsolutePath)[0], "./")) printReportTail(reportHtmlFile) def main(args): if 4 > len(args): print("请输入\"android ndk路径\" \"stack文件路径\" \"arm架构(armeabi/armeabi-v7a/arm64-v8a)\" \"带符号表so所在目录\"") return ParseStack(args[0], args[1], args[2], args[3], args[4]) if __name__ == "__main__": main(sys.argv)
16
805fc9a26650f85227d14da972311ffbd9dbd555
class Date:
    """A calendar date parsed from a 'DD.MM.YYYY'-style string.

    The three dot-separated fields are kept verbatim as strings in
    ``day``, ``month`` and ``year`` — no numeric conversion is done.
    """

    def __init__(self, strDate):
        parts = strDate.split('.')
        self.day, self.month, self.year = parts[0], parts[1], parts[2]
17
a7218971b831e2cfda9a035eddb350ecf1cdf938
#!/usr/bin/python # encoding: utf-8 # # In case of reuse of this source code please do not remove this copyright. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # For more information on the GNU General Public License see: # <http://www.gnu.org/licenses/>. # from Components.config import config from datetime import datetime import os MinCacheLimit = config.EMC.min_file_cache_limit.getValue() pathisfile = os.path.isfile pathisdir = os.path.isdir pathislink = os.path.islink pathexists = os.path.exists pathreal = os.path.realpath idx_isLink=0 idx_isDir=1 idx_isFile=2 idx_Date=3 idx_realpath=4 idx_num=5 class EMCFileCache(): def __init__(self): self.cacheDirectoryList = {} self.cacheFileList = {} self.cacheAttributeList = {} self.cacheCountSizeList = {} def addCountSizeToCache(self, path, count, size): # print "EMC addCountSizeToCache", path if self.cacheCountSizeList.has_key(path): lastcount, lastsize = self.cacheCountSizeList[path] if lastcount != count or lastsize != size: del self.cacheCountSizeList[path] self.cacheCountSizeList[path] = count, size else: self.cacheCountSizeList[path] = count, size # print "EMC addCountSizeToCache", self.cacheCountSizeList def getCountSizeFromCache(self, path): if self.cacheCountSizeList.has_key(path): return self.cacheCountSizeList[path] else: return None # print "EMC getCountSizeFromCache", self.cacheCountSizeList def delcacheCountSizeList(self): self.cacheCountSizeList = {} print "EMC delete cacheCountSizeList", self.cacheCountSizeList def delcacheCountSizeListEntriesOnFileOp(self,path): 
#print "EMC delcacheCountSizeListEntriesOnFileOp",path rescanPaths = [] if path: for k in self.cacheCountSizeList.keys(): if (k+"/").startswith(path+"/") or (path+"/").startswith(k+"/"): # drop dirs containing path, but not "a/bc" when path is "a/bcd/e", therefore append "/" del self.cacheCountSizeList[k] rescanPaths.append(k) #print "EMC delcacheCountSizeListEntriesOnFileOp IS deleting",k," due to OP on path ",path #else: #print "EMC delcacheCountSizeListEntriesOnFileOp NOT deleting",k," due to OP on path ",path return rescanPaths def IsPathInCountSizeList(self, path): if self.cacheCountSizeList.has_key(path): return True else: return False def addPathToCache(self, path, subdirlist, filelist, MovieCenterInst): if config.EMC.files_cache.value: print "EMC addPathToCache", path if (len(subdirlist)>MinCacheLimit) or (len(filelist)>MinCacheLimit): self.cacheDirectoryList[path] = subdirlist for p, n, e in subdirlist: if not (p in self.cacheAttributeList): AttributeList=[None]*idx_num AttributeList[idx_isLink] = pathislink(p) AttributeList[idx_isDir] = True # we are in subdirlist AttributeList[idx_isFile] = False # we are in subdirlist AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, True) AttributeList[idx_realpath] = pathreal(p) #for dirs only self.cacheAttributeList[p] = AttributeList self.cacheFileList[path] = filelist for p, n, e in filelist: if not (p in self.cacheAttributeList): AttributeList=[None]*idx_num AttributeList[idx_isLink] = pathislink(p) AttributeList[idx_isDir] = False # we are in filelist, no entry is a real directrory ... AttributeList[idx_isFile] = pathisfile(p) # ... 
but filelist might contain virtual directories AttributeList[idx_Date] = pathexists(p) and MovieCenterInst.checkDate(p, False) #AttributeList[idx_realpath] = pathreal(p) #for dirs only self.cacheAttributeList[p] = AttributeList else: if self.cacheDirectoryList.has_key(path): self.deleteAssociatedListEntries(self.cacheDirectoryList[path]) del self.cacheDirectoryList[path] if self.cacheFileList.has_key(path): self.deleteAssociatedListEntries(self.cacheFileList[path]) del self.cacheFileList[path] # self.debugPrintDirCache() # self.debugPrintFileCache() # self.debugPrintFileAttributeCache() def addRecToCacheFileList(self, path, rec): if config.EMC.files_cache.value: if self.cacheFileList.has_key(path): filelist = self.cacheFileList[path] filelist.append(rec) del self.cacheFileList[path] self.cacheFileList[path] = filelist def getCacheForPath(self, path): print "EMC getCacheForPath", path if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path): subdirlist = self.cacheDirectoryList[path] filelist = self.cacheFileList[path] # self.debugPrintDirCache() # self.debugPrintFileCache() # self.debugPrintFileAttributeCache() return subdirlist, filelist else: return None, None def isLink(self, path): isLink = None if config.EMC.files_cache.value and (path in self.cacheAttributeList): isLink = self.cacheAttributeList[path][idx_isLink] if isLink is None: isLink = pathislink(path) return isLink def isDir(self, path): isDir = None if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList): isDir = self.cacheAttributeList[path][idx_isDir] if isDir is None: isDir = pathisdir(path) return isDir def isFile(self, path): isFile = None if (config.EMC.check_dead_links.value != "always") and config.EMC.files_cache.value and (path in self.cacheAttributeList): isFile = self.cacheAttributeList[path][idx_isFile] if isFile is None: isFile = pathisfile(path) return isFile def 
realpath(self, path): realpath = None if config.EMC.files_cache.value and (path in self.cacheAttributeList): realpath = self.cacheAttributeList[path][idx_realpath] if realpath is None: realpath = pathreal(path) return realpath def getDateInfoFromCacheForPath(self, path): if config.EMC.files_cache.value and (path in self.cacheAttributeList): return self.cacheAttributeList[path][idx_Date] else: return None def getDirsFromCacheForPath(self, path): if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path): subdirlist = self.cacheDirectoryList[path] return subdirlist else: return None def getFilesFromCacheForPath(self, path): if config.EMC.files_cache.value and self.cacheFileList.has_key(path): filelist = self.cacheFileList[path] return filelist else: return None def IsPathInCache(self, path): if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path) and self.cacheFileList.has_key(path): return True else: return False def IsPathWithDirsInCache(self, path): if config.EMC.files_cache.value and self.cacheDirectoryList.has_key(path): return True else: return False def IsPathWithFilesInCache(self, path): if config.EMC.files_cache.value and self.cacheFileList.has_key(path): return True else: return False def delPathFromCache(self, path): if len(path)>1 and path[-1]=="/": path = path[:-1] print "EMC delPathFromCache", path if self.cacheDirectoryList.has_key(path): self.deleteAssociatedListEntries(self.cacheDirectoryList[path]) del self.cacheDirectoryList[path] if self.cacheFileList.has_key(path): self.deleteAssociatedListEntries(self.cacheFileList[path]) del self.cacheFileList[path] # self.debugPrintDirCache() # self.debugPrintFileCache() # self.debugPrintFileAttributeCache() def delPathFromDirCache(self, path): if len(path)>1 and path[-1]=="/": path = path[:-1] if self.cacheDirectoryList.has_key(path): self.deleteAssociatedListEntries(self.cacheDirectoryList[path]) del self.cacheDirectoryList[path] def delPathFromFileCache(self, path): if 
len(path)>1 and path[-1]=="/": path = path[:-1] if self.cacheFileList.has_key(path): self.deleteAssociatedListEntries(self.cacheFileList[path]) del self.cacheFileList[path] def debugPrintFileCache(self): print "cacheFileList:" for p in self.cacheFileList: print p,self.cacheFileList[p] print "" def debugPrintDirCache(self): print "cacheDirectoryList:" for p in self.cacheDirectoryList: print p,self.cacheDirectoryList[p] print "" def debugPrintFileAttributeCache(self): print "cacheAttributeList:" for p in self.cacheAttributeList: print p,self.cacheAttributeList[p] print "" def deleteAssociatedListEntries(self, list): for p, n, e in list: if p in self.cacheAttributeList and (config.EMC.check_dead_links.value != "only_initially"): del self.cacheAttributeList[p] movieFileCache = EMCFileCache()
18
038ccba05113fb7f2f589eaa7345df53cb59a5af
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import torch from torch import nn, autograd import config import time import copy import progressbar as pb from dataset import TrainDataSet from model import BiAffineSrlModel from fscore import FScore config.add_option('-m', '--mode', dest='mode', default='train', type='string', help='[train|eval|pred]', action='store') config.add_option('--seed', dest='seed', default=1, type='int', help='torch random seed', action='store') def train(num_epochs = 30): lossfunction = nn.CrossEntropyLoss() trainset = TrainDataSet() model = BiAffineSrlModel(vocabs=trainset.vocabs) optimizer = torch.optim.Adam(model.parameters(), lr=0.01) since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_f = FScore() for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1), file=sys.stderr) print('-' * 10, file=sys.stderr) for phase in ['train', 'dev']: model.train(phase == 'train') running_loss = 0.0 running_f = FScore() for sentence in pb.progressbar(trainset.get_set(phase)): model.zero_grad() role_p = model(*sentence['inputs']) _, predict = torch.max(role_p, 1) loss = lossfunction(role_p, autograd.Variable(sentence['targets'][0])) if phase == 'train': loss.backward() optimizer.step() if epoch > 28: print(predict.data) print(sentence['targets'][0]) running_loss += loss.data[0] running_f.update(predict, sentence['targets'][0]) print('\n{} Loss: {:.4f} {}'.format(phase, running_loss, running_f), file=sys.stderr) if phase == 'dev' and running_f > best_f: best_f = running_f best_model_wts = copy.deepcopy(model.state_dict()) print('', file=sys.stderr) time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60), file=sys.stderr) print('Best val F: {}s'.format(best_f), file=sys.stderr) model.load_state_dict(best_model_wts) return model if __name__ == '__main__': config.parse_args() torch.manual_seed(config.get_option('seed')) mode = 
config.get_option('mode') if mode == 'train': train() else: NotImplementedError()
19
b5180a2dbe1f12e1bbc92874c67ea99c9a84a9ed
# Print every card that carries an even number; face cards are not numeric.
cards = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]

for face in cards:
    try:
        # int() raises ValueError for the face cards (J, Q, K, A).
        if int(face) % 2 == 0:
            print(face, "is an even card.")
    except ValueError:
        print(face, "can not be divided")
20
a045423edd94d985dfc9660bcfe4a88c61bf4574
# Script start
"""Tiny two-number subtraction demo."""

# print(...) is valid in both Python 2 and Python 3; the original bare
# "print expr" statements were Python 2 only.
print("This is the two number subtraction python program.")

a = 9       # minuend
b = 2       # subtrahend
c = a - b   # difference (7)

print(c)
# Script close
21
13c9f0f58ec6da317c3802f594bb0db7c275dee9
''' !pip install wget from zipfile import ZipFile import wget print('Beginning file downlaod with wget module') url = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip' wget.download(url, 'sample_data/') print('2. Extract all files in ZIP to different directory') # Create a ZipFile Object and load sample.zip in it with ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj: # Extract all the contents of zip file in different directory zipObj.extractall('content/') ''' import numpy as np import matplotlib.pyplot as plt import os import cv2 import pickle import random import datetime import tensorflow as tf from tensorflow.python.keras.datasets import cifar10 from tensorflow.python.keras.preprocessing.image import ImageDataGenerator from tensorflow.python.keras.models import Sequential from tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout from tensorflow.python.keras.layers import Conv2D, MaxPooling2D from tensorflow.python.keras.optimizers import Adam from tensorflow.python.keras.callbacks import TensorBoard DATADIR = 'content/PetImages' CATEGORIES = ['Cat', 'Dog'] #'''categories that we have to deal with''' img_array= [] for category in CATEGORIES: path = os.path.join(DATADIR, category) # path to cats and dogs dir for img in os.listdir(path): img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR) plt.imshow(img_array, cmap='gray') plt.show() print(img_array) print(img_array.shape) break break IMG_SIZE = 299 #every image of 299x299 resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) plt.imshow(resized_img_array, cmap='gray') # cmap = hot, plasma, cool, plt.show() training_data = [] def create_training_data(): # creating training datasets for category in CATEGORIES: path = os.path.join(DATADIR, category) # path to cats and dogs dir classIndex = CATEGORIES.index(category) # 0 for dog and 1 for cat for img in os.listdir(path): try: img_array = 
cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR) resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) training_data.append([resized_img_array, classIndex]) except Exception as e: pass create_training_data() print(len(training_data)) '''shuffle training data''' random.shuffle(training_data) # for sample in training_data[:10]: # print(sample[1]) x=[] y=[] for features, label in training_data: x.append(features) y.append(label) x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3) #we can't pass a list to keras for training #'''we have to pass here a numpy array ''' # print(x[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1)) pickle_out = open("x.pickle", 'wb') pickle.dump(x, pickle_out) pickle_out.close() pickle_out= open('y.pickle', 'wb') pickle.dump(y, pickle_out) pickle_out.close() pickle_in = open('x.pickle', 'rb') x = pickle.load(pickle_in) pickle_in = open('y.pickle', 'rb') y = pickle.load(pickle_in) x = x / 255.0 INPUT_SHAPE = x.shape[1:]#(224, 224, 3) DROPOUT=0.2 NB_CLASSES=10 NB_EPOCHS=10 BATCH_SIZE=128 VALIDATION_SPLIT=0.2 OPTIMIZER = Adam() max, min, accIndex , lossIndex=70.0 , 4.0, 1, 1 date = datetime.datetime.now() dense_layers = [2, 1, 0] # 0, 1,2 layer_sizes = [512, 256, 128, 64] #32, 64, 128, 256, 512 conv_layers = [3, 2, 1] # 1, 2,3 for dense_layer in dense_layers: for layer_size in layer_sizes: for conv_layer in conv_layers: NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time())) print(NAME) model = Sequential() model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE)) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2))) for l in range(conv_layer-1): model.add(Conv2D(layer_size, (5, 5))) model.add(Activation('relu')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(DROPOUT)) model.add(Flatten()) for _ in range(dense_layer): model.add(Dense(layer_size)) model.add(Activation('relu')) model.add(Dropout(DROPOUT)) model.add(Dense(NB_CLASSES)) 
model.add(Activation('softmax')) tensorboard = TensorBoard(log_dir="logs/{}".format(NAME)) model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'], ) history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1, callbacks=[tensorboard]) if history.history.get('val_acc')[-1] > max: max = history.history.get('val_acc')[-1] if accIndex >= 2: os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex-1, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")) val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"), "wb") pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")), val_acc_out) val_acc_out.close() accIndex += 1 pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb') p_upload = pickle.load(pickle_upload) print(p_upload) if history.history.get('val_loss')[-1] < min: min = history.history.get('val_loss')[-1] if lossIndex>=2: os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex-1, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")) val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")) pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")), val_loss_out) val_loss_out.close() lossIndex += 1 model.save('64x3-CNN.model') CATEGORIES = ["Dog", "Cat"] # will use this to convert prediction num to string value def prepare(filepath): IMG_SIZE = 299 # 50 in txt-based img_array = cv2.imread(filepath, cv2.IMREAD_COLOR) # read in the image, convert to grayscale resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants. 
model = tf.keras.models.load_model("64x3-CNN.model") prediction = model.predict([prepare('dog.jpg')]) # REMEMBER YOU'RE PASSING A LIST OF THINGS YOU WISH TO PREDICT print(prediction) print(prediction[0][0]) print(CATEGORIES[int(prediction[0][0])]) #We can also test our cat example: prediction = model.predict([prepare('cat.jpg')]) print(prediction) # will be a list in a list. print(CATEGORIES[int(prediction[0][0])]) ''' alpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) results in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training beta1. The exponential decay rate for the first moment estimates (e.g. 0.9). beta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems). epsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 10E-8). We can see that the popular deep learning libraries generally use the default parameters recommended by the paper. TensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08. Keras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0. Blocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1. Lasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08 Caffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08 MxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8 Torch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8 '''
22
95c5971a102fb2ed84ab0de0471278d0167d8359
#!/usr/bin/python3
"""1. Divide a matrix """


def matrix_divided(matrix, div):
    """Divide every element of a matrix by div, rounded to 2 decimals.

    Args:
        matrix: a non-empty list of non-empty, equal-length lists of
            ints/floats
        div: a non-zero int or float

    Raises:
        TypeError: if the matrix and/or div is not as stated, or the
            matrix rows are not of the same size
        ZeroDivisionError: if div is zero

    Returns:
        A new matrix holding the results (the input is left untouched).
    """
    type_msg = "matrix must be a matrix (list of lists) of integers/floats"
    # Shape/type validation first, matching the original error precedence.
    if not isinstance(matrix, list) or matrix == []:
        raise TypeError(type_msg)
    for row in matrix:
        # Bug fixes: the empty-row test used to sit inside a loop over the
        # row's elements and so could never fire, and floats were rejected
        # because 'float' was mistyped as a second isinstance(..., int).
        if not isinstance(row, list) or row == []:
            raise TypeError(type_msg)
        for element in row:
            if not isinstance(element, (int, float)):
                raise TypeError(type_msg)
    if any(len(row) != len(matrix[0]) for row in matrix):
        raise TypeError("Each row of the matrix must have the same size")
    if not isinstance(div, (int, float)):
        raise TypeError("div must be a number")
    if div == 0:
        raise ZeroDivisionError("division by zero")
    return [[round(element / div, 2) for element in row] for row in matrix]
23
5fb998fa761b989c6dd423634824197bade4f8a5
""" You can perform the following operations on the string, : Capitalize zero or more of 's lowercase letters. Delete all of the remaining lowercase letters in . Given two strings, and , determine if it's possible to make equal to as described. If so, print YES on a new line. Otherwise, print NO. For example, given and , in we can convert and delete to match . If and , matching is not possible because letters may only be capitalized or discarded, not changed. Function Description Complete the function in the editor below. It must return either or . abbreviation has the following parameter(s): a: the string to modify b: the string to match Input Format The first line contains a single integer , the number of queries. Each of the next pairs of lines is as follows: - The first line of each query contains a single string, . - The second line of each query contains a single string, . Constraints String consists only of uppercase and lowercase English letters, ascii[A-Za-z]. String consists only of uppercase English letters, ascii[A-Z]. Output Format For each query, print YES on a new line if it's possible to make string equal to string . Otherwise, print NO. Sample Input 1 daBcd ABC Sample Output YES Explanation image We have daBcd and ABC. We perform the following operation: Capitalize the letters a and c in so that dABCd. Delete all the remaining lowercase letters in so that ABC. Because we were able to successfully convert to , we print YES on a new line. """ #!/bin/python3 import math import os import random import re import sys # Complete the abbreviation function below. 
def abbreviation(a, b):
    """Return "YES" if `a` can be transformed into `b`, else "NO".

    Allowed operations: capitalize any subset of a's lowercase letters, then
    delete all remaining lowercase letters.  Classic DP where dp[i][j] means
    "a[:j] can be transformed into b[:i]".

    The original filled row i == 0 inside the main loop, where `b[i-1]`
    silently read `b[-1]` (the *last* character of b) and `dp[i-1]` read the
    not-yet-written bottom row -- it only worked by accident of evaluation
    order and raised IndexError for an empty b.  The base row is now
    computed explicitly.
    """
    m, n = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    # Base row: the empty prefix of b is reachable iff every consumed char
    # of a is lowercase (lowercase may be deleted; uppercase may not).
    for j in range(1, m + 1):
        dp[0][j] = dp[0][j - 1] and a[j - 1].islower()
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if a[j - 1].upper() == b[i - 1]:
                # match (capitalizing if needed), or delete if lowercase
                dp[i][j] = dp[i - 1][j - 1] or \
                    (a[j - 1].islower() and dp[i][j - 1])
            elif a[j - 1].islower():
                # no match: a lowercase char can always be deleted
                dp[i][j] = dp[i][j - 1]
            # an unmatched uppercase char is a dead end: stays False
    return "YES" if dp[n][m] else "NO"


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    q = int(input())
    for q_itr in range(q):
        a = input()
        b = input()
        result = abbreviation(a, b)
        fptr.write(result + '\n')
    fptr.close()
24
5ed439a2a7cfb9c941c40ea0c5eba2851a0f2855
#!/bin/python3
# Implement a stack with push, pop, inc(e, k) operations
# inc (e,k) - Add k to each of bottom e elements
import sys


class Stack(object):
    """LIFO stack backed by a list, with a bulk bottom-increment operation."""

    def __init__(self):
        self.arr = []

    def push(self, val):
        """Place val on top of the stack."""
        self.arr.append(val)

    def pop(self):
        """Remove and return the top value; returns None when empty."""
        if self.arr:
            return self.arr.pop()

    def inc(self, e, k):
        """Add k to each of the bottom e elements (clamped to stack size)."""
        bottom = min(e, len(self.arr))
        for idx in range(bottom):
            self.arr[idx] += k

    def peek(self):
        """Return the top value without removing it, or 'EMPTY'."""
        return self.arr[-1] if self.arr else 'EMPTY'


def superStack(operations):
    """Apply textual commands to a Stack, printing the top after each one."""
    stack = Stack()
    for entry in operations:
        parts = entry.split(' ')
        cmd = parts[0]
        if cmd == 'push':
            stack.push(int(parts[1]))
        elif cmd == 'pop':
            stack.pop()
        elif cmd == 'inc':
            stack.inc(int(parts[1]), int(parts[2]))
        else:
            continue  # unknown command: no output, same as before
        print(stack.peek())


if __name__ == "__main__":
    count = int(input())
    operations = []
    for _ in range(count):
        try:
            operations.append(str(input()))
        except:
            operations.append(None)
    res = superStack(operations)
25
39f9341313e29a22ec5e05ce9371bf65e89c91bd
""" 리스트에 있는 숫자들의 최빈값을 구하는 프로그램을 만들어라. [12, 17, 19, 17, 23] = 17 [26, 37, 26, 37, 91] = 26, 37 [28, 30, 32, 34, 144] = 없다 최빈값 : 자료의 값 중에서 가장 많이 나타난 값 ① 자료의 값이 모두 같거나 모두 다르면 최빈값은 없다. ② 자료의 값이 모두 다를 때, 도수가 가장 큰 값이 1개 이상 있으면 그 값은 모두 최빈값이다. """ n_list = [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91], [28, 30, 32, 34, 144], [10, 10, 10, 10, 10]] for numbers in n_list: n_dict = {} for n in numbers: if n in n_dict: n_dict[n] += 1 else: n_dict[n] = 1 mode = [] if len(n_dict) == 1 or len(n_dict) == len(numbers): print(numbers, '= 없다') else: mode_count = max(n_dict.values()) for e in n_dict.keys(): if n_dict[e] == mode_count: mode.append(e) print(numbers, '=', mode)
26
312cc666c88fcd22882c49598db8c5e18bd3dae1
from setuptools import setup, find_packages
from setuptools.extension import Extension
from sys import platform

# Use Cython to regenerate the wrapper when available; otherwise fall back to
# the pre-generated C++ file shipped with the sdist.  (The original also set
# `cython = True` before the try block -- a dead assignment, removed.)
try:
    from Cython.Build import cythonize
    cython = True
except ImportError:
    cython = False

# Compiler flags for the C++ extension; macOS needs libc++ and a minimum
# deployment target.
if platform == "darwin":
    extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x',
                          '-stdlib=libc++', '-mmacosx-version-min=10.7']
else:
    extra_compile_args = ['-O3', '-pthread', '-funroll-loops', '-std=c++0x']

# fastText/sent2vec C++ sources shared by both build paths (previously the
# entire Extension definition was duplicated for the .pyx and .cpp cases).
cpp_sources = [
    'sent2vec/cpp/src/args.cc',
    'sent2vec/cpp/src/dictionary.cc',
    'sent2vec/cpp/src/fasttext.cc',
    'sent2vec/cpp/src/main.cc',
    'sent2vec/cpp/src/matrix.cc',
    'sent2vec/cpp/src/model.cc',
    'sent2vec/cpp/src/productquantizer.cc',
    'sent2vec/cpp/src/qmatrix.cc',
    'sent2vec/cpp/src/utils.cc',
    'sent2vec/cpp/src/vector.cc',
]

wrapper = 'sent2vec/sent2vec.pyx' if cython else 'sent2vec/sent2vec.cpp'
extensions = [
    Extension('sent2vec',
              sources=[wrapper] + cpp_sources,
              language='c++',
              extra_compile_args=extra_compile_args)
]
if cython:
    extensions = cythonize(extensions)

# Package details
setup(
    name='sent2vec',
    version='0.1.0',
    author='',
    author_email='',
    url='',
    description='A Python interface for sent2vec library',
    license='BSD 3-Clause License',
    packages=['sent2vec'],
    ext_modules=extensions,
    install_requires=[],
    classifiers=[]
)
27
2aec0581413d4fb0ffb4090231fde0fed974bf18
# Builds three corrupted variants of the ROC story corpus ("./roc.txt") --
# shuffled, repeated, and replaced sentences -- presumably as negative samples
# for a coherence task.  Assumes the input is groups of 6 lines: 5 story
# sentences followed by one separator line -- TODO confirm against roc.txt.
import numpy as np
import random

# Variant 1: permute sentences 2-5 of each story; the first sentence stays put.
with open("./roc.txt", "r") as fin:
    with open("./roc_shuffle.txt", "w") as fout:
        tmp = []
        for k, line in enumerate(fin):
            i = k + 1
            if i % 6 == 0:
                # keep index 0 fixed, shuffle indices 1..4
                idx = [0] + np.random.permutation(range(1,5)).tolist()
                for sen in np.take(tmp, idx).tolist():
                    fout.write(sen+"\n")
                tmp = []
                fout.write(line.strip()+"\n")  # copy the separator line through
            else:
                tmp.append(line.strip())

# Variant 2: duplicate one random non-first sentence inside each story.
with open("./roc.txt", "r") as fin:
    with open("./roc_repeat.txt", "w") as fout:
        tmp = []
        for k, line in enumerate(fin):
            i = k + 1
            if i % 6 == 0:
                idx = random.randint(1,4)
                # NOTE(review): drops the sentence's final character (the
                # period?) before concatenating the sentence to itself --
                # confirm this is the intended "repeat" corruption.
                tmp[idx] = tmp[idx][:-1] + tmp[idx]
                for sen in tmp:
                    fout.write(sen+"\n")
                tmp = []
                fout.write(line.strip()+"\n")
            else:
                tmp.append(line.strip())

# Variant 3: replace one random non-first sentence with the same-position
# sentence from a different (randomly permuted) story.
with open("./roc.txt", "r") as fin:
    with open("./roc_replace.txt", "w") as fout:
        post, tmp = [], []
        for k, line in enumerate(fin):
            i = k + 1
            if i % 6 == 0:
                post.append(tmp)
                tmp = []
            else:
                tmp.append(line.strip().split())
        # column view: data["p"] holds sentence p of every story (tokenized)
        data = {"1":[], "2":[], "3":[], "4":[], "5":[]}
        for p in post:
            for i in range(5):
                data["%d"%(i+1)].append(p[i])
        # shallow copy is fine here: every key is reassigned below
        random_data = data.copy()
        for i in range(5):
            random_data["%d"%(i+1)] = np.random.permutation(random_data["%d"%(i+1)])
        for k in range(len(post)):
            # pick which of sentences 1..4 (0-based) gets replaced
            idx = np.random.permutation(range(1,5))[0]
            for i in range(5):
                if i == idx:
                    fout.write(' '.join(random_data["%d"%(i+1)][k])+"\n")
                else:
                    fout.write(' '.join(data["%d"%(i+1)][k])+"\n")
            fout.write("------\n")
28
4f13e2858d9cf469f14026808142886e5c3fcc85
class Solution:
    def merge(self, nums1, m, nums2, n):
        """Merge sorted nums2 (n values) into sorted nums1 (first m values).

        Do not return anything, modify nums1 in-place instead.

        Works backwards from the end of nums1 so no temporary buffer is
        needed.  The original version built the merged result in a local
        list and rebound `nums1 = ans`, which never changed the caller's
        list; its `nums1[0] >= nums2[-1]` fast path also copied elements in
        the wrong direction and never wrote nums2 at all.
        """
        i, j = m - 1, n - 1   # last valid elements of each input
        k = m + n - 1         # next write slot, filled from the back
        while j >= 0:
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
            k -= 1
        # remaining nums1[:i+1] elements are already in place


if __name__ == "__main__":
    solve = Solution()
    nums1 = [1, 2, 3, 0, 0, 0]
    m = 3
    nums2 = [2, 5, 6]
    n = 3
    solve.merge(nums1, m, nums2, n)
    print(nums1)
29
57967f36a45bb3ea62708bbbb5b2f4ddb0f4bb16
# -*- coding:ascii -*-
#
# Auto-generated Mako template module for account.rentalcart.html.
# This is compiled output -- do not edit by hand; regenerate from the
# .html template instead.
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1428612037.145222
_enable_loop = True
_template_filename = 'C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html'
_template_uri = '/account.rentalcart.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']

# Module-level template code: snapshot "now" once at import, formatted like
# 'April 09, 2015'.
from datetime import datetime, timedelta
now = datetime.now()
noww = now.strftime('%B %d, %Y')


def _mako_get_namespace(context, name):
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    pass
def _mako_inherit(template, context):
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'base_ajax.htm', _template_uri)
def render_body(context,**pageargs):
    # Top-level template body; the actual markup lives in render_content().
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        int = context.get('int', UNDEFINED)
        str = context.get('str', UNDEFINED)
        rentals = context.get('rentals', UNDEFINED)
        def content():
            return render_content(context._locals(__M_locals))
        request = context.get('request', UNDEFINED)
        STATIC_URL = context.get('STATIC_URL', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\r\n')
        __M_writer('\r\n')
        # NOTE(review): `str(nowww = ...)` passes a keyword argument to str()
        # and raises TypeError at render time; `noww` is also a string, so
        # the timedelta subtraction itself would fail.  Fix belongs in the
        # source .html template, not here.
        __M_writer(str(nowww = noww - timedelta(days=3)))
        __M_writer('\r\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
            context['self'].content(**pageargs)
        __M_writer('\r\n\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


def render_content(context,**pageargs):
    # Renders the rental-cart table (one row per rented item) plus the
    # checkout button row; checkout target depends on authentication.
    __M_caller = context.caller_stack._push_frame()
    try:
        int = context.get('int', UNDEFINED)
        str = context.get('str', UNDEFINED)
        rentals = context.get('rentals', UNDEFINED)
        def content():
            return render_content(context)
        request = context.get('request', UNDEFINED)
        STATIC_URL = context.get('STATIC_URL', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\r\n\r\n<table class="table-responsive table-striped">\r\n <th></th>\r\n <th>#</th>\r\n <th>Name</th>\r\n <th>Price per Day</th>\r\n <th># of Days Rented</th>\r\n')
        for item in rentals:
            __M_writer(' <tr>\r\n <td><button rel="')
            __M_writer(str( item.id ))
            __M_writer('" class="btn btn-danger btn-sm deleter">Remove</button></td>\r\n <td class="img-col"><img class="shopping_cart_image" src="')
            __M_writer(str(STATIC_URL))
            __M_writer(str( item.photo.image ))
            __M_writer('"/></td>\r\n <td class="name-col">')
            __M_writer(str( noww ))
            __M_writer('</td>\r\n <td class="price-col">')
            __M_writer(str( item.price_per_day ))
            __M_writer('</td>\r\n <td class="qty-col">')
            __M_writer(str(int(request.session['rental_cart'][str(item.id)])))
            __M_writer('</td>\r\n </tr>\r\n')
        __M_writer('</table>\r\n<table id="button-table" class="table-responsive">\r\n <tr>\r\n <td id="space"></td>\r\n')
        if request.user.is_authenticated():
            __M_writer(' <td id=\'checkout\'><a href="/account.checkout" class="btn btn-warning">Checkout</a></td>\r\n')
        else:
            __M_writer(' <td id=\'checkout\'><a href="/mylogin.cartlogin" class="btn btn-warning">Checkout</a></td>\r\n')
        __M_writer(' </tr>\r\n</table>\r\n')
        return ''
    finally:
        context.caller_stack._pop_frame()


"""
__M_BEGIN_METADATA
{"uri": "/account.rentalcart.html", "line_map": {"70": 8, "71": 16, "72": 17, "73": 18, "74": 18, "75": 19, "76": 19, "77": 19, "78": 20, "79": 20, "80": 21, "81": 21, "82": 22, "83": 22, "84": 25, "85": 29, "86": 30, "87": 31, "88": 32, "89": 34, "95": 89, "33": 0, "16": 2, "45": 1, "46": 6, "47": 7, "48": 7, "53": 36, "59": 8}, "filename": "C:\\Users\\Cody\\Desktop\\Heritage\\chf\\templates/account.rentalcart.html", "source_encoding": "ascii"}
__M_END_METADATA
"""
30
5771f49ad5254588f1683a8d45aa81ce472bb562
def prime_sieve(n):
    """Return all primes <= n using an odd-only sieve of Eratosthenes.

    (Modernized from Python 2: explicit list(), floor division, print().)
    """
    if n == 2:
        return [2]
    if n < 2:
        return []
    # s holds the odd candidates 3, 5, 7, ...; composites are zeroed out.
    s = list(range(3, n + 1, 2))
    mroot = n ** 0.5
    half = (n + 1) // 2 - 1
    i = 0
    m = 3
    while m <= mroot:
        if s[i]:
            j = (m * m - 3) // 2
            s[j] = 0
            while j < half:
                s[j] = 0
                j += m
        i += 1
        m = 2 * i + 3
    return [2] + [x for x in s if x]


# Shared prime cache used by trial division below.
ps = prime_sieve(1000000)


def get_primes_upto(n):
    """Return the cached primes <= n, plus the first prime above n.

    The extra prime is harmless for trial division, which stops as soon as
    p * p > n.
    """
    i = 0
    while ps[i] <= n:
        i += 1
    return ps[0:i + 1]


def trial_division(n):
    """Return the prime factorization of n with multiplicity; [1] for n == 1."""
    if n == 1:
        return [1]
    primes = get_primes_upto(int(n ** 0.5) + 1)
    prime_factors = []
    for p in primes:
        if p * p > n:
            break
        while n % p == 0:
            prime_factors.append(p)
            n //= p
    if n > 1:
        # whatever is left is itself prime
        prime_factors.append(n)
    return prime_factors


def unique_factors(n):
    """Return the number of distinct prime factors of n."""
    return len(set(trial_division(n)))


def _solve():
    """Project Euler 47: first of four consecutive integers each having four
    distinct prime factors."""
    fs = [0]  # fs[i] = distinct-factor count of i
    for i in range(1, 1000000):
        fs.append(unique_factors(i))
        if len(fs) > 4 and fs[-4:] == [4, 4, 4, 4]:
            return i - 3


if __name__ == "__main__":
    # Guarded so importing this module does not run the (slow) search.
    print(_solve())
31
44d87f112ab60a202e4c8d64d7aec6f4f0d10578
# coding: utf-8
"""factory_boy factories for journalmanager test fixtures."""
import os
import factory
import datetime

from journalmanager import models
from django.contrib.auth.models import Group
from django.core.files.base import File

_HERE = os.path.dirname(os.path.abspath(__file__))


with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:
    SAMPLE_XML = xml_file.read()

# NOTE(review): opened in text mode and never closed; ArticleAssetFactory
# wraps the live handle in File(), so it cannot simply be closed here --
# consider opening with 'rb' and managing its lifetime explicitly.
SAMPLE_TIFF_IMAGE = open(
    os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'))

with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:
    SAMPLE_XML_RELATED = xml_file.read()


class UserFactory(factory.Factory):
    FACTORY_FOR = models.User

    @classmethod
    def _setup_next_sequence(cls):
        # Continue the sequence after the highest existing user id.
        try:
            return cls._associated_class.objects.values_list(
                'id', flat=True).order_by('-id')[0] + 1
        except IndexError:
            return 0

    username = factory.Sequence(lambda n: "jmanager_username%s" % n)
    first_name = factory.Sequence(lambda n: "jmanager_first_name%s" % n)
    last_name = factory.Sequence(lambda n: "jmanager_last_name%s" % n)
    email = factory.Sequence(lambda n: "jmanager_email%s@example.com" % n)
    password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
    is_staff = False
    is_active = True
    is_superuser = False
    last_login = datetime.datetime(2000, 1, 1)
    date_joined = datetime.datetime(1999, 1, 1)


class GroupFactory(factory.Factory):
    FACTORY_FOR = Group

    name = factory.Sequence(lambda n: "Group #%s" % n)


class SubjectCategoryFactory(factory.Factory):
    FACTORY_FOR = models.SubjectCategory

    term = 'Acoustics'


class StudyAreaFactory(factory.Factory):
    FACTORY_FOR = models.StudyArea

    study_area = 'Health Sciences'


class SponsorFactory(factory.Factory):
    FACTORY_FOR = models.Sponsor

    name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
    address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
    email = 'fapesp@scielo.org'
    complement = ''


class UseLicenseFactory(factory.Factory):
    FACTORY_FOR = models.UseLicense

    license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
    reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
    disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'


class CollectionFactory(factory.Factory):
    FACTORY_FOR = models.Collection

    url = u'http://www.scielo.br/'
    name = factory.Sequence(lambda n: 'scielo%s' % n)
    address_number = u'430'
    country = u'Brasil'
    address = u'Rua Machado Bittencourt'
    email = u'fapesp@scielo.org'
    name_slug = factory.Sequence(lambda n: 'scl%s' % n)


class JournalFactory(factory.Factory):
    FACTORY_FOR = models.Journal

    ctrl_vocabulary = u'decs'
    frequency = u'Q'
    scielo_issn = u'print'
    print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
    eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))
    init_vol = u'1'
    title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
    title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
    short_title = u'ABCD.(São Paulo)'
    editorial_standard = u'vancouv'
    secs_code = u'6633'
    init_year = u'1986'
    acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
    pub_level = u'CT'
    # Fixed: a trailing comma here previously made this attribute the tuple
    # (u'1',) instead of the string u'1'.
    init_num = u'1'
    subject_descriptors = u"""
        MEDICINA
        CIRURGIA
        GASTROENTEROLOGIA
        GASTROENTEROLOGIA""".strip()
    publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
    publisher_country = u'BR'
    publisher_state = u'SP'
    publication_city = u'São Paulo'
    editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
    editor_email = u'cbcd@cbcd.org.br'

    creator = factory.SubFactory(UserFactory)
    use_license = factory.SubFactory(UseLicenseFactory)


class SectionFactory(factory.Factory):
    FACTORY_FOR = models.Section

    code = factory.Sequence(lambda n: 'BJCE%s' % n)

    journal = factory.SubFactory(JournalFactory)


class LanguageFactory(factory.Factory):
    FACTORY_FOR = models.Language

    iso_code = 'pt'
    name = 'portuguese'


class IssueTitleFactory(factory.Factory):
    """
    ``issue`` must be provided
    """
    FACTORY_FOR = models.IssueTitle

    language = factory.SubFactory(LanguageFactory)
    title = u'Bla'


class IssueFactory(factory.Factory):
    FACTORY_FOR = models.Issue

    total_documents = 16
    number = factory.Sequence(lambda n: '%s' % n)
    volume = factory.Sequence(lambda n: '%s' % n)
    is_trashed = False
    publication_start_month = 9
    publication_end_month = 11
    publication_year = 2012
    is_marked_up = False
    suppl_text = '1'

    journal = factory.SubFactory(JournalFactory)

    @classmethod
    def _prepare(cls, create, **kwargs):
        # Every issue gets one section attached after creation.
        section = SectionFactory()
        issue = super(IssueFactory, cls)._prepare(create, **kwargs)
        issue.section.add(section)
        return issue


class UserProfileFactory(factory.Factory):
    FACTORY_FOR = models.UserProfile

    user = factory.SubFactory(UserFactory)
    email_notifications = True


class SectionTitleFactory(factory.Factory):
    FACTORY_FOR = models.SectionTitle

    title = u'Artigos Originais'

    language = factory.SubFactory(LanguageFactory)
    section = factory.SubFactory(SectionFactory)


class RegularPressReleaseFactory(factory.Factory):
    FACTORY_FOR = models.RegularPressRelease

    issue = factory.SubFactory(IssueFactory)
    doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)


class AheadPressReleaseFactory(factory.Factory):
    FACTORY_FOR = models.AheadPressRelease

    journal = factory.SubFactory(JournalFactory)
    doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)


class PressReleaseTranslationFactory(factory.Factory):
    FACTORY_FOR = models.PressReleaseTranslation

    language = factory.SubFactory(LanguageFactory)
    press_release = factory.SubFactory(RegularPressReleaseFactory)
    title = u'Yeah, this issue is amazing!'
    content = u'Want to read more about...'


class PressReleaseArticleFactory(factory.Factory):
    FACTORY_FOR = models.PressReleaseArticle

    press_release = factory.SubFactory(RegularPressReleaseFactory)
    article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)


class ArticleFactory(factory.Factory):
    FACTORY_FOR = models.Article

    xml = SAMPLE_XML
    is_aop = False
    domain_key = factory.Sequence(
        lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
    journal_title = u'Revista de Saúde Pública'
    issn_ppub = u'0034-8910'
    issn_epub = u'1518-8787'
    xml_version = u'sps-1.2'
    article_type = u'research-article'
    doi = u'10.1590/S0034-8910.2014048004965'


class ArticleAssetFactory(factory.Factory):
    FACTORY_FOR = models.ArticleAsset

    article = factory.SubFactory(ArticleFactory)
    file = File(SAMPLE_TIFF_IMAGE)
    owner = u'SciELO'
    use_license = u'Creative Commons - BY'
32
81dfdf0479fc1f136fa5153840d8c7015f9db676
# required !!! # pip install selenium # pip install webdriver-manager from theMachine import loops # fill the number and message # you can fill the number with array phoneNumber = "fill the number" message = "fill with ur message" loop = 1 # this how many u want to loop loops(loop, phoneNumber, message) # input how many u want to loop
33
24de4f486d4e976850e94a003f8d9cbe3e518402
a= input("Enter number") a= a.split() b=[] for x in a: b.append(int(x)) print(b) l=len(b) c=0 s=0 for i in range(l): s=len(b[:i]) for j in range(s): if b[s]<b[j]: c=b[s] b.pop(s) b.insert(b.index(b[j]),c) print(b,b[:i],b[s])
34
0ecd2a298203365b20b2369a99c3c1d7c0646f19
# coding: utf-8
# ack program with the ackermann_function
"""ackermann_function"""


def ack(m, n):
    """Return the Ackermann function A(m, n).

    Definition:
        A(0, n)     = n + 1
        A(m, 0)     = A(m - 1, 1)          for m > 0
        A(m, n)     = A(m - 1, A(m, n-1))  for m > 0, n > 0

    (The original compared ints with ``is 0`` -- an identity check that only
    works by CPython's small-int caching -- and used Python 2 print
    statements.)
    """
    if m == 0:
        return n + 1
    if n == 0:
        return ack(m - 1, 1)
    return ack(m - 1, ack(m, n - 1))


if __name__ == "__main__":
    # Self-test against the known table of A(m, n) for m < 4, n < 5;
    # guarded so importing this module does not run it.
    expected = [[1, 2, 3, 4, 5],
                [2, 3, 4, 5, 6],
                [3, 5, 7, 9, 11],
                [5, 13, 29, 61, 125]]
    ok = True
    for m in range(4):
        for n in range(5):
            if ack(m, n) != expected[m][n]:
                print("error")
                ok = False
    if ok:
        print("All tests pass")
35
a98be930058269a6adbc9a28d1c0ad5d9abba136
import sys
import time
import pymorphy2
import pyglet
import pyttsx3
import threading
import warnings
import pytils

warnings.filterwarnings("ignore")

"""Number of rounds, breaths per round, and breath-hold length (seconds) on the inhale."""
rounds, breaths, hold = 4, 30, 13


def play_wav(src):
    # Play a wav from ./src/wav/ and block until it finishes.
    # NOTE(review): backslash separators make this Windows-only.
    wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
    wav.play()
    time.sleep(wav.duration)


def play_wav_inline(src):
    # Play a wav without waiting for it to finish.
    wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
    wav.play()


def correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):
    # Replace digits in the phrase with their spelled-out Russian form,
    # walking right-to-left so each numeral inherits the grammatical gender
    # of the noun that follows it.
    new_phrase = []
    py_gen = 1
    phrase = phrase.split(' ')
    while phrase:
        word = phrase.pop(-1)
        if 'NUMB' in morph.parse(word)[0].tag:
            new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))
        else:
            new_phrase.append(word)
            py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0].tag else pytils.numeral.MALE
    return ' '.join(new_phrase[::-1])


def nums(phrase, morph=pymorphy2.MorphAnalyzer()):
    """Agree nouns with the numerals preceding them (Russian morphology)."""
    phrase = phrase.replace(' ', ' ').replace(',', ' ,')
    numeral = ''
    new_phrase = []
    for word in phrase.split(' '):
        if 'NUMB' in morph.parse(word)[0].tag:
            numeral = word
        if numeral:
            word = str(morph.parse(word)[0].make_agree_with_number(abs(int(numeral))).word)
        new_phrase.append(word)
    return ' '.join(new_phrase).replace(' ,', ',')


def speak(what):
    # Text-to-speech for one phrase; runs inside its own thread (see Workout.say).
    speech_voice = 3  # index of the voice engine to use
    rate = 120
    tts = pyttsx3.init()
    voices = tts.getProperty("voices")
    tts.setProperty('rate', rate)
    tts.setProperty("voice", voices[speech_voice].id)
    print('🔊', what)
    what = correct_numerals(what)
    tts.say(what)
    tts.runAndWait()
    # tts.stop()


class Workout:
    """Guided Wim-Hof-style breathing session with voice prompts and sounds."""

    def __init__(self, rounds=3, breaths=30, hold=15):
        self.rounds = rounds
        self.breaths = breaths
        self.hold = hold
        self.round_times = []  # per-round exhale-hold durations as 'MM:SS'
        self.lock = threading.Lock()  # serializes the individual speech threads

    def __str__(self):
        return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)

    def __hold_breath(self):
        # Time the exhale hold: waits for the user to press Enter, records
        # the elapsed time, then announces it.
        start_time = time.time()
        input()
        seconds = int(time.time() - start_time)
        mins = seconds // 60
        secs = seconds % 60
        self.round_times.append('{:02}:{:02}'.format(mins, secs))
        play_wav_inline('inhale')
        self.say('Глубокий вдох. ' + nums("{} минута {} секунда".format(mins, secs)))

    def __clock_tick(self):
        # Inhale-hold countdown: silent sleep, then audible clock ticks for
        # the final 3 seconds, then a gong.
        # NOTE(review): compares against the module-level `hold`, not
        # self.hold -- the two only agree for the default instance below.
        for i in range(self.hold):
            if i < hold - 3:
                time.sleep(1)
            else:
                play_wav('clock')
        play_wav_inline('gong2')

    def __breathe_round(self, round):
        # One round: `breaths` deep breaths, exhale hold, inhale hold.
        self.say('Раунд ' + str(round))
        for i in range(self.breaths):
            if i % 10 == 0:
                play_wav_inline('gong')
            play_wav('inhale')
            print(i + 1, end=' ')
            play_wav('exhale')
        print()
        self.say('Задерживаем дыхание на выдохе')
        self.__hold_breath()
        # self.say('Держим ' + nums(str(self.hold) + ' секунда'))
        self.__clock_tick()
        play_wav_inline('exhale')
        self.say('Выдох')
        time.sleep(1)

    def breathe(self):
        # Run the full session: intro prompts, all rounds, recovery prompt.
        self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))
        self.say('Каждый раунд это ' + nums(str(self.breaths) + ' глубокий вдох - и спокойный выдох'))
        self.say('Приготовились...')
        for i in range(self.rounds):
            self.__breathe_round(i + 1)
        self.say('Восстанавливаем дыхание.')

    def statistics(self):
        # Print the recorded hold time of every round.
        print('=============')
        for i in range(len(self.round_times)):
            print('Раунд', i, self.round_times[i])
        print('=============')

    def say(self, what):
        # Speak a phrase in a worker thread, one phrase at a time.
        self.lock.acquire()
        thread = threading.Thread(target=speak, kwargs={'what': what})
        thread.start()
        thread.join()
        self.lock.release()


workout = Workout(rounds, breaths, hold)
workout.breathe()
workout.statistics()
36
4f0933c58aa1d41faf4f949d9684c04f9e01b473
"""Interactively copy one file to another, confirming before the write."""
from os.path import exists

from_file = input('from_file')  # fixed prompt typo ('form_file')
to_file = input('to_file')

print(f"copying from {from_file} to {to_file}")

# Context managers close the handles deterministically; the original relied
# on the garbage collector to close both files.
with open(from_file) as src:
    indata = src.read()
print(f"the input file is {len(indata)} bytes long")

print(f"does the output file exist? {exists(to_file)}")
print("return to continue, CTRL-C to abort")
input('?')

with open(to_file, 'w') as dst:
    dst.write(indata)
print("done!")
37
5c81ddbc8f5a162949a100dbef1c69551d9e267a
# -*- coding: utf-8 -*- from django.test import TestCase from django.contrib.auth.models import User from ..models import Todo class MyTestCase(TestCase): def test_mark_done(self): user = User.objects.create_user(email='user@…', username='user', password='somepasswd') todo = Todo(title='SomeTitle', description='SomeDescr', owner=user) res = todo.mark_done(user) self.assertTrue(res) self.assertEqual(Todo.objects.count(), 1) def test_mark_done_already_done(self): user = User.objects.create_user(email='user@…', username='user', password='somepasswd') todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user) res = todo.mark_done(user) self.assertIsNone(res) # todo not saved because mark_done don't save already done todos self.assertEqual(Todo.objects.count(), 0)
38
509129052f97bb32b4ba0e71ecd7b1061d5f8da2
# Division demo: 180 / 4 is true division, so this prints 45.0.
print(180 / 4)
39
2c90c4e0b42a75d6d387b9b2d0118d8e991b5a08
import math
import decimal
from typing import Union, List, Tuple

from sqlalchemy import text, func

from .model import BaseMixin
from ..core.db import db

# One ordering directive: (column_name, direction), e.g. ('id', 'desc').
# Fixed: the original wrote List[Set(str, Union(str, int, decimal.Decimal))],
# *calling* Set and Union with parentheses, which raises TypeError at import
# time; typing subscription uses square brackets.
Orders = List[Tuple[str, Union[str, int, decimal.Decimal]]]


class BaseDBMgr:
    """Generic CRUD helper over SQLAlchemy models, aware of soft deletion
    (rows with a ``deleted_at`` column are filtered to ``deleted_at == 0``)."""

    def _apply_orders(self, query, orders: Orders):
        """Apply (column, direction) pairs; direction falls back to 'desc'.

        Uses its own loop variables so callers' ``field`` parameter is not
        clobbered (the original unpacked into a variable named ``field``,
        silently overwriting the projection tuple).
        """
        for col, direction in orders:
            direction = 'desc' if direction not in ['asc', 'desc'] else direction
            query = query.order_by(text(f'{col} {direction}'))
        return query

    def get_page(self, cls_: BaseMixin, filters: set, orders: Orders = list(),
                 field: tuple = (), page: int = 1, per_page: int = 10) -> dict:
        """Return one page of rows plus paging metadata.

        @param cls_     model entity class
        @param filters  query conditions
        @param orders   ordering directives
        @param field    fields to include in each returned dict ('' = all)
        @param page     1-based page number
        @param per_page rows per page
        @return dict with 'page' metadata and 'items'
        """
        res = {
            'page': {
                'current_page': page,
                'per_page': per_page,
                'total_page': 0,
                'count': 0,
            },
            'items': []
        }
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        res['page']['count'] = query.count()
        res['page']['total_page'] = math.ceil(res['page']['count'] / per_page)
        query = self._apply_orders(query, orders)
        data = query.offset((page - 1) * per_page).limit(per_page)
        if not field:
            res['items'] = [item.to_dict() for item in data]
        else:
            res['items'] = [item.to_dict(only=field) for item in data]
        return res

    def get_all(self, cls_: BaseMixin, filters: set, orders: Orders = list(),
                field: tuple = (), limit: int = 0) -> list:
        """Return all rows matching the conditions (optionally limited).

        Fixed: the original rebound ``query = query.all()`` and then iterated
        a never-assigned ``items`` name, raising NameError on every call.
        """
        query = db.query(cls_)
        if filters:
            query = query.filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        query = self._apply_orders(query, orders)
        if limit != 0:
            query = query.limit(limit)
        rows = query.all()
        if not field:
            return [item.to_dict() for item in rows]
        return [item.to_dict(only=field) for item in rows]

    def get_first(self, cls_: BaseMixin, filters: set, orders: Orders = list(),
                  field: tuple = ()) -> dict:
        """Return the first row matching the conditions, or None."""
        items = self.get_all(cls_, filters, orders, field, limit=1)
        return items[0] if items else None

    def add(self, cls_: BaseMixin, data: dict) -> int:
        """Insert one row and return its primary key."""
        item = cls_(**data)
        db.add(item)
        db.flush()  # flush so the generated id is available
        return item.id

    def update(self, cls_: BaseMixin, data: dict, filters: set) -> int:
        """Update matching rows; returns the number of affected rows."""
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        return query.update(data, synchronize_session=False)

    def delete(self, cls_: BaseMixin, filters: set) -> int:
        """Delete matching rows (soft delete when the model supports it);
        returns the number of affected rows."""
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            # soft delete row by row via the model's own delete()
            items = query.filter(cls_.deleted_at == 0).all()
            for item in items:
                item.delete()
            affect_rows = len(items)
        else:
            affect_rows = query.filter(*filters).delete(synchronize_session=False)
        db.commit()
        return affect_rows

    def count(self, cls_: BaseMixin, filters: set, field=None) -> int:
        """Return the number of matching rows (optionally counting a column).

        Fixed: ``Query.count()`` takes no arguments, so the original
        ``query.count(field)`` raised TypeError; counting a specific column
        now goes through ``func.count``.
        """
        query = db.query(cls_).filter(*filters)
        if hasattr(cls_, 'deleted_at'):
            query = query.filter(cls_.deleted_at == 0)
        if field is None:
            return query.count()
        return query.with_entities(func.count(field)).scalar()
40
cb2e800cc2802031847b170a462778e5c0b3c6f9
"""Cliff-walking grid world solved with SARSA and Q-learning (epsilon-greedy).

The script trains both algorithms for several epsilon values, prints the
greedy policy per run, and saves heatmap / error / reward plots.
"""
# Explicit imports replace the original wildcard `from math/numpy/random
# import *`, which silently shadowed names (e.g. numpy.random vs
# random.random).
from random import random, randint

import numpy as np
import matplotlib.pyplot as plt
from colorama import Fore, Back, Style

from gridworld import q_to_arrow

# Grid dimensions of the cliff-walking task.
N_ROWS = 6
N_COLUMNS = 10


class State(object):
    """One grid cell holding a Q-value per action (0=N, 1=E, 2=S, 3=W)."""

    def __init__(self, i, j, is_cliff=False, is_goal=False):
        self.i = i
        self.j = j
        self.is_cliff = is_cliff
        self.is_goal = is_goal
        # north, east, south, west
        self.q_values = np.array([0.0, 0.0, 0.0, 0.0])

    def __str__(self):
        return '({}, {})'.format(self.i, self.j)

    def is_terminal(self):
        """Cliff and goal cells end the episode."""
        return self.is_goal or self.is_cliff

    def get_max_q_index(self):
        """Return the greedy action, breaking ties uniformly at random."""
        best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
        if len(best_q_values) > 1:
            return best_q_values[randint(0, len(best_q_values) - 1)][0]
        return np.argmax(self.q_values)

    def get_max_q_value(self):
        """Return the value of the best action in this state."""
        return np.max(self.q_values)


def initialize_states():
    """Build the grid: bottom row is the cliff, bottom-right is the goal."""
    states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]
    for j in range(1, N_COLUMNS - 1):
        states[-1][j].is_cliff = True
    states[-1][-1].is_goal = True
    return states


def reward(s_1, s_2):
    """Reward for the transition s_1 -> s_2: goal +10, cliff -100, step -1."""
    if (s_1.is_goal or s_1.is_cliff):
        return 0
    elif (s_2.is_goal):
        return 10
    elif (s_2.is_cliff):
        return -100
    else:
        return -1


def transition(stsp, s, di, dj):
    """Move from state *s* by (di, dj) inside state space *stsp*.

    Terminal states and moves off the grid leave the agent where it is.
    """
    if (s.is_cliff or s.is_goal):
        return s
    elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)):
        return s
    else:
        return stsp[s.i + di][s.j + dj]


gamma = 1
learning_rate = 0.01


def action_to_diff_vector(action):
    """Map an action index to its (di, dj) displacement."""
    if action == 0:  # NORTH
        return -1, 0
    elif action == 1:  # EAST
        return 0, 1
    elif action == 2:  # SOUTH
        return 1, 0
    elif action == 3:  # WEST
        return 0, -1
    # The original fell through and returned None, which would crash later
    # at the unpacking site; fail loudly at the source instead.
    raise ValueError('invalid action: {}'.format(action))


def action_to_verbose(action):
    """Human-readable name of an action index."""
    if action == 0:
        return 'NORTH'
    elif action == 1:
        return 'EAST'
    elif action == 2:
        return 'SOUTH'
    elif action == 3:
        return 'WEST'
    raise ValueError('invalid action: {}'.format(action))


def sarsa(state, next_state, action, next_state_action):
    """One SARSA update; returns (step reward, updated Q[state, action])."""
    r = reward(state, next_state)
    return r, state.q_values[action] + learning_rate * (
        r + gamma * next_state.q_values[next_state_action] - state.q_values[action])


def q_learning(state, next_state, action, next_state_action):
    """One Q-learning update (off-policy max over successor actions)."""
    r = reward(state, next_state)
    next_state_q_value = next_state.get_max_q_value()
    return r, state.q_values[action] + learning_rate * (
        r + gamma * next_state_q_value - state.q_values[action])


N_STEPS = 10000
METHOD = 'BOTH'
EPSILONS = [0.05, 0.1, 0.25]


def run_code(use_q_learning=False, _epsilon=0.01):
    """Train for N_STEPS episodes.

    Returns (per-episode error array, final state table, cumulative rewards).
    """
    states = initialize_states()
    decay = 1  # decay factor of 1 keeps epsilon constant (as in the original)
    min_epsilon = 0.00001
    epsilon = _epsilon
    episode_rewards = []
    mistakes_array = []  # per-episode distance from the known-optimal values
    for i in range(N_STEPS):
        # every episode starts in the bottom-left corner
        current_state = states[N_ROWS - 1][0]
        epsilon = max(min_epsilon, epsilon * decay)
        episode_reward = 0
        while not current_state.is_terminal():
            # epsilon-greedy action selection
            if random() < epsilon:
                next_action = randint(0, 3)
            else:
                next_action = current_state.get_max_q_index()

            di, dj = action_to_diff_vector(next_action)
            next_state = transition(states, current_state, di, dj)

            # epsilon-greedy successor action (only SARSA actually uses it)
            if random() < epsilon:
                next_state_action = randint(0, 3)
            else:
                next_state_action = next_state.get_max_q_index()

            # `step_reward` replaces the original local named `reward`,
            # which shadowed the module-level reward() function.
            if use_q_learning:
                step_reward, current_state.q_values[next_action] = q_learning(
                    current_state, next_state, next_action, next_state_action)
            else:
                step_reward, current_state.q_values[next_action] = sarsa(
                    current_state, next_state, next_action, next_state_action)

            episode_reward += step_reward
            current_state = next_state

        # store the cumulative sum of episode rewards
        if len(episode_rewards):
            episode_rewards.append(episode_rewards[-1] + episode_reward)
        else:
            episode_rewards.append(episode_reward)
        mistakes_array.append(check_accuracy(states))

    return np.array(mistakes_array), states, episode_rewards


def check_accuracy(states):
    """Sum of absolute differences between learned values and the optimum."""
    correct_result = np.array([
        [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6],
        [-2, -1, 0, 1, 2, 3, 4, 5, 6, 7],
        [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8],
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ])
    mistakes_delta = 0
    for i in range(N_ROWS):
        for j in range(N_COLUMNS):
            mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values))
    return mistakes_delta


def plot_errors(mistakes_sarsa, mistakes_q_learning):
    """Plot convergence error curves for both algorithms and save the figure."""
    plt.gca().invert_yaxis()
    legend = []
    for mistake_sarsa in mistakes_sarsa:
        plt.plot(mistake_sarsa[1])
        legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0]))
    for mistake_q_learning in mistakes_q_learning:
        plt.plot(mistake_q_learning[1])
        legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0]))
    # The original passed which='y'; `which` only accepts
    # 'major'/'minor'/'both' and raises — `axis` selects the y grid lines.
    plt.grid(axis='y')
    plt.legend(legend)
    plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
    # plt.show()


def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
    """Draw a heatmap of max Q-values into subplot slot *PLOTS* of *ax*."""
    final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)]
                           for i in range(N_ROWS)])
    # slots 0-2 fill the left column, 3-5 the right column
    if PLOTS > 2:
        ax = ax[PLOTS % 3, 1]
    else:
        ax = ax[PLOTS, 0]
    ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
    ax.set_xticks(np.arange(N_COLUMNS))
    ax.set_yticks(np.arange(N_ROWS))
    ax.set_xticklabels([i for i in range(N_COLUMNS)])
    ax.set_yticklabels([i for i in range(N_ROWS)])
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    # annotate every cell with its value
    for i in range(N_ROWS):
        for j in range(N_COLUMNS):
            ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),
                    ha="center", va="center", color="w")

    fig.tight_layout()
    ax.set_title("{}; $\epsilon={}$".format(method, epsilon))


def display_optimal_policy(states, method, epsilon):
    """Print the greedy policy as arrows; goal is green, cliff cells red."""
    print("{}; ε = {}".format(method, epsilon))
    print('-' * 60)
    for i in range(len(states)):
        for j in range(len(states[0])):
            if j == 0:
                print('|', end='')
            if states[i][j].is_goal:
                print(Back.GREEN + '  ', end='')
                print(Style.RESET_ALL + ' | ', end='')
            elif states[i][j].is_cliff:
                print(Back.RED + '  ', end='')
                print(Style.RESET_ALL + ' | ', end='')
            else:
                print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='')
        print()
        print('-' * 60)


if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    print('invalid method. must be Q_LEARNING or SARSA or BOTH')
    import sys
    sys.exit()

mistakes_q_learning = []
mistakes_sarsa = []
PLOTS = 0
fig, axes = plt.subplots(3, 2)
rewards = []

for epsilon in EPSILONS:
    if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
        _mistakes_q_learning, end_states_q_learning, episode_rewards = \
            run_code(use_q_learning=True, _epsilon=epsilon)
        plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon,
                                  PLOTS, fig, axes)
        display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
        mistakes_q_learning.append((epsilon, _mistakes_q_learning))
        rewards.append(('Q_LEARNING', epsilon, episode_rewards))
        PLOTS += 1

for epsilon in EPSILONS:
    if METHOD == 'SARSA' or METHOD == 'BOTH':
        _mistakes_sarsa, end_states_sarsa, episode_rewards = \
            run_code(use_q_learning=False, _epsilon=epsilon)
        plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon,
                                  PLOTS, fig, axes)
        display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
        mistakes_sarsa.append((epsilon, _mistakes_sarsa))
        rewards.append(('SARSA', epsilon, episode_rewards))
        PLOTS += 1

plt.savefig('all_runs.png')
plt.show()

for reward_entry in rewards:
    plt.plot(reward_entry[2], label='{} ε = {} '.format(reward_entry[0], reward_entry[1]))

plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
# Save before show(): the original saved afterwards, by which point
# plt.show() had cleared the figure and an empty image was written.
plt.savefig('episode_rewards.png')
plt.show()

plot_errors(mistakes_sarsa, mistakes_q_learning)
41
52da8608e43b2d8dfe00f0956a1187fcf2e7b1ff
# Generated by Django 2.2.6 on 2020-05-21 09:44 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('DHOPD', '0015_auto_20200515_0126'), ] operations = [ migrations.CreateModel( name='Patient_c', fields=[ ('patient_id', models.AutoField(max_length=200, primary_key=True, serialize=False)), ('patient_fname', models.CharField(max_length=200)), ('patient_mname', models.CharField(max_length=200)), ('patient_lname', models.CharField(max_length=200)), ('patient_title', models.CharField(max_length=20)), ('patient_address', models.CharField(max_length=500)), ('patient_town', models.CharField(max_length=200)), ('patient_phone', models.CharField(max_length=15)), ('patient_services', models.CharField(max_length=500)), ('patient_status', models.CharField(max_length=2)), ('patient_cost', models.CharField(max_length=100)), ('patient_date', models.DateField(default=datetime.date.today)), ('patient_time', models.TimeField(auto_now_add=True)), ('patient_comment', models.CharField(max_length=200)), ], ), migrations.CreateModel( name='Receipt_c', fields=[ ('receipt_id', models.AutoField(max_length=200, primary_key=True, serialize=False)), ('receipt_patient', models.CharField(max_length=200)), ('receipt_cost', models.CharField(max_length=200)), ('receipt_time', models.TimeField(auto_now=True)), ('receipt_status', models.CharField(default='-1', max_length=10)), ], ), ]
42
1084478226777b9259274e053984ac34d461198d
"""Pretty-printer for the AST: attaches a ``printTree`` method to every node
class via the ``addToClass`` decorator, printing one node per line with
``| `` markers showing depth."""

from .ast import *


# noinspection PyPep8Naming
def addToClass(cls):
    """Decorator factory: attach the decorated function to *cls* as a method
    with the same name, so all printTree implementations live together here."""
    def decorator(func):
        setattr(cls, func.__name__, func)
        return func
    return decorator


def print_intended(to_print, indent):
    """Print *to_print* prefixed by one '| ' per indentation level.

    (Name kept for backward compatibility; the parameter was originally
    misspelled ``intend``.)
    """
    print(indent * "| " + to_print)


# noinspection PyPep8Naming,PyUnresolvedReferences
class TreePrinter:
    # General

    @addToClass(Node)
    def printTree(self, indent=0):
        # NotImplementedError is the idiomatic signal for an abstract method
        # (the original raised a bare Exception).
        raise NotImplementedError(
            "printTree not defined in class " + self.__class__.__name__)

    @addToClass(Instruction)
    def printTree(self, indent=0):
        print_intended(self.type, indent)

    @addToClass(Expression)
    def printTree(self, indent=0):
        print_intended(self.type, indent)

    # Instructions

    @addToClass(Block)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        if self.instructions is not None:
            self.instructions.printTree(indent + 1)

    @addToClass(Assignment)
    def printTree(self, indent=0):
        # the operator itself (=, +=, ...) is the node label
        print_intended(self.operator, indent)
        self.left.printTree(indent + 1)
        self.right.printTree(indent + 1)

    @addToClass(For)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        self.variable.printTree(indent + 1)
        self.range.printTree(indent + 1)
        self.instruction.printTree(indent + 1)

    @addToClass(While)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        self.condition.printTree(indent + 1)
        self.instruction.printTree(indent + 1)

    @addToClass(If)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        self.condition.printTree(indent + 1)
        print_intended('then', indent)
        self.if_block.printTree(indent + 1)
        if self.else_block is not None:
            print_intended('else', indent)
            self.else_block.printTree(indent + 1)

    @addToClass(Print)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        self.args.printTree(indent + 1)

    @addToClass(Return)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        if self.args is not None:
            self.args.printTree(indent + 1)

    @addToClass(ArrayElement)
    def printTree(self, indent=0):
        print_intended("get_element", indent)
        self.array.printTree(indent + 1)
        self.ids.printTree(indent + 1)

    # Expressions

    @addToClass(Value)
    def printTree(self, indent=0):
        print_intended(str(self.value), indent)

    @addToClass(Array)
    def printTree(self, indent=0):
        if self.list is not None:
            print_intended('array', indent)
            self.list.printTree(indent + 1)
        else:
            print_intended('empty_array', indent)

    @addToClass(BinaryExpression)
    def printTree(self, indent=0):
        print_intended(self.operator, indent)
        self.left.printTree(indent + 1)
        self.right.printTree(indent + 1)

    @addToClass(MatrixFunction)
    def printTree(self, indent=0):
        print_intended(self.function, indent)
        self.parameter.printTree(indent + 1)

    @addToClass(UnaryMinus)
    def printTree(self, indent=0):
        print_intended('-', indent)
        self.value.printTree(indent + 1)

    @addToClass(Transpose)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        self.value.printTree(indent + 1)

    # Other

    @addToClass(Program)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        self.instructions_opt.printTree(indent + 1)

    @addToClass(Identifier)
    def printTree(self, indent=0):
        print_intended(self.name, indent)

    @addToClass(Range)
    def printTree(self, indent=0):
        print_intended(self.type, indent)
        self.start_value.printTree(indent + 1)
        self.end_value.printTree(indent + 1)

    @addToClass(List)
    def printTree(self, indent=0):
        # list elements print at the same depth as the list itself
        for element in self.elements:
            element.printTree(indent)
43
999de0965efa3c1fe021142a105dcf28184cd5ba
import dnf_converter def parse(query): print("parsing the query...") query = dnf_converter.convert(query) cp_clause_list = [] clause_list = [] for cp in query["$or"]: clauses = [] if "$and" in cp: for clause in cp["$and"]: clauses.append(clause) clause_list.append(clause) else: clause = cp clauses.append(clause) clause_list.append(clause) cp_clause_list.append({ "cp": cp, "clauses": clauses }) return cp_clause_list, clause_list
44
cb08f64d1ad7e53f1041684d4ca4ef65036c138d
# Scraper for Nevada county/city clerk contact information.  Parses a cached
# copy of the NV Secretary of State page with BeautifulSoup and writes the
# merged records to public/nevada.json.  The parsing below is deliberately
# stateful and order-dependent: a shared cursor walks the flattened list of
# paragraph children, and IndexError from running off the end is used as
# control flow.
import json
import re

from bs4 import BeautifulSoup
from bs4.element import NavigableString, Tag

from common import dir_path


def is_element(el, tag):
    """True if *el* is an HTML Tag whose name is *tag* (not a text node)."""
    return isinstance(el, Tag) and el.name == tag


class ElemIterator():
    """Cursor over a flat list of soup nodes with peek/seek helpers.

    NOTE(review): despite defining __next__ this is not a standard Python
    iterator (no __iter__, raises IndexError rather than StopIteration at
    the end) — callers rely on that IndexError, so do not "fix" it.
    """

    def __init__(self, els):
        self.els = els
        self.i = 0  # index of the element peek()/next() will look at

    def peek(self):
        """Return the current element without advancing, or None at the end."""
        try:
            return self.els[self.i]
        except IndexError:
            return None

    def __next__(self):
        # advance the cursor and return the element it pointed at;
        # raises IndexError once exhausted (used as a sentinel by callers)
        self.i += 1
        return self.els[self.i - 1]

    def hasNext(self):
        """True while the cursor has not run past the last element."""
        return len(self.els) > (self.i)

    def peek_till(self, tag):
        """Advance until the current element is a *tag* element (not past it)."""
        while not is_element(self.peek(), tag):
            self.__next__()

    def next_till(self, tag):
        """Advance past the next *tag* element."""
        self.peek_till(tag)
        self.__next__()


def parse_lines(iter_):
    """Yield one ElemIterator per county section.

    Sections are delimited by <strong> headers; everything from one header up
    to (but excluding) the next belongs to the same county.  The trailing
    partial section is yielded too.
    """
    iter_.peek_till('strong')
    county = []
    while iter_.hasNext():
        county += [iter_.__next__()]
        if is_element(iter_.peek(), 'strong'):
            yield ElemIterator(county)
            county = []
    yield ElemIterator(county)
    county = []


def parse_emails_url(iter_):
    """Collect all mailto addresses and the last plain link from *iter_*.

    Consumes the iterator to the end; the IndexError thrown by peek_till /
    __next__ on exhaustion is the intended loop exit.
    """
    emails = []
    url = None
    try:
        while True:
            iter_.peek_till('a')
            email = iter_.__next__()
            href = email['href']
            if href.startswith('mailto:'):
                if href[7:]:
                    # address taken from the href (after the "mailto:" prefix)
                    emails += [href[7:]]
                else:
                    # empty mailto href — fall back to the link text
                    emails += [email.text]
            else:
                url = href
    except IndexError:
        pass
    return emails, url


def parse_url(iter_):
    """Return the next non-mailto link href as a one-element list."""
    iter_.peek_till('a')
    link = iter_.__next__()
    href = link['href']
    assert not href.startswith('mailto:')
    return [href]


def parse_county(iter_):
    """Parse one county section into a contact record dict.

    Extracts, in document order: the official's name (text containing
    'Clerk'/'Registrar'), the postal address (up to the ZIP line), a
    phone/FAX line, then emails and URL.  Asserts when the phone line
    does not match the known formats.
    """
    county_title = iter_.__next__().text.strip().title()
    # keep only the "<name> City|County" prefix of the header
    locale = re.match('(.*) (City|County)', county_title).group(0)

    # special-cased section that only carries extra Clark County emails
    if county_title.startswith('Clark County Elections Mailing Address'):
        emails, url = parse_emails_url(iter_)
        return {
            'locale': locale,
            'county': locale,
            'emails': emails,
        }

    # scan forward to the officeholder line; name is everything before the comma
    while True:
        el = iter_.__next__()
        if isinstance(el, NavigableString):
            if 'Clerk' in el or 'Registrar' in el:
                official = el.strip().split(',')[0]
                break

    # accumulate address lines until one carries a Nevada ZIP code
    address = []
    while True:
        el = iter_.__next__()
        if isinstance(el, NavigableString):
            address += [el.strip()]
            if re.search(r'Nevada \d{5}', el) or re.search(r'NV \d{5}', el):
                break

    # skip one node (presumably a <br>) to reach the phone/FAX text —
    # TODO confirm against the cached HTML
    el = iter_.__next__()
    el = iter_.__next__()
    if isinstance(el, NavigableString):
        el = el.replace(u'\xa0', ' ')  # replace non-breaking space
        # "(775) 123-4567 FAX (775) 123-4568" or the "...-VOTE (8683)" variant
        matches1 = re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX (\(\d{3}\) \d{3}-\d{4})', el)
        matches2 = re.search(r'(\(\d{3}\) \d{3}-VOTE \(\d{4}\)) FAX (\(\d{3}\) \d{3}-\d{4})', el)
        if matches1:
            phone = matches1.group(1)
            fax = matches1.group(2)
        elif matches2:
            phone = matches2.group(1)
            fax = matches2.group(2)
        else:
            # unknown format: dump diagnostics and stop the scrape
            print(county_title)
            print(el)
            print(re.search(r'(\(\d{3}\) \d{3}-\d{4}) FAX', el))
            assert False

    emails, url = parse_emails_url(iter_)

    # independent cities get a 'city' key, counties a 'county' key
    init = {'city': locale} if locale.endswith('City') else {'county': locale}

    return {
        **init,
        'locale': locale,
        'official': official,
        'address': ', '.join(address),
        'emails': list(set(emails)),
        'phones': [phone],
        'faxes': [fax],
        'url': url,
    }


def main():
    """Parse the cached page, merge duplicate locales, write nevada.json."""
    # Actually this file: https://www.nvsos.gov/sos/elections/voters/county-clerk-contact-information
    # But it's behind a javascript test
    with open(dir_path(__file__) + '/cache/Nevada.htm') as fh:
        page = fh.read()

    soup = BeautifulSoup(page, 'lxml')
    ps = soup.select('div.content_area > p')
    # flatten all paragraph children into one stream for the cursor
    iter_ = ElemIterator([x for p in ps for x in p.children])
    raw_counties = [parse_county(county) for county in parse_lines(iter_)]

    # merge records with the same locale (e.g. the extra Clark County
    # mailing-address section) by concatenating their email lists
    merge_counties = {}
    for county in raw_counties:
        locale = county['locale']
        if locale in merge_counties:
            merge_counties[locale]['emails'] += county['emails']
        else:
            merge_counties[locale] = county

    counties = list(merge_counties.values())
    # exactly one duplicate section is expected; anything else means the
    # page layout changed
    assert len(counties) == len(raw_counties) - 1

    with open('public/nevada.json', 'w') as fh:
        json.dump(counties, fh)


if __name__ == '__main__':
    main()
End of preview. Expand in Data Studio
README.md exists but content is empty.
Downloads last month
5