blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
45297b843b717fd571b9a542906a15e1a9b43bb3 | 8b3ca44ee3d990233e74655b7131d616094f70c2 | /experiments/sparsity/methylation_gm/gaussian_truncatednormal_hierarchical.py | 427b535bd7e7c6d03bb77c738acc2c5ee7ee563c | [] | no_license | zshwuhan/BMF_Priors | 8b8c54271285a72d2085a56a9475c0756f375e67 | 6a600da1c41f1ccde2f2ba99298b40e68fb9910a | refs/heads/master | 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null | UTF-8 | Python | false | false | 1,480 | py | '''
Measure sparsity experiment on the methylation GM dataset, with
the Gaussian + Truncated Normal + hierarchical model.
'''
# Path to the folder containing the BMF_Priors repository; prepended to
# sys.path so the project modules can be imported below.
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_truncatednormal_hierarchical import BMF_Gaussian_TruncatedNormal_Hierarchical
from BMF_Priors.data.methylation.load_data import load_gene_body_methylation_integer
from BMF_Priors.experiments.sparsity.sparsity_experiment import sparsity_experiment
import matplotlib.pyplot as plt
''' Run the experiment. '''
# R is the data matrix, M the mask of observed entries.
R, M = load_gene_body_methylation_integer()
model_class = BMF_Gaussian_TruncatedNormal_Hierarchical
# Number of random train/test splits per sparsity fraction.
n_repeats = 10
stratify_rows = False
# Fractions of entries withheld from training (increasing sparsity).
fractions_unknown = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99]
settings = {
    'R': R,
    'M': M,
    'K': 5,  # latent dimensionality
    # Hyperparameters of the Gaussian + Truncated Normal hierarchical priors.
    'hyperparameters': { 'alpha':1., 'beta':1., 'mu_mu':0., 'tau_mu':0.1, 'a':1., 'b':1. },
    'init': 'random',
    # Gibbs sampling schedule: total iterations, burn-in, and thinning rate.
    'iterations': 250,
    'burn_in': 200,
    'thinning': 2,
}
# Output file for the per-fraction performance measurements.
fout = './results/performances_gaussian_truncatednormal_hierarchical.txt'
average_performances, all_performances = sparsity_experiment(
    n_repeats=n_repeats, fractions_unknown=fractions_unknown, stratify_rows=stratify_rows,
    model_class=model_class, settings=settings, fout=fout)
''' Plot the performance. '''
plt.figure()
plt.title("Sparsity performances")
plt.plot(fractions_unknown, average_performances['MSE'])
plt.ylim(0,10) | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
828bf78e07e40ade4e22b0de6413ae510a8921c7 | f3ed1e9ad23dc48a23a58d724471622edd6741f0 | /test.py | 7d935a37b08951b90b286af25621543df3d4e883 | [] | no_license | asansyzb/Latinize-Kazakh-to-Russian | 73cc34fdac2a0b41ead28f68481fab204be5ad63 | 32f64d883c4a0dbcae532b6925a22bca08791b77 | refs/heads/master | 2021-09-09T17:27:01.783639 | 2018-03-18T13:56:54 | 2018-03-18T13:56:54 | 125,730,210 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | global prssmall, prsbig
# Cyrillic -> Latin transliteration tables for the 2017 apostrophe-based
# Kazakh Latin alphabet. Keys are single Cyrillic characters; values are one
# or two Latin characters (an apostrophe marks the modified letters). Note
# both 'х' and 'һ' intentionally map to 'h'.
prssmall = {
    'а': "a", 'ә': "a'", 'б': 'b', 'д': 'd', 'е': 'e', 'ф': 'f', 'г': 'g',
    'ғ': "g'", 'х': 'h', 'һ': 'h', 'і': "i", 'и': "i'", 'й': "i'", 'ж': 'j',
    'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n', 'ң': "n'", 'о': 'o', 'ө': "o'",
    'п': 'p', 'р': 'r', 'с': 's', 'ш': "s'", 'ч': "c'", 'т': 't', 'ұ': 'u',
    'ү': "u'", 'в': 'v', 'ы': 'y', 'у': "y'", 'з': 'z', 'қ': 'q',
}
# Uppercase table, mirroring `prssmall`.
prsbig = {
    'А': "A", 'Ә': "A'", 'Б': 'B', 'Д': 'D', 'Е': 'E', 'Ф': 'F', 'Г': 'G',
    'Ғ': "G'", 'Х': 'H', 'Һ': 'H', 'І': "I", 'И': "I'", 'Й': "I'", 'Ж': 'J',
    'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N', 'Ң': "N'", 'О': 'O', 'Ө': "O'",
    'П': 'P', 'Р': 'R', 'С': 'S', 'Ш': "S'", 'Ч': "C'", 'Т': 'T', 'Ұ': 'U',
    'Ү': "U'", 'В': 'V', 'Ы': 'Y', 'У': "Y'", 'З': 'Z', 'Қ': 'Q',
}
def latinize(msg):
    """Return *msg* with Cyrillic Kazakh letters replaced by their Latin
    equivalents; characters without a mapping pass through unchanged."""
    # str.translate performs the whole substitution in a single C-level pass
    # instead of quadratic string concatenation. The table is rebuilt per
    # call so later edits to the module-level dicts still take effect.
    return msg.translate(str.maketrans({**prssmall, **prsbig}))
def main():
    print(latinize('е там өмір қалай бауырым, не жаңалық, Әлібөғңқ'))
if __name__ == '__main__':
    main()
| [
"sansyzbayev.alibek@gmail.com"
] | sansyzbayev.alibek@gmail.com |
4e6151e08844e9f992a2200fb36b50f34d975023 | 9bff711e7e28f5c3b59d3faa9c0f851719dff4e6 | /blair/__init__.py | cfe408230df1f0ee7935709afce62772cba2fc14 | [] | no_license | crzeller11/Debiasing-Word-Embeddings | e61c46adc7440927030d8436ea9568710423d03a | 70de33a89be147a0091f6fb75c7e688f2db99564 | refs/heads/master | 2020-03-28T16:28:55.082077 | 2019-03-29T17:57:13 | 2019-03-29T17:57:13 | 148,699,831 | 0 | 1 | null | 2019-03-29T17:57:14 | 2018-09-13T21:23:45 | Python | UTF-8 | Python | false | false | 107 | py | from .evaluate import read_dataset_directory, score_embedding
from .src.embeddings import WrappedEmbedding
| [
"justinnhli@gmail.com"
] | justinnhli@gmail.com |
250692130016bd8b68bba47f883404dbe047de02 | c7b1d4037804c809687b6bd839c45f7da0ccaac8 | /proplot/config.py | c218b25feb1503dfe66b98466efde282dd829fa1 | [
"MIT"
] | permissive | gepcel/proplot | af03d4302d6a8cbaf88bc2032368d8240c7d19d3 | afeb7da4cd52f83e34bf3e2f8e781efa1dd2b763 | refs/heads/master | 2022-11-16T00:26:52.142575 | 2020-07-10T03:38:36 | 2020-07-10T03:38:36 | 278,497,577 | 0 | 0 | MIT | 2020-07-10T00:09:13 | 2020-07-10T00:09:12 | null | UTF-8 | Python | false | false | 62,496 | py | #!/usr/bin/env python3
"""
Tools for setting up ProPlot and configuring global settings.
See the :ref:`configuration guide <ug_config>` for details.
"""
# NOTE: The matplotlib analogue to this file is actually __init__.py
# but it makes more sense to have all the setup actions in a separate file
# so the namespace of the top-level module is unpolluted.
# NOTE: Why also load colormaps and cycles in this file and not colors.py?
# Because I think it makes sense to have all the code that "runs" (i.e. not
# just definitions) in the same place, and I was having issues with circular
# dependencies and where import order of __init__.py was affecting behavior.
import logging
import os
import re
from collections import namedtuple
import cycler
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.font_manager as mfonts
import matplotlib.mathtext # noqa
import matplotlib.rcsetup as msetup
import matplotlib.style.core as mstyle
import numpy as np
from . import colors as pcolors
from .internals import ic # noqa: F401
from .internals import _not_none, docstring, rcsetup, timers, warnings
from .utils import to_xyz, units
try:
from IPython import get_ipython
except ImportError:
def get_ipython():
return
# Public API of this module; 'inline_backend_fmt' and 'rc_configurator' are
# retained only for backwards compatibility.
__all__ = [
    'rc', 'RcConfigurator',
    'register_cmaps', 'register_cycles', 'register_colors', 'register_fonts',
    'config_inline_backend', 'use_style',
    'inline_backend_fmt', 'rc_configurator', # deprecated
]
logger = logging.getLogger('matplotlib.mathtext')
logger.setLevel(logging.ERROR) # suppress warnings!
# Dictionaries used to track custom proplot settings
rc_proplot = rcsetup._rc_proplot_default.copy()
rc_matplotlib = mpl.rcParams # PEP8 4 lyfe
RcParams = mpl.RcParams # the special class
# Record of one active rc.context() block: its mode, the user kwargs, and the
# new/old values of every setting the block touched (for restoration on exit).
_RcContext = namedtuple('RcContext', ('mode', 'kwargs', 'rc_new', 'rc_old'))
# Misc constants
# TODO: Use explicit validators for specific settings like matplotlib.
# REGEX_STRING matches values wrapped in single or double quotes;
# REGEX_POINTS matches setting names whose values are measured in points.
REGEX_STRING = re.compile('\\A(\'.*\'|".*")\\Z')
REGEX_POINTS = re.compile(
    r'\A(?!colorbar|subplots|pdf|ps).*(width|space|size|pad|len)\Z'
)
# Color names always kept when filtering the XKCD color survey database.
ALWAYS_ADD = (
    *( # common fancy names or natural names
        'charcoal', 'tomato', 'burgundy', 'maroon', 'burgundy', 'lavendar',
        'taupe', 'ocre', 'sand', 'stone', 'earth', 'sand brown', 'sienna',
        'terracotta', 'moss', 'crimson', 'mauve', 'rose', 'teal', 'forest',
        'grass', 'sage', 'pine', 'vermillion', 'russet', 'cerise', 'avocado',
        'wine', 'brick', 'umber', 'mahogany', 'puce', 'grape', 'blurple',
        'cranberry', 'sand', 'aqua', 'jade', 'coral', 'olive', 'magenta',
        'turquoise', 'sea blue', 'royal blue', 'slate blue', 'slate grey',
        'baby blue', 'salmon', 'beige', 'peach', 'mustard', 'lime', 'indigo',
        'cornflower', 'marine', 'cloudy blue', 'tangerine', 'scarlet', 'navy',
        'cool grey', 'warm grey', 'chocolate', 'raspberry', 'denim',
        'gunmetal', 'midnight', 'chartreuse', 'ivory', 'khaki', 'plum',
        'silver', 'tan', 'wheat', 'buff', 'bisque', 'cerulean',
    ),
    *( # common combos
        'red orange', 'yellow orange', 'yellow green',
        'blue green', 'blue violet', 'red violet',
    ),
    *( # common names
        prefix + color
        for color in (
            'red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet',
            'brown', 'grey'
        )
        for prefix in (
            '', 'light ', 'dark ', 'medium ', 'pale ',
        )
    )
)
# Color names excluded from the registered database.
ALWAYS_REMOVE = ( # filter these out, let's try to be professional here...
    'shit', 'poop', 'poo', 'pee', 'piss', 'puke', 'vomit', 'snot',
    'booger', 'bile', 'diarrhea',
)
# Substring replacements applied to XKCD names before registration.
TRANSLATE_COLORS = ( # prevent registering similar-sounding names
    ('/', ' '),
    ("'s", ''),
    ('forrest', 'forest'), # typo?
    ('reddish', 'red'), # remove 'ish'
    ('purplish', 'purple'),
    ('bluish', 'blue'),
    ('ish ', ' '),
    ('grey', 'gray'),
    ('pinky', 'pink'),
    ('greeny', 'green'),
    ('bluey', 'blue'),
    ('purply', 'purple'),
    ('purpley', 'purple'),
    ('yellowy', 'yellow'),
    ('robin egg', 'robins egg'),
    ('egg blue', 'egg'),
    ('bluegray', 'blue gray'),
    ('grayblue', 'gray blue'),
    ('lightblue', 'light blue'),
    ('yellowgreen', 'yellow green'),
    ('yelloworange', 'yellow orange'),
)
OPEN_COLORS = {} # populated during register_colors
XKCD_COLORS = {} # populated during register_colors
# Base color shorthands plus explicit RGB overrides of matplotlib defaults.
BASE_COLORS = {
    **mcolors.BASE_COLORS, # shorthand names like 'r', 'g', etc.
    'blue': (0, 0, 1),
    'green': (0, 0.5, 0),
    'red': (1, 0, 0),
    'cyan': (0, 0.75, 0.75),
    'magenta': (0.75, 0, 0.75),
    'yellow': (0.75, 0.75, 0),
    'black': (0, 0, 0),
    'white': (1, 1, 1),
}
_config_docstring = """
user : bool, optional
Whether to reload user {name}. Default is ``True``.
default : bool, optional
Whether to reload default proplot {name}. Default is ``False``.
"""
docstring.snippets['register_cmaps.params'] = _config_docstring.format(name='colormaps')
docstring.snippets['register_cycles.params'] = _config_docstring.format(name='cycles')
docstring.snippets['register_colors.params'] = _config_docstring.format(name='colors')
docstring.snippets['rc.params'] = """
local : bool, optional
Whether to reload ``.proplotrc`` settings in this directory and parent
directories. Default is ``True``.
user : bool, optional
Whether to reload ``~/.proplotrc`` user settings. Default is ``True``.
default : bool, optional
Whether to reload default proplot settings. Default is ``True``.
"""
docstring.snippets['register.ext_table'] = """
Valid file extensions are as follows:
================== =====================================================================================================================================================================================================================
Extension Description
================== =====================================================================================================================================================================================================================
``.hex`` List of HEX strings in any format (comma-separated, separate lines, with double quotes... anything goes).
``.xml`` XML files with ``<Point .../>`` tags specifying ``x``, ``r``, ``g``, ``b``, and (optionally) ``o`` parameters, where ``x`` is the coordinate and the rest are the red, blue, green, and opacity channel values.
``.rgb``, ``.txt`` 3-4 column table of red, blue, green, and (optionally) opacity channel values, delimited by commas or spaces. If values larger than 1 are detected, they are assumed to be on the 0-255 scale and are divided by 255.
================== =====================================================================================================================================================================================================================
""" # noqa: E501
def _get_data_paths(subfolder, user=True, default=True, reverse=False):
"""
Return data folder paths in reverse order of precedence.
"""
# When loading colormaps, cycles, and colors, files in the latter
# directories overwrite files in the former directories. When loading
# fonts, the resulting paths need to be *reversed*.
paths = []
if user:
paths.append(os.path.join(os.path.dirname(__file__), subfolder))
if default:
paths.append(os.path.join(os.path.expanduser('~'), '.proplot', subfolder))
if reverse:
paths = paths[::-1]
return paths
def _iter_data_paths(subfolder, **kwargs):
    """
    Yield ``(idx, dirname, filename)`` tuples for every file found under the
    data paths. ``idx`` is the index of the originating data path, which
    indicates whether the file is a default ProPlot file or a user file.
    """
    for idx, base in enumerate(_get_data_paths(subfolder, **kwargs)):
        for dirname, _, filenames in os.walk(base):
            for filename in filenames:
                # Skip UNIX-style hidden files
                if not filename.startswith('.'):
                    yield idx, dirname, filename
class RcConfigurator(object):
    """
    Magical abstract class for managing matplotlib's `builtin settings <rc_matplotlib>`_
    and ProPlot's :ref:`added settings <rc_proplot>`. Settings are accessed
    and assigned with dict-like or attribute-like syntax.
    When ProPlot is imported, this class is instantiated as the `rc` object
    and the ProPlot default settings and ``.proplotrc`` user overrides
    are applied. To modify these settings, use the `rc` object.
    See the :ref:`configuration guide <ug_config>` for details.
    """
def __repr__(self):
rcdict = type('rc', (dict,), {})({ # encapsulate params in temporary class
key: value for key, value in rc_proplot.items()
if '.' not in key # show short names
})
string = type(rc_matplotlib).__repr__(rcdict)
return string.strip()[:-2] + ',\n ... <rcParams> ...\n })'
def __str__(self):
rcdict = type('rc', (dict,), {})({
key: value for key, value in rc_proplot.items()
if '.' not in key # show short names
})
string = type(rc_matplotlib).__str__(rcdict)
return string + '\n... <rcParams> ...'
def __iter__(self): # lets us build dict
"""
Iterate over keys and values of matplotlib and proplot settings.
"""
for key in sorted((*rc_proplot, *rc_matplotlib)):
yield key, self[key]
def __contains__(self, key):
"""
Test whether key exists as matplotlib or proplot setting.
"""
return key in rc_proplot or key in rc_matplotlib
    @docstring.add_snippets
    def __init__(self, local=True, user=True, default=True):
        """
        Parameters
        ----------
        %(rc.params)s
        """
        # Stack of active context() states; must exist before reset() runs.
        self._context = []
        self.reset(local=local, user=user, default=default)
    def __enter__(self):
        """
        Apply settings from the most recent context block.
        """
        if not self._context:
            raise RuntimeError(
                'rc object must be initialized for context block '
                'using rc.context().'
            )
        context = self._context[-1]
        kwargs = context.kwargs
        rc_new = context.rc_new # used for context-based _get_item
        rc_old = context.rc_old # used to re-apply settings without copying whole dict
        # Apply each requested setting plus all of its synced children,
        # recording prior values in rc_old so __exit__ can restore them.
        for key, value in kwargs.items():
            kw_proplot, kw_matplotlib = self._get_synced_params(key, value)
            for rc_dict, kw_new in zip(
                (rc_proplot, rc_matplotlib),
                (kw_proplot, kw_matplotlib),
            ):
                for key, value in kw_new.items():
                    # Save the old value *before* overwriting it in place.
                    rc_old[key] = rc_dict[key]
                    rc_new[key] = rc_dict[key] = value
def __exit__(self, *args): # noqa: U100
"""
Restore settings from the most recent context block.
"""
if not self._context:
raise RuntimeError(
'rc object must be initialized for context block '
'using rc.context().'
)
context = self._context[-1]
for key, value in context.rc_old.items():
kw_proplot, kw_matplotlib = self._get_synced_params(key, value)
rc_proplot.update(kw_proplot)
rc_matplotlib.update(kw_matplotlib)
del self._context[-1]
    def __delitem__(self, item): # noqa: 100
        """
        Raise an error. This enforces pseudo-immutability.
        """
        # Settings can be changed but never removed from the database.
        raise RuntimeError('rc settings cannot be deleted.')
    def __delattr__(self, item): # noqa: 100
        """
        Raise an error. This enforces pseudo-immutability.
        """
        # Attribute-style deletion is blocked just like item deletion.
        raise RuntimeError('rc settings cannot be deleted.')
def __getattr__(self, attr):
"""
Pass the attribute to `~RcConfigurator.__getitem__` and return
the result.
"""
if attr[:1] == '_':
return super().__getattribute__(attr)
else:
return self[attr]
def __getitem__(self, key):
"""
Return a `builtin matplotlib setting <rc_matplotlib>`_
or a ProPlot :ref:`added setting <rc_proplot>`.
"""
key = self._sanitize_key(key)
if key is None: # means key was *removed*, warnings was issued
return None
for kw in (rc_proplot, rc_matplotlib):
try:
return kw[key]
except KeyError:
continue
raise KeyError(f'Invalid setting name {key!r}.')
    def __setattr__(self, attr, value):
        """
        Pass the attribute and value to `~RcConfigurator.__setitem__`.
        """
        # Leading-underscore names (e.g. _context) are real instance
        # attributes; everything else is treated as an rc setting.
        if attr[:1] == '_':
            super().__setattr__(attr, value)
        else:
            self.__setitem__(attr, value)
def __setitem__(self, key, value):
"""
Modify a `builtin matplotlib setting <rc_matplotlib>`_ or
a ProPlot :ref:`added setting <rc_proplot>`.
"""
kw_proplot, kw_matplotlib = self._get_synced_params(key, value)
rc_proplot.update(kw_proplot)
rc_matplotlib.update(kw_matplotlib)
def _get_context_mode(self):
"""
Return lowest (most permissive) context mode.
"""
return min((context.mode for context in self._context), default=0)
def _get_item(self, key, mode=None):
"""
As with `~RcConfigurator.__getitem__` but the search is limited
based on the context mode and ``None`` is returned if the key is not
found in the dictionaries.
"""
if mode is None:
mode = self._get_context_mode()
cache = tuple(context.rc_new for context in self._context)
if mode == 0:
rcdicts = (*cache, rc_proplot, rc_matplotlib)
elif mode == 1:
rcdicts = (*cache, rc_proplot) # custom only!
elif mode == 2:
rcdicts = (*cache,)
else:
raise KeyError(f'Invalid caching mode {mode!r}.')
for rcdict in rcdicts:
if not rcdict:
continue
try:
return rcdict[key]
except KeyError:
continue
if mode == 0:
raise KeyError(f'Invalid setting name {key!r}.')
else:
return
def _get_synced_params(self, key, value):
"""
Return dictionaries for updating the `rc_proplot`
and `rc_matplotlib` properties associated with this key.
"""
key = self._sanitize_key(key)
if key is None: # means setting was removed
return {}, {}, {}
keys = (key,) + rcsetup._rc_children.get(key, ()) # settings to change
value = self._sanitize_value(value)
kw_proplot = {} # custom properties that global setting applies to
kw_matplotlib = {} # builtin properties that global setting applies to
# Permit arbitary units for builtin matplotlib params
# See: https://matplotlib.org/users/customizing.html, props matching
# the below strings use the units 'points'.
# TODO: Incorporate into more sophisticated validation system
if any(REGEX_POINTS.match(_) for _ in keys):
try:
self._scale_font(value) # *validate* but do not translate
except KeyError:
value = units(value, 'pt')
# Special key: configure inline backend
if key == 'inlinefmt':
config_inline_backend(value)
# Special key: apply stylesheet
elif key == 'style':
if value is not None:
kw_matplotlib, kw_proplot = _get_style_dicts(value, infer=True)
# Cycler
elif key == 'cycle':
colors = _get_cycle_colors(value)
kw_matplotlib['patch.facecolor'] = 'C0'
kw_matplotlib['axes.prop_cycle'] = cycler.cycler('color', colors)
# Zero linewidth almost always means zero tick length
# TODO: Document this feature
elif key == 'linewidth' and value == 0:
ikw_proplot, ikw_matplotlib = self._get_synced_params('ticklen', 0)
kw_proplot.update(ikw_proplot)
kw_matplotlib.update(ikw_matplotlib)
# Tick length/major-minor tick length ratio
elif key in ('tick.len', 'tick.lenratio'):
if key == 'tick.len':
ticklen = value
ratio = rc_proplot['tick.lenratio']
else:
ticklen = rc_proplot['tick.len']
ratio = value
kw_matplotlib['xtick.minor.size'] = ticklen * ratio
kw_matplotlib['ytick.minor.size'] = ticklen * ratio
# Spine width/major-minor tick width ratio
elif key in ('linewidth', 'tick.ratio'):
if key == 'linewidth':
tickwidth = value
ratio = rc_proplot['tick.ratio']
else:
tickwidth = rc_proplot['linewidth']
ratio = value
kw_matplotlib['xtick.minor.width'] = tickwidth * ratio
kw_matplotlib['ytick.minor.width'] = tickwidth * ratio
# Gridline width
elif key in ('grid.linewidth', 'grid.ratio'):
if key == 'grid.linewidth':
gridwidth = value
ratio = rc_proplot['grid.ratio']
else:
gridwidth = rc_matplotlib['grid.linewidth']
ratio = value
kw_proplot['gridminor.linewidth'] = gridwidth * ratio
# Gridline toggling, complicated because of the clunky way this is
# implemented in matplotlib. There should be a gridminor setting!
elif key in ('grid', 'gridminor'):
b = value
ovalue = rc_matplotlib['axes.grid']
owhich = rc_matplotlib['axes.grid.which']
# Instruction is to turn off gridlines
if not value:
# Gridlines are already off, or they are on for the particular
# ones that we want to turn off. Instruct to turn both off.
if (
not ovalue
or key == 'grid' and owhich == 'major'
or key == 'gridminor' and owhich == 'minor'
):
which = 'both' # disable both sides
# Gridlines are currently on for major and minor ticks, so we
# instruct to turn on gridlines for the one we *don't* want off
elif owhich == 'both': # and ovalue is True, as already tested
# if gridminor=False, enable major, and vice versa
b = True
which = 'major' if key == 'gridminor' else 'minor'
# Gridlines are on for the ones that we *didn't* instruct to
# turn off, and off for the ones we do want to turn off. This
# just re-asserts the ones that are already on.
else:
b = True
which = owhich
# Instruction is to turn on gridlines
else:
# Gridlines are already both on, or they are off only for the
# ones that we want to turn on. Turn on gridlines for both.
if (
owhich == 'both'
or key == 'grid' and owhich == 'minor'
or key == 'gridminor' and owhich == 'major'
):
which = 'both'
# Gridlines are off for both, or off for the ones that we
# don't want to turn on. We can just turn on these ones.
else:
which = owhich
# Finally apply settings
kw_matplotlib['axes.grid'] = b
kw_matplotlib['axes.grid.which'] = which
# Update original setting and linked settings
for key in keys:
if key in rc_proplot:
kw_proplot[key] = value
elif key in rc_matplotlib:
kw_matplotlib[key] = value
else:
raise KeyError(f'Invalid rc key {key!r}.')
return kw_proplot, kw_matplotlib
@staticmethod
def _get_local_paths():
"""
Return locations of local proplotrc files in this directory
and in parent directories.
"""
idir = os.getcwd()
paths = []
while idir: # not empty string
ipath = os.path.join(idir, '.proplotrc')
if os.path.exists(ipath):
paths.append(ipath)
ndir = os.path.dirname(idir)
if ndir == idir: # root
break
idir = ndir
return paths[::-1] # sort from decreasing to increasing importantce
    @staticmethod
    def _get_user_path():
        """
        Return location of user proplotrc file.
        """
        # Always ~/.proplotrc, regardless of the current working directory.
        return os.path.join(os.path.expanduser('~'), '.proplotrc')
@staticmethod
def _sanitize_key(key):
"""
Ensure string and convert keys with omitted dots.
"""
if not isinstance(key, str):
raise KeyError(f'Invalid key {key!r}. Must be string.')
# Translate from nodots to 'full' version
if '.' not in key:
key = rcsetup._rc_nodots.get(key, key)
# Handle deprecations
if key in rcsetup._rc_removed:
alternative, version = rcsetup._rc_removed[key]
message = f'rc setting {key!r} was removed in version {version}.'
if alternative: # provide an alternative
message = f'{message} {alternative}'
warnings._warn_proplot(warnings)
key = None
if key in rcsetup._rc_renamed:
key_new, version = rcsetup._rc_renamed[key]
warnings._warn_proplot(
f'rc setting {key!r} was renamed to {key_new} in version {version}.'
)
key = key_new
return key.lower()
@staticmethod
def _sanitize_value(value):
"""
Convert numpy ndarray to list.
"""
if isinstance(value, np.ndarray):
if value.size <= 1:
value = value.item()
else:
value = value.tolist()
return value
    @staticmethod
    def _scale_font(size):
        """
        Translate font size to numeric. String scalings (e.g. ``'large'``)
        are converted to multiples of the current ``font.size``; numeric
        sizes are returned unchanged.
        """
        # NOTE: Critical this remains KeyError so except clause
        # in _get_synced_params works.
        if isinstance(size, str):
            try:
                scale = mfonts.font_scalings[size]
            except KeyError:
                # Re-raise as KeyError with a helpful list of valid scalings.
                raise KeyError(
                    f'Invalid font scaling {size!r}. Options are: '
                    + ', '.join(
                        f'{key!r} ({value})'
                        for key, value in mfonts.font_scalings.items()
                    ) + '.'
                )
            else:
                size = rc_matplotlib['font.size'] * scale
        return size
def category(self, cat, *, trimcat=True, context=False):
"""
Return a dictionary of settings beginning with the substring
``cat + '.'``.
Parameters
----------
cat : str, optional
The `rc` setting category.
trimcat : bool, optional
Whether to trim ``cat`` from the key names in the output
dictionary. Default is ``True``.
context : bool, optional
If ``True``, then each category setting that is not found in the
context mode dictionaries is omitted from the output dictionary.
See `~RcConfigurator.context`.
"""
if cat not in rcsetup._rc_categories:
raise ValueError(
f'Invalid rc category {cat!r}. Valid categories are '
', '.join(map(repr, rcsetup._rc_categories)) + '.'
)
kw = {}
mode = 0 if not context else None
for rcdict in (rc_proplot, rc_matplotlib):
for key in rcdict:
if not re.match(fr'\A{cat}\.[^.]+\Z', key):
continue
value = self._get_item(key, mode)
if value is None:
continue
if trimcat:
key = re.sub(fr'\A{cat}\.', '', key)
kw[key] = value
return kw
def context(self, *args, mode=0, file=None, **kwargs):
"""
Temporarily modify the rc settings in a "with as" block.
Parameters
----------
*args
Dictionaries of `rc` names and values.
file : str, optional
Filename from which settings should be loaded.
**kwargs
`rc` names and values passed as keyword arguments. If the
name has dots, simply omit them.
Other parameters
----------------
mode : {0, 1, 2}, optional
The context mode. Dictates the behavior of `~RcConfigurator.get`,
`~RcConfigurator.fill`, and `~RcConfigurator.category` within a
"with as" block when called with ``context=True``.
The options are as follows:
0. Matplotlib's `builtin settings <rc_matplotlib>`_ and ProPlot's
:ref:`added settings <rc_proplot>` are all returned,
whether or not `~RcConfigurator.context` has changed them.
1. *Unchanged* `matplotlib settings <rc_matplotlib>`_ return ``None``.
All of ProPlot's :ref:`added settings <rc_proplot>` are returned
whether or not `~RcConfigurator.context` has changed them.
This is used in the `~proplot.axes.Axes.__init__` call to
`~proplot.axes.Axes.format`. When a lookup returns ``None``,
`~proplot.axes.Axes.format` does not apply it.
2. All unchanged settings return ``None``. This is used during
user calls to `~proplot.axes.Axes.format`.
Note
----
This is used by ProPlot internally but may also be useful for power users.
It was invented to prevent successive calls to `~proplot.axes.Axes.format`
from constantly looking up and re-applying unchanged settings. These
gratuitous lookups increased runtime significantly, and resulted in successive
calls to `~proplot.axes.Axes.format` overwriting the previous calls.
Example
-------
The below applies settings to axes in a specific figure using
`~RcConfigurator.context`.
>>> import proplot as plot
>>> with plot.rc.context(linewidth=2, ticklen=5):
>>> fig, ax = plot.subplots()
>>> ax.plot(data)
The below applies settings to a specific axes using `~proplot.axes.Axes.format`,
which uses `~RcConfigurator.context` internally.
>>> import proplot as plot
>>> fig, ax = plot.subplots()
>>> ax.format(linewidth=2, ticklen=5)
"""
# Add input dictionaries
for arg in args:
if not isinstance(arg, dict):
raise ValueError('Non-dictionary argument {arg!r}.')
kwargs.update(arg)
# Add settings from file
# TODO: Incoporate with matplotlib 'stylesheets'
if file is not None:
kw_proplot, kw_matplotlib = self._load_file(file)
kwargs.update(kw_proplot)
kwargs.update(kw_matplotlib)
# Activate context object
if mode not in range(3):
raise ValueError(f'Invalid mode {mode!r}.')
context = _RcContext(mode=mode, kwargs=kwargs, rc_new={}, rc_old={})
self._context.append(context)
return self
def get(self, key, *, context=False):
"""
Return a single setting.
Parameters
----------
key : str
The setting name.
context : bool, optional
If ``True``, then ``None`` is returned if the setting is not found
in the context mode dictionaries. See `~RcConfigurator.context`.
"""
mode = 0 if not context else None
return self._get_item(key, mode)
def fill(self, props, *, context=False):
"""
Return a dictionary filled with settings whose names match the
string values in the input dictionary.
Parameters
----------
props : dict-like
Dictionary whose values are `rc` setting names.
context : bool, optional
If ``True``, then each setting that is not found in the
context mode dictionaries is omitted from the output dictionary.
See `~RcConfigurator.context`.
"""
kw = {}
mode = 0 if not context else None
for key, value in props.items():
item = self._get_item(value, mode)
if item is not None:
kw[key] = item
return kw
def update(self, *args, **kwargs):
"""
Update several settings at once with a dictionary and/or
keyword arguments.
Parameters
----------
*args : str, dict, or (str, dict), optional
A dictionary containing `rc` keys and values. You can also
pass a "category" name as the first argument, in which case all
settings are prepended with ``'category.'``. For example,
``rc.update('axes', labelsize=20, titlesize=20)`` changes the
:rcraw:`axes.labelsize` and :rcraw:`axes.titlesize` properties.
**kwargs, optional
`rc` keys and values passed as keyword arguments. If the
name has dots, simply omit them.
"""
# Parse args
kw = {}
prefix = ''
if len(args) > 2:
raise ValueError(
f'rc.update() accepts 1-2 arguments, got {len(args)}. Usage '
'is rc.update(kw), rc.update(category, kw), '
'rc.update(**kwargs), or rc.update(category, **kwargs).'
)
elif len(args) == 2:
prefix = args[0]
kw = args[1]
elif len(args) == 1:
if isinstance(args[0], str):
prefix = args[0]
else:
kw = args[0]
# Apply settings
if prefix:
prefix = prefix + '.'
kw.update(kwargs)
for key, value in kw.items():
self.__setitem__(prefix + key, value)
    @docstring.add_snippets
    def reset(self, local=True, user=True, default=True):
        """
        Reset the configurator to its initial state.
        Parameters
        ----------
        %(rc.params)s
        """
        # Always remove context objects
        self._context.clear()
        # Update from default settings
        # NOTE: see _remove_blacklisted_style_params bugfix
        if default:
            rc_matplotlib.update(_get_style_dicts('original', filter=False))
            rc_matplotlib.update(rcsetup._rc_matplotlib_default)
            rc_proplot.update(rcsetup._rc_proplot_default)
            # Re-sync every proplot setting so linked matplotlib params match.
            for key, value in rc_proplot.items():
                kw_proplot, kw_matplotlib = self._get_synced_params(key, value)
                rc_matplotlib.update(kw_matplotlib)
                rc_proplot.update(kw_proplot)
        # Update from user home
        user_path = None
        if user:
            user_path = self._get_user_path()
            if os.path.isfile(user_path):
                self.load_file(user_path)
        # Update from local paths
        if local:
            local_paths = self._get_local_paths()
            for path in local_paths:
                if path == user_path: # local files always have precedence
                    continue
                self.load_file(path)
def _load_file(self, path):
"""
Return dictionaries of proplot and matplotlib settings loaded from the file.
"""
added = set()
path = os.path.expanduser(path)
kw_proplot = {}
kw_matplotlib = {}
with open(path, 'r') as fh:
for cnt, line in enumerate(fh):
# Parse line and ignore comments
stripped = line.split('#', 1)[0].strip()
if not stripped:
continue
pair = stripped.split(':', 1)
if len(pair) != 2:
warnings._warn_proplot(
f'Illegal line #{cnt + 1} in file {path!r}:\n{line!r}"'
)
continue
# Get key value pair
key, val = pair
key = key.strip()
val = val.strip()
if key in added:
warnings._warn_proplot(
f'Duplicate key {key!r} on line #{cnt + 1} in file {path!r}.'
)
added.add(key)
# *Very primitive* type conversion system for proplot settings.
# Matplotlib does this automatically.
if REGEX_STRING.match(val): # also do this for matplotlib settings
val = val[1:-1] # remove quotes from string
if key in rc_proplot:
if not val:
val = None # older proplot versions supported this
elif val in ('True', 'False', 'None'):
val = eval(val) # rare case where eval is o.k.
else:
try:
val = float(val) if '.' in val else int(val)
except ValueError:
pass
# Add to dictionaries
try:
ikw_proplot, ikw_matplotlib = self._get_synced_params(key, val)
kw_proplot.update(ikw_proplot)
kw_matplotlib.update(ikw_matplotlib)
except KeyError:
warnings._warn_proplot(
f'Invalid key {key!r} on line #{cnt} in file {path!r}.'
)
return kw_proplot, kw_matplotlib
def load_file(self, path):
"""
Load settings from the specified file.
Parameters
----------
path : str
The file path.
"""
kw_proplot, kw_matplotlib = self._load_file(path)
rc_proplot.update(kw_proplot)
rc_matplotlib.update(kw_matplotlib)
@staticmethod
def _save_rst(path):
"""
Used internally to create table for online docs.
"""
string = rcsetup._gen_rst_table()
with open(path, 'w') as fh:
fh.write(string)
    @staticmethod
    def _save_proplotrc(path, comment=False):
        """
        Used internally to create initial proplotrc file and file for online docs.
        """
        # Dummy stand-in for the instance: save() only touches 'self' when
        # user=True, and we call it with user=False below.
        self = object()  # self is unused when 'user' is False
        RcConfigurator.save(self, path, user=False, backup=False, comment=comment)
    def save(self, path=None, user=True, comment=None, backup=True, description=False):
        """
        Save the current settings to a ``.proplotrc`` file. This writes
        the default values commented out plus the values that *differ*
        from the defaults at the top of the file.
        Parameters
        ----------
        path : str, optional
            The path name. The default file name is ``.proplotrc`` and the default
            directory is the home directory. Use ``path=''`` to save to the current
            directory.
        user : bool, optional
            If ``True`` (the default), the settings you changed since importing
            proplot are shown uncommented at the very top of the file.
        backup : bool, optional
            If the file already exists and this is set to ``True``, it is moved
            to a backup file with the suffix ``.bak``.
        comment : bool, optional
            Whether to comment out the default settings. Default is the
            value of `user`.
        description : bool, optional
            Whether to include descriptions of each setting as comments.
            Default is ``False``.
        """
        # Resolve the destination path; directories get the default file name
        if path is None:
            path = '~'
        path = os.path.abspath(os.path.expanduser(path))
        if os.path.isdir(path):
            path = os.path.join(path, '.proplotrc')
        # Optionally move an existing file aside instead of overwriting it
        if os.path.isfile(path) and backup:
            os.rename(path, path + '.bak')
            warnings._warn_proplot(
                f'Existing proplotrc file {path!r} was moved to {path + ".bak"!r}.'
            )
        # Generate user-specific table, ignoring non-style related
        # settings that may be changed from defaults like 'backend'
        rc_user = ()
        if user:
            # Changed settings
            # NOTE(review): this unpacks (key, value) pairs from iterating
            # self, while items()/keys()/values() treat iteration as yielding
            # bare keys -- confirm __iter__'s contract.
            rcdict = {
                key: value for key, value in self
                if value != rcsetup._get_default_param(key)
            }
            # Special handling for certain settings
            # TODO: For now not sure how to detect if prop cycle changed since
            # we cannot load it from _cmap_database in rcsetup.
            rcdict.pop('interactive', None)  # changed by backend
            rcdict.pop('axes.prop_cycle', None)
            # Filter and get table
            rcdict = _get_filtered_dict(rcdict, warn=False)
            rc_user_table = rcsetup._gen_yaml_table(rcdict, comment=False)
            rc_user = ('# Settings changed by user', rc_user_table, '')  # + blank line
        # Generate tables and write
        comment = _not_none(comment, user)
        rc_proplot_table = rcsetup._gen_yaml_table(
            rcsetup._rc_proplot, comment=comment, description=description,
        )
        rc_matplotlib_table = rcsetup._gen_yaml_table(
            rcsetup._rc_matplotlib_default, comment=comment
        )
        with open(path, 'w') as fh:
            fh.write('\n'.join((
                '#--------------------------------------------------------------------',
                '# Use this file to change the default proplot and matplotlib settings',
                '# The syntax is identical to matplotlibrc syntax. For details see:',
                '# https://proplot.readthedocs.io/en/latest/configuration.html',
                '# https://matplotlib.org/3.1.1/tutorials/introductory/customizing',
                '#--------------------------------------------------------------------',
                *rc_user,  # includes blank line
                '# ProPlot settings',
                rc_proplot_table,
                '\n# Matplotlib settings',
                rc_matplotlib_table,
            )))
def items(self):
"""
Return an iterator that loops over all setting names and values.
Same as `dict.items`.
"""
for key in self:
yield key, self[key]
def keys(self):
"""
Return an iterator that loops over all setting names.
Same as `dict.keys`.
"""
for key in self:
yield key
def values(self):
"""
Return an iterator that loops over all setting values.
Same as `dict.values`.
"""
for key in self:
yield self[key]
def config_inline_backend(fmt=None):
    """
    Set up the `ipython inline backend \
<https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-matplotlib>`__
    format and ensure that inline figures always look the same as saved
    figures. This runs the following ipython magic commands:
    .. code-block:: ipython
        %config InlineBackend.figure_formats = rc['inlinefmt']
        %config InlineBackend.rc = {} # never override rc settings
        %config InlineBackend.close_figures = True \
# cells start with no active figures
        %config InlineBackend.print_figure_kwargs = {'bbox_inches': None} \
# never override rc settings
    When the inline backend is inactive or unavailable, this has no effect.
    This function is called when you modify the :rcraw:`inlinefmt` property.
    Parameters
    ----------
    fmt : str or list of str, optional
        The inline backend file format(s). Default is :rc:`inlinefmt`.
        Valid formats include ``'jpg'``, ``'png'``, ``'svg'``, ``'pdf'``,
        and ``'retina'``.
    """
    # Silently do nothing when we are not running inside ipython
    ipython = get_ipython()
    if ipython is None:
        return
    # Normalize the requested format(s) into a list
    fmt = _not_none(fmt, rc_proplot['inlinefmt'])
    if isinstance(fmt, str):
        formats = [fmt]
    elif np.iterable(fmt):
        formats = list(fmt)
    else:
        raise ValueError(
            f'Invalid inline backend format {fmt!r}. Must be string or list thereof.'
        )
    # Apply the inline backend configuration via ipython magics
    ipython.magic(f'config InlineBackend.figure_formats = {formats!r}')
    ipython.magic('config InlineBackend.rc = {}')
    ipython.magic('config InlineBackend.close_figures = True')
    ipython.magic("config InlineBackend.print_figure_kwargs = {'bbox_inches': None}")
def _get_cycle_colors(cycle):
    """
    Look up the named color cycle and return its list of colors.
    Raises a helpful ValueError listing valid cycle names otherwise.
    """
    database = pcolors._cmap_database
    try:
        return database[cycle].colors
    except (KeyError, AttributeError):
        # Either the name is unknown (KeyError) or the entry is not a
        # ListedColormap and has no .colors (AttributeError)
        options = sorted(
            name for name, cmap in database.items()
            if isinstance(cmap, pcolors.ListedColormap)
        )
        raise ValueError(
            f'Invalid cycle name {cycle!r}. Options are: '
            + ', '.join(map(repr, options)) + '.'
        )
def _get_default_dict():
    """
    Get the default rc parameters dictionary with deprecated parameters filtered.
    Returns a plain dict suitable for rebuilding matplotlib state from scratch.
    """
    # NOTE: Use RcParams update to filter and translate deprecated settings
    # before actually applying them to rcParams down pipeline. This way we can
    # suppress warnings for deprecated default params but still issue warnings
    # when user-supplied stylesheets have deprecated params.
    # WARNING: Some deprecated rc params remain in dictionary as None so we
    # filter them out. Beware if hidden attribute changes.
    rcdict = _get_filtered_dict(mpl.rcParamsDefault, warn=False)
    with cbook._suppress_matplotlib_deprecation_warning():
        # Round-trip through RcParams to apply matplotlib's own validation
        rcdict = dict(RcParams(rcdict))
    for attr in ('_deprecated_remain_as_none', '_deprecated_set'):
        if hasattr(mpl, attr):  # _deprecated_set is in matplotlib before v3
            for deprecated in getattr(mpl, attr):
                rcdict.pop(deprecated, None)
    return rcdict
def _get_filtered_dict(rcdict, warn=True):
    """
    Return a copy of *rcdict* with blacklisted style parameters removed,
    optionally warning about each one that was dropped.
    """
    # NOTE: This implements bugfix: https://github.com/matplotlib/matplotlib/pull/17252
    # This fix is *critical* for proplot because we always run style.use()
    # when the configurator is made. Without fix backend is reset every time
    # you import proplot in jupyter notebooks. So apply retroactively.
    rcdict_filtered = {}
    for key, value in rcdict.items():
        if key not in mstyle.STYLE_BLACKLIST:
            rcdict_filtered[key] = value
        elif warn:
            warnings._warn_proplot(
                f'Dictionary includes a parameter, {key!r}, that is not related '
                'to style. Ignoring.'
            )
    return rcdict_filtered
def _get_style_dicts(style, infer=False, filter=True):
    """
    Return a dictionary of settings belonging to the requested style(s). If `infer`
    is ``True``, two dictionaries are returned, where the second contains custom
    ProPlot settings "inferred" from the matplotlib settings. If `filter` is ``True``,
    invalid style parameters like `backend` are filtered out.
    """
    # NOTE: This is adapted from matplotlib source for the following changes:
    # 1. Add 'original' option. Like rcParamsOrig except we also *reload*
    #    from user matplotlibrc file.
    # 2. When the style is changed we reset to the *default* state ignoring
    #    matplotlibrc. Matplotlib applies styles on top of current state
    #    (including matplotlibrc changes and runtime rcParams changes) but
    #    IMO the word 'style' implies a *rigid* static format.
    # 3. Add a separate function that returns lists of style dictionaries so
    #    that we can modify the active style in a context block. ProPlot context
    #    is more conservative than matplotlib's rc_context because it gets
    #    called a lot (e.g. every time you make an axes and every format() call).
    #    Instead of copying the entire rcParams dict we just track the keys
    #    that were changed.
    style_aliases = {
        '538': 'fivethirtyeight',
        'mpl20': 'default',
        'mpl15': 'classic',
        'original': mpl.matplotlib_fname(),
    }
    # Always apply the default style *first* so styles are rigid
    kw_matplotlib = _get_default_dict()
    # NOTE(review): this early return yields a single dict even when
    # infer=True, while the normal path returns a 2-tuple -- confirm callers
    # that pass infer=True never request the 'default' style.
    if style == 'default' or style is mpl.rcParamsDefault:
        return kw_matplotlib
    # Apply "pseudo" default properties. Pretend some proplot settings are part of
    # the matplotlib specification so they propagate to other styles.
    kw_matplotlib['font.family'] = 'sans-serif'
    kw_matplotlib['font.sans-serif'] = rcsetup._rc_matplotlib_default['font.sans-serif']
    # Apply user input style(s) one by one
    # NOTE: Always use proplot fonts if style does not explicitly set them.
    if isinstance(style, str) or isinstance(style, dict):
        styles = [style]
    else:
        styles = style
    for style in styles:
        if isinstance(style, dict):
            kw = style
        elif isinstance(style, str):
            # Translate aliases, then try the style library, then treat the
            # string as a file path or URL
            style = style_aliases.get(style, style)
            if style in mstyle.library:
                kw = mstyle.library[style]
            else:
                try:
                    kw = mpl.rc_params_from_file(style, use_default_template=False)
                except IOError:
                    raise IOError(
                        f'Style {style!r} not found in the style library and input is '
                        'not a valid URL or path. Available styles are: '
                        + ', '.join(map(repr, mstyle.available)) + '.'
                    )
        else:
            raise ValueError(f'Invalid style {style!r}. Must be string or dictionary.')
        if filter:
            kw = _get_filtered_dict(kw, warn=True)
        kw_matplotlib.update(kw)
    # Infer proplot params from stylesheet params
    if infer:
        kw_proplot = _infer_added_params(kw_matplotlib)
        return kw_matplotlib, kw_proplot
    else:
        return kw_matplotlib
def _infer_added_params(kw_params):
"""
Infer values for proplot's "added" parameters from stylesheets.
"""
kw_proplot = {}
mpl_to_proplot = {
'font.size': ('tick.labelsize',),
'axes.titlesize': (
'abc.size', 'suptitle.size', 'title.size',
'leftlabel.size', 'rightlabel.size',
'toplabel.size', 'bottomlabel.size',
),
'text.color': (
'abc.color', 'suptitle.color', 'tick.labelcolor', 'title.color',
'leftlabel.color', 'rightlabel.color',
'toplabel.color', 'bottomlabel.color',
),
}
for key, params in mpl_to_proplot.items():
if key in kw_params:
value = kw_params[key]
for param in params:
kw_proplot[param] = value
return kw_proplot
def use_style(style):
    """
    Apply the `matplotlib style(s) \
<https://matplotlib.org/tutorials/introductory/customizing.html>`__
    with `matplotlib.style.use`. This function is
    called when you modify the :rcraw:`style` property.
    Parameters
    ----------
    style : str, dict, or list thereof
        The matplotlib style name(s) or stylesheet filename(s), or dictionary(s)
        of settings. Use ``'default'`` to apply matplotlib default settings and
        ``'original'`` to include settings from your user ``matplotlibrc`` file.
    """
    # NOTE: This function is not really necessary but makes proplot's
    # stylesheet-supporting features obvious. Plus changing the style does
    # so much *more* than changing rc params or quick settings, so it is
    # nice to have dedicated function instead of just another rc_param name.
    # Resolve the style into matplotlib settings plus the proplot settings
    # inferred from them, then apply both sets globally.
    matplotlib_settings, proplot_settings = _get_style_dicts(style, infer=True)
    rc_proplot.update(proplot_settings)
    rc_matplotlib.update(matplotlib_settings)
@docstring.add_snippets
def register_cmaps(user=True, default=False):
    """
    Register colormaps packaged with ProPlot or saved to the
    ``~/.proplot/cmaps`` folder. This is called on import.
    Colormaps are registered according to their filenames -- for example,
    ``name.xyz`` will be registered as ``'name'``.
    %(register.ext_table)s
    To visualize the registered colormaps, use `~proplot.demos.show_cmaps`.
    Parameters
    ----------
    %(register_cmaps.params)s
    """
    # i == 0 iterates packaged (default) files, i == 1 iterates user files
    for i, dirname, filename in _iter_data_paths('cmaps', user=user, default=default):
        path = os.path.join(dirname, filename)
        cmap = pcolors.LinearSegmentedColormap.from_file(path, warn_on_failure=True)
        if not cmap:  # loading failed; a warning was already issued
            continue
        # Mark known cyclic colormaps so hues can wrap around correctly
        if i == 0 and cmap.name.lower() in (
            'phase', 'graycycle', 'romao', 'broco', 'corko', 'viko',
        ):
            cmap.set_cyclic(True)
        pcolors._cmap_database[cmap.name] = cmap
@docstring.add_snippets
def register_cycles(user=True, default=False):
    """
    Register color cycles packaged with ProPlot or saved to the
    ``~/.proplot/cycles`` folder. This is called on import. Color cycles
    are registered according to their filenames -- for example, ``name.hex``
    will be registered as ``'name'``.
    %(register.ext_table)s
    To visualize the registered color cycles, use `~proplot.demos.show_cycles`.
    Parameters
    ----------
    %(register_cycles.params)s
    """
    # Cycles are stored as ListedColormaps in the shared colormap database
    for _, dirname, filename in _iter_data_paths('cycles', user=user, default=default):
        path = os.path.join(dirname, filename)
        cmap = pcolors.ListedColormap.from_file(path, warn_on_failure=True)
        if not cmap:  # loading failed; a warning was already issued
            continue
        pcolors._cmap_database[cmap.name] = cmap
@docstring.add_snippets
def register_colors(user=True, default=False, space='hcl', margin=0.10):
    """
    Register the `open-color <https://yeun.github.io/open-color/>`_ colors,
    XKCD `color survey <https://xkcd.com/color/rgb/>`_ colors, and colors
    saved to the ``~/.proplot/colors`` folder. This is called on import.
    The color survey colors are filtered to a subset that is "perceptually
    distinct" in the HCL colorspace. The user color names are loaded from
    ``.txt`` files saved in ``~/.proplot/colors``. Each file should contain
    one line per color in the format ``name : hex``. Whitespace is ignored.
    To visualize the registered colors, use `~proplot.demos.show_colors`.
    Parameters
    ----------
    %(register_colors.params)s
    space : {'hcl', 'hsl', 'hpl'}, optional
        The colorspace used to detect "perceptually distinct" colors.
    margin : float, optional
        The margin by which a color's normalized hue, saturation, and
        luminance channel values must differ from the normalized channel
        values of the other colors to be deemed "perceptually distinct."
    """
    # Reset native colors dictionary
    mcolors.colorConverter.colors.clear()  # clean out!
    mcolors.colorConverter.cache.clear()  # clean out!
    # Add in base colors and CSS4 colors so user has no surprises
    for name, dict_ in (('base', BASE_COLORS), ('css', mcolors.CSS4_COLORS)):
        mcolors.colorConverter.colors.update(dict_)
    # Load colors from file and get their HCL values
    # NOTE: Colors that come *later* overwrite colors that come earlier.
    hex = re.compile(rf'\A{pcolors.HEX_PATTERN}\Z')  # match each string
    # i == 0 iterates packaged (default) files, i == 1 iterates user files
    for i, dirname, filename in _iter_data_paths('colors', user=user, default=default):
        path = os.path.join(dirname, filename)
        cat, ext = os.path.splitext(filename)
        if ext != '.txt':
            raise ValueError(
                f'Unknown color data file extension ({path!r}). '
                'All files in this folder should have extension .txt.'
            )
        # Read data
        loaded = {}
        with open(path, 'r') as fh:
            for cnt, line in enumerate(fh):
                # Load colors from file
                stripped = line.strip()
                if not stripped or stripped[0] == '#':
                    continue
                pair = tuple(
                    item.strip().lower() for item in line.split(':')
                )
                if len(pair) != 2 or not hex.match(pair[1]):
                    warnings._warn_proplot(
                        f'Illegal line #{cnt + 1} in file {path!r}:\n'
                        f'{line!r}\n'
                        f'Lines must be formatted as "name: hexcolor".'
                    )
                    continue
                # Never overwrite "base" colors with xkcd colors.
                # Only overwrite with user colors.
                name, color = pair
                if i == 0 and name in BASE_COLORS:
                    continue
                loaded[name] = color
        # Add every user color and every opencolor color and ensure XKCD
        # colors are "perceptually distinct".
        if i == 1:
            mcolors.colorConverter.colors.update(loaded)
        elif cat == 'opencolor':
            mcolors.colorConverter.colors.update(loaded)
            OPEN_COLORS.update(loaded)
        elif cat == 'xkcd':
            # Always add these colors, but make sure not to add other
            # colors too close to them.
            hcls = []
            filtered = []
            for name in ALWAYS_ADD:
                color = loaded.pop(name, None)
                if color is None:
                    continue
                if 'grey' in name:
                    name = name.replace('grey', 'gray')
                hcls.append(to_xyz(color, space=space))
                filtered.append((name, color))
                mcolors.colorConverter.colors[name] = color
                XKCD_COLORS[name] = color
            # Get locations of "perceptually distinct" colors
            # WARNING: Unique axis argument requires numpy version >=1.13
            for name, color in loaded.items():
                for string, replace in TRANSLATE_COLORS:
                    if string in name:
                        name = name.replace(string, replace)
                if any(string in name for string in ALWAYS_REMOVE):
                    continue  # remove "unprofessional" names
                hcls.append(to_xyz(color, space=space))
                filtered.append((name, color))  # category name pair
            # Quantize normalized channel values into bins of width `margin`;
            # colors that share a bin triple are considered indistinct and
            # only the first occurrence is kept (np.unique keeps first index)
            hcls = np.asarray(hcls)
            if not hcls.size:
                continue
            hcls = hcls / np.array([360, 100, 100])
            hcls = np.round(hcls / margin).astype(np.int64)
            _, idxs = np.unique(hcls, return_index=True, axis=0)
            # Register "distinct" colors
            for idx in idxs:
                name, color = filtered[idx]
                mcolors.colorConverter.colors[name] = color
                XKCD_COLORS[name] = color
        else:
            raise ValueError(f'Unknown proplot color database {path!r}.')
def register_fonts():
    """
    Add fonts packaged with ProPlot or saved to the ``~/.proplot/fonts``
    folder, if they are not already added. Detects ``.ttf`` and ``.otf`` files
    -- see `this link \
<https://gree2.github.io/python/2015/04/27/python-change-matplotlib-font-on-mac>`__
    for a guide on converting various other font file types to ``.ttf`` and
    ``.otf`` for use with matplotlib.
    To visualize the registered fonts, use `~proplot.demos.show_fonts`.
    """
    # Find proplot fonts
    # WARNING: If you include a font file with an unrecognized style,
    # matplotlib may use that font instead of the 'normal' one! Valid styles:
    # 'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman',
    # 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black'
    # https://matplotlib.org/api/font_manager_api.html
    # For macOS the only fonts with 'Thin' in one of the .ttf file names
    # are Helvetica Neue and .SF NS Display Condensed. Never try to use these!
    paths_proplot = _get_data_paths('fonts', reverse=True)
    fnames_proplot = set(mfonts.findSystemFonts(paths_proplot))
    # Detect user-input ttc fonts and issue warning
    fnames_proplot_ttc = {
        file for file in fnames_proplot if os.path.splitext(file)[1] == '.ttc'
    }
    if fnames_proplot_ttc:
        warnings._warn_proplot(
            'Ignoring the following .ttc fonts because they cannot be '
            'saved into PDF or EPS files (see matplotlib issue #3135): '
            + ', '.join(map(repr, sorted(fnames_proplot_ttc)))
            + '. Please consider expanding them into separate .ttf files.'
        )
    # Rebuild font cache only if necessary! Can be >50% of total import time!
    fnames_all = {font.fname for font in mfonts.fontManager.ttflist}
    fnames_proplot -= fnames_proplot_ttc
    if not fnames_all >= fnames_proplot:
        warnings._warn_proplot('Rebuilding font cache.')
        if hasattr(mfonts.fontManager, 'addfont'):
            # New API lets us add font files manually
            for fname in fnames_proplot:
                mfonts.fontManager.addfont(fname)
            mfonts.json_dump(mfonts.fontManager, mfonts._fmcache)
        else:
            # Old API requires us to modify TTFPATH
            # NOTE: Previously we tried to modify TTFPATH before importing
            # font manager with hope that it would load proplot fonts on
            # initialization. But 99% of the time font manager just imports
            # the FontManager from cache, so this doesn't work.
            paths = ':'.join(paths_proplot)
            if 'TTFPATH' not in os.environ:
                os.environ['TTFPATH'] = paths
            elif paths not in os.environ['TTFPATH']:
                os.environ['TTFPATH'] += ':' + paths
            mfonts._rebuild()
    # Remove ttc files and 'Thin' fonts *after* rebuild
    # NOTE: 'Thin' filter is ugly kludge but without this matplotlib picks up on
    # Roboto thin ttf files installed on the RTD server when compiling docs.
    # BUGFIX: the filter previously used 'or ... in ...', which *kept* every
    # 'Thin' font instead of dropping it, contradicting the intent above.
    mfonts.fontManager.ttflist = [
        font for font in mfonts.fontManager.ttflist
        if os.path.splitext(font.fname)[1] != '.ttc'
        and 'Thin' not in os.path.basename(font.fname)
    ]
def _patch_validators():
    """
    Fix the fontsize validators to allow for new font scalings
    (e.g. the 'med-small' and 'med-large' scalings added by proplot).
    """
    # First define validators
    # NOTE: In the future will subclass RcParams directly and control the
    # validators ourselves.
    def _validate_fontsize(s):
        # Accept a named font scaling (case-insensitive) or a float point size
        fontsizes = list(mfonts.font_scalings)
        if isinstance(s, str):
            s = s.lower()
        if s in fontsizes:
            return s
        try:
            return float(s)
        except ValueError:
            # BUGFIX: the message and the joined size list were previously
            # glued by implicit string-literal concatenation, which turned the
            # whole message into the *separator* of join(). Use '+' instead.
            raise ValueError(
                f'{s!r} is not a valid font size. Valid sizes are: '
                + ', '.join(map(repr, fontsizes))
            )
    def _validate_fontsize_None(s):
        # Same as _validate_fontsize but additionally permit None / 'None'
        if s is None or s == 'None':
            return None
        else:
            return _validate_fontsize(s)
    _validate_fontsizelist = None
    if hasattr(msetup, '_listify_validator'):
        _validate_fontsizelist = msetup._listify_validator(_validate_fontsize)
    # Apply new functions
    validate = RcParams.validate
    for key in list(validate):  # modify in-place
        validator = validate[key]
        if validator is msetup.validate_fontsize:
            validate[key] = _validate_fontsize
        elif validator is getattr(msetup, 'validate_fontsize_None', None):
            validate[key] = _validate_fontsize_None
        elif validator is getattr(msetup, 'validate_fontsizelist', None):
            if _validate_fontsizelist is not None:
                validate[key] = _validate_fontsizelist
# Initialize .proplotrc file in the user's home directory on first import
_user_rc_file = os.path.join(os.path.expanduser('~'), '.proplotrc')
if not os.path.exists(_user_rc_file):
    RcConfigurator._save_proplotrc(_user_rc_file, comment=True)
# Initialize customization folders for user cmaps/cycles/colors/fonts
_rc_folder = os.path.join(os.path.expanduser('~'), '.proplot')
if not os.path.isdir(_rc_folder):
    os.mkdir(_rc_folder)
for _rc_sub in ('cmaps', 'cycles', 'colors', 'fonts'):
    _rc_sub = os.path.join(_rc_folder, _rc_sub)
    if not os.path.isdir(_rc_sub):
        os.mkdir(_rc_sub)
# Add custom font scalings to font_manager and monkey patch rcParams validator
# NOTE: This is because we prefer large sizes
if hasattr(mfonts, 'font_scalings'):
    mfonts.font_scalings['med-small'] = 0.9
    mfonts.font_scalings['med-large'] = 1.1
    _patch_validators()
# Convert colormaps that *should* be LinearSegmented from Listed
for _name in ('viridis', 'plasma', 'inferno', 'magma', 'cividis', 'twilight'):
    _cmap = pcolors._cmap_database.get(_name, None)
    if _cmap and isinstance(_cmap, pcolors.ListedColormap):
        del pcolors._cmap_database[_name]
        pcolors._cmap_database[_name] = pcolors.LinearSegmentedColormap.from_list(
            _name, _cmap.colors, cyclic=(_name == 'twilight')
        )
# Register objects and configure settings (benchmarked for import profiling)
with timers._benchmark('cmaps'):
    register_cmaps(default=True)
with timers._benchmark('cycles'):
    register_cycles(default=True)
with timers._benchmark('colors'):
    register_colors(default=True)
with timers._benchmark('fonts'):
    register_fonts()
with timers._benchmark('rc'):
    _ = RcConfigurator()
#: Instance of `RcConfigurator`. This is used to change global settings.
#: See the :ref:`configuration guide <ug_config>` for details.
rc = _
# Modify N of existing colormaps because ProPlot settings may have changed
# image.lut. We have to register colormaps and cycles first so that the 'cycle'
# property accepts named cycles registered by ProPlot. No performance hit here.
lut = rc['image.lut']
for cmap in pcolors._cmap_database.values():
    if isinstance(cmap, mcolors.LinearSegmentedColormap):
        cmap.N = lut
# Deprecated
# NOTE(review): 'RcConfigurator=config_inline_backend' maps the deprecated
# alias to the *function* config_inline_backend rather than the
# RcConfigurator class -- looks like a copy-paste slip; confirm against
# warnings._rename_objs usage.
inline_backend_fmt, rc_configurator = warnings._rename_objs(
    '0.6',
    inline_backend_fmt=config_inline_backend,
    RcConfigurator=config_inline_backend,
)
| [
"lukelbd@gmail.com"
] | lukelbd@gmail.com |
5a574f5632de8c2c2bd44b35eab20090eff2e13e | f3e2ac6d8b1a119233a453d3e96207cdb1b12cd6 | /GUI/Models/SweepsModel.py | 0a4e5ab178c7815d3823a50ae91e2934efd402c0 | [
"BSD-3-Clause"
] | permissive | pracedru/PracedruDesign | 017071f75ee3aabe5832828f3f4d095ee7488841 | e6e313ee3efb377a8e393e5276eb9daa172e1c58 | refs/heads/master | 2021-01-20T00:10:36.038890 | 2019-03-11T09:21:55 | 2019-03-11T09:21:55 | 89,090,104 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,578 | py | from PyQt5.QtCore import *
from Business import *
__author__ = 'mamj'
# Table column headers and the display names for the two sweep types
# (index into `types` is sweep_def.type - 1)
col_header = ["Sweep definitions", "Type"]
types = ['Area sweep', 'Line sweep']
class SweepsModel(QAbstractTableModel):
    """Qt table model exposing a document's sweep definitions (name and type)."""
    def __init__(self, doc):
        # NOTE(review): calls QAbstractItemModel.__init__ although the base
        # class is QAbstractTableModel -- works because it is an ancestor,
        # but super().__init__() would be clearer; confirm intent.
        QAbstractItemModel.__init__(self)
        self._sweeps = doc.get_sweeps()
        self._doc = doc
        # Refresh the view whenever the underlying sweep data changes
        self._sweeps.add_change_handler(self.on_sweep_definition_changed)
    def rowCount(self, model_index=None, *args, **kwargs):
        # One row per sweep definition
        return len(self._sweeps.get_sweep_definitions())
    def columnCount(self, model_index=None, *args, **kwargs):
        return len(col_header)
    def data(self, model_index: QModelIndex, int_role=None):
        # Column 0 shows the name, column 1 the human-readable type.
        # For EditRole on column 1 the full `types` list is returned,
        # presumably for a combo-box delegate -- confirm against the view.
        col = model_index.column()
        row = model_index.row()
        data = None
        if int_role == Qt.DisplayRole:
            sweep_def_item = self._sweeps.get_sweep_definitions()[row]
            if col == 0:
                data = sweep_def_item.name
            if col == 1:
                data = types[sweep_def_item.type - 1]
        elif int_role == Qt.EditRole:
            sweep_def_item = self._sweeps.get_sweep_definitions()[row]
            if col == 0:
                data = sweep_def_item.name
            elif col == 1:
                return types
        return data
    def setData(self, model_index: QModelIndex, value: QVariant, int_role=None):
        # Write edits back to the sweep definition
        col = model_index.column()
        row = model_index.row()
        sweep_def_item = self._sweeps.get_sweep_definitions()[row]
        if col == 0:
            sweep_def_item.name = value
            return True
        elif col == 1:
            # NOTE(review): the type *is* assigned here but False is
            # returned -- confirm whether True (and a dataChanged signal)
            # was intended.
            sweep_def_item.type = value + 1
        return False
    def removeRow(self, row, QModelIndex_parent=None, *args, **kwargs):
        # Delegate deletion to the Business layer
        sweep_def_item = self._sweeps.get_sweep_definitions()[row]
        remove_sweep_definitions(self._doc, [sweep_def_item])
    def remove_rows(self, rows):
        # Batch removal of several rows in one Business-layer call
        sweep_def_items = set()
        for row in rows:
            sweep_def_items.add(self._sweeps.get_sweep_definitions()[row])
        remove_sweep_definitions(self._doc, sweep_def_items)
    def on_sweep_definition_changed(self, event: ChangeEvent):
        # Any change in the sweep collection invalidates the whole layout
        self.layoutChanged.emit()
    def flags(self, model_index: QModelIndex):
        # All cells are selectable and editable
        default_flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
        return default_flags
    def headerData(self, p_int, orientation, int_role=None):
        # Row numbers vertically, column titles horizontally
        if int_role == Qt.DisplayRole:
            if orientation == Qt.Vertical:
                return p_int
            else:
                return col_header[p_int]
        else:
            return
    def get_sweep_object(self):
        return self._sweeps
    def get_sweep_definition(self, row):
        return self._sweeps.get_sweep_definitions()[row]
    def get_index_from_sweep_definition(self, sweep_definition):
        # Map a definition object back to its model index (column 0)
        row = self._sweeps.get_sweep_definitions().index(sweep_definition)
        return self.index(row, 0)
    def get_options(self, index):
        # Options offered by the type-column editor delegate
        return types
| [
"magnusmj@gmail.com"
] | magnusmj@gmail.com |
d2bbd1c16761d3f6c45996c05cdd1655ee3fa61e | 687306842e8082ed1c31441bbacf697352fe1d22 | /icmpTest1.py | 82eed1f10e9b50d842ad85cb046356b73ad3eb6e | [] | no_license | Garyguo2011/Firewall | a77940d6fa0957fb2c2811cfcc5fa3c3b8982209 | 0906e947853c14b0a04fcccfd350202405b1c8f5 | refs/heads/master | 2020-05-21T11:37:02.562484 | 2014-12-03T15:45:44 | 2014-12-03T15:45:44 | 26,337,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | from bypass_phase1 import Firewall
from main import PKT_DIR_INCOMING, PKT_DIR_OUTGOING
import socket
def ip_int_to_str(ipNum):
    """Convert a 32-bit integer IPv4 address to dotted-quad notation."""
    # Extract the four octets from most to least significant byte
    octets = [(ipNum >> shift) & 255 for shift in (24, 16, 8, 0)]
    return ".".join(str(octet) for octet in octets)
# Replay a captured ICMP packet dump through the firewall's packet allocator
# and show the result. NOTE(review): the file handle is never closed and the
# Firewall is constructed with None config args -- presumably fine for this
# ad-hoc test; confirm against the Firewall constructor.
f = open ('icmppacketpool8')
inputbuffer = f.read()
firewallTest = Firewall(None, None, None)
archieve = firewallTest.packet_allocator(PKT_DIR_OUTGOING, inputbuffer, firewallTest.countryCodeDict)
print(archieve) | [
"danielhex@berkeley.edu"
] | danielhex@berkeley.edu |
67548963546c6bc30c8509496230d3aba4cd2333 | 8d875760111fbbb1ffb1944bdcf542f4758eb4bb | /models/utilities.py | 9c09beed3b488e634d8efa3534481ce32c06a1c9 | [
"MIT"
] | permissive | humanpose1/KPConvTorch | 403f3a1ada7bee96d17729efa984759617df49a3 | 5b9388b216d7e3e347c61f4c4fac090722ad0132 | refs/heads/master | 2020-11-25T00:36:24.895410 | 2020-01-10T11:43:05 | 2020-01-10T11:43:05 | 228,411,389 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import torch
def weight_variable(shape):
    """Return a float tensor of the given shape with Xavier-normal init."""
    # xavier_normal_ initializes in place and returns its input tensor
    return torch.nn.init.xavier_normal_(torch.empty(shape, dtype=torch.float))
| [
"sofiane.horache@mines-paristech.fr"
] | sofiane.horache@mines-paristech.fr |
ff288cab989bc2e3ef931af1550bcd2c05ed611b | d5f96f7c143d7f99cfc6f1d3076f2ace3e3cad89 | /PATfiles/CameraPrograms/takingMeme.py | 2ae9d441495fc5a13a6cadc53a53644774b6908b | [] | no_license | thanawinboon/iort | 7f89f0f4e7527fd904a49cfca4d3e1068eaef250 | 65ab3daad94a61125a16a75948a335c14b46ead4 | refs/heads/master | 2021-06-06T00:57:20.224735 | 2021-05-04T14:54:46 | 2021-05-04T14:54:46 | 152,364,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | from picamera import PiCamera, Color
from time import sleep
# Camera setup: module mounted upside-down, full 5MP still resolution
camera = PiCamera()
camera.rotation = 180
camera.resolution = (2592, 1944)
camera.framerate = 15
# Caption overlay drawn on the preview and the captured image
camera.annotate_text = "WOW!"
camera.annotate_text_size = 120
camera.annotate_foreground = Color('#92f4c3')
camera.annotate_background = Color('#000000')
def takeMeemee():
    """Wait for the sensor to settle, then capture a still to mememax.jpg."""
    sleep(10)
    camera.capture('mememax.jpg')
try:
    camera.start_preview()
    takeMeemee()
finally:
    # Always tear down the preview, even if the capture fails
    camera.stop_preview()
| [
"noreply@github.com"
] | thanawinboon.noreply@github.com |
208239d6041fc771684d15c89a8f876574754a8a | 63902de3371b9d111ab8603ed96e557923161d0f | /software/matmult.py | ddfd7b841771f10cd8d4843b56c51b0fd3c7dbf1 | [] | no_license | UCI-EECS113-Spring17/F.A.S.T.R. | 61b00f2927ad061ac743c28b69109106cb44c798 | 9f55456e04e462130b7c58349df128b17b46edb0 | refs/heads/master | 2021-01-20T07:10:40.081513 | 2017-06-08T07:19:23 | 2017-06-08T07:19:23 | 89,972,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | import random as r
import time as t
import sys
#n columns, m rows
def getMat(n, m, sml, lrg):
    """Return an m x n matrix (list of m rows, each with n columns) of
    random integers drawn uniformly from the inclusive range [sml, lrg]."""
    # Nested comprehension replaces the manual append loops; the values are
    # still drawn in the same row-major order as before.
    return [[r.randint(sml, lrg) for _ in range(n)] for _ in range(m)]
def matmult(a, b):
    """Multiply matrix a (p x q) by matrix b (q x r); return a p x r matrix."""
    # Materialize b's columns once so each row of a can be dotted against them
    cols_b = list(zip(*b))
    return [
        [sum(x * y for x, y in zip(row_a, col)) for col in cols_b]
        for row_a in a
    ]
# Matrix size; may be overridden by the first command-line argument
n = 10
if len(sys.argv) == 2:
    n = int(sys.argv[1])
x = getMat(n,n,0,20)
y = getMat(n,n,0,20)
# Time the multiplication only (not the matrix generation)
start = t.time()
ans = matmult(x,y)
finish = t.time()
elapsed = finish-start
print("Solution of %dx%d matrix took %f seconds"%(n,n,elapsed))
# Operation counts for an n x n by n x n product:
# n^3 multiplications and (n-1)*n^2 additions
mults = n**3
adds = (n-1)*(n**2)
print("Totals:")
print("\t Multiplications: %d (%f per second)"%(mults,mults/elapsed))
print("\t Additions: %d (%f per second)"%(adds,adds/elapsed))
#print(matmult(x,y))
"""
for i in range(10,100,10):
    print(str(i)+"x"+str(i)+" matrix:")
    x = getMat(i,i,0,20)
    y = getMat(i,i,0,20)
    print(x)
    print(y)
    print(matmult(x,y))
"""
| [
"noreply@github.com"
] | UCI-EECS113-Spring17.noreply@github.com |
3a498868fa73cd0366b3104035cc7bb333a9ae24 | 93de3a2f3972fc165d0901e5a7cf801b4e90d325 | /MaoYan_Top100/MaoYan_Top100/pipelines.py | 21bf63f0925690e045d30e4965bed41cda11e691 | [] | no_license | chenzwcc/Scrapy-CrawlSpider- | f2bcfaea604bb98a85e3a295e5638eeb5b2b2983 | f721acccbb53cea14e52fe169be84156ed8ee44e | refs/heads/master | 2021-08-19T19:37:46.929640 | 2017-11-27T08:25:02 | 2017-11-27T08:25:02 | 110,527,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
class MaoyanTop100Pipeline(object):
    """Scrapy item pipeline that stores scraped items in MongoDB."""
    def __init__(self):
        """
        Initialize the MongoDB connection from the Scrapy settings:
        host, port, database name, and collection (sheet) name.
        """
        host = settings["MONGODB_HOST"]
        port = settings["MONGODB_PORT"]
        dbname = settings["MONGODB_DBNAME"]
        sheetname = settings["MONGODB_SHEETNAME"]
        # Create the MongoDB client connection
        client = pymongo.MongoClient(host=host,port=port)
        # Select the target database
        mydb =client[dbname]
        # Keep a handle to the collection used for inserts
        self.post=mydb[sheetname]
    def process_item(self, item, spider):
        # Convert the Item (dict-like) into a plain dict
        data=dict(item)
        # Insert the record into the collection
        # NOTE(review): Collection.insert is deprecated in modern pymongo in
        # favor of insert_one -- fine for the pymongo version pinned here.
        self.post.insert(data)
        return item
| [
"2552770507@qq.com"
] | 2552770507@qq.com |
285a20b1a0d76c9aa5140add05e2a88d7bc12e03 | 6e749ee34632170fc620379e10e5a86a99a2a32f | /Python Sandbox/Sandbox/Requests/venv/Scripts/pip-script.py | cd0107fe59d3439e1324075af05bfad15281ab24 | [] | no_license | terrence-adams/Python | f242a59f39a089dbb1abc86b62c01e2cd7b8c1bb | e1e6a3d9fe2cac59195e115e078a39b95ddb036c | refs/heads/master | 2023-08-11T08:21:35.804601 | 2020-09-14T23:25:02 | 2020-09-14T23:25:02 | 209,128,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | #!C:\Repo\Python\Requests\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools console-script wrapper for this virtualenv's pip:
# it resolves the 'pip' console_scripts entry point of the pinned
# distribution and invokes it, propagating the exit code.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py' / '.exe' suffix that Windows launchers append to argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"terrence.adams@sync1systems.com"
] | terrence.adams@sync1systems.com |
f01d0b480aa3c5d2ae32d1deb070670ba75fff99 | c352533d509a5efcb3417c51db33ed63bf0739d9 | /api/migrations/versions/cc9ddbe6d3c4_initial_migration.py | e79395979d26b9a095fe7518446eff5fa81b665f | [] | no_license | brunowerneck/Spiral-ERP | 1b1a965309683045972f960dc4069975afaffe92 | 0b61bd2a508679466753c7dc715bc80ec3f11918 | refs/heads/main | 2023-07-31T10:58:45.091609 | 2021-09-28T12:38:04 | 2021-09-28T12:38:04 | 411,104,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | """Initial migration
Revision ID: cc9ddbe6d3c4
Revises:
Create Date: 2021-09-20 10:51:52.072947
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'cc9ddbe6d3c4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: products, statuses, suppliers, units,
    batches, materials, and the three association tables.

    Parent tables are created first so the foreign keys of batches/materials
    and of the association tables resolve.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('products',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('name', sa.String(length=240), nullable=False),
    sa.Column('short_description', sa.String(length=240), nullable=True),
    sa.Column('long_description', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('statuses',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('name', sa.String(length=240), nullable=False),
    # 'order' drives display ordering; MySQL-specific unsigned TINYINT
    sa.Column('order', mysql.TINYINT(display_width=2, unsigned=True), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name'),
    sa.UniqueConstraint('order')
    )
    op.create_table('suppliers',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('name', sa.String(length=240), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('units',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('name', sa.String(length=240), nullable=False),
    sa.Column('abbreviation', sa.String(length=20), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('abbreviation'),
    sa.UniqueConstraint('name')
    )
    op.create_table('batches',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('product_id', sa.String(length=36), nullable=True),
    sa.Column('output', sa.Float(), nullable=False),
    sa.Column('output_unit_id', sa.String(length=36), nullable=True),
    sa.Column('unit_value', sa.Float(), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=False),
    sa.Column('updated', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['output_unit_id'], ['units.id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('materials',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('name', sa.String(length=240), nullable=False),
    sa.Column('unit_value', sa.Float(), nullable=False),
    sa.Column('supplier_id', sa.String(length=36), nullable=False),
    sa.Column('unit_id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['supplier_id'], ['suppliers.id'], ),
    sa.ForeignKeyConstraint(['unit_id'], ['units.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Association table: which materials (and how much) went into a batch
    op.create_table('batch_materials',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('batch_id', sa.String(length=36), nullable=False),
    sa.Column('material_id', sa.String(length=36), nullable=False),
    sa.Column('amount', sa.Float(), nullable=False),
    sa.Column('unit_value', sa.Float(), nullable=False),
    sa.ForeignKeyConstraint(['batch_id'], ['batches.id'], ),
    sa.ForeignKeyConstraint(['material_id'], ['materials.id'], ),
    sa.PrimaryKeyConstraint('id', 'batch_id', 'material_id')
    )
    # Association table: status history of a batch, with optional notes
    op.create_table('batch_statuses',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('batch_id', sa.String(length=36), nullable=False),
    sa.Column('status_id', sa.String(length=36), nullable=False),
    sa.Column('notes', sa.Text(), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['batch_id'], ['batches.id'], ),
    sa.ForeignKeyConstraint(['status_id'], ['statuses.id'], ),
    sa.PrimaryKeyConstraint('id', 'batch_id', 'status_id')
    )
    # Association table: default bill of materials for a product
    op.create_table('product_materials',
    sa.Column('id', mysql.INTEGER(unsigned=True), autoincrement=True, nullable=False),
    sa.Column('product_id', sa.String(length=36), nullable=False),
    sa.Column('material_id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['material_id'], ['materials.id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
    sa.PrimaryKeyConstraint('id', 'product_id', 'material_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables created by upgrade(), children before parents so the
    foreign-key constraints do not block the drops."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('product_materials')
    op.drop_table('batch_statuses')
    op.drop_table('batch_materials')
    op.drop_table('materials')
    op.drop_table('batches')
    op.drop_table('units')
    op.drop_table('suppliers')
    op.drop_table('statuses')
    op.drop_table('products')
    # ### end Alembic commands ###
| [
"brunovaulawerneck@gmail.com"
] | brunovaulawerneck@gmail.com |
385e8a91f3b1394a8c6d0ee5111d76952ec464c1 | a16ad99fe861c518013daf48a04b28f4261013ae | /diff.py | abc13148f85cf7bebefbc9347189b7d71c1e3aa9 | [] | no_license | NamrataTtn/Practise | 2638a2f6a0a648cdf5709c04d5d35f6a06916e07 | f83ce869b984f35ca3fad77da7f9a7504d03cb73 | refs/heads/master | 2020-04-28T03:22:54.728472 | 2019-05-07T06:11:07 | 2019-05-07T06:11:07 | 174,934,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,355 | py | # encoding=utf8
import sys
# Python 2-only interpreter tweak: re-expose sys.setdefaultencoding and force
# UTF-8 as the default string encoding (needed for the '→' marker below).
# reload()/setdefaultencoding() do not exist as builtins on Python 3.
reload(sys)
sys.setdefaultencoding('utf8')
import pandas as pd
from pathlib import Path
def excel_diff(path_OLD, path_NEW, index_col):
    """Compare two Excel workbooks and write a highlighted diff workbook.

    Cells whose value changed are rendered as "old→new" and highlighted;
    rows present only in the new file are marked green, rows dropped from
    the old file grey.  The output file '<old stem> vs <new stem>.xlsx'
    contains three sheets: the DIFF plus both inputs verbatim.

    path_OLD, path_NEW -- pathlib.Path objects of the two workbooks.
    index_col -- column name used as the row key when aligning the files.
    """
    df_OLD = pd.read_excel(path_OLD, index_col=index_col).fillna(0)
    df_NEW = pd.read_excel(path_NEW, index_col=index_col).fillna(0)

    # Perform Diff: start from the new data and overwrite changed cells
    # with an "old→new" marker string.
    dfDiff = df_NEW.copy()
    droppedRows = []
    newRows = []

    cols_OLD = df_OLD.columns
    cols_NEW = df_NEW.columns
    sharedCols = list(set(cols_OLD).intersection(cols_NEW))

    for row in dfDiff.index:
        if (row in df_OLD.index) and (row in df_NEW.index):
            for col in sharedCols:
                value_OLD = df_OLD.loc[row, col]
                value_NEW = df_NEW.loc[row, col]
                if value_OLD == value_NEW:
                    dfDiff.loc[row, col] = df_NEW.loc[row, col]
                else:
                    dfDiff.loc[row, col] = ('{}→{}').format(value_OLD, value_NEW)
        else:
            newRows.append(row)

    # Rows that exist only in the old file are appended to the diff so the
    # reader can still see them (greyed out below).
    # Bug fix: DataFrame.append() was removed in pandas 2.0 -- collect the
    # one-row frames (df.loc[[row]] keeps the row label) and concat once.
    dropped_frames = []
    for row in df_OLD.index:
        if row not in df_NEW.index:
            droppedRows.append(row)
            dropped_frames.append(df_OLD.loc[[row]])
    if dropped_frames:
        dfDiff = pd.concat([dfDiff] + dropped_frames)

    dfDiff = dfDiff.sort_index().fillna('')
    print(dfDiff)
    print('\nNew Rows: {}'.format(newRows))
    print('Dropped Rows: {}'.format(droppedRows))

    # Save output and format
    fname = '{} vs {}.xlsx'.format(path_OLD.stem, path_NEW.stem)
    writer = pd.ExcelWriter(fname, engine='xlsxwriter')
    dfDiff.to_excel(writer, sheet_name='DIFF', index=True)
    df_NEW.to_excel(writer, sheet_name=path_NEW.stem, index=True)
    df_OLD.to_excel(writer, sheet_name=path_OLD.stem, index=True)

    # get xlsxwriter objects
    workbook = writer.book
    worksheet = writer.sheets['DIFF']
    worksheet.set_default_row(15)

    # define formats (only highlight_fmt/new_fmt/grey_fmt are applied below;
    # the others are registered in the workbook and kept for output parity)
    date_fmt = workbook.add_format({'align': 'center', 'num_format': 'yyyy-mm-dd'})
    center_fmt = workbook.add_format({'align': 'center'})
    number_fmt = workbook.add_format({'align': 'center', 'num_format': '#,##0.00'})
    cur_fmt = workbook.add_format({'align': 'center', 'num_format': '$#,##0.00'})
    perc_fmt = workbook.add_format({'align': 'center', 'num_format': '0%'})
    grey_fmt = workbook.add_format({'font_color': '#E0E0E0'})
    highlight_fmt = workbook.add_format({'font_color': '#FF0000', 'bg_color': '#B1B3B3'})
    new_fmt = workbook.add_format({'font_color': '#32CD32', 'bold': True})

    # highlight changed cells: any cell containing the '→' marker
    worksheet.conditional_format('A1:ZZ1000', {'type': 'text',
                                               'criteria': 'containing',
                                               'value': '→',
                                               'format': highlight_fmt})

    # highlight new/changed rows (worksheet row 0 is the header, hence start=1)
    for rowNo, row in enumerate(dfDiff.index, start=1):
        if row in newRows:
            worksheet.set_row(rowNo, 15, new_fmt)
        if row in droppedRows:
            worksheet.set_row(rowNo, 15, grey_fmt)

    # save -- ExcelWriter.save() was removed in pandas 2.0; close() flushes
    # and writes the file on every pandas version.
    writer.close()
    print('\nDone.\n')
def main():
    """CLI entry: diff the two workbooks given as argv[1] (old) and argv[2] (new)."""
    path_OLD = Path(sys.argv[1])
    path_NEW = Path(sys.argv[2])
    # Use the first column of the new file as the row key for alignment
    df = pd.read_excel(path_NEW)
    index_col = df.columns[0]
    print('\nIndex column: {}\n'.format(index_col))
    excel_diff(path_OLD, path_NEW, index_col)
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | NamrataTtn.noreply@github.com |
a891fb7ff97b2333d83273a74ad2e6610e122e8d | ab6589d704479783e5c899600b4d58a3bcc185fe | /makeComparisonplots_Run305336_both_Eff.py | bf05386fdea438ec3711b7435f3c78d14bb55b01 | [] | no_license | tvami/SiPixelQualtiyValidation | a2b333f3fd0a26f1b808a5b6dea99474d94d7d06 | 51b74d6da25bf7727e7e91a62437c420bab22260 | refs/heads/master | 2021-08-27T20:32:23.320369 | 2021-08-06T01:37:20 | 2021-08-06T01:37:20 | 65,136,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,169 | py | #!/usr/bin/env python
from ROOT import *
import sys
import tdrstyle
# Usage:
# python makeComparisonplots_Run305336_both_Eff.py 305336 ZBbased_PromptRECO_Run305336_DQM.root ZBbased_RECO_Run305336_DQM.root "Tracking/Run summary/TrackParameters/highPurityTracks/dzPV0p1/HitEffFromHitPatternAll/" globalEfficiencies -b
def drawandSaveHistos(pname, chisto, rhisto, xTitle="", yTitle="", xLow=0, xUp=0, yLow=0, yUp=0):
    """Draw current (chisto) vs reference (rhisto) hit-efficiency histograms
    as a CMS-styled TRatioPlot and save '<pname>.png' and '<pname>.pdf'.

    Both histograms are scaled by 0.01 (input efficiencies are in percent).
    The xTitle/xLow/xUp/yLow/yUp parameters are currently unused; axis
    labels and ranges are hard-coded for the 4 barrel layers + 3 disks.
    """
    tdrstyle.setTDRStyle()
    c1 = TCanvas(pname, "", 50,50,1200,1200)
    #c1.cd()
    chisto.SetTitle("")
    rhisto.SetTitle("")
    # Label the 7 detector bins on the current histogram
    chisto.GetXaxis().SetBinLabel(1,"Layer 1");
    chisto.GetXaxis().SetBinLabel(2,"Layer 2");
    chisto.GetXaxis().SetBinLabel(3,"Layer 3");
    chisto.GetXaxis().SetBinLabel(4,"Layer 4");
    chisto.GetXaxis().SetBinLabel(5,"Disk 1");
    chisto.GetXaxis().SetBinLabel(6,"Disk 2");
    chisto.GetXaxis().SetBinLabel(7,"Disk 3");
    chisto.GetXaxis().SetLabelSize(0.15)
    rhisto.GetXaxis().SetBinLabel(1,"Layer 1");
    rhisto.GetXaxis().SetBinLabel(2,"Layer 2");
    rhisto.GetXaxis().SetBinLabel(3,"Layer 3");
    # NOTE(review): the next four lines re-label chisto bins 4-7 instead of
    # rhisto -- looks like a copy-paste slip; rhisto never gets labels 4-7.
    chisto.GetXaxis().SetBinLabel(4,"Layer 4");
    chisto.GetXaxis().SetBinLabel(5,"Disk 1");
    chisto.GetXaxis().SetBinLabel(6,"Disk 2");
    chisto.GetXaxis().SetBinLabel(7,"Disk 3");
    rhisto.GetXaxis().SetLabelSize(0.15)
    chisto.GetYaxis().SetTitle(yTitle)
    chisto.GetYaxis().SetTitleOffset(1.2);
    chisto.GetYaxis().SetTitleSize(0.05);
    rhisto.GetYaxis().SetTitle(yTitle)
    rhisto.GetYaxis().SetTitleOffset(1.2);
    rhisto.GetYaxis().SetTitleSize(0.05);
    chisto.GetXaxis().SetRangeUser(0, 7)
    rhisto.GetXaxis().SetRangeUser(0, 7)
    # Marker styles: green triangles = current, blue squares = reference
    chisto.SetMarkerColor(kGreen+2)
    chisto.SetLineColor(kGreen+2)
    chisto.SetMarkerStyle(kFullTriangleDown)
    chisto.SetMarkerSize(2);
    chisto.Scale(0.01)
    rhisto.SetMarkerColor(kBlue)
    rhisto.SetLineColor(kBlue)
    rhisto.SetMarkerStyle(kOpenSquare)
    rhisto.SetMarkerSize(2);
    rhisto.Scale(0.01)
    # Legend box coordinates (NDC)
    x1_l = 0.92
    #y1_l = 0.91
    y1_l = 0.48
    dx_l = 0.6
    dy_l = 0.1
    x0_l = x1_l-dx_l
    y0_l = y1_l-dy_l
    leg = TLegend(x0_l,y0_l,x1_l, y1_l,"","brNDC");
    leg.SetBorderSize(1);
    leg.SetLineColor(1);
    leg.SetLineStyle(1);
    leg.SetLineWidth(1);
    leg.SetFillColor(0);
    leg.SetFillStyle(1001);
    entry=leg.AddEntry(chisto,"Dynamic, high granularity bad component list","lp");
    entry.SetMarkerSize(2);
    entry2=leg.AddEntry(rhisto,"Static bad component list","lp");
    entry2.SetMarkerSize(2);
    # Ratio plot: upper pad = both histograms, lower pad = chisto/rhisto
    rp = TRatioPlot(chisto,rhisto);
    rp.GetXaxis().SetRangeUser(0, 8)
    rp.GetXaxis().SetLimits(0,8.001)
    rp.SetH1DrawOpt("PESAMEAXIS");
    rp.SetH2DrawOpt("PESAMEAXIS");
    rp.Draw("PESAMEAXIS");
    rp.SetLeftMargin(0.13);
    rp.SetRightMargin(0.05);
    rp.SetUpTopMargin(0.1);
    rp.SetLowTopMargin(0.02);
    rp.SetLowBottomMargin(0.35);
    rp.GetLowerRefGraph().SetMinimum(0.98);
    rp.GetLowerRefGraph().SetMaximum(1.02);
    rp.GetLowerRefGraph().SetMarkerColor(kGreen+2)
    rp.GetLowerRefGraph().SetLineColor(kGreen+2)
    rp.GetLowerRefGraph().SetMarkerStyle(kFullTriangleDown)
    rp.GetLowerRefGraph().SetMarkerSize(2);
    rp.GetLowYaxis().SetNdivisions(505);
    rp.GetLowerRefYaxis().SetTitle("Ratio");
    rp.GetLowerRefYaxis().SetTitleSize(0.05);
    rp.GetLowerRefYaxis().SetTitleOffset(1.2);
    rp.GetLowerRefYaxis().SetLabelSize(0.035)
    rp.GetXaxis().SetBinLabel(1,"Layer 1");
    rp.GetXaxis().SetBinLabel(2,"Layer 2");
    rp.GetXaxis().SetLabelSize(0.035);
    rp.GetLowerRefGraph().GetXaxis().SetBinLabel(1,"Layer 1");
    rp.GetLowerRefGraph().GetXaxis().SetBinLabel(2,"Layer 2");
    rp.GetLowerRefGraph().GetXaxis().SetBinLabel(3,"Layer 3");
    rp.GetLowerRefGraph().GetXaxis().SetBinLabel(4,"Layer 4");
    rp.GetLowerRefGraph().GetXaxis().SetBinLabel(5,"Disk 1");
    rp.GetLowerRefGraph().GetXaxis().SetBinLabel(6,"Disk 2");
    rp.GetLowerRefGraph().GetXaxis().SetBinLabel(7,"Disk 3");
    rp.GetLowerRefGraph().GetXaxis().SetLabelSize(0.035)
    rp.GetUpperRefYaxis().SetRangeUser(0.75, 1.01)
    rp.GetUpperRefYaxis().SetLimits(0.75, 1.01)
    axis1 = TGaxis(-4.5,-0.2,5.5,-0.2,-6,8,510,"");
    axis1.SetName("axis1");
    axis1.Draw("SAME");
    gPad.Update()
    c1.Modified()
    c1.Update()
    leg.Draw("SAME");
    # CMS-style annotation text (luminosity, experiment label)
    tex1 = TLatex(0.94,0.94,"0.142 pb^{-1} (13 TeV)");
    tex1.SetNDC();
    tex1.SetTextAlign(31);
    tex1.SetTextFont(42);
    tex1.SetTextSize(0.03);
    tex1.SetLineWidth(2);
    tex1.Draw("SAME");
    #tex2 = TLatex(0.1,0.96,"CMS");
    tex2 = TLatex(0.15,0.94,"CMS");#if there is 10^x
    tex2.SetNDC();
    tex2.SetTextFont(61);
    tex2.SetTextSize(0.0375);
    tex2.SetLineWidth(2);
    tex2.Draw("SAME");
    #tex3 = TLatex(0.18,0.96,"Preliminary 2017");
    tex3 = TLatex(0.23,0.94,"Preliminary 2017"); #if there is 10^x
    tex3.SetNDC();
    tex3.SetTextFont(52);
    tex3.SetTextSize(0.0285);
    tex3.SetLineWidth(2);
    tex3.Draw("SAME");
    #tex4 = TLatex(0.55,0.94,"L_{int} = 0.142 /pb");
    #tex4.SetNDC();
    #tex4.SetTextFont(52);
    #tex4.SetTextSize(0.0285);
    #tex4.SetLineWidth(2);
    #tex4.Draw("SAME");
    c1.SaveAs(pname + '.png')
    #c1.SaveAs(pname + '.C')
    c1.SaveAs(pname + '.pdf')
def extractPlot(cFile, rFile, run, folder, histo, xTitle="", yTitle="", xLow=0, xUp=0, yLow=0, yUp=0):
basepath='DQMData/Run ' + str(run) + '/'
histopath=basepath + folder
print "Target histo=", histopath + histo
chisto=cFile.Get(histopath + histo)
rhisto=rFile.Get(histopath + histo)
#pname='comparison' + histo
pname='Run305336_both_Eff'
drawandSaveHistos(pname, chisto, rhisto, xTitle, yTitle, xLow, xUp, yLow, yUp)
# Command-line arguments: run number, then the two DQM ROOT files.
#run number
run=sys.argv[1]
#current file (dynamic bad-component list)
iFilec=TFile.Open(sys.argv[2])
#reference file (static bad-component list)
iFiler=TFile.Open(sys.argv[3])
#folder name inside the DQM file  ### provide this inside ""
folder=sys.argv[4] # e.g. "Tracking/Run summary/TrackParameters/generalTracks/GeneralProperties/"
#histogram name within that folder
ht=sys.argv[5] # e,g,'TrackPhi_ImpactPoint_GenTk'
print ht
# Fixed y-axis title and efficiency ranges for the hit-efficiency comparison
extractPlot(iFilec, iFiler, run, folder, ht, "", "N_{clust.}/N_{trk meas.}", 0., 7., 0.7, 1.09)
| [
"noreply@github.com"
] | tvami.noreply@github.com |
59e7bcf06d29ba6245a89d43c722a9adb376ecd1 | 5e789ea5a3aa9dd8caca7d9075bf38ae3c461075 | /Conteudo das Aulas/006/Exercício 2.py | 61ebbcb6d2d006808b5534ac1e08bd9b8ab138b7 | [
"Apache-2.0"
] | permissive | cerberus707/lab-python | 5138273ae6b38cdc23e6681ab9e148add128a7c5 | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | refs/heads/master | 2020-12-12T18:43:53.980296 | 2020-01-16T13:44:04 | 2020-01-16T13:44:04 | 234,203,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | """
Organize os numeros 2,3,4,5,10,12 para obter a saída 18
em uma única operação:
x = 12*3
x = x + 4
x = x//10
x = x*5
x = x - 2
print(x)
"""
# Step-by-step version, using augmented assignments.
x = 12 * 3   # 36
x += 4       # 40
x //= 10     # 4
x *= 5       # 20
x -= 2       # 18
print(x)

# Same computation collapsed into a single expression.
x = ((12 * 3 + 4) // 10 * 5) - 2
print(x)
"robson.pereira@multivarejogpa.com.br"
] | robson.pereira@multivarejogpa.com.br |
b2e41b212ae0f0f766a5a7578bfa103f16c65aa6 | ce5e04e465b30d168427d3578f9883c91e50b34d | /3.5.3/bin/idle3.5 | 4aa30b575d34f1e7573e79936a051e8cfed90825 | [] | no_license | miio/pyenv-debian9-bottle | fdb96c58defd5c5d25c31963aeaa8bbc3c09876d | b3efa68c773f9d28b31632c91cc3dd975d33cab5 | refs/heads/master | 2022-10-30T07:09:22.131391 | 2018-02-26T16:15:21 | 2018-02-26T16:15:21 | 122,995,367 | 0 | 1 | null | 2022-09-29T21:20:40 | 2018-02-26T16:15:48 | Python | UTF-8 | Python | false | false | 126 | 5 | #!/home/kawaz.org/.pyenv/versions/3.5.3/bin/python3.5
# IDLE launcher script for this Python 3.5 installation: delegates straight
# to the idlelib shell entry point (module was named PyShell in 3.5).
from idlelib.PyShell import main
if __name__ == '__main__':
    main()
| [
"info@miio.info"
] | info@miio.info |
6719372dc9c4289d882f015d127b48efb9ac2c35 | ca83edf8746a123534e98b1d6489128764b76239 | /__init__.py | 7fea3e538b664b88e5ea65c19bd6abade90548e7 | [] | no_license | weixinkai/crawlerLib | 1df9018f59ddd83e1e901e55acc750115f69f1a8 | 376c2af41fc8449e58a75c738fdd8e3b007640af | refs/heads/master | 2021-01-21T22:10:33.030817 | 2017-06-23T04:04:13 | 2017-06-23T04:04:13 | 95,181,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,390 | py | # coding:utf-8
import os
import sys
import logging
import logging.config
import configparser
from collections import deque
from redis import StrictRedis
from threading import Timer
from .downloader import DownloaderPool
from .spider import SpiderPool
from .redis_controller import RedisController
from .logging_conf import logging_config
def config_init(config_name='crawler.conf'):
    '''Load the crawler framework configuration.

    Reads *config_name* if the file exists; otherwise populates default
    sections for the downloader pool, the spider pool and Redis, and writes
    them back to disk so subsequent runs pick up the same settings.

    config_name: path of the INI file to read/create.  Defaults to
        'crawler.conf' in the working directory (backward compatible with
        the previous hard-coded name).
    Returns the populated configparser.ConfigParser instance.  Note that
    configparser stores every value as a string ('6379', '4', ...).
    '''
    config = configparser.ConfigParser()
    if os.path.isfile(config_name):
        config.read(config_name)
    else:
        # First run: seed the defaults and persist them.
        config['DownloaderPool'] = {'thread_num': 4,
                                    'freq': 1}
        config['SpiderPool'] = {'thread_num': 4}
        config['Redis'] = {'host': '127.0.0.1',
                           'port': 6379,
                           'db': 0,
                           'task_url_name': 'TaskURLs',
                           'seen_url_name': 'SeenURLs'}
        with open(config_name, 'w') as configFile:
            config.write(configFile)
    return config
class CrawlerCoordinator:
    """Coordinator of the crawler framework: wires together the URL manager
    (Redis), the downloader pool and the spider (analyzer) pool, and emits
    periodic statistics."""

    def __init__(self, analyzer_class, put_items_api):
        '''Build the crawler pipeline.

        analyzer_class: class used by the spider pool to parse responses.
        put_items_api: callable that receives extracted items for storage.
        Exits the process if Redis cannot be reached.
        '''
        # Logging must be configured before any component logs.
        self.logging_init()
        self.logger = logging.getLogger('Crawler')
        # Load (or create) the framework configuration.
        config = config_init()
        try:
            # Verify that Redis is reachable before starting anything.
            self.urls_manager = RedisController(config)
            self.urls_manager.check_connect()
        except Exception as e:
            self.logger.error('Redis connect error! ' + str(e))
            sys.exit(0)
        # Pool of worker threads that parse downloaded pages.
        self.spider_pool = SpiderPool(
            config=config,
            analyzer_class=analyzer_class,
            urls_storage=self.urls_manager.storage_urls,
            items_storage=put_items_api
        )
        # Pool of worker threads that fetch URLs.
        self.downloader_pool = DownloaderPool(
            config=config,
            get_url=self.urls_manager.get_url,
            response_text_handle=self._response_text_handle
        )
        # Count of responses handed to the spider pool.
        self.response_count = 0
        # Timer that periodically logs statistics (created on start()).
        self.info_timer = None

    def start(self):
        '''Start the crawler framework (stats timer + downloader pool).'''
        self.logger.info('Crawler Start work...')
        # Start the periodic statistics log.
        self._new_info_timer()
        # Start fetching; responses flow into the spider pool via the handler.
        self.downloader_pool.start()

    def stop(self):
        '''Stop the crawler framework and log final statistics.'''
        if self.info_timer: self.info_timer.cancel()
        self.logger.info('Crawler Stopping...')
        # Stop the downloader first so no new responses arrive while the
        # spider pool drains.
        self.downloader_pool.stop()
        self.logger.info('DownloaderPool stopped!')
        self.spider_pool.stop()
        self.logger.info('SpiderPool stopped!')
        self.logger.info('Crawler Stopped!')
        self.info()

    def logging_init(self):
        """Apply the dictionary-based logging configuration."""
        logging.config.dictConfig(logging_config)

    def task_init(self, start_urls, isFlushDB=False):
        '''Insert the seed URLs into Redis for the crawler.

        start_urls: list of URL strings; anything else aborts the process.
        isFlushDB: when True, wipe the Redis database first.
        '''
        if not isinstance(start_urls, list):
            # Bug fix: this used to call self.error(...), a method that does
            # not exist on this class (AttributeError); log the type error.
            self.logger.error(TypeError('task urls not list'))
            sys.exit(0)
        if isFlushDB:
            self.urls_manager.flushdb()
        self.urls_manager.storage_urls(start_urls)

    def _new_info_timer(self):
        '''Log statistics every 5 seconds via a self-rescheduling Timer.'''
        if self.info_timer:
            self.info()
        self.info_timer = Timer(5, self._new_info_timer)
        self.info_timer.start()

    def _response_text_handle(self, response_text):
        '''Receive one response text from the downloader and queue it for parsing.'''
        self.response_count += 1
        self.spider_pool.add_task(response_text)

    def info(self):
        '''Log the current crawl statistics.'''
        self.logger.info('========Crawler information========')
        self.logger.info('Seen urls: {0}, Task urls: {1}'
                         .format(self.urls_manager.seen_count,
                                 self.urls_manager.task_count))
        self.logger.info('Get responses: {0}'.format(self.response_count))
        self.logger.info('Extract items: {0}'.format(self.spider_pool.items_count))
        self.logger.info('-----------------------------------')
| [
"k@example.com"
] | k@example.com |
079925269fb376dac8db7c2529266552831a1d8b | ad29059c6f018c17ce7e0bc6b394df4e2d159c2e | /alembic/versions/726480f10a80_add_last_few_column_to_post_table.py | 6b4868f7089b8c5f86ed458fff7a308448896b42 | [] | no_license | dwebsites/fastapi-course | 51b3fb21f63288da518fa3ddbd9ebe41ff4e45bd | 235ef28756d59746b91134484d4ca61880979813 | refs/heads/main | 2023-09-03T04:21:28.020050 | 2021-11-20T20:18:14 | 2021-11-20T20:18:14 | 429,157,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | """Add last few column to post table
Revision ID: 726480f10a80
Revises: 241d3fac9298
Create Date: 2021-11-17 17:23:32.877269
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '726480f10a80'
down_revision = '241d3fac9298'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('posts', sa.Column('published', sa.Boolean(), nullable=False,
server_default='TRUE'),)
op.add_column('posts', sa.Column('created_at', sa.TIMESTAMP(timezone=True),
nullable=False, server_default=sa.text('NOW()')))
pass
def downgrade():
op.drop_column('posts', 'published')
op.drop_column('posts', 'created_at')
pass
| [
"dondim@protonmail.com"
] | dondim@protonmail.com |
c50802232a8fa7ed73ec2a75e68ab1398f7d5a29 | 10fd4dcfc7dc44db168bb1ae528aab4cfb248841 | /data/Cnae.py | 7d9258d2d9c6dcf69ad01786930be86d4bb8fdf3 | [] | no_license | PkuDavidGuan/ML-hw1 | cfcf298580e04f3df4ad7edbfbac6b6af37914b6 | b1ae9f76a2364108d9d4af1b0a4a31cb51840b2d | refs/heads/master | 2020-04-02T12:09:43.693739 | 2018-10-31T11:14:50 | 2018-10-31T11:14:50 | 154,421,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | from sklearn.model_selection import train_test_split
def parse(line):
    """Split one comma-separated CNAE record into (features, label).

    The first integer on the line is the class label; the remaining
    integers are the feature vector.
    """
    values = list(map(int, line.split(',')))
    label = values[0]
    return values[1:], label
def Cnae(data_path, with_val=True):
    """Load the CNAE CSV dataset and split it into train/(val)/test sets.

    data_path: path to the CSV file, one 'label,feat1,feat2,...' row per line.
    with_val: when True, carve a validation set out of the training split and
        return (x_train, x_val, x_test, y_train, y_val, y_test); otherwise
        return sklearn's usual (x_train, x_test, y_train, y_test).
    NOTE: the 900/720 split sizes assume the standard 1080-row CNAE-9
    dataset -- TODO confirm for other inputs.
    """
    x_array, y_array = [], []
    with open(data_path, 'r') as infile:
        while True:
            # Stops at EOF *or* at the first blank line -- records after an
            # embedded blank line would be silently skipped.
            line = infile.readline().strip()
            if not line:
                break
            x, y = parse(line)
            x_array.append(x)
            y_array.append(y)
    if not with_val:
        return train_test_split(x_array, y_array, train_size=900)
    else:
        # First split off the test set, then split train into train/val.
        x_train, x_test, y_train, y_test = train_test_split(x_array, y_array, train_size=900)
        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=720)
        return x_train, x_val, x_test, y_train, y_val, y_test
| [
"1400012923@pku.edu.cn"
] | 1400012923@pku.edu.cn |
959f164254e5b59155446a5f7f9c6b2a856dc192 | 6665bee3e0f97be0e3b48e8de021a8634704c61a | /src/flask_video_streaming/pi_camera.py | 6857ac45ce9a7ab167654ff83c68b6758de7bf08 | [
"MIT"
] | permissive | rdilare/ros_bot | a50fb9a991668db64e1bac9738f1a9711f043978 | 64f87eadf20dcaec66713329553cbe48bf512715 | refs/heads/master | 2020-09-17T03:42:02.995200 | 2019-12-15T12:40:57 | 2019-12-15T12:40:57 | 223,976,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | import time
import io
import threading
import picamera
import cv2
import numpy as np
class Camera(object):
    """Raspberry Pi camera wrapper: a background thread continuously grabs
    JPEG frames; clients call get_frame() for the latest one.  The capture
    thread stops itself after 10 s without a client request.

    State is kept on the class, so all instances share one camera thread.
    """
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame (JPEG bytes) stored here by background thread
    last_access = 0  # time of last client access to the camera

    def initialize(self):
        """Start the capture thread if needed and block until a frame exists."""
        if Camera.thread is None:
            # start background frame thread
            Camera.thread = threading.Thread(target=self._thread)
            Camera.thread.start()

            # wait until frames start to be available
            while self.frame is None:
                time.sleep(0)

    def get_frame(self):
        """Return (jpeg_bytes, decoded_bgr_image) for the most recent frame."""
        Camera.last_access = time.time()
        self.initialize()
        # Bug fix: np.fromstring on bytes is deprecated (and slated for
        # removal); frombuffer is the zero-copy replacement for raw bytes.
        np_arr = np.frombuffer(self.frame, dtype=np.uint8)
        img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        # Throttle clients to roughly 20 fps.
        time.sleep(.05)
        return self.frame, img

    @classmethod
    def _thread(cls):
        """Capture loop: publish each JPEG into cls.frame until idle timeout."""
        with picamera.PiCamera() as camera:
            # camera setup
            camera.resolution = (320, 240)
            camera.hflip = True
            camera.vflip = True

            # let camera warm up
            camera.start_preview()
            time.sleep(2)

            stream = io.BytesIO()
            for foo in camera.capture_continuous(stream, 'jpeg',
                                                 use_video_port=True):
                # store frame
                stream.seek(0)
                cls.frame = stream.read()

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()

                # if there hasn't been any clients asking for frames in
                # the last 10 seconds stop the thread
                if time.time() - cls.last_access > 10:
                    break
        # Allow initialize() to start a fresh thread on the next request.
        cls.thread = None
| [
"rdandroidunt@gmail.com"
] | rdandroidunt@gmail.com |
90b236f23a80b2ace3809fc2e84299c42dc16c09 | 6646934b5dfa081c486d9395009d6b8b0f731430 | /lab-04.py | fbe00abcf94be1915ee8671dc8cdb9ae3dd21c7b | [] | no_license | mqpasta/python-ds-labs | 0429a02062154858b32868e60e7dd4ea89bce986 | e05618047db4ebf71967a320785207a87170afc6 | refs/heads/master | 2022-03-25T17:37:55.394517 | 2019-12-30T18:28:30 | 2019-12-30T18:28:30 | 214,096,123 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,109 | py | import random
class Sorting:
    """Teaching container: a list of floats plus classic sort/search algorithms."""

    def __init__(self):
        # Per-instance storage.  (Previously `data` was a mutable *class*
        # attribute, so every instance shared -- and kept appending to --
        # the same list.)
        self.data = []

    def Print(self):
        """Print the current contents of the data list."""
        print(self.data)

    def GenerateRandom(self, n):
        """Append n random floats in [0, 1), rounded to 3 decimals."""
        for i in range(n):
            self.data.append(round(random.random(), 3))

    def BubbleSort(self):
        """Sort the data in place with bubble sort, O(n^2)."""
        for i in range(len(self.data) - 1):
            # Bug fix: the inner bound was hard-coded `len-1-1`, which never
            # compared the final pair and left lists unsorted; it must
            # shrink with the pass number instead.
            for j in range(len(self.data) - 1 - i):
                if self.data[j] > self.data[j + 1]:
                    t = self.data[j]
                    self.data[j] = self.data[j + 1]
                    self.data[j + 1] = t

    def InsertSort(self):
        """Sort the data in place with insertion sort, O(n^2)."""
        for i in range(1, len(self.data)):
            key = self.data[i]
            j = i - 1
            # Shift larger elements right to open a slot for key.
            while j >= 0 and self.data[j] > key:
                self.data[j + 1] = self.data[j]
                j = j - 1
            self.data[j + 1] = key

    def SelectionSort(self):
        """Sort the data in place with selection sort, O(n^2)."""
        l = len(self.data)
        for i in range(l):
            m = i  # index of the minimum of the unsorted tail
            for j in range(i + 1, l):
                if self.data[j] < self.data[m]:
                    m = j
            if m != i:
                t = self.data[m]
                self.data[m] = self.data[i]
                self.data[i] = t

    def IsSorted(self):
        """Return True if the data is in non-decreasing order."""
        for i in range(len(self.data) - 1):
            if self.data[i] > self.data[i + 1]:
                return False
        return True

    def BinarySearch(self, v):
        """Return the index of v in the (sorted) data, or -1 if absent."""
        l = 0
        r = len(self.data) - 1
        while l <= r:
            m = (l + r) // 2
            if self.data[m] == v:
                return m
            if v < self.data[m]:
                r = m - 1
            else:
                l = m + 1
        return -1

    def Search(self, v):
        """Binary-search for v, sorting the data first if necessary."""
        if not self.IsSorted():
            # Bug fix: this called self.InsertionSort(), a method that does
            # not exist (it is named InsertSort) -> AttributeError.
            self.InsertSort()
        return self.BinarySearch(v)
def Test():
    """Smoke-test the Sorting class: sort random data twice, then search.

    Note: the second GenerateRandom(12) appends to the existing 10 items
    (GenerateRandom only appends), so the second round sorts 22 elements.
    The "sorint" typo below is in a runtime string and left as-is here.
    """
    s = Sorting()
    s.GenerateRandom(10)
    print("before sorint")
    s.Print()
    s.BubbleSort()
    print("After sorting")
    s.Print()
    print("regenerate")
    s.GenerateRandom(12)
    print("before sorint")
    s.Print()
    s.SelectionSort()
    print("After sorting")
    s.Print()
    # Data is sorted at this point, so Search goes straight to BinarySearch.
    print(s.Search(s.data[0]))
| [
"mqpasta@gmail.com"
] | mqpasta@gmail.com |
77eae7de5545c636d596feec9e0fe110b7b5700a | bc441bb06b8948288f110af63feda4e798f30225 | /architecture_view_sdk/model/flowable_service/bpmn_end_event_pb2.py | 3c2b2fa877b300c5b615c5a2704c5007ff77e7ce | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,097 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: bpmn_end_event.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from architecture_view_sdk.model.flowable_service import bpmn_links_pb2 as architecture__view__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2
# File-level descriptor: embeds the compiled bpmn_end_event.proto
# (proto3, package flowable_service) as serialized bytes.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='bpmn_end_event.proto',
  package='flowable_service',
  syntax='proto3',
  serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_service'),
  serialized_pb=_b('\n\x14\x62pmn_end_event.proto\x12\x10\x66lowable_service\x1a=architecture_view_sdk/model/flowable_service/bpmn_links.proto\"F\n\x0c\x42PMNEndEvent\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x05links\x18\x02 \x01(\x0b\x32\x1b.flowable_service.BPMNLinks\x42LZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_service\x62\x06proto3')
  ,
  dependencies=[architecture__view__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2.DESCRIPTOR,])




# Message descriptor for BPMNEndEvent: a string 'id' field (1) and a
# message-typed 'links' field (2) pointing at flowable_service.BPMNLinks.
_BPMNENDEVENT = _descriptor.Descriptor(
  name='BPMNEndEvent',
  full_name='flowable_service.BPMNEndEvent',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='flowable_service.BPMNEndEvent.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='links', full_name='flowable_service.BPMNEndEvent.links', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=105,
  serialized_end=175,
)

# Wire the cross-file message type and register the file descriptor.
_BPMNENDEVENT.fields_by_name['links'].message_type = architecture__view__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2._BPMNLINKS
DESCRIPTOR.message_types_by_name['BPMNEndEvent'] = _BPMNENDEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor above.
BPMNEndEvent = _reflection.GeneratedProtocolMessageType('BPMNEndEvent', (_message.Message,), {
  'DESCRIPTOR' : _BPMNENDEVENT,
  '__module__' : 'bpmn_end_event_pb2'
  # @@protoc_insertion_point(class_scope:flowable_service.BPMNEndEvent)
  })
_sym_db.RegisterMessage(BPMNEndEvent)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
"service@easyops.cn"
] | service@easyops.cn |
ca4da45fcb1463b4a36fab4f758aa8c8014d2e62 | 26d84ee00ccda6815596e7b76609d4d25fe46c41 | /Lab5/knn.py | dbb5c9676ce1cd54b747725e6434426755254c38 | [] | no_license | venkateshtanniru/Data-Mining | 9a49ad91fcc3aebf37fc29a1cf8ffa3461d55180 | 566ced52daf1929b0ed05a6e668191dbc9b55c6a | refs/heads/master | 2023-04-08T12:39:48.229083 | 2019-04-14T06:50:49 | 2019-04-14T06:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | from collections import Counter
from linear_algebra import distance
from statistics import mean
import math, random
import matplotlib.pyplot as plt
import data
def majority_vote(labels):
    """Return the most common label.

    *labels* must be ordered from nearest to farthest neighbour: when the
    top count is tied, the farthest label is dropped and the vote is
    repeated until a unique winner emerges.
    """
    counts = Counter(labels)
    winner, winner_count = counts.most_common(1)[0]
    tied = sum(1 for c in counts.values() if c == winner_count)
    if tied > 1:
        # Tie: retry without the farthest neighbour.
        return majority_vote(labels[:-1])
    return winner
def knn_classify(k, labeled_points, new_point):
    """each labeled point should be a pair (point, label)"""
    def dist_to_new(point_label):
        return distance(point_label[0], new_point)

    # Order all labeled points from nearest to farthest.
    nearest_first = sorted(labeled_points, key=dist_to_new)

    # Labels of the k closest points, still nearest-first.
    k_nearest = [label for _, label in nearest_first[:k]]

    # Let the k nearest neighbours vote.
    return majority_vote(k_nearest)
def predict_preferred_language_by_city(k_values, cities):
    """Leave-one-out kNN evaluation over cities.

    For every k in *k_values*, classify each city against all the others
    with knn_classify, count how many predictions match the city's actual
    preferred language, and print the tally per k.
    """
    for k in k_values:
        num_correct = 0
        for city in cities:
            # Classify this city using every city except itself.
            others = cities.copy()
            others.remove(city)
            predicted = knn_classify(k, others, city[0])
            if predicted == city[1]:
                num_correct += 1
        print(k, "neighbor[s]:", num_correct, "correct out of", len(cities))
if __name__ == "__main__":
k_values = [1,3, 5, 7]
# TODO
# Import cities from data.py and pass it into predict_preferred_language_by_city(x, y).
predict_preferred_language_by_city(k_values, data.cities) | [
"venkateshtanniru9@gmail.com"
] | venkateshtanniru9@gmail.com |
03bca080a7ade2f5c8e31f19c07701b55b95c6aa | 8f7c595f2b9d075a89417760b7fbf9abb1fecb72 | /tele_twitter.py | 8b5573134e70b914312a9c197b1313e688781062 | [
"MIT"
] | permissive | MainakMaitra/trading-utils | 555ed240a20b26d4876f1490fc8a2d9273231fc5 | 3e73091b4d3432e74c385a9677b7f7ca4192c67f | refs/heads/main | 2023-07-04T09:19:40.122188 | 2021-08-08T09:01:37 | 2021-08-08T09:01:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,825 | py | """
Twitter -> Telegram
"""
import logging
import os
import time
from argparse import ArgumentParser
from peewee import *
from common import uuid_gen
from common.environment import GROUP_CHAT_ID
from common.logger import init_logging
from common.tele_notifier import send_message_to_telegram
from common.twitter_api import get_twitter_home_timeline
home_dir = os.getenv("HOME")
db = SqliteDatabase(home_dir + "/tele_twitter.db")
class TweetData(Model):
id = UUIDField(primary_key=True)
twitter_handle = CharField()
timestamp = BigIntegerField()
tweet_id = CharField()
tweet = CharField()
posted_at = DateTimeField(null=True)
class Meta:
database = db
@staticmethod
def save_from(twitter_handle, tweet, tweet_id, posted_at):
entity = dict(
id=uuid_gen(),
timestamp=time.time(),
twitter_handle=twitter_handle,
tweet_id=tweet_id,
tweet=tweet,
posted_at=posted_at,
)
TweetData.insert(entity).execute()
TweetData.create_table()
def save_data(tweet_data):
TweetData.save_from(**tweet_data)
def tweet_already_processed(current_tweet_id):
selected_tweet = TweetData.get_or_none(TweetData.tweet_id == current_tweet_id)
return selected_tweet is not None
def extract_tweet_id(new_tweet):
return new_tweet.id
def extract_tweet_time(recent_tweet):
return recent_tweet.created_at
def main(poll_freq_in_secs):
home_timeline = get_twitter_home_timeline()
logging.info("==> Found tweets {}".format(len(home_timeline)))
for tweet in home_timeline:
tweet_author_name = tweet.author.name
tweet_author_screen_name = tweet.author.screen_name
tweet_id = tweet.id
tweet_posted_date = tweet.created_at
formatted_posted_dt = tweet_posted_date.strftime("%H:%M(%d %B)")
tweet_text = tweet.text
if tweet_already_processed(tweet_id):
logging.warning(
"Old Tweet from {} at {} -> {} - already processed".format(
tweet_author_screen_name, tweet_posted_date, tweet_id
)
)
continue
else:
entity = dict(
twitter_handle=tweet_author_screen_name,
tweet=tweet_text,
tweet_id=tweet_id,
posted_at=tweet_posted_date,
)
save_data(entity)
if tweet_text.startswith("RT"):
continue
try:
header = f"""👀 {tweet_author_name} at [{formatted_posted_dt}](https://twitter.com/{tweet_author_screen_name}/status/{tweet_id})"""
send_message_to_telegram(
header, disable_web_preview=False, override_chat_id=GROUP_CHAT_ID
)
except:
send_message_to_telegram(
"🚨 Something went wrong trying to process {}".format(tweet)
)
logging.info(f"⏱ Sleeping for {poll_freq_in_secs}")
time.sleep(poll_freq_in_secs)
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument(
"-w",
"--wait-in-seconds",
type=int,
help="Wait between sending tweets in seconds",
default=30,
)
parser.add_argument(
"-r", "--run-once", action="store_true", default=False, help="Run once"
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
init_logging()
poll_freq_in_secs = args.wait_in_seconds
run_once = args.run_once
while True:
try:
main(poll_freq_in_secs)
if run_once:
logging.info("Running once => Exit")
break
except Exception:
logging.exception("🚨🚨🚨 Something is wrong")
| [
"575441+namuan@users.noreply.github.com"
] | 575441+namuan@users.noreply.github.com |
b132b7781065c9f6cbf69983c14d98d1baf4aea0 | ed8ebd0f2bb2d0a698b999f8bfc6d33a42146a51 | /traj_opt/script/peg_trajectory_optimize_.py | 90333511546e4ce754d92d417f9a076492f5921f | [] | no_license | karpalexander1997org1/FLSpegtransferHO | 5f99eb6136d80d7c5cf40f9a439071f2688483b9 | f2d54afbfd48b4a6728dafabddbcfa7c42bc94d1 | refs/heads/main | 2023-05-25T18:22:38.453382 | 2021-06-10T13:08:30 | 2021-06-10T13:08:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,991 | py | from FLSpegtransfer.traj_opt.PegMotionOptimizer import PegMotionOptimizer
# from FLSpegtransfer.motion.dvrkKinematics import dvrkKinematics
from FLSpegtransfer.utils import CmnUtil as U
import numpy as np
import matplotlib.pyplot as plt
def cubic(q0, qf, v_max, a_max, t_step):
num_axis = len(q0)
q0 = np.array(q0)
qf = np.array(qf)
v_max = np.array(v_max)
a_max = np.array(a_max)
# v_max = 1.5*(qf-q0)/tf
tf_vel = 1.5*(qf-q0) / v_max
# a_max = 6*(qf-q0)/(tf**2)
tf_acc = np.sqrt( abs(6*(qf-q0) / a_max))
tf_Rn = np.maximum(tf_vel, tf_acc) # tf for each axis (nx1 array)
tf = max(tf_Rn) # maximum scalar value among axes
# Define coefficients
a = -2 * (qf - q0) / (tf ** 3)
b = 3 * (qf - q0) / (tf ** 2)
c = np.zeros_like(a)
d = q0
# Calculate trajectories
t = np.arange(start=0.0, stop=tf, step=t_step)
joint = []
for i in range(num_axis):
# joint traj.
q = a[i]*t**3 + b[i]*t**2 + c[i]*t + d[i]
joint.append(q)
joint = np.array(joint).T
assert ~np.isnan(t).any()
assert ~np.isnan(joint).any()
return t, joint
# q0, qf could be cartesian coordinates or joint configurations
def LSPB(q0, qf, v_max, a_max, t_step):
q0 = np.array(q0)
qf = np.array(qf)
v_max = np.array(v_max)
a_max = np.array(a_max)
num_axis = len(q0)
# Design variables
A = max(abs((qf-q0)/a_max))
B = max(abs((qf-q0)/v_max))
tb = A/B
tf = B + tb
if tf < 2*tb:
tb = np.sqrt(A)
tf = 2*tb
# Define coefficients
A = np.array([[tb**2, -tb, -1, 0.0, 0.0, 0.0],
[2*tb, -1, 0.0, 0.0, 0.0, 0.0],
[0.0, tf-tb, 1, -(tf-tb)**2, -(tf-tb), -1],
[0.0, 1.0, 0.0, -2*(tf-tb), -1, 0.0],
[0.0, 0.0, 0.0, 2*tf, 1.0, 0.0],
[0.0, 0.0, 0.0, tf**2, tf, 1.0]])
b = np.block([[-q0], [np.zeros_like(q0)], [np.zeros_like(q0)], [np.zeros_like(q0)], [np.zeros_like(q0)], [qf]])
coeff = np.linalg.inv(A).dot(b)
a1 = coeff[0]
a2 = coeff[1]
b2 = coeff[2]
a3 = coeff[3]
b3 = coeff[4]
c3 = coeff[5]
# Calculate trajectories
t = np.arange(start=0.0, stop=tf, step=t_step)
t1 = t[t<tb]
t2 = t[(tb<=t)&(t<tf-tb)]
t3 = t[tf-tb<=t]
joint = []
for i in range(num_axis):
# joint traj.
traj1 = a1[i]*t1**2 + q0[i]
traj2 = a2[i]*t2 + b2[i]
traj3 = a3[i]*t3**2 + b3[i]*t3 + c3[i]
q = np.concatenate((traj1, traj2, traj3))
joint.append(q)
joint = np.array(joint).T
assert ~np.isnan(t).any()
assert ~np.isnan(joint).any()
return t, joint
def plot_trajectories(t, joint, t_step):
vel = []
acc = []
joint = np.array(joint).T
for q in joint:
# velocity traj.
q_prev = np.insert(q, 0, 0, axis=0)
q_prev = np.delete(q_prev, -1, axis=0)
qv = (q - q_prev)/t_step
qv[0] = 0.0
vel.append(qv)
# acceleration traj.
qv_prev = np.insert(qv, 0, 0, axis=0)
qv_prev = np.delete(qv_prev, -1, axis=0)
qa = (qv - qv_prev)/t_step
qa[0] = 0.0
acc.append(qa)
joint = np.array(joint).T
vel = np.array(vel).T
acc = np.array(acc).T
print ("maximum vel:", np.max(abs(vel), axis=0))
print ("maximum acc:", np.max(abs(acc), axis=0))
# Create plot
# plt.title('joint angle')
ax = plt.subplot(911)
plt.plot(t, joint[:, 0], 'r.-')
plt.ylabel('x (mm)')
ax = plt.subplot(912)
plt.plot(t, joint[:, 1], 'r.-')
plt.ylabel('y (mm)')
ax = plt.subplot(913)
plt.plot(t, joint[:, 2], 'r.-')
plt.ylabel('z (mm)')
ax = plt.subplot(914)
plt.plot(t, vel[:, 0], 'b.-')
plt.ylabel('vx (mm)')
ax = plt.subplot(915)
plt.plot(t, vel[:, 1], 'b.-')
plt.ylabel('vy (mm)')
ax = plt.subplot(916)
plt.plot(t, vel[:, 2], 'b.-')
plt.ylabel('vz (mm)')
ax = plt.subplot(917)
plt.plot(t, acc[:, 0], 'g.-')
plt.ylabel('ax (mm)')
ax = plt.subplot(918)
plt.plot(t, acc[:, 1], 'g.-')
plt.ylabel('ay (mm)')
ax = plt.subplot(919)
plt.plot(t, acc[:, 2], 'g.-')
plt.ylabel('az (mm)')
plt.xlabel('t (sec)')
plt.show()
from FLSpegtransfer.motion.dvrkDualArm import dvrkDualArm
from FLSpegtransfer.motion.dvrkKinematics import dvrkKinematics
import FLSpegtransfer.motion.dvrkVariables as dvrkVar
import time
dvrk_model = dvrkKinematics()
# pose information of pick & place
pose1 = np.array([0.14635528297124054, -0.02539699498070678, -0.15560356171404516, 0.6])
pose2 = np.array([0.14635528297124054, -0.02539699498070678, -0.12, 0.6])
pose3 = np.array([0.03511456574520817, -0.03390017669363312, -0.12, 0.0])
q0 = dvrk_model.pose_to_joint(pos=pose1[:3], rot=U.euler_to_quaternion([pose1[3], 0.0, 0.0]))
qw = dvrk_model.pose_to_joint(pos=pose2[:3], rot=U.euler_to_quaternion([pose2[3], 0.0, 0.0]))
qf = dvrk_model.pose_to_joint(pos=pose3[:3], rot=U.euler_to_quaternion([pose3[3], 0.0, 0.0]))
print (q0)
print (qw)
print (qf)
v_max = np.array([2.0, 2.0, 0.2, 10.0, 10.0, 10.0]) # max velocity (rad/s) or (m/s)
a_max = np.array([20.0, 20.0, 1.6, 20.0, 20.0, 20.0]) # max acceleration (rad/s^2) or (m/s^2)
opt = PegMotionOptimizer()
pos, vel, acc, t = opt.optimize_motion(q0, qw, qf, max_vel=dvrkVar.v_max, max_acc=dvrkVar.a_max, t_step=0.01, horizon=50, print_out=True, visualize=False)
dvrk = dvrkDualArm()
while True:
q = U.euler_to_quaternion([pose1[3], 0.0, 0.0])
dvrk.set_pose(pos1=pose1[:3], rot1=q)
dvrk.set_jaw(jaw1=[0.5])
st = time.time()
for joint in pos:
# q = U.euler_to_quaternion([pose[3], 0.0, 0.0])
# dvrk.set_pose(pos1=pose[:3], rot1=q, wait_callback=False)
dvrk.set_joint(joint1=joint, wait_callback=False)
time.sleep(0.01)
print(time.time() - st)
q = U.euler_to_quaternion([pose3[3], 0.0, 0.0])
dvrk.set_pose(pos1=pose3[:3], rot1=q)
dvrk.set_jaw(jaw1=[0.5])
| [
"gkgk1215@gmail.com"
] | gkgk1215@gmail.com |
e8005bb135eeec146e826549a899f9a1a9662d23 | 2a3ec828ae10d7290f7585d3b1e01d06ec8fc2cf | /cuerpos.py | ffc23e81c2cd927ec4059fe7dc8f78ad0998c46f | [] | no_license | deivyforero/repaso_obetos_decorator | 200c1a71f2e110d2bb52a872587553314cf70e62 | 2a41cdaac1a3e73a64864c3f860e42ae268b8598 | refs/heads/master | 2023-04-09T17:09:21.554087 | 2021-04-18T23:50:44 | 2021-04-18T23:50:44 | 359,279,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py |
class Cuerpo:
def __init__(self):
self.brazos = ""
self.piernas = ""
self.cabeza = ""
self.torso = ""
self.imgcuerpo = ""
class DecoradorCuerpo (Cuerpo):
def __init__(self):
self.brazos = ""
self.piernas = ""
self.cabeza = ""
self.torso = ""
self.imgcuerpo = ""
class CuerpoHumano(Cuerpo):
def __init__(self):
self.brazos = "Fuertes"
self.piernas = "Largas"
self.cabeza = "Pequeña"
self.torso = "Grande"
self.descripcion = "Cuerpo Humano"
self.imgcuerpo = "static/imagenes/humanos/Cuerpo.jpg"
class CuerpoOrco(Cuerpo):
def __init__(self):
self.brazos = "feos"
self.piernas = "Feos"
self.cabeza = "Feos"
self.torso = "Feos"
self.descripcion = "Cuerpo Orco"
self.imgcuerpo = "static/imagenes/orcos/Cuerpo.jpg"
| [
"deivyforero@gmail.com"
] | deivyforero@gmail.com |
65e85cefa6c7e05cc303e21845feb2e62721ca6c | bb95b988ae79cc5b705b71bbaeff23a09e0de868 | /Internship Materials/mysite/polls/admin.py | e64a065750f1c338e21b07ecfd3dd058c21302e5 | [] | no_license | rodrigjs/Projects | cbf6f92ef4dacfa2858970be3618b6aaa0216f07 | 285df17085d6f06cbe68b0e3596111440972e7c8 | refs/heads/master | 2023-04-15T22:31:11.672830 | 2021-04-26T00:46:22 | 2021-04-26T00:46:22 | 289,349,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Choice, Question
from .models import Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_display = ('question_text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question_text']
admin.site.register(Choice)
admin.site.register(Question, QuestionAdmin)
| [
"rodrigjs@mail.gvsu.edu"
] | rodrigjs@mail.gvsu.edu |
2bba632443e99dc90c02429a7ee2ddf4fb4263c6 | 73e1fcc623327d54d593a6d718cd068e9cc1c37f | /train_model.py | e43de9b9a1a2571be97fadedfcda3e670ac44a02 | [] | no_license | ZhangQi1996/aq_predict | 9182a7497bfbfa4d9162b2f38e5e216114be8bc8 | a54aa68f42fb29222cc1907ed6195ed79845622f | refs/heads/master | 2020-06-16T06:41:06.400824 | 2019-05-27T09:39:32 | 2019-05-27T09:39:32 | 195,504,577 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | from utils import data_loader, draw_train_loss_curve, standardize, save_std_params_list_or_params, \
_standardize, get_adjusted_r2_score, BATCH_SIZE
from model import get_model
if __name__ == '__main__':
train_x, train_y, test_x, test_y = data_loader()
# 对train_x归一化
train_x, params_list = standardize(train_x)
test_x = standardize(test_x, params_list=params_list, return_params_list=False)
train_y, params = _standardize(train_y)
test_y = _standardize(test_y, params=params, return_params=False)
model = get_model(is_save_model_struct_img=True)
history = model.fit(x=train_x, y=train_y, batch_size=BATCH_SIZE, verbose=2, epochs=5000, validation_data=(test_x, test_y))
model.save_weights(filepath='model_weights.h5')
save_std_params_list_or_params(params_list_or_params=params_list, file_name='x_params_list.txt', is_params_list=True)
save_std_params_list_or_params(params_list_or_params=params, file_name='y_params.txt', is_params_list=False)
print(get_adjusted_r2_score(test_y, model.predict(test_x, BATCH_SIZE), return_str=True))
draw_train_loss_curve(history=history)
| [
"358929931@qq.com"
] | 358929931@qq.com |
c6ae89454ce95016dfc308d55eaa56954a579389 | 516916ee30e9d129a5e776151298745cab921c55 | /prototype/save_stuff.py | e7be07de36895f3f54c90e19070e805d5bf87788 | [] | no_license | Jelle-Bijlsma/UTMR | 6a16f5dcfd024e4ee585d0ba69f797bc514487b4 | 98508e3457b0585ed4cff4f15f5241895f794f07 | refs/heads/master | 2023-08-28T21:35:25.453329 | 2021-09-05T20:09:42 | 2021-09-05T20:09:42 | 382,267,947 | 0 | 0 | null | 2021-07-19T23:47:28 | 2021-07-02T07:34:36 | Python | UTF-8 | Python | false | false | 691 | py | import pickle
option = "load"
# option = "write"
path = "../data/parameters/para1.txt"
mylist = ["hi", 12, {"key": 420}, ['a','b']]
if option == "write":
file = open(path,'wb')
pickle.dump(mylist,file)
file.close()
if option == "load":
file = open(path,'rb')
mylist2 = pickle.load(file)
print(mylist==mylist2)
# print(self.CurMov.parameters)
coolparams = [[10, 10, 10, 10], [True, 10, 1], [True, 50, 25, 25], [False, 1, 10, 0], [True, 21, 89],
[False, 10, 10, 10, 10, 4, 10], [False, 50]]
for clazz, newparams in zip(SliderClass.all_sliders, coolparams):
print(clazz._params_image)
clazz.settr(newparams)
print(clazz._params_image)
| [
"jellebijlsma@protonmail.com"
] | jellebijlsma@protonmail.com |
5f4ec5f77fa382b6b645e3eb48d4da0402d7706a | ee79d49f8eda69d9dd55bcaf218532af4230b155 | /metrics.py | 49b8392b02a209ad4477d769da3cec780a8818c4 | [] | no_license | dishant199/CVSS-calculator | 39f2fc78ac21f0af9903c91c9c298ee3133b1119 | c15b3c6d0814f2652d6ede2a0d8ab89f4b5b3c06 | refs/heads/main | 2023-07-30T21:51:42.248358 | 2021-09-20T17:30:12 | 2021-09-20T17:30:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | "this file difnes all the basic metrics for base, temoral and environmental"
class Value:
def __init__(self, **entries):
self.__dict__.update(entries)
class Metrics:
# Base Metrics
attack_vector = Value(not_defined=float(1.0), network=float(0.85), adjacent_network=float(0.62), local=float(0.55),
physical=float(0.20))
attack_complexity = Value(not_defined=float(1.0), low=float(0.77), high=float(0.44))
privileges_required = Value(not_defined=float(1.0), none=float(0.85), low=float(0.62), high=float(0.27))
privileges_required_changed = Value(not_defined=float(1.0), none=float(0.85), low=float(0.68), high=float(0.50))
user_interaction = Value(not_defined=float(1.0), none=float(0.85), required=float(0.62))
cia_impact = Value(not_defined=float(1.0), high=float(0.56), low=float(0.22), none=float(0))
# Temporal Metrics
exploit_code_maturity = Value(not_defined=float(1.0), high=float(1), functional=float(0.97),
proof_of_concept=float(0.94), unproven=float(0.91))
remediation_level = Value(not_defined=float(1.0), unavailable=float(1), workaround=float(0.97),
temporary_fix=float(0.96), official_fix=float(0.95))
report_confidence = Value(not_defined=float(1.0), confirmed=float(1), reasonable=float(0.96), unknown=float(0.92))
# Environmental Metrics
cia_requirement = Value(X=float(1.0), high=float(1.5), medium=float(1.0), low=float(0.50))
| [
"noreply@github.com"
] | dishant199.noreply@github.com |
a5f60206ad781d701721581c26832f74c129a29b | 706199290988c6e760c232683ed35a1c5a29f69a | /practice/Seok_Yong/DataCleaning/datacleaning.py | f9c763711c3422275489ac89b67c996b64c8df61 | [] | no_license | discrea/Sign_language_with_sentiment_analysis_VTS | 36ab3528126ea28cb926cc557a98f557a30d7244 | d1149cda45dbac5162c65d0e6a9c0893953e116a | refs/heads/master | 2023-06-25T18:50:52.932514 | 2021-07-27T05:03:09 | 2021-07-27T05:03:09 | 386,475,787 | 0 | 2 | null | 2021-07-22T05:55:01 | 2021-07-16T01:48:27 | Jupyter Notebook | UTF-8 | Python | false | false | 886 | py | import re
import os
import shutil
from glob import glob
import pandas as pd
words = pd.read_csv("./words.csv")
words = set(words["word"].values)
annotation = pd.read_csv("./KETI-Annotation.csv")
raw_dir = "./8381~9000(영상)/"
data_dir = "./data/"
if not os.path.exists(data_dir):
os.makedirs(data_dir)
filenames = glob(raw_dir + "*.*")
filename_re = re.compile("KETI_[A-Z]+_[0-9]+")
filetype_re = re.compile("[.]+[a-zA-Z0-9]{3}")
count = 0
for filename in filenames:
filecode = filename_re.search(filename).group(0)
filetype = filetype_re.search(filename).group(0)
target_label = annotation.loc[annotation["파일명"] == filecode, "한국어"].values[0]
if target_label in words:
shutil.move(filename, data_dir + filecode + filetype)
count += 1
shutil.rmtree(raw_dir)
print(f"total files: {len(filenames)}")
print(f"target files: {count}")
| [
"maker7788@gmail.com"
] | maker7788@gmail.com |
3bcd4da0f4a0652b9ceae41db83ea03b41ab9201 | 99bfa15723593ea351191d82fac80e36ab25aab1 | /LeetCode/merge_interval.py | 602b47cb4dc68d07416d23bb801d695654ec3578 | [] | no_license | Kartavya-verma/Python-Projects | f23739ef29eab67a8e25569e3f7bf110e42576cb | 02ffe926a7ed82bc783e4c4034a2fa53d4d1a870 | refs/heads/master | 2023-06-22T07:59:39.595084 | 2021-07-18T15:51:55 | 2021-07-18T15:51:55 | 387,139,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # interval = [[1,3],[2,6],[8,10],[15,18]]
interval = [[1,4],[4,5]]
n = interval.copy()
print(n)
res = []
v = []
for i in range(len(n)-1):
for j in range(len(n)):
print(n[i][1], n[i+1][0])
if n[i][1] > n[i+1][0]:
# print(n[i][1], n[i+1][0])
n[i].pop()
n[i].append(n[i+1][1])
v = n[i+1]
print(n[i],v)
n.remove(v)
print(n)
# l = list()
# for i in interval:
# for j in range(i[0], i[1]+1):
# l.append(j)
# print(l) | [
"vermakartavya2000@gmail.com"
] | vermakartavya2000@gmail.com |
07c31439ee6b8b59944e5a80ee15c66289f70235 | 24f27283fb96d0ad25e769f2ff64b30bde9f47ec | /leetcode/problems/36_valid_sudoku.py | df852a0df81a37f254cf09038852e1919fc692ae | [] | no_license | balloonio/algorithms | ee76f9be583136396e886512125685ada174c09f | 582ab1d7bde6431ba2ed3621ade5d170be3fc050 | refs/heads/master | 2022-07-31T07:30:57.988521 | 2020-05-17T06:04:21 | 2020-05-17T06:04:21 | 68,259,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | class Solution:
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
if not board or not board[0]:
return False
return self.row_valid(board) and self.col_valid(board) and self.box_valid(board)
def row_valid(self, board):
visited = set()
for row in board:
visited.clear()
for num in row:
if num in visited and num != ".":
return False
visited.add(num)
return True
def col_valid(self, board):
h, w = len(board), len(board[0])
visited = set()
for j in range(w):
visited.clear()
for i in range(h):
if board[i][j] in visited and board[i][j] != ".":
return False
visited.add(board[i][j])
return True
def box_valid(self, board):
visited = set()
for x in range(0, 7, 3):
for y in range(0, 7, 3):
visited.clear()
for i in range(9):
dx, dy = i // 3, i % 3
if (
board[x + dx][y + dy] in visited
and board[x + dx][y + dy] != "."
):
return False
visited.add(board[dx + x][dy + y])
return True
| [
"rzhangbolun@gmail.com"
] | rzhangbolun@gmail.com |
97a21d6460964e6c6b05ac2fe8e09d813cc3dfe8 | d15579b732aa09f516fac4917709de00e993d832 | /homepage/migrations/0003_auto_20210403_1955.py | 4b0e4399dee1f3fa46f9c81f587bbaae78c6d8b7 | [] | no_license | babynimfa/django-3-personal-website | bf292e14f710abec8a0658c148095e5059223570 | 3ce8cd29349a7c6924fb48bf85c7b8d713c1dbc7 | refs/heads/main | 2023-04-03T02:38:24.054448 | 2021-04-07T13:27:20 | 2021-04-07T13:27:20 | 346,444,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Generated by Django 3.1.7 on 2021-04-03 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('homepage', '0002_auto_20210331_2058'),
]
operations = [
migrations.AlterField(
model_name='project',
name='image',
field=models.ImageField(upload_to='images/homepage/'),
),
]
| [
"redleziiter24@gmail.com"
] | redleziiter24@gmail.com |
b650eb1ec7918aef34bf31e14b27aa3ffd6dd1fb | 79062519cc7cf763377c1e37285edcc2ccbdad0f | /third_model/cut_picture.py | 96a9b78ca11057b6a31bbc5ae56b46b239a11b6e | [] | no_license | CcIsHandsome/Verification-code-identification-system | 03b99a7c1f0e0d73ce74b0d763d3d0fae04e5832 | bb3ae0df939781679e8e8055c32e2660ab90fc79 | refs/heads/master | 2021-03-28T12:10:50.372336 | 2020-03-17T02:56:51 | 2020-03-17T02:56:51 | 247,862,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | # -*-coding:utf-8 -*-
import cv2
import os
def cut_image(image, num, img_name):
# image = cv2.imread('./img/8.jpg')
im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# im_cut_real = im[8:47, 28:128]
im_cut_1 = im[0:80, 5:55]
im_cut_2 = im[0:80, 50:100]
im_cut_3 = im[0:80, 100:150]
im_cut_4 = im[0:80, 150:200]
index = 0
im_cut = [im_cut_1, im_cut_2, im_cut_3, im_cut_4]
for i in range(4):
im_temp = im_cut[i]
cv2.imwrite('./progress-train3-picture2/' + img_name[i] + '_' + str(num) + '_' + str(index) + '.jpg', im_temp)
index = index + 1
if __name__ == '__main__':
img_dir = './progress-train3-picture/'
label_dir = './mappings3.txt'
img_name = os.listdir(img_dir) # 列出文件夹下所有的目录与文件
label_txt = []
f = open(label_dir)
for i in f:
label_txt.append(i.split(",")[1].split("=")[0].strip())
print(label_txt)
f.close()
for i in range(len(img_name)):
path = os.path.join(img_dir, img_name[i])
image = cv2.imread(path)
print(image.shape)
name_list = list(label_txt[i])[:5]
cut_image(image, i, name_list)
print("step:", i)
print(u'*****图片分割预处理完成!*****')
| [
"noreply@github.com"
] | CcIsHandsome.noreply@github.com |
c762ee632243311933dfaca2cb35c205acdd08cc | cee47ea9f8c965f4e63e7f13bbc817a50ab161bc | /patroni/dcs/etcd3.py | e5e069c557ff12f46fe271396c212a3cfa9327e1 | [
"MIT"
] | permissive | CyberDem0n/patroni | bb7c012cda2f724ad7e393fee4d5cfd2d26494bc | 3734ecc8510ba6a1f4903cdcd95f3855df0b6d7d | refs/heads/master | 2023-08-16T18:20:42.511468 | 2023-08-09T15:46:53 | 2023-08-09T15:46:53 | 223,714,494 | 2 | 2 | MIT | 2023-06-16T07:24:26 | 2019-11-24T08:33:24 | Python | UTF-8 | Python | false | false | 39,414 | py | from __future__ import absolute_import
import base64
import etcd
import json
import logging
import os
import socket
import sys
import time
import urllib3
from collections import defaultdict
from enum import IntEnum
from urllib3.exceptions import ReadTimeoutError, ProtocolError
from threading import Condition, Lock, Thread
from typing import Any, Callable, Collection, Dict, Iterator, List, Optional, Tuple, Type, TYPE_CHECKING, Union
from . import ClusterConfig, Cluster, Failover, Leader, Member, SyncState, \
TimelineHistory, catch_return_false_exception, citus_group_re
from .etcd import AbstractEtcdClientWithFailover, AbstractEtcd, catch_etcd_errors, DnsCachingResolver, Retry
from ..exceptions import DCSError, PatroniException
from ..utils import deep_compare, enable_keepalive, iter_response_objects, RetryFailedError, USER_AGENT
logger = logging.getLogger(__name__)
class Etcd3Error(DCSError):
    """DCS-level error reported by the etcd3 implementation to the rest of Patroni."""
    pass
class UnsupportedEtcdVersion(PatroniException):
    """Raised when the connected etcd is older than the minimum supported release (3.0.4)."""
    pass
# google.golang.org/grpc/codes
class GRPCCode(IntEnum):
    """Canonical gRPC status codes (mirrors google.golang.org/grpc/codes)."""
    OK = 0
    Canceled = 1
    Unknown = 2
    InvalidArgument = 3
    DeadlineExceeded = 4
    NotFound = 5
    AlreadyExists = 6
    PermissionDenied = 7
    ResourceExhausted = 8
    FailedPrecondition = 9
    Aborted = 10
    OutOfRange = 11
    Unimplemented = 12
    Internal = 13
    Unavailable = 14
    DataLoss = 15
    Unauthenticated = 16
# Reverse map: numeric gRPC code -> symbolic name (e.g. 5 -> 'NotFound').
# Iterating the enum is the supported, alias-safe way to enumerate its members,
# unlike reaching into the private ``_member_map_`` dict.
GRPCcodeToText: Dict[int, str] = {code.value: code.name for code in GRPCCode}
class Etcd3Exception(etcd.EtcdException):
    """Common base for all exceptions produced while talking the etcd v3 protocol."""
    pass
class Etcd3ClientError(Etcd3Exception):
    """Base class for errors returned by the etcd v3 gRPC-gateway.

    Concrete subclasses pin down the gRPC status via a ``code`` class attribute
    and, for well-known server errors, the exact ``error`` message string.
    """

    def __init__(self, code: Optional[int] = None, error: Optional[str] = None, status: Optional[int] = None) -> None:
        # Subclasses that predefine ``error`` as a class attribute keep it;
        # only generic errors take the message from the server response.
        if not hasattr(self, 'error'):
            self.error = error.strip() if error else error
        self.codeText = None if code is None else GRPCcodeToText.get(code)
        self.status = status

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return "<{0} error: '{1}', code: {2}>".format(cls_name,
                                                      getattr(self, 'error', None),
                                                      getattr(self, 'code', None))

    __str__ = __repr__

    def as_dict(self) -> Dict[str, Any]:
        """Return a plain-dict representation of the error (for logging/serialization)."""
        return {
            'error': getattr(self, 'error', None),
            'code': getattr(self, 'code', None),
            'codeText': self.codeText,
            'status': self.status,
        }

    @classmethod
    def get_subclasses(cls) -> Iterator[Type['Etcd3ClientError']]:
        """Yield all transitive subclasses, depth-first, children before their parent."""
        for child in cls.__subclasses__():
            yield from child.get_subclasses()
            yield child
class Unknown(Etcd3ClientError):
    """Generic/unclassified server error (gRPC code 2)."""
    code = GRPCCode.Unknown
class InvalidArgument(Etcd3ClientError):
    """The client specified an invalid argument (gRPC code 3)."""
    code = GRPCCode.InvalidArgument
class DeadlineExceeded(Etcd3ClientError):
    """The request did not complete within the deadline (gRPC code 4)."""
    code = GRPCCode.DeadlineExceeded
    error = "context deadline exceeded"
class NotFound(Etcd3ClientError):
    """A requested entity was not found (gRPC code 5)."""
    code = GRPCCode.NotFound
class FailedPrecondition(Etcd3ClientError):
    """The system is not in a state required for the operation (gRPC code 9)."""
    code = GRPCCode.FailedPrecondition
class Unavailable(Etcd3ClientError):
    """The service is currently unavailable (gRPC code 14)."""
    code = GRPCCode.Unavailable
# https://github.com/etcd-io/etcd/commits/main/api/v3rpc/rpctypes/error.go
class LeaseNotFound(NotFound):
    """The lease referenced by the request does not (or no longer) exist."""
    error = "etcdserver: requested lease not found"
class UserEmpty(InvalidArgument):
    """Authentication was attempted without a user name."""
    error = "etcdserver: user name is empty"
class AuthFailed(InvalidArgument):
    """Authentication failed: wrong user or password."""
    error = "etcdserver: authentication failed, invalid user ID or password"
class PermissionDenied(Etcd3ClientError):
    """The authenticated user lacks permission for the requested operation."""
    code = GRPCCode.PermissionDenied
    error = "etcdserver: permission denied"
class AuthNotEnabled(FailedPrecondition):
    """Authentication was attempted although it is disabled on the server."""
    error = "etcdserver: authentication is not enabled"
class InvalidAuthToken(Etcd3ClientError):
    """The provided auth token is invalid (e.g. expired); re-authentication is needed."""
    code = GRPCCode.Unauthenticated
    error = "etcdserver: invalid auth token"
# Lookup tables used by _raise_for_data() to pick the most specific exception:
# first by the exact server-reported error string (any depth of subclassing),
# then by the gRPC status code (direct subclasses only).
errStringToClientError = {getattr(s, 'error'): s for s in Etcd3ClientError.get_subclasses() if hasattr(s, 'error')}
errCodeToClientError = {getattr(s, 'code'): s for s in Etcd3ClientError.__subclasses__()}
def _raise_for_data(data: Union[bytes, str, Dict[str, Union[Any, Dict[str, Any]]]],
                    status_code: Optional[int] = None) -> Etcd3ClientError:
    """Map an etcd error payload to the most specific :class:`Etcd3ClientError`.

    Despite the name this function *returns* the exception (callers raise it).

    :param data: decoded JSON error payload, or the raw/undecodable body.
    :param status_code: HTTP status of the response, if known.
    """
    try:
        if TYPE_CHECKING:  # pragma: no cover
            assert isinstance(data, dict)
        payload = data.get('error') or data.get('Error')
        if isinstance(payload, dict):
            # streaming responses nest the actual error one level deeper
            status_code = payload.get('http_code')
            code: Optional[int] = payload['grpc_code']
            error: str = payload['message']
        else:
            raw_code = data.get('code') or data.get('Code')
            if TYPE_CHECKING:  # pragma: no cover
                assert not isinstance(raw_code, dict)
            code = raw_code
            error = str(payload)
    except Exception:
        # *data* was not a dict (or had an unexpected shape): report it verbatim
        code, error = GRPCCode.Unknown, str(data)
    cls = errStringToClientError.get(error) or errCodeToClientError.get(code) or Unknown
    return cls(code, error, status_code)
def to_bytes(v: Union[str, bytes]) -> bytes:
    """Return *v* as ``bytes``, UTF-8 encoding it when it is a string."""
    if isinstance(v, bytes):
        return v
    return v.encode('utf-8')
def prefix_range_end(v: str) -> bytes:
    """Return the exclusive upper bound of the key range covering prefix *v*.

    Scanning from the tail of the key, the first byte below 0xff is incremented
    by one, which yields the smallest key greater than every key with prefix *v*.
    """
    bound = bytearray(to_bytes(v))
    for pos in reversed(range(len(bound))):
        if bound[pos] < 0xff:
            bound[pos] += 1
            break
    return bytes(bound)
def base64_encode(v: Union[str, bytes]) -> str:
    """Base64-encode *v* and return the result as text."""
    encoded = base64.b64encode(to_bytes(v))
    return encoded.decode('utf-8')
def base64_decode(v: str) -> str:
    """Decode the base64 string *v* back to text."""
    raw = base64.b64decode(v)
    return raw.decode('utf-8')
def build_range_request(key: str, range_end: Union[bytes, str, None] = None) -> Dict[str, Any]:
    """Build the JSON fields of a KV range request for *key* (optionally up to *range_end*)."""
    request: Dict[str, Any] = {'key': base64_encode(key)}
    if range_end:
        request['range_end'] = base64_encode(range_end)
    return request
def _handle_auth_errors(func: Callable[..., Any]) -> Any:
def wrapper(self: 'Etcd3Client', *args: Any, **kwargs: Any) -> Any:
return self.handle_auth_errors(func, *args, **kwargs)
return wrapper
class Etcd3Client(AbstractEtcdClientWithFailover):
    """Etcd v3 client talking JSON to the gRPC-gateway, with failover between cluster nodes."""
    ERROR_CLS = Etcd3Error  # exception class surfaced by the inherited failover machinery
    def __init__(self, config: Dict[str, Any], dns_resolver: DnsCachingResolver, cache_ttl: int = 300) -> None:
        """Set up the client, probe the cluster version and try to authenticate.

        :param config: etcd connection configuration; ``version_prefix`` is forced
                       to '/v3beta' as a bootstrap default and later adjusted by
                       :meth:`_ensure_version_prefix` to match the detected version.
        :param dns_resolver: shared caching DNS resolver.
        :param cache_ttl: TTL of the machines cache, passed through to the base class.
        """
        # set before super().__init__() -- presumably the base-class constructor may
        # already issue requests that consult these attributes (TODO confirm)
        self._token = None
        self._cluster_version: Tuple[int, ...] = tuple()
        super(Etcd3Client, self).__init__({**config, 'version_prefix': '/v3beta'}, dns_resolver, cache_ttl)
        try:
            self.authenticate()
        except AuthFailed as e:
            # bad credentials will never recover by retrying -- fail fast
            logger.fatal('Etcd3 authentication failed: %r', e)
            sys.exit(1)
def _get_headers(self) -> Dict[str, str]:
headers = urllib3.make_headers(user_agent=USER_AGENT)
if self._token and self._cluster_version >= (3, 3, 0):
headers['authorization'] = self._token
return headers
def _prepare_request(self, kwargs: Dict[str, Any], params: Optional[Dict[str, Any]] = None,
method: Optional[str] = None) -> Callable[..., urllib3.response.HTTPResponse]:
if params is not None:
kwargs['body'] = json.dumps(params)
kwargs['headers']['Content-Type'] = 'application/json'
return self.http.urlopen
    def _handle_server_response(self, response: urllib3.response.HTTPResponse) -> Dict[str, Any]:
        """Decode *response* as JSON and return it, or raise the mapped client error.

        :raises etcd.EtcdException: if a < 400 response body is not valid JSON.
        :raises Etcd3ClientError: (subclass) mapped from the payload of >= 400 responses.
        """
        data = response.data
        try:
            # note: *data* is rebound to the decoded text, so the fallback below
            # raises with the decoded body when JSON parsing failed after decoding
            data = data.decode('utf-8')
            ret: Dict[str, Any] = json.loads(data)
            if response.status < 400:
                return ret
        except (TypeError, ValueError, UnicodeError) as e:
            if response.status < 400:
                # a successful status with an unparsable body is unexpected
                raise etcd.EtcdException('Server response was not valid JSON: %r' % e)
            ret = {}
        # error responses: prefer the parsed JSON payload, fall back to the raw body
        raise _raise_for_data(ret or data, response.status)
    def _ensure_version_prefix(self, base_uri: str, **kwargs: Any) -> None:
        """Detect the etcd server/cluster version and pick the matching API URL prefix.

        The gRPC-gateway prefix changed across releases: '/v3alpha' (< 3.3),
        '/v3beta' (3.3.x) and '/v3' (>= 3.4).  Runs only while the prefix is not
        yet settled on '/v3'.  Also caches the cluster version on the client.

        :raises UnsupportedEtcdVersion: if the server is older than 3.0.4.
        """
        if self.version_prefix != '/v3':
            response = self.http.urlopen(self._MGET, base_uri + '/version', **kwargs)
            response = self._handle_server_response(response)
            server_version_str = response['etcdserver']
            server_version = tuple(int(x) for x in server_version_str.split('.'))
            cluster_version_str = response['etcdcluster']
            self._cluster_version = tuple(int(x) for x in cluster_version_str.split('.'))
            if self._cluster_version < (3, 0) or server_version < (3, 0, 4):
                raise UnsupportedEtcdVersion('Detected Etcd version {0} is lower than 3.0.4'.format(server_version_str))
            if self._cluster_version < (3, 3):
                if self.version_prefix != '/v3alpha':
                    if self._cluster_version < (3, 1):
                        # watches only became available in 3.1 -- warn, but keep working
                        logger.warning('Detected Etcd version %s is lower than 3.1.0, watches are not supported',
                                       cluster_version_str)
                    if self.username and self.password:
                        logger.warning('Detected Etcd version %s is lower than 3.3.0, authentication is not supported',
                                       cluster_version_str)
                    self.version_prefix = '/v3alpha'
            elif self._cluster_version < (3, 4):
                self.version_prefix = '/v3beta'
            else:
                self.version_prefix = '/v3'
def _prepare_get_members(self, etcd_nodes: int) -> Dict[str, Any]:
kwargs = self._prepare_common_parameters(etcd_nodes)
self._prepare_request(kwargs, {})
return kwargs
def _get_members(self, base_uri: str, **kwargs: Any) -> List[str]:
self._ensure_version_prefix(base_uri, **kwargs)
resp = self.http.urlopen(self._MPOST, base_uri + self.version_prefix + '/cluster/member/list', **kwargs)
members = self._handle_server_response(resp)['members']
return [url for member in members for url in member.get('clientURLs', [])]
def call_rpc(self, method: str, fields: Dict[str, Any], retry: Optional[Retry] = None) -> Dict[str, Any]:
fields['retry'] = retry
return self.api_execute(self.version_prefix + method, self._MPOST, fields)
    def authenticate(self) -> bool:
        """Authenticate against etcd with the configured username/password.

        :returns: ``True`` if the auth token stored on the client changed.
        """
        if self._use_proxies and not self._cluster_version:
            # behind a proxy the version was never probed during member discovery;
            # do it now, since auth support depends on the cluster version
            kwargs = self._prepare_common_parameters(1)
            self._ensure_version_prefix(self._base_uri, **kwargs)
        if not (self._cluster_version >= (3, 3) and self.username and self.password):
            # token authentication over this API requires etcd >= 3.3 and credentials
            return False
        logger.info('Trying to authenticate on Etcd...')
        # clear the token so the authenticate request itself goes out without one
        old_token, self._token = self._token, None
        try:
            response = self.call_rpc('/auth/authenticate', {'name': self.username, 'password': self.password})
        except AuthNotEnabled:
            logger.info('Etcd authentication is not enabled')
            self._token = None
        except Exception:
            # unexpected failure: restore the previous (possibly still valid) token
            self._token = old_token
            raise
        else:
            self._token = response.get('token')
        return old_token != self._token
def handle_auth_errors(self: 'Etcd3Client', func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
    """Run *func*, re-authenticating and retrying once on auth-related errors."""
    def retry(ex: Exception) -> Any:
        # Retry is only possible when credentials are configured; otherwise
        # re-raise the original exception.
        if self.username and self.password:
            self.authenticate()
            return func(self, *args, **kwargs)
        else:
            logger.fatal('Username or password not set, authentication is not possible')
            raise ex

    try:
        return func(self, *args, **kwargs)
    except (UserEmpty, PermissionDenied) as e:  # no token provided
        # PermissionDenied is raised on 3.0 and 3.1
        if self._cluster_version < (3, 3) and (not isinstance(e, PermissionDenied)
                                               or self._cluster_version < (3, 2)):
            raise UnsupportedEtcdVersion('Authentication is required by Etcd cluster but not '
                                         'supported on version lower than 3.3.0. Cluster version: '
                                         '{0}'.format('.'.join(map(str, self._cluster_version))))
        return retry(e)
    except InvalidAuthToken as e:
        logger.error('Invalid auth token: %s', self._token)
        return retry(e)
@_handle_auth_errors
def range(self, key: str, range_end: Union[bytes, str, None] = None, serializable: bool = True,
          retry: Optional[Retry] = None) -> Dict[str, Any]:
    """Read *key* (or the range [key, range_end)) via /kv/range."""
    request = build_range_request(key, range_end)
    # Serializable (possibly stale) reads are cheaper than linearizable ones;
    # stale data is acceptable here.
    request['serializable'] = serializable
    return self.call_rpc('/kv/range', request, retry)
def prefix(self, key: str, serializable: bool = True, retry: Optional[Retry] = None) -> Dict[str, Any]:
    """Read every key that starts with *key*."""
    range_end = prefix_range_end(key)
    return self.range(key, range_end, serializable, retry)
@_handle_auth_errors
def lease_grant(self, ttl: int, retry: Optional[Retry] = None) -> str:
    """Create a new lease with the given TTL and return its ID."""
    response = self.call_rpc('/lease/grant', {'TTL': ttl}, retry)
    return response['ID']
def lease_keepalive(self, ID: str, retry: Optional[Retry] = None) -> Optional[str]:
    """Refresh lease *ID*; return its new TTL, or None when no TTL is reported."""
    response = self.call_rpc('/lease/keepalive', {'ID': ID}, retry)
    result = response.get('result', {})
    return result.get('TTL')
def txn(self, compare: Dict[str, Any], success: Dict[str, Any],
        failure: Optional[Dict[str, Any]] = None, retry: Optional[Retry] = None) -> Dict[str, Any]:
    """Execute a single-compare transaction.

    Returns the raw response when a *failure* branch was supplied or the
    transaction succeeded; otherwise returns {} so callers can treat a
    failed conditional operation as falsy.
    """
    request: Dict[str, Any] = {'compare': [compare], 'success': [success]}
    if failure:
        request['failure'] = [failure]
    response = self.call_rpc('/kv/txn', request, retry)
    if failure or response.get('succeeded'):
        return response
    return {}
@_handle_auth_errors
def put(self, key: str, value: str, lease: Optional[str] = None, create_revision: Optional[str] = None,
        mod_revision: Optional[str] = None, retry: Optional[Retry] = None) -> Dict[str, Any]:
    """Store *value* under *key*, optionally bound to *lease*.

    When *create_revision* or *mod_revision* is given the write becomes
    conditional: it is wrapped into a transaction that succeeds only if the
    key's CREATE/MOD revision matches; create_revision takes precedence.
    An unconditional put returns the raw /kv/put response; a failed
    conditional put returns {} (see txn()).
    """
    fields = {'key': base64_encode(key), 'value': base64_encode(value)}
    if lease:
        fields['lease'] = lease
    if create_revision is not None:
        compare = {'target': 'CREATE', 'create_revision': create_revision}
    elif mod_revision is not None:
        compare = {'target': 'MOD', 'mod_revision': mod_revision}
    else:
        # No guard requested: plain put.
        return self.call_rpc('/kv/put', fields, retry)
    compare['key'] = fields['key']
    return self.txn(compare, {'request_put': fields}, retry=retry)
@_handle_auth_errors
def deleterange(self, key: str, range_end: Union[bytes, str, None] = None,
                mod_revision: Optional[str] = None, retry: Optional[Retry] = None) -> Dict[str, Any]:
    """Delete *key* (or the range [key, range_end)), optionally only while
    the key's MOD revision still equals *mod_revision*."""
    request = build_range_request(key, range_end)
    if mod_revision is None:
        # Unconditional delete.
        return self.call_rpc('/kv/deleterange', request, retry)
    # Conditional delete, expressed as a transaction guarded on mod_revision.
    guard = {'target': 'MOD', 'mod_revision': mod_revision, 'key': request['key']}
    return self.txn(guard, {'request_delete_range': request}, retry=retry)
def deleteprefix(self, key: str, retry: Optional[Retry] = None) -> Dict[str, Any]:
    """Delete every key that starts with *key*."""
    range_end = prefix_range_end(key)
    return self.deleterange(key, range_end, retry=retry)
def watchrange(self, key: str, range_end: Union[bytes, str, None] = None,
               start_revision: Optional[str] = None, filters: Optional[List[Dict[str, Any]]] = None,
               read_timeout: Optional[float] = None) -> urllib3.response.HTTPResponse:
    """Open a streaming /watch request for [key, range_end).

    Returns the raw urllib3 response object; the caller is responsible for
    consuming the event stream and closing the response.
    """
    params = build_range_request(key, range_end)
    if start_revision is not None:
        params['start_revision'] = start_revision
    params['filters'] = filters or []
    kwargs = self._prepare_common_parameters(1, self.read_timeout)
    request_executor = self._prepare_request(kwargs, {'create_request': params})
    # A watch is long-lived: keep the connect timeout, override the read
    # timeout (None = block indefinitely) and disable urllib3 retries.
    kwargs.update(timeout=urllib3.Timeout(connect=kwargs['timeout'], read=read_timeout), retries=0)
    return request_executor(self._MPOST, self._base_uri + self.version_prefix + '/watch', **kwargs)
def watchprefix(self, key: str, start_revision: Optional[str] = None,
                filters: Optional[List[Dict[str, Any]]] = None,
                read_timeout: Optional[float] = None) -> urllib3.response.HTTPResponse:
    """Open a streaming watch covering every key that starts with *key*."""
    range_end = prefix_range_end(key)
    return self.watchrange(key, range_end, start_revision, filters, read_timeout)
class KVCache(Thread):
    """Background thread that mirrors the cluster's Etcd keys in memory.

    The cache is primed with one /kv/range (prefix) request and then kept up
    to date from a streaming /watch, so cluster reads can be answered without
    hitting Etcd on every HA loop.
    """

    def __init__(self, dcs: 'Etcd3', client: 'PatroniEtcd3Client') -> None:
        super(KVCache, self).__init__()
        self.daemon = True
        self._dcs = dcs
        self._client = client
        # Used to signal readers (see is_ready()) when the cache becomes usable.
        self.condition = Condition()
        # Keys are kept/compared in their base64 (wire) representation.
        self._config_key = base64_encode(dcs.config_path)
        self._leader_key = base64_encode(dcs.leader_path)
        self._optime_key = base64_encode(dcs.leader_optime_path)
        self._status_key = base64_encode(dcs.status_path)
        self._name = base64_encode(getattr(dcs, '_name'))  # pyright
        self._is_ready = False
        # The in-flight watch response; None = not started, False = killed.
        self._response = None
        self._response_lock = Lock()
        self._object_cache = {}
        self._object_cache_lock = Lock()
        self.start()

    def set(self, value: Dict[str, Any], overwrite: bool = False) -> Tuple[bool, Optional[Dict[str, Any]]]:
        """Store *value* when it is newer than the cached node (or the same
        revision with *overwrite*). Returns (is_newer, previous_cached_value)."""
        with self._object_cache_lock:
            name = value['key']
            old_value = self._object_cache.get(name)
            ret = not old_value or int(old_value['mod_revision']) < int(value['mod_revision'])
            if ret or overwrite and old_value and old_value['mod_revision'] == value['mod_revision']:
                self._object_cache[name] = value
            return ret, old_value

    def delete(self, name: str, mod_revision: str) -> Tuple[bool, Optional[Dict[str, Any]]]:
        """Drop *name* from the cache when the delete is newer than the cached
        node. Returns (deleted_or_absent, previous_cached_value)."""
        with self._object_cache_lock:
            old_value = self._object_cache.get(name)
            ret = old_value and int(old_value['mod_revision']) < int(mod_revision)
            if ret:
                del self._object_cache[name]
            return bool(not old_value or ret), old_value

    def copy(self) -> List[Dict[str, Any]]:
        """Return a snapshot (shallow copies) of all cached nodes."""
        with self._object_cache_lock:
            return [v.copy() for v in self._object_cache.values()]

    def get(self, name: str) -> Optional[Dict[str, Any]]:
        """Return the cached node for *name* (base64 key), if any."""
        with self._object_cache_lock:
            return self._object_cache.get(name)

    def _process_event(self, event: Dict[str, Any]) -> None:
        """Apply one watch event to the cache and wake the HA loop when a
        relevant value actually changed."""
        kv = event['kv']
        key = kv['key']
        if event.get('type') == 'DELETE':
            success, old_value = self.delete(key, kv['mod_revision'])
        else:
            success, old_value = self.set(kv, True)
        if success:
            old_value = old_value and old_value.get('value')
            new_value = kv.get('value')
            # Only leader / optime / status / config changes are interesting.
            value_changed = old_value != new_value and \
                (key == self._leader_key or key in (self._optime_key, self._status_key) and new_value is not None
                 or key == self._config_key and old_value is not None and new_value is not None)
            if value_changed:
                logger.debug('%s changed from %s to %s', key, old_value, new_value)
            # We also want to wake up HA loop on replicas if leader optime (or status key) was updated
            if value_changed and (key not in (self._optime_key, self._status_key)
                                  or (self.get(self._leader_key) or {}).get('value') != self._name):
                self._dcs.event.set()

    def _process_message(self, message: Dict[str, Any]) -> None:
        """Handle one JSON message from the watch stream; raise on server errors."""
        logger.debug('Received message: %s', message)
        if 'error' in message:
            raise _raise_for_data(message)
        events: List[Dict[str, Any]] = message.get('result', {}).get('events', [])
        for event in events:
            self._process_event(event)

    @staticmethod
    def _finish_response(response: urllib3.response.HTTPResponse) -> None:
        """Close the watch response and return its connection to the pool."""
        try:
            response.close()
        finally:
            response.release_conn()

    def _do_watch(self, revision: str) -> None:
        """Open the watch stream starting at *revision* and consume it until
        it ends or kill_stream() interrupts it."""
        with self._response_lock:
            self._response = None
        # We do most of requests with timeouts. The only exception /watch requests to Etcd v3.
        # In order to interrupt the /watch request we do socket.shutdown() from the main thread,
        # which doesn't work on Windows. Therefore we want to use the last resort, `read_timeout`.
        # Setting it to TTL will help to partially mitigate the problem.
        # Setting it to lower value is not nice because for idling clusters it will increase
        # the numbers of interrupts and reconnects.
        read_timeout = self._dcs.ttl if os.name == 'nt' else None
        response = self._client.watchprefix(self._dcs.cluster_prefix, revision, read_timeout=read_timeout)
        with self._response_lock:
            if self._response is None:
                self._response = response
            # kill_stream() set _response to False while we were connecting.
            if not self._response:
                return self._finish_response(response)
        for message in iter_response_objects(response):
            self._process_message(message)

    def _build_cache(self) -> None:
        """Prime the cache with a full prefix read, mark it ready, then follow
        the watch stream; on any exit the cache is marked not-ready again."""
        result = self._dcs.retry(self._client.prefix, self._dcs.cluster_prefix)
        with self._object_cache_lock:
            self._object_cache = {node['key']: node for node in result.get('kvs', [])}
        with self.condition:
            self._is_ready = True
            self.condition.notify()

        try:
            self._do_watch(result['header']['revision'])
        except Exception as e:
            # Following exceptions are expected on Windows because the /watch request is done with `read_timeout`
            if not (os.name == 'nt' and isinstance(e, (ReadTimeoutError, ProtocolError))):
                logger.error('watchprefix failed: %r', e)
        finally:
            with self.condition:
                self._is_ready = False
            with self._response_lock:
                response, self._response = self._response, None
            if isinstance(response, urllib3.response.HTTPResponse):
                self._finish_response(response)

    def run(self) -> None:
        """Thread body: rebuild the cache forever, backing off 1s on failures."""
        while True:
            try:
                self._build_cache()
            except Exception as e:
                logger.error('KVCache.run %r', e)
                time.sleep(1)

    def kill_stream(self) -> None:
        """Interrupt the current watch request by shutting down its socket.

        If no response exists yet, _response is set to False so _do_watch()
        will discard the response it is about to receive.
        """
        sock = None
        with self._response_lock:
            if isinstance(self._response, urllib3.response.HTTPResponse):
                try:
                    sock = self._response.connection.sock if self._response.connection else None
                except Exception:
                    sock = None
            else:
                self._response = False
        if sock:
            try:
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
            except Exception as e:
                logger.debug('Error on socket.shutdown: %r', e)

    def is_ready(self) -> bool:
        """Must be called only when holding the lock on `condition`"""
        return self._is_ready
class PatroniEtcd3Client(Etcd3Client):
    """Etcd3Client specialization that keeps a KVCache coherent with writes
    and serves cluster reads from it when possible."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self._kv_cache = None
        super(PatroniEtcd3Client, self).__init__(*args, **kwargs)

    def configure(self, etcd3: 'Etcd3') -> None:
        """Attach the owning Etcd3 DCS object (set after construction)."""
        self._etcd3 = etcd3

    def start_watcher(self) -> None:
        """Start the KVCache thread; watches require Etcd >= 3.1."""
        if self._cluster_version >= (3, 1):
            self._kv_cache = KVCache(self._etcd3, self)

    def _restart_watcher(self) -> None:
        # Killing the stream makes KVCache rebuild the cache from scratch.
        if self._kv_cache:
            self._kv_cache.kill_stream()

    def set_base_uri(self, value: str) -> None:
        """Switch to another Etcd endpoint and force a cache rebuild."""
        super(PatroniEtcd3Client, self).set_base_uri(value)
        self._restart_watcher()

    def authenticate(self) -> bool:
        """Re-authenticate; when the token changed the watcher is restarted
        because the old stream was opened with the stale token."""
        ret = super(PatroniEtcd3Client, self).authenticate()
        if ret:
            self._restart_watcher()
        return ret

    def _wait_cache(self, timeout: float) -> None:
        """Block (on the cache condition) until KVCache is ready or *timeout*
        expires; raises RetryFailedError on timeout."""
        stop_time = time.time() + timeout
        while self._kv_cache and not self._kv_cache.is_ready():
            timeout = stop_time - time.time()
            if timeout <= 0:
                raise RetryFailedError('Exceeded retry deadline')
            self._kv_cache.condition.wait(timeout)

    def get_cluster(self, path: str) -> List[Dict[str, Any]]:
        """Return all nodes under *path* with decoded keys/values.

        Served from the KVCache when it covers *path*; otherwise a direct
        prefix read is issued.
        """
        if self._kv_cache and path.startswith(self._etcd3.cluster_prefix):
            with self._kv_cache.condition:
                self._wait_cache(self.read_timeout)
            ret = self._kv_cache.copy()
        else:
            serializable = not getattr(self._etcd3, '_ctl')  # use linearizable for patronictl
            ret = self._etcd3.retry(self.prefix, path, serializable).get('kvs', [])
        for node in ret:
            # Etcd returns base64-encoded keys/values; decode them in place.
            node.update({'key': base64_decode(node['key']),
                         'value': base64_decode(node.get('value', '')),
                         'lease': node.get('lease')})
        return ret

    def call_rpc(self, method: str, fields: Dict[str, Any], retry: Optional[Retry] = None) -> Dict[str, Any]:
        """Execute the RPC and apply our own successful writes to the KVCache,
        so the cache doesn't lag behind this node's updates."""
        ret = super(PatroniEtcd3Client, self).call_rpc(method, fields, retry)

        if self._kv_cache:
            value = delete = None
            # For the 'failure' case we only support a second (nested) transaction that attempts to
            # update/delete the same keys. Anything more complex than that we don't need and therefore it doesn't
            # make sense to write a universal response analyzer and we can just check expected JSON path.
            if method == '/kv/txn'\
                    and (ret.get('succeeded') or 'failure' in fields and 'request_txn' in fields['failure'][0]
                         and ret.get('responses', [{'response_txn': {'succeeded': False}}])[0]
                         .get('response_txn', {}).get('succeeded')):
                on_success = fields['success'][0]
                value = on_success.get('request_put')
                delete = on_success.get('request_delete_range')
            elif method == '/kv/put' and ret:
                value = fields
            elif method == '/kv/deleterange' and ret:
                delete = fields

            if value:
                value['mod_revision'] = ret['header']['revision']
                self._kv_cache.set(value)
            elif delete and 'range_end' not in delete:
                # Range deletes are not applied: the watch stream will deliver
                # the individual delete events.
                self._kv_cache.delete(delete['key'], ret['header']['revision'])
        return ret

    def txn(self, compare: Dict[str, Any], success: Dict[str, Any],
            failure: Optional[Dict[str, Any]] = None, retry: Optional[Retry] = None) -> Dict[str, Any]:
        ret = super(PatroniEtcd3Client, self).txn(compare, success, failure, retry)
        # Here we abuse the fact that the `failure` is only set in the call from update_leader().
        # In all other cases the txn() call failure may be an indicator of a stale cache,
        # and therefore we want to restart watcher.
        if not failure and not ret:
            self._restart_watcher()
        return ret
class Etcd3(AbstractEtcd):
    """Patroni DCS implementation on top of the Etcd v3 API.

    Leadership is tied to an Etcd lease: the leader key is written with the
    lease attached, so it disappears automatically when the lease expires.
    """

    def __init__(self, config: Dict[str, Any]) -> None:
        super(Etcd3, self).__init__(config, PatroniEtcd3Client, (DeadlineExceeded, Unavailable, FailedPrecondition))
        self.__do_not_watch = False
        self._lease = None
        self._last_lease_refresh = 0

        self._client.configure(self)
        # patronictl doesn't need the background watcher or a lease.
        if not self._ctl:
            self._client.start_watcher()
            self.create_lease()

    @property
    def _client(self) -> PatroniEtcd3Client:
        """Typed accessor for the underlying client (helps static checkers)."""
        if TYPE_CHECKING:  # pragma: no cover
            assert isinstance(self._abstract_client, PatroniEtcd3Client)
        return self._abstract_client

    def set_socket_options(self, sock: socket.socket,
                           socket_options: Optional[Collection[Tuple[int, int, int]]]) -> None:
        """Enable TCP keepalive on new connections (ignores *socket_options*)."""
        if TYPE_CHECKING:  # pragma: no cover
            assert self._retry.deadline is not None
        enable_keepalive(sock, self.ttl, int(self.loop_wait + self._retry.deadline))

    def set_ttl(self, ttl: int) -> Optional[bool]:
        """Apply a new TTL; when it changed, drop the lease so a fresh one
        with the new TTL will be granted."""
        self.__do_not_watch = super(Etcd3, self).set_ttl(ttl)
        if self.__do_not_watch:
            self._lease = None
        return None

    def _do_refresh_lease(self, force: bool = False, retry: Optional[Retry] = None) -> bool:
        """Keep the lease alive, granting a new one when necessary.

        Returns True when a brand-new lease was granted (the old one was lost).
        """
        # Rate-limit refreshes to once per loop_wait unless forced.
        if not force and self._lease and self._last_lease_refresh + self._loop_wait > time.time():
            return False

        # A failed keepalive means the lease no longer exists.
        if self._lease and not self._client.lease_keepalive(self._lease, retry):
            self._lease = None

        ret = not self._lease
        if ret:
            self._lease = self._client.lease_grant(self._ttl, retry)

        self._last_lease_refresh = time.time()
        return ret

    def refresh_lease(self) -> bool:
        """Refresh (or grant) the lease with retries; raise Etcd3Error on failure."""
        try:
            return self.retry(self._do_refresh_lease)
        except (Etcd3ClientError, RetryFailedError):
            logger.exception('refresh_lease')
        raise Etcd3Error('Failed to keepalive/grant lease')

    def create_lease(self) -> None:
        """Block until a lease could be granted (used during startup)."""
        while not self._lease:
            try:
                self.refresh_lease()
            except Etcd3Error:
                logger.info('waiting on etcd')
                time.sleep(5)

    @property
    def cluster_prefix(self) -> str:
        # The Citus coordinator watches the whole scope (all groups).
        return self._base_path + '/' if self.is_citus_coordinator() else self.client_path('')

    @staticmethod
    def member(node: Dict[str, str]) -> Member:
        """Build a Member from a decoded member-key node."""
        return Member.from_node(node['mod_revision'], os.path.basename(node['key']), node['lease'], node['value'])

    def _cluster_from_nodes(self, nodes: Dict[str, Any]) -> Cluster:
        """Assemble a Cluster object from the decoded nodes of one scope."""
        # get initialize flag
        initialize = nodes.get(self._INITIALIZE)
        initialize = initialize and initialize['value']

        # get global dynamic configuration
        config = nodes.get(self._CONFIG)
        config = config and ClusterConfig.from_node(config['mod_revision'], config['value'])

        # get timeline history
        history = nodes.get(self._HISTORY)
        history = history and TimelineHistory.from_node(history['mod_revision'], history['value'])

        # get last know leader lsn and slots
        status = nodes.get(self._STATUS)
        if status:
            try:
                status = json.loads(status['value'])
                last_lsn = status.get(self._OPTIME)
                slots = status.get('slots')
            except Exception:
                slots = last_lsn = None
        else:
            # Fall back to the legacy optime key when there is no status key.
            last_lsn = nodes.get(self._LEADER_OPTIME)
            last_lsn = last_lsn and last_lsn['value']
            slots = None

        try:
            last_lsn = int(last_lsn or '')
        except Exception:
            last_lsn = 0

        # get list of members
        members = [self.member(n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]

        # get leader
        leader = nodes.get(self._LEADER)
        if not self._ctl and leader and leader['value'] == self._name and self._lease != leader.get('lease'):
            logger.warning('I am the leader but not owner of the lease')

        if leader:
            member = Member(-1, leader['value'], None, {})
            member = ([m for m in members if m.name == leader['value']] or [member])[0]
            leader = Leader(leader['mod_revision'], leader['lease'], member)

        # failover key
        failover = nodes.get(self._FAILOVER)
        if failover:
            failover = Failover.from_node(failover['mod_revision'], failover['value'])

        # get synchronization state
        sync = nodes.get(self._SYNC)
        sync = SyncState.from_node(sync and sync['mod_revision'], sync and sync['value'])

        # get failsafe topology
        failsafe = nodes.get(self._FAILSAFE)
        try:
            failsafe = json.loads(failsafe['value']) if failsafe else None
        except Exception:
            failsafe = None

        return Cluster(initialize, config, leader, last_lsn, members, failover, sync, history, slots, failsafe)

    def _cluster_loader(self, path: str) -> Cluster:
        """Load a single cluster: strip *path* from keys and build the Cluster."""
        nodes = {node['key'][len(path):]: node
                 for node in self._client.get_cluster(path)
                 if node['key'].startswith(path)}
        return self._cluster_from_nodes(nodes)

    def _citus_cluster_loader(self, path: str) -> Dict[int, Cluster]:
        """Load all Citus group clusters living under the base path."""
        clusters: Dict[int, Dict[str, Dict[str, Any]]] = defaultdict(dict)
        path = self._base_path + '/'
        for node in self._client.get_cluster(path):
            key = node['key'][len(path):].split('/', 1)
            if len(key) == 2 and citus_group_re.match(key[0]):
                clusters[int(key[0])][key[1]] = node
        return {group: self._cluster_from_nodes(nodes) for group, nodes in clusters.items()}

    def _load_cluster(
            self, path: str, loader: Callable[[str], Union[Cluster, Dict[int, Cluster]]]
    ) -> Union[Cluster, Dict[int, Cluster]]:
        """Run *loader* with uniform error handling; raises Etcd3Error when
        Etcd is not responding properly."""
        cluster = None
        try:
            cluster = loader(path)
        except UnsupportedEtcdVersion:
            raise
        except Exception as e:
            self._handle_exception(e, 'get_cluster', raise_ex=Etcd3Error('Etcd is not responding properly'))
        self._has_failed = False
        if TYPE_CHECKING:  # pragma: no cover
            assert cluster is not None
        return cluster

    @catch_etcd_errors
    def touch_member(self, data: Dict[str, Any]) -> bool:
        """Publish this member's state under its member key, bound to our lease."""
        try:
            self.refresh_lease()
        except Etcd3Error:
            return False

        cluster = self.cluster
        member = cluster and cluster.get_member(self._name, fallback_to_leader=False)

        # Skip the write when nothing changed and the key is on the current lease.
        if member and member.session == self._lease and deep_compare(data, member.data):
            return True

        value = json.dumps(data, separators=(',', ':'))
        try:
            return bool(self._client.put(self.member_path, value, self._lease))
        except LeaseNotFound:
            self._lease = None
            logger.error('Our lease disappeared from Etcd, can not "touch_member"')
            return False

    @catch_etcd_errors
    def take_leader(self) -> bool:
        """Unconditionally write the leader key with our name and lease."""
        return self.retry(self._client.put, self.leader_path, self._name, self._lease)

    def _do_attempt_to_acquire_leader(self, retry: Retry) -> bool:
        """Try to create the leader key; on LeaseNotFound grant a new lease
        once and retry within the remaining deadline."""
        def _retry(*args: Any, **kwargs: Any) -> Any:
            kwargs['retry'] = retry
            return retry(*args, **kwargs)

        try:
            # create_revision='0' means the key may not exist yet.
            return _retry(self._client.put, self.leader_path, self._name, self._lease, create_revision='0')
        except LeaseNotFound:
            logger.error('Our lease disappeared from Etcd. Will try to get a new one and retry attempt')
            self._lease = None

            retry.ensure_deadline(0)
            _retry(self._do_refresh_lease)

            retry.ensure_deadline(1, Etcd3Error('_do_attempt_to_acquire_leader timeout'))
            return _retry(self._client.put, self.leader_path, self._name, self._lease, create_revision='0')

    @catch_return_false_exception
    def attempt_to_acquire_leader(self) -> bool:
        """Refresh the lease and try to create the leader key atomically."""
        retry = self._retry.copy()

        def _retry(*args: Any, **kwargs: Any) -> Any:
            kwargs['retry'] = retry
            return retry(*args, **kwargs)

        self._run_and_handle_exceptions(self._do_refresh_lease, retry=_retry)
        retry.ensure_deadline(1, Etcd3Error('attempt_to_acquire_leader timeout'))
        ret = self._run_and_handle_exceptions(self._do_attempt_to_acquire_leader, retry, retry=None)
        if not ret:
            logger.info('Could not take out TTL lock')
        return ret

    @catch_etcd_errors
    def set_failover_value(self, value: str, version: Optional[str] = None) -> bool:
        return bool(self._client.put(self.failover_path, value, mod_revision=version))

    @catch_etcd_errors
    def set_config_value(self, value: str, version: Optional[str] = None) -> bool:
        return bool(self._client.put(self.config_path, value, mod_revision=version))

    @catch_etcd_errors
    def _write_leader_optime(self, last_lsn: str) -> bool:
        return bool(self._client.put(self.leader_optime_path, last_lsn))

    @catch_etcd_errors
    def _write_status(self, value: str) -> bool:
        return bool(self._client.put(self.status_path, value))

    @catch_etcd_errors
    def _write_failsafe(self, value: str) -> bool:
        return bool(self._client.put(self.failsafe_path, value))

    @catch_return_false_exception
    def _update_leader(self, leader: Leader) -> bool:
        """Confirm leadership for another HA cycle.

        If the leader key is bound to a different (old) lease we rebind it to
        the current one via a transaction; otherwise a live lease is enough.
        """
        retry = self._retry.copy()

        def _retry(*args: Any, **kwargs: Any) -> Any:
            kwargs['retry'] = retry
            return retry(*args, **kwargs)

        self._run_and_handle_exceptions(self._do_refresh_lease, True, retry=_retry)

        if self._lease and leader.session != self._lease:
            retry.ensure_deadline(1, Etcd3Error('update_leader timeout'))

            fields = {'key': base64_encode(self.leader_path), 'value': base64_encode(self._name), 'lease': self._lease}

            # First we try to update lease on existing leader key "hoping" that we still owning it
            compare1 = {'key': fields['key'], 'target': 'VALUE', 'value': fields['value']}
            request_put = {'request_put': fields}

            # If the first comparison failed we will try to create the new leader key in a transaction
            compare2 = {'key': fields['key'], 'target': 'CREATE', 'create_revision': '0'}
            request_txn = {'request_txn': {'compare': [compare2], 'success': [request_put]}}

            ret = self._run_and_handle_exceptions(self._client.txn, compare1, request_put, request_txn, retry=_retry)
            return ret.get('succeeded', False)\
                or ret.get('responses', [{}])[0].get('response_txn', {}).get('succeeded', False)

        return bool(self._lease)

    @catch_etcd_errors
    def initialize(self, create_new: bool = True, sysid: str = ""):
        """Write the initialize key; with create_new the key must not exist yet."""
        return self.retry(self._client.put, self.initialize_path, sysid, create_revision='0' if create_new else None)

    @catch_etcd_errors
    def _delete_leader(self) -> bool:
        """Remove the leader key, but only while it is still ours (guarded by
        the key's mod revision)."""
        cluster = self.cluster
        if cluster and isinstance(cluster.leader, Leader) and cluster.leader.name == self._name:
            return self._client.deleterange(self.leader_path, mod_revision=cluster.leader.version)
        return True

    @catch_etcd_errors
    def cancel_initialization(self) -> bool:
        return self.retry(self._client.deleterange, self.initialize_path)

    @catch_etcd_errors
    def delete_cluster(self) -> bool:
        """Remove every key belonging to this cluster's scope."""
        return self.retry(self._client.deleteprefix, self.client_path(''))

    @catch_etcd_errors
    def set_history_value(self, value: str) -> bool:
        return bool(self._client.put(self.history_path, value))

    @catch_etcd_errors
    def set_sync_state_value(self, value: str, version: Optional[str] = None) -> Union[str, bool]:
        """Conditionally update the sync key; return the new revision or False."""
        return self.retry(self._client.put, self.sync_path, value, mod_revision=version)\
            .get('header', {}).get('revision', False)

    @catch_etcd_errors
    def delete_sync_state(self, version: Optional[str] = None) -> bool:
        return self.retry(self._client.deleterange, self.sync_path, mod_revision=version)

    def watch(self, leader_version: Optional[str], timeout: float) -> bool:
        """Sleep until something relevant changes (event set by KVCache) or
        *timeout* expires."""
        if self.__do_not_watch:
            self.__do_not_watch = False
            return True

        # We want to give a bit more time to non-leader nodes to synchronize HA loops
        if leader_version:
            timeout += 0.5

        try:
            return super(Etcd3, self).watch(None, timeout)
        finally:
            self.event.clear()
| [
"noreply@github.com"
] | CyberDem0n.noreply@github.com |
dd6237f2c14a30509e309d9576f6769706692ea6 | 1d28798ea14ed6d62d2e911ffe7a9e115d579cab | /chat/pagination.py | d7f5746c6df2ee60641d63828ed8601b466a4beb | [
"MIT"
] | permissive | moewahed/VMSConsulting.com | b4fb97396ee1ed9a8e1e4b86acda053aab75e3f6 | dd4539a285980c1e417f908fb6238743d4d786bf | refs/heads/master | 2022-12-15T01:38:46.011147 | 2020-08-31T23:57:19 | 2020-08-31T23:57:19 | 291,476,201 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from rest_framework import pagination
class StandardResultsSetPagination(pagination.PageNumberPagination):
    """DRF page-number pagination with a client-selectable page size."""

    # Default number of results per page.
    page_size = 10
    # Query-string parameter that selects the page, e.g. ?page=2.
    page_query_param = 'page'
    # Query-string parameter that lets clients override page_size, e.g. ?per_page=50.
    page_size_query_param = 'per_page'
    # Upper bound for the client-requested page size.
    max_page_size = 1000
| [
"moewahed@gmail.com"
] | moewahed@gmail.com |
dfea3f2eb8c451bfd5519e2debc89105a227d3b5 | 27ac76acbad016d2f3530ebb85155d7582a0d52b | /todolist/urls.py | 9e50475f072c9a7c16ae8e6f631d4b8159dc80e1 | [] | no_license | NurUcar/to-do-list | 83cac0172f883ed866c3e01e612fba9a9d6b9a35 | fff25993f497ad8ae2ece980d9a59d610d918e18 | refs/heads/master | 2023-04-03T19:10:31.124705 | 2021-04-15T13:33:59 | 2021-04-15T13:33:59 | 357,266,154 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | from django.urls import path
from . import views

# Route table for the todolist app: auth pages plus list/item CRUD,
# search, filter and ordering endpoints.
urlpatterns = [
    path("register/", views.register, name="register"),
    path("", views.todolist, name="todolist"),
    path("login/", views.login, name="login"),
    path("logout", views.logout, name="logout"),
    # NOTE(review): the name "todolist" is registered twice ("" above and
    # "todolist/" here) — confirm which one reverse("todolist") should pick.
    path("todolist/", views.todolist, name="todolist"),
    path("todolist/addList", views.addtodo, name="addtodo"),
    path("todolist/deleteList/<int:id>", views.deleteToDo, name="deleteToDo"),
    path("todolist/deleteItem/<int:id>", views.deleteItem, name="deleteItem"),
    path("todolist/additem", views.addItem, name="addItem"),
    path("todolist/search", views.searchItem, name="searchItem"),
    path("todolist/filter", views.filterItem, name="filterItem"),
    path("todolist/order", views.orderItem, name="orderItem"),
]
| [
"nur0ucar@gmail.com"
] | nur0ucar@gmail.com |
4a51ed57b20b68d48f5ade294c9829e567263349 | b2a20950bf85d22284733adb0e0984fa6d24fb51 | /tap_marketman/helpers.py | e45747371829b4c69f4e074583039ee4c9a3ae0e | [] | no_license | Mashey/tap-marketman | 73036157ae222aa089bb029fd4b0629476e0392c | fff170f46457b36f4b0c1b8642f9e499007ba832 | refs/heads/main | 2023-03-18T15:28:59.983917 | 2021-03-05T16:54:41 | 2021-03-05T16:54:41 | 331,840,164 | 0 | 0 | null | 2021-03-05T16:54:42 | 2021-01-22T04:52:55 | Python | UTF-8 | Python | false | false | 1,015 | py | def create_guid_list(guid_data):
    # Body of create_guid_list(guid_data); its `def` line is fused into the
    # corrupted previous line of this dump.
    # Concatenates the GUIDs extracted from each top-level collection of the
    # guid_data dict: Buyers, Vendors and Chains.
    buyers = extract_guid_buyers(guid_data)
    vendors = extract_guid_vendors(guid_data)
    chains = extract_guid_chains(guid_data)
    guid_list = buyers + vendors + chains
    return guid_list
def extract_guid_buyers(guid_data):
    """Return the 'Guid' of every entry in guid_data['Buyers'].

    An empty 'Buyers' list yields []; a missing 'Buyers' key raises KeyError,
    matching the original behavior.
    """
    # Iterating an empty list already yields [], so the original explicit
    # len() == 0 guard was redundant; a comprehension replaces the manual
    # append loop.
    return [buyer['Guid'] for buyer in guid_data['Buyers']]
def extract_guid_vendors(guid_data):
    """Return the 'Guid' of every entry in guid_data['Vendors'].

    An empty 'Vendors' list yields []; a missing 'Vendors' key raises
    KeyError, matching the original behavior.
    """
    # Same simplification as extract_guid_buyers: the emptiness guard and the
    # manual append loop collapse into one comprehension.
    return [vendor['Guid'] for vendor in guid_data['Vendors']]
def extract_guid_chains(guid_data):
    """Return the 'Guid' of every buyer under the FIRST chain in guid_data['Chains'].

    Returns [] when the chain list is empty (the guard is required here:
    Chains[0] would raise IndexError otherwise).
    NOTE(review): only Chains[0] is inspected, mirroring the original code
    (which had a commented-out loop over all chains) — confirm whether later
    chains should also contribute.
    """
    chains = guid_data['Chains']
    if not chains:
        return []
    return [buyer['Guid'] for buyer in chains[0]['Buyers']]
| [
"jordan.wallace.williams@gmail.com"
] | jordan.wallace.williams@gmail.com |
730156db38b00886d662ca9a2fcc549a3c6d129f | 469debd9d7d6787d0436a56dc564710292a8e6e8 | /pythonStack/fundamentals/fooAndBar.py | 42e8c3daf9c47bec8c145f62036c0fb78b6c234b | [] | no_license | sharonanchel/codingDojo | d97432a34384970ecdc42cbe2f0cbf6479bff7f0 | 32ba96918b18ab9d0399d4e1169761f9bc8b0c92 | refs/heads/master | 2021-01-20T16:10:57.612794 | 2017-05-25T02:45:36 | 2017-05-25T02:45:36 | 90,824,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | def fooBar():
    # Body of fooBar(); the `def` line is fused into the corrupted previous
    # line of this dump. Python 2 print statements.
    # Print every number from 1 to 100; primes are additionally tagged 'Foo'.
    for i in range (1,101):
        print i
        if Prime(i):
            print 'Foo'
        # The 'Bar'/'FooBar' branches of the exercise are disabled because
        # Square() is commented out below.
        # elif Square(i):
        #     print 'Bar'
        # else:
        #     print 'FooBar'
def Prime(x):
    """Return True when x is a prime number, False otherwise.

    Fixes the original, which fell through its (empty) divisor loop and
    returned True for 0, 1 and negative numbers.
    """
    # Numbers below 2 are not prime by definition.
    if x < 2:
        return False
    # Trial division over all candidate divisors (sufficient for the 1..100
    # range this exercise uses).
    for i in range(2, x):
        if x%i == 0:
            return False
    return True
# def Square(x):
# for i in range(2, (x/2)+1):
# if i * i == x:
# return True
# return False
# Run the exercise when the file is executed.
fooBar()
| [
"noreply@github.com"
] | sharonanchel.noreply@github.com |
0e28951cc42d5f70bfc63868f34fa191421dca04 | 2b17a73a7d8daefc1aa1445b7a61f8d7bec5f4bb | /venv/Scripts/pip3.6-script.py | 5057f464f0975ca2aa4964e700f9d9513ceb69fc | [] | no_license | david191624/CLRC663_Evaluation | 23b351a5c70d2ae0ab5b1ea14d4511c0bfcf58d5 | 2757bdedcace27b926760483f1313742081acb4b | refs/heads/master | 2021-05-19T04:33:14.758360 | 2020-03-31T07:29:12 | 2020-03-31T07:29:12 | 251,530,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | #!C:\Users\ASUS\PycharmProjects\CLRC663_Evaluation\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
# Auto-generated setuptools console-script wrapper (see the
# EASY-INSTALL-ENTRY-SCRIPT marker above); do not edit by hand.
# Pin the distribution this entry point belongs to.
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' wrapper suffix so pip sees its own name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
    )
| [
"ASUS@ASUS-PC.raspberrypi"
] | ASUS@ASUS-PC.raspberrypi |
8a65d9aee9a45241ea754e4a4551bb92d28dc1d7 | 1beec03569ad04cad74e341856b13bf50b7b0baf | /46exercises/sum_multiply.py | f60d661ab8fab4c2e5cf3371deb300ac8c61d5de | [] | no_license | billforthewynn/PythonProjects | 59810be52513f7a500ef24844764ea2b7a9be96b | 25f8072138ddd1f9ea4e3b7653d5ae3be3e8b2ad | refs/heads/master | 2020-12-02T19:37:19.842195 | 2015-06-30T04:49:16 | 2015-06-30T04:49:16 | 34,772,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # Define a function sum() and a function multiply() that sums and multiplies (respectively)
# all the numbers in a list of numbers.
# For example, sum([1, 2, 3, 4]) should return 10, and multiply([1, 2, 3, 4]) should return 24.
# Sample input used by the demo prints below.
input_list = [1,2,3,4]
def sum(numbers):
    """Return the total of all values in *numbers* (0 for an empty list).

    NOTE: intentionally shadows the built-in sum(), because the exercise
    asks for this exact name.
    """
    total = 0
    for value in numbers:
        total += value
    return total
# Demo (Python 2 print statement): expected output 10.
print sum(input_list)
def multiply(numbers):
    """Return the product of all values in *numbers* (1 for an empty list)."""
    product = 1
    for value in numbers:
        product *= value
    return product
print multiply(input_list) | [
"bill@weareclutch.com"
] | bill@weareclutch.com |
7ed1d2a72df2ebaadbe2e3d265bbaedfa8b871a9 | d823a19caa38d20b7614c790ad4ce706f0116594 | /driving.py | 16b2d9d3314a70facd2227bf14e49f53649bbac9 | [] | no_license | xrongking/driving | f88159ed7491cffa2e84566f0a378151e7ed31eb | 593071010b9db41e4faa33e8a72aeb062bb04d50 | refs/heads/master | 2022-09-04T00:41:18.766678 | 2020-05-27T07:35:58 | 2020-05-27T07:35:58 | 267,242,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | country = input('請問你是哪國人:')
# driving.py — read the user's age from stdin and report whether they may
# take a driving-licence test (Taiwan: 18+, USA: 16+). `country` is read on
# the previous line, which is fused with non-code residue in this dump.
age = input('請輸入年齡:')
# NOTE(review): int() raises ValueError for non-numeric input; no validation here.
age = int(age)
if country == '台灣' or country == '臺灣':
    if age >= 18:
        print('你可以考駕照')
    else:
        print('你還不能考駕照')
elif country == '美國':
    if age >= 16:
        print('你可以考駕照')
    else:
        print('你還不能考駕照')
else:
print('國家請輸入台灣或美國') | [
"xrongking@gmail.com"
] | xrongking@gmail.com |
fd7b5ab02d8f88fc3bc6fc51c6eb72e6884841f8 | 89bfa644209978bd36e41cc3e89278f7f48ef6ad | /checkout/__init__.py | 9d171aaad2d5087b52a7bf96ac75b568886ef2fb | [] | no_license | Code-Institute-Submissions/usupplement | 472f9ff9e324c6c003b7a39dad67f115cc461901 | 07641acd48d4505e18bf32d306149429068659f3 | refs/heads/master | 2023-01-04T05:49:58.887080 | 2020-10-31T09:31:21 | 2020-10-31T09:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | """Telling django the name of the default config class for the app (in apps.py)
Without this line django would not know about the custom ready method so our
signals would not work"""
default_app_config = 'checkout.apps.CheckoutConfig' | [
"anders242@gmail.com"
] | anders242@gmail.com |
d3bf2efbc8123260a779e50f1f45a7b8462de621 | b86d45c04f814bad123fd75940a97b1bd95e9091 | /RockPaperScissors.py | 4fdc47810d8d56f84f6e98653cb9754e4d8e6f7e | [
"Apache-2.0"
] | permissive | wmeueleleyb/Rock-Paper-Scissors | ea2d321d7df5315d470c31eb82c6e0fe35241b33 | 5baeda7c910e7b3e905336baabcec797a7c04b79 | refs/heads/master | 2021-03-25T21:53:12.101677 | 2020-04-21T16:49:17 | 2020-04-21T16:49:17 | 247,647,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,690 | py | import random, os, time
# ASCII-art frames for the player's hand; show_hands() slices these strings
# into fixed-width rows, so their exact spacing matters.
# NOTE(review): whitespace inside these literals may have been collapsed by
# the dump this file came from — verify the slice offsets ([0:17], [18:35],
# ...) against the original before relying on them.
P_hand = {'rock':"---' ____) | (_____) | (_____) | (____) | ---.__(___) ",
          'paper':"---' ____) | _________) | _________)| ________) |---.__________) ",
          'scissors':"---' ____) | _________) | _________)| (____) |---.__(___) "}
# Mirrored frames for the computer's hand.
CPU_hand = {'rock':" (____ '--- |(_____) |(_____) | (____) | (___)__.--- ",
            'paper':" (____ '---| (_________ |(_________ | (________ | (__________.--- ",
            'scissors':" (____ '--- | (_________ |(_________ | (____) | (___)__.--- "}
# Frames currently on screen and the running scores.
move, c_move = 'rock', 'rock'
p_score, c_score = 0,0
GameOver = False
# Toggled between '' and '\n' by the animation loop to make the hands "shake".
line = ''
# Windows console colour (no effect elsewhere).
os.system('color c')
print('Rock, Paper, Scissors')
print("___________\nInstructions: Let's play a game of rock, paper, scissors.\nif you're not familiar, the rules are simple.")
print('ROCK beats SCISSORS | SCISSORS beats PAPER | PAPER beats ROCK.\nFirst one to reach a score of 3 wins.')
print('\nTo begin type your name')
print('>',end = '')
name = input()
def show_hands():
    """Redraw the scoreboard and both ASCII hands.

    Reads the module globals ``name``, ``p_score``, ``c_score``, ``line``,
    ``move`` and ``c_move``; takes no arguments and returns nothing.
    """
    os.system('cls')  # Windows-only: clear the console between frames.
    print('\n')
    print(' '+name+': '+str(p_score)+' cpu: '+str(c_score)+'\n\n')
    print(line,end = '')
    # The art strings are printed as five rows; the slice bounds skip one
    # character between rows (indices 17, 35, 53, 71) -- presumably
    # separators embedded in the art strings.  TODO confirm against the art.
    print(' '+P_hand[move][0:17]+' '+CPU_hand[c_move][0:17])
    print(' '+P_hand[move][18:35]+' '+CPU_hand[c_move][18:35])
    print(' '+P_hand[move][36:53]+' '+CPU_hand[c_move][36:53])
    print(' '+P_hand[move][54:71]+' '+CPU_hand[c_move][54:71])
    print(' '+P_hand[move][72:-1]+' '+CPU_hand[c_move][72:-1]+'\n')
# Main game loop: one iteration per round, until one side reaches 3 points
# and the player declines a rematch.
while GameOver == False:
    show_hands()
    print('Rock, Paper, or Scissors?')
    print('>',end = '')
    p_move = input().lower()
    # Re-prompt until the player types a valid move name.
    # NOTE(review): the outer `if` is redundant -- the `while` alone would do.
    if p_move != 'rock' and p_move != 'paper' and p_move != 'scissors':
        while p_move != 'rock' and p_move != 'paper' and p_move != 'scissors':
            print('That is not a move please try again')
            print('>',end = '')
            p_move = input().lower()
    # "Shake" animation: both hands show rock and bounce six times by
    # alternating the `line` prefix between '\n' and ''.
    for i in range(6):
        move, c_move = 'rock','rock'
        if i % 2 == 0:
            line = '\n'
            show_hands()
            time.sleep(0.2)
        else:
            line = ''
            show_hands()
            time.sleep(0.2)
    # Reveal the real moves and score the round; ties score nothing.
    move = p_move
    c_move = random.choice(['rock','paper','scissors'])
    if c_move == 'rock' and move == 'scissors': c_score += 1
    elif c_move == 'rock' and move == 'paper': p_score += 1
    elif c_move == 'paper' and move == 'rock': c_score += 1
    elif c_move == 'paper' and move == 'scissors': p_score += 1
    elif c_move == 'scissors' and move == 'rock': p_score += 1
    elif c_move == 'scissors' and move == 'paper': c_score += 1
    show_hands()
    # First to three wins; offer a rematch and reset all state if accepted.
    if p_score == 3:
        print(name+' wins')
        print('\nwould you like to play again? (y/n)')
        print('>',end = '')
        Continue = input().lower()
        if Continue == 'y':
            move, c_move = 'rock','rock'
            p_score, c_score = 0,0
            GameOver = False
        else: GameOver = True
    elif c_score == 3:
        print('PC wins')
        print('\nwould you like to play again? (y/n)')
        print('>',end = '')
        Continue = input().lower()
        if Continue == 'y':
            move, c_move = 'rock','rock'
            p_score, c_score = 0,0
            GameOver = False
        else: GameOver = True
| [
"noreply@github.com"
] | wmeueleleyb.noreply@github.com |
3436e2de6efc23facd03cab96d2b1b243b308c5f | 4157b25ec6f4f5a9f2937e9d56f66d704582d2e6 | /autorecord.py | 41f83b0d84f329913024d1200ec2dc5eeddb3ae7 | [] | no_license | Pike96/SNH48-MemberLive-AutoRecordTrim | dd4e397374ec6f8f7c1a18ce74ab07e6aa2108ce | b9c497fec7e58bdfbfb2e43ad8513f1cbf343e38 | refs/heads/master | 2021-05-15T14:30:33.743961 | 2017-11-07T05:33:02 | 2017-11-07T05:33:02 | 107,204,436 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | from urllib import request
import json
import time
import re
import ffmpeg
# import subprocess
# from win10toast import ToastNotifier
# Endpoint of the "Pocket 48" member-live listing API.
api48 = 'https://plive.48.cn/livesystem/api/live/v1/memberLivePage'
# Request headers imitating the official Android mobile client --
# presumably required for the API to accept the request (not verified).
headers = {
    'os': 'android',
    'User-Agent': 'Mobile_Pocket',
    'IMEI': '864394020228161',
    'token': '0',
    'version': '5.0.1',
    'Content-Type': 'application/json;charset=utf-8',
    'Host': 'plive.48.cn',
    'Connection': 'Keep-Alive',
    'Accept-Encoding': 'gzip'
}
def postform(url, form, headers):
    """POST *form* (a JSON-formatted string) to *url* and return the body.

    The body is decoded as UTF-8.  The HTTP response is closed
    deterministically via a context manager instead of being left to the
    garbage collector (resource-leak fix over the original version).
    """
    data = bytes(form, encoding='utf8')
    req = request.Request(url=url, data=data, headers=headers, method='POST')
    with request.urlopen(req) as response:
        return response.read().decode('utf-8')
def record(name):
    """Poll the live API once and record any live whose title contains *name*.

    Returns 0 when the API call fails or the response has no live list;
    otherwise records each matching stream with ffmpeg (``ffmpeg.run``
    blocks until the stream ends).
    """
    last_stamp = int(time.time() * 1000)
    form = '{"lastTime":%s,"limit":20,"groupId":0,"memberId":0,"type":0,"giftUpdTime":1490857731000}' % \
           str(last_stamp)
    try:
        response_json = postform(api48, form, headers)
        response_dict = json.loads(response_json)
        print('Received ' + time.strftime("%b%d-%H-%M-%S", time.localtime(last_stamp / 1000)))
    except Exception:
        # `except Exception` (not a bare except) so that Ctrl-C still stops
        # the endless polling loop in main().
        print('Error in getting response ' + time.strftime("%b%d-%H-%M-%S", time.localtime(last_stamp / 1000)))
        return 0
    try:
        live_list = response_dict['content']['liveList']
    except (KeyError, TypeError):
        # Payload has no live list (missing key or a null 'content').
        return 0
    for live_item in live_list:
        title = live_item['title']
        check = re.match("(.)*{}(.)*".format(name), title)
        if check is not None:
            print(live_item['title'] + time.strftime("%b%d-%H-%M-%S", time.localtime(last_stamp / 1000)))
            fname = '{}{}.mp4'.format(live_item['title'],
                                      time.strftime("%b%d-%H-%M-%S", time.localtime(last_stamp / 1000)))
            stream = ffmpeg.input(live_item["streamPath"])
            stream = ffmpeg.output(stream, fname)
            try:
                # Blocks until the stream ends or ffmpeg fails.
                ffmpeg.run(stream)
            except Exception:
                print('Error in recording')
def main():
    """Entry point: poll the live API for the target member every 10 s."""
    # name = input("<member name>:")  -- interactive prompt, left disabled
    name = "刘崇恬"
    while True:
        record(name)
        time.sleep(10)
    return 0  # unreachable; kept from the original for parity


if __name__ == '__main__':
    main()
| [
"linyucheng256@live.com"
] | linyucheng256@live.com |
9a767b292f26093cfbd4cb44f69dfb7b041ba7c8 | d3d318876d0393e1349308c93c205febc4636677 | /WebsiteScrap/spiders/web_spider.py | e3c1fc16f994cbbd33e5cf93d954a9e332ab76b5 | [] | no_license | apodimi/webscraping | 65d783c086912e446325a6097b50ef9f6d5a9ba8 | 0de001dcb70b4b4851085b41d56ab07d2a756ce3 | refs/heads/master | 2022-07-28T12:10:37.417412 | 2020-05-23T22:03:47 | 2020-05-23T22:03:47 | 266,422,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | #WARNING!!!!!
# NOT ALL WEBSITES ALLOW SCRAPING. BEFORE USING THIS SPIDER, MAKE SURE THE TARGET WEBSITE PERMITS AUTOMATED EXPORT OF ITS DATA.
# -*- coding: utf-8 -*-
import scrapy
from ..items import WebsitescrapItem
class WebSpiderSpider(scrapy.Spider):
    """Spider that collects product names, prices and image URLs from a
    single listing page."""
    name = 'web_spider'
    start_urls = ['# ADD THE URL FROM THE PAGE WHERE YOU WANT TO TAKE THE DATA']

    def parse(self, response):
        """Extract the product fields from *response* and yield one item
        holding the parallel lists of names, prices and image sources."""
        item = WebsitescrapItem()
        item['product_name'] = response.xpath(
            '//*[contains(concat( " ", @class, " " ), concat( " ", "a-color-base", " " )) and contains(concat( " ", @class, " " ), concat( " ", "a-text-normal", " " ))]'
        ).css('::text').extract()
        item['product_price'] = response.css('.a-offscreen').css('::text').extract()
        item['product_img'] = response.xpath(
            '//*[contains(concat( " ", @class, " " ), concat( " ", "s-image", " " ))]/@src'
        ).getall()
        yield item
| [
"noreply@github.com"
] | apodimi.noreply@github.com |
8e5c571a42fe547309ed64d7e63a0cf4b1426f1d | 889605ad1becb0bcd758ef797aa5cf3d9c35ccfa | /openstack_plugin/tests/network/test_security_group_rule.py | 18ca67d44485652ccf6dbbbfec61e8fa72fed4d1 | [] | no_license | Bit-Coding/cloudify-openstack-plugin | 5bf87b0f67a7bfa68b2a300eaada26fdee06e26a | 93227bd0964b98a2940bfb44b01a974e83b9ba65 | refs/heads/master | 2022-06-28T12:57:30.365300 | 2020-05-06T07:49:14 | 2020-05-06T07:49:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,440 | py | # #######
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Third party imports
import mock
import openstack.network.v2.security_group_rule
# Local imports
from openstack_plugin.tests.base import OpenStackTestBase
from openstack_plugin.resources.network import security_group_rule
from openstack_plugin.constants import (RESOURCE_ID,
OPENSTACK_NAME_PROPERTY,
OPENSTACK_TYPE_PROPERTY,
SECURITY_GROUP_RULE_OPENSTACK_TYPE)
@mock.patch('openstack.connect')
class SecurityGroupRuleTestCase(OpenStackTestBase):
    """Unit tests for the security-group-rule create/delete/list/validate
    operations, with the openstack SDK connection mocked out.

    The four tests previously each inlined their own copy of the same
    14-field SecurityGroupRule fixture dict; those copies are now built by
    :meth:`_security_group_rule_instance`.
    """

    def setUp(self):
        super(SecurityGroupRuleTestCase, self).setUp()

    @property
    def resource_config(self):
        # Node-property payload used by the base class when preparing ctx.
        return {
            'description': 'security_group_rule_description',
        }

    @staticmethod
    def _security_group_rule_instance(rule_id, direction, security_group_id):
        """Build a SecurityGroupRule fixture.

        Only the attributes that differ between tests are parameters; the
        remaining attribute values are shared boilerplate.
        """
        return openstack.network.v2.security_group_rule.SecurityGroupRule(**{
            'id': rule_id,
            'created_at': '0',
            'description': '1',
            'direction': direction,
            'ethertype': '3',
            'port_range_max': '80',
            'port_range_min': '80',
            'protocol': 'tcp',
            'remote_group_id': '7',
            'remote_ip_prefix': '0.0.0.0/0',
            'revision_number': 9,
            'security_group_id': security_group_id,
            'tenant_id': '11',
            'updated_at': '12'
        })

    def _two_security_group_rules(self):
        """Fixtures shared by the list and creation-validation tests."""
        return [
            self._security_group_rule_instance(
                'a95b5509-c122-4c2f-823e-884bb559afe3', 'ingress',
                'a95b5509-c122-4c2f-823e-884bb559afe8'),
            self._security_group_rule_instance(
                'a95b5509-c122-4c2f-823e-884bb559afe2', 'egress',
                'a95b5509-c122-4c2f-823e-884bb559afe8'),
        ]

    def test_create(self, mock_connection):
        # Prepare the context for create operation
        self._prepare_context_for_operation(
            test_name='SecurityGroupRuleTestCase',
            ctx_operation_name='cloudify.interfaces.lifecycle.create')
        security_group_rule_instance = self._security_group_rule_instance(
            'a95b5509-c122-4c2f-823e-884bb559afe8', 'ingress',
            'a95b5509-c122-4c2f-823e-884bb559afe3')

        # Mock create security group rule response
        mock_connection().network.create_security_group_rule = \
            mock.MagicMock(return_value=security_group_rule_instance)

        # Call create security group rule
        security_group_rule.create()

        self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID],
                         'a95b5509-c122-4c2f-823e-884bb559afe8')

        self.assertEqual(
            self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY],
            None)

        self.assertEqual(
            self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY],
            SECURITY_GROUP_RULE_OPENSTACK_TYPE)

    def test_delete(self, mock_connection):
        # Prepare the context for delete operation
        self._prepare_context_for_operation(
            test_name='SecurityGroupRuleTestCase',
            ctx_operation_name='cloudify.interfaces.lifecycle.delete')
        security_group_rule_instance = self._security_group_rule_instance(
            'a95b5509-c122-4c2f-823e-884bb559afe8', 'ingress',
            'a95b5509-c122-4c2f-823e-884bb559afe3')

        # Mock delete security group rule response
        mock_connection().network.delete_security_group_rule = \
            mock.MagicMock(return_value=None)

        # Mock get security group rule response
        mock_connection().network.get_security_group = \
            mock.MagicMock(return_value=security_group_rule_instance)

        # Call delete security group rule
        security_group_rule.delete()

        # All bookkeeping attributes must be removed on delete.
        for attr in [RESOURCE_ID,
                     OPENSTACK_NAME_PROPERTY,
                     OPENSTACK_TYPE_PROPERTY]:
            self.assertNotIn(attr, self._ctx.instance.runtime_properties)

    def test_list_security_group_rules(self, mock_connection):
        # Prepare the context for list security group rules operation
        self._prepare_context_for_operation(
            test_name='SecurityGroupRuleTestCase',
            ctx_operation_name='cloudify.interfaces.operations.list')
        security_group_rules = self._two_security_group_rules()

        # Mock list security group rules response
        mock_connection().network.security_group_rules = \
            mock.MagicMock(return_value=security_group_rules)

        # Mock find project response
        mock_connection().identity.find_project = \
            mock.MagicMock(return_value=self.project_resource)

        # Call list security group rules
        security_group_rule.list_security_group_rules()

        # Check if the security group rules list saved as runtime properties
        self.assertIn(
            'security_group_rule_list',
            self._ctx.instance.runtime_properties)

        # Check the size of security groups list
        self.assertEqual(
            len(self._ctx.instance.runtime_properties[
                'security_group_rule_list']), 2)

    @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets')
    def test_creation_validation(self, mock_quota_sets, mock_connection):
        # Prepare the context for creation validation operation
        self._prepare_context_for_operation(
            test_name='SecurityGroupRuleTestCase',
            ctx_operation_name='cloudify.interfaces.validation.creation')
        security_group_rules = self._two_security_group_rules()

        # Mock list security group rules response
        mock_connection().network.security_group_rules = \
            mock.MagicMock(return_value=security_group_rules)

        # Call list security group rules
        security_group_rule.list_security_group_rules()

        # Mock the quota size response
        mock_quota_sets.return_value = 20

        # Call creation validation
        security_group_rule.creation_validation()
| [
"mohammeda@cloudify.co"
] | mohammeda@cloudify.co |
0b65d91954c41d54a0482d36fde142c335f575ef | c1aa738b1bfbd04302efd37bf7d5a5a94ebd2c92 | /students/serializers.py | e8aaffd045635249e35fa30962bebac597ad0bdf | [] | no_license | Bhavikdarji98/School_REST_APIs | a433daaf86647d4c8b28da820221cfdbe01d6122 | 382d3ab66b6e02050302be88a76ddd15c47af6a0 | refs/heads/master | 2023-06-14T10:52:06.043766 | 2021-07-10T07:51:30 | 2021-07-10T07:51:30 | 384,642,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | from rest_framework import serializers
from students.models import Student, Teacher, Lecture
class StudentSerializer(serializers.ModelSerializer):
    """Serialize and deserialize :class:`Student` objects.

    All model fields are exposed.
    """
    class Meta:
        # ModelSerializer derives the field set from the Student model.
        model = Student
        fields = '__all__'
class TeacherSerializer(serializers.ModelSerializer):
    """Serialize and deserialize :class:`Teacher` objects.

    All model fields are exposed.
    """
    class Meta:
        # ModelSerializer derives the field set from the Teacher model.
        model = Teacher
        fields = '__all__'
class LectureSerializer(serializers.ModelSerializer):
    """Serialize and deserialize :class:`Lecture` objects.

    Students are exposed two ways: the nested read-only ``student``
    representation on output, and the write-only ``students_ids`` list of
    primary keys on input.
    """
    student = StudentSerializer(many= True, read_only= True)
    students_ids = serializers.PrimaryKeyRelatedField(
        many= True, write_only= True, queryset = Student.objects.all()
    )

    class Meta:
        model = Lecture
        fields = ['id',
                  'name',
                  'description',
                  'time',
                  'teacher',
                  'student',
                  'students_ids',]

    def create(self, validated_data):
        """Create a lecture and attach the supplied students, if any."""
        students = validated_data.pop("students_ids", None)
        lecture = Lecture.objects.create(**validated_data)
        if students:
            lecture.student.set(students)
        return lecture

    def update(self, instance, validated_data):
        """Update a lecture; add any supplied students to its student set.

        Fixes over the original version: a missing ``students_ids`` no
        longer crashes (the old code iterated ``None``), a student id that
        no longer exists is skipped instead of re-adding the previous
        iteration's student (stale-variable bug), and the debug ``print``
        is removed.
        """
        students = validated_data.pop('students_ids', None)
        instance = super(LectureSerializer, self).update(instance, validated_data)
        if students:
            for s in students:
                obj = Student.objects.filter(id=s.id)
                if obj.exists():
                    instance.student.add(obj.first().id)
return instance | [
"bhavikdarji.bd24@gmail.com"
] | bhavikdarji.bd24@gmail.com |
4df3ceca1f9d06815d43914cad8c76bf3d206085 | cd78d84441e69c1fc40b6a6e9e235e7cf6882454 | /python/48.rotate_image.py | 75ea51649a271ec6a48d11d85c9fa7c4a00e2bc0 | [] | no_license | buy/leetcode | 53a12d4e0298284a5a2034c88353d0dc195aa66c | da0e834e3f2e3016396fffc96ef943ab9ec58ea4 | refs/heads/master | 2021-01-13T01:48:01.176632 | 2015-06-14T06:17:17 | 2015-06-14T06:17:17 | 31,863,627 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # You are given an n x n 2D matrix representing an image.
# Rotate the image by 90 degrees (clockwise).
# Follow up:
# Could you do this in-place?
# /*
# * clockwise rotate
# * first reverse up to down, then swap the symmetry
# * 1 2 3 7 8 9 7 4 1
# * 4 5 6 => 4 5 6 => 8 5 2
# * 7 8 9 1 2 3 9 6 3
# */
class Solution:
    # @param matrix, a list of lists of integers (n x n)
    # @return nothing; the rotation happens in place
    def rotate(self, matrix):
        """Rotate the square matrix 90 degrees clockwise, in place.

        A vertical flip (row order reversed) followed by a transpose over
        the main diagonal is exactly a clockwise quarter turn.
        """
        size = len(matrix)
        # Flip top-to-bottom by swapping row objects from the outside in.
        top, bottom = 0, size - 1
        while top < bottom:
            matrix[top], matrix[bottom] = matrix[bottom], matrix[top]
            top += 1
            bottom -= 1
        # Transpose: mirror every element below the diagonal.
        for r in range(size):
            for c in range(r):
                matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]
# /*
# * anticlockwise rotate
# * first reverse left to right, then swap the symmetry
# * 1 2 3 3 2 1 3 6 9
# * 4 5 6 => 6 5 4 => 2 5 8
# * 7 8 9 9 8 7 1 4 7
# */
| [
"cliu@groupon.com"
] | cliu@groupon.com |
0fde684849445fbe55858eeb2ef5e887e8133f7a | 856af23d9e50ff677b9414ed7146b26e4507c84c | /atcoder/ABC103/ABC103C.py | 8c3b794e7ccd4e8b2d91c9d1644aa9594aa0044a | [] | no_license | nphys0/programing-contest | 4135ed10c7fbe0bb855f488d454a40fd5b387438 | 994138fc1298af783cda113efaff1a3337ba827d | refs/heads/master | 2020-04-12T14:32:11.566594 | 2019-02-17T11:11:54 | 2019-02-17T11:11:54 | 162,529,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | N = int(input())
# The best remainder modulo m is m - 1, and every modulus can reach its
# maximum simultaneously, so the answer is the sum of (a_i - 1) over the
# first N inputs.  N is read from stdin just above this block.
values = list(map(int, input().strip().split()))
total = 0
for idx in range(N):
    total += values[idx] - 1
print(total)
| [
"46012425+nphys0@users.noreply.github.com"
] | 46012425+nphys0@users.noreply.github.com |
7661cd2a03008064257f542207323dd50e1513d1 | 4eae9225b75532162b83950a3fd3dc67f865a8bc | /python_zklib-master/GetThumpData.py | 44ad504ba9dcca450e8e252b95c816707f2990a2 | [] | no_license | syedriyaz18/NIMS | 71407b32d3469b15554ed586c27d10e07b78f1c3 | 22f75f5e2187612a44ce485e66770c8ffd47ed7f | refs/heads/master | 2020-06-16T09:58:48.788674 | 2019-07-06T11:07:15 | 2019-07-06T11:07:15 | 195,530,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,184 | py | from Tkinter import *
import MySQLdb
import tkMessageBox as tm
import sys
sys.path.append("zklib")
import zklib
import time
import zkconst
import xlsxwriter
import tkMessageBox as tm
class Attendance(Frame):
def _init_(self, master):
self.master = master
def create_widgets(self):
self.topframe = Frame(root, width=100, height=100, relief=GROOVE, borderwidth=2 )
self.topframe.pack(padx=5, pady=20)
self.welcomelbl = Label(self.topframe, text="Welcome To", width="20")
self.welcomelbl.grid(row=0, column=0, pady=5 )
self.cnamelbl = Label(self.topframe, text="Shah iSolutions", width="20", fg="RED", font="10")
self.cnamelbl.grid(row=1, column=0, pady=5 )
self.ahimslbl = Label(self.topframe, text="Attendance System", width="20")
self.ahimslbl.grid(row=2, column=0, pady=5 )
self.bottomframe = Frame(root, width=100, height=100, relief=GROOVE, borderwidth=2)
self.bottomframe.pack(padx=5, pady=60)
self.blackbutton = Button(self.bottomframe, text="Clicl Here To Get Attendance Data", fg="black", command = self.attendance_verify)
self.blackbutton.grid(row=2, column=3, pady=5)
def attendance_verify(self):
connection = MySQLdb.connect(host="localhost", user="root", passwd = "root", db="nims_store")
cursor = connection.cursor ()
#cursor.execute("select * from user_tbl where user_name= %s", (regdate1))
#row = cursor.fetchone()
zk1 = zklib.ZKLib("192.168.1.71", 4370)
zk2 = zklib.ZKLib("192.168.1.72", 4370)
ret1 = zk1.connect()
ret2 = zk2.connect()
print "Connected To Doctors Machine:", ret1
print "Connected To General Machine:", ret1
if ret1 == True and ret2 == True:
print "Pesan Disable Device", zk1.disableDevice(), zk2.disableDevice()
#print "Pesan Versi:", zk.version()
#print "Pesan Versi OS:", zk.osversion()
"""
print "Pesan Extend Format:", zk.extendFormat()
print "Pesan Extend OP Log:", zk.extendOPLog()
"""
#print "Pesan Platform:", zk.platform()
#print "Pesan Platform Version:", zk.fmVersion()
#print "Pesan Work Code:", zk.workCode()
#print "Pesan Work Code:", zk.workCode()
#print "Pesan SSR:", zk.ssr()
#print "Pesan Pin Width:", zk.pinWidth()
#print "Pesan Face Function On:", zk.faceFunctionOn()
#print "Pesan Serial Number:", zk.serialNumber()
#print "Pesan Device Name:", zk.deviceName()
"""
data_user = zk.getUser()
print "Pesan Get User:"
if data_user:
for uid in data_user:
if data_user[uid][2] == 14:
level = 'Admin'
else:
level = 'User'
#print "[UID %d]: ID: %s, Name: %s, Level: %s, Password: %s" % ( uid, data_user[uid][0], data_user[uid][1], level, data_user[uid][3] )
#print "Pesan Clear Admin:", zk.clearAdmin()
#zk.setUser(uid=61, userid='41', name='Dony Wahyu Isp', password='123456', role=zkconst.LEVEL_ADMIN)
"""
# Delete a row from a table.
print "Deleting Previous Data"
sql_command = """
DELETE FROM nims_attendance
"""
cursor.execute(sql_command)
attendance1 = zk1.getAttendance()
attendance2 = zk2.getAttendance()
print "Pesan Get Attendance:"
print "Retreiving Current Data"
if ( attendance1 ):
for lattendance1 in attendance1:
if lattendance1[1] == 15:
state1 = 'Check In'
elif lattendance1[1] == 0:
state1 = 'Check Out'
else:
state1 = 'Undefined'
# insert to table
cursor.execute("""INSERT INTO nims_attendance VALUES (%s,%s,%s,%s,%s,%s,%s)""",( lattendance1[0], lattendance1[2].date(), lattendance1[2].time(), 51, 0, 0, 0))
#print "%s, %s, %s, 51, 0, 0, 0" % ( lattendance[0], lattendance[2].date(), lattendance[2].time() )
#print "Pesan Clear Attendance:", zk.clearAttendance()
if ( attendance2 ):
for lattendance2 in attendance2:
if lattendance2[1] == 15:
state2 = 'Check In'
elif lattendance2[1] == 0:
state2 = 'Check Out'
else:
state2 = 'Undefined'
# insert to table
cursor.execute("""INSERT INTO nims_attendance VALUES (%s,%s,%s,%s,%s,%s,%s)""",( lattendance2[0], lattendance2[2].date(), lattendance2[2].time(), 54, 0, 0, 0))
#print "%s, %s, %s, 51, 0, 0, 0" % ( lattendance[0], lattendance[2].date(), lattendance[2].time() )
#print "Pesan Clear Attendance:", zk.clearAttendance()
connection.commit()
#print cursor.fetchall()
connection.close()
print "Pesan Get Time:", zk1.getTime()
print "Pesan Enable Device", zk1.enableDevice()
print "Pesan Enable Device", zk2.enableDevice()
print "Pesan Disconnect Doctors Machine:", zk1.disconnect()
print "Pesan Disconnect General Machine:", zk2.disconnect()
ermessage = "Data Retrieve Completed"
tm.showinfo("Data Completed ",ermessage)
# Build the root window: fixed 360x360 geometry, branded title and icon.
root = Tk()
root.title("Shah iSolutions")
root.iconbitmap('images/logo.ico')
root.geometry("360x360")
app = Attendance(root)
# Populate the window before entering the Tk event loop.
app.create_widgets()
root.mainloop()
| [
"sdriyaz18@gmail.com"
] | sdriyaz18@gmail.com |
2a7ecd17534e9ce6ebfd36b4b2168cfe3d21c7a2 | 03d1982e2d594f13567afb37f2a5cea2f0d631b6 | /setup.py | cde1cd7244e468fc53e94c6fb1355245c8ab6099 | [
"Apache-2.0"
] | permissive | maartendraijer/django-fluent-dashboard | e26f29d434528d3b11360549c6452812176e4ecb | 8a00fa810f001d1a778eada88b8a390f495f9994 | refs/heads/master | 2020-04-03T04:22:38.353890 | 2012-09-26T19:55:18 | 2012-09-26T19:58:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
from os.path import dirname, join
import sys, os
# When creating the sdist, make sure the django.mo file also exists:
try:
os.chdir('fluent_dashboard')
from django.core.management.commands.compilemessages import compile_messages
compile_messages(sys.stderr)
finally:
os.chdir('..')
setup(
name='django-fluent-dashboard',
version='0.4.0dev',
license='Apache License, Version 2.0',
install_requires=[
'django-admin-tools>=0.4.1', # 0.4.1 is the first release with Django 1.3 support.
],
extras_require = {
'cachestatus': ['dashboardmods>=0.2.2'],
},
description='An improved django-admin-tools dashboard for Django projects',
long_description=open(join(dirname(__file__), 'README.rst')).read(),
author='Diederik van der Boor',
author_email='opensource@edoburu.nl',
url='https://github.com/edoburu/django-fluent-dashboard',
download_url='https://github.com/edoburu/django-fluent-dashboard/zipball/master',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
]
)
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
65bb65eef08655b1bc9f00fecef269efb447b5c5 | 8e115fc8273fd7123438fa8cb85cd7b7992246f5 | /App_Login/migrations/0003_follow.py | 1e2b7e845c904aeaa10db6c63664e1517b698f1c | [] | no_license | tasim313/Social_Media_django_project | 35160f83fa278acd616f9f952ac5acd3ec6430e6 | 78cf24305a32dfe937d7fcb031ed2f78649a4775 | refs/heads/main | 2023-06-16T00:45:39.025388 | 2021-07-11T06:38:38 | 2021-07-11T06:38:38 | 384,453,963 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # Generated by Django 2.2.5 on 2021-07-07 13:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: introduces the Follow model linking a
# follower user to a followed user.  Avoid hand-editing generated
# migrations beyond comments.
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('App_Login', '0002_auto_20210706_1237'),
    ]
    operations = [
        migrations.CreateModel(
            name='Follow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                # Both sides reference the user model; deleting a user
                # cascades to their follow rows.
                ('follower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follower', to=settings.AUTH_USER_MODEL)),
                ('following', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"mostasimmahmudtasim@gmail.com"
] | mostasimmahmudtasim@gmail.com |
c321533388cef074c4a7501847d5ddca0b9ae10e | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0838-Push-Dominoes/soln-1.py | b7c9ed30d6e1a5c6ea1654056f23001653264cab | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 1,479 | py | class Solution:
def pushDominoes(self, dominoes: str) -> str:
leftRs = []
left_R = None
n = len(dominoes)
ans = [None] * n
for i, d in enumerate(dominoes):
if d == 'R':
left_R = 0
ans[i] = 'R'
elif d == 'L':
left_R = None
ans[i] = 'L'
else:
if left_R is not None:
left_R += 1
ans[i] = 'R'
else:
ans[i] = '.'
leftRs.append(left_R)
right_L = None
for i in reversed(range(n)):
d = dominoes[i]
if d == 'L':
right_L = 0
ans[i] = 'L'
elif d == 'R':
right_L = None
ans[i] = 'R'
else:
if right_L is not None:
right_L += 1
if leftRs[i] is None:
ans[i] = 'L'
else:
if leftRs[i] < right_L:
ans[i] = 'R'
elif leftRs[i] == right_L:
ans[i] = '.'
else:
ans[i] = 'L'
else:
if leftRs[i] is not None:
ans[i] = 'R'
else:
ans[i] = '.'
return ''.join(ans)
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
26905f8da50507d70a2c1e7776586c9dbe580c03 | 813a2f574bb77a1221163abd16b6ae4b8c7221b5 | /client/client.py | de00caddc3db853966f88e43abae21b447f2b3de | [] | no_license | AmitGupta7580/Voice-Call-CLI | 662ef12c06f73ec03e73f8d18d5b1567dc204f73 | de6f3f88abc91f7a94279d5019b79145a32ad3f3 | refs/heads/master | 2023-08-28T03:45:02.919439 | 2021-11-09T20:51:57 | 2021-11-09T20:51:57 | 426,293,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,137 | py | import socket
import threading
import pyaudio
# PyAudio stream settings shared by the capture (mic) and playback
# (speaker) streams: 16-bit samples, stereo, 44.1 kHz, 4096 frames/buffer.
Format = pyaudio.paInt16
Chunks = 4096
Channels = 2
Rate = 44100
class Client:
    """Full-duplex voice client: streams microphone audio to the server
    while playing back whatever the server sends."""
    def __init__(self, host):
        # connect() raises ConnectionRefusedError when the server is not
        # listening; the __main__ guard below reports that case.
        self.client = socket.socket()
        self.host = host
        self.port = 5000
        self.client.connect((self.host, self.port))
        print(f"Connected to the server horray :)")
        self.audio_pipe = pyaudio.PyAudio()
        # Microphone capture stream.
        self.input_stream = self.audio_pipe.open(format = Format,
                            channels = Channels,
                            rate = Rate,
                            input = True,
                            frames_per_buffer = Chunks)
        # Speaker playback stream.
        self.output_stream = self.audio_pipe.open(format = Format,
                            channels = Channels,
                            rate = Rate,
                            output = True,
                            frames_per_buffer = Chunks)
        # True until start() flips it; both loops run only while False.
        self.shutdown = True
        # NOTE(review): send_thread is created but never started --
        # start() runs send() on the calling thread instead.
        self.send_thread = threading.Thread(target = self.send)
        self.recieve_thread = threading.Thread(target = self.receive)
    def start(self):
        # Begin the call: playback runs on a thread, capture runs here
        # (blocking until the connection drops or shutdown is requested).
        self.shutdown = False
        self.recieve_thread.start()
        self.send()
    def send(self):
        # Push microphone chunks to the server; any send/read error is
        # treated as a lost connection and triggers close().
        while not self.shutdown:
            try:
                data = self.input_stream.read(Chunks)
                self.client.send(data)
            except:
                print("[-] Server Connection Lost :(")
                break
        self.close()
    def receive(self):
        # Play each chunk received from the server; any socket/stream
        # error silently ends playback.
        while not self.shutdown:
            try:
                data = self.client.recv(Chunks)
                self.output_stream.write(data)
            except:
                break
    def close(self):
        # Stop both loops and release the audio resources.
        # NOTE(review): the socket itself is never closed here --
        # presumably left to process exit; confirm before reuse.
        print("[-] Closing the client gracefully ..")
        self.shutdown = True
        self.recieve_thread.join()
        self.input_stream.close()
        self.output_stream.close()
        self.audio_pipe.terminate()
if __name__ == '__main__':
    client = None
    try:
        client = Client("<HOST_IP>")
        client.start()
    except ConnectionRefusedError:
        print("[-] Unable to connect to the server. Host seems down :(")
    except KeyboardInterrupt:
        # Robustness fix: Ctrl-C before the connection succeeded used to
        # raise NameError because `client` was never bound.
        if client is not None:
            client.close()
| [
"amitgupta758000@gmail.com"
] | amitgupta758000@gmail.com |
4a9ae961addaa1b6427a13adb9219c5cbd463469 | 6338f5eb560d7fb150732b6389518836bed2cafc | /core/chart/modifiers/date_basis_chart_modifier.py | eaf0985a27c7b3c37fe8ba1e46fa05bb0d107498 | [] | no_license | blazern/gtd-stats | 1034e5b339231a49a21220666715a14c5e64cb51 | 314c27f9c3cc0bca09aacc1cc10f603a4ee8fbe6 | refs/heads/master | 2021-08-09T02:44:25.454724 | 2021-06-15T16:50:25 | 2021-06-15T16:50:25 | 176,126,398 | 0 | 0 | null | 2021-03-26T11:16:07 | 2019-03-17T16:10:57 | Python | UTF-8 | Python | false | false | 3,088 | py | from enum import Enum
from datetime import timedelta
from datetime import datetime
from core.chart.modifiers.chart_modifier import *
from core.chart.chart_data import *
class DateBasisChartModifier(ChartModifier):
class Unit(Enum):
WEEK = 1
MONTH = 2
YEAR = 3
def __init__(self, unit, needed_basis_day_number):
self._unit = unit
self._needed_basis_day_number = needed_basis_day_number
@staticmethod
def try_create_from(modifier_dict):
if modifier_dict['type'] != 'date-basis':
return None
needed_basis_day_number = modifier_dict['day-n']
unit = modifier_dict['of-unit']
if unit == 'week':
unit = DateBasisChartModifier.Unit.WEEK
if needed_basis_day_number > 7:
raise ValueError('Day number cannot be greater than 7 in a week')
elif unit == 'month':
unit = DateBasisChartModifier.Unit.MONTH
if needed_basis_day_number > 28:
raise ValueError('Day number cannot be greater than 28 in a month')
elif unit == 'year':
unit = DateBasisChartModifier.Unit.YEAR
if needed_basis_day_number > 7:
raise ValueError('Day number cannot be greater than 365 in a year')
else:
raise ValueError('Unknown time unit: {}'.format(unit))
return DateBasisChartModifier(unit, needed_basis_day_number)
# Override
def convert_lines(self, lines):
for line in lines:
for x in line.x_coords():
if not isinstance(x, datetime):
raise ValueError('X must have datetime type but has: {}'.format(type(x)))
earliest_date = self._find_earliest_date(lines)
initial_basis_day_number = self._get_day_number_of_date(earliest_date)
basis_day_number = initial_basis_day_number
basis_day = earliest_date
while basis_day_number != self._needed_basis_day_number:
basis_day = basis_day - timedelta(days=1)
basis_day_number = self._get_day_number_of_date(basis_day)
updated_lines = []
for line in lines:
updated_lines.append(self._convert_line(line, basis_day))
return updated_lines
def _find_earliest_date(self, lines):
earliest_date = None
for line in lines:
for x in line.x_coords():
if earliest_date is None or x < earliest_date:
earliest_date = x
return earliest_date
def _get_day_number_of_date(self, date):
if self._unit == DateBasisChartModifier.Unit.WEEK:
return date.weekday() + 1
elif self._unit == DateBasisChartModifier.Unit.MONTH:
return date.day
elif self._unit == DateBasisChartModifier.Unit.YEAR:
return date.timetuple().tm_yday
else:
raise ValueError('Unhandled time unit: {}'.format(self._unit))
def _convert_line(self, line, basis_day):
    """Return a copy of *line* padded with zero-valued points from
    *basis_day* up to (but not including) the line's first x coordinate."""
    gap_days = (line.x_coords()[0] - basis_day).days
    pad_x = [basis_day + timedelta(days=offset) for offset in range(gap_days)]
    pad_y = [0] * len(pad_x)
    return ChartLineData(line.title(), pad_x + line.x_coords(), pad_y + line.y_coords())
"blazern@yandex-team.ru"
] | blazern@yandex-team.ru |
6ba35757f68fcb64fc047141f51271e688cd213b | 98722d42ff5305bbf25e6ca5c74f2d6229c04135 | /HandlingException/ErrorHandlingFinally andElse.py | 581098cdd5a0ad86b62ad87f793f08e4aa30ba13 | [] | no_license | anamarquezz/BegginerPythonEasySteps | 88c4fa4a0bc7d025b52990a05a7e08ce67ef6eaf | 5fc5d47a6ea093e2f4f7d33268bf6b34d1730c99 | refs/heads/master | 2022-04-30T13:57:16.587346 | 2022-04-01T23:55:56 | 2022-04-01T23:55:56 | 197,239,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Open File / Resource
# Demonstrates try/except/else/finally ordering: the division by zero is
# caught, 'else' is skipped (an exception occurred), and 'finally' always runs.
try:
    # business logic to read
    i = 0
    j = 10 / i  # raises ZeroDivisionError before the lines below run
    values = [1, 2]
    sum(values)
except TypeError:
    print("TypeError")  # message typo fixed: was "TypeErro"
    j = 10
except ZeroDivisionError:
    print("ZeroDivisionError")
    j = 0
else:
    # Runs only when the try block raises no exception.
    print("Else")
finally:
    # Runs whether or not an exception happened (close resources here).
    print("Finally")

print(j)
print("End")
| [
"anamarquez.edu@hotmail.com"
] | anamarquez.edu@hotmail.com |
b2e848507e4e8658fe32998a25c0620baed53f17 | 170d46b2372782ee9f6110875070465544732d56 | /dtest/views/home.py | 47bedce5bc0355e5d4a3e7f957f9140c6ebca135 | [] | no_license | jintt/developer-demo | c226cca062d8a9d8a287b012d75274dd945c0406 | ae0e8214e1148f513cf6a4d6f0e8e046a059d1d9 | refs/heads/master | 2021-04-30T00:40:20.524046 | 2018-02-14T03:02:38 | 2018-02-14T03:02:38 | 121,463,581 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template
# Blueprint grouping the site's "home" routes; registered by the app factory.
mod = Blueprint('home', __name__)


@mod.route('/')
def index():
    """Render the landing page template."""
    return render_template('home/index.html')
| [
"tingting.jin@ele.me"
] | tingting.jin@ele.me |
5127469a13b03f16da57296c3fd2d1bfe11887b7 | 7dcf81654ac053f529c293a2201f0ff49c8674b9 | /app with sql3lite.py | 9b2e1b75614aa1562828e2cc432c71946a8849ff | [] | no_license | OlaDzido/Projekt-indywidualny | b26066c039d2f6c456544230a9abc10e26382203 | 4a232199781670f882037faa7e3ff5598eae967b | refs/heads/master | 2022-04-08T04:00:44.046320 | 2020-03-01T19:42:28 | 2020-03-01T19:42:28 | 242,814,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | import sqlite3
from datetime import datetime as dt
import datetime
# Create (if missing) a per-day ranking table in the local films database.
c = sqlite3.connect('films.db')
con = c.cursor()
a = datetime.date.today()
x = a.strftime('%Y_%m_%d')  # e.g. "2020_03_01"; used as the table-name suffix
# Table "ranking<YYYY_MM_DD>" with columns: place, tittle (sic), votes, data.
c.execute("""CREATE TABLE IF NOT EXISTS ranking{datax} (place INT(4), tittle VARCHAR(40), votes INT(20), data DATE);""".format(datax=x))
c.commit()
c.close()  # release the database handle (was leaked before)
| [
"aleksandra_dzido@wp.pl"
] | aleksandra_dzido@wp.pl |
06ebdcbe79fc30e8f7dbeae6f53b24398009b675 | 6df0d7a677129e9b325d4fdb4bbf72d512dd08b2 | /PycharmProjects/my_practice/untitled/1.py | 851d7606c9479fbaebdefa185ba912d562a89abd | [] | no_license | yingxingtianxia/python | 01265a37136f2ad73fdd142f72d70f7c962e0241 | 3e1a7617a4b6552bce4a7e15a182f30e1bae221e | refs/heads/master | 2021-06-14T15:48:00.939472 | 2019-12-13T05:57:36 | 2019-12-13T05:57:36 | 152,200,507 | 0 | 0 | null | 2021-06-10T20:54:26 | 2018-10-09T06:40:10 | Python | UTF-8 | Python | false | false | 102 | py | #!/usr/bin/env python3
# -*-coding: utf8-*-
# Append a test line to 1.txt; the with-statement guarantees the handle is
# closed even if the write fails (the original leaked it on error).
with open('1.txt', 'a') as f:
    f.write('this is test\n')
"root@room8pc205.tedu.cn"
] | root@room8pc205.tedu.cn |
76c8f94b2f1120d880d69b6121372442259a80bc | a08409f712dc0b1045f695fd2ffee2bb6cc7835b | /math/0x02-calculus/17-integrate.py~ | 07a15858a238dab1c6d0b0de4984d551527088f4 | [] | no_license | mohsenabedelaal/holbertonschool-machine_learning | d3f2137761e10d620472ca6e5f3288c45898381d | 2765a09ba3064168b024952d18b1a2471952c8a2 | refs/heads/main | 2023-06-02T16:11:55.600921 | 2021-06-10T19:08:13 | 2021-06-10T19:08:13 | 318,244,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 754 | #!/usr/bin/env python3
"""derivative poly"""
def poly_integral(poly, C=0):
"""Module for integral"""
if not isinstance(poly, list):
return None
if len(poly) == 0:
return None
if len(poly) == 1:
return [0]
if len(poly) == 2:
return [poly(1)]
else:
integral = []
for i in range(0, len(poly)):
if isinstance(poly[i], (int, float)):
if i == 0:
integral.append(0)
if poly[i] % (i + 1) == 0:
result = int((1/(i+1)) * poly[i])
else:
result = (1/(i+1)) * poly[i]
integral.append(result)
else:
return None
return der
| [
"mohsen.abedelaal@gmail.com"
] | mohsen.abedelaal@gmail.com | |
6562ceefb580fe6394f1e927b79291c2063a56c7 | 5692e8a3357f7afe6284b43c4a9770d81957a511 | /student/migrations/0015_auto_20201119_1605.py | 8fc7c2324f4ec87bd9f70cdb6eabaa98d7202789 | [] | no_license | OmarFateh/student-management-system | 49bcfbdf15a631cf7f64ff200d530a44a44409ac | 2c53f81a55fe631406b642365a68de19501c0f17 | refs/heads/master | 2023-07-16T00:02:54.796428 | 2021-08-25T01:54:02 | 2021-08-25T01:54:02 | 355,033,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | # Generated by Django 3.1.2 on 2020-11-19 14:05
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: updates the Student model's Meta options
    (ordering by the related user's full name, and display names)."""

    dependencies = [
        ('student', '0014_sessionyear_date_range'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='student',
            options={'ordering': ['user__full_name'], 'verbose_name': 'Student', 'verbose_name_plural': 'Students'},
        ),
    ]
| [
"66747309+OmarFateh@users.noreply.github.com"
] | 66747309+OmarFateh@users.noreply.github.com |
5adfe82e1d05b7025ced56b8ef0b25d4c36d7479 | 575db53a21cdcabd958fe083be1c1e6187909a42 | /server/resources.py | d8204fa02129318d19c2aa3aa426798038c7d693 | [] | no_license | sumchattering/iOS-Client-Python-Docker-Server-Example | 681ffad01cc9849fad7cbb46a607c3f3f83b5552 | 1ddbe0bfffad45b213caa1d199dbc183942c454d | refs/heads/master | 2022-02-26T19:18:32.346519 | 2019-11-18T01:12:29 | 2019-11-18T01:12:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | #! .env/bin/python
from models import Card
from db import session
from flask_restful import reqparse
from flask_restful import abort
from flask_restful import Resource
from flask_restful import fields
from flask_restful import marshal_with
# Serializer map for flask-restful's marshal_with: which Card attributes to
# expose in responses and how each one is rendered.
card_fields = {
    'cardId': fields.String,
    'name': fields.String,
    'cardSet': fields.String,
    'type': fields.String,
    'rarity': fields.String,
    'cost': fields.Integer,
    'attack': fields.Integer,
    'health': fields.Integer,
    'text': fields.String,
    'flavor': fields.String,
    'artist': fields.String,
    'collectible': fields.Boolean,
    'elite': fields.Boolean,
    'playerClass': fields.String,
    'multiClassGroup': fields.String,
    'howToGet': fields.String,
    'howToGetGold': fields.String,
    'img': fields.String,
    'imgGold': fields.String,
    'locale': fields.String,
}

# Request parser for card payloads (currently only a 'card' string argument).
parser = reqparse.RequestParser()
parser.add_argument('card', type=str)
class CardResource(Resource):
    """Read-only endpoint exposing a single card by its id."""

    @marshal_with(card_fields)
    def get(self, cardId):
        """Return the card matching *cardId*, or abort with a 404."""
        match = session.query(Card).filter(Card.cardId == cardId).first()
        if not match:
            abort(404, message="Card {} doesn't exist".format(cardId))
        return match
class CardListResource(Resource):
    """Read-only endpoint returning every card in the database."""

    @marshal_with(card_fields)
    def get(self):
        """Return the full list of cards."""
        return session.query(Card).all()
| [
"sumchatterjee@ebay.com"
] | sumchatterjee@ebay.com |
dbc68db34ebdb4fc4ad9fa40b314bf9274e01fc5 | dd08502acdb8d9b3c9eb32630f55ff8a3bb1573c | /BJJtrainer/src/trainer/states.py | c9fe4af4254d0c385f318563813b842abb513e3d | [] | no_license | seanahmad/BJJtrainer | 85b4fc93058464dbc9509baafe5dc91c99dbef74 | e5461a89086c65604586b75367cf966e5d87b1cf | refs/heads/master | 2021-12-19T11:37:08.449775 | 2014-05-24T01:24:13 | 2014-05-24T01:24:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,688 | py | '''
Created on 21 May 2014
@author: Ash
'''
from transition import Transition
from attacks import Attack
import sys
import random
class State(object):
    """Base class for a grappling position (Python 2 code).

    Subclasses populate ``attacks`` (name -> Attack), ``transitions``
    (name -> Transition), ``description`` and ``name``. ``run`` drives one
    turn of user interaction and returns the name of the next state.
    """

    def __init__(self):
        # All four fields are filled in by the concrete position subclasses.
        self.attacks = None        # dict: attack name -> Attack
        self.transitions = None    # dict: transition name -> Transition
        self.description = None    # text printed when entering this state
        self.name = None           # this state's key in the state machine

    def trans(self):
        """Prompt for a transition and attempt it.

        Returns the destination state's name on success, this state's
        source name on failure, or 'quitState' when the user types 'quit'.
        """
        is_valid = False
        while not is_valid:
            print "\nWhich transition do you want to try?"
            choice = raw_input("(type '-o' for options)")
            if choice == 'quit':
                is_valid = True
                return 'quitState'
            elif choice == '-o':
                # List the available transition names.
                print ''
                for key in self.transitions:
                    print key
            elif choice in self.transitions:
                is_valid = True
                tempTransition = self.transitions[choice]
                # Transition.run() decides whether the attempt succeeds.
                if tempTransition.run():
                    print "\nNice!"
                    return tempTransition.toState
                else:
                    print "\nNot this time!"
                    return tempTransition.fromState
            else:
                print ("\nPlease enter valid option.")

    def attack(self):
        """Prompt for an attack and attempt it.

        Returns 'winState' on success, this state's name on failure, or
        'quitState' when the user types 'quit'.
        """
        is_valid = False
        while not is_valid:
            print "\nWhich attack do you want to try?"
            choice = raw_input("(type '-o' for options)")
            if choice == 'quit':
                is_valid = True
                return 'quitState'
            elif choice == '-o':
                # List the available attack names.
                print ''
                for key in self.attacks:
                    print key
            elif choice in self.attacks:
                is_valid = True
                tempAttack = self.attacks[choice]
                # Attack.run() decides whether the submission succeeds.
                if tempAttack.run():
                    print '\n' + tempAttack.succMessage
                    return 'winState'
                else:
                    print "\nNot this time!"
                    return self.name
            else:
                print ("\nPlease enter valid option.")

    def run(self):
        """Play one turn in this position; return the next state's name."""
        print "\n" + self.description
        # With no transitions the player can only attack, and vice versa;
        # otherwise they choose which to attempt.
        if not self.transitions:
            return self.attack()
        elif not self.attacks:
            return self.trans()
        else:
            is_valid = False
            while not is_valid:
                choice = raw_input('\nWould you rather advance position or attack? (adv or att)')
                if choice == 'quit':
                    is_valid = True
                    return 'quitState'
                elif choice == 'adv':
                    is_valid = True
                    return self.trans()
                elif choice == 'att':
                    is_valid = True
                    return self.attack()
                else:
                    print ("\nPlease enter 'adv' or 'att'.")
class StartState():
    """Initial state: lets the player start standing or in a random position."""

    def run(self):
        """Return the name of the starting position chosen by the player."""
        # Candidate positions for the random start option.
        posNames = ['full mount', 'fully mounted',
                    'bottom guard', 'top guard',
                    'back mount']
        print ("\n\n"
               "How would you like to start, standing or in a random position?")
        is_valid = False
        while not is_valid:
            choice = raw_input('Enter your choice (s or r): ')
            if choice == 'quit':
                is_valid = True
                return 'quitState'
            elif choice == 's':
                is_valid = True
                startPos = 'standing'
            elif choice == 'r':
                is_valid = True
                startPos = random.choice(posNames)
            else:
                print ("Please enter 's' or 'r'.")
        return startPos
class WinState(State):
    """Terminal state reached after a successful attack."""

    def run(self):
        """Announce the win and ask whether to play again."""
        print '\nYou win'
        is_valid = False
        while not is_valid:
            choice = raw_input('\nPlay again? (y or n):')
            if choice == 'quit':
                return 'quitState'
            elif choice == 'y':
                # Restart the game from the beginning.
                return 'startState'
            elif choice == 'n':
                return 'quitState'
            else:
                print ("\nPlease enter 'y' or 'n'.")
class QuitState(State):
    """Terminal state: prints a goodbye banner and exits the process."""

    def run(self):
        print ("\n\n We hope you enjoyed the game!\n"
               "********************************************************")
        sys.exit()
###############################################################################
################################## Positions ##################################
###############################################################################
class FullMount(State):
    """Position: the player has full mount on the opponent."""

    def __init__(self, difficulty, adjustProb):
        self.name = "full mount"
        self.transitions = {}
        self.description = """You have fully mounted your opponent."""
        # Submissions available from mount; odds are scaled by difficulty.
        self.attacks = {
            'americana': Attack('americana',
                                adjustProb(0.8, difficulty),
                                "You ripped you're opponents arm off!"),
            'arm lock': Attack('arm lock',
                               adjustProb(0.4, difficulty),
                               "You ripped you're opponents arm off!"),
        }
class FullyMounted(State):
    """Position: the opponent has full mount on the player."""

    def __init__(self, difficulty, adjustProb):
        self.name = "fully mounted"
        self.attacks = {}
        # Escapes from under mount; odds are scaled by difficulty.
        # Bug fix: the first Transition was created with the misspelled
        # name 'trap & rol' although its dictionary key is 'trap & roll'.
        self.transitions = {'trap & roll': Transition('trap & roll',
                                                      self.name,
                                                      'top guard',
                                                      adjustProb(0.6, difficulty)),
                            'elbow escape': Transition('elbow escape',
                                                       self.name,
                                                       'bottom guard',
                                                       adjustProb(0.5, difficulty))}
        self.description = """Your opponent has fully mounted you."""
class BottomGuard(State):
    """Position: the player is on their back with the opponent in guard."""

    def __init__(self, difficulty, adjustProb):
        self.name = "bottom guard"
        self.description = """You're opponent is in your guard"""
        # Submissions from guard; odds are scaled by difficulty.
        self.attacks = {
            'kimura': Attack('kimura', adjustProb(0.3, difficulty),
                             "You ripped you're opponents arm off!"),
            'arm lock': Attack('arm lock', adjustProb(0.4, difficulty),
                               "You ripped you're opponents arm off!"),
            'triangle': Attack('triangle', adjustProb(0.5, difficulty),
                               "You're opponent is unconscious!"),
            'guillotine': Attack('guillotine', adjustProb(0.3, difficulty),
                                 "You're opponent is unconscious!"),
        }
        # Sweeps / positional improvements.
        self.transitions = {
            'elevator sweep': Transition('elevator sweep', self.name,
                                         'full mount',
                                         adjustProb(0.6, difficulty)),
            'take the back': Transition('take the back', self.name,
                                        'back mount',
                                        adjustProb(0.5, difficulty)),
        }
class TopGuard(State):
    """Position: the player is inside the opponent's guard."""

    def __init__(self, difficulty, adjustProb):
        self.name = "top guard"
        self.attacks = {}
        self.description = """You are in your opponent's guard"""
        # TODO Add double underhook pass to side control
        self.transitions = {
            'open guard pass': Transition('open guard pass', self.name,
                                          'full mount',
                                          adjustProb(0.5, difficulty)),
        }
class BackMount(State):
    """Position: the player controls the opponent's back."""

    def __init__(self, difficulty, adjustProb):
        self.name = "back mount"
        self.transitions = {}
        self.description = """You have taken your opponent's back"""
        # Only one finish from the back; odds are scaled by difficulty.
        self.attacks = {
            'rear naked choke': Attack('rear naked choke',
                                       adjustProb(0.9, difficulty),
                                       "You choked out your opponent!"),
        }
| [
"ash.booth@soton.ac.uk"
] | ash.booth@soton.ac.uk |
904e5fab937e3d2dcca3efe3b27da1f91a65b6bc | cb134e2aba36a578aadb79acb85a41ba31b78f54 | /blog/migrations/0003_auto_20181120_1610.py | a33c7bfb46fff4315f5a7bd841683e89f095f3d5 | [] | no_license | vicsoulz/blog | 52ca51682c1440bdd742488d168f938a9e835496 | f741ba0c17fb0b3da13977fe7011c634e21d4585 | refs/heads/master | 2020-04-07T08:19:36.700938 | 2018-11-22T10:07:42 | 2018-11-22T10:07:42 | 143,368,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-11-20 08:10
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20170517_1929'),
]
operations = [
migrations.AlterField(
model_name='post',
name='body',
field=ckeditor.fields.RichTextField(),
),
]
| [
"yaomaolei@luojilab.com"
] | yaomaolei@luojilab.com |
89509c3ebcab0d26460f14bf5810e0a088b1aa54 | 1fa16c1df35fd8247f9274b26a70523a514210f0 | /dependencies/amitools-0.1.0/amitools/vamos/lib/dos/FileManager.py | c9ed60eea544afb45ca4756963bba90fd9cacfbe | [
"GPL-1.0-or-later",
"MIT"
] | permissive | limi/AGSImager | 25a816b1c5b9ce8553cd6e3a47babce574f15119 | d3771800308e61a7a07df4a9b361e5bd5ba9e409 | refs/heads/master | 2023-01-10T11:19:52.248376 | 2020-04-14T19:59:23 | 2020-04-14T19:59:23 | 255,813,813 | 0 | 0 | MIT | 2020-04-15T05:26:57 | 2020-04-15T05:26:56 | null | UTF-8 | Python | false | false | 8,753 | py | import sys
import os.path
import os
import logging
import errno
import stat
from amitools.vamos.Log import log_file
from amitools.vamos.AccessStruct import AccessStruct
from DosStruct import DosPacketDef
from amitools.vamos.lib.lexec.ExecStruct import MessageDef
from Error import *
from DosProtection import DosProtection
from FileHandle import FileHandle
class FileManager:
def __init__(self, path_mgr, alloc, mem):
self.path_mgr = path_mgr
self.alloc = alloc
self.mem = mem
self.files_by_b_addr = {}
# get current umask
self.umask = os.umask(0)
os.umask(self.umask)
def setup(self, fs_handler_port):
self.fs_handler_port = fs_handler_port
# setup std input/output
self.std_input = FileHandle(sys.stdin,'<STDIN>','',need_close=False)
self.std_output = FileHandle(sys.stdout,'<STDOUT>','',need_close=False)
self._register_file(self.std_input)
self._register_file(self.std_output)
def finish(self,have_native_shell):
if not have_native_shell: #the Shell otherwise closes the streams for us
self._unregister_file(self.std_input)
self._unregister_file(self.std_output)
def get_fs_handler_port(self):
return self.fs_handler_port
def _register_file(self, fh):
baddr = fh.alloc_fh(self.alloc, self.fs_handler_port)
self.files_by_b_addr[baddr] = fh
log_file.info("registered: %s" % fh)
def _unregister_file(self,fh):
if fh.b_addr in self.files_by_b_addr:
check = self.files_by_b_addr[fh.b_addr]
if check != fh:
raise ValueError("Invalid File to unregister: %s" % fh)
else:
raise ValueError("Invalid File to unregister: %s" % fh)
del self.files_by_b_addr[fh.b_addr]
log_file.info("unregistered: %s"% fh)
fh.free_fh(self.alloc)
def get_input(self):
return self.std_input
def get_output(self):
return self.std_output
def open(self, lock, ami_path, f_mode):
try:
# special names
uname = ami_path.upper()
# thor: NIL: and CONSOLE: also work as device names
# and the file names behind are ignored.
if uname.startswith('NIL:'):
sys_name = "/dev/null"
if f_mode == "rwb+":
f_mode = "rb+"
fobj = open(sys_name, f_mode)
fh = FileHandle(fobj, ami_path, sys_name, is_nil = True)
elif uname == '*' or uname.startswith('CONSOLE:'):
sys_name = ''
fh = FileHandle(sys.stdout,'*','',need_close=False)
else:
# map to system path
sys_path = self.path_mgr.ami_to_sys_path(lock,ami_path,searchMulti=True)
if sys_path == None:
log_file.info("file not found: '%s' -> '%s'" % (ami_path, sys_path))
return None
# make some checks on existing file
if os.path.exists(sys_path):
# if not writeable -> no append mode
if f_mode == "rwb+":
f_mode = "rb+"
if not os.access(sys_path, os.W_OK):
if f_mode[-1] == '+':
f_mode = f_mode[:-1]
else:
# if the file does not exist, but the mode is MODE_READWRITE, create it.
if f_mode == "rwb+":
f_mode = "wb+"
log_file.debug("opening file: '%s' -> '%s' f_mode=%s" % (ami_path, sys_path, f_mode))
fobj = open(sys_path, f_mode)
fh = FileHandle(fobj, ami_path, sys_path)
self._register_file(fh)
return fh
except IOError as e:
log_file.info("error opening: '%s' -> '%s' f_mode=%s -> %s" % (ami_path, sys_path, f_mode, e))
return None
def close(self, fh):
fh.close()
self._unregister_file(fh)
def get_by_b_addr(self, b_addr, for_writing = None):
if b_addr == 0:
return None
if b_addr in self.files_by_b_addr:
fh = self.files_by_b_addr[b_addr]
# AmigaDos has no problem reading from an output console handle
# or writing to the input handle for the console.
if for_writing == True and fh.obj == sys.stdin:
return self.std_output
elif for_writing == False and fh.obj == sys.stdout:
return self.std_input
return fh
else:
addr = b_addr << 2
raise ValueError("Invalid File Handle at b@%06x = %06x" % (b_addr, addr))
def delete(self, lock, ami_path):
sys_path = self.path_mgr.ami_to_sys_path(lock,ami_path)
if sys_path == None or not os.path.exists(sys_path):
log_file.info("file to delete not found: '%s'" % (ami_path))
return ERROR_OBJECT_NOT_FOUND
try:
if os.path.isdir(sys_path):
os.rmdir(sys_path)
else:
os.remove(sys_path)
return 0
except OSError as e:
if e.errno == errno.ENOTEMPTY: # Directory not empty
log_file.info("can't delete directory: '%s' -> not empty!" % (ami_path))
return ERROR_DIRECTORY_NOT_EMPTY
else:
log_file.info("can't delete file: '%s' -> %s" % (ami_path, e))
return ERROR_OBJECT_IN_USE
def rename(self, lock, old_ami_path, new_ami_path):
old_sys_path = self.path_mgr.ami_to_sys_path(lock,old_ami_path)
new_sys_path = self.path_mgr.ami_to_sys_path(lock,new_ami_path)
if old_sys_path == None or not os.path.exists(old_sys_path):
log_file.info("old file to rename not found: '%s'" % old_ami_path)
return ERROR_OBJECT_NOT_FOUND
if new_sys_path == None:
log_file.info("new file to rename not found: '%s'" % new_ami_path)
return ERROR_OBJECT_NOT_FOUND
try:
os.rename(old_sys_path, new_sys_path)
return 0
except OSError as e:
log_file.info("can't rename file: '%s','%s' -> %s" % (old_ami_path, new_ami_path, e))
return ERROR_OBJECT_IN_USE
def is_file_system(self, lock, name):
uname = name.upper()
if uname.startswith('NIL:'):
return False
elif uname == '*' or uname.startswith('CONSOLE:'):
return False
# Everything else is a file system here, we don't support any
# other devices.
return True
def set_protection(self, lock, ami_path, mask):
sys_path = self.path_mgr.ami_to_sys_path(lock, ami_path)
if sys_path == None or not os.path.exists(sys_path):
log_file.info("file to set proteciton not found: '%s'", ami_path)
return ERROR_OBJECT_NOT_FOUND
prot = DosProtection(mask)
posix_mask = 0
if prot.is_e():
posix_mask |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if prot.is_w():
posix_mask |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
if prot.is_r():
posix_mask |= stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
posix_mask &= ~self.umask
log_file.info("set protection: '%s': %s -> '%s': posix_mask=%03o umask=%03o", ami_path, prot, sys_path, posix_mask, self.umask)
try:
os.chmod(sys_path, posix_mask)
return NO_ERROR
except OSError:
return ERROR_OBJECT_WRONG_TYPE
def create_dir(self, lock, ami_path):
sys_path = self.path_mgr.ami_to_sys_path(lock, ami_path)
try:
os.mkdir(sys_path)
return NO_ERROR
except OSError:
return ERROR_OBJECT_EXISTS
# ----- Direct Handler Access -----
# callback from port manager for fs handler port
# -> Async I/O
def put_msg(self, port_mgr, msg_addr):
msg = AccessStruct(self.mem,MessageDef,struct_addr=msg_addr)
dos_pkt_addr = msg.r_s("mn_Node.ln_Name")
dos_pkt = AccessStruct(self.mem,DosPacketDef,struct_addr=dos_pkt_addr)
reply_port_addr = dos_pkt.r_s("dp_Port")
pkt_type = dos_pkt.r_s("dp_Type")
log_file.info("DosPacket: msg=%06x -> pkt=%06x: reply_port=%06x type=%06x", msg_addr, dos_pkt_addr, reply_port_addr, pkt_type)
# handle packet
if pkt_type == ord('R'): # read
fh_b_addr = dos_pkt.r_s("dp_Arg1")
buf_ptr = dos_pkt.r_s("dp_Arg2")
size = dos_pkt.r_s("dp_Arg3")
# get fh and read
fh = self.get_by_b_addr(fh_b_addr)
data = fh.read(size)
self.mem.access.w_data(buf_ptr, data)
got = len(data)
log_file.info("DosPacket: Read fh_b_addr=%06x buf=%06x len=%06x -> got=%06x fh=%s", fh_b_addr, buf_ptr, size, got, fh)
dos_pkt.w_s("dp_Res1", got)
elif pkt_type == ord('W'): # write
fh_b_addr = dos_pkt.r_s("dp_Arg1")
buf_ptr = dos_pkt.r_s("dp_Arg2")
size = dos_pkt.r_s("dp_Arg3")
fh = self.get_by_b_addr(fh_b_addr)
data = self.mem.access.r_data(buf_ptr, size)
fh.write(data)
put = len(data)
log_file.info("DosPacket: Write fh=%06x buf=%06x len=%06x -> put=%06x fh=%s", fh_b_addr, buf_ptr, size, put, fh)
dos_pkt.w_s("dp_Res1", put)
else:
raise UnsupportedFeatureError("Unsupported DosPacket: type=%d" % pkt_type)
# do reply
if not port_mgr.has_port(reply_port_addr):
port_mgr.register_port(reply_port_addr)
port_mgr.put_msg(reply_port_addr, msg_addr)
| [
"optiroc@gmail.com"
] | optiroc@gmail.com |
96941c0474d62831f8ffe51c87665c7acf1682f9 | 7147ed6f206175ec4e92db2eb3421d2d411d1479 | /clinic/migrations/0024_delete_tinhthanh.py | 2177d910772710c43e7fed04e8daf12d2b642bbc | [] | no_license | jiroakira/QLPK_MEDOTIS | c8d6a5ac7bdbf25170704989520697deed6015d4 | 23de8d579df15a085767eed0fe8b857b7ccdfc4b | refs/heads/main | 2023-06-22T20:08:13.844852 | 2021-07-15T02:44:55 | 2021-07-15T02:44:55 | 326,596,095 | 0 | 2 | null | 2021-07-15T02:44:55 | 2021-01-04T06:54:51 | Python | UTF-8 | Python | false | false | 288 | py | # Generated by Django 3.1.6 on 2021-02-20 18:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('clinic', '0023_tinhthanh'),
]
operations = [
migrations.DeleteModel(
name='TinhThanh',
),
]
| [
"72379893+jiroakira@users.noreply.github.com"
] | 72379893+jiroakira@users.noreply.github.com |
91a80f36411eaa0287c81fe0a4414a82d2b3022a | a7104434e0ddb4575ef0a6cd467bac6620570de8 | /hunter108.py | ff44819a2ca4401163bea362d9ae1cf41d6bc5c3 | [] | no_license | GauthamAjayKannan/GUVI-1 | 7b276eef3195bec9671eec8bb6bcc588cb5c970e | fafabab93df55abcc399f6e2664286ed511fd683 | refs/heads/master | 2020-06-25T07:38:08.465414 | 2019-05-17T11:24:53 | 2019-05-17T11:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | x = list(input())
list1 = []
out = 0
if len(x) == 1:
out = int(x[0]) * int(x[0])
else:
for i in x:
list1.append(int(i))
for i in range(len(list1)-1):
out += (list1[i] ** list1[i+1])
out += (list1[len(list1)-1] ** list1[0] )
print(out)
| [
"noreply@github.com"
] | GauthamAjayKannan.noreply@github.com |
697e98e66b34e1c1521548ba26c8602e5d8e8b3b | d34d1c00aa5e2044c93066046a7cac1d5f23f6df | /perpost/apps.py | d815129416a01ea9fb16d6dc00852aae98f203a0 | [] | no_license | Mahmoud-alzoubi95/permissions-postgres | 83c6ac1cf6d2f804ebbc9225b6c5f2fb73773c2c | 489a71a2fc7c3422f6fe1d5a74c02d31ddec5028 | refs/heads/main | 2023-06-04T00:07:03.205111 | 2021-06-23T18:07:28 | 2021-06-23T18:07:28 | 379,012,735 | 0 | 0 | null | 2021-06-23T18:07:28 | 2021-06-21T17:32:26 | Python | UTF-8 | Python | false | false | 146 | py | from django.apps import AppConfig
class PerpostConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'perpost'
| [
"mahmoudalzoubi073@gmail.com"
] | mahmoudalzoubi073@gmail.com |
19a749d69b3343bf098422222082ff5f6d881d1a | 9d2826f385d6a97f2eaa52711e8d89c5dd8921c1 | /Ludo2D-Game/Ludo-game.py | 740e2e244e06e313200f5bc53a3befe6ef2f4595 | [] | no_license | Atchudhan/Ludo_Game | 254065e4695959fea0786625928a0bc90af5b63f | 756d044b75255dc0c79159be14323254d8561c50 | refs/heads/main | 2023-04-22T01:36:07.523590 | 2021-05-12T18:04:34 | 2021-05-12T18:04:34 | 366,806,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,686 | py | from tkinter import * # Tkinter is used as the GUI.
from tkinter import messagebox
import sys
import os
import random
import tkinter.messagebox
# --- Main window setup ---
root = Tk()
root.resizable(width=False, height=False)  # Fixed window size: the board layout assumes 1000x750.
root.geometry('1000x750')
root.configure(background='green')
root.title("Checkers")

# --- Sprite/tile images used to draw the board and pieces ---
logo = PhotoImage(file="whitebox.gif")      # plain white track square
logo2 = PhotoImage(file="red side.gif")     # red home quadrant
logo3 = PhotoImage(file="red.gif")          # red game piece
logo4 = PhotoImage(file="blue side.gif")    # blue home quadrant
logo5 = PhotoImage(file="green side.gif")   # green home quadrant
logo6 = PhotoImage(file="yellow side.gif")  # yellow home quadrant
logo7 = PhotoImage(file="center.gif")       # centre square of the board
logoxx = PhotoImage(file="test.gif")
logog = PhotoImage(file="greenbox.gif")     # green track square
logogs = PhotoImage(file="greenstop.gif")   # green start/stop square
logoy = PhotoImage(file="yellowbox.gif")
logoys = PhotoImage(file="yellowstop.gif")
logob = PhotoImage(file="bluebox.gif")
logobs = PhotoImage(file="bluestop.gif")
logor = PhotoImage(file="redbox.gif")
logors = PhotoImage(file="redstop.gif")
logoh = PhotoImage(file="head.gif")         # arrow sprites marking track direction
logot = PhotoImage(file="tail.gif")
logoh1 = PhotoImage(file="head1.gif")
logot1 = PhotoImage(file="tail1.gif")
logoh2 = PhotoImage(file="head2.gif")
logot2 = PhotoImage(file="tail2.gif")
logoh3 = PhotoImage(file="head3.gif")
logot3 = PhotoImage(file="tail3.gif")
logoab = PhotoImage(file="blue.gif")        # blue game piece
logoay = PhotoImage(file="yellow.gif")      # yellow game piece
logoag = PhotoImage(file="green.gif")       # green game piece

# --- Four corner quadrants and the centre square of the board ---
Label(image=logo2, width=298, height=298).place(x=-1, y=-1)        # red quadrant (top-left)
Label(image=logo4, width=300, height=300).place(x=(-2), y=(448))   # blue quadrant (bottom-left)
Label(image=logo5, width=296, height=296).place(x=(450), y=(0))    # green quadrant (top-right)
Label(image=logo6, width=294, height=294).place(x=(450), y=(450))  # yellow quadrant (bottom-right)
Label(image=logo7, width=150, height=150).place(x=(298), y=(298))  # centre square

# --- Game-state variables and flags ---
c = 0        # 0 until main() has drawn the board and built the piece lists
lx = 0
bb = 0
nc = 0
rollc = 0    # roll counter
rolls = []   # dice rolls recorded for the current turn
RED = True   # whose-turn flags; red starts
BLUE = False
GREEN = False
YELLOW = False
TURN = True
REDKILL = False     # presumably set when that colour captures a piece — confirm in main()
BLUEKILL = False
GREENKILL = False
YELLOWKILL = False
def board():
    """Draw the static board: white track squares, the four coloured home
    runs with their entry and stop squares, and the direction arrows."""
    # Splash screen shown before the board is rendered.
    tkinter.messagebox.showinfo(title=None, message="TO START GAME PRESS OKAY & TO EXIT PRESS CROSS UP IN THE WINDOW")
    # White track squares: four 6x3 blocks forming the cross arms.
    for row in range(0, 300, 50):
        for col in range(0, 150, 50):
            Label(image=logo, width=46, height=46).place(x=300 + col, y=row)        # top arm
            Label(image=logo, width=46, height=46).place(x=row, y=300 + col)        # left arm
            Label(image=logo, width=46, height=46).place(x=300 + col, y=450 + row)  # bottom arm
            Label(image=logo, width=46, height=46).place(x=450 + row, y=300 + col)  # right arm
    # Coloured home runs: five squares per colour (drawn over the white arms).
    for step in range(0, 250, 50):
        Label(image=logog, width=46, height=46).place(x=350, y=50 + step)   # green column
        Label(image=logoy, width=46, height=46).place(x=450 + step, y=350)  # yellow row
        Label(image=logor, width=46, height=46).place(x=50 + step, y=350)   # red row
        Label(image=logob, width=46, height=46).place(x=350, y=450 + step)  # blue column
    # Each colour's extra entry square and its "stop" square.
    Label(image=logog, width=46, height=46).place(x=300, y=100)
    Label(image=logogs, width=46, height=46).place(x=400, y=50)
    Label(image=logoy, width=46, height=46).place(x=600, y=300)
    Label(image=logoys, width=46, height=46).place(x=650, y=400)
    Label(image=logor, width=46, height=46).place(x=100, y=400)
    Label(image=logors, width=46, height=46).place(x=50, y=300)
    Label(image=logob, width=46, height=46).place(x=400, y=600)
    Label(image=logobs, width=46, height=46).place(x=300, y=650)
    # Direction arrows marking how pieces enter and leave each arm.
    Label(image=logoh, width=46, height=46).place(x=250, y=400)
    Label(image=logot, width=46, height=46).place(x=300, y=450)
    Label(image=logoh1, width=46, height=46).place(x=400, y=450)
    Label(image=logot1, width=46, height=46).place(x=450, y=400)
    Label(image=logoh2, width=46, height=46).place(x=450, y=300)
    Label(image=logot2, width=46, height=46).place(x=400, y=250)
    Label(image=logoh3, width=46, height=46).place(x=300, y=250)
    Label(image=logot3, width=46, height=46).place(x=250, y=300)
class YBox:
    """A yellow piece/cell: board coordinates plus the widget drawing it."""
    rap = None

    def __init__(self, num=-1, x=0, y=0, x0=0, y0=0, double=False):
        self.num = num                  # index of the game piece tied to this box
        self.x, self.y = x, y           # piece coordinates (set to x0+25/y0+25 by setup)
        self.x0, self.y0 = x0, y0       # top-left corner of the board cell
        self.rap = Label(image=logoay, width=20, height=20)  # sprite widget
        self.double = double            # True when one piece sits on top of another

    def swap(self):
        """Re-place the sprite inside the current cell (13/14 px centring offset)."""
        self.rap.place(x=self.x0 + 13, y=self.y0 + 14)
class GBox:
    """A green piece/cell: board coordinates plus the widget drawing it."""
    rap = None

    def __init__(self, num=-1, x=0, y=0, x0=0, y0=0, double=False):
        self.num = num                  # index of the game piece tied to this box
        self.x, self.y = x, y           # piece coordinates (set to x0+25/y0+25 by setup)
        self.x0, self.y0 = x0, y0       # top-left corner of the board cell
        self.rap = Label(image=logoag, width=20, height=20)  # sprite widget
        self.double = double            # True when one piece sits on top of another

    def swap(self):
        """Re-place the sprite inside the current cell (13/14 px centring offset)."""
        self.rap.place(x=self.x0 + 13, y=self.y0 + 14)
class BBox:
    """A blue piece/cell: board coordinates plus the widget drawing it."""
    rap = None

    def __init__(self, num=-1, x=0, y=0, x0=0, y0=0, double=False):
        self.num = num                  # index of the game piece tied to this box
        self.x, self.y = x, y           # piece coordinates (set to x0+25/y0+25 by setup)
        self.x0, self.y0 = x0, y0       # top-left corner of the board cell
        self.rap = Label(image=logoab, width=20, height=20)  # sprite widget
        self.double = double            # True when one piece sits on top of another

    def swap(self):
        """Re-place the sprite inside the current cell (13/14 px centring offset)."""
        self.rap.place(x=self.x0 + 13, y=self.y0 + 14)
class Box:
    """A red piece; main() also reuses Box as a generic board-cell record."""
    rap = None

    def __init__(self, num=-1, x=0, y=0, x0=0, y0=0, double=False):
        self.num = num                  # index of the game piece tied to this box
        self.x, self.y = x, y           # piece coordinates (set to x0+25/y0+25 by setup)
        self.x0, self.y0 = x0, y0       # top-left corner of the board cell
        self.rap = Label(image=logo3, width=20, height=20)  # sprite widget
        self.double = double            # True when one piece sits on top of another

    def swap(self):
        """Re-place the sprite inside the current cell (13/14 px centring offset)."""
        self.rap.place(x=self.x0 + 13, y=self.y0 + 14)
def main(): # Main game function.
    """Drive one step of the Ludo game.

    On the very first call (global click counter c == 0) this builds the board
    geometry: the shared 52-square ring, each colour's 57-step path (ring +
    home stretch), the four home squares per colour, and the token objects.
    On every later call it interprets the most recent mouse click (cx, cy)
    for whichever colour's turn it is, moving a clicked token by the rolled
    dice value, handling kills, doubles and turn hand-over via globals.
    """
    global box, redbox, bluebox, greenbox, yellowbox, redhome, bluehome, yellowhome, greenhome
    global red, blue, yellow, green, rap, RED, BLUE, GREEN, YELLOW, dice, nc, TURN, bb
    if c == 0: #constructs the game pieces first time the code is ran.
        board()
        box = [Box() for i in range(52)]
        redbox = [Box() for i in range(57)] # list of co-ordinates of all the colored boxes, excluding home and stop.
        bluebox = [Box() for i in range(57)]
        greenbox = [Box() for i in range(57)]
        yellowbox = [Box() for i in range(57)]
        redhome = [Box() for i in range(4)] # list co-ordinates of all the home positions
        bluehome = [Box() for i in range(4)]
        greenhome = [Box() for i in range(4)]
        yellowhome = [Box() for i in range(4)]
        red = [Box() for i in range(4)] # list of co-ordinates of all the game pieces in their initial state
        blue = [BBox() for i in range(4)] # that is equal to their respective home co-ordinates.
        green = [GBox() for i in range(4)]
        yellow = [YBox() for i in range(4)]
        for i in range(2): #Populates list of homeboxes, colored boxes, gamepieces and white boxes
            redhome[i].x = (100 + (100 * i))
            redhome[i].y = 100
            red[i].x0 = redhome[i].x
            red[i].y0 = redhome[i].y
            red[i].x = (red[i].x0) + 25
            red[i].y = (red[i].y0) + 25
            bluehome[i].x = (100 + (100 * i))
            bluehome[i].y = (550)
            blue[i].x0 = bluehome[i].x
            blue[i].y0 = bluehome[i].y
            blue[i].x = (blue[i].x0) + 25
            blue[i].y = (blue[i].y0) + 25
            yellowhome[i].x = (550 + (100 * i))
            yellowhome[i].y = (550)
            yellow[i].x0 = yellowhome[i].x
            yellow[i].y0 = yellowhome[i].y
            yellow[i].x = (yellow[i].x0) + 25
            yellow[i].y = (yellow[i].y0) + 25
            greenhome[i].x = (550 + (100 * i))
            greenhome[i].y = (100)
            green[i].x0 = greenhome[i].x
            green[i].y0 = greenhome[i].y
            green[i].x = (green[i].x0) + 25
            green[i].y = (green[i].y0) + 25
        for i in range(2, 4):
            redhome[i].x = (100 + (100 * (i - 2)))
            redhome[i].y = 200
            red[i].x0 = redhome[i].x
            red[i].y0 = redhome[i].y
            red[i].x = (red[i].x0) + 25
            red[i].y = (red[i].y0) + 25
            bluehome[i].x = (100 + (100 * (i - 2)))
            bluehome[i].y = (650)
            blue[i].x0 = bluehome[i].x
            blue[i].y0 = bluehome[i].y
            blue[i].x = (blue[i].x0) + 25
            blue[i].y = (blue[i].y0) + 25
            yellowhome[i].x = (550 + (100 * (i - 2)))
            yellowhome[i].y = (650)
            yellow[i].x0 = yellowhome[i].x
            yellow[i].y0 = yellowhome[i].y
            yellow[i].x = (yellow[i].x0) + 25
            yellow[i].y = (yellow[i].y0) + 25
            greenhome[i].x = (550 + (100 * (i - 2)))
            greenhome[i].y = 200
            green[i].x0 = greenhome[i].x
            green[i].y0 = greenhome[i].y
            green[i].x = (green[i].x0) + 25
            green[i].y = (green[i].y0) + 25
        # The shared 52-square ring, laid out segment by segment (50px grid).
        for i in range(6):
            box[i].x = 300
            box[i].y = (700 - (50 * i))
        for i in range(6, 12):
            box[i].x = (250 - (50 * (i - 6)))
            box[i].y = (400)
        box[12].x = 0
        box[12].y = 350
        for i in range(13, 19):
            box[i].x = (0 + (50 * (i - 13)))
            box[i].y = (300)
        for i in range(19, 25):
            box[i].x = (300)
            box[i].y = (250 - (50 * (i - 19)))
        box[25].x = 350
        box[25].y = 0
        for i in range(26, 32):
            box[i].x = (400)
            box[i].y = (0 + (50 * (i - 26)))
        for i in range(32, 38):
            box[i].x = (450 + (50 * (i - 32)))
            box[i].y = (300)
        box[38].x = 700
        box[38].y = 350
        for i in range(39, 45):
            box[i].x = (700 - (50 * (i - 39)))
            box[i].y = (400)
        for i in range(45, 51):
            box[i].x = (400)
            box[i].y = (450 + (50 * (i - 45)))
        box[51].x = 350
        box[51].y = 700
        # red path: 52 ring squares starting at ring index 14, then 7 home-stretch squares
        lx = 14
        for i in range(52):
            redbox[i].x = box[lx].x
            redbox[i].y = box[lx].y
            lx = lx + 1
            if lx > 51:
                lx = 0
        lx = 50
        for i in range(7):
            redbox[lx].x = (0 + (50 * i))
            redbox[lx].y = 350
            lx = lx + 1
        # blue
        lx = 1
        for i in range(52):
            bluebox[i].x = box[lx].x
            bluebox[i].y = box[lx].y
            lx = lx + 1
            if lx > 51:
                lx = 0
        lx = 50
        for i in range(7):
            bluebox[lx].x = 350
            bluebox[lx].y = (700 - (50 * i))
            lx = lx + 1
        # yellow
        lx = 40
        for i in range(52):
            yellowbox[i].x = box[lx].x
            yellowbox[i].y = box[lx].y
            lx = lx + 1
            if lx > 51:
                lx = 0
        lx = 50
        for i in range(7):
            yellowbox[lx].x = (700 - (50 * i))
            yellowbox[lx].y = (350)
            lx = lx + 1
        # green
        lx = 27
        for i in range(52):
            greenbox[i].x = box[lx].x
            greenbox[i].y = box[lx].y
            lx = lx + 1
            if lx > 51:
                lx = 0
        lx = 50
        for i in range(7):
            greenbox[lx].x = 350
            greenbox[lx].y = (0 + (50 * i))
            lx = lx + 1
        # Draw every token at its home position.
        for i in range(4):
            red[i].swap()
            blue[i].swap()
            green[i].swap()
            yellow[i].swap()
    else:
        if c >= 1: #This condition is true when a click is made.
            if RED == True and TURN == False: #Red players turn
                print("Red's Turn")
                print("moves available: ", rolls)
                la = "RED"
                if (movecheck(red, redhome, redbox, la)) == False: #Checks if player can take a turn.
                    BLUE = True
                    RED = False
                    clear() #clears variable, next players turn
            if RED == True: # searches if click is made on a red game piece.
                for i in range(len(red)):
                    # Case 1: clicked token is still at home -> needs a 6 to enter play.
                    if ((((cx > red[i].x0 + 13) and (cx < red[i].x + 13)) and (
                            (cy > red[i].y0 + 14) and (cy < red[i].y + 14)))
                            and (red[i].x0 == redhome[i].x) and (red[i].y0 == redhome[i].y)):
                        print("woila ")
                        if rolls[0 + nc] == 6: #If a six occurs and gamepiece is in home
                            #Game piece is moved onto the home box
                            red[i].x0 = redbox[0].x
                            red[i].y0 = redbox[0].y
                            red[i].x = redbox[0].x + 25
                            red[i].y = redbox[0].y + 25
                            red[i].num = 0
                            red[i].swap()
                            nc = nc + 1
                            if nc > len(rolls) - 1: # check if all moves are made. so next players turn.
                                BLUE = True
                                RED = False
                                clear()
                            break
                    # Case 2: clicked token is already on the board -> advance by the roll.
                    if ((((cx > red[i].x0 + 13) and (cx < red[i].x + 13)) and ( #if gamepiece is outside home
                            (cy > red[i].y0 + 14) and (cy < red[i].y + 14)))
                            and ((red[i].x0 > 270) or (red[i].y0 > 270))):
                        print("woila ")
                        bb = ((red[i].num) + rolls[0 + nc])
                        # Winning condition
                        if bb > 57: #prevents moves greater than allowed number
                            break
                        #bb = ((red[i].num) + rolls[0 + nc]) - 57
                        kill(redbox,blue,yellow,green,bluehome,yellowhome,greenhome) #checks if a kill can be made.
                        red[i].x0 = redbox[bb].x
                        red[i].y0 = redbox[bb].y
                        red[i].x = redbox[bb].x + 25
                        red[i].y = redbox[bb].y + 25
                        red[i].swap()
                        red[i].num = bb
                        doublecheck(red) #checks if the gamepiece can be made as a double.
                        nc = nc + 1
                        if bb == 57:
                            # del red[i]
                            red.remove(red[i]);
                        if nc > len(rolls) - 1:
                            BLUE = True #next players turn.
                            RED = False
                            clear()
                        break
            if BLUE == True and TURN == False: #same as REDS CODE
                print("Blue's Turn")
                print("moves available: ", rolls)
                la="BLUE"
                if (movecheck(blue, bluehome, bluebox, la)) == False:
                    print("NO MOVES SIR JEE")
                    BLUE = False
                    YELLOW = True
                    clear()
            if BLUE == True:
                for i in range(len(blue)):
                    if ((((cx > blue[i].x0 + 13) and (cx < blue[i].x + 13)) and (
                            (cy > blue[i].y0 + 14) and (cy < blue[i].y + 14)))
                            and (blue[i].x0 == bluehome[i].x) and (blue[i].y0 == bluehome[i].y)):
                        print("woila ")
                        if rolls[0 + nc] == 6:
                            blue[i].x0 = bluebox[0].x
                            blue[i].y0 = bluebox[0].y
                            blue[i].x = bluebox[0].x + 25
                            blue[i].y = bluebox[0].y + 25
                            blue[i].num = 0
                            blue[i].swap()
                            nc = nc + 1
                            if nc > len(rolls) - 1:
                                YELLOW = True
                                BLUE = False
                                clear()
                            break
                    if ((((cx > blue[i].x0 + 13) and (cx < blue[i].x + 13)) and (
                            (cy > blue[i].y0 + 14) and (cy < blue[i].y + 14)))
                            and ((blue[i].x0 > 270) or (blue[i].y0 < 470))):
                        print("woila ")
                        bb = ((blue[i].num) + rolls[0 + nc])
                        if bb > 57:
                            break
                        # bb= ((blue[i].num) + rolls[0 + nc]) - 52
                        kill(bluebox,red,yellow,green,redhome,yellowhome,greenhome)
                        blue[i].x0 = bluebox[bb].x
                        blue[i].y0 = bluebox[bb].y
                        blue[i].x = bluebox[bb].x + 25
                        blue[i].y = bluebox[bb].y + 25
                        blue[i].swap()
                        blue[i].num = bb
                        doublecheck(blue)
                        nc = nc + 1
                        if bb == 57:
                            # del red[i]
                            blue.remove(blue[i]);
                        if nc > len(rolls) - 1:
                            YELLOW = True
                            BLUE = False
                            clear()
                        break
            if YELLOW == True and TURN == False: #Same as RED's code
                print("Yellows's Turn")
                print("moves available: ", rolls)
                la="YELLOW"
                if (movecheck(yellow, yellowhome, yellowbox,la)) == False:
                    print("NO MOVES SIR JEE")
                    YELLOW = False
                    GREEN = True
                    clear()
            if YELLOW == True:
                for i in range(len(yellow)):
                    if ((((cx > yellow[i].x0 + 13) and (cx < yellow[i].x + 13)) and (
                            (cy > yellow[i].y0 + 14) and (cy < yellow[i].y + 14)))
                            and (yellow[i].x0 == yellowhome[i].x) and (yellow[i].y0 == yellowhome[i].y)):
                        print("woila ")
                        if rolls[0 + nc] == 6:
                            yellow[i].x0 = yellowbox[0].x
                            yellow[i].y0 = yellowbox[0].y
                            yellow[i].x = yellowbox[0].x + 25
                            yellow[i].y = yellowbox[0].y + 25
                            yellow[i].num = 0
                            yellow[i].swap()
                            nc = nc + 1
                            if nc > len(rolls) - 1:
                                YELLOW = False
                                GREEN = True
                                clear()
                            break
                    if ((((cx > yellow[i].x0 + 13) and (cx < yellow[i].x + 13)) and (
                            (cy > yellow[i].y0 + 14) and (cy < yellow[i].y + 14)))
                            and ((yellow[i].x0 < 470) or (yellow[i].y0 < 470))):
                        print("woila ")
                        bb = ((yellow[i].num) + rolls[0 + nc])
                        if bb > 57:
                            break
                        #bb = ((yellow[i].num) + rolls[0 + nc]) - 52
                        kill(yellowbox,blue,red,green,bluehome,redhome,greenhome)
                        yellow[i].x0 = yellowbox[bb].x
                        yellow[i].y0 = yellowbox[bb].y
                        yellow[i].x = yellowbox[bb].x + 25
                        yellow[i].y = yellowbox[bb].y + 25
                        yellow[i].swap()
                        yellow[i].num = bb
                        doublecheck(yellow)
                        nc = nc + 1
                        if bb == 57:
                            # del red[i]
                            yellow.remove(yellow[i]);
                        if nc > len(rolls) - 1:
                            YELLOW = False
                            GREEN = True
                            clear()
                        break
            if GREEN == True and TURN == False: #Same as RED's code
                print("Green's Turn")
                print("moves available: ", rolls)
                la="GREEN"
                if (movecheck(green, greenhome, greenbox,la)) == False:
                    print("NO MOVES SIR JEE")
                    GREEN = False
                    RED = True
                    clear()
            if GREEN == True:
                for i in range(len(green)):
                    if ((((cx > green[i].x0 + 13) and (cx < green[i].x + 13)) and (
                            (cy > green[i].y0 + 14) and (cy < green[i].y + 14)))
                            and (green[i].x0 == greenhome[i].x) and (green[i].y0 == greenhome[i].y)):
                        print("woila ")
                        if rolls[0 + nc] == 6:
                            green[i].x0 = greenbox[0].x
                            green[i].y0 = greenbox[0].y
                            green[i].x = greenbox[0].x + 25
                            green[i].y = greenbox[0].y + 25
                            green[i].num = 0
                            green[i].swap()
                            nc = nc + 1
                            print("green x.y: ", green[i].x0, green[i].y0)
                            if nc > len(rolls) - 1:
                                GREEN = False
                                RED = True
                                clear()
                            break
                    if ((((cx > green[i].x0 + 13) and (cx < green[i].x + 13)) and (
                            (cy > green[i].y0 + 14) and (cy < green[i].y + 14)))
                            and ((green[i].x0 < 470) or (green[i].y0 < 470))):
                        print("woila ")
                        bb = ((green[i].num) + rolls[0 + nc])
                        if bb > 57:
                            break
                        # bb = ((green[i].num) + rolls[0 + nc]) - 52
                        kill(greenbox,blue,yellow,red,bluehome,yellowhome,redhome)
                        green[i].x0 = greenbox[bb].x
                        green[i].y0 = greenbox[bb].y
                        green[i].x = greenbox[bb].x + 25
                        green[i].y = greenbox[bb].y + 25
                        green[i].swap()
                        green[i].num = bb
                        nc = nc + 1
                        doublecheck(green)
                        if bb == 57:
                            # del red[i]
                            green.remove(green[i]);
                        if nc > len(rolls) - 1:
                            GREEN = False
                            RED = True
                            clear()
                        break
main() #Main function is called once when c==0 to initialize all the gamepieces.
def leftClick(event): # Main play function is called on every left click.
    """Record the click position relative to the window and advance the game."""
    global c, cx, cy, RED, YELLOW
    c = c + 1
    cx = root.winfo_pointerx() - root.winfo_rootx() # This formula returns the x,y co-ordinates of the mouse pointer relative to the board.
    cy = root.winfo_pointery() - root.winfo_rooty()
    print("Click at: ", cx, cy)
    main() #Main function called on every click to progress the game
root.bind("<Button-1>", leftClick)
def turn(): #Shows whose turn it is in the side panel
    """Place a banner label naming the colour whose turn flag is set."""
    banners = ((RED, " Red's Turn "),
               (BLUE, " Blue's Turn "),
               (GREEN, "Green's Turn "),
               (YELLOW, "Yellow's Turn"))
    for active, caption in banners:
        if active == True:
            lbl = Label(root, text=caption, fg='Black', background='green', font=("Arial", 24, "bold"))
            lbl.place(x=770, y=50)
def roll(): #Rolling function that rolls a dice, goes again if its a six
    """Roll up to three dice for the current turn.

    Each click of the ROLL button rolls one die and appends it to the global
    ``rolls`` list; the player may roll again only if the previous roll was a 6
    (at most three rolls).  When rolling stops, TURN is set False so clicks are
    interpreted as moves.
    """
    global rollc, dice, dice1, dice2, TURN, rolls
    if TURN == True:
        rollc = rollc + 1
        print("roll: ", rollc)
        if rollc == 1:
            dice = random.randint(1, 6)
            L1 = Label(root, text=dice, fg='Black', background='green', font=("Arial", 24, "bold"))
            L1.place(x=800, y=200)
            print("dice: ", dice)
            rolls.append(dice)
            if dice != 6:
                rollc = 0
                TURN = False
        if rollc == 2:
            if dice == 6:
                dice1 = random.randint(1, 6)
                L3 = Label(root, text=dice1, fg='Black', background='green', font=("Arial", 24, "bold"))
                L3.place(x=800, y=250)
                rolls.append(dice1)
                if dice1 != 6:
                    rollc = 0
                    TURN = False
        if rollc == 3:
            if dice1 == 6:
                dice2 = random.randint(1, 6)
                L4 = Label(root, text=dice2, fg='Black', background='green', font=("Arial", 24, "bold"))
                L4.place(x=800, y=300)
                rolls.append(dice2)
                # Third roll always ends the rolling phase.
                rollc = 0
                TURN = False
def clear(): #clears all the variable prior to next player's turn
    """Reset per-turn state: move counter, roll list, dice labels; re-enable rolling."""
    global nc, rolls, TURN, L1, L3, L4
    nc = 0
    del rolls[:]
    TURN = True
    # Blank labels overwrite the previous dice values on screen.
    L1 = Label(root, text="   ", fg='Black', background='green', font=("Arial", 24, "bold"))
    L1.place(x=800, y=200)
    L3 = Label(root, text="   ", fg='Black', background='green', font=("Arial", 24, "bold"))
    L3.place(x=800, y=250)
    L4 = Label(root, text="   ", fg='Black', background='green', font=("Arial", 24, "bold"))
    L4.place(x=800, y=300)
    print("cleared")
    turn()
def movecheck(r, rh, rb, la): #Check if the player can make a move
    """Return True if the player can move this turn, False otherwise.

    Also detects and announces a win.  Three sixes forfeit the turn.
    """
    if (dice == 6 and dice1 == 6 and dice2 == 6):
        return False
    win=True #Checking if the game is won or the player can make any moves.
    # NOTE(review): iterates a fixed range(4) although pieces are removed from
    # r once they finish (bb == 57) — looks like this can IndexError late in a
    # game; confirm against the removal logic in main().
    # NOTE(review): the 'and' here means a piece matching EITHER coordinate of
    # the final square keeps win True — presumably 'or' was intended; verify.
    for j in range(4):
        if (r[j].x0 != rb[56].x) and (r[j].y0 != rb[56].y):
            win=False
    if win == True: #If all gamepieces home, prints that the player has won
        print("YOU HAVE WON")
        L2 = Label(root, text=(la + "Wins"), fg='Black', background='green', font=("Arial", 24, "bold"))
        L2.place(x=770, y=500)
        return False
    if win == False and dice != 6: #if its not a 6 and all game pieces inside home, then next players turn
        for i in range(len(r)):
            if(r[i].num != -1):
                (print("good hai"))
                return True
        print("jani all in")
        return False
    # Implicit: returns None when dice == 6 (caller compares '== False', so
    # None counts as "can move").
def kill(a,b,c,d,bh,ch,dh): #function that determines if a gamepiece can be killed
    """Send back to home any opposing piece sitting on the destination square.

    a  -- mover's path list; global bb indexes the destination square in it
    b, c, d    -- the three opposing colours' piece lists
    bh, ch, dh -- the matching home-square lists for those colours
    Pieces on one of the eight safe "stop" squares, or stacked as a double,
    cannot be killed.
    """
    #if the game piece is not on a stop
    if ((a[bb].x0 != box[1].x and a[bb].y0 != box[1].y) and (a[bb].x0 != box[14].x and a[bb].y0 != box[14].y) and
            (a[bb].x0 != box[9].x and a[bb].y0 != box[9].y) and (a[bb].x0 != box[22].x and a[bb].y0 != box[22].y) and
            (a[bb].x0 != box[27].x and a[bb].y0 != box[27].y) and (a[bb].x0 != box[35].x and a[bb].y0 != box[35].y) and
            (a[bb].x0 != box[40].x and a[bb].y0 != box[40].y) and (a[bb].x0 != box[48].x and a[bb].y0 != box[48].y)):
        #if the game piece of another color and its on the same block and it is not a double, a kill is made
        for i in range (len(b)):
            if (b[i].x0 == a[bb].x and b[i].y0 == a[bb].y and (b[i].double == False)):
                b[i].x0 = bh[i].x
                b[i].y0 = bh[i].y
                b[i].x = bh[i].x + 25
                b[i].y = bh[i].y + 25
                b[i].num=-1
                b[i].swap()
                break
        for i in range (len(c)):
            if (c[i].x0 == a[bb].x and c[i].y0 == a[bb].y and (c[i].double == False)):
                c[i].x0 = ch[i].x
                c[i].y0 = ch[i].y
                c[i].x = ch[i].x + 25
                c[i].y = ch[i].y + 25
                c[i].num=-1
                c[i].swap()
                break
        for i in range (len(d)):
            if (d[i].x0 == a[bb].x and d[i].y0 == a[bb].y and (d[i].double == False)):
                d[i].x0 = dh[i].x
                d[i].y0 = dh[i].y
                d[i].x = dh[i].x + 25
                d[i].y = dh[i].y + 25
                d[i].num=-1
                d[i].swap()
                break
def doublecheck(a):
    """Flag every piece that shares its square index with another piece of 'a'."""
    tally = {}
    for piece in a:
        tally[piece.num] = tally.get(piece.num, 0) + 1
    for piece in a:
        # A piece is a "double" when at least one sibling has the same num
        # (this also covers pieces still at home, which all share num == -1).
        piece.double = tally[piece.num] > 1
turn() #prints the "red player's turn" initially
button = Button(root, text=" ROLL ", relief="raised", font=("Arial", 20),
                command=roll) # call roll function every time this button is clicked
button.place(x=805, y=120)
root.mainloop()
#DEVELOPED BY ATCHUDHAN.S(STUDENT OF Sri Manakula Vinayagar Engineering College)
| [
"noreply@github.com"
] | Atchudhan.noreply@github.com |
5b9e715b4a6f63966cabaeca91f3f2947fd657ce | a5bacb3b8331d1be51d9664fa9f03e367813cdbb | /FlapPyBird/determine_linear_regression.py | 4da068f0b94c60fe95489e00139e418a77e091f4 | [
"MIT"
] | permissive | Anamitr/PUM | 36f31a5248ac982bce1187819753b1a81aabb85d | aecb1764757f67c3a7eaa653bbae43ea46241d29 | refs/heads/master | 2020-05-18T12:51:41.319601 | 2019-05-01T13:36:57 | 2019-05-01T13:36:57 | 184,420,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn import linear_model
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
FILE_NAME = '3'
IN_FILE_PATH = './data/' + FILE_NAME + '.csv'
OUT_FILE_PATH = './data/' + FILE_NAME + 'out.csv'
def linear_regression(X, Y):
    """Fit an ordinary least-squares line to (X, Y) and display the fit."""
    features = X.reshape(-1, 1)
    model = linear_model.LinearRegression()
    model.fit(features, Y)
    predictions = model.predict(features)
    plt.figure()
    plt.scatter(X, Y)
    plt.plot(X, predictions, color='red')
    plt.title('Linear regression')
    plt.show()
def save_to_csv(X, Y):
    """Write paired samples from the numpy arrays X and Y to OUT_FILE_PATH.

    Each row of the output CSV holds one (x, y) pair as plain Python scalars.

    Raises:
        ValueError: if X and Y have different lengths.
    """
    if len(X) != len(Y):
        # ValueError is the conventional type for mismatched argument shapes;
        # callers catching the old generic Exception still catch it.
        raise ValueError('len(X) != len(Y) !')
    with open(OUT_FILE_PATH, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for x_val, y_val in zip(X, Y):
            # .item() converts numpy scalars to native Python numbers, matching
            # the original output format exactly.
            writer.writerow([x_val.item(), y_val.item()])
def polynomial_regression(Xin, Yin):
    """Fit ridge-regularised polynomial models to (Xin, Yin), plot them, and
    save the last fitted curve to OUT_FILE_PATH via save_to_csv().

    Degrees first_degree..last_degree-1 are tried; with the current settings
    that is the single degree 12.
    """
    first_degree = 12  # 9
    last_degree = 13  # 13
    degree_step = 1
    plt.scatter(Xin, Yin)
    # Xout/Yout survive the loop so the final degree's curve can be exported.
    Xout = None
    Yout = None
    for degree in range(first_degree, last_degree, degree_step):
        # Predict on integer grid 0..last sample's x value.
        lastIntX = int(Xin[-1])
        Xout = np.arange(lastIntX)
        model = make_pipeline(PolynomialFeatures(degree), Ridge())
        model.fit(Xin.reshape(-1, 1), Yin)
        Yout = model.predict(Xout.reshape(-1, 1))
        plt.plot(Xout, Yout,
                 label="degree %d" % degree)
    plt.legend(loc='lower left')
    plt.show()
    save_to_csv(Xout, Yout)
# Read the raw (x, y) samples from IN_FILE_PATH, then fit and export a curve.
Etykiety = []   # header row labels
Dane = []       # remaining data rows
# read the data from the CSV file
with open(IN_FILE_PATH, mode='r') as plik_csv:
    csv_Rdr = csv.reader(plik_csv, delimiter=',')
    # read the first (header) row
    Etykiety = next(csv_Rdr)
    for wiersz in csv_Rdr:
        Dane.append(wiersz)
print(Dane)
# NOTE(review): np.empty([1]) seeds each array with one uninitialised value,
# so X[0]/Y[0] are garbage before the real samples — confirm this is intended.
X = np.empty([1])
Y = np.empty([1])
for d in Dane:
    X = np.append(X, float(d[0]))
    Y = np.append(Y, float(d[1]))
print('X = ', X)
print('Y = ', Y)
# linear_regression(X, Y)
polynomial_regression(X, Y)
| [
"sochacki.konrad@gmail.com"
] | sochacki.konrad@gmail.com |
def3dce5cc56dc5116d525765b8c6bc66cb2e7fa | a44cfbdacdb9d695533f425ee72da86f904232c1 | /bin/summarize-days | 3a7583591a34ce37d84e1baec23fe453d926fdf1 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | sofie-lu/quac | 434d1481949ad0a229e10b7ccc725f54740c2d44 | 03e3bd9691dddd819f629aba628e9fe6d45c2d3b | refs/heads/master | 2020-04-08T09:33:54.217874 | 2014-05-15T20:32:00 | 2014-05-15T20:32:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | #!/usr/bin/env python
'''Parse the given metadata pickle file and print, TSV format, a summary of
each day's metadata on stdout. Column order matches the metadata field
documentation.'''
# Copyright (c) 2012-2013 Los Alamos National Security, LLC, and others.
import argparse
import sys
import quacpath
import pickle_glue
import tsv_glue
import u
# Load the metadata pickle named on the command line and emit one TSV row per
# day, sorted by date, with the documented column order.
ap = argparse.ArgumentParser()
ap.add_argument('file', metavar='METADATA_FILE')
args = u.parse_args(ap)
tsv = tsv_glue.Writer(sys.stdout.fileno())
for (day, md) in sorted(pickle_glue.File(args.file).data['days'].items()):
   # 'count' may be falsy (None/0); normalise to 0 for output.
   tsv.writerow([str(day),
                 md['count'] or 0,
                 md['count_geotag'],
                 md['min_id'],
                 md['max_id']])
| [
"reidpr@lanl.gov"
] | reidpr@lanl.gov | |
51c871479cb07d5dac24c75423d4caa1c334ccde | 7cc96234594adc8fa42f30740d8d313d5246d90a | /plot_evo_logs.py | 6ba60aef8dd663bfd03014c2df094bf3ca894c62 | [] | no_license | ku3i/evolution | 3d5a8585d2b99dc65525455d894884fd0c03cd83 | 7e93655f812f7f93498f9393f140774865a34901 | refs/heads/master | 2021-01-12T11:49:58.684976 | 2020-12-30T23:40:44 | 2020-12-30T23:40:44 | 70,154,387 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,950 | py | #!/usr/bin/python
""" plot_evo_logs
-------------
this script opens the evolution.log files per selected robot
and plots the fitness values of the best/median/worst and -- on demand --
creates an evoplot for each experiment in pdf format and
puts it into the experiment's folder.
"""
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import numpy
from os.path import isfile, isdir
from tableau20 import tableau20
from common import *
columns = ['fmax', 'favg', 'fmin']#, 'rmax', 'ravg', 'rmin'] TODO
data_path = "../data/exp/"
logfile = "evolution.log"
pdfname = "evolog.pdf"
datafolder = "data"
def prepare_figure():
    """Open a 6x3-inch figure with a faded frame and left/bottom ticks only."""
    plt.figure(figsize=(6, 3))
    axes = plt.subplot(111)
    # Fade all four frame edges instead of hiding them completely.
    for side in ("top", "bottom", "right", "left"):
        axes.spines[side].set_alpha(0.25)
    # Ticks only on the left and bottom edges.
    axes.get_yaxis().tick_left()
    axes.get_xaxis().tick_bottom()
def read_csv(filename):
    """Parse a space-separated evolution log into a DataFrame with our column names."""
    frame = pd.read_csv(filename, sep=' ', header=None, names=columns)
    return frame
def create_evolog(target, experiment):
    """Plot the fitness columns of one experiment's evolution.log as a PDF.

    Best effort: any ordinary failure (missing/corrupt log, unwritable target
    folder) is reported and the experiment skipped.

    Args:
        target: run configuration namespace (unused here; kept so the
            signature matches the other create_* helpers).
        experiment: path prefix of the experiment folder, with trailing '/'.
    """
    print("creating {0}".format(experiment))
    try:
        evolution = read_csv(experiment+logfile)
        for i, e in enumerate(evolution):
            # One curve per logged statistic (fmax/favg/fmin).
            plt.plot(evolution[e].values,
                     color=tableau20[i],
                     lw=0.5)
        plt.legend(columns, loc="lower right")
        target_dir = "{0}/{1}".format(experiment, datafolder)
        create_folder(target_dir)
        plt.savefig("{0}/{1}".format(target_dir, pdfname),
                    bbox_inches="tight")
        plt.clf()
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; only ordinary errors should cause a skip.
        print("skipping " + experiment)
        return
def create_all_evologs(target):
    """Render one evolution-log plot for every experiment of this robot."""
    print("Creating all evologs.")
    prepare_figure()
    for experiment in target.experiments:
        create_evolog(target, experiment)
def create_summary_evologs(target, name, index_list):
    """Plot best/median/worst average-fitness curves for one experiment group.

    name is a format template with one slot for the trial index; index_list
    holds the candidate indices.  The resulting PDF is written next to the
    experiment folders.
    """
    print("\t{0}={1}".format(name, index_list))
    best,median,worst = get_best_worst_median(name, index_list)
    # draw plots
    trials = get_max_trials(name.format(0))
    evodict = {}
    assert isfile(name.format(best )+logfile)
    evodict['best'  ] = read_csv(name.format(best  )+logfile)['favg']
    evodict['median'] = read_csv(name.format(median)+logfile)['favg']
    evodict['worst' ] = read_csv(name.format(worst )+logfile)['favg']
    # Worst drawn first so best/median overlay it.
    plt.plot(evodict['worst' ], color=tableau20[1], lw=0.5 )
    plt.plot(evodict['best'  ], color=tableau20[0], lw=0.5 )
    plt.plot(evodict['median'], color=tableau20[2], lw=0.5 )
    if target.limit is not None:
        plt.ylim(0, target.limit)
    plt.legend(['min','max','med'], loc="lower right")
    #plt.show()
    plt.savefig("{0}_{1}".format(name.format("").rstrip("/").lstrip("_"), pdfname), bbox_inches="tight")
    plt.clf()
def create_summary(target):
    """Create a best/median/worst summary plot for each experiment group."""
    print("Creating summary.")
    prepare_figure()
    grouped = group_experiments(target)
    for group_name, indices in grouped.items():
        print("_" * 20)
        create_summary_evologs(target, group_name, indices)
def main():
    """Entry point: parse CLI options, then build either per-experiment
    evologs (-a) or the grouped best/median/worst summary (default)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--robot')
    parser.add_argument('-a', '--evologs', action="store_true", default=False)
    parser.add_argument('-y', '--yscale_limit', default=None, type=float)
    args = parser.parse_args()
    # 'is None' instead of '== None': identity test is the idiomatic (and
    # __eq__-proof) way to detect a missing argument.
    if args.robot is None:
        print("Error: no robot defined.")
        return
    robot = str(args.robot)
    # Lightweight attribute container, kept in the codebase's existing style.
    target = lambda: None
    target.path = data_path + robot
    target.robot = robot
    target.limit = args.yscale_limit
    if not isdir(target.path):
        print("Error: no such folder: {0}".format(target.path))
        return
    target.experiments = get_experiments(target)
    print(target.experiments)
    if args.evologs:
        create_all_evologs(target)
    else:
        create_summary(target)
    print("\n____\nDONE.\n")
    return
if __name__ == "__main__": main()
| [
"kubisch@informatik.hu-berlin.de"
] | kubisch@informatik.hu-berlin.de |
25933a755301dda6561a58f195d7462cdc9f384c | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/api_admin/api/filters.py | 63d4bf6ec7132ca2326fad9c709142a6713249fd | [
"MIT",
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 432 | py | """
Filters for api_admin api
"""
from rest_framework import filters
class IsOwnerOrStaffFilterBackend(filters.BaseFilterBackend):
    """
    Filter that only allows users to see their own objects or all objects if it is staff user.
    """
    def filter_queryset(self, request, queryset, view):
        requester = request.user
        # Staff see everything; everyone else only their own rows.
        if requester.is_staff:
            return queryset
        return queryset.filter(user=requester)
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
e2643c272c50ef6003fc30d02f35f62d1c40e9f6 | 3190c7895af4f7ebd6a236d4cf133ce3aa7ddac5 | /Image Hashing/index_hasher.py | 4e95f231116b6d72d76766f7a88dfe7778b836ca | [] | no_license | varuncj02/ML-Projects | 95ec40a8c782305c6df522171555c34500a301ed | 7492c83487f68eb57e9d1fdb13c0501d146bd564 | refs/heads/master | 2022-12-23T12:05:22.086180 | 2020-09-26T19:28:17 | 2020-09-26T19:28:17 | 297,880,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | #All the Input Images are indexed
# Index all input images: hash each one and serialize a VP-Tree plus a
# hash -> [image paths] dictionary to disk.
from Hash.hash import dhash
from Hash.hash import convert_hash
from Hash.hash import hamming_distance
from imutils import paths
import argparse
import pickle
import vptree
import cv2

# Command line arguments: image directory, output tree file, output hash dict.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True, type=str,
                help="path to input directory of images")
ap.add_argument("-t", "--tree", required=True, type=str,
                help="path to output VP-Tree")
ap.add_argument("-a", "--hashes", required=True, type=str,
                help="path to output hashes dictionary")
args = vars(ap.parse_args())

# Compute a perceptual (difference) hash for every image in the dataset and
# group image paths by hash value.
imagePaths = list(paths.list_images(args["images"]))
hashes = {}
for (i, imagePath) in enumerate(imagePaths):
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    image = cv2.imread(imagePath)
    h = convert_hash(dhash(image))
    # BUG FIX: the original did 'hashes[h] = 1' (the literal one instead of
    # the list variable 'l'), discarding every accumulated image path.
    hashes.setdefault(h, []).append(imagePath)

# Build the VP-Tree over the distinct hash values using Hamming distance.
print("[Info] VP Tree being built")
points = list(hashes.keys())
tree = vptree.VPTree(points, hamming_distance)

# Serialize the VP-Tree to disk.
print("Serializing VP-Tree...")
with open(args["tree"], "wb") as f:
    f.write(pickle.dumps(tree))

# Serialize the hash dictionary.
print("[INFO] serializing hashes...")
with open(args["hashes"], "wb") as f:
    f.write(pickle.dumps(hashes))
"varun.joshi.2016.go@gmail.com"
] | varun.joshi.2016.go@gmail.com |
8e278bd2560577e8ee0840a62159fc6fd226e8c8 | fea12960c40de21836fb98bcb828d978dde9d375 | /KNN.py | a74ed2ff4842b6319ada98325d40e8e0d630fe1d | [] | no_license | Tapas15/Machine-Learning-Algorithms-in-Python | e0e9e1fba56564cfd94730456ce5205953fc4074 | 6105506a91e798edc4dee8f70cc019f0c632a267 | refs/heads/master | 2022-01-11T23:36:53.539938 | 2018-12-18T20:27:05 | 2018-12-18T20:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | import numpy as np
def distance(x, y, p):
    """Return the l_p distance between vectors x and y."""
    diffs = np.abs(x - y) ** p
    return sum(diffs) ** (1 / p)
class KNN(object):
    """The K Nearest Neighbours Classifier (majority vote over the k closest samples)."""

    def __init__(self, k=3, p=2):
        self.k = k
        self.p = p  # order of the l_p norm used for distances

    def kdists(self, x_t):
        """Return the indices of the k training samples nearest to x_t."""
        dists = [distance(x_t, sample, self.p) for sample in self.X]
        # Stable sort: ties keep ascending index order, as before.
        order = sorted(range(len(dists)), key=dists.__getitem__)
        return order[:self.k]

    def vote(self, y):
        """Return the most frequent label among y (smallest label wins ties)."""
        labels, counts = np.unique(y, return_counts=True)
        return labels[np.argmax(counts)]

    def fit(self, X, y):
        """Memorise the training data; KNN has no real training step."""
        self.X = X
        self.y = y

    def predict(self, X):
        """Predict one label per query row via majority vote of neighbours."""
        return np.array([self.vote(self.y[self.kdists(query)]) for query in X])
def main():
    """Demo: train the KNN on two iris features and plot its decision regions."""
    # import data
    from sklearn import datasets
    iris = datasets.load_iris()
    # Use only petal length/width (columns 2 and 3).
    X = iris.data[:,[2,3]]
    y = iris.target
    from sklearn.model_selection import train_test_split
    X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=1,stratify=y)
    # train the classifier
    knn = KNN(k=5,p=2)
    knn.fit(X_train,y_train)
    # visualize the decision regions
    import pandas as pd
    import matplotlib.pyplot as plt
    import numpy as np
    from Perceptron import plot_decision_regions
    #plt.figure()
    plot_decision_regions(X,y,classifier=knn)
    plt.title('KNN')
    # NOTE(review): axis labels say sepal but columns 2/3 are petal features —
    # labels look copy-pasted; confirm.
    plt.xlabel('sepal length [standardized]')
    plt.ylabel('petal length [standardized]')
    plt.show()
main()
| [
"stonemason11@gmail.com"
] | stonemason11@gmail.com |
61db0025c3c7ca92be1ae407066a280d88782dd7 | 63a3a601662a6b81b3fbf777ece8eef8cd93ebe3 | /1041.py | 990cfb4a58db0f1a024dbc4172b5f6b0a58c5bb1 | [] | no_license | thainadlopes/ExerciciosURI | d2b48618f02e68766c6b30a147a1c57b4271270b | a177d14d1d4dd4864d972fa9c291cf8690956e7d | refs/heads/main | 2023-08-21T10:52:42.873115 | 2021-10-22T13:44:00 | 2021-10-22T13:44:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | p = input().split(" ")
x, y = p
x = float(x)
y = float(y)
if x == 0:
if y == 0:
print("Origem")
if y != 0:
print("Eixo Y")
if y == 0:
if x != 0:
print("Eixo X")
if x > 0:
if y > 0:
print("Q1")
if x < 0:
if y > 0:
print("Q2")
if x < 0:
if (y) < 0:
print("Q3")
if x > 0:
if y < 0:
print("Q4")
| [
"noreply@github.com"
] | thainadlopes.noreply@github.com |
d119909977849bf6e3ddef452a9ddd233c0dec12 | 6c3d4a2f770e1fc2057d54f8ea5d34ceddb37052 | /joinyesterday.py | 47fa1b6c08e8401c63e658ac4829bbb9944652df | [] | no_license | thunderlz/python_projects | 924bf1fdac626cca944095a9301cf1f58c65d65c | 62a17d44022bc2f1af453f47974a27a11ca92dee | refs/heads/master | 2021-06-30T23:13:25.908600 | 2021-06-14T01:12:44 | 2021-06-14T01:12:44 | 240,705,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,521 | py | import glob
import numpy as np
import cv2
import datetime
def denoise(img, n=1):
    """Apply OpenCV's coloured non-local-means denoiser to img, n times in a row.

    Args:
        img: BGR image (numpy array) as returned by cv2.imread/VideoCapture.
        n: number of denoising passes; n <= 0 returns img unchanged
           (the original raised UnboundLocalError in that case).

    Returns:
        The denoised image (or the input image when n <= 0).
    """
    dst = img
    for _ in range(n):
        dst = cv2.fastNlMeansDenoisingColored(dst, None, 10, 10, 7, 21)
    return dst
# Collect the recording file names and sort them chronologically.
import glob
homepath='/mnt/data1'
vpaths=[]
vpaths.append(homepath+'/micam/xiaomi_camera_videos/5ce50c581545/')
vpaths.append(homepath+'/micam/xiaomi_camera_videos/5ce50c74d629/')
for vpath in vpaths:
    vpathlist=[]
    # gather every file inside every sub-directory of this camera folder
    for vdir in glob.glob(vpath+'*'):
        vpathlist.extend(glob.glob(vdir+'/*'))
    vpathlist.sort()
    print(len(vpathlist))
    print(vpathlist[-1])
    # take the first frame of each clip recorded yesterday
    yesterday=(datetime.datetime.now()-datetime.timedelta(1)).strftime('%Y%m%d')
    start=yesterday+'00'
    end=yesterday+'24'
    ndvideo=[]
    dim=(1920,1080)
    printctl=0
    for vfile in vpathlist:
        # print(vfile[51:61])
        # NOTE(review): vfile[51:61] presumably slices the YYYYMMDDHH part of
        # the absolute path — it silently breaks if homepath changes; verify.
        if vfile[51:61]>=start and vfile[51:61]<=end:
            if printctl%10==0:
                print(vfile[51:68])
            printctl+=1
            cap=cv2.VideoCapture(vfile)
            ret,img_nd=cap.read()
            img_nd=cv2.resize(img_nd,dim)
            # img_nd=denoise(img_nd,1)
            ndvideo.append(img_nd)
    # write the time-lapse video assembled from the collected frames
    # fourcc = cv2.VideoWriter_fourcc(*'H264')
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    out = cv2.VideoWriter(vpath+'dist/'+yesterday+'.mp4',fourcc, 15.0, dim,True)
    for i in ndvideo:
        out.write(i)
    out.release()
"thunderlz@163.com"
] | thunderlz@163.com |
7fc9221dbba610e88f8cb20d4bd30165611e1c67 | ff4944e9e4f8f62025e58fab314c2a0094585ef1 | /manage.py | 58b1490ef21721358e58c9143f054ed9de14403e | [] | no_license | zhanggm33/honvue_pag | b92fcb241ff69a822aeef24d4634a2368a2e92b1 | d0a07f2ff06c696356ba87f139ce660b4c9c394e | refs/heads/master | 2021-01-21T10:22:32.081643 | 2017-02-28T10:37:23 | 2017-02-28T10:37:23 | 83,420,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/env python
# Standard Django management entry point for the honvue_pag project.
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "honvue_pag.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
"1458707887@qq.com"
] | 1458707887@qq.com |
a7c122232e81e5e47e91a674fc6c774c9855a176 | 312804f1d64e6f8f414f2608a4929d42f03d8ee3 | /utils/bbc_info.py | 7d87591240260f30b1e1ec57c0245820f1f30f9c | [
"Apache-2.0"
] | permissive | beyond-blockchain/bbc1 | 38824f59234aedc6848e5cc5741f8645166f153e | 3294047b2dad44768c2ef3fcb411be0d5453a4f0 | refs/heads/develop | 2023-08-11T21:18:28.450007 | 2021-05-07T23:52:40 | 2021-05-07T23:52:40 | 106,474,485 | 90 | 57 | NOASSERTION | 2023-05-28T09:27:18 | 2017-10-10T21:41:10 | Python | UTF-8 | Python | false | false | 5,156 | py | #!/bin/sh
""":" .
exec python "$0" "$@"
"""
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import binascii
import json
import pprint
import os
import sys
sys.path.append("../")
from bbc1.core import bbc_app
from bbc1.core.bbc_config import DEFAULT_CORE_PORT
from bbc1.core import bbclib
from bbc1.core.message_key_types import KeyType
def wait_check_result_msg_type(callback, msg_type):
    """Wait for the next message from the core.

    Emits a warning on stderr when the received command type differs from
    *msg_type*; the message is returned either way.
    """
    message = callback.synchronize()
    received_type = message[KeyType.command]
    if received_type != msg_type:
        sys.stderr.write("XXXXXX not expected result: %d <=> %d(received)\n" % (msg_type, received_type))
    return message
def get_neighborlist(client):
    """Print the neighbor list of every domain known to the connected core.

    Args:
        client: a connected bbc_app.BBcAppClient instance.
    """
    client.get_domain_list()
    domainlist = client.callback.synchronize()
    for domain_id in domainlist:
        client.get_domain_neighborlist(domain_id=domain_id)
        # BUGFIX: the original read from the module-global ``bbcclient``
        # instead of the ``client`` argument, breaking any caller that
        # passed a different client instance.
        dat = client.callback.synchronize()
        print("====== neighbor list of domain:%s =====" % binascii.b2a_hex(domain_id).decode())
        print(" node_id(4byte), ipv4, ipv6, port, is_domain0")
        for k in range(len(dat)):
            node_id, ipv4, ipv6, port, domain0 = dat[k]
            if k == 0:
                print("*myself* %s, %s, %s, %d, %s" % (binascii.b2a_hex(node_id[:4]), ipv4, ipv6, port, domain0))
            else:
                print(" %s, %s, %s, %d, %s" % (binascii.b2a_hex(node_id[:4]), ipv4, ipv6, port, domain0))
def argument_parser():
    """Build and evaluate the command-line interface for this utility."""
    parser = argparse.ArgumentParser(description='Configure bbc_core using json conf file.')
    parser.add_argument('-4', '--ip4address', action='store', default="127.0.0.1", help='bbc_core address (IPv4)')
    parser.add_argument('-6', '--ip6address', action='store', help='bbc_core address (IPv6)')
    parser.add_argument('-p', '--port', action='store', default=DEFAULT_CORE_PORT, help='port number of bbc_core')
    parser.add_argument('-d', '--domain_id', action='store', default=None, help='domain_id HEX string')
    parser.add_argument('-l', '--neighborlist', action='store_true', default=False,
                        help='Get neighbor_list in bbc_core')
    parser.add_argument('-u', '--userlist', action='store_true', default=False, help='Get user_ist in bbc_core')
    parser.add_argument('-n', '--my_node_id', action='store_true', default=False, help='Get my node_id')
    parser.add_argument('--stat', action='store_true', default=False, help='Get statistics of the bbc_core')
    parser.add_argument('--getconfig', action='store_true', default=False, help='Get config from bbc_core')
    parser.add_argument('-k', '--node_key', action='store', default=".bbc1/node_key.pem",
                        help="path to node key pem file")
    return parser.parse_args()
# Command-line driver: connect to a bbc_core node and run exactly one of the
# info queries selected by the parsed arguments.
if __name__ == '__main__':
    parsed_args = argument_parser()
    addr = None
    port = None
    # IPv6, when given, takes precedence over the IPv4 default.
    if parsed_args.ip4address:
        addr = parsed_args.ip4address
    if parsed_args.ip6address:
        addr = parsed_args.ip6address
    port = parsed_args.port
    bbcclient = bbc_app.BBcAppClient(host=addr, port=port, multiq=False, loglevel="all")
    # Authenticate with the node key only if the pem file actually exists.
    if os.path.exists(parsed_args.node_key):
        bbcclient.set_node_key(parsed_args.node_key)
    if parsed_args.getconfig:
        # Dump the core's config.json and exit.
        bbcclient.get_bbc_config()
        dat = wait_check_result_msg_type(bbcclient.callback, bbclib.MsgType.RESPONSE_GET_CONFIG)
        print("------ config.json ------")
        conf = json.loads(dat[KeyType.bbc_configuration].decode())
        pprint.pprint(conf, width=80)
        sys.exit(0)
    # Statistics is also the default action when no other query was requested.
    if parsed_args.stat or (not parsed_args.my_node_id and not parsed_args.userlist and not parsed_args.neighborlist):
        bbcclient.get_stats()
        dat = wait_check_result_msg_type(bbcclient.callback, bbclib.MsgType.RESPONSE_GET_STATS)
        print("------ statistics ------")
        pprint.pprint(dat[KeyType.stats], width=80)
        sys.exit(0)
    # The remaining queries all operate on a specific domain.
    if parsed_args.domain_id is None:
        sys.stderr.write("-d option (domain_id) is mandatory\n")
        sys.exit(1)
    domain_id = bbclib.convert_idstring_to_bytes(parsed_args.domain_id)
    bbcclient.set_domain_id(domain_id)
    if parsed_args.my_node_id:
        bbcclient.get_node_id()
        node_id = bbcclient.callback.synchronize()
        print("Node_id is %s" % node_id.hex())
    elif parsed_args.userlist:
        bbcclient.get_user_list()
        user_list = bbcclient.callback.synchronize()
        print("------- user_list -------")
        for uid in user_list:
            print("User_id: ", uid.hex())
    elif parsed_args.neighborlist:
        get_neighborlist(bbcclient)
    sys.exit(0)
| [
"takeshi@quvox.net"
] | takeshi@quvox.net |
daedea80fd2334ac4115c77ce54a62b7056bc5f6 | 869dc33acb74345301e67e4672373606427382a9 | /main.py | e831bba58de7801662d7affdc8b1a89806049101 | [] | no_license | MohammadHasan249/PizzaParlour-REST-API | 9f2e10b5002780290856972a8b8948dab515bec5 | 0c6642745c22512cfcc28b5bdc021180eaf43bb5 | refs/heads/main | 2023-05-09T06:14:52.090077 | 2021-05-25T21:05:16 | 2021-05-25T21:05:16 | 323,229,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,863 | py | import requests
from typing import Optional
"""
The main class of the program - for the customer.
"""
# Shared user-facing messages printed by the interactive client.
NOT_UNDERSTAND = "We could not understand what you typed. Please try again."
ORDER_NOT_FOUND = "Sorry, this order doesn't exist."
ITEM_NOT_FOUND = "This type of item does not exist. Please try again.\n"
# Base URL of the local PizzaParlour REST API (Flask dev server).
LOCALHOST = "http://127.0.0.1:5000/"
def add_item() -> None:
    """
    Ask the user for information of an item, and add it to an order, as long
    as the order is valid.

    Prompts for the item category, then for the category-specific details,
    and finally posts the collected details to the server's add-item endpoint.
    """
    b = is_valid_order()
    if not b[0]:
        return
    order_num = b[1]
    print("Here are all the items you can add:")
    print("\t1. Pizzas\n\t2. Drinks\n\t3. Sides\n\t4. Nothing")
    item_num = input("Which item would you like to add? Type the number "
                     "corresponding to your choice: ")
    menu_types = {"1": "Pizzas", "2": "Drinks", "3": "Sides"}
    if item_num == "4":
        return
    if item_num not in menu_types:
        print(NOT_UNDERSTAND)
        return
    item_type = menu_types[item_num]
    item_name = _find_type(item_type)
    # BUGFIX: _find_type returns None when its prompts cannot be parsed; the
    # original indexed into it unconditionally and crashed with TypeError.
    if item_name is None or item_name[1] is None or item_name[1] is True:
        return
    item = _add_specific_item(item_name[0], item_type, False)
    if item is None:
        # The detail prompts failed to parse; nothing to send.
        return
    item_details = {
        "Order Number": order_num,
        "Item Type": item_type,
        # BUGFIX: send the flavour string, not the (flavour, type) tuple
        # returned by _find_type.
        "Item Name": item_name[0],
        "Item Quantity": item["Quantity"]
    }
    if item_type == "Pizzas":
        # BUGFIX: topping keys are absent when the user picked "Nothing",
        # which used to raise KeyError here.
        if "Toppings" in item:
            item_details["Toppings"] = item["Toppings"]
            item_details["ToppingsAmount"] = item["ToppingsAmount"]
        item_details["Size"] = item["Size"]
    elif item_type == "Drinks":
        # BUGFIX: "Ice" is absent when the answer was not 1 or 2.
        if "Ice" in item:
            item_details["Ice"] = item["Ice"]
        item_details["Size"] = item["Size"]
    response = requests.get(LOCALHOST + "add-item", params=item_details)
    resp = response.json()
    if resp["Status Code"] == 200:
        print("Item added to your cart.\n")
    else:
        # BUGFIX: the original compared the HTTP status here, so an HTTP-200
        # reply with a non-200 application status printed nothing at all.
        print("Could not add item to cart.")
def _add_specific_item(item_name: str, item_type: str, b: bool) -> dict:
    """
    Helper function for add_item: dispatch to the prompt helper that matches
    the item category.
    """
    if item_type == "Pizzas":
        return _add_pizza(item_name, b)
    if item_type == "Drinks":
        return _add_drink(item_name)
    return _add_side(item_name)
def _add_pizza(item_name: str, b: bool) -> Optional[dict]:
    """
    Helper function for _add_specific_item.

    Prompts for the quantity, toppings and size of a pizza and returns the
    answers as a dict, or None when any answer could not be parsed.
    """
    p = {}
    if b:
        # BUGFIX: when ``b`` was True the original fell through and used
        # ``quantity`` before assignment (NameError). No caller in this
        # module passes True; treat it as "no details collected".
        return None
    quantity = input("How many {} pizzas would you like? Type a "
                     "whole number: ".format(item_name))
    try:
        quantity = int(quantity)
        if quantity < 0:
            raise ValueError
        p["Quantity"] = quantity
        toppings = _find_type("Toppings")
        # BUGFIX: _find_type returns None when its prompts fail; the
        # original called len(None) and crashed.
        if toppings is None:
            return None
        if len(toppings) == 3:
            p["Toppings"] = toppings[0]
            p["ToppingsAmount"] = toppings[2]
        size = _find_type("PizzaSizes")
        # BUGFIX: guard against None / (None, None) before indexing.
        if size is None or size[0] is None:
            return None
        p["Size"] = size[0]
    except ValueError:
        print(NOT_UNDERSTAND)
        return
    return p
def _add_drink(item_name: str) -> Optional[dict]:
    """
    Helper function for _add_specific_item.

    Prompts for the quantity, ice preference and size of a drink and returns
    the answers as a dict, or None when any answer could not be parsed.
    """
    p = {}
    quantity = input("How many {} drinks would you like? Type a "
                     "whole number: ".format(item_name))
    try:
        quantity = int(quantity)
        if quantity < 0:
            raise ValueError
        p["Quantity"] = quantity
        ice = input("Would you like ice in your drink? Type 1 for yes, "
                    "2 for no: ")
        if ice in ["1", "2"]:
            p["Ice"] = ice
        size = _find_type("DrinkSizes")
        # BUGFIX: guard against None / (None, None) before indexing.
        if size is None or size[0] is None:
            return None
        # BUGFIX: store the size name itself; the original stored the whole
        # (flavour, item_type) tuple, unlike _add_pizza.
        p["Size"] = size[0]
    except ValueError:
        print(NOT_UNDERSTAND)
        return
    return p
def _add_side(item_name: str) -> Optional[dict]:
    """
    Helper function for _add_specific_item.

    Prompts for the quantity of the side and the number of sauces and returns
    the answers as a dict, or None when an answer is not a non-negative int.
    """
    details = {}
    raw_quantity = input("How many {} would you like? Type a "
                         "whole number: ".format(item_name))
    try:
        count = int(raw_quantity)
        if count < 0:
            raise ValueError
        details["Quantity"] = count
        raw_sauces = input("How many sauces would you like? Type a whole number: ")
        sauce_count = int(raw_sauces)
        if sauce_count < 0:
            raise ValueError
        details["Sauces"] = sauce_count
    except ValueError:
        print(NOT_UNDERSTAND)
        return
    return details
def create_order() -> None:
    """
    Create an order, giving it a new order number.

    Asks the server to open a new order and reports the assigned number.
    """
    reply = requests.get(LOCALHOST + "create-order").json()
    if reply["Status Code"] != 200:
        print("Order could not be created.")
        return
    print("You have been assigned order number {}.".format(reply["Order Number"]))
def is_valid_order() -> tuple:
    """
    Check whether an order exists, given an order number typed by the user.

    :return: (True, order_number) when the order exists; otherwise the
        one-element tuple (False,).
    """
    raw = input("What is the order number of the order you want to "
                "select? ")
    try:
        number = int(raw)
    except ValueError:
        print(NOT_UNDERSTAND)
        return (False,)
    details = requests.get(LOCALHOST + "valid-order",
                           params={"OrderNumber": number}).json()
    status = details["Status Code"]
    if status == 204:
        print(ORDER_NOT_FOUND)
        return (False,)
    if status == 404:
        print("Order hasn't been cancelled due to a connection error.")
        return (False,)
    return True, number
def show_order() -> None:
    """
    Show the total order, given its order number.

    Asks the user for an order number and prints the server's summary of it.
    """
    raw = input("What is the order number of the order you want to see? ")
    try:
        number = int(raw)
    except ValueError:
        print(NOT_UNDERSTAND)
        return
    details = requests.get(LOCALHOST + "show-order",
                           params={"OrderNumber": number}).json()
    status = details["Status Code"]
    if status == 204:
        print(ORDER_NOT_FOUND)
    elif status == 404:
        print("Order hasn't been processed due to a connection error.")
    else:
        print(details["Order"])
def checkout_order() -> None:
    """
    Finalize and checkout an order, and send it for delivery.

    Asks for a delivery option (and address when not dining in), then submits
    the checkout request and prints any notification the server returns.
    """
    selection = is_valid_order()
    if not selection[0]:
        return
    order_num = selection[1]
    print("Here are the options for ordering:")
    print("\t1. Dine-in\n\t2. PizzaParlour's own delivery\n\t3. UberEats\n\t4."
          " Foodora")
    company = input("How would you like to order? Type the number corresponding"
                    " to your choice: ")
    if company not in {"1", "2", "3", "4"}:
        return
    # Dine-in (option 1) needs no address; every delivery option does.
    address = "" if company == "1" else input("Please type your address for delivery: ")
    payload = {
        "OrderNumber": order_num,
        "Address": address,
        "Company": company
    }
    details = requests.get(LOCALHOST + "checkout-order", params=payload).json()
    status = details["Status Code"]
    if status == 204:
        print(ORDER_NOT_FOUND)
    elif status == 200:
        if details["Notification"] is not None:
            print(details["Notification"])
    else:
        print("Order couldn't be delivered due to a connection error.")
def cancel_order() -> None:
    """
    Cancel an existing order.

    Asks the user for an order number and requests its cancellation.
    """
    raw = input("What is the order number of the order you want to cancel? ")
    try:
        number = int(raw)
    except ValueError:
        print(NOT_UNDERSTAND)
        return
    details = requests.get(LOCALHOST + "cancel-order",
                           params={"OrderNumber": number}).json()
    status = details["Status Code"]
    if status == 204:
        print(ORDER_NOT_FOUND)
    elif status == 404:
        print("Order couldn't be cancelled due to a connection error.")
    else:
        print("Your order has been cancelled.")
def show_menu() -> None:
    """
    Show the entire menu to the customer.
    """
    menu_reply = requests.get(LOCALHOST + "show-menu").json()
    if menu_reply["Status Code"] == 200:
        print(menu_reply["Menu"])
def _find_type(item_type: str) -> Optional[tuple]:
    """
    Ask the customer to pick one flavour of the given item type.

    Fetches the available flavours from the server and prompts for a choice.

    :param item_type: category name sent to the "flavours" endpoint
        (e.g. "Pizzas", "Drinks", "Toppings", "PizzaSizes", "DrinkSizes").
    :return: one of several shapes:
        - (flavour, item_type) for a normal pick;
        - (flavour, item_type, amount) when item_type is "Toppings";
        - ("Nothing", True) when the user picks the extra "Nothing" entry;
        - (None, None) when the typed number is out of range;
        - None when the typed text is not an integer (or a topping amount
          fails to parse).
    """
    items = {
        "ItemName": item_type
    }
    resp = requests.get(LOCALHOST + "flavours", params=items)
    flavours = resp.json()["Flavours"]
    print("Here are all the {} types:".format(item_type))
    for i in range(len(flavours)):
        print("\t{}. {}".format(i + 1, flavours[i]))
    # Size menus get no "Nothing" entry: a size is mandatory.
    if item_type not in ["PizzaSizes", "DrinkSizes"]:
        print("\t{}. Nothing".format(len(flavours) + 1))
    flav = input("Which type would you like to get? Type the number "
                 "corresponding to your choice: ")
    try:
        flav = int(flav)
    except ValueError:
        print("We could not understand what you typed.\n")
        return
    if flav in [i + 1 for i in range(len(flavours))]:
        # Toppings additionally need an amount.
        if item_type == "Toppings":
            amount = input("How many {} do you want? Type a whole "
                           "number: ".format(flavours[flav - 1]))
            try:
                amount = int(amount)
            except ValueError:
                print(NOT_UNDERSTAND)
                return
            return flavours[flav - 1], item_type, amount
        return flavours[flav - 1], item_type
    elif flav == len(flavours) + 1:
        # NOTE(review): this branch is reachable even for size menus, where
        # "Nothing" was never offered — callers must handle it.
        return "Nothing", True
    return None, None
def find_menu_item() -> None:
    """
    Find a specific menu item, given the item's information.

    Prompts for an item category and flavour, then asks the server for the
    matching menu entry and prints it.
    """
    print("Here are all the item types on the menu:")
    print("\t1. Pizzas\n\t2. Drinks\n\t3. Toppings\n\t4. Sides")
    item_type = input("Which type of item would you like to search for? Type "
                      "the number corresponding to your choice: ")
    categories = {"1": "Pizzas", "2": "Drinks", "3": "Toppings", "4": "Sides"}
    t = None
    if item_type in categories:
        t = _find_type(categories[item_type])
    # BUGFIX: _find_type can return (None, None) (out-of-range choice) or
    # ("Nothing", True) (user declined); both tuples are truthy, so the
    # original forwarded them to the server as nonsense queries.
    if not t or t[0] is None or t[1] is True:
        print(ITEM_NOT_FOUND)
        return
    result = {
        "ItemName": t[0],
        "ItemType": t[1]
    }
    item = requests.get(LOCALHOST + "find-item", params=result).json()
    if item["Status Code"] == 204:
        print(ITEM_NOT_FOUND)
    else:
        print(item["Item"])
def do_command() -> None:
    """
    Repeatedly show the command menu and perform the command the user asks
    for, until the user chooses to exit.
    """
    # BUGFIX: the original re-invoked do_command() recursively after every
    # command, so a long enough session hit Python's recursion limit; this
    # explicit loop produces the same prompts and behaviour without that risk.
    actions = {
        "1": create_order,
        "2": add_item,
        "3": show_order,
        "4": checkout_order,
        "5": cancel_order,
        "6": show_menu,
        "7": find_menu_item,
    }
    while True:
        print("Here are your options:")
        print("\t1. Create a new order")
        print("\t2. Add item to an order")
        print("\t3. Show the total order")
        print("\t4. Checkout order for delivery")
        print("\t5. Cancel an existing order")
        print("\t6. Show the menu")
        print("\t7. Find a specific item from the menu")
        print("\t8. Exit the program")
        cmd = input("\nWhat would you like to do? Type the number corresponding to "
                    "your choice: ")
        if cmd == "8":
            return
        if cmd in actions:
            actions[cmd]()
        else:
            print(NOT_UNDERSTAND)
        print("\n")
def run() -> None:
    """
    Main entry point: greet the customer, show the menu once, then enter the
    interactive command loop.
    """
    print("Hi! Welcome to Pizza Parlour.")
    show_menu()
    do_command()
# Start the interactive client when executed as a script.
if __name__ == '__main__':
    run()
| [
"mohd.hasan@mail.utoronto.ca"
] | mohd.hasan@mail.utoronto.ca |
def31c0b6beeeb6ee3f5fcae6c5fd63d18c0f144 | e1b2664ad071e22b4bd2246ff2631b93339d799d | /cg_t5_full/DS/data_set_joint.py | e73f3485588b27a9c923068427e119a0dd6c3ade | [
"Apache-2.0"
] | permissive | fjiangAI/CAIL2021 | 27f7740e135abace1244dd31458079648df4c99b | d21f467773467fdb47923623e99e6d7836a67cc1 | refs/heads/main | 2023-07-28T22:02:12.432994 | 2021-09-14T02:46:05 | 2021-09-14T02:46:05 | 393,288,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,821 | py | #!/usr/bin/env python
# encoding: utf-8
'''
#-------------------------------------------------------------------#
# CONFIDENTIAL --- CUSTOM STUDIOS #
#-------------------------------------------------------------------#
# #
# @Project Name : t5p #
# #
# @File Name : data_set_joint.py #
# #
# @Programmer : Jeffrey #
# #
# @Start Date : 2021/3/27 21:19 #
# #
# @Last Update : 2021/3/27 21:19 #
# #
#-------------------------------------------------------------------#
# Classes: #
# #
#-------------------------------------------------------------------#
'''
import torch
import json
import os
from tqdm import tqdm
from torch.utils.data import Dataset
import logging
from torch.nn.utils.rnn import pad_sequence
logger = logging.getLogger(__name__)
class GPT2NewsTitleDataSet(Dataset):
    """Dataset providing the samples required by the title/response generation model."""
    def __init__(self, tokenizer, max_len, rs_max_len, data_dir, data_set_name, path_file=None, is_overwrite=False):
        """
        Initializer.
        Args:
            tokenizer: tokenizer used to split text and map tokens to ids
            max_len: maximum combined length of one sample (inputs + response)
            rs_max_len: maximum length of the generated response ("rs")
            data_dir: directory in which the cache file is stored
            data_set_name: data-set name, used in the cache file name
            path_file: path of the raw JSON data file
            is_overwrite: whether to rebuild the cache file even if it exists
        """
        self.tokenizer = tokenizer
        # Special-token ids; NOTE(review): these three ids are resolved here
        # but not referenced elsewhere in this class.
        self.du1_id = self.tokenizer.convert_tokens_to_ids("[du1]")
        self.rs_id = self.tokenizer.convert_tokens_to_ids("[rs]")
        self.du2_id = self.tokenizer.convert_tokens_to_ids("[du2]")
        self.max_len = max_len
        self.rs_max_len = rs_max_len
        cached_feature_file = os.path.join(data_dir, "cached_{}_{}".format(data_set_name, max_len))
        # If a cache file already exists, load the pre-processed data directly.
        if os.path.exists(cached_feature_file) and not is_overwrite:
            logger.info("已经存在缓存文件{},直接加载".format(cached_feature_file))
            self.data_set = torch.load(cached_feature_file)["data_set"]
        # Otherwise pre-process the raw data and persist the result as a cache.
        else:
            logger.info("不存在缓存文件{},进行数据预处理操作".format(cached_feature_file))
            self.data_set = self.load_data(path_file)
            logger.info("数据预处理操作完成,将处理后的数据存到{}中,作为缓存文件".format(cached_feature_file))
            torch.save({"data_set": self.data_set}, cached_feature_file)
    def load_data(self, path_file):
        """
        Load the raw data file and convert every sample into model features.
        Args:
            path_file: path of the raw JSON data file
        Returns:
            list of dicts with keys "input_ids", "label_ids" and "class_ids"
        """
        self.data_set = []
        with open(path_file, "r", encoding="utf-8") as fh:
            data = json.load(fh)
            for idx, sample in enumerate(tqdm(data, desc="iter", disable=False)):
                # Index the sample with convert_feature to produce the format
                # the model expects.
                input_ids, label_ids, class_label = self.convert_feature(sample)
                self.data_set.append({"input_ids": input_ids, "label_ids": label_ids, "class_ids": class_label})
        return self.data_set
    def find_longest_du(self, dus):
        # Return the index of the longest token sequence in ``dus`` together
        # with the summed length of all sequences.
        max_length = 0
        max_index = -1
        all_length = 0
        for index, du in enumerate(dus):
            all_length += len(du)
            if len(du) > max_length:
                max_length = len(du)
                max_index = index
        return max_index, all_length
    def convert_feature(self, sample):
        """
        Convert one raw sample into model features.
        Args:
            sample: dict with keys "du1" (first utterance), "du2" (list of
                follow-up utterances), "rs" (response) and "label" (class id)
        Returns:
            (input_ids, label_ids, class_ids)
        """
        input_ids = []
        label_ids = []
        # Tokenize the first utterance, the response, and every follow-up.
        du1_tokens = self.tokenizer.tokenize(sample["du1"])
        rs_tokens = self.tokenizer.tokenize(sample["rs"])
        du2_tokens = []
        for du2 in sample["du2"]:
            du2_token = self.tokenizer.tokenize(du2)
            du2_tokens.append(du2_token)
        # Truncate the response if it exceeds the response length budget.
        if len(rs_tokens) > self.rs_max_len:
            rs_tokens = rs_tokens[:self.rs_max_len]
        # Truncate the inputs: repeatedly trim the longest sequence until the
        # total (plus room for special tokens) fits within max_len.
        max_index, all_length = self.find_longest_du(du2_tokens)
        while len(du1_tokens) + all_length > self.max_len - len(rs_tokens) - (3 + len(du2_tokens)):
            if len(du1_tokens) > len(du2_tokens[max_index]):
                du1_tokens = du1_tokens[:-1]
            else:
                du2_tokens[max_index] = du2_tokens[max_index][:-1]
                max_index, all_length = self.find_longest_du(du2_tokens)
        # Assemble input_ids as: [CLS] du1 [SEP] du2_1 [SEP] du2_2 [SEP] ...
        input_ids.append(self.tokenizer.cls_token_id)
        input_ids.extend(self.tokenizer.convert_tokens_to_ids(du1_tokens))
        input_ids.append(self.tokenizer.sep_token_id)
        for du2_token in du2_tokens:
            input_ids.extend(self.tokenizer.convert_tokens_to_ids(du2_token))
            input_ids.append(self.tokenizer.sep_token_id)
        label_ids.extend(self.tokenizer.convert_tokens_to_ids(rs_tokens))
        label_ids.append(self.tokenizer.sep_token_id)
        # Sanity-check the combined length against the budget.
        assert len(input_ids + label_ids) <= self.max_len
        class_ids = sample["label"]
        return input_ids, label_ids, class_ids
    def __len__(self):
        return len(self.data_set)
    def __getitem__(self, idx):
        instance = self.data_set[idx]
        return instance
def collate_func(batch_data):
    """
    Collate function for a torch DataLoader: turn a batch of samples into
    padded tensors.

    Args:
        batch_data: list of dicts with keys "input_ids" and "label_ids"
            (token-id sequences) and "class_ids" (an integer label).

    Returns:
        dict with "input_ids" and "label_ids" right-padded to the longest
        sequence in the batch (padding value 0) and "class_ids" as a 1-D
        long tensor; an empty dict for an empty batch.
    """
    if not batch_data:
        # Nothing to collate.
        return {}
    input_ids_list, label_ids_list, class_list = [], [], []
    for instance in batch_data:
        input_ids_list.append(torch.tensor(instance["input_ids"], dtype=torch.long))
        label_ids_list.append(torch.tensor(instance["label_ids"], dtype=torch.long))
        class_list.append(instance["class_ids"])
    # pad_sequence pads every tensor to the batch maximum with padding_value.
    return {"input_ids": pad_sequence(input_ids_list, batch_first=True, padding_value=0),
            "label_ids": pad_sequence(label_ids_list, batch_first=True, padding_value=0),
            # Idiom fix: pass the list directly instead of copying it with a
            # redundant comprehension.
            "class_ids": torch.tensor(class_list, dtype=torch.long)}
| [
"jf940927320@163.com"
] | jf940927320@163.com |
aac326ee4686dead7d82f02fbaa8a541cfee5c5c | 8cf4d24a9b3b762d45baf33b10e4772a28ba020b | /machine-learning-gists/c5d8813e90b9f0585574704c6aee2910654a406a/snippet.py | 6b9e2bf825b95eb77c1a19312bd6dd8e7f2a48fb | [
"Apache-2.0"
] | permissive | qwbjtu2015/dockerizeme | 0b76dcf989391dd80954c4fa098365ebcec8ff5f | 9039beacf281ea7058d721784ed4eff054453b09 | refs/heads/master | 2021-05-23T19:21:18.406516 | 2020-04-23T04:05:29 | 2020-04-23T04:05:29 | 253,433,421 | 0 | 0 | Apache-2.0 | 2020-04-06T08:03:30 | 2020-04-06T08:03:29 | null | UTF-8 | Python | false | false | 5,996 | py | #Handwritten digits datasets, such as The MNIST Database of handwritten digits, and Handwritten Digit Recognition to see how good you can get your classifier to perform on them.
#The MNIST problem is a dataset developed by Yann LeCun, Corinna Cortes and Christopher Burges for evaluating machine learning models on the handwritten digit classification problem.
#The dataset was constructed from a number of scanned document dataset available from the National Institute of Standards and Technology (NIST). This is where the name for the dataset comes from, as the Modified NIST or MNIST dataset.
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
import numpy as np
from array import array
import struct
from sklearn.model_selection import train_test_split
from random import*
def load(path_img, path_lbl):
    """Load an IDX-format image/label file pair (MNIST layout).

    Args:
        path_img: path to the idx3-ubyte image file (magic number 2051).
        path_lbl: path to the idx1-ubyte label file (magic number 2049).

    Returns:
        (images, labels): a DataFrame with one flattened image per row and a
        Series of integer labels.

    Raises:
        ValueError: if either file's magic number does not match.
    """
    with open(path_lbl, 'rb') as file:
        magic, size = struct.unpack(">II", file.read(8))
        if magic != 2049:
            raise ValueError('Magic number mismatch, expected 2049, got {0}'.format(magic))
        labels = array("B", file.read())
    with open(path_img, 'rb') as file:
        magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
        if magic != 2051:
            raise ValueError('Magic number mismatch, expected 2051, got {0}'.format(magic))
        image_data = array("B", file.read())
    # You can set divisor to any int >= 1. With 1 there is no resampling of
    # the image; 2 or higher keeps every divisor-th pixel per dimension,
    # which speeds up training but may reduce overall accuracy.
    divisor = 1
    images = []
    for i in range(size):
        # BUGFIX/generalization: reshape using the row/column counts read
        # from the file header instead of the hard-coded 28x28, so IDX files
        # with other image sizes load correctly too.  (Also removes the dead
        # pre-allocation loop the original used before overwriting each row.)
        pixels = np.array(image_data[i * rows * cols:(i + 1) * rows * cols])
        images.append(pixels.reshape(rows, cols)[::divisor, ::divisor].reshape(-1))
    return pd.DataFrame(images), pd.Series(labels)
def peekData(X_train):
    # Show the first 50 training samples in a 5x10 grid so the data can be
    # inspected visually before training.
    # NOTE(review): assumes each row of X_train is a flattened 28x28 image —
    # confirm if used with non-MNIST data.
    print("Peeking data")
    fig = plt.figure()
    cnt = 0
    for col in range(5):
        for row in range(10):
            plt.subplot(5, 10, cnt + 1)
            plt.imshow(X_train.iloc[cnt, :].values.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
            plt.axis('off')
            cnt += 1
    fig.set_tight_layout(True)
    return plt.show()
def drawPredictions(model,X,y):
    # Predict the first 50 samples and display them in a 5x10 grid; each
    # title is green when the prediction matches the true label, red when
    # it does not.
    fig = plt.figure()
    # Run the model over the whole data set once.
    y_guess = model.predict(X)
    num_rows = 10
    num_cols = 5
    index = 0
    for col in range(num_cols):
        for row in range(num_rows):
            plt.subplot(num_cols, num_rows, index + 1)
            # 28x28 is the size of the image, 784 pixels
            plt.imshow(X.iloc[index, :].values.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
            # Green = prediction correct; red = prediction wrong.
            if y[index] == y_guess[index]:
                colors = 'green'
            else:
                colors = 'red'
            plt.title('Prediction: %i' % y_guess[index], fontsize=6, color=colors)
            plt.axis('off')
            index += 1
    fig.set_tight_layout(True)
    plt.show()
    return
def comparative (number):
    # Print the true label and the model's prediction for one sample, then
    # display the image so it can be judged visually.
    # NOTE(review): relies on the module-level globals ``svc``, ``X`` and
    # ``y`` defined by the training script below; consider passing them as
    # parameters instead.
    true_value = y[number]
    print(f'{number}th Label: {true_value}')
    # Predict the single sample at ``number`` (slice keeps it 2-D for sklearn).
    guess_Value = svc.predict(X[number:(number+1)])
    print(f'{number}th Prediction: {guess_Value}')
    print('#'*35)
    # Show the image so we can visually check whether it was a hard or an
    # easy sample.
    plt.imshow(X.iloc[number, :].values.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title(f'{number}th Number = Prediction: {guess_Value} x Label: {true_value}')
    return plt.show()
def prediction(y_guess, k, y):
    """Show up to two randomly chosen misclassified samples.

    Args:
        y_guess: predicted labels.
        k: number of leading samples to inspect.
        y: true labels.
    """
    # Collect the indices where the prediction disagrees with the label.
    misclassified = [i for i in range(k) if y[i] != y_guess[i]]
    # BUGFIX: random.sample raises ValueError when fewer than two samples
    # are misclassified; cap the sample size at the available count.
    for idx in sample(misclassified, min(2, len(misclassified))):
        comparative(idx)
    return
# Train and compare three SVC kernels (linear, poly, rbf) on the MNIST data.
X, y = load('train-MNIST.data', 'train-MNIST.labels')
# Split the data into train / test sets (70% / 30%, fixed seed).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=7)
# Get to know the data.
peekData(X_train)
print('#'*35)
# Create an SVC classifier with a linear kernel.
print("Training SVC Classifier...")
svc = svm.SVC(kernel='linear', C=1, gamma=0.001)
svc.fit(X_train, y_train)
# Score the SVC against the held-out TESTING data.
print("Scoring SVC linear Classifier...")
score = svc.score(X_test, y_test)
print("Score: ", score)
# Visual confirmation: show predictions, then two misclassified samples.
drawPredictions(svc, X, y)
size = y.size
y_guess = svc.predict(X)
prediction1 = prediction(y_guess,size, y)
print('#'*35)
# Repeat with a polynomial kernel.
svc = svm.SVC(kernel='poly', C=1, gamma=0.001)
svc.fit(X_train, y_train)
# Score the SVC against the held-out TESTING data.
print("Scoring SVC poly Classifier...")
score = svc.score(X_test, y_test)
print("Score: ", score)
# Visual confirmation: show predictions, then two misclassified samples.
drawPredictions(svc, X, y)
y_guess = svc.predict(X)
prediction2 = prediction(y_guess,size,y)
print('#'*35)
# Repeat with an RBF kernel.
svc = svm.SVC(kernel='rbf', C=1, gamma=0.001)
svc.fit(X_train, y_train)
# Score the SVC against the held-out TESTING data.
print("Scoring SVC rbf Classifier...")
score = svc.score(X_test, y_test)
print("Score: ", score)
# Visual confirmation of accuracy.
drawPredictions(svc, X, y)
y_guess = svc.predict(X)
prediction3 = prediction(y_guess,size, y)
# NOTE(review): prediction() always returns None, so this prints "None".
print(prediction3)
print('#'*35)
"qwbjtu15@163.com"
] | qwbjtu15@163.com |
abb045e0c763e0758fefe7b43c7a29a7498595de | 96564e02360d9dac1e550ffd1bb69f314d900a3d | /polls/settings.py | bd07f716330fc7978bd58703a3eb0ecae21fc0f6 | [] | no_license | caspertu/polls | 4fd7f62326b0ea0e0f0615887e872f420738ca73 | e0265066ea0aa86a10da73624e9e0c450dfc96f3 | refs/heads/master | 2020-12-25T19:14:35.818862 | 2013-06-24T08:14:27 | 2013-06-24T08:14:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,529 | py | #!/usr/bin/env python
# coding: utf-8
# casper@2013/05/27
# Django settings for polls project.
# NOTE(review): DEBUG must be False in production; combined with the empty
# ALLOWED_HOSTS below, these defaults are development-only.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'tmp.dat', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a secret key committed to version control should be rotated
# and loaded from the environment (or local_settings) instead.
SECRET_KEY = ')rak1sa-vu*17i20e2sq%+t1@!9+ni_xr34vt&f8f#=yot0!%9'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'polls.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'polls.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'pollapp',
    'rest_framework',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Overwrite the defaults above with machine-local settings, if present.
try:
    from local_settings import *
except ImportError:
    # BUGFIX: only swallow a missing local_settings module; the original
    # bare ``except:`` also hid real errors raised *inside* local_settings.
    pass
"caspertu@gmail.com"
] | caspertu@gmail.com |
9c425bac36d622aca313c816c580856ba0e53191 | cb2c30d0143df8e36ae1d4adafa414f837fbd9dd | /SyncNetHD/runner.py | c4346444cb2d2c2d700226be98241865885e9b30 | [
"MIT"
] | permissive | julianyulu/SyncNetCN | c050ef0a7a2e0257ffc68badd4e95a51eb3e58fb | cdd378aa3a2ac5a3a3edef5edc0478ae66da041d | refs/heads/main | 2023-08-28T00:20:21.720804 | 2021-11-08T15:42:55 | 2021-11-08T15:42:55 | 398,701,611 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,929 | py | import os
import glob
import torch
import argparse
from tqdm import tqdm
from .dataset import Dataset
from .model import SyncNetColor,SyncNetColorHD
from omegaconf import OmegaConf
from torch.utils import data as data_utils
from utils import wandb_utils
from utils.batch_sampler import RandomEntryBatchSampler
import pdb
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Runner:
    """Train/eval driver for the SyncNet lip-sync models.

    Builds the model and optimizer from an OmegaConf config, handles
    checkpoint save/resume, constructs train/val dataloaders, and runs the
    training loop with periodic evaluation and optional wandb logging.
    """
    def __init__(self, config):
        self.cfg = config
        # set model
        if self.cfg.model.use_SyncNetHD:
            self.model = SyncNetColorHD()
        else:
            self.model = SyncNetColor()
        # Wrap for multi-GPU data parallelism when several devices exist.
        if torch.cuda.device_count() > 1:
            self.model = torch.nn.DataParallel(self.model)
        self.model = self.model.to(device)
        self.optimizer = torch.optim.Adam([p for p in self.model.parameters() if p.requires_grad], lr = self.cfg.runtime.init_learning_rate)
        if self.cfg.model.resume_ckpt:
            self.resume_from_ckpt(self.cfg.model.resume_ckpt)
        else:
            # Fresh run: persist the effective config next to the checkpoints.
            os.makedirs(self.cfg.runtime.checkpoint_dir, exist_ok = True)
            OmegaConf.save(self.cfg, os.path.join(self.cfg.runtime.checkpoint_dir, 'config.yaml'))
        if self.cfg.wandb.enable:
            wandb_utils.init(self.cfg)
    def resume_from_ckpt(self, resume_ckpt):
        """Restore model (and optionally config/optimizer/counters) from a checkpoint."""
        # overwrite curr config from ckpt dir
        if self.cfg.model.resume_ckpt_config:
            self.cfg = OmegaConf.load(os.path.join(os.path.dirname(resume_ckpt), 'config.yaml'))
        print("Loading checkpoint from: {}".format(resume_ckpt))
        # NOTE(review): the CUDA branch maps tensors to CPU storage while the
        # CPU branch loads as-is — the two branches look swapped; confirm.
        if torch.cuda.is_available():
            checkpoint = torch.load(resume_ckpt, map_location = lambda storage, loc: storage)
        else:
            checkpoint = torch.load(resume_ckpt)
        self.model.load_state_dict(checkpoint["state_dict"])
        if not self.cfg.runtime.reset_optimizer:
            optimizer_state = checkpoint["optimizer"]
            if optimizer_state is not None:
                print("Loading optimizer state from {}".format(resume_ckpt))
                self.optimizer.load_state_dict(checkpoint["optimizer"])
        # Resume the global counters used by train().
        self.global_step = checkpoint["global_step"]
        self.global_epoch = checkpoint["global_epoch"]
    def save_checkpoint(self):
        """Write model (+ optional optimizer) state and counters to the checkpoint dir."""
        os.makedirs(self.cfg.runtime.checkpoint_dir, exist_ok = True)
        ckpt_path = os.path.join(self.cfg.runtime.checkpoint_dir,
                                 f"checkpoint_step{int(self.global_step)}.pth")
        opt_state = self.optimizer.state_dict() if self.cfg.runtime.save_optimizer_state else None
        model_state = self.model.state_dict()
        torch.save({
            "state_dict": model_state,
            "optimizer": opt_state,
            "global_step": self.global_step,
            "global_epoch": self.global_epoch
        }, ckpt_path)
        print(f"Saved checkpoint: {ckpt_path}")
    def get_dataloader(self, split):
        """Build the DataLoader for *split*; 'train' uses the random-entry batch sampler."""
        dataset = Dataset(self.cfg, split)
        if split == 'train':
            batch_sampler = RandomEntryBatchSampler(len(dataset),
                                                    batch_size = self.cfg.runtime.batch_size,
                                                    steps_per_epoch = self.cfg.runtime.steps_per_epoch)
            data_loader = data_utils.DataLoader(dataset,
                                                pin_memory = True,
                                                batch_sampler = batch_sampler)
        else:
            data_loader = data_utils.DataLoader(dataset,
                                                batch_size = self.cfg.runtime.batch_size,
                                                pin_memory = False,
                                                num_workers = self.cfg.runtime.num_workers)
        return data_loader
    def loss_fn(self, a, v, y):
        """BCE between the cosine similarity of the two embeddings and label y."""
        # Lazily create (and cache) the BCE criterion on first use.
        if not hasattr(self, '_bce_loss'):
            self._bce_loss = torch.nn.BCELoss()
        d = torch.nn.functional.cosine_similarity(a, v)
        loss = self._bce_loss(d.unsqueeze(1), y)
        return loss
    def log(self, log_dict):
        """Forward metrics to wandb when logging is enabled."""
        if self.cfg.wandb.enable:
            wandb_utils.log(log_dict)
    def eval(self):
        """Average the loss over eval_forward_steps batches of the 'val' split."""
        # The val loader is created once and cached on the instance.
        if not hasattr(self, '_test_data_loader'):
            self._test_data_loader = self.get_dataloader('val')
        losses = []
        step = 0
        # Outer while re-iterates the loader until enough batches were seen.
        while True:
            for x, mel, y in self._test_data_loader:
                self.model.eval()
                x = x.to(device)
                mel = mel.to(device)
                a, v = self.model(mel, x)
                y = y.to(device)
                loss = self.loss_fn(a, v, y)
                losses.append(loss.item())
                step += 1
                if step >= self.cfg.runtime.eval_forward_steps:
                    break
            if step >= self.cfg.runtime.eval_forward_steps:
                break
        averaged_loss = sum(losses) / len(losses)
        return {"eval_loss": averaged_loss,
                "step": self.global_step,
                "epoch": self.global_epoch}
    def train(self):
        """Run the main optimization loop with periodic checkpointing and eval."""
        if not hasattr(self, 'global_step'): self.global_step = 0
        if not hasattr(self, 'global_epoch'): self.global_epoch = 0
        train_data_loader = self.get_dataloader('train')
        while self.global_epoch < self.cfg.runtime.nepochs:
            running_loss = 0.
            prog_bar = tqdm(enumerate(train_data_loader), total = self.cfg.runtime.steps_per_epoch)
            for step, (x, mel, y) in prog_bar:
                self.model.train()
                self.optimizer.zero_grad()
                x = x.to(device)
                mel = mel.to(device)
                a, v = self.model(mel, x)
                y = y.to(device)
                loss = self.loss_fn(a, v, y)
                loss.backward()
                self.optimizer.step()
                self.global_step += 1
                running_loss += loss.item()
                prog_bar.set_description(f"Epoch: {self.global_epoch} | Step: {self.global_step} | Train Loss: {running_loss / (step + 1):.6f}")
                if self.global_step >0 and self.global_step % self.cfg.runtime.checkpoint_interval == 0:
                    self.save_checkpoint()
                # Periodic evaluation without gradient tracking.
                if self.global_step % self.cfg.runtime.eval_interval == 0:
                    with torch.no_grad():
                        eval_res = self.eval()
                    self.log(eval_res)
                    print(f"\nEval Loss @ step {self.global_step} | epoch {self.global_epoch}: {eval_res['eval_loss']:.6f}")
            self.global_epoch += 1
            self.log({"step": self.global_step,
                      "epoch": self.global_epoch,
                      "train_loss": running_loss / (step + 1)})
| [
"julianyulu@tencent.com"
] | julianyulu@tencent.com |
779f94046603bfd97f43150b9ce422c7726bddd4 | 0faeef8d3a24b4d7904c3a55d7da293ea24354d3 | /build/robot_description/catkin_generated/stamps/robot_description/wall_follow.py.stamp | 6b120ebb24d2b922aee625af58cb114caa67fbf0 | [] | no_license | MehrdadTV68/ROSproject | 8a3cbdb232f0cb3dbddf4d856f107b3e00c460cd | fcf3919382b26d40b8bbf7f73d6236c11409385e | refs/heads/main | 2023-05-14T11:32:30.221446 | 2021-06-07T12:58:04 | 2021-06-07T12:58:04 | 374,666,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,336 | stamp | #! /usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf import transformations
import math
pub_ = None
regions_ = {
'right': 0,
'fright': 0,
'front': 0,
'fleft': 0,
'left': 0,
}
state_ = 0
state_dict_ = {
0: 'find the wall',
1: 'turn left',
2: 'follow the wall',
}
def clbk_laser(msg):
    """LaserScan callback: condense the scan into five sector minima, then act."""
    global regions_
    # NOTE(review): slice ends are exclusive, so readings 143/287/431/575 fall
    # between sectors — looks like an off-by-one; confirm against the sensor.
    sectors = [
        ('right', 0, 143),
        ('fright', 144, 287),
        ('front', 288, 431),
        ('fleft', 432, 575),
        ('left', 576, 713),
    ]
    # Cap every sector minimum at 10 (treat "no return" as far away).
    regions_ = {name: min(min(msg.ranges[lo:hi]), 10) for name, lo, hi in sectors}
    take_action()
def change_state(state):
    """Switch the wall-follower state machine to *state*, logging each transition."""
    global state_, state_dict_
    # Compare by value: the original `is not` relied on CPython's small-int
    # caching, an implementation detail rather than guaranteed semantics.
    if state != state_:
        print ('Wall follower - [%s] - %s' % (state, state_dict_[state]))
        state_ = state
def take_action():
    """Translate the latest sector readings into a wall-follower state change."""
    global regions_
    regions = regions_
    msg = Twist()
    linear_x = 0
    angular_z = 0

    state_description = ''

    d0 = 1
    d = 1.5
    # Strict comparisons on both sides: a reading exactly equal to its
    # threshold matches no case and falls through to the unknown branch.
    front_far = regions['front'] > d0
    front_near = regions['front'] < d0
    fleft_far = regions['fleft'] > d
    fleft_near = regions['fleft'] < d
    fright_far = regions['fright'] > d
    fright_near = regions['fright'] < d

    if front_far and fleft_far and fright_far:
        state_description = 'case 1 - nothing'
        change_state(0)
    elif front_near and fleft_far and fright_far:
        state_description = 'case 2 - front'
        change_state(1)
    elif front_far and fleft_far and fright_near:
        state_description = 'case 3 - fright'
        change_state(2)
    elif front_far and fleft_near and fright_far:
        state_description = 'case 4 - fleft'
        change_state(0)
    elif front_near and fleft_far and fright_near:
        state_description = 'case 5 - front and fright'
        change_state(1)
    elif front_near and fleft_near and fright_far:
        state_description = 'case 6 - front and fleft'
        change_state(1)
    elif front_near and fleft_near and fright_near:
        state_description = 'case 7 - front and fleft and fright'
        change_state(1)
    elif front_far and fleft_near and fright_near:
        state_description = 'case 8 - fleft and fright'
        change_state(0)
    else:
        state_description = 'unknown case'
        rospy.loginfo(regions)
def find_wall():
    """Drift forward while curving right until a wall appears on the right."""
    cmd = Twist()
    cmd.linear.x = 0.2
    cmd.angular.z = -0.3
    return cmd
def turn_left():
    """Rotate counter-clockwise in place (positive z spin, no forward motion)."""
    cmd = Twist()
    cmd.angular.z = 0.3
    return cmd
def follow_the_wall():
    """Cruise straight ahead while the wall stays on the right side."""
    global regions_
    cmd = Twist()
    cmd.linear.x = 0.5
    return cmd
def main():
    """ROS node entry point: publish velocity commands on /cmd_vel at 20 Hz.

    Subscribes to /scan (handled by clbk_laser, which updates the global
    state machine) and publishes the command matching the current state
    until the node is shut down.
    """
    global pub_
    rospy.init_node('reading_laser')
    pub_ = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
    sub = rospy.Subscriber('/scan', LaserScan, clbk_laser)
    rate = rospy.Rate(20)
    while not rospy.is_shutdown():
        msg = Twist()
        # Pick the behaviour for the current state (0: find, 1: turn, 2: follow).
        if state_ == 0:
            msg = find_wall()
        elif state_ == 1:
            msg = turn_left()
        elif state_ == 2:
            msg = follow_the_wall()
            pass
        else:
            rospy.logerr('Unknown state!')
        pub_.publish(msg)
        rate.sleep()
if __name__ == '__main__':
main()
| [
"mehrdadt68@gmail.com"
] | mehrdadt68@gmail.com |
d68cf48da0802d8205ed2b384c4bbdffe74a2b09 | 8499393c0676e2a28d817152b4c4e5974b4d1f40 | /command_center/app.py | 376ecd5b7205656bdafe1ce2a7d6823d1d52aa24 | [] | no_license | luwzko/Centurion | 1755926753438f02b1e11de2ac76819d8dd2b120 | 4f6437921f53788fe332e73a97ec409c2a8fc80b | refs/heads/master | 2023-08-21T15:29:53.949800 | 2021-10-27T18:39:38 | 2021-10-27T18:39:38 | 421,931,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | from flask import *
from werkzeug.wrappers import response
app = Flask(__name__)
# index 0: control_panel
# index 1: client
msgs = ["", "Client"]
read_states = [False, False]
@app.route("/", methods=["GET"])
def index_get():
    """Return the message left for party ``u`` (0: control panel, 1: client).

    The stored slot is ``msgs[not u]``, i.e. each side reads what the other
    side wrote.  Invalid or missing ``u`` yields an empty body.
    """
    global msgs
    global read_states
    u = request.args.get("u")
    #protection from invalid indexes
    if u is None:
        return ""
    try:
        u = int(u)
    except ValueError:
        # Non-numeric index: previously this raised and produced a 500.
        return ""
    if u not in (0, 1):
        return ""
    # NOTE(review): read_states[1] is never set True anywhere in this module,
    # so this reset branch appears to be dead code — confirm the intent
    # (the reset also replaces the strings with booleans).
    if read_states[1]:
        read_states = [False, False]
        msgs = [False, False]
    return Response(response=msgs[not u])
@app.route("/", methods=["POST"])
def index_post():
    """Store an incoming message for recipient index ``u`` (0 or 1)."""
    global msgs
    global read_states
    try:
        msgs[int(request.form["u"])] = request.form["msg"]
    # Missing form fields (KeyError), a non-numeric index (ValueError), or an
    # out-of-range index (IndexError) are ignored; the endpoint always
    # answers with an empty 200 body.  The original caught only IndexError.
    except (KeyError, ValueError, IndexError):
        pass
    return ""
if __name__ == "__main__":
app.run() | [
"luwzko@gmail.com"
] | luwzko@gmail.com |
d889db92143f4bc2542cf7460690bd912b59b210 | aa749f0ee271e870fe816c3f1d518f836e50691d | /utils/helpers.py | 1b63e0ecb11fa3adc1c9251c1fc4fe297d1a8100 | [] | no_license | EloWeld/InviteJobberBot | ac996e9f73bff9e30b556d6677e1a01cde9e117c | f403a2d691686f89cc5af2fe47880dec7ec7cfb2 | refs/heads/main | 2023-07-21T11:29:30.910659 | 2021-08-30T21:24:28 | 2021-08-30T21:24:28 | 401,019,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | import asyncio
from datetime import datetime
import requests
from aiogram import types
from src.data.config import DATETIME_FORMAT, TIME_FORMAT
async def del_message(message: "types.Message", time_in_milliseconds: int):
    """Delete *message* after the given delay.

    The parameter is declared in milliseconds, but ``asyncio.sleep`` takes
    seconds — the original passed the value through unconverted, sleeping
    1000x longer than the name implies.  Convert before sleeping.
    """
    await asyncio.sleep(time_in_milliseconds / 1000)
    await message.delete()
def get_current_datetime(date_format=DATETIME_FORMAT):
    """Current local date-time rendered with *date_format*."""
    now = datetime.now()
    return now.strftime(date_format)
def get_formatted_datetime(format_date, date_format=DATETIME_FORMAT):
    """Render *format_date* (a datetime object) using *date_format*."""
    formatted = format_date.strftime(date_format)
    return formatted
def get_current_time(date_format=TIME_FORMAT):
    """Current local time-of-day rendered with *date_format*."""
    moment = datetime.now()
    return moment.strftime(date_format)
def price_to_btc(price: int):
    """Convert *price* (RUB) to BTC via the blockchain.info conversion API."""
    query = {
        'currency': 'RUB',
        'value': price,
    }
    response = requests.get(url='https://blockchain.info/tobtc', params=query)
    return response.text
class BroadcastTimer:
    """One-shot asyncio timer: waits *timeout* seconds, then awaits *callback*.

    The countdown is scheduled on the running event loop as soon as the
    object is constructed; call :meth:`cancel` to abort it before it fires.
    """

    def __init__(self, timeout, callback):
        self._delay = timeout
        self._fire = callback
        self._handle = asyncio.ensure_future(self._countdown())

    async def _countdown(self):
        # Single delay, single shot: sleep, then hand control to the callback.
        await asyncio.sleep(self._delay)
        await self._fire()

    def cancel(self):
        """Abort the pending countdown (and the callback, if not yet started)."""
        self._handle.cancel()
# Compute how long ago the subscription was taken out (days and hours).
def get_subscription_diff(a_date, b_date=None):
    """Return ``(days, hours)`` elapsed between *a_date* and *b_date*.

    Both arguments are DATETIME_FORMAT strings; *b_date* defaults to the
    current moment.  The old signature used ``b_date=get_current_datetime()``,
    which is evaluated once at import time and froze the "now" side of the
    comparison — the default is now resolved on every call.
    """
    if b_date is None:
        b_date = get_current_datetime()
    now = datetime.strptime(b_date, DATETIME_FORMAT)
    post_time = datetime.strptime(a_date, DATETIME_FORMAT)
    diff = (now - post_time)
    # NOTE(review): timedelta.seconds excludes the day component by design but
    # is always non-negative — confirm a_date is never later than b_date.
    return diff.days, diff.seconds / 60 / 60  # days and hours
"coddeboy@gmail.com"
] | coddeboy@gmail.com |
160b9d6977f9fc868c341c4e908c1e6cc77d6605 | efd9ceed8883e946d8557ff2fb56e2c9912971c5 | /service_2/application/routes.py | f0b13c4f44c747ad35ac24a5ada6c4dc08d205f7 | [] | no_license | Fardins24/BigMotoringWorld | 172cb39a0072eef8da13c0ac0babbf5c23aa0fd4 | 698ae6e02c746459ccbb275f775e55df8ce1e7a0 | refs/heads/main | 2023-05-30T08:52:15.434314 | 2021-06-10T14:07:23 | 2021-06-10T14:07:23 | 374,813,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from application import app
from flask import request, Response
import random
@app.route("/car_name", methods=["GET"])
def get_car():
    """Return one car brand, chosen uniformly at random, as the response body."""
    cars = ["Ford", "Lamborghini", "Mercedes", "BMW", "Audi"]
return Response(random.choice(cars)) | [
"fshah@qa.com"
] | fshah@qa.com |
9ca1da04db7918d733d01687ff70a25eff11504d | 61a8445ad4b0e45860e244de81d34bd16fa557e3 | /Data Structure and Algorithms/python_join.py | 8e8f3f3bc8c5682f0390c4bd1c5be09bb795e314 | [] | no_license | dfworldwang/Python-Elementary-Practice | bc4e211ab0deaa819ee990fc8272beca5329b7f8 | 6351a3f5a5aa90d5c289e304de1dd00a121379dc | refs/heads/master | 2021-01-21T04:41:04.608885 | 2016-06-13T01:06:47 | 2016-06-13T01:06:47 | 55,869,465 | 0 | 0 | null | 2016-06-16T22:57:13 | 2016-04-09T22:20:09 | Python | UTF-8 | Python | false | false | 565 | py |
li = ['my', 'name', 'is', 'bob']
# Concatenate the elements of list with blank
print(' '.join(li))
print('..'.join(li))
class Foo():
    """Demo of str.join: renders a list of {name: (surname, age, status)} dicts."""
    def __init__(self):
        self.name = [{"Susan": ("Boyle", 50, "alive")}, {"Albert": ("Speer", 106, "dead")}]
    def __str__(self):
        # One "<name> <surname> is <age> and <status>. " fragment per person,
        # concatenated in order (note the trailing space, kept for parity).
        parts = []
        for entry in self.name:
            for key, (surname, age, status) in entry.items():
                parts.append("{} {} is {} and {}. ".format(key, surname, age, status))
        return "".join(parts)
foo = Foo()
print(foo)
| [
"noreply@github.com"
] | dfworldwang.noreply@github.com |
9a5e5efdc0c74a4a6fd7414fc9920cbcb2cbea38 | ceba819390f96736c6abfddfc9e7fdfb5163e364 | /cake.py | 7249dba6dff6d598fc0909152d57c7dd06980986 | [] | no_license | Dzhemile-dzh/SoftuniPython | 690d83b5eb9161695b4a994b8aa3865fb2d39d45 | 6aecd8fa53aeefbddfabebc85770385f3c79e747 | refs/heads/main | 2023-03-28T13:40:49.904337 | 2021-03-27T21:45:30 | 2021-03-27T21:45:30 | 352,174,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | cakeSide1 = int(input())
cakeSide2 = int(input())
cakeSize = cakeSide1 * cakeSide2
totalPieces = 0
command = input()
while command != 'STOP':
piecesCounter = int(command)
totalPieces += piecesCounter
if totalPieces > cakeSize:
break
command = input()
if totalPieces > cakeSize:
print(f'No more cake left! You need {totalPieces - cakeSize} pieces more.')
if command == 'STOP' or cakeSize > totalPieces:
print(f'{cakeSize - totalPieces} pieces are left.') | [
"dzhemile_ahmet@abv.bg"
] | dzhemile_ahmet@abv.bg |
76ea21e0cd0bb9f8f9684fc16048be3713d1df62 | 1cc54d31a4a443230668ca063bcd27179ff096c2 | /store/urls.py | fbf6972755a0e56ef5d0ca947869dfff8b9f903d | [] | no_license | akhmadakhmedov/e-commerce | 8d84f0ae7acd4dc80c8afbe3ab55ed13873ef631 | 6708aa62dec08be9b18fae15125eeef266d869e3 | refs/heads/main | 2023-05-29T21:36:40.354231 | 2021-06-14T13:23:22 | 2021-06-14T13:23:22 | 370,982,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.store, name='store'),
path('category/<slug:category_slug>/', views.store, name='products_by_category'),
path('category/<slug:category_slug>/<product_slug>/', views.product_detail, name='product_detail'),
path('search/', views.search, name='search'),
]
| [
"ahmedov.thy@gmail.com"
] | ahmedov.thy@gmail.com |
cb5ff038a1b7eb64f1acbd6e4b8a645b0097f0b8 | 9f6a13b2c89e955c6445f2c7369d777825404db8 | /Lab 02 - Expansão de Consultas/.ipynb_checkpoints/coocurrence_matrix-checkpoint.py | 40320a241fb264c1a8165233e28ad4bfc623daa1 | [] | no_license | wesleyroseno/Recuperacao-da-Informacao-e-Busca-na-Web | 6f48573ddc1493741a3a2d1addc8127afb575cc0 | f6dbc1ce23953e08612eebb891d2a1be56f1d738 | refs/heads/master | 2020-03-12T08:55:16.848724 | 2018-08-01T01:23:30 | 2018-08-01T01:23:30 | 130,539,904 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,976 | py |
# coding: utf-8
# ## Matrix and Vocabulary Construction
# In[ ]:
from pandas import read_csv
from scipy import sparse
from nltk import FreqDist
from nltk import bigrams
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
# In[ ]:
news = read_csv("estadao_noticias_eleicao.csv", encoding="utf-8")
news = news.fillna("")
# In[ ]:
content = news.titulo + " " + news.subTitulo + " " + news.conteudo
# In[ ]:
def co_occurrence_matrix(corpus):
    """Build a sparse word-by-word bigram co-occurrence matrix.

    *corpus* is a flat list of tokens.  Returns ``(matrix, vocab_to_index)``
    where ``matrix[i, j]`` counts how often the word indexed ``j``
    immediately follows the word indexed ``i``.

    NOTE(review): indices come from iterating a set, so the word order
    (and therefore the matrix layout) is not deterministic across runs.
    """
    vocab = set(corpus)
    vocab = list(vocab)
    n = len(vocab)
    vocab_to_index = {word:i for i, word in enumerate(vocab)}
    bi_grams = list(bigrams(corpus))
    # most_common over all entries: effectively the full bigram frequency list.
    bigram_freq = FreqDist(bi_grams).most_common(len(bi_grams))
    # COO triplets: row = previous word, col = current word, value = count.
    I=list()
    J=list()
    V=list()
    for bigram in bigram_freq:
        current = bigram[0][1]
        previous = bigram[0][0]
        count = bigram[1]
        I.append(vocab_to_index[previous])
        J.append(vocab_to_index[current])
        V.append(count)
    co_occurrence_matrix = sparse.coo_matrix((V,(I,J)), shape=(n,n))
    return co_occurrence_matrix, vocab_to_index
# #### Removing punctuation
# In[ ]:
tokenizer = RegexpTokenizer(r'\w+')
tokens_lists = content.apply(lambda text: tokenizer.tokenize(text.lower()))
# #### Removing stopwords
# In[ ]:
stopword_ = stopwords.words('portuguese')
filtered_tokens = tokens_lists.apply(lambda tokens: [token for token in tokens if token not in stopword_])
# #### Transforming list of lists into one list
# In[ ]:
tokens = [token for tokens_list in filtered_tokens for token in tokens_list]
# In[ ]:
matrix, vocab = co_occurrence_matrix(tokens)
# ## Consult Bigram Frequency
# In[ ]:
consultable_matrix = matrix.tocsr()
''' Trecho do codigo não utilizado para o Lab 02
# In[ ]:
def consult_frequency(w1, w2):
return(consultable_matrix[vocab[w1],vocab[w2]])
# #### Example
# In[ ]:
w1 = 'poucos'
w2 = 'recursos'
consult_frequency(w1, w2)
'''
| [
"37702941+wesleyroseno@users.noreply.github.com"
] | 37702941+wesleyroseno@users.noreply.github.com |
53aa1b2409b3fe45fb8cacb3d6c9abc63b5229eb | f6f3ade5a59fcb904a147fa3cf1933a1b225338f | /src/gate_timer.py | 536a3cef3cb573db60d205d844c69d50ccab9872 | [] | no_license | HajimeKawahara/autobop | 3b559011f9dceba68b02e47cd95fdef4fa9ef41e | 2c99625895206d24587db90a2ac03d1e536eb9ca | refs/heads/master | 2021-01-16T01:03:01.351588 | 2018-03-18T09:15:53 | 2018-03-18T09:15:53 | 107,845,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | #!/usr/bin/python
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab
import argparse
#import chord_probability as cp
import rest
def gate_stop(mnow, counter, finger=1, width=5.0, c=65.0):
    """Maybe replace the current notes with rests, gating the phrase off.

    The stop chance follows a sigmoid centred at step ``c`` with slope
    ``1/width``; ``c == np.inf`` disables gating entirely.  Returns the
    (possibly replaced) note array and the updated counter (-1 on stop).
    """
    if c == np.inf:
        return mnow, counter + 1
    numrest = rest.get_numrest()
    # Sigmoid stop probability: ~0 early on, ~1 once counter passes c.
    stop_probability = 1.0 / (1 + np.exp(-(1.0 / width) * (float(counter) - c)))
    if np.random.random() < stop_probability:
        # Gate closes: emit rests on every finger and restart the counter.
        return numrest * np.ones(finger, dtype=int), -1
    return mnow, counter + 1
| [
"divrot@gmail.com"
] | divrot@gmail.com |
55e4a9778ff59c0161d9877f8b727552e30befcb | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5738606668808192_0/Python/Nihilant/p3.py | 9a79afbff433b6e056e4bf1c99769fccfd98c045 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import sys, functools, math
def jc(n, j):
    """Print ``j`` jamcoins of length ``n`` (Code Jam 2016 "Coin Jam").

    A jamcoin is a 0/1 string starting and ending in 1 whose interpretation
    in every base 2..10 is composite; one witness divisor per base is
    printed next to each coin.
    """
    # First length-n candidate with both end bits set; stepping by 2 keeps
    # the last binary digit at 1.
    coin_n = 2**(n-1) + 1
    for i in range(j):
        test = True
        while test:
            coin = bin(coin_n)[2:]
            sol = []  # one nontrivial divisor per base 2..10
            for base in range(2, 11):
                num = int(coin, base=base)
                k = -1
                # NOTE(review): range(2, limit) never tests `limit` itself, so
                # squares of primes are treated as prime — confirm whether
                # range(2, limit + 1) was intended.
                limit = int(math.sqrt(num))
                for div in range(2, limit):
                    if num % div == 0:
                        k = div
                        break
                if k == -1:
                    # No divisor found in some base: not a jamcoin, try next.
                    coin_n = coin_n + 2
                    break
                else:
                    sol.append(k)
            if len(sol) == 9:
                # Composite in all nine bases: emit the coin and its witnesses.
                coin_n = coin_n + 2
                print(coin, ' '.join(map(str, sol)))
                test = False
if __name__ == "__main__":
f = sys.stdin
if len(sys.argv) >= 2:
fn = sys.argv[1]
if fn != '-':
f = open(fn)
T = int(f.readline())
for i in range(T):
N, J = f.readline().strip('\n').split(" ")
print("Case #{0}:".format(i + 1))
jc(int(N), int(J)) | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
3f1ce17c7e56aa343e288281207e4e0013191cf9 | ec53949dafa4b6ad675d679b05ed7c83fef2c69a | /DataStructuresAndAlgo/DynamicProgramming/FibonacciTabulation.py | d691cd1c6b7a1de451aa33b49df8d84df1b3b17e | [] | no_license | tpotjj/Python | 9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a | ca73c116ada4d05c0c565508163557744c86fc76 | refs/heads/master | 2023-07-11T16:37:10.039522 | 2021-08-14T11:17:55 | 2021-08-14T11:17:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | def fibTab(n):
tb = [0, 1]
for i in range(2, n):
tb.append(tb[i-1] + tb[i-2])
return tb[n-1]
print(fibTab(6)) | [
"joris97jansen@gmail.com"
] | joris97jansen@gmail.com |
1c4e246164d741c7eb9d62c3f9a4bcf52e70675a | 9b3339afd2a3ab63f127cdfcfa1867a07438b2b4 | /auto/bin/easy_install | 9f80143f60436832af6025b0e5895c2bd792f583 | [] | no_license | WilfredLemus/vehiculo | ea3c739d2b5a15f8a6fbb6d33753b360fb67b59d | a82ca272a85540f3ba071dbe443afe3f88d7b2b0 | refs/heads/master | 2021-05-28T14:56:46.566651 | 2014-11-16T01:57:38 | 2014-11-16T01:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | #!/home/rachel/Proyectos/auto/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.24','console_scripts','easy_install'
__requires__ = 'distribute==0.6.24'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('distribute==0.6.24', 'console_scripts', 'easy_install')()
)
| [
"wilfred.lean.15@gmail.com"
] | wilfred.lean.15@gmail.com | |
4ce3dcc0fc7c6037e9c5ce4941d6d1895672bad9 | 74da969ffdf43a16847bcc29e6017a21991e9b40 | /original_code/train_blizzard.py | bedd5fb9bca0133ae05ea67fa75c8cc35f419061 | [] | no_license | hksBRT/zforcing | f8d1a5c95dbe3ff6f53c30eea83ad22518d78c5c | 002224f845fc7d88da32c8e66da3a452875d426d | refs/heads/master | 2020-05-18T10:36:47.661550 | 2019-05-14T01:49:58 | 2019-05-14T01:49:58 | 184,356,613 | 0 | 0 | null | 2019-05-01T01:55:28 | 2019-05-01T01:55:27 | null | UTF-8 | Python | false | false | 7,821 | py | import torch
from torch.autograd import Variable
import time
import click
import numpy as np
import os
from itertools import chain
import load
from blizzard_data import Blizzard_tbptt
from model import ZForcing
def evaluate(dataset, model):
    """Return the mean (fwd NLL + KLD) of *model* over every batch in *dataset*.

    Uses the legacy (pre-0.4) PyTorch API — ``Variable(..., volatile=True)``
    disables gradient tracking, matching the ``.data[0]`` usage elsewhere.
    """
    model.eval()
    hidden = model.init_hidden(dataset.batch_size)
    loss = []
    for x, y, x_mask in dataset:
        x = Variable(torch.from_numpy(x), volatile=True).float().cuda()
        y = Variable(torch.from_numpy(y), volatile=True).float().cuda()
        x_mask = Variable(torch.from_numpy(x_mask)).float().cuda()
        # compute all the states for forward and backward
        fwd_nll, bwd_nll, aux_nll, kld = \
            model(x, y, x_mask, hidden)
        loss.append((fwd_nll + kld).data[0])
    return np.mean(np.asarray(loss))
@click.command()
@click.option('--expname', default='blizzard_logs')
@click.option('--nlayers', default=1)
@click.option('--seed', default=1234)
@click.option('--num_epochs', default=100)
@click.option('--rnn_dim', default=2048) # As in SRNN.
@click.option('--data', default='./')
@click.option('--bsz', default=128) # As in SRNN.
@click.option('--lr', default=0.0003) # As in SRNN.
@click.option('--z_dim', default=256) # As in SRNN.
@click.option('--emb_dim', default=1024) # CHECK: As in SRNN?
@click.option('--mlp_dim', default=1024) # As in SRNN.
@click.option('--bwd', default=0.)
@click.option('--aux_sta', default=0.0)
@click.option('--aux_end', default=0.0)
@click.option('--kla_sta', default=0.2)
@click.option('--cond_ln', is_flag=True)
@click.option('--z_force', is_flag=True)
def train(expname, nlayers, seed, num_epochs, rnn_dim, data, bsz, lr, z_dim,
          emb_dim, mlp_dim, aux_sta, aux_end, kla_sta, bwd, cond_ln, z_force):
    """Train a ZForcing model on Blizzard speech with TBPTT.

    CLI entry point: builds the model and data iterators, anneals the KLD
    and auxiliary-loss weights during training, logs to a per-run file, and
    evaluates on the valid/test splits once per epoch.  Uses the legacy
    (pre-0.4) PyTorch API (Variable, ``.data[0]``, ``clip_grad_norm``).
    """
    rng = np.random.RandomState(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    log_interval = 10
    # model_id encodes the hyperparameters so each run gets its own files.
    model_id = 'blizzard_seed{}_cln{}_zf{}_auxsta{}_auxend{}_klasta{}_bwd{}'.format(
        seed, int(cond_ln), z_force, aux_sta, aux_end, kla_sta, bwd)
    if not os.path.exists(expname):
        os.makedirs(expname)
    log_file_name = os.path.join(expname, model_id + '.txt')
    model_file_name = os.path.join(expname, model_id + '.pt')
    log_file = open(log_file_name, 'w')
    model = ZForcing(200, emb_dim, rnn_dim, z_dim,
                     mlp_dim, 400, nlayers=nlayers,
                     cond_ln=cond_ln, z_force=z_force)
    print('Loading data..')
    file_name = 'blizzard_unseg_tbptt'
    # Per-feature normalisation statistics computed offline.
    normal_params = np.load(data + file_name + '_normal.npz')
    X_mean = normal_params['X_mean']
    X_std = normal_params['X_std']
    train_data = Blizzard_tbptt(name='train',
                                path=data,
                                frame_size=200,
                                file_name=file_name,
                                X_mean=X_mean,
                                X_std=X_std)
    valid_data = Blizzard_tbptt(name='valid',
                                path=data,
                                frame_size=200,
                                file_name=file_name,
                                X_mean=X_mean,
                                X_std=X_std)
    test_data = Blizzard_tbptt(name='test',
                               path=data,
                               frame_size=200,
                               file_name=file_name,
                               X_mean=X_mean,
                               X_std=X_std)
    # The following numbers are for batch_size of 128 as in SRNN.
    assert bsz == 128
    train_data = load.BlizzardIterator(train_data, bsz, start=0, end=2040064)
    valid_data = load.BlizzardIterator(valid_data, bsz, start=2040064, end=2152704)
    # Use complete batch only.
    test_data = load.BlizzardIterator(test_data, bsz, start=2152704, end=2267008-128)
    print('Done.')
    model.cuda()
    hidden = model.init_hidden(bsz)
    opt = torch.optim.Adam(model.parameters(), lr=lr, eps=1e-5)
    nbatches = train_data.nbatch
    kld_step = 0.00005
    aux_step = abs(aux_end - aux_sta) / (2 * nbatches) # Annealing over two epochs.
    print("aux_step: {}".format(aux_step))
    kld_weight = kla_sta
    aux_weight = aux_sta
    t = time.time()
    for epoch in range(num_epochs):
        step = 0
        # NOTE(review): re-initialised to inf every epoch, so the comparison
        # at the bottom of this loop always succeeds — the model is saved
        # every epoch and the lr-decay branch is unreachable.  Confirm this
        # was meant to live outside the epoch loop.
        old_valid_loss = np.inf
        b_fwd_loss, b_bwd_loss, b_kld_loss, b_aux_loss, b_all_loss = \
            (0., 0., 0., 0., 0.)
        model.train()
        print('Epoch {}: ({})'.format(epoch, model_id.upper()))
        for x, y, x_mask in train_data:
            step += 1
            opt.zero_grad()
            x = Variable(torch.from_numpy(x)).float().cuda()
            y = Variable(torch.from_numpy(y)).float().cuda()
            x_mask = Variable(torch.from_numpy(x_mask)).float().cuda()
            # compute all the states for forward and backward
            fwd_nll, bwd_nll, aux_nll, kld = model(x, y, x_mask, hidden)
            # Backward loss only counts while the auxiliary weight is active.
            bwd_nll = (aux_weight > 0.) * (bwd * bwd_nll)
            aux_nll = aux_weight * aux_nll
            all_loss = fwd_nll + bwd_nll + aux_nll + kld_weight * kld
            # anneal kld cost
            kld_weight += kld_step
            kld_weight = min(kld_weight, 1.)
            # anneal auxiliary cost
            if aux_sta <= aux_end:
                aux_weight += aux_step
                aux_weight = min(aux_weight, aux_end)
            else:
                aux_weight -= aux_step
                aux_weight = max(aux_weight, aux_end)
            # Skip exploding-KLD and NaN/inf batches instead of stepping.
            if kld.data[0] >= 10000:
                continue
            if np.isnan(all_loss.data[0]) or np.isinf(all_loss.data[0]):
                print("NaN", end="\r") # Useful to see if training is stuck.
                continue
            all_loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), 100.)
            opt.step()
            b_all_loss += all_loss.data[0]
            b_fwd_loss += fwd_nll.data[0]
            b_bwd_loss += bwd_nll.data[0]
            b_kld_loss += kld.data[0]
            b_aux_loss += aux_nll.data[0]
            if step % log_interval == 0:
                s = time.time()
                log_line = 'epoch: [%d/%d], step: [%d/%d], loss: %f, fwd loss: %f, aux loss: %f, bwd loss: %f, kld: %f, kld weight: %f, aux weight: %.4f, %.2fit/s' % (
                    epoch, num_epochs, step, nbatches,
                    b_all_loss / log_interval,
                    b_fwd_loss / log_interval,
                    b_aux_loss / log_interval,
                    b_bwd_loss / log_interval,
                    b_kld_loss / log_interval,
                    kld_weight,
                    aux_weight,
                    log_interval / (s - t))
                b_all_loss = 0.
                b_fwd_loss = 0.
                b_bwd_loss = 0.
                b_aux_loss = 0.
                b_kld_loss = 0.
                t = time.time()
                print(log_line)
                log_file.write(log_line + '\n')
                log_file.flush()
        # evaluate per epoch
        print('--- Epoch finished ----')
        val_loss = evaluate(valid_data, model)
        log_line = 'valid -- epoch: %s, nll: %f' % (epoch, val_loss)
        print(log_line)
        log_file.write(log_line + '\n')
        test_loss = evaluate(test_data, model)
        log_line = 'test -- epoch: %s, nll: %f' % (epoch, test_loss)
        print(log_line)
        log_file.write(log_line + '\n')
        log_file.flush()
        if old_valid_loss > val_loss:
            old_valid_loss = val_loss
            model.save(model_file_name)
        else:
            # Halve the learning rate (down to a floor) when validation stalls.
            for param_group in opt.param_groups:
                lr = param_group['lr']
                if lr > 0.0001:
                    lr *= 0.5
                param_group['lr'] = lr
if __name__ == '__main__':
train()
| [
"harikrishnan.suresh@bluerivert.com"
] | harikrishnan.suresh@bluerivert.com |
5bcb0760c6c64e527ed4a662ff790c3cb71afad6 | b1ff576cdde5adf698b98446538e0b56d18f070f | /grading/apps.py | b507c75018b33f6f0904ff9ce425d1006d934d9a | [] | no_license | DUMBALINYOLO/gbc_oms | e3cfba17a12f3600b6503fc70cc9f3dcab5cc0e2 | cdea6fd81333088b2db9911140681fec9577132a | refs/heads/main | 2023-08-20T11:48:36.418990 | 2021-10-11T23:25:35 | 2021-10-11T23:25:35 | 322,593,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.apps import AppConfig
class GradingConfig(AppConfig):
    """Django application configuration for the `grading` app."""
    name = 'grading'
    # def ready(self):
    # import grading.signals
| [
"baridzimaximillem@gmail.com"
] | baridzimaximillem@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.