blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0b63bd6581762807f1f8058e7f33472280bc291 | 18b3661cbcfb1ebbf51564981e059bb25a27e0c8 | /countdown_clock/main.py | d225a648c4845393ea7d31bf97988650b7ca4fe5 | [
"MIT"
] | permissive | PrashantMhrzn/beginner-python-projects | c131413184508256ac6862e240f8496f8728081b | 3911ee019179e43ce2f512515df3a4d9bcc21c53 | refs/heads/master | 2022-12-03T09:43:04.010249 | 2020-08-23T06:37:23 | 2020-08-23T06:37:23 | 283,092,986 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import time
def countdown(total_sec):
    """Count down from ``total_sec`` seconds, printing the remaining time once per second.

    Sleeps one second per iteration; prints "1 second" for the final tick and
    "<n> seconds" otherwise. Returns None.
    """
    # Guard with > 0 (the original used != 0), so a negative input can no
    # longer spin forever — it simply returns immediately, as 0 does.
    while total_sec > 0:
        time.sleep(1)
        if total_sec == 1:
            print('1 second')
        else:
            print(f'{total_sec} seconds')
        total_sec -= 1
try:
    # Expected input: "<sec> <min> <hour>" separated by spaces (enter 0 for unused fields).
    usr_inp = input('Start count down at: <sec> <space> <min> <space> <hour>(enter 0 to pass none)')
    smh=usr_inp.split()
    min_to_sec=int(smh[1])*60
    hour_to_sec=int(smh[2])*3600
    total_sec=hour_to_sec+min_to_sec+int(smh[0])
    countdown(total_sec)
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # catching (ValueError, IndexError) would be safer.
    print('Enter appropriate values!')
print('countdown over')
try:
    go=input('start over?(y/n)')
    if go == 'y':
        # NOTE(review): if the first try block failed before total_sec was
        # assigned, this line raises NameError — confirm intended behavior.
        countdown(total_sec)
    elif go == 'n':
        print('exiting')
    else:
        print("Please enter 'y' or 'n'!!!")
except ValueError:
    # NOTE(review): input() does not raise ValueError here, so this handler
    # appears unreachable as written.
    print("Please enter 'y' or 'n'!!!")
"prashantmaharjan5@gmail.com"
] | prashantmaharjan5@gmail.com |
13df18b783a250ba20786b431b57390ce52ba5a3 | 8ad1f12e3b48e73162f89e88da8c67fd6e4b4a8f | /src/6.py | e2e4acb7b2c2dde9bacafbd7c2aea4d5e11a6ea5 | [] | no_license | ouked/advent_of_code2020 | 9577232479f5b8c1ce3f246b623908be426cbb6f | a97e178753e4397aaf278cd18488d82420894458 | refs/heads/master | 2023-01-30T16:09:36.008297 | 2020-12-08T23:05:44 | 2020-12-08T23:05:44 | 318,203,540 | 0 | 0 | null | 2020-12-08T23:02:17 | 2020-12-03T13:32:14 | Python | UTF-8 | Python | false | false | 1,087 | py | # For each group, count the number of questions to which anyone answered "yes". What is the sum of those counts?
# https://adventofcode.com/2020/day/6
from src.aoc_core import *
c = Core(6)
c.tic()
# Append "" so last block is processed (groups are terminated by a blank line).
data = c.get_str_input()
data.append("")
# Part 1: questions to which ANYONE in the group answered "yes" (union).
part1 = 0
positive_answers = []
# Part 2: questions to which EVERYONE in the group answered "yes" (intersection).
part2 = 0
mutual_positive_answers = []
# Flag to know when to refill mutual_positive_answers.
# Fixes bug where the array would be pre-maturely filled.
is_new_group = 1
for line in data:
    # Reset arrays at the start of each group; the intersection is seeded
    # with the first person's answers.
    if is_new_group:
        mutual_positive_answers = [char for char in line]
        positive_answers = []
        is_new_group = 0
    # End of block: tally this group and mark the next line as a new group.
    if line == "":
        part1 += len(positive_answers)
        part2 += len(mutual_positive_answers)
        is_new_group = 1
        continue
    # Union for part 1 (add unseen chars), intersection for part 2 (keep
    # only chars also present in this person's line).
    positive_answers += [char for char in line if char not in positive_answers]
    mutual_positive_answers = [char for char in mutual_positive_answers if char in line]
print("1)", part1)
print("2)", part2)
c.print_toc()
# 1) 6259
# 2) 3178
| [
"alexander.dawkins@gmail.com"
] | alexander.dawkins@gmail.com |
0cbd1c8aeac8d787abd3ecf791a38ec0389941b3 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /privateticketsplugin/branches/0.10/privatetickets/report.py | e36569c4591e109ecf3d68f4f5d34b955ec69b4a | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | from trac.core import *
from trac.web.api import IRequestFilter
from trac.ticket.report import ReportModule
from api import PrivateTicketsSystem
__all__ = ['PrivateTicketsReportFilter']
class PrivateTicketsReportFilter(Component):
    """Show only tickets the user is involved in in the reports."""
    implements(IRequestFilter)

    # IRequestFilter methods
    def pre_process_request(self, req, handler):
        # Block raw tab/CSV report exports for users lacking TICKET_VIEW:
        # those formats would bypass the HDF post-processing filter below.
        if isinstance(handler, ReportModule) and \
           not req.perm.has_permission('TICKET_VIEW') and \
           req.args.get('format') in ('tab', 'csv'):
            raise TracError('Access denied')
        return handler

    def post_process_request(self, req, template, content_type):
        if req.args.get('DO_PRIVATETICKETS_FILTER') == 'report':
            # Walk the HDF: classify each report row as kept ("left") or
            # removed ("deleted") based on per-ticket access.
            fn = PrivateTicketsSystem(self.env).check_ticket_access
            deleted = []
            left = []
            node = req.hdf.getObj('report.items')
            if node is None:
                return template, content_type
            node = node.child()
            while node:
                i = node.name()
                id = req.hdf['report.items.%s.ticket'%i]
                if not fn(req, id):
                    deleted.append(i)
                else:
                    left.append(i)
                node = node.next()
            # Delete the needed subtrees
            for n in deleted:
                req.hdf.removeTree('report.items.%s'%n)
            # Recalculate this
            req.hdf['report.numrows'] = len(left)
            # Move the remaining items into their normal places.
            # NOTE(review): src comes from node.name() (a string) while dest
            # is an int, so `src == dest` can never be true and the skip
            # below looks dead — confirm node.name()'s return type.
            for src, dest in zip(left, xrange(len(left)+len(deleted))):
                if src == dest: continue
                req.hdf.getObj('report.items').copy(str(dest), req.hdf.getObj('report.items.%s'%src))
            for n in xrange(len(left), len(left)+len(deleted)):
                req.hdf.removeTree('report.items.%s'%n)
        return template, content_type
| [
"coderanger@7322e99d-02ea-0310-aa39-e9a107903beb"
] | coderanger@7322e99d-02ea-0310-aa39-e9a107903beb |
f17d60f3ba2d4ccd6446efee607a59d13b9f6596 | b09db2bba8019b1d11720f1092304e5ce9948d91 | /lib/sqlalchemy/util/__init__.py | 273570357b09f600df0913bd840eed8f0a4f6efe | [
"MIT"
] | permissive | theosotr/sqlalchemy | 6da34f5e28859f4ae7479db4ca9963c8392d7ac8 | e1d4e59116bbf1a12bb6b3f57d33ddfc757d4567 | refs/heads/master | 2022-10-17T08:42:31.757925 | 2020-06-11T03:14:46 | 2020-06-11T03:14:46 | 271,558,840 | 0 | 0 | MIT | 2020-06-11T13:51:28 | 2020-06-11T13:51:28 | null | UTF-8 | Python | false | false | 6,629 | py | # util/__init__.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import defaultdict # noqa
from contextlib import contextmanager # noqa
from functools import partial # noqa
from functools import update_wrapper # noqa
from ._collections import coerce_generator_arg # noqa
from ._collections import collections_abc # noqa
from ._collections import column_dict # noqa
from ._collections import column_set # noqa
from ._collections import EMPTY_SET # noqa
from ._collections import FacadeDict # noqa
from ._collections import flatten_iterator # noqa
from ._collections import has_dupes # noqa
from ._collections import has_intersection # noqa
from ._collections import IdentitySet # noqa
from ._collections import ImmutableContainer # noqa
from ._collections import immutabledict # noqa
from ._collections import ImmutableProperties # noqa
from ._collections import LRUCache # noqa
from ._collections import ordered_column_set # noqa
from ._collections import OrderedDict # noqa
from ._collections import OrderedIdentitySet # noqa
from ._collections import OrderedProperties # noqa
from ._collections import OrderedSet # noqa
from ._collections import PopulateDict # noqa
from ._collections import Properties # noqa
from ._collections import ScopedRegistry # noqa
from ._collections import ThreadLocalRegistry # noqa
from ._collections import to_column_set # noqa
from ._collections import to_list # noqa
from ._collections import to_set # noqa
from ._collections import unique_list # noqa
from ._collections import UniqueAppender # noqa
from ._collections import update_copy # noqa
from ._collections import WeakPopulateDict # noqa
from ._collections import WeakSequence # noqa
from .compat import b # noqa
from .compat import b64decode # noqa
from .compat import b64encode # noqa
from .compat import binary_type # noqa
from .compat import byte_buffer # noqa
from .compat import callable # noqa
from .compat import cmp # noqa
from .compat import cpython # noqa
from .compat import decode_backslashreplace # noqa
from .compat import dottedgetter # noqa
from .compat import has_refcount_gc # noqa
from .compat import inspect_getfullargspec # noqa
from .compat import int_types # noqa
from .compat import iterbytes # noqa
from .compat import itertools_filter # noqa
from .compat import itertools_filterfalse # noqa
from .compat import namedtuple # noqa
from .compat import next # noqa
from .compat import parse_qsl # noqa
from .compat import pickle # noqa
from .compat import print_ # noqa
from .compat import py2k # noqa
from .compat import py36 # noqa
from .compat import py37 # noqa
from .compat import py3k # noqa
from .compat import quote_plus # noqa
from .compat import raise_ # noqa
from .compat import raise_from_cause # noqa
from .compat import reduce # noqa
from .compat import reraise # noqa
from .compat import string_types # noqa
from .compat import StringIO # noqa
from .compat import text_type # noqa
from .compat import threading # noqa
from .compat import timezone # noqa
from .compat import TYPE_CHECKING # noqa
from .compat import u # noqa
from .compat import ue # noqa
from .compat import unquote # noqa
from .compat import unquote_plus # noqa
from .compat import win32 # noqa
from .compat import with_metaclass # noqa
from .compat import zip_longest # noqa
from .deprecations import deprecated # noqa
from .deprecations import deprecated_20 # noqa
from .deprecations import deprecated_20_cls # noqa
from .deprecations import deprecated_cls # noqa
from .deprecations import deprecated_params # noqa
from .deprecations import inject_docstring_text # noqa
from .deprecations import warn_deprecated # noqa
from .deprecations import warn_deprecated_20 # noqa
from .langhelpers import add_parameter_text # noqa
from .langhelpers import as_interface # noqa
from .langhelpers import asbool # noqa
from .langhelpers import asint # noqa
from .langhelpers import assert_arg_type # noqa
from .langhelpers import attrsetter # noqa
from .langhelpers import bool_or_str # noqa
from .langhelpers import chop_traceback # noqa
from .langhelpers import class_hierarchy # noqa
from .langhelpers import classproperty # noqa
from .langhelpers import clsname_as_plain_name # noqa
from .langhelpers import coerce_kw_type # noqa
from .langhelpers import constructor_copy # noqa
from .langhelpers import constructor_key # noqa
from .langhelpers import counter # noqa
from .langhelpers import decode_slice # noqa
from .langhelpers import decorator # noqa
from .langhelpers import dictlike_iteritems # noqa
from .langhelpers import duck_type_collection # noqa
from .langhelpers import ellipses_string # noqa
from .langhelpers import EnsureKWArgType # noqa
from .langhelpers import format_argspec_init # noqa
from .langhelpers import format_argspec_plus # noqa
from .langhelpers import generic_repr # noqa
from .langhelpers import get_callable_argspec # noqa
from .langhelpers import get_cls_kwargs # noqa
from .langhelpers import get_func_kwargs # noqa
from .langhelpers import getargspec_init # noqa
from .langhelpers import HasMemoized # noqa
from .langhelpers import hybridmethod # noqa
from .langhelpers import hybridproperty # noqa
from .langhelpers import iterate_attributes # noqa
from .langhelpers import map_bits # noqa
from .langhelpers import md5_hex # noqa
from .langhelpers import memoized_instancemethod # noqa
from .langhelpers import memoized_property # noqa
from .langhelpers import MemoizedSlots # noqa
from .langhelpers import methods_equivalent # noqa
from .langhelpers import monkeypatch_proxied_specials # noqa
from .langhelpers import NoneType # noqa
from .langhelpers import only_once # noqa
from .langhelpers import PluginLoader # noqa
from .langhelpers import portable_instancemethod # noqa
from .langhelpers import preload_module # noqa
from .langhelpers import preloaded # noqa
from .langhelpers import quoted_token_parser # noqa
from .langhelpers import safe_reraise # noqa
from .langhelpers import set_creation_order # noqa
from .langhelpers import string_or_unprintable # noqa
from .langhelpers import symbol # noqa
from .langhelpers import unbound_method_to_callable # noqa
from .langhelpers import warn # noqa
from .langhelpers import warn_exception # noqa
from .langhelpers import warn_limited # noqa
from .langhelpers import wrap_callable # noqa
SQLALCHEMY_WARN_20 = False
| [
"mike_mp@zzzcomputing.com"
] | mike_mp@zzzcomputing.com |
72d39a6c4a2057758f588c718c98fe591544ee9e | 0cd64f3f67c6a3b130a788906da84ffc3d15396a | /Library/lib/python3.9/site-packages/sympy/stats/frv_types.py | 2baaa93e936929c304e4f1d3e880cd670228af8c | [
"MIT",
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi",
"Python-2.0"
] | permissive | Ryorama/codeapp | 32ef44a3e8058da9858924df211bf82f5f5018f1 | cf7f5753c6c4c3431d8209cbaacf5208c3c664fa | refs/heads/main | 2023-06-26T09:24:13.724462 | 2021-07-27T17:54:25 | 2021-07-27T17:54:25 | 388,520,626 | 0 | 0 | MIT | 2021-07-22T16:01:32 | 2021-07-22T16:01:32 | null | UTF-8 | Python | false | false | 22,777 | py | """
Finite Discrete Random Variables - Prebuilt variable types
Contains
========
FiniteRV
DiscreteUniform
Die
Bernoulli
Coin
Binomial
BetaBinomial
Hypergeometric
Rademacher
IdealSoliton
RobustSoliton
"""
from sympy import (S, sympify, Rational, binomial, cacheit, Integer,
Dummy, Eq, Intersection, Interval, log, Range,
Symbol, Lambda, Piecewise, Or, Gt, Lt, Ge, Le, Contains)
from sympy import beta as beta_fn
from sympy.stats.frv import (SingleFiniteDistribution,
SingleFinitePSpace)
from sympy.stats.rv import _value_check, Density, is_random
# Public API of this module: the prebuilt finite random variable factories.
__all__ = ['FiniteRV',
           'DiscreteUniform',
           'Die',
           'Bernoulli',
           'Coin',
           'Binomial',
           'BetaBinomial',
           'Hypergeometric',
           'Rademacher',
           'IdealSoliton',
           'RobustSoliton',
           ]
def rv(name, cls, *args, **kwargs):
    """Internal factory: sympify *args*, build distribution *cls*, optionally
    validate it (``check`` kwarg, default True), and return the random symbol
    of the resulting finite probability space."""
    args = list(map(sympify, args))
    dist = cls(*args)
    if kwargs.pop('check', True):
        dist.check(*args)
    pspace = SingleFinitePSpace(name, dist)
    # If any parameter is itself a random variable, wrap the result as a
    # compound distribution instead of a plain finite pspace.
    if any(is_random(arg) for arg in args):
        from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
        pspace = CompoundPSpace(name, CompoundDistribution(dist))
    return pspace.value
class FiniteDistributionHandmade(SingleFiniteDistribution):
    """Finite distribution defined directly by a {value: probability} dict."""

    @property
    def dict(self):
        # The user-supplied density mapping (first constructor argument).
        return self.args[0]

    def pmf(self, x):
        # NOTE(review): the parameter is immediately shadowed by a fresh
        # Symbol, so this returns a Lambda in a dummy variable rather than
        # the value at *x* — confirm callers expect a Lambda here.
        x = Symbol('x')
        return Lambda(x, Piecewise(*(
            [(v, Eq(k, x)) for k, v in self.dict.items()] + [(S.Zero, True)])))

    @property
    def set(self):
        return set(self.dict.keys())

    @staticmethod
    def check(density):
        # Each point probability must lie in [0, 1] and the total must be 1.
        for p in density.values():
            _value_check((p >= 0, p <= 1),
                         "Probability at a point must be between 0 and 1.")
        val = sum(density.values())
        _value_check(Eq(val, 1) != S.false, "Total Probability must be 1.")
def FiniteRV(name, density, **kwargs):
    r"""
    Create a Finite Random Variable given a dict representing the density.

    Parameters
    ==========

    name : Symbol
        Represents name of the random variable.
    density: A dict
        Dictionary containing the pdf of finite distribution
    check : bool
        If True, it will check whether the given density
        integrates to 1 over the given set. If False, it
        will not perform this check. Default is False.

    Examples
    ========

    >>> from sympy.stats import FiniteRV, P, E

    >>> density = {0: .1, 1: .2, 2: .3, 3: .4}
    >>> X = FiniteRV('X', density)

    >>> E(X)
    2.00000000000000
    >>> P(X >= 2)
    0.700000000000000

    Returns
    =======

    RandomSymbol

    """
    # have a default of False while `rv` should have a default of True;
    # hand-made densities are not validated unless explicitly requested.
    kwargs['check'] = kwargs.pop('check', False)
    return rv(name, FiniteDistributionHandmade, density, **kwargs)
class DiscreteUniformDistribution(SingleFiniteDistribution):
    """Uniform distribution over the (distinct) constructor arguments."""

    @staticmethod
    def check(*args):
        # not using _value_check since there is a
        # suggestion for the user
        if len(set(args)) != len(args):
            # Duplicated items: suggest the equivalent weighted FiniteRV call.
            from sympy.utilities.iterables import multiset
            from sympy.utilities.misc import filldedent
            weights = multiset(args)
            n = Integer(len(args))
            for k in weights:
                weights[k] /= n
            raise ValueError(filldedent("""
                Repeated args detected but set expected. For a
                distribution having different weights for each
                item use the following:""") + (
                '\nS("FiniteRV(%s, %s)")' % ("'X'", weights)))

    @property
    def p(self):
        # Equal probability for each of the supplied items.
        return Rational(1, len(self.args))

    @property  # type: ignore
    @cacheit
    def dict(self):
        return {k: self.p for k in self.set}

    @property
    def set(self):
        return set(self.args)

    def pmf(self, x):
        if x in self.args:
            return self.p
        else:
            return S.Zero
def DiscreteUniform(name, items):
    r"""
    Create a Finite Random Variable representing a uniform distribution over
    the input set.

    Parameters
    ==========

    items: list/tuple
        Items over which Uniform distribution is to be made

    Examples
    ========

    >>> from sympy.stats import DiscreteUniform, density
    >>> from sympy import symbols

    >>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c
    >>> density(X).dict
    {a: 1/3, b: 1/3, c: 1/3}

    >>> Y = DiscreteUniform('Y', list(range(5))) # distribution over a range
    >>> density(Y).dict
    {0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5}

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Discrete_uniform_distribution
    .. [2] http://mathworld.wolfram.com/DiscreteUniformDistribution.html

    """
    # Items are unpacked so each becomes one constructor argument.
    return rv(name, DiscreteUniformDistribution, *items)
class DieDistribution(SingleFiniteDistribution):
    """Uniform distribution over the faces 1..sides of a fair die."""

    _argnames = ('sides',)

    @staticmethod
    def check(sides):
        _value_check((sides.is_positive, sides.is_integer),
                     "number of sides must be a positive integer.")

    @property
    def is_symbolic(self):
        # True when the number of sides is not a concrete number.
        return not self.sides.is_number

    @property
    def high(self):
        return self.sides

    @property
    def low(self):
        return S.One

    @property
    def set(self):
        if self.is_symbolic:
            # Symbolic size: describe the support as an integer interval.
            return Intersection(S.Naturals0, Interval(0, self.sides))
        return set(map(Integer, list(range(1, self.sides + 1))))

    def pmf(self, x):
        x = sympify(x)
        if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number' or 'Symbol' or , "
                        "'RandomSymbol' not %s" % (type(x)))
        # 1/sides on the integers 1..sides, zero elsewhere.
        cond = Ge(x, 1) & Le(x, self.sides) & Contains(x, S.Integers)
        return Piecewise((S.One/self.sides, cond), (S.Zero, True))
def Die(name, sides=6):
    r"""
    Create a Finite Random Variable representing a fair die.

    Parameters
    ==========

    sides: Integer
        Represents the number of sides of the Die, by default is 6

    Examples
    ========

    >>> from sympy.stats import Die, density
    >>> from sympy import Symbol

    >>> D6 = Die('D6', 6) # Six sided Die
    >>> density(D6).dict
    {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}

    >>> D4 = Die('D4', 4) # Four sided Die
    >>> density(D4).dict
    {1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}

    >>> n = Symbol('n', positive=True, integer=True)
    >>> Dn = Die('Dn', n) # n sided Die
    >>> density(Dn).dict
    Density(DieDistribution(n))
    >>> density(Dn).dict.subs(n, 4).doit()
    {1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}

    Returns
    =======

    RandomSymbol

    """
    # Thin wrapper over the generic factory with a DieDistribution.
    return rv(name, DieDistribution, sides)
class BernoulliDistribution(SingleFiniteDistribution):
    """Two-point distribution: probability p on `succ`, 1 - p on `fail`."""

    _argnames = ('p', 'succ', 'fail')

    @staticmethod
    def check(p, succ, fail):
        _value_check((p >= 0, p <= 1),
                     "p should be in range [0, 1].")

    @property
    def set(self):
        return {self.succ, self.fail}

    def pmf(self, x):
        # When both outcomes are plain Symbols, structural Python equality is
        # used for the Piecewise conditions; otherwise symbolic Eq is built.
        # NOTE(review): the `==` branch yields concrete booleans — confirm
        # this is the intended behavior for symbolic outcomes.
        if isinstance(self.succ, Symbol) and isinstance(self.fail, Symbol):
            return Piecewise((self.p, x == self.succ),
                             (1 - self.p, x == self.fail),
                             (S.Zero, True))
        return Piecewise((self.p, Eq(x, self.succ)),
                         (1 - self.p, Eq(x, self.fail)),
                         (S.Zero, True))
def Bernoulli(name, p, succ=1, fail=0):
    r"""
    Create a Finite Random Variable representing a Bernoulli process.

    Parameters
    ==========

    p : Rational number between 0 and 1
        Represents probability of success
    succ : Integer/symbol/string
        Represents event of success
    fail : Integer/symbol/string
        Represents event of failure

    Examples
    ========

    >>> from sympy.stats import Bernoulli, density
    >>> from sympy import S

    >>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
    >>> density(X).dict
    {0: 1/4, 1: 3/4}

    >>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
    >>> density(X).dict
    {Heads: 1/2, Tails: 1/2}

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bernoulli_distribution
    .. [2] http://mathworld.wolfram.com/BernoulliDistribution.html

    """
    # Thin wrapper over the generic factory with a BernoulliDistribution.
    return rv(name, BernoulliDistribution, p, succ, fail)
def Coin(name, p=S.Half):
    r"""
    Create a Finite Random Variable representing a Coin toss.

    Parameters
    ==========

    p : Rational Number between 0 and 1
        Represents probability of getting "Heads", by default is Half

    Examples
    ========

    >>> from sympy.stats import Coin, density
    >>> from sympy import Rational

    >>> C = Coin('C') # A fair coin toss
    >>> density(C).dict
    {H: 1/2, T: 1/2}

    >>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
    >>> density(C2).dict
    {H: 3/5, T: 2/5}

    Returns
    =======

    RandomSymbol

    See Also
    ========

    sympy.stats.Binomial

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Coin_flipping

    """
    # A coin is a Bernoulli variable with outcomes 'H' and 'T'.
    return rv(name, BernoulliDistribution, p, 'H', 'T')
class BinomialDistribution(SingleFiniteDistribution):
    """Number of successes in n independent Bernoulli(p) trials, with the
    outcome encoded as k*succ + (n-k)*fail."""

    _argnames = ('n', 'p', 'succ', 'fail')

    @staticmethod
    def check(n, p, succ, fail):
        _value_check((n.is_integer, n.is_nonnegative),
                     "'n' must be nonnegative integer.")
        _value_check((p <= 1, p >= 0),
                     "p should be in range [0, 1].")

    @property
    def high(self):
        return self.n

    @property
    def low(self):
        return S.Zero

    @property
    def is_symbolic(self):
        return not self.n.is_number

    @property
    def set(self):
        if self.is_symbolic:
            return Intersection(S.Naturals0, Interval(0, self.n))
        return set(self.dict.keys())

    def pmf(self, x):
        n, p = self.n, self.p
        x = sympify(x)
        if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number' or 'Symbol' or , "
                        "'RandomSymbol' not %s" % (type(x)))
        # Standard binomial pmf C(n, x) p^x (1-p)^(n-x) on integer 0..n.
        cond = Ge(x, 0) & Le(x, n) & Contains(x, S.Integers)
        return Piecewise((binomial(n, x) * p**x * (1 - p)**(n - x), cond), (S.Zero, True))

    @property  # type: ignore
    @cacheit
    def dict(self):
        if self.is_symbolic:
            return Density(self)
        # Map each encoded outcome (k successes) to its probability.
        return {k*self.succ + (self.n-k)*self.fail: self.pmf(k)
                for k in range(0, self.n + 1)}
def Binomial(name, n, p, succ=1, fail=0):
    r"""
    Create a Finite Random Variable representing a binomial distribution.

    Parameters
    ==========

    n : Positive Integer
        Represents number of trials
    p : Rational Number between 0 and 1
        Represents probability of success
    succ : Integer/symbol/string
        Represents event of success, by default is 1
    fail : Integer/symbol/string
        Represents event of failure, by default is 0

    Examples
    ========

    >>> from sympy.stats import Binomial, density
    >>> from sympy import S, Symbol

    >>> X = Binomial('X', 4, S.Half) # Four "coin flips"
    >>> density(X).dict
    {0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}

    >>> n = Symbol('n', positive=True, integer=True)
    >>> p = Symbol('p', positive=True)
    >>> X = Binomial('X', n, S.Half) # n "coin flips"
    >>> density(X).dict
    Density(BinomialDistribution(n, 1/2, 1, 0))
    >>> density(X).dict.subs(n, 4).doit()
    {0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Binomial_distribution
    .. [2] http://mathworld.wolfram.com/BinomialDistribution.html

    """
    # Thin wrapper over the generic factory with a BinomialDistribution.
    return rv(name, BinomialDistribution, n, p, succ, fail)
#-------------------------------------------------------------------------------
# Beta-binomial distribution ----------------------------------------------------------
class BetaBinomialDistribution(SingleFiniteDistribution):
    """Binomial distribution whose success probability is Beta(alpha, beta)
    distributed; support is the integers 0..n."""

    _argnames = ('n', 'alpha', 'beta')

    @staticmethod
    def check(n, alpha, beta):
        _value_check((n.is_integer, n.is_nonnegative),
                     "'n' must be nonnegative integer. n = %s." % str(n))
        _value_check((alpha > 0),
                     "'alpha' must be: alpha > 0 . alpha = %s" % str(alpha))
        _value_check((beta > 0),
                     "'beta' must be: beta > 0 . beta = %s" % str(beta))

    @property
    def high(self):
        return self.n

    @property
    def low(self):
        return S.Zero

    @property
    def is_symbolic(self):
        return not self.n.is_number

    @property
    def set(self):
        if self.is_symbolic:
            return Intersection(S.Naturals0, Interval(0, self.n))
        return set(map(Integer, list(range(0, self.n + 1))))

    def pmf(self, k):
        # C(n, k) * B(k + a, n - k + b) / B(a, b)
        n, a, b = self.n, self.alpha, self.beta
        return binomial(n, k) * beta_fn(k + a, n - k + b) / beta_fn(a, b)
def BetaBinomial(name, n, alpha, beta):
    r"""
    Create a Finite Random Variable representing a Beta-binomial distribution.

    Parameters
    ==========

    n : Positive Integer
        Represents number of trials
    alpha : Real positive number
    beta : Real positive number

    Examples
    ========

    >>> from sympy.stats import BetaBinomial, density

    >>> X = BetaBinomial('X', 2, 1, 1)
    >>> density(X).dict
    {0: 1/3, 1: 2*beta(2, 2), 2: 1/3}

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
    .. [2] http://mathworld.wolfram.com/BetaBinomialDistribution.html

    """
    # Thin wrapper over the generic factory with a BetaBinomialDistribution.
    return rv(name, BetaBinomialDistribution, n, alpha, beta)
class HypergeometricDistribution(SingleFiniteDistribution):
    """Number of "marked" items among ``n`` draws without replacement from a
    population of size ``N`` containing ``m`` marked items."""

    _argnames = ('N', 'm', 'n')

    @staticmethod
    def check(N, m, n):
        # Arguments arrive positionally in _argnames order (N, m, n).  The
        # previous signature named them (n, N, m) and formatted every error
        # message with str(n), so validation failures reported the wrong
        # value.  The set of accepted inputs is unchanged: all three must be
        # nonnegative integers.
        _value_check((N.is_integer, N.is_nonnegative),
                     "'N' must be nonnegative integer. N = %s." % str(N))
        _value_check((m.is_integer, m.is_nonnegative),
                     "'m' must be nonnegative integer. m = %s." % str(m))
        _value_check((n.is_integer, n.is_nonnegative),
                     "'n' must be nonnegative integer. n = %s." % str(n))

    @property
    def is_symbolic(self):
        # True when any of the three parameters is non-numeric.
        return any(not x.is_number for x in (self.N, self.m, self.n))

    @property
    def high(self):
        # Largest attainable count: min(n, m), built without assuming the
        # comparison is decidable for symbolic parameters.
        return Piecewise((self.n, Lt(self.n, self.m) != False), (self.m, True))

    @property
    def low(self):
        # Smallest attainable count: max(0, n + m - N).
        return Piecewise((0, Gt(0, self.n + self.m - self.N) != False), (self.n + self.m - self.N, True))

    @property
    def set(self):
        N, m, n = self.N, self.m, self.n
        if self.is_symbolic:
            return Intersection(S.Naturals0, Interval(self.low, self.high))
        return {i for i in range(max(0, n + m - N), min(n, m) + 1)}

    def pmf(self, k):
        # C(m, k) * C(N - m, n - k) / C(N, n)
        N, m, n = self.N, self.m, self.n
        return S(binomial(m, k) * binomial(N - m, n - k))/binomial(N, n)
def Hypergeometric(name, N, m, n):
    r"""
    Create a Finite Random Variable representing a hypergeometric distribution.

    Parameters
    ==========

    N : Positive Integer
        Represents finite population of size N.
    m : Positive Integer
        Represents number of trials with required feature.
    n : Positive Integer
        Represents numbers of draws.

    Examples
    ========

    >>> from sympy.stats import Hypergeometric, density

    >>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws
    >>> density(X).dict
    {0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Hypergeometric_distribution
    .. [2] http://mathworld.wolfram.com/HypergeometricDistribution.html

    """
    # Thin wrapper over the generic factory with a HypergeometricDistribution.
    return rv(name, HypergeometricDistribution, N, m, n)
class RademacherDistribution(SingleFiniteDistribution):
    """Distribution taking values -1 and +1, each with probability 1/2."""

    @property
    def set(self):
        return {-1, 1}

    @property
    def pmf(self):
        # Returned as a Lambda in a dummy variable: 1/2 on {-1, 1}, else 0.
        k = Dummy('k')
        return Lambda(k, Piecewise((S.Half, Or(Eq(k, -1), Eq(k, 1))), (S.Zero, True)))
def Rademacher(name):
    r"""
    Create a Finite Random Variable representing a Rademacher distribution.

    Examples
    ========

    >>> from sympy.stats import Rademacher, density

    >>> X = Rademacher('X')
    >>> density(X).dict
    {-1: 1/2, 1: 1/2}

    Returns
    =======

    RandomSymbol

    See Also
    ========

    sympy.stats.Bernoulli

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Rademacher_distribution

    """
    # No parameters: the distribution is fixed at {-1: 1/2, 1: 1/2}.
    return rv(name, RademacherDistribution)
class IdealSolitonDistribution(SingleFiniteDistribution):
    """Ideal soliton distribution on 1..k: P(1) = 1/k, P(i) = 1/(i*(i-1))."""

    _argnames = ('k',)

    @staticmethod
    def check(k):
        _value_check(k.is_integer and k.is_positive,
                    "'k' must be a positive integer.")

    @property
    def low(self):
        return S.One

    @property
    def high(self):
        return self.k

    @property
    def set(self):
        return set(list(Range(1, self.k+1)))

    @property
    @cacheit
    def dict(self):
        if self.k.is_Symbol:
            # Symbolic k: return an unevaluated Density wrapper.
            return Density(self)
        d = {1: Rational(1, self.k)}
        d.update(dict((i, Rational(1, i*(i - 1))) for i in range(2, self.k + 1)))
        return d

    def pmf(self, x):
        x = sympify(x)
        if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number' or 'Symbol' or , "
                        "'RandomSymbol' not %s" % (type(x)))
        # NOTE(review): x.is_integer can be None for a plain Symbol, and it
        # is And-ed into the conditions below — confirm intended semantics.
        cond1 = Eq(x, 1) & x.is_integer
        cond2 = Ge(x, 1) & Le(x, self.k) & x.is_integer
        return Piecewise((1/self.k, cond1), (1/(x*(x - 1)), cond2), (S.Zero, True))
def IdealSoliton(name, k):
    r"""
    Create a Finite Random Variable of Ideal Soliton Distribution

    Parameters
    ==========

    k : Positive Integer
        Represents the number of input symbols in an LT (Luby Transform) code.

    Examples
    ========

    >>> from sympy.stats import IdealSoliton, density, P, E
    >>> sol = IdealSoliton('sol', 5)
    >>> density(sol).dict
    {1: 1/5, 2: 1/2, 3: 1/6, 4: 1/12, 5: 1/20}
    >>> density(sol).set
    {1, 2, 3, 4, 5}

    >>> from sympy import Symbol
    >>> k = Symbol('k', positive=True, integer=True)
    >>> sol = IdealSoliton('sol', k)
    >>> density(sol).dict
    Density(IdealSolitonDistribution(k))
    >>> density(sol).dict.subs(k, 10).doit()
    {1: 1/10, 2: 1/2, 3: 1/6, 4: 1/12, 5: 1/20, 6: 1/30, 7: 1/42, 8: 1/56, 9: 1/72, 10: 1/90}

    >>> E(sol.subs(k, 10))
    7381/2520

    >>> P(sol.subs(k, 4) > 2)
    1/4

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Soliton_distribution#Ideal_distribution
    .. [2] http://pages.cs.wisc.edu/~suman/courses/740/papers/luby02lt.pdf

    """
    # Thin wrapper over the generic factory with an IdealSolitonDistribution.
    return rv(name, IdealSolitonDistribution, k)
class RobustSolitonDistribution(SingleFiniteDistribution):
    """Robust soliton distribution on 1..k: the ideal soliton term rho plus
    a correction term tau, normalized by Z."""

    _argnames= ('k', 'delta', 'c')

    @staticmethod
    def check(k, delta, c):
        _value_check(k.is_integer and k.is_positive,
                    "'k' must be a positive integer")
        _value_check(Gt(delta, 0) and Le(delta, 1),
                    "'delta' must be a real number in the interval (0,1)")
        _value_check(c.is_positive,
                    "'c' must be a positive real number.")

    @property
    def R(self):
        # Spike location parameter: c * ln(k/delta) * sqrt(k).
        return self.c * log(self.k/self.delta) * self.k**0.5

    @property
    def Z(self):
        # Normalization constant: 1 + (R/k) * (sum_{i<k/R} 1/i + ln(R/delta)).
        z = 0
        for i in Range(1, round(self.k/self.R)):
            z += (1/i)
        z += log(self.R/self.delta)
        return 1 + z * self.R/self.k

    @property
    def low(self):
        return S.One

    @property
    def high(self):
        return self.k

    @property
    def set(self):
        return set(list(Range(1, self.k+1)))

    @property
    def is_symbolic(self):
        # True when any of k, c, delta is non-numeric.
        return not all([self.k.is_number, self.c.is_number, self.delta.is_number])

    def pmf(self, x):
        x = sympify(x)
        if not (x.is_number or x.is_Symbol or is_random(x)):
            raise ValueError("'x' expected as an argument of type 'number' or 'Symbol' or , "
                        "'RandomSymbol' not %s" % (type(x)))
        # rho: the ideal soliton component.
        cond1 = Eq(x, 1) & x.is_integer
        cond2 = Ge(x, 1) & Le(x, self.k) & x.is_integer
        rho = Piecewise((Rational(1, self.k), cond1), (Rational(1, x*(x-1)), cond2), (S.Zero, True))
        # tau: extra mass below and a spike at round(k/R).
        cond1 = Ge(x, 1) & Le(x, round(self.k/self.R)-1)
        cond2 = Eq(x, round(self.k/self.R))
        tau = Piecewise((self.R/(self.k * x), cond1), (self.R * log(self.R/self.delta)/self.k, cond2), (S.Zero, True))
        return (rho + tau)/self.Z
def RobustSoliton(name, k, delta, c):
    r'''
    Create a Finite Random Variable of Robust Soliton Distribution

    Parameters
    ==========

    k : Positive Integer
        Represents the number of input symbols in an LT (Luby Transform) code.
    delta : Positive Rational Number
        Represents the failure probability. Must be in the interval (0,1).
    c : Positive Rational Number
        Constant of proportionality. Values close to 1 are recommended

    Examples
    ========

    >>> from sympy.stats import RobustSoliton, density, P, E
    >>> robSol = RobustSoliton('robSol', 5, 0.5, 0.01)
    >>> density(robSol).dict
    {1: 0.204253668152708, 2: 0.490631107897393, 3: 0.165210624506162, 4: 0.0834387731899302, 5: 0.0505633404760675}
    >>> density(robSol).set
    {1, 2, 3, 4, 5}

    >>> from sympy import Symbol
    >>> k = Symbol('k', positive=True, integer=True)
    >>> c = Symbol('c', positive=True)
    >>> robSol = RobustSoliton('robSol', k, 0.5, c)
    >>> density(robSol).dict
    Density(RobustSolitonDistribution(k, 0.5, c))
    >>> density(robSol).dict.subs(k, 10).subs(c, 0.03).doit()
    {1: 0.116641095387194, 2: 0.467045731687165, 3: 0.159984123349381, 4: 0.0821431680681869, 5: 0.0505765646770100,
    6: 0.0345781523420719, 7: 0.0253132820710503, 8: 0.0194459129233227, 9: 0.0154831166726115, 10: 0.0126733075238887}

    >>> E(robSol.subs(k, 10).subs(c, 0.05))
    2.91358846104106

    >>> P(robSol.subs(k, 4).subs(c, 0.1) > 2)
    0.243650614389834

    Returns
    =======

    RandomSymbol

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Soliton_distribution#Robust_distribution
    .. [2] http://www.inference.org.uk/mackay/itprnn/ps/588.596.pdf
    .. [3] http://pages.cs.wisc.edu/~suman/courses/740/papers/luby02lt.pdf

    '''
    # Thin wrapper over the generic factory with a RobustSolitonDistribution.
    return rv(name, RobustSolitonDistribution, k, delta, c)
| [
"ken.chung@thebaselab.com"
] | ken.chung@thebaselab.com |
7bc3752326b5e034c6817753378bc836615d0592 | 6c39a06a2d790da264cabf1a99e9cba87fc6d730 | /vendeur/migrations/0014_vendeur_vend_coockie_hash.py | 8cc7d017252f797f2ac101bcf9650e4b7fabd297 | [] | no_license | lahdirakram/ameera | 0402f7662c562ea56d90241b0912e4065e02dd45 | e55e50cbc2cea491006895fbcf7d841bbad55de2 | refs/heads/master | 2021-06-04T17:24:09.794466 | 2021-06-01T18:41:13 | 2021-06-01T18:41:13 | 150,782,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 2.1 on 2018-09-12 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendeur', '0013_vendeur_vend_user_name'),
]
operations = [
migrations.AddField(
model_name='vendeur',
name='vend_coockie_hash',
field=models.BigIntegerField(default=0),
),
]
| [
"akramdzbuisness@gmail.com"
] | akramdzbuisness@gmail.com |
294e836c78962487589e0c6b7f5f11d385e78346 | 50840dc4dde4f19f7b9413dc964490b1288db03e | /src/createTree.py | 2c0cbf7bdf19287dabdaf87f48a42053875372b1 | [] | no_license | varunbachalli/RRT | e4194123f08ac98f189dc7bc96f503c059c59d19 | f1688f9193a9edad388895a90ded49b4ab161cee | refs/heads/master | 2022-12-11T04:27:30.064482 | 2020-08-30T07:02:43 | 2020-08-30T07:02:43 | 291,110,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | def create_tree(height, nodes_per_node):
r = 0
for i in range(height):
r += nodes_per_node**i
f = open("tree structure.txt", "w")
for i in range(r):
f.write(f"TreeNode* n{i} = new TreeNode({i}.0,{i}.0);\n")
for i in range(r-1):
f.write(f"n{int((i)/3)}->children.insert(std::pair<TreeNode*")
f.write(f", double>(n{i+1},euclidean_distance(n{i+1}->x, n{i+1}->y, n{int((i)/3)}->x, n{int((i)/3)}->y)));\n")
f.close()
create_tree(3,3) | [
"varun_bachalli@hotmail.com"
] | varun_bachalli@hotmail.com |
a4939c1fd486001c5569097c8d0b69969c4afcca | 06c54acbc3d93601182170eef1c8f69396644003 | /glTools-master/tools/mirrorDeformerWeights.py | fd416031993b524c6ae37273571ed212844d52a9 | [] | no_license | moChen0607/pubTool | bfb05b7ba763c325b871a60d1a690bd67d6ad888 | 16337badb6d1b4266f31008ceb17cfd70fec3623 | refs/heads/master | 2021-05-31T17:59:06.840382 | 2016-06-06T07:11:42 | 2016-06-06T07:11:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,666 | py | import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import maya.OpenMayaAnim as OpenMayaAnim
import glTools.utils.mesh
import glTools.utils.deformer
import glTools.tools.symmetryTable
class UserInputError(Exception): pass
def mirrorWeights(mesh,deformer,axis='x',posToNeg=True,refMesh=''):
'''
Mirror deformer weights
@param mesh: Mesh to mirror weights on
@type mesh: str
@param deformer: Deformer to mirror weights for
@type deformer: str
@param axis: Axis to mirror weights across
@type axis: str
@param posToNeg: Apply weight mirror from positive to negative vertices
@type posToNeg: bool
@param refMesh: Mesh used for symmetry reference
@type refMesh: str
'''
# Check deformers
if not mc.objExists(deformer):
raise UserInputError('Deformer "'+deformer+'" does not exist!!')
# Check refMesh
if not refMesh: refMesh = mesh
# Get symmetry table
axisIndex = {'x':0,'y':1,'z':2}[axis]
sTable = glTools.tools.symmetryTable.SymmetryTable()
symTable = sTable.buildSymTable(refMesh,axisIndex)
# Get current weights
wt = glTools.utils.deformer.getWeights(deformer)
mem = glTools.utils.deformer.getDeformerSetMemberIndices(deformer,mesh)
# Mirror weights
for i in [sTable.negativeIndexList,sTable.positiveIndexList][int(posToNeg)]:
if mem.count(i) and mem.count(symTable[i]):
wt[mem.index(symTable[i])] = wt[mem.index(i)]
# Apply mirrored weights
glTools.utils.deformer.setWeights(deformer,wt,mesh)
def flipWeights(mesh,sourceDeformer,targetDeformer='',axis='x',refMesh=''):
'''
Flip deformer weights
@param mesh: Mesh to flip weights for
@type mesh: str
@param sourceDeformer: Deformer to query weights from
@type sourceDeformer: str
@param targetDeformer: Deformer to apply weights to
@type targetDeformer: str
@param axis: Axis to flip weights across
@type axis: str
@param refMesh: Mesh used for symmetry reference
@type refMesh: str
'''
# Check deformers
if not mc.objExists(sourceDeformer):
raise UserInputError('Source deformer '+sourceDeformer+' does not exist!!')
if targetDeformer and not mc.objExists(targetDeformer):
raise UserInputError('Traget deformer '+targetDeformer+' does not exist!!')
if not targetDeformer:
targetDeformer = sourceDeformer
# Check refMesh
if not refMesh: refMesh = mesh
# Get mesh shape
meshShape = mesh
if mc.objectType(meshShape) == 'transform':
meshShape = mc.listRelatives(mesh,s=True,ni=True)[0]
# Get symmetry table
axisIndex = {'x':0,'y':1,'z':2}[axis]
symTable = glTools.common.symmetryTable.SymmetryTable().buildSymTable(refMesh,axisIndex)
# Get current weights
wt = glTools.utils.deformer.getWeights(sourceDeformer,mesh)
sourceMem = glTools.utils.deformer.getDeformerSetMemberIndices(sourceDeformer,meshShape)
targetMem = glTools.utils.deformer.getDeformerSetMemberIndices(targetDeformer,meshShape)
targetWt = [0.0 for i in range(len(targetMem))]
# Mirror weights
for i in sourceMem:
if targetMem.count(symTable[i]):
try: targetWt[targetMem.index(symTable[i])] = wt[sourceMem.index(i)]
except:
print('Error @: '+str(symTable[i]))
pass
else:
print('Cant find sym index for '+str(i))
# Apply mirrored weights
glTools.utils.deformer.setWeights(targetDeformer,targetWt,mesh)
def copyWeights(sourceMesh,targetMesh,sourceDeformer,targetDeformer):
'''
Copy deformer weights from one mesh to another.
Source and Target mesh objects must have matching point order!
@param sourceMesh: Mesh to copy weights from
@type sourceMesh: str
@param targetMesh: Mesh to copy weights to
@type targetMesh: str
@param sourceDeformer: Deformer to query weights from
@type sourceDeformer: str
@param targetDeformer: Deformer to apply weights to
@type targetDeformer: str
'''
# Check source and target mesh
if not mc.objExists(sourceMesh):
raise UserInputError('Source mesh "'+sourceMesh+'" does not exist!!')
if not mc.objExists(targetMesh):
raise UserInputError('Target mesh "'+targetMesh+'" does not exist!!')
# Check deformers
if not mc.objExists(sourceDeformer):
raise UserInputError('Source deformer "'+sourceDeformer+'" does not exist!!')
if targetDeformer and not mc.objExists(targetDeformer):
raise UserInputError('Target deformer "'+targetDeformer+'" does not exist!!')
if not targetDeformer: targetDeformer = sourceDeformer
# Compare vertex count
if mc.polyEvaluate(sourceMesh,v=True) != mc.polyEvaluate(targetMesh,v=True):
raise UserInputError('Source and Target mesh vertex counts do not match!!')
# Copy weights
wtList = glTools.utils.deformer.getWeights(sourceDeformer,sourceMesh)
# Paste weights
glTools.utils.deformer.setWeights(targetDeformer,wtList,targetMesh)
| [
"auqeyjf@163.com"
] | auqeyjf@163.com |
1fa69557305442cb0b6e51932658fd93a554a32e | 12ee7ca8cf17d6dde32028a6f4259aea73793408 | /writing.py | afd97057b03fdbeffedd3f78edf25d5e57a1960d | [] | no_license | erikrev/py_io | 1ea5f3e9655009e565bd2884fddb1197bd26ddd3 | 491133beea6d64d419d0b94d38c7d2e152606315 | refs/heads/main | 2023-01-04T10:24:28.311475 | 2020-11-06T19:43:18 | 2020-11-06T19:43:18 | 310,313,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # # cities = ["Adelaide", "Alice Springs", "Darwin", "Melbourne", "Sydney"]
# #
# # with(open("cities.txt", "w")) as city_file:
# # for city in cities:
# # print(city, file=city_file) # stdout pipe
# #
# # cities_empty = []
# #
# # with(open("cities.txt", "r")) as city_file:
# # for city in city_file:
# # cities.append(city.strip("\n"))
# #
# # print(cities)
# # for city in cities:
# # print(city)
#
# with open("binary", "bw") as bin_file:
# bin_file.write(bytes(range(17)))
#
# with open("binary", "br") as binFile:
# for b in binFile:
# print(b)
import pickle
imelda = ("More Mayhem", "Imelda May", "2011", ((1, "Pulling the rug"), (2, "Psycho")))
with open("imelda.pickle", "wb") as pickle_file:
pickle.dump(imelda, pickle_file)
| [
"erik.revaj3@gmail.com"
] | erik.revaj3@gmail.com |
73b93a628abb566067c2eb92e65f7271a0f5927b | 3f309b1dd9774ca1eef2c7bb7626447e6c3dbe70 | /peripheral/can_u2003/config/can.py | 4ed9f1ab5079c421fc350bd305ee791c8a5fc871 | [
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"LicenseRef-scancode-public-domain"
] | permissive | Unitek-KL/csp | 30892ddf1375f5191173cafdfba5f098245a0ff7 | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | refs/heads/master | 2020-12-10T13:42:26.878408 | 2019-10-14T17:55:22 | 2019-10-14T17:56:20 | 233,609,402 | 0 | 0 | NOASSERTION | 2020-01-13T14:04:51 | 2020-01-13T14:04:51 | null | UTF-8 | Python | false | false | 38,515 | py | # coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
# NOTE: the former module-level "global interruptVector/interruptHandler/
# interruptHandlerLock" statements were removed - a "global" declaration is
# meaningful only inside a function body; at module scope it is a no-op.
# instantiateComponent() declares these names global where they are assigned.

# Payload sizes selectable for RX/TX element configuration.
canElementSizes = ["8 bytes", "12 bytes", "16 bytes", "20 bytes", "24 bytes", "32 bytes", "48 bytes", "64 bytes"]

# Supported CAN operation modes for this peripheral instance.
opModeValues = ["NORMAL", "CAN FD"]

# Menu symbols for every created standard/extended filter, filled in as the
# filters are created; index corresponds to (filter number - 1).
stdFilterList = []
extFilterList = []
# if the mode is changed to FD, then show options for more bytes
def showWhenFD(element, event):
    """Dependency callback: show the symbol only while 'CAN FD' mode is selected."""
    element.setVisible(event["value"] == 'CAN FD')
# Rx Buffer Element size
def RxBufferElementSize(element, event):
    """Dependency callback for the dedicated Rx Buffer element-size symbol.

    The symbol is shown (and made editable) only when CAN FD mode is active
    AND the dedicated Rx Buffer is enabled.  The changed value arrives in
    'event'; the other setting is read back from the symbol database.
    """
    instance = canInstanceName.getValue().lower()
    fdJustSelected = (event["id"] == 'CAN_OPMODE' and event["value"] == 'CAN FD'
                      and Database.getSymbolValue(instance, "RXBUF_USE") == True)
    bufJustEnabled = (event["id"] == 'RXBUF_USE' and event["value"] == True
                      and Database.getSymbolValue(instance, "CAN_OPMODE") == 'CAN FD')
    showField = fdJustSelected or bufJustEnabled
    element.setVisible(showField)
    element.setReadOnly(not showField)
# for FD. Expects keyValue symbol. Use for RX and TX
def adornElementSize(fifo):
    """Populate a key/value symbol with the CAN FD payload-size choices.

    Register values 0..7 map to 8/12/16/20/24/32/48/64 data bytes; the
    default selection is 8 bytes (value 0).  Used for RX and TX elements.
    """
    for regValue, nbytes in enumerate((8, 12, 16, 20, 24, 32, 48, 64)):
        fifo.addKey("%d bytes" % nbytes, str(regValue), "%d Bytes" % nbytes)
    fifo.setDefaultValue(0)
    fifo.setOutputMode("Value")
    fifo.setDisplayMode("Description")
# if mode is changed to NORMAL then set element size to 8 bytes
def updateElementSize(symbol, event):
    """Dependency callback: expose the element-size selection only in CAN FD mode.

    In NORMAL mode the selection is hidden and locked (frames carry at most
    8 data bytes there, so the choice is irrelevant).
    """
    fdMode = (event["value"] == 'CAN FD')
    symbol.setVisible(fdMode)
    symbol.setReadOnly(not fdMode)
# for extended and standard filters
def adornFilterType(filterType):
    """Populate a filter-type symbol with the three matching strategies.

    Values 0..2 select Range / Dual-ID / Classic mask-value filtering; the
    default is Range.  Shared by standard and extended filter creation.
    """
    typeTable = (("Range", "Based on Range"),
                 ("Dual", "Based on Dual ID"),
                 ("Classic", "Based on Classic Mask/Value"))
    for value, (key, desc) in enumerate(typeTable):
        filterType.addKey(key, str(value), desc)
    filterType.setOutputMode("Value")
    filterType.setDisplayMode("Key")
    filterType.setDefaultValue(0)
# for extended and standard filter configurations
def adornFilterConfig(config):
    """Populate a filter element-configuration symbol.

    Values 0..7 select what happens to a matching frame (discard, route to
    RX FIFO 0/1, reject, set priority, or store in a dedicated Rx Buffer).
    The default is value 1: store in RX FIFO 0.
    """
    configTable = (("Disabled", "Filter is Disabled"),
                   ("RXF0", "Store in RX FIFO 0"),
                   ("RXF1", "Store in RX FIFO 1"),
                   ("Reject", "Reject"),
                   ("Priority", "Set priority"),
                   ("Priority0", "Set priority and store in FIFO 0"),
                   ("Priority1", "Set priority and store in FIFO 1"),
                   ("RXBUF", "Store into Rx Buffer"))
    for value, (key, desc) in enumerate(configTable):
        config.addKey(key, str(value), desc)
    config.setOutputMode("Value")
    config.setDisplayMode("Description")
    config.setDefaultValue(1)
def standardFilterRangeCheck(symbol, event):
    """Show the warning comment when a Range-type standard filter has ID1 > ID2.

    The filter number is recovered from the triggering symbol id (e.g.
    "..._STD_FILTER3_SFID1" -> "3").  The warning only applies to Range
    filters (type 0) that are not configured as Rx-Buffer stores (config 7),
    because for Rx-Buffer elements ID2 is not an upper bound.
    """
    instance = canInstanceName.getValue().lower()
    filterNum = event["id"].split("_")[2].split("FILTER")[1]
    prefix = canInstanceName.getValue() + "_STD_FILTER" + str(filterNum)
    showWarning = False
    if Database.getSymbolValue(instance, prefix + "_TYPE") == 0:
        lowId = Database.getSymbolValue(instance, prefix + "_SFID1")
        highId = Database.getSymbolValue(instance, prefix + "_SFID2")
        cfg = Database.getSymbolValue(instance, prefix + "_CONFIG")
        showWarning = (lowId > highId) and (cfg != 7)
    symbol.setVisible(showWarning)
def canCreateStdFilter(component, menu, filterNumber):
    """Create the menu symbols for one standard (11-bit) message filter.

    Builds the filter submenu under 'menu' with type, ID1/ID2 (0..2047),
    a hidden range warning, and the element configuration.  The submenu is
    created hidden/disabled; adjustStdFilters() reveals it when the user
    raises the filter count.  Returns the submenu symbol.
    """
    prefix = canInstanceName.getValue() + "_STD_FILTER" + str(filterNumber)

    stdFilter = component.createMenuSymbol(prefix, menu)
    stdFilter.setLabel("Standard Filter " + str(filterNumber))

    filterType = component.createKeyValueSetSymbol(prefix + "_TYPE", stdFilter)
    filterType.setLabel("Type")
    adornFilterType(filterType)

    # Standard IDs are 11 bits wide: 0..2047.
    id1Sym = component.createIntegerSymbol(prefix + "_SFID1", stdFilter)
    id1Sym.setLabel("ID1")
    id1Sym.setMin(0)
    id1Sym.setMax(2047)

    id2Sym = component.createIntegerSymbol(prefix + "_SFID2", stdFilter)
    id2Sym.setLabel("ID2")
    id2Sym.setMin(0)
    id2Sym.setMax(2047)

    rangeWarning = component.createCommentSymbol(prefix + "_COMMENT", stdFilter)
    rangeWarning.setLabel("Warning!!! " + canInstanceName.getValue() + " Standard Filter " + str(filterNumber) + " ID2 must be greater or equal to ID1")
    rangeWarning.setVisible(False)

    filterConfig = component.createKeyValueSetSymbol(prefix + "_CONFIG", stdFilter)
    filterConfig.setLabel("Element Configuration")
    adornFilterConfig(filterConfig)

    # Re-evaluate the range warning whenever any field of this filter changes.
    rangeWarning.setDependencies(standardFilterRangeCheck, [prefix + "_TYPE",
                                                            prefix + "_SFID1",
                                                            prefix + "_SFID2",
                                                            prefix + "_CONFIG"])

    stdFilter.setVisible(False)
    stdFilter.setEnabled(False)
    return stdFilter
def extendedFilterRangeCheck(symbol, event):
    """Show the warning comment when a Range-type extended filter has ID1 > ID2.

    Mirrors standardFilterRangeCheck() for the extended (29-bit) filters:
    the warning applies only to Range filters (type 0) whose configuration
    is not 'store into Rx Buffer' (config 7).
    """
    instance = canInstanceName.getValue().lower()
    filterNum = event["id"].split("_")[2].split("FILTER")[1]
    prefix = canInstanceName.getValue() + "_EXT_FILTER" + str(filterNum)
    showWarning = False
    if Database.getSymbolValue(instance, prefix + "_TYPE") == 0:
        lowId = Database.getSymbolValue(instance, prefix + "_EFID1")
        highId = Database.getSymbolValue(instance, prefix + "_EFID2")
        cfg = Database.getSymbolValue(instance, prefix + "_CONFIG")
        showWarning = (lowId > highId) and (cfg != 7)
    symbol.setVisible(showWarning)
def canCreateExtFilter(component, menu, filterNumber):
    """Create the menu symbols for one extended (29-bit) message filter.

    Builds the filter submenu under 'menu' with type, ID1/ID2
    (0..536870911, i.e. 2^29 - 1), a hidden range warning, and the element
    configuration.  The submenu is created hidden/disabled;
    adjustExtFilters() reveals it when the user raises the filter count.
    Returns the submenu symbol.
    """
    prefix = canInstanceName.getValue() + "_EXT_FILTER" + str(filterNumber)

    extFilter = component.createMenuSymbol(prefix, menu)
    extFilter.setLabel("Extended Filter " + str(filterNumber))

    filterType = component.createKeyValueSetSymbol(prefix + "_TYPE", extFilter)
    filterType.setLabel("Type")
    adornFilterType(filterType)

    # Extended IDs are 29 bits wide: 0..536870911.
    id1Sym = component.createIntegerSymbol(prefix + "_EFID1", extFilter)
    id1Sym.setLabel("ID1")
    id1Sym.setMin(0)
    id1Sym.setMax(536870911)

    id2Sym = component.createIntegerSymbol(prefix + "_EFID2", extFilter)
    id2Sym.setLabel("ID2")
    id2Sym.setMin(0)
    id2Sym.setMax(536870911)

    rangeWarning = component.createCommentSymbol(prefix + "_COMMENT", extFilter)
    rangeWarning.setLabel("Warning!!! " + canInstanceName.getValue() + " Extended Filter " + str(filterNumber) + " ID2 must be greater or equal to ID1")
    rangeWarning.setVisible(False)

    filterConfig = component.createKeyValueSetSymbol(prefix + "_CONFIG", extFilter)
    filterConfig.setLabel("Element Configuration")
    adornFilterConfig(filterConfig)

    # Re-evaluate the range warning whenever any field of this filter changes.
    rangeWarning.setDependencies(extendedFilterRangeCheck, [prefix + "_TYPE",
                                                            prefix + "_EFID1",
                                                            prefix + "_EFID2",
                                                            prefix + "_CONFIG"])

    extFilter.setVisible(False)
    extFilter.setEnabled(False)
    return extFilter
# adjust how many standard filters are shown based on number entered
def adjustStdFilters(filterList, event):
    """Dependency callback: show the first event["value"] standard filters.

    Iterates the module-level stdFilterList and reveals/enables filters up
    to the requested count while hiding/disabling the rest.  Symbols are
    only touched when their visibility actually changes.
    """
    requested = event["value"]
    for index, filterSym in enumerate(stdFilterList):
        shouldShow = index < requested
        if filterSym.getVisible() != shouldShow:
            filterSym.setVisible(shouldShow)
            filterSym.setEnabled(shouldShow)
# adjust how many extended filters are shown based on number entered
def adjustExtFilters(filterList, event):
    """Dependency callback: show the first event["value"] extended filters.

    Iterates the module-level extFilterList and reveals/enables filters up
    to the requested count while hiding/disabling the rest.  Symbols are
    only touched when their visibility actually changes.
    """
    requested = event["value"]
    for index, filterSym in enumerate(extFilterList):
        shouldShow = index < requested
        if filterSym.getVisible() != shouldShow:
            filterSym.setVisible(shouldShow)
            filterSym.setEnabled(shouldShow)
def interruptControl(symbol, event):
    """Dependency callback: wire the core interrupt settings to Interrupt Mode.

    When interrupt mode is turned on, the vector is enabled and the handler
    is pointed at the PLIB's <instance>_InterruptHandler (locked so the
    user cannot edit it); turning it off restores the default
    <instance>_Handler and releases the lock.
    """
    enable = (event["value"] == True)
    handlerSuffix = "_InterruptHandler" if enable else "_Handler"
    Database.setSymbolValue("core", interruptVector, enable, 2)
    Database.setSymbolValue("core", interruptHandler, canInstanceName.getValue() + handlerSuffix, 2)
    Database.setSymbolValue("core", interruptHandlerLock, enable, 2)
# Dependency Function to show or hide the warning message depending on Interrupt enable/disable status
def InterruptStatusWarning(symbol, event):
    """Dependency callback: surface the warning only while interrupt mode is on.

    If the PLIB's INTERRUPT_MODE symbol is enabled, mirror the incoming
    event value into the warning comment's visibility; otherwise leave the
    visibility untouched.
    """
    interruptModeOn = Database.getSymbolValue(canInstanceName.getValue().lower(), "INTERRUPT_MODE")
    if interruptModeOn == True:
        symbol.setVisible(event["value"])
def canCoreClockFreq(symbol, event):
    """Dependency callback: refresh the displayed CAN core clock frequency."""
    clockHz = Database.getSymbolValue("core", canInstanceName.getValue() + "_CLOCK_FREQUENCY")
    symbol.setValue(int(clockHz), 2)
def bitTimingCalculation(bitTiming, lowTq, highTq):
    """Compute the time-segment register values for one CAN bit-timing phase.

    bitTiming -- "Data" (CAN FD data phase, DBTP_* symbols) or anything else
                 (nominal phase, NBTP_* symbols); selects which bit rate,
                 prescaler and sample-point symbols are read.
    lowTq     -- minimum acceptable number of time quanta per bit.
    highTq    -- maximum acceptable number of time quanta per bit.

    Returns (tseg1, tseg2).  Side effect: shows/hides the shared
    canTimeQuantaInvalidSym / canCoreClockInvalidSym warning comments when
    the quanta count falls outside [lowTq, highTq].
    """
    # CAN core clock in Hz, as reported by the clock manager.
    clk = Database.getSymbolValue("core", canInstanceName.getValue() + "_CLOCK_FREQUENCY")
    if (bitTiming == "Data"):
        # CAN FD data-phase settings.
        prescaler = Database.getSymbolValue(canInstanceName.getValue().lower(), "DBTP_DBRP")
        bitrate = Database.getSymbolValue(canInstanceName.getValue().lower(), "DATA_BITRATE")
        samplePoint = Database.getSymbolValue(canInstanceName.getValue().lower(), "DATA_SAMPLE_POINT")
    else:
        # Nominal (arbitration) phase settings.
        prescaler = Database.getSymbolValue(canInstanceName.getValue().lower(), "NBTP_NBRP")
        bitrate = Database.getSymbolValue(canInstanceName.getValue().lower(), "NOMINAL_BITRATE")
        samplePoint = Database.getSymbolValue(canInstanceName.getValue().lower(), "NOMINAL_SAMPLE_POINT")
    # Quanta per bit = clk / (bitrate_in_bps * (prescaler + 1)).  Bitrate
    # symbols are in kbps, hence the * 1000.
    # NOTE(review): with integer operands this is a floor division under the
    # MCC scripting runtime - presumably intentional; confirm.
    numOfTimeQuanta = clk / ((bitrate * 1000) * (prescaler + 1))
    if (numOfTimeQuanta < lowTq):
        # Too few quanta: the clock is too slow for the requested bit rate.
        canTimeQuantaInvalidSym.setLabel("Warning!!! Number of Time Quanta is too low for required " + bitTiming + " Bit Timing")
        canTimeQuantaInvalidSym.setVisible(True)
        canCoreClockInvalidSym.setLabel("Warning!!! " + canInstanceName.getValue() + " Clock Frequency is too low for required " + bitTiming + " Bit Timing")
        canCoreClockInvalidSym.setVisible(True)
    elif (numOfTimeQuanta > highTq):
        # Too many quanta: the clock is too fast for the segment registers.
        canTimeQuantaInvalidSym.setLabel("Warning!!! Number of Time Quanta is too high for required " + bitTiming + " Bit Timing")
        canTimeQuantaInvalidSym.setVisible(True)
        canCoreClockInvalidSym.setLabel("Warning!!! " + canInstanceName.getValue() + " Clock Frequency is too high for required " + bitTiming + " Bit Timing")
        canCoreClockInvalidSym.setVisible(True)
    else:
        canTimeQuantaInvalidSym.setVisible(False)
        canCoreClockInvalidSym.setVisible(False)
    # Split the quanta at the sample point: tseg1 before, tseg2 after.
    tseg1 = int((numOfTimeQuanta * samplePoint) / 100.0)
    tseg2 = numOfTimeQuanta - tseg1 - 1
    # NOTE(review): the -2 offset looks like the register's "minus fixed
    # segments" encoding (sync + 1) - verify against the device's
    # NBTP/DBTP field definitions.
    tseg1 -= 2
    return tseg1, tseg2
def dataBitTimingCalculation(symbol, event):
    """Dependency callback: recompute the CAN FD data-phase time segments.

    Does nothing unless CAN FD mode is active.  Data-phase bits must use
    between 4 and 49 time quanta.
    """
    instance = canInstanceName.getValue().lower()
    if (Database.getSymbolValue(instance, "CAN_OPMODE") != "CAN FD"):
        return
    seg1, seg2 = bitTimingCalculation("Data", 4, 49)
    Database.setSymbolValue(instance, "DBTP_DTSEG1", seg1, 2)
    Database.setSymbolValue(instance, "DBTP_DTSEG2", seg2, 2)
def nominalBitTimingCalculation(symbol, event):
    """Dependency callback: recompute the nominal-phase time segments.

    Nominal bits must use between 4 and 385 time quanta.
    """
    seg1, seg2 = bitTimingCalculation("Nominal", 4, 385)
    instance = canInstanceName.getValue().lower()
    Database.setSymbolValue(instance, "NBTP_NTSEG1", seg1, 2)
    Database.setSymbolValue(instance, "NBTP_NTSEG2", seg2, 2)
def instantiateComponent(canComponent):
global canInstanceName
global interruptVector
global interruptHandler
global interruptHandlerLock
global interruptVectorUpdate
global canCoreClockInvalidSym
global canTimeQuantaInvalidSym
canInstanceName = canComponent.createStringSymbol("CAN_INSTANCE_NAME", None)
canInstanceName.setVisible(False)
canInstanceName.setDefaultValue(canComponent.getID().upper())
print("Running " + canInstanceName.getValue())
def hideMenu(menu, event):
menu.setVisible(event["value"])
#either the watermark % changed or the number of elements
def RXF0WatermarkUpdate(watermark, event):
watermark_percentage = Database.getSymbolValue(canInstanceName.getValue().lower(), "RXF0_WP")
number_of_elements = Database.getSymbolValue(canInstanceName.getValue().lower(), "RXF0_ELEMENTS")
watermark.setValue(watermark_percentage * number_of_elements / 100, 0)
def RXF1WatermarkUpdate(watermark, event):
watermark_percentage = Database.getSymbolValue(canInstanceName.getValue().lower(), "RXF1_WP")
number_of_elements = Database.getSymbolValue(canInstanceName.getValue().lower(), "RXF1_ELEMENTS")
watermark.setValue(watermark_percentage * number_of_elements / 100, 0)
def TXWatermarkUpdate(watermark, event):
watermark_percentage = Database.getSymbolValue(canInstanceName.getValue().lower(), "TX_FIFO_WP")
number_of_elements = Database.getSymbolValue(canInstanceName.getValue().lower(), "TX_FIFO_ELEMENTS")
watermark.setValue(watermark_percentage * number_of_elements / 100, 0)
# Initialize peripheral clock
Database.setSymbolValue("core", canInstanceName.getValue()+"_CLOCK_ENABLE", True, 1)
# CAN operation mode - default to FD
canOpMode = canComponent.createComboSymbol("CAN_OPMODE", None, opModeValues)
canOpMode.setLabel("CAN Operation Mode")
canOpMode.setDefaultValue("NORMAL")
canInterruptMode = canComponent.createBooleanSymbol("INTERRUPT_MODE", None)
canInterruptMode.setLabel("Interrupt Mode")
canInterruptMode.setDefaultValue(False)
interruptVector = canInstanceName.getValue() + "_INTERRUPT_ENABLE"
interruptHandler = canInstanceName.getValue() + "_INTERRUPT_HANDLER"
interruptHandlerLock = canInstanceName.getValue() + "_INTERRUPT_HANDLER_LOCK"
interruptVectorUpdate = canInstanceName.getValue() + "_INTERRUPT_ENABLE_UPDATE"
# CAN Bit Timing Calculation
canBitTimingCalculationMenu = canComponent.createMenuSymbol("BIT_TIMING_CALCULATION", None)
canBitTimingCalculationMenu.setLabel("Bit Timing Calculation")
canBitTimingCalculationMenu.setDescription("CAN Bit Timing Calculation for Normal and CAN-FD Operation")
canCoreClockValue = canComponent.createIntegerSymbol("CAN_CORE_CLOCK_FREQ", canBitTimingCalculationMenu)
canCoreClockValue.setLabel("Clock Frequency")
canCoreClockValue.setReadOnly(True)
canCoreClockValue.setDefaultValue(int(Database.getSymbolValue("core", canInstanceName.getValue() + "_CLOCK_FREQUENCY")))
canCoreClockValue.setVisible(True)
canCoreClockValue.setDependencies(canCoreClockFreq, ["core." + canInstanceName.getValue() + "_CLOCK_FREQUENCY"])
canCoreClockInvalidSym = canComponent.createCommentSymbol("CAN_CORE_CLOCK_INVALID_COMMENT", None)
canCoreClockInvalidSym.setLabel("Warning!!! " + canInstanceName.getValue() + " Clock Frequency is too low for required Nominal Bit Timing")
canCoreClockInvalidSym.setVisible((canCoreClockValue.getValue() == 0))
canTimeQuantaInvalidSym = canComponent.createCommentSymbol("CAN_TIME_QUANTA_INVALID_COMMENT", None)
canTimeQuantaInvalidSym.setLabel("Warning!!! Number of Time Quanta is too low for required Nominal Bit Timing")
canTimeQuantaInvalidSym.setVisible(False)
# CAN Nominal Bit Timing Calculation
canNominalBitTimingMenu = canComponent.createMenuSymbol("NOMINAL_BIT_TIMING_CALCULATION", canBitTimingCalculationMenu)
canNominalBitTimingMenu.setLabel("Nominal Bit Timing")
canNominalBitTimingMenu.setDescription("This timing must be less or equal to the CAN-FD Data Bit Timing if used")
canNominalBitrate = canComponent.createIntegerSymbol("NOMINAL_BITRATE", canNominalBitTimingMenu)
canNominalBitrate.setLabel("Bit Rate (Kbps)")
canNominalBitrate.setMin(1)
canNominalBitrate.setMax(1000)
canNominalBitrate.setDefaultValue(500)
canNominalBitrate.setDependencies(nominalBitTimingCalculation, ["NOMINAL_BITRATE", "core." + canInstanceName.getValue() + "_CLOCK_FREQUENCY"])
canNominalSamplePoint = canComponent.createFloatSymbol("NOMINAL_SAMPLE_POINT", canNominalBitTimingMenu)
canNominalSamplePoint.setLabel("Sample Point %")
canNominalSamplePoint.setMin(50.0)
canNominalSamplePoint.setMax(100.0)
canNominalSamplePoint.setDefaultValue(75.0)
canNominalSamplePoint.setDependencies(nominalBitTimingCalculation, ["NOMINAL_SAMPLE_POINT"])
NBTPsyncJump = canComponent.createIntegerSymbol("NBTP_NSJW", canNominalBitTimingMenu)
NBTPsyncJump.setLabel("Synchronization Jump Width")
NBTPsyncJump.setMin(0)
NBTPsyncJump.setMax(127)
NBTPsyncJump.setDefaultValue(3)
NBTPprescale = canComponent.createIntegerSymbol("NBTP_NBRP", canNominalBitTimingMenu)
NBTPprescale.setLabel("Bit Rate Prescaler")
NBTPprescale.setMin(0)
NBTPprescale.setMax(511)
NBTPprescale.setDefaultValue(0)
NBTPprescale.setDependencies(nominalBitTimingCalculation, ["NBTP_NBRP"])
tseg1, tseg2 = bitTimingCalculation("Nominal", 4, 385)
NBTPBeforeSP = canComponent.createIntegerSymbol("NBTP_NTSEG1", canNominalBitTimingMenu)
NBTPBeforeSP.setLabel("Time Segment Before Sample Point")
NBTPBeforeSP.setMin(1)
NBTPBeforeSP.setMax(255)
NBTPBeforeSP.setDefaultValue(tseg1)
NBTPBeforeSP.setReadOnly(True)
NBTPAfterSP = canComponent.createIntegerSymbol("NBTP_NTSEG2", canNominalBitTimingMenu)
NBTPAfterSP.setLabel("Time Segment After Sample Point")
NBTPAfterSP.setMin(0)
NBTPAfterSP.setMax(127)
NBTPAfterSP.setDefaultValue(tseg2)
NBTPAfterSP.setReadOnly(True)
# CAN Data Bit Timing Calculation
canDataBitTimingMenu = canComponent.createMenuSymbol("DATA_BIT_TIMING_CALCULATION", canBitTimingCalculationMenu)
canDataBitTimingMenu.setLabel("Data Bit Timing")
canDataBitTimingMenu.setDescription("This timing must be greater or equal to the Nominal Bit Timing")
canDataBitTimingMenu.setVisible(False)
canDataBitTimingMenu.setDependencies(showWhenFD, ["CAN_OPMODE"])
canDataBitrate = canComponent.createIntegerSymbol("DATA_BITRATE", canDataBitTimingMenu)
canDataBitrate.setLabel("Bit Rate (Kbps)")
canDataBitrate.setMin(1)
canDataBitrate.setDefaultValue(500)
canDataBitrate.setDependencies(dataBitTimingCalculation, ["DATA_BITRATE", "CAN_OPMODE", "core." + canInstanceName.getValue() + "_CLOCK_FREQUENCY"])
canDataSamplePoint = canComponent.createFloatSymbol("DATA_SAMPLE_POINT", canDataBitTimingMenu)
canDataSamplePoint.setLabel("Sample Point %")
canDataSamplePoint.setMin(50.0)
canDataSamplePoint.setMax(100.0)
canDataSamplePoint.setDefaultValue(75.0)
canDataSamplePoint.setDependencies(dataBitTimingCalculation, ["DATA_SAMPLE_POINT"])
DBTPsyncJump = canComponent.createIntegerSymbol("DBTP_DSJW", canDataBitTimingMenu)
DBTPsyncJump.setLabel("Synchronization Jump Width")
DBTPsyncJump.setMin(0)
DBTPsyncJump.setDefaultValue(3)
DBTPsyncJump.setMax(7)
DBTPprescale = canComponent.createIntegerSymbol("DBTP_DBRP", canDataBitTimingMenu)
DBTPprescale.setLabel("Bit Rate Prescaler")
DBTPprescale.setMin(0)
DBTPprescale.setMax(31)
DBTPprescale.setDefaultValue(0)
DBTPprescale.setDependencies(dataBitTimingCalculation, ["DBTP_DBRP"])
DBTPBeforeSP = canComponent.createIntegerSymbol("DBTP_DTSEG1", canDataBitTimingMenu)
DBTPBeforeSP.setLabel("Time Segment Before Sample Point")
DBTPBeforeSP.setMin(1)
DBTPBeforeSP.setMax(31)
DBTPBeforeSP.setDefaultValue(10)
DBTPBeforeSP.setReadOnly(True)
DBTPAfterSP = canComponent.createIntegerSymbol("DBTP_DTSEG2", canDataBitTimingMenu)
DBTPAfterSP.setLabel("Time Segment After Sample Point")
DBTPAfterSP.setMin(0)
DBTPAfterSP.setDefaultValue(3)
DBTPAfterSP.setMax(15)
DBTPAfterSP.setReadOnly(True)
# ----- Rx FIFO 0 -----
canRXF0 = canComponent.createBooleanSymbol("RXF0_USE", None)
canRXF0.setLabel("Use RX FIFO 0")
canRXF0.setDefaultValue(True)
canRXF0.setReadOnly(True)
canRXF0Menu = canComponent.createMenuSymbol("rxf0Menu", canRXF0)
canRXF0Menu.setLabel("RX FIFO 0 Settings")
canRXF0Menu.setDependencies(hideMenu, ["RXF0_USE"])
# number of RX FIFO 0 elements
# Rx FIFO 0: number of message elements held in message RAM.
canRXF0Elements = canComponent.createIntegerSymbol("RXF0_ELEMENTS", canRXF0Menu)
canRXF0Elements.setLabel("Number of Elements")
canRXF0Elements.setDefaultValue(1)
# NOTE(review): RXF0 allows a minimum of 0 elements while RXF1 below requires
# at least 1 -- confirm this asymmetry is intended (0 presumably disables RXF0).
canRXF0Elements.setMin(0)
canRXF0Elements.setMax(64)
# Watermark expressed as a percentage of the FIFO depth; the absolute element
# index is computed into RXF0_WATERMARK by the RXF0WatermarkUpdate callback.
canRXF0watermarkP = canComponent.createIntegerSymbol("RXF0_WP", canRXF0Menu)
canRXF0watermarkP.setLabel("Watermark %")
canRXF0watermarkP.setDefaultValue(0)
canRXF0watermarkP.setMin(0)
canRXF0watermarkP.setMax(99)
#This is a computed value (read-only; derived from RXF0_ELEMENTS and RXF0_WP)
canRXF0watermark = canComponent.createIntegerSymbol("RXF0_WATERMARK", canRXF0Menu)
canRXF0watermark.setLabel("Watermark at element")
canRXF0watermark.setDescription("A value of 0 disables watermark")
canRXF0watermark.setDefaultValue(0)
canRXF0watermark.setReadOnly(True)
canRXF0watermark.setDependencies(RXF0WatermarkUpdate, ["RXF0_ELEMENTS", "RXF0_WP"])
# Element (data field) size selection; hidden and driven by the CAN operating mode.
canRXF0elementSize = canComponent.createKeyValueSetSymbol("RXF0_BYTES_CFG", canRXF0Menu)
canRXF0elementSize.setLabel("Element Size")
canRXF0elementSize.setVisible(False)
adornElementSize(canRXF0elementSize)
canRXF0elementSize.setDependencies(updateElementSize, ["CAN_OPMODE"])
canRx0overwrite = canComponent.createBooleanSymbol("RXF0_OVERWRITE", canRXF0Menu)
canRx0overwrite.setLabel("Use Overwrite Mode")
canRx0overwrite.setDescription("Overwrite RX FIFO 0 entries without blocking")
canRx0overwrite.setDefaultValue(True)
# ----- Rx FIFO 1 ----- (mirrors the RX FIFO 0 symbols above)
canRXF1 = canComponent.createBooleanSymbol("RXF1_USE", None)
canRXF1.setLabel("Use RX FIFO 1")
canRXF1.setDefaultValue(True)
canRXF1Menu = canComponent.createMenuSymbol("rxf1menu", canRXF1)
canRXF1Menu.setLabel("RX FIFO 1 Settings")
canRXF1Menu.setDependencies(hideMenu, ["RXF1_USE"])
canRXF1Elements = canComponent.createIntegerSymbol("RXF1_ELEMENTS", canRXF1Menu)
canRXF1Elements.setLabel("Number of Elements")
canRXF1Elements.setDefaultValue(1)
canRXF1Elements.setMin(1)
canRXF1Elements.setMax(64)
canRXF1watermarkP = canComponent.createIntegerSymbol("RXF1_WP", canRXF1Menu)
canRXF1watermarkP.setLabel("Watermark %")
canRXF1watermarkP.setDefaultValue(0)
canRXF1watermarkP.setMin(0)
canRXF1watermarkP.setMax(99)
#This is a computed value for watermark (read-only; see RXF1WatermarkUpdate)
canRX1watermark = canComponent.createIntegerSymbol("RXF1_WATERMARK", canRXF1Menu)
canRX1watermark.setLabel("Watermark at element")
canRX1watermark.setDescription("A value of 0 disables watermark")
canRX1watermark.setDefaultValue(0)
canRX1watermark.setReadOnly(True)
canRX1watermark.setDependencies(RXF1WatermarkUpdate, ["RXF1_ELEMENTS", "RXF1_WP"])
canRXF1elementSize = canComponent.createKeyValueSetSymbol("RXF1_BYTES_CFG", canRXF1Menu)
canRXF1elementSize.setLabel("Element Size")
canRXF1elementSize.setVisible(False)
adornElementSize(canRXF1elementSize)
canRXF1elementSize.setDependencies(updateElementSize, ["CAN_OPMODE"])
canRXF1overwrite = canComponent.createBooleanSymbol("RXF1_OVERWRITE", canRXF1Menu)
canRXF1overwrite.setLabel("Use Overwrite Mode")
canRXF1overwrite.setDescription("Overwrite RX FIFO 1 entries without blocking")
canRXF1overwrite.setDefaultValue(True)
# ----- Rx Buffer ----- (dedicated receive buffers, off by default)
canRXBuf = canComponent.createBooleanSymbol("RXBUF_USE", None)
canRXBuf.setLabel("Use Dedicated Rx Buffer")
canRXBuf.setDefaultValue(False)
canRXBufElements = canComponent.createIntegerSymbol("RX_BUFFER_ELEMENTS", canRXBuf)
canRXBufElements.setLabel("Number of Elements")
canRXBufElements.setDefaultValue(1)
canRXBufElements.setMin(1)
canRXBufElements.setMax(64)
canRXBufElements.setVisible(False)
canRXBufElements.setDependencies(hideMenu, ["RXBUF_USE"])
canRXBufelementSize = canComponent.createKeyValueSetSymbol("RX_BUFFER_BYTES_CFG", canRXBuf)
canRXBufelementSize.setLabel("Element Size")
canRXBufelementSize.setVisible(False)
adornElementSize(canRXBufelementSize)
canRXBufelementSize.setDependencies(RxBufferElementSize, ["CAN_OPMODE", "RXBUF_USE"])
# ------ T X --------------
# ----- Tx FIFO -----
canTX = canComponent.createBooleanSymbol("TX_USE", None)
canTX.setLabel("Use TX FIFO")
canTX.setDefaultValue(True)
# TX FIFO is always enabled; the symbol exists so dependent menus can key off it.
canTX.setReadOnly(True)
# make a menu separate for TX so it can be turned off and on at one point
canTXmenu = canComponent.createMenuSymbol("cantx", canTX)
canTXmenu.setLabel("TX FIFO Settings")
canTXmenu.setDependencies(hideMenu, ["TX_USE"])
# number of TX FIFO elements (hardware limit: 32, shared with dedicated TX buffers)
canTXnumElements = canComponent.createIntegerSymbol("TX_FIFO_ELEMENTS", canTXmenu)
canTXnumElements.setLabel("Number of Elements")
canTXnumElements.setDefaultValue(1)
canTXnumElements.setMin(1)
canTXnumElements.setMax(32)
# Watermark as a percentage; absolute element index computed by TXWatermarkUpdate.
canTXwatermarkP = canComponent.createIntegerSymbol("TX_FIFO_WP", canTXmenu)
canTXwatermarkP.setLabel("Watermark %")
canTXwatermarkP.setDefaultValue(0)
canTXwatermarkP.setMin(0)
canTXwatermarkP.setMax(99)
#This is a computed value for watermark (read-only)
canTXwatermark = canComponent.createIntegerSymbol("TX_FIFO_WATERMARK", canTXmenu)
canTXwatermark.setLabel("Watermark at element")
canTXwatermark.setDescription("A value of 0 disables watermark")
canTXwatermark.setDefaultValue(0)
canTXwatermark.setReadOnly(True)
canTXwatermark.setDependencies(TXWatermarkUpdate, ["TX_FIFO_ELEMENTS", "TX_FIFO_WP"])
# Element (data field) size; hidden and driven by the CAN operating mode.
canTXElementCfg = canComponent.createKeyValueSetSymbol("TX_FIFO_BYTES_CFG", canTXmenu)
canTXElementCfg.setLabel("Element Size")
adornElementSize(canTXElementCfg)
canTXElementCfg.setVisible(False)
canTXElementCfg.setDependencies(updateElementSize, ["CAN_OPMODE"])
canTXpause = canComponent.createBooleanSymbol("TX_PAUSE", None)
canTXpause.setLabel("Enable TX Pause")
canTXpause.setDescription("Pause 2 CAN bit times between transmissions")
canTXpause.setDefaultValue(False)
# ----- Tx Buffer ----- (dedicated transmit buffers, off by default)
canTXBuf = canComponent.createBooleanSymbol("TXBUF_USE", None)
canTXBuf.setLabel("Use Dedicated Tx Buffer")
canTXBuf.setDefaultValue(False)
# number of TX buffer elements
canTXBufElements = canComponent.createIntegerSymbol("TX_BUFFER_ELEMENTS", canTXBuf)
canTXBufElements.setLabel("Number of TX Buffer Elements")
canTXBufElements.setDefaultValue(1)
canTXBufElements.setMin(1)
canTXBufElements.setMax(32)
canTXBufElements.setVisible(False)
canTXBufElements.setDependencies(hideMenu, ["TXBUF_USE"])
# up to 128 standard filters
canStdFilterMenu = canComponent.createMenuSymbol("stdFilterMenu", None)
canStdFilterMenu.setLabel("Standard Filters (up to 128)")
canStdFilterMenu.setDependencies(adjustStdFilters, ["FILTERS_STD"])
# How many of the pre-created standard filters are currently enabled.
canStdFilterNumber = canComponent.createIntegerSymbol("FILTERS_STD", canStdFilterMenu)
canStdFilterNumber.setLabel("Number of Standard Filters:")
canStdFilterNumber.setDefaultValue(0)
canStdFilterNumber.setMin(0)
canStdFilterNumber.setMax(128)
#Create all of the standard filters in a disabled state
# (loop variable renamed: 'filter' shadowed the Python builtin)
for filterIndex in range(128):
    stdFilterList.append(canCreateStdFilter(canComponent, canStdFilterMenu, filterIndex + 1))
#What to do when a NO-MATCH is detected on a standard packet
canNoMatchStandard = canComponent.createKeyValueSetSymbol("FILTERS_STD_NOMATCH", None)
canNoMatchStandard.setLabel("Standard message No-Match disposition:")
canNoMatchStandard.addKey("CAN_GFC_ANFS_RXF0", "0", "Move to RX FIFO 0")
canNoMatchStandard.addKey("CAN_GFC_ANFS_RXF1", "1", "Move to RX FIFO 1")
canNoMatchStandard.addKey("CAN_GFC_ANFS_REJECT", "2", "Reject")
canNoMatchStandard.setOutputMode("Key")
canNoMatchStandard.setDisplayMode("Description")
canNoMatchStandard.setDefaultValue(2)
# Reject all standard IDs?
canStdReject = canComponent.createBooleanSymbol("FILTERS_STD_REJECT", None)
canStdReject.setLabel("Reject Standard Remote Frames")
canStdReject.setDescription("Reject all remote frames with 11-bit standard IDs")
canStdReject.setDefaultValue(False)
# 64 extended filters
canExtFilterMenu = canComponent.createMenuSymbol("extFilterMenu", None)
canExtFilterMenu.setLabel("Extended Filters (up to 64)")
canExtFilterMenu.setDependencies(adjustExtFilters, ["FILTERS_EXT"])
#How many extended filters
canExtFilterNumber = canComponent.createIntegerSymbol("FILTERS_EXT", canExtFilterMenu)
canExtFilterNumber.setLabel("Number of Extended Filters:")
canExtFilterNumber.setDefaultValue(0)
canExtFilterNumber.setMin(0)
canExtFilterNumber.setMax(64)
#Create all of the 64 extended filters in a disabled state
# (same rename as above to avoid shadowing the 'filter' builtin)
for filterIndex in range(64):
    extFilterList.append(canCreateExtFilter(canComponent, canExtFilterMenu, filterIndex + 1))
#What to do when a NO-MATCH is detected on an extended message
canNoMatchExtended = canComponent.createKeyValueSetSymbol("FILTERS_EXT_NOMATCH", None)
canNoMatchExtended.setLabel("Extended message No-Match disposition:")
canNoMatchExtended.addKey("CAN_GFC_ANFE_RXF0", "0", "Move to RX FIFO 0")
canNoMatchExtended.addKey("CAN_GFC_ANFE_RXF1", "1", "Move to RX FIFO 1")
canNoMatchExtended.addKey("CAN_GFC_ANFE_REJECT", "2", "Reject")
canNoMatchExtended.setOutputMode("Key")
canNoMatchExtended.setDisplayMode("Description")
canNoMatchExtended.setDefaultValue(2)
# Reject all extended IDs?
canExtReject = canComponent.createBooleanSymbol("FILTERS_EXT_REJECT", None)
canExtReject.setLabel("Reject Extended Remote Frames")
canExtReject.setDescription("Reject all remote frames with 29-bit extended IDs")
canExtReject.setDefaultValue(False)
#use timeout?
canUseTimeout = canComponent.createBooleanSymbol("CAN_TIMEOUT", None)
canUseTimeout.setLabel("Use Timeout Counter")
canUseTimeout.setDescription("Enables Timeout Counter")
canUseTimeout.setDefaultValue(False)
#timeout count (hidden until CAN_TIMEOUT is enabled)
canTimeoutCount = canComponent.createIntegerSymbol("TIMEOUT_COUNT", canUseTimeout)
canTimeoutCount.setLabel("Timeout Countdown from: ")
canTimeoutCount.setDefaultValue(40000)
canTimeoutCount.setMin(10)
canTimeoutCount.setMax(65535)
canTimeoutCount.setVisible(False)
# Single dependency registration; the original registered the identical
# hideMenu/["CAN_TIMEOUT"] dependency twice (before setLabel and after
# setVisible), causing the callback to fire twice per change.
canTimeoutCount.setDependencies(hideMenu, ["CAN_TIMEOUT"])
#timeout mode
canTimeoutMode = canComponent.createKeyValueSetSymbol("TIMEOUT_SELECT", canUseTimeout)
canTimeoutMode.setLabel("Timeout mode:")
canTimeoutMode.addKey("CAN_TOCC_TOS_CONT", "0", "CONTINUOUS")
canTimeoutMode.addKey("CAN_TOCC_TOS_TXEF", "1", "TX EVENT")
canTimeoutMode.addKey("CAN_TOCC_TOS_RXF0", "2", "RX0 EVENT")
canTimeoutMode.addKey("CAN_TOCC_TOS_RXF1", "3", "RX1 EVENT")
canTimeoutMode.setOutputMode("Key")
canTimeoutMode.setDisplayMode("Description")
canTimeoutMode.setVisible(False)
canTimeoutMode.setDependencies(hideMenu, ["CAN_TIMEOUT"])
canTimeoutMode.setDefaultValue(1)
#timestamp Modes
canTimestampMode = canComponent.createKeyValueSetSymbol("TIMESTAMP_MODE", None)
canTimestampMode.setLabel("Timestamp mode")
canTimestampMode.setDescription("EXT TIMESTAMP: external counter (needed for FD). ZERO: timestamp is always 0x0000. TCP INC: incremented according to TCP.")
canTimestampMode.addKey("CAN_TSCC_TSS_ZERO", "0", "ZERO")
canTimestampMode.addKey("CAN_TSCC_TSS_INC", "1", "TCP INC")
canTimestampMode.addKey("CAN_TSCC_TSS_EXT", "2", "EXT TIMESTAMP")
canTimestampMode.setOutputMode("Key")
canTimestampMode.setDisplayMode("Description")
canTimestampMode.setDefaultValue(1)
#timestamp/timeout Counter Prescaler (0..15 -> 1..16 CAN bit times per tick)
canTCP = canComponent.createIntegerSymbol("TIMESTAMP_PRESCALER", None)
canTCP.setLabel("Timestamp/Timeout Counter Prescaler (TCP):")
canTCP.setDescription("Configures Timestamp & Timeout counter prescaler in multiples of CAN bit times.")
canTCP.setDefaultValue(0)
canTCP.setMin(0)
canTCP.setMax(15)
# Interrupt Dynamic settings (hidden symbol toggled by the INTERRUPT_MODE option)
caninterruptControl = canComponent.createBooleanSymbol("CAN_INTERRUPT_ENABLE", None)
caninterruptControl.setVisible(False)
caninterruptControl.setDependencies(interruptControl, ["INTERRUPT_MODE"])
# Dependency Status for interrupt: warning comment shown when the vector is
# disabled in the Interrupt Manager (see InterruptStatusWarning callback).
canIntEnComment = canComponent.createCommentSymbol("CAN_INTERRUPT_ENABLE_COMMENT", None)
canIntEnComment.setVisible(False)
canIntEnComment.setLabel("Warning!!! " + canInstanceName.getValue() + " Interrupt is Disabled in Interrupt Manager")
canIntEnComment.setDependencies(InterruptStatusWarning, ["core." + interruptVectorUpdate])
REG_MODULE_CAN = Register.getRegisterModule("CAN")
configName = Variables.get("__CONFIGURATION_NAME")
#Master Header (shared, non-templated common header for all CAN instances)
canMasterHeaderFile = canComponent.createFileSymbol("headerFile", None)
canMasterHeaderFile.setSourcePath("../peripheral/can_u2003/templates/plib_can_common.h")
canMasterHeaderFile.setOutputName("plib_can_common.h")
canMasterHeaderFile.setDestPath("/peripheral/can/")
canMasterHeaderFile.setProjectPath("config/" + configName + "/peripheral/can/")
canMasterHeaderFile.setType("HEADER")
#Instance Source File (FreeMarker template rendered per CAN instance)
canMainSourceFile = canComponent.createFileSymbol("sourceFile", None)
canMainSourceFile.setSourcePath("../peripheral/can_u2003/templates/plib_can.c.ftl")
canMainSourceFile.setOutputName("plib_"+canInstanceName.getValue().lower()+".c")
canMainSourceFile.setDestPath("/peripheral/can/")
canMainSourceFile.setProjectPath("config/" + configName + "/peripheral/can/")
canMainSourceFile.setType("SOURCE")
canMainSourceFile.setMarkup(True)
#Instance Header File (templated per instance, e.g. plib_can0.h)
canInstHeaderFile = canComponent.createFileSymbol("instHeaderFile", None)
canInstHeaderFile.setSourcePath("../peripheral/can_u2003/templates/plib_can.h.ftl")
canInstHeaderFile.setOutputName("plib_"+canInstanceName.getValue().lower()+".h")
canInstHeaderFile.setDestPath("/peripheral/can/")
canInstHeaderFile.setProjectPath("config/" + configName + "/peripheral/can/")
canInstHeaderFile.setType("HEADER")
canInstHeaderFile.setMarkup(True)
#CAN Initialize: snippet injected into SYS_Initialize()
canSystemInitFile = canComponent.createFileSymbol("initFile", None)
canSystemInitFile.setType("STRING")
canSystemInitFile.setOutputName("core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_PERIPHERALS")
canSystemInitFile.setSourcePath("../peripheral/can_u2003/templates/system/initialization.c.ftl")
canSystemInitFile.setMarkup(True)
#CAN definitions header: include line injected into definitions.h
canSystemDefFile = canComponent.createFileSymbol("defFile", None)
canSystemDefFile.setType("STRING")
canSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
canSystemDefFile.setSourcePath("../peripheral/can_u2003/templates/system/definitions.h.ftl")
canSystemDefFile.setMarkup(True)
| [
"http://support.microchip.com"
] | http://support.microchip.com |
5cd23633f5a6da1552eac596420b46aba1fc928d | 611d7bda1ca1415088a7d9fc29809b5885381bfb | /ios_code_generator/models/core.py | f98ace2b1c01d307023935dbca2c0c2a2aa49c19 | [
"MIT"
] | permissive | codetalks-new/iOSCodeGenerator | 5682c19c530e4bdaebe7187d93ce2fe10dd1146c | bb07930660f87378040b8783d203a6a2637b289e | refs/heads/master | 2021-09-10T10:58:59.951935 | 2018-03-25T01:31:53 | 2018-03-25T01:31:53 | 77,132,533 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,323 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
from abc import ABCMeta, abstractmethod, abstractproperty
from ios_code_generator._compat import implements_to_string
from ios_code_generator._internal import _DictAccessorProperty
from ios_code_generator.parser import ModelParserMixin
from ios_code_generator.utils import cached_property, to_camel_case, to_mixed_case
"""
速写代码
主要通过 Model-Field 来构造生成代码。
Model 控制代码的主要结构, Field 控制一些具体项的生成细节。
最开始主要是为了通过生成 MVC 中的数据模型代码。
"""
__author__ = 'banxi'
@implements_to_string
class Field(object):
    """One field of a Model; controls generation details for a single item."""
    __metaclass__ = ABCMeta  # Python 2 style abstract base class
    def __init__(self, name, ftype, attrs=None,**kwargs):
        self.name = name
        self.ftype = ftype
        if attrs:
            # Accept either a mapping or an iterable of attr items exposing a
            # 'ctype' attribute; both are normalised into a plain dict.
            if isinstance(attrs, collections.Mapping):
                self.attrs = dict(attrs)
            else:
                self.attrs = {item.ctype: item for item in attrs}
        else:
            self.attrs = {}
        if not hasattr(self, 'model'):
            self.model = None  # back-reference; presumably set later by the owning Model -- TODO confirm
    def __str__(self):
        return u"%s:%s" % (self.name, self.ftype)
    @cached_property
    def field_name(self):
        # A trailing '?' marks the field name and is stripped before casing.
        # NOTE(review): camel_name/mixed_name below do NOT strip the '?';
        # confirm that asymmetry is intended.
        name = self.name
        if self.name.endswith('?'):
            name = self.name[:-1]
        return to_mixed_case(name)
    @cached_property
    def camel_name(self):
        return to_camel_case(self.name)
    @cached_property
    def mixed_name(self):
        return to_mixed_case(self.name)
    @cached_property
    def setter_func_name(self):
        # Setter keeps the raw name, minus any trailing '?' marker.
        if self.name.endswith('?'):
            return self.name[:-1]
        return self.name
class model_property(_DictAccessorProperty): # noqa
    """
    Property descriptor for Model classes, backed by the `model_config` dict.
    Reads the configured value and supplies the declared default when absent.
    """
    def lookup(self, obj):
        return obj.model_config
class model_bool_property(object):
    """
    Boolean flag descriptor for Model classes, backed by `model_config`.

    The bare flag name in the config means True (e.g. `eq`); an explicit
    value of 'false' or 'no' (e.g. `eq=false`) means False.
    """
    def __init__(self, name_or_names, default = False):
        """
        :param name_or_names: a single flag name, or a list of names so that
                              abbreviations can be supported
        :param default: value returned when no listed name is configured
        """
        self.name_or_names = name_or_names
        self.default = default

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        names = self.name_or_names
        if not isinstance(names, (list, tuple)):
            names = [names]
        falsey = ['false', 'no']
        result = self.default
        # Every configured name is consulted; the last match wins.
        for key in names:
            if key in obj.model_config:
                result = obj.model_config[key] not in falsey
        return result
class target_property(_DictAccessorProperty): # noqa
    """Descriptor like model_property, but backed by `target_config`."""
    def lookup(self, obj):
        return obj.target_config
@implements_to_string
class Model(ModelParserMixin, object):
    """Drives the main structure of the generated code (Fields drive details)."""
    __metaclass__ = ABCMeta
    target = None # output target, e.g. data_model, ui_model, enum
    platform = "ios" # output target platform
    lang = "swift" # output target language
    field_class = Field # field class associated with this model
    template = None # default template path
    prefix = model_property("prefix", default="") # name prefix
    model_name = model_property('m', default='T') # model name
    FRAGMENT_NAME = '_FRAGMENT_'
    def __init__(self, name, mtype='', config_items=None, fields=None):
        config_items = config_items or []
        self.name = name
        self.mtype = mtype
        # Config items are keyed by their ctype; values drive the *_property
        # descriptors declared above.
        self.model_config = dict((item.ctype, item.value) for item in config_items)
        self.fields = fields
        self.target_config = dict()
    @property
    def is_fragment(self):
        # NOTE(review): compares against the literal rather than FRAGMENT_NAME.
        return self.name == '_FRAGMENT_'
    def __str__(self):
        return u"%s:%s" % (self.name, self.mtype)
    @cached_property
    def field_names(self):
        return [field.name for field in self.fields]
    def has_attr(self, attr):
        return attr in self.model_config
    @cached_property
    def camel_name(self):
        return to_camel_case(self.name)
    @cached_property
    def mixed_name(self):
        return to_mixed_case(self.name)
    @property
    def mixed_model_name(self):
        return to_mixed_case(self.model_name)
    @property
    def class_name(self):
        return self.camel_name
    @classmethod
    def template_path(cls):
        """
        Path of the code template to render.  Returns `template` when it is
        set explicitly; otherwise the path is assembled by convention.
        :return: template path
        """
        if cls.template:
            return cls.template
        else:
            return "%s/%s/%s_tpl.html" % (cls.platform, cls.target, cls.lang)
    def template_context(self):
        """
        The default context contains `model` and `fields`.
        :return: context used when rendering the template
        """
        return dict(
            model = self,
            fields = self.fields,
        )
| [
"banxi1988@gmail.com"
] | banxi1988@gmail.com |
d4d369ad725ea386900621ccf02451db45190035 | d11dea659a9fe0ed5eb08ff4635d4fcedcd4695b | /upwork_crawler/upwork_crawler/middlewares.py | 6bd5e9705d305a8c6369c7d680e9075a85e563b5 | [] | no_license | pauljherrera/upwork_scraper | e06252287af3e41ffc3b57e81771de3ed4b8cc3d | d408456a5168c09dfd9093592b4423194d31bdec | refs/heads/master | 2022-06-20T05:31:42.438662 | 2022-05-27T12:27:22 | 2022-05-27T12:27:22 | 91,928,869 | 5 | 2 | null | 2022-05-27T12:27:23 | 2017-05-21T02:42:04 | Python | UTF-8 | Python | false | false | 1,911 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class UpworkCrawlerSpiderMiddleware(object):
    """Pass-through spider middleware.

    All hooks keep Scrapy's default behaviour; only spider_opened adds a
    log line.  Methods not needed may simply be removed, since Scrapy
    treats missing hooks as "do not modify".
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy instantiates the middleware through this factory; we also
        # subscribe to the spider_opened signal here.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response unchanged; returning None lets processing continue.
        return None

    def process_spider_output(self, response, result, spider):
        # Yield the spider's results untouched (Requests, dicts or Items).
        for element in result:
            yield element

    def process_spider_exception(self, response, exception, spider):
        # No special handling: returning None lets the exception propagate.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the start requests as-is (no response is associated here).
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"pauljherrera@gmail.com"
] | pauljherrera@gmail.com |
4f53383d0322a34abedf894eb70e82b815eb2806 | 2d8860dc684624da7188370aebda1c41b66c4641 | /models/__init__.py | b553cecdee4c28ad752fd5d733af5a086c0b6cff | [] | no_license | mroll/rate-predictr | cfa30ac2a46fd21078808f7c8f2ad00aed6068b1 | 56c4df1432435c801141f65c4776cb098804d3ab | refs/heads/master | 2020-03-07T07:39:36.315516 | 2018-03-30T19:45:40 | 2018-03-30T19:45:40 | 127,355,228 | 0 | 0 | null | 2020-11-23T20:03:44 | 2018-03-29T22:49:29 | Python | UTF-8 | Python | false | false | 94 | py | from .cost import Cost
from .cost_estimate import CostEstimate
from .location import Location
| [
"mpr56@drexel.edu"
] | mpr56@drexel.edu |
3045511d732f596bd3f6b2f73d8806af77c5a8af | 70dbf1067be57e85005d172ea09bc0ab04986669 | /basicweb/pytestpackage/testRunOrderDemo.py | 8147a081fc1c4c8b03eb45d21abd5282e4e21ea0 | [] | no_license | Chi10ya/UDEMY_SeleniumWithPython | 6e97a65810cd674cc32e9377e91a718214b88730 | 95e3ad4441eef6f243e24efad168b349b25880f6 | refs/heads/master | 2022-10-05T21:00:04.302237 | 2020-06-10T08:12:52 | 2020-06-10T08:12:52 | 271,218,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import pytest
@pytest.mark.run(order=2)
def test_conftestdemo1_methodA(oneTimeSetUp, setUp):
print("Running test run demo 1 method A")
@pytest.mark.run(order=4)
def test_conftestdemo1_methodB(oneTimeSetUp, setUp):
print("Running test run demo 1 method B")
@pytest.mark.run(order=1)
def test_conftestdemo1_methodC(oneTimeSetUp, setUp):
print("Running test run demo 1 method C")
@pytest.mark.run(order=3)
def test_conftestdemo1_methodD(oneTimeSetUp, setUp):
print("Running test run demo 1 method D") | [
"mchintuk@yahoo.com"
] | mchintuk@yahoo.com |
abc855529ce069a7208dd306d3988daf851774db | 2cfeb115b0ea14c52c3bf99abb53e935fa3d01b7 | /examples/vanilla/settings_quickstart.py | a8a2eec01f21006986c5b8f512f375b6eaf87a00 | [
"BSD-2-Clause"
] | permissive | aykut/django-oscar | 796fbc2f62d3dd7877833610f7bead2b006b9739 | ca3629e74ea1e0affc55d3de4e97f523e352d267 | refs/heads/master | 2021-01-22T07:27:59.359441 | 2011-06-30T19:36:01 | 2011-06-30T19:36:01 | 14,263,668 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,527 | py | import os
PROJECT_DIR = os.path.dirname(__file__)
# Helper: absolute path relative to this settings file.
location = lambda x: os.path.join(os.path.dirname(os.path.realpath(__file__)), x)
# Development-only settings; do not deploy with DEBUG enabled.
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
# Print outgoing mail to the console instead of sending it (dev convenience).
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '/tmp/oscar_vanilla',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("assets")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.promotions.context_processors.merchandising_blocks',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
# Oscar specific
'oscar.apps.basket.middleware.BasketMiddleware'
)
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'verbose'
},
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '/tmp/oscar.log',
'formatter': 'verbose'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'django': {
'handlers':['null'],
'propagate': True,
'level':'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'oscar.checkout': {
'handlers': ['console'],
'propagate': True,
'level':'INFO',
},
'django.db.backends': {
'handlers':['null'],
'propagate': False,
'level':'DEBUG',
},
}
}
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.flatpages',
    # External dependencies
    'haystack',
    'sorl.thumbnail',
    # Apps from oscar
    'oscar',
    'oscar.apps.analytics',
    'oscar.apps.discount',
    'oscar.apps.order',
    'oscar.apps.checkout',
    'oscar.apps.shipping',
    'oscar.apps.order_management',
    'oscar.apps.product',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.address',
    'oscar.apps.partner',
    'oscar.apps.image',
    'oscar.apps.customer',
    'oscar.apps.promotions',
    'oscar.apps.reports',
    'oscar.apps.search',
    'oscar.apps.product.reviews',
    'oscar.apps.payment.datacash',
)
LOGIN_REDIRECT_URL = '/accounts/profile/'
APPEND_SLASH = True
# Oscar settings (pulls in the project's default configuration values)
from oscar.defaults import *
# Haystack settings ('dummy' backend: no real search engine in this example)
HAYSTACK_SITECONF = 'oscar.search_sites'
HAYSTACK_SEARCH_ENGINE = 'dummy'
| [
"david.winterbottom@gmail.com"
] | david.winterbottom@gmail.com |
4ca1a63aba6d81d8131ea2d1874236b45ee14bb9 | 283f85409e4aa92444fc865c802d2babd8629f88 | /app/errors/__init__.py | fab5ff4aceb9732c788992026b7de33c99e5c66b | [
"MIT"
] | permissive | tomwright01/EyeReport | df52a77b3cc6396ba51721421cc5616649286c8b | ab227190e7efe9af18125d175efd271ee11dbff4 | refs/heads/master | 2021-05-16T04:30:05.374448 | 2019-08-08T15:41:15 | 2019-08-08T15:41:15 | 106,033,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | # -*- coding: utf-8 -*-
from flask import Blueprint
bp = Blueprint('errors', __name__,
template_folder='templates')
from app.errors import handlers | [
"tom@maladmin.com"
] | tom@maladmin.com |
5c067420616477628ce259022088493729d1fc07 | 11f4ab6dffdf6341f99b79add2c942f3d4194991 | /PYTHON Training/day5/practice/grade2.py | 05ddf9ee5e8a7768ac5d25d42803d5ad6e354e8a | [] | no_license | Priyankakore21/Dailywork | ba93c95399058037218fd74bd2161213b7c73c67 | a5435882bca1517ab3c0bb6978d2fb5a083b2af3 | refs/heads/master | 2020-04-23T21:19:51.995174 | 2019-03-27T06:47:53 | 2019-03-27T06:47:53 | 161,127,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | mark = float(input('enter obtained marks'))
if mark >= 80:
grade='A'
elif mark>=65:
grade='B'
elif mark>=50
grade='C'
print(grade)
| [
"priyanka.kore@synerzip.com"
] | priyanka.kore@synerzip.com |
ceffbbae1dec4b55eeccc71251830bea92cf83ef | 670197b9861fa0097fb957bce6cfeacc0146914c | /kung_ke.py | 7725254ad7a08214b5e664af36b8b642f85ebcf6 | [] | no_license | WildButcher/kong1g | c11aeba145de6e00f68d0f539d54aa422b6ed60c | 57875df13a88d0860e7cce4b8e2a7da72e1d3b9e | refs/heads/master | 2020-04-03T08:39:34.069355 | 2018-10-29T02:34:11 | 2018-10-29T02:34:11 | 155,140,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,847 | py | # _*_ coding:utf8 _*_
import threading
import urllib2
import math
import time
import json
from pyquery import PyQuery
class ConfigureHelper:
""" 爬取不同网站的配置辅助类
在爬取网站数据的时候可以根据每个网站的布局和CSS不同添加属于这个网站自己的爬取配置
配置文件的格式如下:
{
'configure_key':_configure_name_key,
'configure_content':
{
'book_url':_book_url,
'pre_fix_url':_pre_fix_url,
......
},
'configure_docs':_configure_name_docs
}
"""
_configure_file = '' # 整个配置文件
_update_configure_url = '' # 更新地址
_configure_name_key = '' # 配置文件中每一个网站对应的唯一key,一般为网站域名
_configure_name_docs = '' # 每一个网站对应的配置说明
_configure_content = '' # 每一个网站对应配置内容
_pre_fix_url = '' # 下载地址前缀,通常后面跟目录中过去的相对地址构成完整的访问地址
_book_name_tag = '' # 页面中小说【名称】的DOM元素标记,可以是id、class和html标记等
_book_query_tag = '' # 页面中小说【目录】的DOM元素标记,可以是id、class和html标记等
_book_content_tag = '' # 页面中小说【内容】的DOM元素标记,可以是id、class和html标记等
def __init__(self):
""" 初始化配置文件 kung_ke.json如果不存在配置文件则自动创建一个
配置文件一次性加载入内存方便程序运行时候调用.kung_ke.ini存放能够更新kung_ke.json的网址
"""
try:
f = open('kung_ke.ini','r')
self.set_update_configure_url(f.readline())
fp = open('kung_ke.json', 'r')
self.set_configure_file(json.load(fp))
if len(self.get_configure_file()) == 0:
self.update_configure_file()
except Exception, e:
print e,' Loads json file failed!'
finally:
fp.close()
f.close()
pass
def get_configure_content(self):
return self.__configure_content
def set_configure_content(self, value):
self.__configure_content = value
def get_configure_file(self):
return self.__configure_file
def set_configure_file(self, value):
self.__configure_file = value
def get_configure_name_key(self):
return self.__configure_name_key
def get_configure_name_docs(self):
return self.__configure_name_docs
def get_update_configure_url(self):
return self.__update_configure_url
def get_pre_fix_url(self):
return self.__pre_fix_url
def get_book_name_tag(self):
return self.__book_name_tag
def get_book_query_tag(self):
return self.__book_query_tag
def get_book_content_tag(self):
return self.__book_content_tag
def set_configure_name_key(self, value):
self.__configure_name_key = value
def set_configure_name_docs(self, value):
self.__configure_name_docs = value
def set_update_configure_url(self, value):
self.__update_configure_url = value
def set_pre_fix_url(self, value):
self.__pre_fix_url = value
def set_book_name_tag(self, value):
self.__book_name_tag = value
def set_book_query_tag(self, value):
self.__book_query_tag = value
def set_book_content_tag(self, value):
self.__book_content_tag = value
def _private_find_configure_key(self,_configure_name_key):
""" 私有函数。用于找出某一个网站的爬取配置 """
li = self.get_configure_file()
for x in li:
if x['configure_key'] == _configure_name_key:
return x
pass
return False
def _private_route_configure_value(self):
""" 私有函数。分发传入的配置 """
self.set_book_name_tag(self.get_configure_content()['book_name_tag'])
self.set_book_query_tag(self.get_configure_content()['book_query_tag'])
self.set_book_content_tag(self.get_configure_content()['book_content_tag'])
self.set_pre_fix_url(self.get_configure_content()['pre_fix_url'])
return True
def use_one_configure(self,configure_name):
""" 使用一个网站的配置 ,如果没有整个网站的配置将打印出错信息"""
get_one = self._private_find_configure_key(configure_name)
if get_one <> False:
self.set_configure_name_key(get_one['configure_key'])
self.set_configure_name_docs(get_one['configure_docs'])
self.set_configure_content(get_one['configure_content'])
self._private_route_configure_value()
return True
print 'There is no configure by ',configure_name
return False
def get_all_configure_name(self):
""" 获取所有配置文件名称(网站名字) """
all_configure_name = []
li = self.get_configure_file()
if len(li) <> 0:
for x in li:
all_configure_name.append(x['configure_key'])
return all_configure_name
    def update_configure_file(self):
        """Re-download the configure file from the update URL and write it to
        kung_ke.json.  Returns True on success, False otherwise."""
        req = urllib2.Request(self.get_update_configure_url())
        res = urllib2.urlopen(req)
        if res.code == 200:
            try:
                fp = open('kung_ke.json', 'w')
                # NOTE(review): json.dump() on the raw response *string*
                # serializes it as a single JSON string literal (double
                # encoding).  If the server already returns JSON,
                # fp.write(res.read()) was probably intended -- confirm.
                json.dump(res.read(),fp)
                return True
            except Exception,e:
                print e
            finally:
                # NOTE(review): if open() itself raises, `fp` is unbound
                # here and this close() raises NameError.
                fp.close()
        return False
class kong1g:
    """Multi-threaded web-novel scraper.

    Splits a book's chapter list across ``threadNo`` worker threads, scrapes
    each chapter page with PyQuery, and finally appends everything to one
    text file named after the book.
    """
    # Class-level defaults; __init__ immediately assigns the name-mangled
    # instance attributes that the getters/setters below actually use.
    threadNo = 10
    theradList =[]
    booktext = {}
    url = ''
    prefix = ''
    _miss_total_num = 0
    def __init__(self):
        self.set_booktext({})
        self.set_therad_list([])
        self.set_thread_no(10)
        self.set_miss_total_num(0)
        pass
    # --- Trivial accessors over name-mangled attributes. ---
    def get_miss_total_num(self):
        # Count of chapters that failed to download.
        return self.__miss_total_num
    def set_miss_total_num(self, value):
        self.__miss_total_num = value
    def get_therad_list(self):
        # (sic) "therad" -- list of worker threading.Thread objects.
        return self.__theradList
    def get_booktext(self):
        # Dict: thread index -> list of {'title', 'content'} chapter dicts.
        return self.__booktext
    def get_url(self):
        return self.__url
    def get_prefix(self):
        return self.__prefix
    def set_therad_list(self, value):
        self.__theradList = value
    def set_booktext(self, value):
        self.__booktext = value
    def set_url(self, value):
        self.__url = value
    def set_prefix(self, value):
        self.__prefix = value
    def get_thread_no(self):
        return self.__threadNo
    def set_thread_no(self, value):
        self.__threadNo = value
    """ Save the scraped content as text """
    def saveBook21Text(self,filename, booktext):
        """Append every scraped chapter, in thread order, to ``filename``."""
        try:
            fp = open(filename, 'a')
            # Thread index order preserves the original chapter order.
            for b in range(self.get_thread_no()):
                l = booktext[b]
                for j in l:
                    fp.write(j['title'].encode('utf8') + '\n')
                    fp.write(j['content'].encode('utf8'))
                    fp.write('\n')
        except Exception, e:
            print e
        finally:
            # NOTE(review): if open() raises, `fp` is unbound here.
            fp.close()
        pass
    def chang_content(self,x,temp):
        # Store thread x's scraped chapter list (called while holding the lock).
        self.get_booktext()[x] = temp
        pass
    def act(self,i,startflag, endflag, leng,booklist,prefix,lock,content_tag):
        """Worker body: scrape chapters [startflag, endflag) and hand the
        results back to the shared booktext dict under ``lock``."""
        tempList = []
        if endflag > leng:
            endflag = leng
        for x in range(startflag, endflag):
            try:
                artTitle = booklist[x].text()
                artUrl = prefix + booklist[x].attr('href')
                flag = True
                # Retry until the chapter page answers with HTTP 200.
                while flag:
                    req = urllib2.Request(url=artUrl)
                    res = urllib2.urlopen(req)
                    if res.code == 200:
                        htmltext = res.read()
                        doc = PyQuery(htmltext)
                        bookcontent = doc(content_tag).text()
                        t = {'title':artTitle,'content':bookcontent} # keep title and body together as one dict
                        tempList.append(t) # collect the chapter dicts in a list
                        flag = False
                        print artTitle,'>>>>>>>.....ok!'
            except Exception, e:
                # A failed chapter is counted and skipped, not retried.
                # NOTE(review): artTitle/artUrl may be unbound here if the
                # failure happened before they were assigned.
                self.set_miss_total_num(self.get_miss_total_num()+1)
                print e,artTitle,artUrl
        lock.acquire()
        try:
            self.chang_content(i,tempList)
        finally:
            lock.release()
        pass
    def main(self,ar):
        """Driver: load site configuration ``ar``, fetch the chapter index at
        ``self.url``, fan out worker threads, then save the whole book."""
        ch = ConfigureHelper()
        ch.use_one_configure(ar)
        lock = threading.Lock() # guards merging every thread's text into the shared dict
        print "Let's Go!"
        start = time.time()
        try:
            req = urllib2.Request(self.get_url())
            res = urllib2.urlopen(req)
            if res.code == 200:
                muhtml = res.read()
                doc = PyQuery(muhtml)
                bookName = doc(ch.get_book_name_tag()).text() + ".txt"
                bookQuery = doc(ch.get_book_query_tag()).items()
                booklist = list(bookQuery)
                leng = float(len(booklist))
                eachThreadNo = math.ceil(leng / self.get_thread_no()) # pages per thread; ceil so the remainder is covered
                for i in range(self.get_thread_no()):
                    t = threading.Thread(target=self.act, args=(i,i * int(eachThreadNo), (i + 1) * int(eachThreadNo), int(leng),booklist,ch.get_pre_fix_url(),lock,ch.get_book_content_tag(),))
                    self.get_therad_list().append(t)
                    t.start()
                for t in self.get_therad_list():
                    t.join()
        except Exception, e:
            print e
        # NOTE(review): if the index request failed above, bookName/booklist
        # are unbound here and the lines below raise NameError.
        self.saveBook21Text(bookName,self.get_booktext())
        end = time.time()
        print
        print bookName,' total ',len(booklist),' artices!'
        print 'Threads number is',self.get_thread_no()
        print 'Missed artices is',self.get_miss_total_num()
        print 'Runs %0.2f seconds.'%(end-start)
        print '=======over======='
        pass
    # Expose miss_total_num as a property in addition to the get/set pair.
    miss_total_num = property(get_miss_total_num, set_miss_total_num, None, None)
if __name__=='__main__':
    # Scrape one hard-coded book with 40 worker threads.
    k = kong1g()
    k.set_thread_no(40)
    myurl = 'http://www.biquge.com.tw/16_16357/'
    tem = myurl.split('.')
    k.set_url(myurl)
    try:
        if len(tem) > 1:
            # The middle domain label (e.g. 'biquge') selects the site configure.
            k.main(tem[1])
    except Exception, e:
        print e
| [
"noreply@github.com"
] | WildButcher.noreply@github.com |
81df5fb4cda7e31f1ab5cd5b884be42f24cade5e | 137ded4225a84d1f5f46099ef6e5545b26cc5fb2 | /Configuration/GenProduction/python/Pythia8_TuneCP5_5TeV_D0_PiK_prompt_pt1p2_y2p4_cfi.py | 6281438c1a2a5fb8c8501629827135ab0b1fc8e0 | [] | no_license | davidlw/2017FlowMCRequest | 8a27f04d5a70c3f34d003d6ea25888a691e73bb6 | c9cd086db18ec3a661482cc457a1fdb5949d3b88 | refs/heads/master | 2022-08-28T21:42:32.093605 | 2022-08-02T18:00:06 | 2022-08-02T18:00:06 | 148,789,077 | 0 | 2 | null | 2021-01-06T21:45:03 | 2018-09-14T13:01:38 | Python | UTF-8 | Python | false | false | 2,674 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from GeneratorInterface.EvtGenInterface.EvtGenSetting_cff import *
# Pythia8 generator for prompt D0 production in pp collisions at 5.02 TeV,
# with D0 decays handed to EvtGen and forced to the K-pi channel.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    pythiaPylistVerbosity = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    # Centre-of-mass energy in GeV (5.02 TeV).
    comEnergy = cms.double(5020.0),
    maxEventsToPrint = cms.untracked.int32(0),
    ExternalDecays = cms.PSet(
        # EvtGen handles the decays, forcing D0/anti-D0 via the user decay file.
        EvtGen130 = cms.untracked.PSet(
            decay_table = cms.string('GeneratorInterface/EvtGenInterface/data/DECAY_2010.DEC'),
            operates_on_particles = cms.vint32(),
            particle_property_file = cms.FileInPath('GeneratorInterface/EvtGenInterface/data/evt.pdl'),
            user_decay_file = cms.vstring('GeneratorInterface/ExternalDecays/data/D0_Kpi.dec'),
            list_forced_decays = cms.vstring('myD0', 'myanti-D0')
        ),
        parameterSets = cms.vstring('EvtGen130')
    ),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP5SettingsBlock,
        # Hard QCD with no pThat cut (min-bias-like hard scattering).
        processParameters = cms.vstring(
            'HardQCD:all = on',
            'PhaseSpace:pTHatMin = 0.', #min pthat
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CP5Settings',
            'processParameters',
        )
    )
)
# Register the extra EvtGen particle definitions with Pythia.
generator.PythiaParameters.processParameters.extend(EvtGenExtraParticles)
# Keep only events containing a charm quark (PDG id 4).
partonfilter = cms.EDFilter("PythiaFilter",
    ParticleID = cms.untracked.int32(4) # 4 for c and 5 for b quark
)
# Require a D0 (PDG 421) decaying to exactly the pi+ (211) K- (-321) pair.
D0Daufilter = cms.EDFilter("PythiaMomDauFilter",
    ParticleID = cms.untracked.int32(421),
    # No kinematic requirement on the mother here; cuts come later.
    MomMinPt = cms.untracked.double(0.0),
    MomMinEta = cms.untracked.double(-10.0),
    MomMaxEta = cms.untracked.double(10.0),
    DaughterIDs = cms.untracked.vint32(211, -321),
    NumberDaughters = cms.untracked.int32(2),
    NumberDescendants = cms.untracked.int32(0),
    BetaBoost = cms.untracked.double(0.0),
)
# Kinematic selection on the D0 itself.
D0rapidityfilter = cms.EDFilter("PythiaFilter",
    ParticleID = cms.untracked.int32(421),
    # pT > 1.2 GeV matches the "pt1p2" tag in this configuration's name.
    MinPt = cms.untracked.double(1.2),
    MaxPt = cms.untracked.double(1000.),
    # NOTE(review): the file name says y2p4 (|y| < 2.4) but the cut applied
    # here is |y| < 2.5 -- confirm which is intended.
    MinRapidity = cms.untracked.double(-2.5),
    MaxRapidity = cms.untracked.double(2.5),
)
# Full chain: generate, then require c quark, D0->piK decay, and kinematics.
ProductionFilterSequence = cms.Sequence(generator*partonfilter*D0Daufilter*D0rapidityfilter)
| [
"liwei810812@gmail.com"
] | liwei810812@gmail.com |
4b3974f60361ee60e4342c0069ebf745d615a286 | 75afcff13207a17af4f3b9be4566f8db85a68a37 | /add_explain/SENN_eg.py | 0251fc89f422950aba9e46eb4059c686c9aeb07d | [] | no_license | ffxz/CNN-for-single-channel-speech-enhancement | 9605c1a062951006ad46681d177de2f960ac1144 | fb5c314d379f64751dc6ac9589b97639d7149946 | refs/heads/master | 2020-05-16T09:33:22.796408 | 2019-04-23T14:13:35 | 2019-04-23T14:13:35 | 182,953,480 | 0 | 0 | null | 2019-04-23T06:57:27 | 2019-04-23T06:57:27 | null | UTF-8 | Python | false | false | 10,194 | py | '''
Class SENN: speech enhancement neural network
1. transforming the original signal frames into
the features fed to the net.
2. definition of the tensorflow computational graph
that enhances the speech.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import os
import ipdb
# import sys
import tensorflow as tf
import numpy as np
log10_fac = 1 / np.log(10)
def variable_summaries(var):
    """Attach the standard set of TensorBoard summaries to a tensor:
    mean, stddev, max, min, and a histogram of its values."""
    with tf.name_scope('summaries'):
        name = var.op.name
        mean_val = tf.reduce_mean(var)
        tf.scalar_summary(name + 'mean', mean_val)
        with tf.name_scope('stddev'):
            std_val = tf.sqrt(tf.reduce_mean(tf.square(var - mean_val)))
            tf.scalar_summary(name + 'stddev', std_val)
        tf.scalar_summary(name + 'max', tf.reduce_max(var))
        tf.scalar_summary(name + 'min', tf.reduce_min(var))
        tf.histogram_summary(name + 'histogram', var)
def conv2d(x, W):
    '''1-dimensional convolution as defined in the paper.  The function name
    (borrowed from the usual 2-D helper) is not accurate, and we kept it.
    The stride of 100 along the height dimension effectively collapses that
    axis in a single step.'''
    return tf.nn.conv2d(x, W, strides=[1, 100, 1, 1], padding='SAME')
def weight_variable(shape):
    """Create a weight Variable initialized from a truncated normal (std 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
class SE_NET(object):
    """Speech-enhancement network: builds the input pipeline, the 10-layer
    fully-convolutional graph, the L2 loss, and the Adam training op."""
    def __init__(self, batch_size, NEFF, N_IN, N_OUT, DECAY=0.999):
        '''NEFF: number of effective FFT points
        N_IN: number of input frames into the nets
        N_OUT: only tested for 1, errors may occur for other number
        DECAY: decay for global mean and var estimation using batch norm
        '''
        self.batch_size = batch_size
        self.NEFF = NEFF
        self.N_IN = N_IN
        self.N_OUT = N_OUT
        self.DECAY = DECAY
    def inputs(self, raw_data_batch):
        '''transform the raw data_batch into
        the input for the nets
        it runs really fast and we don't need to store
        all the mixed samples'''
        # ipdb.set_trace()
        # transpose for FFT
        # shape:
        # batch, N_IN, 2, frame_length to 2 batch N_in frame_length
        raw_data_batch_t = tf.transpose(raw_data_batch, [2, 0, 1, 3])
        # split the stored pair into the noisy signal and the clean speech
        raw_data = raw_data_batch_t[0][:][:][:]
        raw_speech = raw_data_batch_t[1][:][:][:]
        # FFT
        # shape:
        # batch, N_in, NFFT
        # (original author's question: is the FFT over all audio at once, or
        #  frame by frame?  The data already arrives as N_IN-frame chunks, so
        #  the transform runs per frame here.)
        data_f0 = tf.fft(tf.cast(raw_data, tf.complex64))
        # shape:
        # NFFT, batch, N_in
        data_f1 = tf.transpose(data_f0, [2, 0, 1])
        # the FFT of a real signal is symmetric, so keep only the first NEFF bins
        data_f2 = data_f1[0:self.NEFF][:][:]
        # shape:
        # batch, N_in, NEFF
        data_f3 = tf.transpose(data_f2, [1, 2, 0])
        # power spectrum: real^2 + imag^2 (the magnitude-spectrum input)
        data_f4 = tf.square(tf.real(data_f3)) + tf.square(tf.imag(data_f3))
        # limiting the minimum value so log() never sees zero
        data_f5 = tf.maximum(data_f4, 1e-10)
        # into log spectrum
        data_f = 10 * tf.log(data_f5 * 10000) * log10_fac
        # above: log spectrum of the noisy signal;
        # below: the same pipeline for the clean reference speech
        # same operational for reference speech
        speech_f0 = tf.fft(tf.cast(raw_speech, tf.complex64))
        speech_f1 = tf.transpose(speech_f0, [2, 0, 1])
        speech_f2 = speech_f1[0:self.NEFF][:][:]
        speech_f3 = tf.transpose(speech_f2, [1, 2, 0])
        speech_f4 = tf.square(
            tf.real(speech_f3)) + tf.square(tf.imag(speech_f3))
        speech_f5 = tf.maximum(speech_f4, 1e-10)
        speech_f = 10 * tf.log(speech_f5 * 10000) * log10_fac
        # shape:
        # batch, N_in, NEFF
        images = data_f
        # the target is only the last frame of each N_IN chunk, so gather it
        # per batch item (the original author noted uncertainty here)
        targets = tf.concat(
            0,
            [tf.reshape(
                speech_f[i][self.N_IN - 1][0:self.NEFF],
                [1, self.NEFF])
                for i in range(0, self.batch_size, 1)])
        # do per image whitening (not batch normalization!)
        images_reshape = tf.transpose(tf.reshape(
            images, [self.batch_size, -1]))
        targets_reshape = tf.transpose(tf.reshape(
            targets, [self.batch_size, -1]))
        batch_mean, batch_var = tf.nn.moments(images_reshape, [0])
        # normalize both images and targets with the *input* statistics
        images_reshape_norm = tf.nn.batch_normalization(
            images_reshape, batch_mean, batch_var, 0, 1, 1e-10)
        targets_reshape_norm = tf.nn.batch_normalization(
            targets_reshape, batch_mean, batch_var, 0, 1, 1e-10)
        # ipdb.set_trace()
        images_norm = tf.reshape(tf.transpose(images_reshape_norm),
                                 [self.batch_size, self.N_IN, self.NEFF])
        targets_norm = tf.reshape(tf.transpose(targets_reshape_norm),
                                  [self.batch_size, self.NEFF])
        return images_norm, targets_norm
    def _batch_norm_wrapper(self, inputs, is_trianing, epsilon=1e-6):
        '''wrap up all the operations needed for batch norm
        is_training: true -> using batch property
        false -> using global(population) property
        (note: the parameter is spelled "is_trianing" in the signature)'''
        decay = self.DECAY
        scale = tf.Variable(tf.ones(inputs.get_shape()[-1]))
        beta = tf.Variable(tf.zeros(inputs.get_shape()[-1]))
        # population mean and var
        pop_mean = tf.Variable(
            tf.zeros([inputs.get_shape()[-1]]), trainable=False)
        pop_var = tf.Variable(
            tf.ones([inputs.get_shape()[-1]]), trainable=False)
        if is_trianing:
            batch_mean, batch_var = tf.nn.moments(inputs, [0, 1, 2])
            # update estimation of the population statistics (EMA with `decay`)
            train_mean = tf.assign(pop_mean,
                                   pop_mean * decay +
                                   batch_mean * (1 - decay))
            train_var = tf.assign(pop_var,
                                  pop_var * decay +
                                  batch_var * (1 - decay))
            with tf.control_dependencies([train_mean, train_var]):
                return tf.nn.batch_normalization(
                    inputs, batch_mean, batch_var, beta, scale, epsilon)
        else:
            return tf.nn.batch_normalization(
                inputs, pop_mean, pop_var, beta, scale, epsilon)
    def _conv_layer_wrapper(self,
                            input, out_feature_maps, filter_length, is_train):
        '''wrap up all the ops for one convolution layer:
        conv + bias, batch norm, then ReLU'''
        filter_width = input.get_shape()[1].value
        in_feature_maps = input.get_shape()[-1].value
        W_conv = weight_variable(
            [filter_width, filter_length, in_feature_maps, out_feature_maps])
        b_conv = bias_variable([out_feature_maps])
        h_conv_t = conv2d(input, W_conv) + b_conv
        # use batch norm
        h_conv_b = self._batch_norm_wrapper(h_conv_t, is_train)
        return tf.nn.relu(h_conv_b)
    def inference(self, images, is_train):
        '''Net configuration as the original paper: 10 conv layers whose
        feature-map counts expand (12..32) then contract back to 1.'''
        image_input = tf.reshape(images, [-1, self.N_IN, self.NEFF, 1])
        # ipdb.set_trace()
        with tf.variable_scope('con1') as scope:
            h_conv1 = self._conv_layer_wrapper(image_input, 12, 13, is_train)
        with tf.variable_scope('con2') as scope:
            h_conv2 = self._conv_layer_wrapper(h_conv1, 16, 11, is_train)
        with tf.variable_scope('con3') as scope:
            h_conv3 = self._conv_layer_wrapper(h_conv2, 20, 9, is_train)
        with tf.variable_scope('con4') as scope:
            h_conv4 = self._conv_layer_wrapper(h_conv3, 24, 7, is_train)
        with tf.variable_scope('con5') as scope:
            h_conv5 = self._conv_layer_wrapper(h_conv4, 32, 7, is_train)
        with tf.variable_scope('con6') as scope:
            h_conv6 = self._conv_layer_wrapper(h_conv5, 24, 7, is_train)
        with tf.variable_scope('con7') as scope:
            h_conv7 = self._conv_layer_wrapper(h_conv6, 20, 9, is_train)
        with tf.variable_scope('con8') as scope:
            h_conv8 = self._conv_layer_wrapper(h_conv7, 16, 11, is_train)
        with tf.variable_scope('con9') as scope:
            h_conv9 = self._conv_layer_wrapper(h_conv8, 12, 13, is_train)
        # the last layer skips _conv_layer_wrapper, presumably to drop the
        # final ReLU nonlinearity (and batch norm) on the output
        with tf.variable_scope('con10') as scope:
            f_w = h_conv9.get_shape()[1].value
            i_fm = h_conv9.get_shape()[-1].value
            W_con10 = weight_variable(
                [f_w, 129, i_fm, 1])
            b_conv10 = bias_variable([1])
            h_conv10 = conv2d(h_conv9, W_con10) + b_conv10
        return tf.reshape(h_conv10, [-1, self.NEFF])
    def loss(self, inf_targets, targets):
        '''l2 loss for the log spectrum, averaged over the batch'''
        loss_v = tf.nn.l2_loss(inf_targets - targets) / self.batch_size
        tf.scalar_summary('loss', loss_v)
        # loss_merge = tf.cond(
        #     is_val, lambda: tf.scalar_summary('val_loss_batch', loss_v),
        #     lambda: tf.scalar_summary('loss', loss_v))
        return loss_v
        # return tf.reduce_mean(tf.nn.l2_loss(inf_targets - targets))
    def train(self, loss, lr):
        '''optimizer: Adam with the caller-supplied learning rate'''
        # optimizer = tf.train.GradientDescentOptimizer(0.01)
        optimizer = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=0.9,
            beta2=0.999,
            epsilon=1e-8)
        train_op = optimizer.minimize(loss)
        return train_op
| [
"18351962775@163.com"
] | 18351962775@163.com |
f5045b12dc82848edc154b2bb25342fa52704c55 | 256fa93a0049814a499500a36e574f194c23d459 | /newsCraw/newsCraw/memento_settings.py | e3828d0419210432ce8d5503bd7490591f1cb02f | [] | no_license | memento7/newsCraw | e0182fdaa7893662d5823baf98833c17bbd58355 | 9570be14c73e9224f32f4ddee788036eaa52aa7b | refs/heads/master | 2022-05-17T05:15:47.378079 | 2022-05-09T07:50:05 | 2022-05-09T07:50:05 | 81,026,800 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | # Custom Settings
from os import environ
# Elasticsearch host name used by this project.
SERVER_ES = 'server2.memento.live'
# Connection settings consumed by the Elasticsearch client.  Credentials
# come from the MEMENTO_ELASTIC / MEMENTO_ELASTIC_PASS environment
# variables; a KeyError is raised at import time if either is unset.
SERVER_ES_INFO = {
    'host': SERVER_ES,
    'port': 9200,
    'http_auth': (environ['MEMENTO_ELASTIC'], environ['MEMENTO_ELASTIC_PASS'])
}
"maytryark@gmail.com"
] | maytryark@gmail.com |
d2f161e4baa19eb0e363e5a1f1241fb46dbbd172 | 94650c902e2ca452416ac79074344a7b98755668 | /aa/cc.py | fe3636d8dfc341ed14e3b63b92d0ca03d8757024 | [] | no_license | kongzl666/test1 | 43fb72b85c9210476fe3ebf5e46883d510d46be7 | 21f03e26c34de166ac6a77d776fb12edf94ce840 | refs/heads/master | 2023-08-29T06:18:55.943158 | 2021-09-24T08:38:09 | 2021-09-24T08:38:09 | 409,906,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | bcdfg
333333333
111111111111111
| [
"1963105011@qq.com"
] | 1963105011@qq.com |
4212bc4a1f8935f9f0857d22f854f600cdab19a6 | 8fa22f78fdb6ea3a2ca0af6865ebed34ed005cc5 | /fetch_coding_challenge/transactions/migrations/0004_auto_20211007_0301.py | ff04e1b7c4b0e3ef6ce092fdc6efb31d53b49f2b | [
"MIT"
] | permissive | Jakearns95/fetch_challenge_kearns | 889b07852969266bfd4671b78f29e528b39b22c7 | f6a60e64fa998a86483d88657d6bec079f754d2f | refs/heads/master | 2023-08-18T18:22:12.273259 | 2021-10-08T23:48:26 | 2021-10-08T23:48:26 | 414,705,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # Generated by Django 3.2.8 on 2021-10-07 03:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change `points` to a plain IntegerField and
    `timestamp` to a DateField on the Transactions model."""

    dependencies = [
        ('transactions', '0003_auto_20211007_0256'),
    ]

    operations = [
        migrations.AlterField(
            model_name='transactions',
            name='points',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='transactions',
            name='timestamp',
            field=models.DateField(),
        ),
    ]
| [
"jkearns@jakes-air.lan"
] | jkearns@jakes-air.lan |
01af588c3620b7e9a63cd6fec0b7f967cb4f7448 | b7f5ccffe0eab35440bb67fbde76926dcd2e0886 | /filter_comments.py | 842758bd435f8ddc99263f8b0994eeeeebb4efe8 | [] | no_license | Dinosauriel/ans-project-reddit | 538a745886cf1542f7ae8c82ae21c54f96b2376b | 7ee2cd9f5b816f005da26ea4bb6db08b619909b8 | refs/heads/master | 2020-05-07T13:49:27.313240 | 2019-05-20T11:12:50 | 2019-05-20T11:12:50 | 180,564,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | import mysql.connector
import env_file
# Copy the comments of every subreddit listed in `reduced_subreddits`
# from the full `comments` table into a fresh `reduced_comments` table.
env = env_file.get()

db = mysql.connector.connect(
    host=env["MYSQL_HOST"],
    user=env["MYSQL_USER"],
    password=env["MYSQL_PW"],
    database=env["MYSQL_DB"],
)
db.set_charset_collation('utf8mb4', 'utf8mb4_general_ci')

cursor = db.cursor()

# Rebuild the destination table with the same schema as `comments`.
cursor.execute("DROP TABLE IF EXISTS reduced_comments")
cursor.execute("CREATE TABLE reduced_comments LIKE comments")
db.commit()

cursor.execute("SELECT * FROM reduced_subreddits")
reduced_subreddits = cursor.fetchall()

n = 0
for subreddit in reduced_subreddits:
    # Row layout: (id, name, created_utc, display_name, subscribers).
    (id, name, created_utc, display_name, subscribers) = subreddit
    print(subreddit)
    n += 1

    print("fetching...")
    # Parameterized query: the previous version interpolated `name` directly
    # into the SQL string, which breaks on quotes and is an injection risk.
    cursor.execute("SELECT * FROM comments WHERE subreddit_id = %s", (name,))
    coms = cursor.fetchall()

    print("inserting...")
    # executemany batches the inserts instead of one round trip per row.
    cursor.executemany(
        "INSERT INTO reduced_comments VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        coms,
    )
    db.commit()

    if n % 10 == 0:
        print(str(n) + " subreddits filtered")

cursor.close()
db.commit()
db.close()
print("done!")
print(str(n) + " subreddits filtered")
"aurel.feer@gmail.com"
] | aurel.feer@gmail.com |
e2da2961d9e1b16ded1f3c6a93bf250d0a911eda | a794d4e2fe1533e859086547595b0378928eae3e | /question3/dice_game.py | f6e947850cc3075ba2716c70b3afdf04561df202 | [] | no_license | cajun-code/dice_py | 673f6a132bd42bbae3b51a0d3049b9c31dd95230 | 9cc6996ba7ca2b34a725cd729e899e1530c4d20e | refs/heads/master | 2021-03-12T21:32:35.264432 | 2011-05-18T11:26:59 | 2011-05-18T11:26:59 | 1,765,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,159 | py | #! /usr/bin/env python
import random
#import pdb
"""
Author : Allan Davis
Answer to question 3 for CCP Games
run: python dice_game.py
"""
class Die(object):
    """A standard six-sided die.

    Attributes:
        sides: number of faces (always 6 for a plain die).
        value: face shown by the most recent roll() call.
    """
    def __init__(self):
        self.sides = 6

    def roll(self):
        """Roll the die, record the face shown, and return it."""
        face = 1 + random.randrange(self.sides)
        self.value = face
        return face


class LoadedDie(Die):
    """A cheater's die that always lands on a predetermined face."""
    def __init__(self, target):
        super(LoadedDie, self).__init__()
        self.target = target

    def roll(self):
        """Always 'roll' the target face -- no randomness involved."""
        self.value = self.target
        return self.value
class Player(object):
    """A player holding two fair dice."""
    def __init__(self, name):
        self.dice = [Die(), Die()]
        self.name = name
    def take_turn(self):
        """Roll both dice and remember the faces in self.values."""
        self.values = []
        for die in self.dice:
            self.values.append(die.roll())
    def print_roll(self):
        # Python 2 print statement; shows the most recent pair of faces.
        print "\t{name} rolled a {die1} and {die2}".format(
            name=self.name, die1=self.values[0], die2=self.values[1])
    def rolls_double(self):
        # True when both dice show the same face.
        return self.values[0] == self.values[1]
    def get_value(self):
        """Return the sum of the last roll.  (Note: `sum` shadows the builtin.)"""
        sum = 0
        for value in self.values:
            sum += value
        return sum
class ComputerPlayer(Player):
    """The house player: cheats by using loaded dice on every other turn."""
    def __init__(self):
        super(ComputerPlayer, self).__init__("Computer")
        # Turn counter; even-numbered turns (0, 2, ...) use loaded dice.
        self.tries = 0
    def take_turn(self):
        """Every other turn the dice get substituted with loaded dice."""
        if (self.tries % 2) == 0 :
            # Both loaded dice share one random face -> a guaranteed double.
            target = Die().roll()
            self.dice = [LoadedDie(target), LoadedDie(target)]
        else:
            self.dice = [Die(), Die()]
        self.tries += 1
        return super(ComputerPlayer, self).take_turn()
class DiceGame(object):
    """Console dice game pitting a human against the (cheating) computer
    player; the player order swaps after every round."""
    def setup_game(self):
        # Python 2 raw_input; the computer starts as player2.
        name = raw_input("Player 1 please enter your name: ")
        self.player1 = Player(name)
        self.player2 = ComputerPlayer()
    def evaluate_round(self):
        #pdb.set_trace()
        # Doubles beat any non-double; when both (or neither) rolled doubles
        # the higher total wins.  Exact ties go to player2.
        if self.player1.rolls_double() == self.player2.rolls_double() :
            if self.player1.get_value() > self.player2.get_value():
                winner = self.player1
            else:
                winner = self.player2
        elif self.player2.rolls_double():
            winner = self.player2
        else:
            winner = self.player1
        print
        print "The winner is {0}".format(winner.name)
    def play_round(self):
        """Run one round: both players roll, show, and score.  Returns True
        while the user answers 'Y' to keep playing."""
        print "Lets Roll"
        print
        self.player1.take_turn()
        self.player1.print_roll()
        self.player2.take_turn()
        self.player2.print_roll()
        self.evaluate_round()
        print
        return (raw_input("Do you want to play another round?(Y/n): ") == "Y")
    def start_game(self):
        """Entry point: greet, set up players, and loop rounds, swapping the
        player order between rounds."""
        print "Welcome to the Dice Game"
        #print intro
        self.setup_game()
        while self.play_round():
            self.player1, self.player2 = self.player2, self.player1
        print "Thank you for playing"
if __name__ == '__main__':
    # Launch an interactive game when run as a script.
    DiceGame().start_game()
| [
"cajun.code@gmail.com"
] | cajun.code@gmail.com |
b916f740e286b9f3ef5c7acddf84b90d8541aa80 | 452f3354c04f887103d0c7c8b4a07dd29a72eed7 | /A2/app/form.py | 3d2b4af3bc9bb7c09fc3646a81f113d6fb7cda66 | [] | no_license | wmuf/ECE1779_Cloud_Computing | 2d8b4420a26ea6169a5ad8ea13f8dd7997190f71 | 1e385a0a54d4bd8b0c3689ccb4e4064f02efb670 | refs/heads/master | 2023-07-24T16:40:45.875193 | 2021-04-20T02:41:54 | 2021-04-20T02:41:54 | 404,382,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | from flask_wtf import FlaskForm
from wtforms import FloatField, IntegerField, SubmitField, validators
class AutoScalarForm(FlaskForm):
    """Auto-scaler policy form.  Every numeric field is optional.

    NOTE(review): the `x or None` filters turn empty submissions into None,
    but they also turn a legitimate 0 into None (0 is falsy) -- confirm that
    is intended for the threshold fields.
    """
    # Presumably the CPU % that triggers pool growth (0-100) -- confirm in handler.
    cpu_threshold_grow = IntegerField('Cpu_Threshold_Grow', [validators.optional(), validators.NumberRange(min=0, max=100, message="Please specify range from 0 to 100")], filters=[lambda x: x or None])
    # Presumably the CPU % that triggers pool shrinkage (0-100).
    cpu_threshold_shrink = IntegerField('Cpu_Threshold_Shrink', [validators.optional(), validators.NumberRange(min=0, max=100, message="Please specify range from 0 to 100")], filters=[lambda x: x or None])
    # Integer growth factor, constrained to 1-8.
    expand_ratio = IntegerField('Expand_Ratio', [validators.optional(), validators.NumberRange(min=1, max=8, message="Please specify range from 1 to 8")], filters=[lambda x: x or None])
    # Fractional shrink factor, constrained to 0-1.
    shrink_ratio = FloatField('Shrink_Ratio', [validators.optional(), validators.NumberRange(min=0, max=1, message="Please specify range from 0 to 1")], filters=[lambda x: x or None])
    submit = SubmitField('Submit')
| [
"david@Davids-MacBook-Pro.local"
] | david@Davids-MacBook-Pro.local |
a305251c4dddde2db65605e72de53c82721023c1 | 1c3ed902cd9ffc863ca1e9014824dfc80ca05212 | /hisoka/forms.py | 9b098e351146fe10f01ba2a82144b0c71da65047 | [] | no_license | Alejoss/CrHisoka | d8b727d420f43640c6762c809e2b028c2746b7e0 | 97a3ba2e25ac719c20bcbd9a50dfddddef7a6802 | refs/heads/master | 2020-12-24T07:43:24.103551 | 2016-08-03T22:31:42 | 2016-08-03T22:31:42 | 48,620,813 | 0 | 1 | null | 2020-11-17T20:52:07 | 2015-12-26T19:05:18 | HTML | UTF-8 | Python | false | false | 2,489 | py | # coding=utf-8
import logging
from django import forms
from hisoka.models import Fireball, FeralSpirit, CartaMagicPy, GrupoMagicPy
class FormCrearFireball(forms.ModelForm):
    """ModelForm for creating a Fireball; all widgets get the Bootstrap
    'form-control' class."""
    class Meta:
        model = Fireball
        fields = ['nombre', 'url_amazon', 'twitter', 'imagen']
        widgets = {
            'nombre': forms.TextInput(attrs={'class': 'form-control'}),
            'url_amazon': forms.TextInput(attrs={'class': 'form-control'}),
            'twitter': forms.TextInput(attrs={'class': 'form-control'}),
            # NOTE(review): 'imagen' uses a TextInput here (a URL/text value?),
            # unlike the FileInput used in FormCrearFeralSpirit -- confirm.
            'imagen': forms.TextInput(attrs={'class': 'form-control'})
        }
class FormCrearFeralSpirit(forms.ModelForm):
    """ModelForm for creating a FeralSpirit, which can be a video, an image,
    or a plain text entry (selected via the 'tipo' dropdown)."""
    class Meta:
        model = FeralSpirit
        fields = ['tipo', 'texto', 'url', 'imagen']
        # Choices for the 'tipo' select widget: (stored value, display label).
        _tipos_feral = (
            ('video', 'Video'),
            ('imagen', 'Imagen'),
            ('texto', 'Texto')
        )
        widgets = {
            'texto': forms.TextInput(attrs={'class': 'form-control'}),
            'tipo': forms.Select(choices=_tipos_feral, attrs={'class': 'form-control', 'id': 'tipo_feral'}),
            'url': forms.TextInput(attrs={'class': 'form-control'}),
            'imagen': forms.FileInput(attrs={'class': 'form-control', 'id': 'imagen_input'})
        }
class MultipleImagesFeral(forms.Form):
    # NOTE(review): ClearableFileInput is a *widget*, not a field; assigned
    # directly like this, Django will not register `images` as a form field.
    # A FileField(widget=forms.ClearableFileInput(...)) was probably
    # intended -- confirm before changing.
    images = forms.ClearableFileInput(attrs={'multiple': True, 'class': 'form-control'})
class FormNuevaCarta(forms.ModelForm):
    """ModelForm for creating a CartaMagicPy; the 'grupo' choice is
    overridden with an explicit ModelChoiceField over all groups."""
    queryset_grupos_magicpy = GrupoMagicPy.objects.all()
    grupo = forms.ModelChoiceField(queryset_grupos_magicpy)
    class Meta:
        model = CartaMagicPy
        fields = ['imagen_url', 'nombre_carta_magic', 'grupo', 'nombre', 'descripcion']
        widgets = {
            'imagen_url': forms.URLInput(attrs={'class': 'form-control', 'id': 'url_imagen'}),
            # NOTE(review): this widget entry targets 'grupo', which is the
            # explicitly-declared field above, so the TextInput here is
            # ignored by Django -- confirm whether it was meant for another field.
            'grupo': forms.TextInput(attrs={'class': 'form-control'}),
            'nombre_carta_magic': forms.TextInput(attrs={'class': 'form-control'}),
            'nombre': forms.TextInput(attrs={'class': 'form-control'}),
            'descripcion': forms.Textarea(attrs={'class': 'form-control'})
        }
class FormNuevoGrupo(forms.ModelForm):
    """ModelForm for creating a GrupoMagicPy (name, description, image URL)."""
    class Meta:
        model = GrupoMagicPy
        fields = ['nombre', 'descripcion', 'imagen']
        widgets = {
            'imagen': forms.URLInput(attrs={'class': 'form-control'}),
            'nombre': forms.TextInput(attrs={'class': 'form-control'}),
            'descripcion': forms.Textarea(attrs={'class': 'form-control'})
        }
| [
"alejoveintimilla@gmail.com"
] | alejoveintimilla@gmail.com |
6a5bf4b0e99b1713e6792399786626de21e9b989 | 425f4829adc34b380ef81553bf094d94a8884135 | /v1beta1/test/test_vulnerability_detail.py | 45663ac219647387f54a3e9e9a2a87b2a61a2167 | [
"Apache-2.0"
] | permissive | appvia/client-python | 655a77242135d4b8d2742db8a69d569666a6ac41 | 7b7158e0b857197cabaa2ccfa71af529a09fd36d | refs/heads/master | 2020-07-18T19:12:06.489460 | 2019-09-04T11:13:52 | 2019-09-04T11:13:52 | 206,297,740 | 0 | 0 | null | 2019-09-04T10:54:33 | 2019-09-04T10:54:32 | null | UTF-8 | Python | false | false | 969 | py | # coding: utf-8
"""
grafeas.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.vulnerability_detail import VulnerabilityDetail # noqa: E501
from swagger_client.rest import ApiException
class TestVulnerabilityDetail(unittest.TestCase):
    """VulnerabilityDetail unit test stubs (generated by swagger-codegen)."""
    def setUp(self):
        # No fixtures needed yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testVulnerabilityDetail(self):
        """Test VulnerabilityDetail"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.vulnerability_detail.VulnerabilityDetail()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"danielwhatmuff@gmail.com"
] | danielwhatmuff@gmail.com |
279d8933e8c2057be7901387644e7ccbc5494a53 | a39ecd4dce4b14f5d17416233fa16c76d2d3f165 | /RepositoryBootstrap/Impl/Utilities.py | 305320a07ef4e303ff131cad0be6735155fe6662 | [
"BSL-1.0",
"Python-2.0",
"OpenSSL",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] | permissive | davidbrownell/Common_Environment_v3 | 8e6bbed15004a38a4c6e6f337d78eb2339484d64 | 2981ad1566e6d3c00fd390a67dbc1277ef40aaba | refs/heads/master | 2022-09-03T19:04:57.270890 | 2022-06-28T01:33:31 | 2022-06-28T01:33:31 | 132,171,665 | 0 | 0 | BSL-1.0 | 2021-08-13T21:19:48 | 2018-05-04T17:47:30 | Python | UTF-8 | Python | false | false | 4,909 | py | # ----------------------------------------------------------------------
# |
# | Utilities.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-05-02 15:57:42
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Utilities used by multiple files within this module."""
import hashlib
import importlib
import os
import re
import sys
from contextlib import contextmanager
import six
from RepositoryBootstrap import Constants
from RepositoryBootstrap.Impl import CommonEnvironmentImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironmentImports.CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GenerateCommands(functor, is_debug):
    """Generates shell-specific commands as returned by the provided functor.

    The functor may return an int (a result code), a (result, commands)
    tuple, or just a list of commands (implying result 0).  Any exception is
    converted into an error Message/Exit command pair with result -1.

    Returns:
        (result, generated_commands)
    """
    assert functor

    commands = []

    try:
        returned = functor()

        if isinstance(returned, int):
            result = returned
        elif isinstance(returned, tuple):
            result, commands = returned
        else:
            result, commands = 0, returned

    except Exception as ex:
        if is_debug:
            import traceback

            error = traceback.format_exc()
        else:
            error = str(ex)

        commands = [
            CommonEnvironmentImports.CurrentShell.Commands.Message(
                "\n\nERROR: {}".format(
                    CommonEnvironmentImports.StringHelpers.LeftJustify(error, len("ERROR: ")),
                ),
            ),
            CommonEnvironmentImports.CurrentShell.Commands.Exit(return_code=-1),
        ]
        result = -1

    if is_debug and commands:
        # Prefix the generated script with a "Debug:" dump of the commands.
        debug_banner = CommonEnvironmentImports.CurrentShell.Commands.Message(
            "{}\n".format(
                CommonEnvironmentImports.StringHelpers.Prepend(
                    "Debug: ",
                    CommonEnvironmentImports.CurrentShell.GenerateCommands(commands),
                    skip_first_line=False,
                ),
            ),
        )
        commands = [debug_banner] + commands

    return result, commands
# ----------------------------------------------------------------------
def CalculateFingerprint(repo_dirs, relative_root=None):
    """Returns a value that can be used to determine if any configuration info
    has changed for a repo and its dependencies.

    Maps each repo dir (made relative to ``relative_root`` when one is given)
    to the MD5 hex digest of its setup-customization file.  Repos without
    that file are skipped.
    """
    fingerprints = {}

    for repo_dir in repo_dirs:
        setup_filename = os.path.join(repo_dir, Constants.SETUP_ENVIRONMENT_CUSTOMIZATION_FILENAME)
        if not os.path.isfile(setup_filename):
            continue

        hasher = hashlib.md5()

        with open(setup_filename, 'rb') as f:
            # Skip the file header, as it has no impact on the file's
            # actual contents.
            skipping_header = True

            for line in f:
                if skipping_header and line.lstrip().startswith(b'#'):
                    continue

                skipping_header = False
                hasher.update(line)

        key = repo_dir
        if relative_root:
            key = CommonEnvironmentImports.FileSystem.GetRelativePath(relative_root, repo_dir)

        fingerprints[key] = hasher.hexdigest()

    return fingerprints
# ----------------------------------------------------------------------
@contextmanager
def CustomMethodManager(customization_filename, method_name):
    """Attempts to load a customization filename and extract the given method.

    Yields the method (or None when the file or the attribute does not
    exist).  sys.path and sys.modules are restored on exit.
    """
    if not os.path.isfile(customization_filename):
        yield None
        return

    module_dir, module_filename = os.path.split(customization_filename)
    module_name = os.path.splitext(module_filename)[0]

    # Make the customization module importable for the duration of the block.
    sys.path.insert(0, module_dir)
    with CommonEnvironmentImports.CallOnExit(lambda: sys.path.pop(0)):
        module = importlib.import_module(module_name)

        # Unregister the module on exit so subsequent loads start fresh.
        with CommonEnvironmentImports.CallOnExit(lambda: sys.modules.pop(module_name)):
            yield getattr(module, method_name, None)
| [
"db@DavidBrownell.com"
] | db@DavidBrownell.com |
6512eaa1731b6c91c774540047b19a5886180e3b | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /GIT-USERS/TOM2/CS32_Architecture_GP/day4/simple.py | b368bec2f1d9590d966617b2ce072a8e347ffd3e | [] | no_license | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,238 | py | # Let's build a data driven machine!
import sys
# What do we need to have our machine working?
"""
- Some sort of memory
- Some way of stopping operation
- Some way of keeping the CPU running
- Some sort of storage for local variables seperate from main RAM (memory) eg; Registers
- Some sort of operations that can be performed such as (printing something, saving data to a variable[register] )
- Some FETCH, DECODE, EXECUTE CYCLE
"""
# Operations that we can perform
HALT = 0b00000001
PRINT_VLAD = 2
PRINT_NUM = 3
SAVE = 0b10000010
PRINT_REG = 5
ADD = 6
# PUSH and POP
PUSH = 7
POP = 8
# TODO: CALL and RET
CALL = 0b01001001
RET = 0b00001010
SUB = 23
PRN = 0b01000111
SHL = 0b10101100
SHR = 0b10101101
# some sort of memory (lets refactor this to load in opcodes from a file)
def load_memory(filename):
    """Load a program file into RAM.

    Each line of *filename* holds one instruction written as a binary
    literal (e.g. ``10000010``); anything after a ``#`` is a comment and
    blank lines are skipped.  Parsed values are stored sequentially into
    the module-level ``memory`` starting at address 0, echoing each one.

    Exits the interpreter with status 2 when the file does not exist.
    """
    try:
        address = 0
        with open(filename) as f:
            for line in f:
                # Strip trailing comments and surrounding whitespace.
                n = line.split("#")[0].strip()
                if n == '':
                    continue
                val = int(n, 2)  # instructions are written in base 2
                memory[address] = val
                address += 1
                print(f"{val:08b}: {val:d}")
    except FileNotFoundError:
        # Bug fix: report the file that was actually missing instead of a
        # hard-coded "(unknown)" placeholder.
        print(f"{sys.argv[0]}: {filename} not found")
        sys.exit(2)


# Main RAM: 256 cells, zero-initialized.
memory = [0] * 256
# keep track of running?
running = True
# some sort of counter (program counter: address of the next instruction)
pc = 0
# Some local var holders [registers]
registers = [0] * 10
# Stack Pointer (R7) as per specs
# index of the registers list
# SP
SP = 7
# to use to store where the top of the stack is
# 0xF4 (244)
registers[SP] = 244
# size of opcode
op_size = 1
# grab any args
if len(sys.argv) != 2:
    print("usage: simple.py filename")
    sys.exit(1)
# load opcodes in to memory
load_memory(sys.argv[1])
# REPL to run once per cycle of CPU
# inside this we will have our FETCH, DECODE, EXECUTE CYCLE
while running:
    # FETCH
    cmd = memory[pc]
    # The top two bits of the opcode encode how many operands follow,
    # so the total instruction length is (operand count + 1).
    op_size = ((cmd >> 6) & 0b11) + 1
    # DECODE
    if cmd == PRINT_VLAD:
        # EXECUTE
        print("Vlad")
    elif cmd == HALT:
        running = False
    elif cmd == PRINT_NUM:
        num = memory[pc + 1]
        print(num)
    elif cmd == PRINT_REG:
        index_of_reg = memory[pc + 1]
        num_at_reg = registers[index_of_reg]
        print(num_at_reg)
    elif cmd == SAVE:
        num_to_save = memory[pc + 1] # 300
        reg_index = memory[pc + 2]
        registers[reg_index] = num_to_save
    elif cmd == ADD:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] += registers[reg_index_b]
    elif cmd == SUB:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] -= registers[reg_index_b]
    elif cmd == SHL:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] <<= registers[reg_index_b]
    elif cmd == SHR:
        reg_index_a = memory[pc + 1]
        reg_index_b = memory[pc + 2]
        registers[reg_index_a] >>= registers[reg_index_b]
    # PUSH
    elif cmd == PUSH:
        # setup
        reg_index = memory[pc + 1]
        val = registers[reg_index]
        # decrememt Stack Pointer
        registers[SP] -= 1
        # insert val on to the stack
        memory[registers[SP]] = val
    # POP
    elif cmd == POP:
        # setup
        reg_index = memory[pc + 1]
        val = memory[registers[SP]]
        # take value from stack and put it in reg
        registers[reg_index] = val
        # increment Stack Pointer
        registers[SP] += 1
    # CALL
    elif cmd == CALL:
        # push the return address on to the stack
        registers[SP] -= 1
        memory[registers[SP]] = pc + 2
        # Set the PC to the subroutines address
        reg = memory[pc + 1]
        pc = registers[reg]
        # pc was set explicitly, so do not advance it again below.
        op_size = 0
    # RET
    elif cmd == RET:
        # POP return address from stack to store in pc
        pc = memory[registers[SP]]
        registers[SP] += 1
        # pc was set explicitly, so do not advance it again below.
        op_size = 0
    else:
        print(f"Invalid Instruction: {cmd}")
        running = False
    pc += op_size
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
e7b4933736b1017d9cecc297da7aa7482d1a900e | 87af0175ec4f9adb9412aaa1813d2b49074b76e0 | /LED_control/led_off.py | 1b4f8689cea71d0d1d2a9923bbbca7e2ac99e8fe | [] | no_license | MSHAFEEQM/Home_Auto_IOT_System | 58d5064a435cb30ccc9902529c2bd529ec5db2d6 | bc4c3ec42858e9fb5a5702afe55c512d8d377211 | refs/heads/main | 2023-07-06T16:13:18.499681 | 2021-08-15T09:58:37 | 2021-08-15T09:58:37 | 396,210,150 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | from boltiot import Bolt
api_key = "e9a57e96-679d-4ab3-988b-e7a95ad54702"
device_id = "BOLT8022282"
mybolt = Bolt(api_key, device_id)
response = mybolt.digitalWrite('0', 'LOW')
print (response)
| [
"mshafeeqmsmc@gmail.com"
] | mshafeeqmsmc@gmail.com |
0dd564c9ec118b6ab6323eccabc8304d63041320 | 0f481498bba97a7bb9f38bc2b9a1dc5b9ebf50a5 | /Pacote-download/Exercicios/ex045.py | d818f1b6795a40fce1325086d8ba0bb24fd50a3f | [
"MIT"
] | permissive | agnaka/CEV-Python-Exercicios | d7e8efd6426d60d6920ba3cfddbd049a80e7d6da | a4299abd5da283b1b15ed2436965db162f42885f | refs/heads/master | 2022-10-23T11:45:56.298286 | 2020-06-10T21:13:15 | 2020-06-10T21:13:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | from random import randint
print('-=' * 20)
print('\033[1;34mVAMOS A JOGAR JANKENPÔ!!!\033[m')
print('-=' * 20)
print('''SUAS OPÇÕES:
[1] Pedra
[2] Papel
[3] Tesoura''')
choice = int(input('Qual a sua escolha? '))
print('JAN')
print('KEN')
print('PO!!!')
itens = ('Pedra', 'Papel', 'Tesoura')
compu = randint(1, 3)
# print(compu)
if compu == 1 and choice == 1:
print('O computador escolheu {} e você {} - EMPATOU! Jogar novamente'.format('PEDRA', 'PEDRA'))
elif compu == 1 and choice == 2:
print('O computador escolheu {} e você {} - VOCÊ GANHOU!!!'.format('PEDRA', 'PAPEL'))
elif compu == 1 and choice == 3:
print('O computador escolheu {} e você {} - VOCÊ PERDEU'.format('PEDRA', 'TESOURA'))
elif compu == 2 and choice == 2:
print('O computador escolheu {} e você {} - EMPATOU! Jogar novamente'.format('PAPEL', 'PAPEL'))
elif compu == 2 and choice == 1:
print('O computador escolheu {} e você {} - VOCÊ PERDEU'.format('PAPEL', 'PEDRA'))
elif compu == 2 and choice == 3:
print('O computador escolheu {} e você {} - VOCÊ GANHOU!!!'.format('PAPEL', 'TESOURA'))
elif compu == 3 and choice == 3:
print('O computador escolheu {} e você {} - EMPATOU! Jogar novamente'.format('TESOURA', 'TESOURA'))
elif compu == 3 and choice == 1:
print('O computador escolheu {} e você {} - VOCÊ GANHOU!!!'.format('TESOURA', 'PEDRA'))
elif compu == 3 and choice == 2:
print('O computador escolheu {} e você {} - VOCÊ PERDEU'.format('TESOURA', 'PAPEL'))
| [
"andresgnaka@gmail.com"
] | andresgnaka@gmail.com |
1f786420de05855c785891cf8cc9c97b9fe8f4b5 | 15dc6e99237a5efd29a4f3ad70de562f1bf898a2 | /doc_forum/accounts/views.py | 6c48b802f1641e5fa2812fcf3f8f562012c2e8ae | [] | no_license | krisleon99/geo_forum | 7fa5e5f519eb63668d259472111db6702bdd9f27 | 8c84850969dc825f6ab871f076fbababce4f3e87 | refs/heads/master | 2020-04-09T15:18:57.725034 | 2019-01-11T19:07:02 | 2019-01-11T19:07:02 | 160,422,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
def denied_access(request):
    # Render the "access denied" page for the current user, passing the
    # active Site URL so templates can build absolute links.
    # NOTE(review): render_to_response/RequestContext are removed in modern
    # Django - migrate to django.shortcuts.render when upgrading.  The
    # template name 'denied_acces.html' looks misspelled but presumably
    # matches the actual template file; confirm before renaming.
    usuario = request.user
    return render_to_response('denied_acces.html', {'usuario':usuario, 'SITE_URL': Site.objects.get_current()}, context_instance=RequestContext(request))
| [
"chokokriss97@gmail.com"
] | chokokriss97@gmail.com |
5a86a3f8a354ef477fb3908e51fac8dc98a6ad73 | 2a7a74f42670ba916e87a4b62bc46b32db8ef522 | /python/mxnet/gluon/contrib/rnn/__init__.py | f27f20540bd0a59f568e667e65fc0fad9d6b50e6 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Views",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sergeykolychev/mxnet | 30eeb933156d509c99e2c0cde6c06fb7f2ed86c0 | d6290ad647249d0329125730f6544ce5a4192062 | refs/heads/master | 2020-12-31T07:00:06.159000 | 2017-09-25T00:28:04 | 2017-09-25T00:28:04 | 80,577,235 | 7 | 0 | null | 2017-09-08T01:32:22 | 2017-02-01T00:44:23 | Python | UTF-8 | Python | false | false | 913 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Contrib recurrent neural network module."""
from .conv_rnn_cell import *
| [
"muli@cs.cmu.edu"
] | muli@cs.cmu.edu |
5a0cd057037b8022faae596f834eb4afeb9fff0c | 514fc8a2a0da69df38f224cb30db63f9d0f6f4b2 | /entities/mm_pl_word_characteristic.py | 85a0422bd04646fd0df97f669bb227a054e83d16 | [] | no_license | TaTRaTa/RaliProject | 6fbbd24752cfd14d1cfe9ca83394cfc9ac37e903 | dce9234248b0d048fb56749aae3f7cf7586c6dcb | refs/heads/master | 2020-03-20T20:10:46.847413 | 2018-06-17T18:21:29 | 2018-06-17T18:21:29 | 137,673,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | '''
Table: mm_pl_word_characteristic
Description: Characteristics of the polish words
Field Type Null Default Comments
id_pl_word int(11) No Foreign key to “pl_word”
id_characteristic int(3) No Foreign key to “characteristic”
'''
class MmPlWordCharacteristic:
    """Link row between a Polish word and one of its characteristics.

    Mirrors the ``mm_pl_word_characteristic`` table: both identifiers are
    foreign keys and are write-once - assigning a second value raises.
    """

    def __init__(self):
        self.__id_pl_word = None
        self.__id_characteristic = None

    @property
    def id_pl_word(self):
        """Foreign key to the "pl_word" table (None until assigned)."""
        return self.__id_pl_word

    @id_pl_word.setter
    def id_pl_word(self, value):
        # Guard clause: the field may only be written while still unset.
        if self.__id_pl_word is not None:
            raise Exception('Already has value')
        self.__id_pl_word = value

    @property
    def id_characteristic(self):
        """Foreign key to the "characteristic" table (None until assigned)."""
        return self.__id_characteristic

    @id_characteristic.setter
    def id_characteristic(self, value):
        # Guard clause: the field may only be written while still unset.
        if self.__id_characteristic is not None:
            raise Exception('Already has value')
        self.__id_characteristic = value
| [
"cvetomir.defyy@gmail.com"
] | cvetomir.defyy@gmail.com |
54d2b8e77193335207f1cbd33bc7b9284e2cc2f4 | ee2230be3f5579a95495f0bbdb768502b67442ad | /ec2mc/AmazonUtils.py | 153884721d8395ff36c2a14b4152623331558dd5 | [] | no_license | exatoa/ec2mc | 508b1d832e9b8f24d338e8a2a1daa238c32e646c | c0ece797958e191c368cdc36ddd225bc285852e5 | refs/heads/master | 2020-06-11T19:47:35.777651 | 2016-12-06T15:52:34 | 2016-12-06T15:52:34 | 75,625,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,003 | py | #-*- coding: utf-8 -*-
'''
Created on 2016. 12. 03
Updated on 2016. 12. 03
@author: Zeck
'''
from __future__ import print_function
import boto3
import os
from botocore.exceptions import ClientError
from EC2 import EC2
from AmazonInfo import *
class AmazonUtils:
    '''
    Amazon Web Service control utilities.

    Thin convenience wrapper around boto3 for managing security groups,
    key pairs and EC2 instances across the accounts/regions configured
    in settings.py.

    NOTE(review): the original file used Python 2-only exception syntax
    ("except ClientError, e:") and the py2-only ``e.message`` attribute;
    both are fixed below so the module also runs on Python 3.
    '''

    accounts = None     # dict: user identity -> {'access_key': ..., 'secret_key': ...}
    keypairPath = None  # directory where downloaded .pem key files are stored

    def __init__(self, _accounts, _keyset):
        self.accounts = _accounts
        self.keypairPath = _keyset
        if os.path.exists(self.keypairPath) is False:
            os.makedirs(self.keypairPath)

    def get_session(self, _user, _region):
        """Build a boto3 session for the given account identity and region."""
        session = boto3.session.Session(
            aws_access_key_id=self.accounts[_user]['access_key'],
            aws_secret_access_key=self.accounts[_user]['secret_key'],
            region_name=_region
        )
        return session

    #################################################################
    # security_group
    #################################################################
    def create_security_group(self, _user, _region, _security_group):
        '''
        Create security_group in amazon account at specific region.
        If a group with the same name already exists, its ID is reused.
        :param _user: account identity existing in settings.py
        :param _region: region that specified by user
        :param _security_group: dictionary describing security_group
                ex) {'name':'', 'desc':'', 'rules':[
                        {'cidr':'0.0.0.0/0', 'protocol':'tcp', 'from_port':80, 'to_port':80},
                        ...
                    ]
                }
        :return: security group ID
        '''
        session = self.get_session(_user, _region)
        ec2_client = session.client('ec2')
        # Reuse an existing group with the same name instead of failing.
        response = ec2_client.describe_security_groups()
        groups = response['SecurityGroups']
        for remote_group in groups:
            if _security_group['name'] == remote_group['GroupName']:
                return remote_group['GroupId']
        # Make a new security group and attach its ingress rules.
        response = ec2_client.create_security_group(GroupName=_security_group['name'],
                                                    Description=_security_group['desc'])
        for rule in _security_group['rules']:
            ec2_client.authorize_security_group_ingress(GroupId=response['GroupId'],
                                                        IpProtocol=rule['protocol'],
                                                        CidrIp=rule['cidr'],
                                                        FromPort=rule['from_port'],
                                                        ToPort=rule['to_port'])
        return response['GroupId']

    def update_security_group(self, _user, _region, _security_group):
        '''
        Update an existing security_group: drop all of its ingress rules and
        re-create them from the supplied description.
        :param _user: account identity existing in settings.py
        :param _region: region that specified by user
        :param _security_group: same dictionary shape as create_security_group
        :return: security group ID, or False when no group with that name exists
        '''
        session = self.get_session(_user, _region)
        ec2_client = session.client('ec2')
        result = ec2_client.describe_security_groups(GroupNames=[_security_group['name'], ])
        if len(result['SecurityGroups']) == 0:
            return False
        response = result['SecurityGroups'][0]
        # delete previous rules
        ec2_client.revoke_security_group_ingress(IpPermissions=response['IpPermissions'])
        # create new rules
        for rule in _security_group['rules']:
            ec2_client.authorize_security_group_ingress(GroupId=response['GroupId'],
                                                        IpProtocol=rule['protocol'],
                                                        CidrIp=rule['cidr'],
                                                        FromPort=rule['from_port'],
                                                        ToPort=rule['to_port'])
        return response['GroupId']

    def delete_security_group(self, _user, _region, _groupID):
        '''
        Delete a specific security_group.
        :param _user: account identity existing in settings.py
        :param _region: region that specified by user
        :param _groupID: security group ID
        :return: None
        '''
        session = self.get_session(_user, _region)
        ec2 = session.client('ec2')
        ec2.delete_security_group(GroupId=_groupID)

    def search_security_group(self, _user, _region, _security_group_nme):
        '''
        Return the IDs of security groups matching the given name.
        :return: list of IDs (empty when the name is unknown)
        '''
        IDs = []
        session = self.get_session(_user, _region)
        ec2_client = session.client('ec2')
        try:
            response = ec2_client.describe_security_groups(GroupNames=[_security_group_nme])
            for group in response['SecurityGroups']:
                IDs.append(group['GroupId'])
        except ClientError:
            # Bug fix: was "except ClientError, e:" (Python 2-only syntax,
            # a SyntaxError on Python 3); the bound exception was unused.
            return IDs
        return IDs

    #################################################################
    # key_pair
    #################################################################
    def create_key_pair(self, _user, _region, _keyName):
        '''
        Create new key pair in specific region for a user and store the
        private key under self.keypairPath.
        If the key pair already exists remotely, the existing name is returned
        and no file is written.
        :param _user: Account identity existing in settings.py
        :param _region: Region that specified by user
        :param _keyName: key pair name
        :return: key pair name
        '''
        flag = False
        session = self.get_session(_user, _region)
        ec2_client = session.client('ec2')
        response = ec2_client.describe_key_pairs()
        key_pairs = response['KeyPairs']
        for key_pair in key_pairs:
            if _keyName == key_pair['KeyName']:
                flag = True
        if flag is False:
            response = ec2_client.create_key_pair(KeyName=_keyName)
            _keyName = response['KeyName']
            # Persist the private key material locally (.pem).
            f = open(os.path.join(self.keypairPath, _keyName + '.pem'), 'w')
            f.write(response['KeyMaterial'])
            f.close()
        return _keyName

    def delete_key_pair(self, _user, _region, _keyName):
        '''
        Delete the key pair on AWS and its local .pem copy.
        :return: False when the key was deleted remotely but no local file
                 existed; True otherwise.
        '''
        session = self.get_session(_user, _region)
        ec2_client = session.client('ec2')
        ret = ec2_client.delete_key_pair(KeyName=_keyName)
        if ret['ResponseMetadata']['HTTPStatusCode'] == 200:
            fname = os.path.join(self.keypairPath, _keyName + '.pem')
            if os.path.exists(fname) is True:
                os.remove(fname)
            else:
                return False
        # NOTE(review): a non-200 response also falls through to True here -
        # confirm whether that should be reported as a failure instead.
        return True

    #################################################################
    # ec2 manage
    #################################################################
    def check_ec2_ready(self, _user, _region, _instanceID):
        '''
        After the instance is created, check whether it is ready to connect.
        :param _user: user account identity existing in settings.py
        :param _region: region name
        :param _instanceID: specific instance's ID
        :return: boolean
        '''
        session = self.get_session(_user, _region)
        ec2 = session.resource('ec2')
        for status in ec2.meta.client.describe_instance_status()['InstanceStatuses']:
            if status['InstanceId'] != _instanceID: continue
            if (status['InstanceStatus']['Status'] == 'ok' and
                status['SystemStatus']['Status'] == 'ok' and
                status['InstanceState']['Name'] == 'running'):
                return True
        return False

    def check_ec2_terminated(self, _user, _region, _instanceID):
        '''
        Check whether the given instance has reached the 'terminated' state.
        :param _user: user account identity existing in settings.py
        :param _region: region name
        :param _instanceID: specific instance's ID
        :return: boolean
        '''
        session = self.get_session(_user, _region)
        ec2 = session.resource('ec2')
        for status in ec2.meta.client.describe_instance_status()['InstanceStatuses']:
            if status['InstanceId'] != _instanceID: continue
            if status['InstanceState']['Name'] == 'terminated':
                return True
        return False

    def get_instances(self, _user, _region, _name):
        '''
        Returns instances matching the given Name tag value.
        :return: the list of EC2() objects
        '''
        session = self.get_session(_user, _region)
        ec2 = session.resource('ec2')
        filters = [
            {'Name': 'tag-value', 'Values': [_name]},
        ]
        instances = ec2.instances.filter(Filters=filters)
        results = []
        for instance in instances:
            results.append(EC2(instance))
        return results

    def get_instance(self, _user, _region, _instanceID):
        '''
        Returns the instance corresponding to the given instance ID.
        :return: EC2() object
        '''
        session = self.get_session(_user, _region)
        ec2 = session.resource('ec2')
        return EC2(ec2.Instance(_instanceID))

    def get_all_instances(self, _user, _region):
        '''
        Returns EC2 objects for every instance of the user in the region.
        :return: list of EC2() objects
        '''
        session = self.get_session(_user, _region)
        ec2 = session.resource('ec2')
        results = []
        for inst in ec2.instances.all():
            results.append(EC2(inst))
        return results

    def get_instance_counts(self, _user, _onlyWorking=False):
        '''
        Count the user's instances across every region in REGIONS.
        :param _onlyWorking: when True, terminated instances are excluded
        :return: the number of instances (int)
        '''
        count = 0
        for region in REGIONS.keys():
            session = self.get_session(_user, region)
            ec2 = session.resource('ec2')
            for instance in ec2.instances.all():
                if _onlyWorking is True:
                    if instance.state['Name'] == 'terminated': continue
                count += 1
        return count

    def get_instances_state(self, _user, _region, _filter_name=None, _filter_state=None):
        '''
        Get instances for a specific user/region, optionally filtered.
        :param _filter_name: instance Name tag to filter on
        :param _filter_state: instance state to filter on
                ['running', 'terminated', 'working', ...]
                'working' is a local alias (not an AWS state) meaning
                "any state except terminated".
        :return: the list of EC2() objects
        '''
        session = self.get_session(_user, _region)
        ec2 = session.resource('ec2')
        filters = []
        if _filter_name is not None:
            filters.append({'Name': 'tag:Name', 'Values': [_filter_name]})
        if _filter_state is not None:
            if _filter_state == 'working':
                values = ['pending', 'running', 'shutting-down', 'stopping', 'stopped']
            else:
                values = [_filter_state]
            filters.append({'Name': 'instance-state-name', 'Values': values})
        # getting instances
        results = []
        instances = ec2.instances.filter(Filters=filters)
        for instance in instances:
            results.append(EC2(instance))
        return results

    #################################################################
    # ec2 command
    #################################################################
    def create_ec2(self, _name, _user, _region, _os_type=u'ubuntu', _instance_type=None, _security_group=None):
        '''
        Create an EC2 instance in the given account/region.
        :param _name: Instance name (stored in the Name tag)
        :param _user: user account identity existing in settings.py
        :param _region: Region that specified by user
        :param _os_type: OS type to be installed on the instance (key of AMI)
        :param _instance_type: instance type; 't2.micro' is the default
        :param _security_group: same dictionary shape as create_security_group
        :return: the dictionary of results
                 {"status": "error" or "success",
                  "message": "description of status",
                  "instance": EC2() object if this function succeeds}
        '''
        if _os_type not in AMI:
            return {"status": "error", "message": u'Unknown OS Type.'}
        if _instance_type is not None and _instance_type not in INSTANCE_TYPE:
            return {"status": "error", "message": u'Unknown Instance Type.'}
        targetAMI = AMI[_os_type][_region]
        instanceType = _instance_type if _instance_type is not None else 't2.micro'
        SGID = self.create_security_group(_user, _region, _security_group)
        keyName = self.create_key_pair(_user, _region, _keyName=_user + u'-' + _region)
        try:
            session = self.get_session(_user, _region)
            ec2 = session.resource('ec2')
            data = ec2.create_instances(ImageId=targetAMI,
                                        MinCount=1,
                                        MaxCount=1,
                                        InstanceType=instanceType,
                                        SecurityGroupIds=[SGID],
                                        KeyName=keyName)
            if data is None or len(data) == 0:
                return {"status": "error", "message": u'Unknown error! There is no result!'}
            # Tag the new instance with its display name.
            ec2.create_tags(Resources=[data[0].id], Tags=[{'Key': 'Name', 'Value': _name}])
            instance = EC2(data[0])
            instance.Name = _name
        except ClientError as e:
            # Bug fix: was "except ClientError, e:" (py2-only syntax) and
            # "e.message" (AttributeError on Python 3); use str(e) instead.
            return {"status": "error", "message": str(e)}
        return {"status": "success", "message": "Created instance", "instance": instance}

    def delete_ec2(self, _user, _region, _instances):
        '''
        Terminate the given EC2 instances.
        :param _instances: the list of instance IDs
        :return: the number of terminated instances, or None for an empty list
        '''
        if len(_instances) <= 0: return None
        session = self.get_session(_user, _region)
        ec2_client = session.client('ec2')
        response = ec2_client.terminate_instances(InstanceIds=_instances)
        return len(response['TerminatingInstances'])
| [
"exatoa@gmail.com"
] | exatoa@gmail.com |
7a78098ffbc27deea4221109dbc973f998f30cc9 | b7db8095750d1433498bb87453e7faf7531d564e | /Samify/music/urls.py | d790ee2b207c596c23b204469fe72d5ee5453905 | [] | no_license | Sambhramjain/SAMIFY | 3d2915421c422595898dd46688091afe88078d67 | 415a11d06d37fbbd487de5beaac5109a0f663e61 | refs/heads/master | 2020-09-11T11:42:24.554034 | 2019-11-16T06:10:03 | 2019-11-16T06:10:03 | 222,052,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | from django.conf.urls import url
from . import views
# URL namespace, e.g. reverse('music:detail', args=[album_id]).
app_name = 'music'
urlpatterns = [
    # Listing and authentication.
    url(r'^$', views.index, name='index'),
    url(r'^register/$', views.register, name='register'),
    url(r'^login_user/$', views.login_user, name='login_user'),
    url(r'^logout_user/$', views.logout_user, name='logout_user'),
    # Album detail and per-song favorite toggling (numeric primary keys).
    url(r'^(?P<album_id>[0-9]+)/$', views.detail, name='detail'),
    url(r'^(?P<song_id>[0-9]+)/favorite/$', views.favorite, name='favorite'),
    # NOTE(review): '[a-zA_Z]' is probably a typo for '[a-zA-Z]' - as written
    # it also matches '_' and omits B-Y. Confirm against views.songs before
    # changing the pattern.
    url(r'^songs/(?P<filter_by>[a-zA_Z]+)/$', views.songs, name='songs'),
    # Album and song creation/deletion.
    url(r'^create_album/$', views.create_album, name='create_album'),
    url(r'^(?P<album_id>[0-9]+)/create_song/$', views.create_song, name='create_song'),
    url(r'^(?P<album_id>[0-9]+)/delete_song/(?P<song_id>[0-9]+)/$', views.delete_song, name='delete_song'),
    url(r'^(?P<album_id>[0-9]+)/favorite_album/$', views.favorite_album, name='favorite_album'),
    url(r'^(?P<album_id>[0-9]+)/delete_album/$', views.delete_album, name='delete_album'),
]
| [
"noreply@github.com"
] | Sambhramjain.noreply@github.com |
cf50250d8ef3adadc370a28b4e97588d22adf4a9 | 8898273f9811fab29eb5621734bafcdf204d8229 | /scipy-stubs/special/_precompute/expn_asy.pyi | 61ecaf6d73b6d529e4f36b9d6019a65c5721a799 | [] | no_license | tyrion/scipy-stubs | 628ad6321a7e1502683a2b55a759777508ab4b67 | bf49a91313523c4f635bc3e5d14444c1361caf64 | refs/heads/master | 2020-05-30T21:59:43.001510 | 2019-06-03T10:30:54 | 2019-06-03T10:30:54 | 189,984,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | pyi | # Stubs for scipy.special._precompute.expn_asy (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
x: Any
def generate_A(K: Any): ...
WARNING: str
def main() -> None: ...
| [
"germano.gabbianelli@contentwise.tv"
] | germano.gabbianelli@contentwise.tv |
ae6aa22fda169aedfd0469dd32c4d41c9c318bff | 2feb48a31c0091c70d27361ad28852a1b14a29c9 | /cv_anim.py | 756fbd9e3dcf5d6bedf8235cdc3c701a4feb8d7e | [] | no_license | VrutikShah/quad-control | 3ef826758b1561464dfb98a076fef1a146e55933 | 94ff09f7deb7c7c6e7acad2fe3a9bb7f80128885 | refs/heads/master | 2023-04-28T16:25:59.059958 | 2021-05-12T18:31:33 | 2021-05-12T18:31:33 | 365,521,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,904 | py |
import numpy
import random
import cv2
import sys
def move_randomly(x, y, wind=None):
    """Move the cell at (x, y) one random step, drawing the result on `Fra`.

    `wind` is an 8-element list [dx, jitter_x, dy, jitter_y, x_min, x_max,
    y_min, y_max]: inside the (x_min..x_max, y_min..y_max) window the drift
    (dx, dy) is added on top of the random jitter.

    map_matrix cell codes as used below: 0 empty, 1 live cell, 3 obstacle,
    4 trap/dead cell.  Returns the new [x, y] (unchanged when the move is
    blocked, out of bounds, or the cell is already dead).
    """
    if wind is None:  # idiom fix: compare to None with `is`
        wind = [0, 0, 0, 0, 0, 0, 0, 0]
    ox = random.randint(-wind[1] - 1, wind[1] + 1)
    oy = random.randint(-wind[3] - 1, wind[3] + 1)
    clr1 = random.randint(0, 150)    # r color
    clr2 = random.randint(130, 256)  # g color
    clr3 = random.randint(150, 250)  # b color
    # Bug fix: the original read "x1=x2=x" then "x1=y2=y", clobbering x1 with
    # the y value.  x1/y1 were always reassigned before use, so behavior is
    # unchanged, but the intent is clearly x2 = x, y2 = y.
    x2 = x
    y2 = y
    if map_matrix[y2][x2] == 4:
        # Already a dead/trapped cell: it never moves again.
        return [x2, y2]
    if (x2 > wind[4] and x2 < wind[5]) and (y2 > wind[6] and y2 < wind[7]):
        # Inside the wind window: apply drift plus jitter.
        x1 = x2 + wind[0] + ox
        y1 = y2 + wind[2] + oy
    else:
        x1 = x2 + ox
        y1 = y2 + oy
    if x1 in range(1, dim_x) and y1 in range(1, dim_y) and map_matrix[y1][x1] != 1 and map_matrix[y1][x1] != 3 and map_matrix[y1][x1] != 4:
        # Destination is free: erase the old dot, draw the new one.
        cv2.circle(Fra, (x2, y2), worm_size, (0, 0, 0), -1)
        map_matrix[y2][x2] = 0
        cv2.circle(Fra, (x1, y1), worm_size, (clr1, clr2, clr3), -1)
        map_matrix[y1][x1] = 1
        return [x1, y1]
        # (an unreachable duplicate `return [x2, y2]` was removed here)
    elif x1 <= dim_x and x1 >= 0 and y1 <= dim_y and y1 >= 0 and map_matrix[y1][x1] == 4:
        # Destination is a trap: the cell dies in place (drawn red).
        # `== 4` already implies the original's redundant != 1 / != 3 tests.
        cv2.circle(Fra, (x2, y2), worm_size, (50, 50, 255), -1)
        map_matrix[y2][x2] = 4
        return [x2, y2]
    else:
        # Blocked or out of bounds: redraw in place with a fresh color.
        cv2.circle(Fra, (x2, y2), worm_size, (clr1, clr2, clr3), -1)
        map_matrix[y2][x2] = 1
        return [x2, y2]
def print_inthebox(text_box, box_count ,y_box,x_box):
    # Draw "<label><count>" on the frame at (y_box, x_box) in black.
    show_text = text_box+str(box_count)
    cv2.putText(Fra,show_text,(y_box,x_box), font, 0.4, (0,0,0), 1, cv2.LINE_AA)
def trap_box(x1,x2,y1,y2):
    # Paint the rectangle [x1,x2) x [y1,y2) as a trap (cell code 4) and
    # return its area, so trap pixels can be excluded from the dead-cell count.
    for i in range(x1 ,x2):
        for t in range(y1 ,y2):
            cv2.circle(Fra,(i,t),0,(33,33,130),-1)
            map_matrix[t][i]=4
    return abs((y2-y1)*(x2-x1))
def obstecle_box(x1,x2 ,y1 ,y2):
    # Paint the rectangle [x1,x2) x [y1,y2) as an impassable obstacle (code 3).
    for i in range(x1 ,x2):
        for t in range(y1 ,y2):
            cv2.circle(Fra,(i,t),0,(85,50,10),-1)
            map_matrix[t][i]=3
def cell_generator(y):
    # Spawn `worm_number` new cells at random spots inside the seed window and
    # return the updated remaining-capacity counter.
    # NOTE(review): the inner while only runs when y < 0 and decrements y
    # further - presumably a guard against over-spawning; confirm the intent.
    for i in range(worm_number):
        worm = [random.randint(offset_position_begin_x,offset_position_end_x),random.randint(offset_position_begin_y,offset_position_end_y)]
        worms.append(worm)
        while worm in worms and y <0 :
            worm = move_randomly(worms[i][0],worms[i][1])
            y = y -1
    return y
# --- Simulation parameters -------------------------------------------------
dim_x = 1540
dim_y = 750
worm_size = 0
worm_number = 10
number_group = 500
# Seed window where new cells spawn.
offset_position_begin_x = 700
offset_position_begin_y = 380
offset_position_end_x = 752
offset_position_end_y = 432
activation_obstacles = False
activation_trap = False
info_box = True
direction_change=False
vortex = True
# Wind strength (drift) and jitter amplitude.
v_sp = 5
v_jk = 3
# Frame buffer (BGRA) and overlay font.
Fra = numpy.zeros((dim_y, dim_x, 4),numpy.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.namedWindow('Lotfi Leviaton worm simulation Dz ------------- Version-2.4 ------------------------ I hope !', cv2.WINDOW_NORMAL)
cv2.rectangle(Fra,(0,0),(dim_x+1,dim_y+1),(93,89,87),-1)
# Occupancy grid; 2 = untouched background (see move_randomly for codes).
map_matrix = numpy.full((dim_y+1,dim_x+1),2)
print(sys.getsizeof(map_matrix) , 1 in map_matrix)
worms =[]
worm = []
count_iteration = 0
# Capacity: one cell per pixel of the seed window.
worms_max_exided_number = (offset_position_end_y-offset_position_begin_y)*(offset_position_end_x-offset_position_begin_x)
print('maximum wors =', worms_max_exided_number,'worm_number', worm_number,'len(worms)', len(worms))
# Center of the seed window (note: both differences are of a value with
# itself, so vx/vy collapse to the begin offsets as written).
vx= round(((offset_position_end_x- offset_position_end_x)/2) + offset_position_begin_x)
vy =round(((offset_position_end_y- offset_position_end_y)/2) + offset_position_begin_y)
if vortex == True :
    # Four quadrant winds around (vx, vy) forming a swirl.
    wind_effect =[[-v_sp,v_jk,v_sp,v_jk,0,vx,0,vy],[v_sp,v_jk,v_sp,v_jk,0,vx,vy,dim_y+1]
,[-v_sp,v_jk,-v_sp,v_jk,vx,dim_x+1,0,vy],[v_sp,v_jk,-v_sp,v_jk,vx,dim_x+1,vy,dim_y+1]]
else :
    # Single uniform wind over the whole frame.
    wind_effect =[[0,5,10,5,0,1540,0,750]]
if activation_obstacles == True :
    obstecle_box(800,890,0,751)
    obstecle_box(800,820,160,245)
    obstecle_box(800,820,255,600)
    obstecle_box(550, 600, 500, 550)
    obstecle_box(0, 800, 550, 751)
trap1 =trap2 =trap3= trap4 =0
if activation_trap == True :
    trap1 =trap_box(400,420,200,220)
    #trap2 =trap_box(800,820,140,160)
    #trap3 = trap_box(800,820,180,300)
    #trap4 =trap_box( 730,780,700,750)
if info_box == True:
    # Reserve the top-left corner for the stats overlay.
    obstecle_box(0, 255, 0, 100)
# --- Main loop: spawn, apply winds, move every cell, draw stats ------------
while worms_max_exided_number-worm_number >=0 :
    cv2.imshow('Lotfi Leviaton worm simulation Dz ------------- Final Version------------------------- I hope !', Fra)
    count_iteration += 1
    if count_iteration%30 == 0 and direction_change==True and count_iteration>300 :
        # Periodically randomize the wind when direction_change is enabled.
        wind_effect=[[random.randint(-10,10),random.randint(10,15),random.randint(-10,10),random.randint(10,15),0,1540,0,751]]
    if count_iteration%5 == 0 and number_group >0:
        worms_max_exided_number=cell_generator(worms_max_exided_number)
        number_group -=1
    for t in wind_effect:
        for i , worm in enumerate(worms) :
            worms[i] = move_randomly(worms[i][0],worms[i][1],t)
    if info_box == True :
        cv2.rectangle(Fra,(10,5),(240,95),(200,200,200),-1)
        total_traps =trap1+trap2+trap3+trap4
        print_inthebox(' Total number of cells = ',(map_matrix==1).sum(),10,20)
        print_inthebox(' Total of dead cells = ', (map_matrix==4).sum()-total_traps ,10,50)
        print_inthebox(' Number of iteration = ',count_iteration,10,80)
    k = cv2.waitKey(10)
    if k == 27:
        # ESC pressed: print final tallies and stop.
        print((map_matrix==1).sum()-(map_matrix==4).sum()+total_traps ,worms_max_exided_number, count_iteration)
        break
cv2.destroyAllWindows()
"44443648+VrutikShah@users.noreply.github.com"
] | 44443648+VrutikShah@users.noreply.github.com |
0d32c0b115941112465bddac00f22ba07aac683b | af5a440251bdf7b44cd99278f1de3c1c0c37c81b | /Version3.1/modista/tienda.py | 1f7bfdd594c745e247b7174b10d15c33b258adc7 | [] | no_license | Kvedulfr/Odoo_Project | 5e7f817f60fb0c78327d02d04cf4aeff245abcb2 | d2ed4adabc8d4a02ed8d974be9e695ab8eb7b978 | refs/heads/master | 2021-03-29T08:54:56.968903 | 2020-05-19T11:06:05 | 2020-05-19T11:06:05 | 247,939,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,210 | py | #-*- encoding: utf-8 -*-
from odoo import time, models, fields, api
#Import para controlar Validar los campos
from odoo.exceptions import UserError, AccessError, ValidationError
class cliente(models.Model):
    """Customer of the dressmaker's shop."""
    # Model name used to refer to this class from the views.
    _name = 'modista.cliente'
    # Field shown as the record's display name in many2one widgets.
    _rec_name = 'nombre'
    # Phone numbers must be unique across customers.
    _sql_constraints = [
        ('telefono_unique',
        'unique(telefono)',
        "El telefono ya existe, comprueba que el cliente no lo haya insertado antes.")]
    # Field attributes; `string` is the user-visible label.
    nombre = fields.Char(string='Nombre Cliente', required=True)
    telefono = fields.Integer(string='Teléfono', required=True, help="Si no te dan telefono puedes ponerlo en 0")

    @api.multi
    def limpiar_registros(self):
        """Reset the form fields to empty values."""
        self.nombre = ""
        # NOTE(review): telefono is an Integer field; assigning "" relies on
        # Odoo's write-time coercion - confirm, or assign 0 explicitly.
        self.telefono = ""

    @api.constrains('telefono')
    def _check_number(self):
        """Validate that a non-zero phone number has exactly 9 digits."""
        telefono = self.telefono
        if telefono and len(str(abs(telefono))) != 9:
            # Bug fix: the original concatenated str + int
            # ('...' + telefono), which raised TypeError instead of
            # surfacing the validation message.
            raise ValidationError('Has insertado un numero de telefono invalido tiene: ' + str(telefono))
class pedido(models.Model):
    """Customer order: type, dates, payment status, price and description."""
    # Model name used to reference this class from the views.
    _name= 'modista.pedido'
    # DB-level checks: payment/pickup dates cannot precede the order date.
    _sql_constraints = [
        ('fecha_pago_check',
         'CHECK(fecha_pago >= fecha_p)',
         "La fecha de pago no puede ser menor que del pedido"),
        ('fecha_r_check',
         'CHECK(fecha_r >= fecha_p)',
         "La fecha de recogida no puede ser menor que del pedido")]
    t_pedido= fields.Selection(string='Tipo pedido',selection=[('1','Arreglo'),('2','Cofeccion a Medida'),('3','Traje Sevillana')], required=True)
    fecha_p= fields.Date(string='Fecha de pedido',required=True)
    fecha_pago= fields.Date(string='Fecha de pago')
    fecha_r= fields.Date(string='Fecha de recogida')
    pagado=fields.Selection(string='¿Pagado?',selection=[('1','Pagado'),('2','No Pagado')], required=True)
    #pagado=fields.Boolean(string='¿Pagado?')
    #cliente=fields.Many2one("modista.cliente",string="Cliente")
    cliente=fields.Many2one('modista.cliente','Cliente',required="true")
    precio= fields.Float(string="Precio", group_operator="sum")
    descripcion= fields.Text('Descripcion de Pedido', required=True)
    image= fields.Binary('Imagen')
    # Field attribute reference: https://odoo-new-api-guide-line.readthedocs.io/en/latest/fields.html

    @api.multi
    def limpiar_registros(self):
        """Reset the record (commented-out fields below must not be null)."""
        self.fecha_pago = None
        self.fecha_p = time.strftime("%Y-%m-%d")
        self.fecha_r = None
        self.precio = 0
        self.descripcion = ""
        #self.cliente=""

    # Validation: the price must not be negative.
    @api.constrains('precio')
    def _check_price(self):
        if self.precio < 0:
            raise ValidationError('Has insertado un valor negativo por lo tanto no es valido')

    def fecha_actual(self):
        # NOTE(review): original comment says "unused"; writes `self.fecha`,
        # a field not declared on this model — confirm before relying on it.
        self.fecha = time.strftime("%Y-%m-%d")
class gastos(models.Model):
    """Expense line: purchased product, quantity, cost and purchase date."""
    _name= 'modista.gastos'
    nombre_p=fields.Char(string="Producto", required=True)
    cantidad=fields.Integer(string="Cantidad",required=True)
    coste=fields.Float(string="Precio", group_operator="sum")
    descripcion=fields.Text(string="Comentario")
    fecha_c=fields.Date(string="Fecha de compra")

    # Button action to reset the record.
    @api.multi
    def limpiar_registros(self):
        """Reset the record (commented-out fields must not be null)."""
        #self.t_pedido = ""
        self.nombre_p = ""
        self.cantidad = 0
        self.coste = 0
        self.fecha_c = time.strftime("%Y-%m-%d")
        self.descripcion = ""

    # Validation: the cost must not be negative.
    @api.constrains("coste")
    def _check_coste(self):
        if self.coste < 0:
            raise ValidationError("No se puede insertar un coste negativa")

    # Validation: a negative quantity cannot be inserted.
    @api.constrains('cantidad')
    def _check_stock(self):
        if self.cantidad < 0:
            raise ValidationError('No puedes insertar una cantidad negativa')
| [
"franciscojose.munoz.exposito@iesjandula.es"
] | franciscojose.munoz.exposito@iesjandula.es |
class TypeException(Exception):
    """Base class for semantic-analysis errors carrying a source position."""

    def __init__(self, position, reason):
        self.position = position
        self.reason = reason

    def __repr__(self):
        return '{}: {} at line {}'.format(self.__class__.__name__, self.reason, self.position)

    __str__ = __repr__


class MatrixDimensionsError(TypeException):
    """Raised when a matrix literal has rows of differing lengths."""

    def __init__(self, position):
        super().__init__(position, 'Matrix needs to have rows of equal length')


class ArithmeticOperationError(TypeException):
    """Raised for invalid operands in an arithmetic expression."""
    pass


class MatrixAccessError(TypeException):
    """Raised for invalid matrix element access."""
    pass


class InvalidNameError(TypeException):
    """Raised when an identifier is used before being defined."""

    def __init__(self, name, position):
        super().__init__(position, '{} undefined'.format(name))
        self.name = name


class InvalidRangeError(TypeException):
    """Raised for a malformed range expression."""

    def __init__(self, position, addition):
        super().__init__(position, 'Invalid range ' + str(addition))


class StatementNotAllowetOutsideLoopError(TypeException):
    """Raised when a loop-only statement (break/continue) appears outside a loop.

    The class name keeps its historical misspelling for backward
    compatibility; prefer the correctly spelled alias below in new code.
    """

    def __init__(self, statement_name, position):
        super().__init__(position, 'Statement {} not allowed outside of loop'.format(statement_name))


# Backward-compatible, correctly spelled alias for the class above.
StatementNotAllowedOutsideLoopError = StatementNotAllowetOutsideLoopError


class ArgumentError(TypeException):
    """Raised for a wrong number or type of call arguments."""
    pass
"majron15@gmail.com"
] | majron15@gmail.com |
5c09abb2ec9acd0688c19182dae38e4aa1c57115 | 57b36a2bc74220ebc6af3fcb3a1909edfa53ec9d | /xuexiqiangguo/tasks/others.py | c84adaea8b6aea87adfb2966fe881b2dcf5f05b6 | [] | no_license | lilunjiaax/JvlunlTest | 066324c88be23b493560588d4d7e1ffd7d9a907d | ad8251b26fcc4dd872df1396ba0db50509cd623c | refs/heads/master | 2022-12-06T00:09:54.267633 | 2020-02-09T12:53:08 | 2020-02-09T12:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import time
import os
import subprocess

# NOTE(review): the triple-quoted block below is a bare string literal kept
# as scratch notes (adb shell commands for tapping, taking screenshots,
# pulling files to the PC, and swiping); it is evaluated and discarded at
# import time, never executed.
"""
模拟点击位置:
import os
os.system('adb shell input tap 100 100');
实现截图判断:
import os
os.system("adb shell /system/bin/screencap -p /sdcard/4.png")
0
将截图保存到电脑:
os.system("adb pull /sdcard/4.png d:phone_file/5.png")
/sdcard/4.png: 1 file pulled. 12.3 MB/s (496759 bytes in 0.038s)
0
滑屏移动:每一个栏的像素是 :328
>>> os.system("adb shell input swipe 200 828 200 500 1000")
200 565 200 400
800 635
"""
| [
"jvlunl@163.com"
] | jvlunl@163.com |
727caf6baa73d7e0a199df4bb555b86a4fd71a1e | badbb9313843d9f1888416f23d18ce698e51fd4e | /ex30.py | fe4a020510d4434f483a532168624436b3680aee | [] | no_license | sund888/hello-world | 31b70d159e651112aad0d7c92584b67747bbbdbc | 7ef3f92a6f842d97a4e29fe4aac252489d82737c | refs/heads/master | 2020-12-24T13:20:19.188668 | 2016-05-06T20:19:14 | 2016-05-06T20:19:14 | 37,608,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | people = 30
cars = 40
buses = 15

# Compare group sizes and print a transport decision.
# BUG FIX: the original used Python-2-only `print` statements, a syntax
# error under Python 3.  With a single string argument the parenthesised
# form below behaves identically on Python 2 and Python 3.
if cars > people:
    print("We should take the cars.")
elif cars < people:
    print("We should not take the cars.")
else:
    print("We can't decide.")

if buses > cars:
    print("That too many buses.")
elif buses < cars:
    print("Maybe we could take the buses.")
else:
    print("We still can't decide.")

if people > buses:
    print("Alright, let's just take the buses.")
else:
    print("Fine, let's stay home then.")
| [
"SDang@playnetwork.com"
] | SDang@playnetwork.com |
aad39fc7e11a137cab21efc908a1d657c84ef517 | 4584f968518a301716d42f229ce345307d87b852 | /train/Train.py | 21127f47c60c162ca125b1b0dd37400f7f068f23 | [] | no_license | celisun/Playing_atari_with_Reinforcement_Learning | f951dfc46229260853b86628fb309cbd13a46a82 | 18e400ba958dd6f2edfde3b752aa6fd499bc7b46 | refs/heads/master | 2021-09-10T10:42:50.679935 | 2018-03-24T17:17:26 | 2018-03-24T17:17:26 | 108,890,622 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,860 | py | import gym
from gym import envs
from gym import wrappers
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from Actor import Actor
from Critic import Critic
from SumTreeMemoryBuffer import SumTreeMemoryBuffer
from collections import namedtuple
from itertools import count
# One experience-replay record: (s, a, r, s').
Transition = namedtuple('Transition',
                        ('state', 'action', 'reward', 'next_state'))

# set up matplotlib: use IPython display hooks when running inline
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display

plt.ion()  # interactive mode so figures refresh without blocking
# Train ACER-p, prioritized ACER
def start_p(GAME_NAME, BATCH_SIZE=32, MEMORY_CAPACITY=50000):
    """Train an actor-critic agent with prioritized experience replay.

    Returns four per-episode lists: running rewards, durations, mean actor
    losses and mean critic losses.
    """
    env = gym.make(GAME_NAME)
    actor = Actor(env.observation_space, env.action_space)
    critic = Critic(env.observation_space, env.action_space)
    # Per-episode bookkeeping returned to the caller.
    reward_per_epi=[]
    durations_per_epi=[]
    l_A=[]
    l_C=[]
    MAX_EPISODE = 150
    RENDER = False
    MAX_EP_STEPS= 1000
    DISPLAY_REWARD_THRESHOLD=200
    BATCH_SIZE=BATCH_SIZE
    MEMORY_CAPACITY=MEMORY_CAPACITY
    replay_memory = SumTreeMemoryBuffer(MEMORY_CAPACITY)
    print "begin.\n\n"
    for i_episode in range(MAX_EPISODE):
        s = env.reset()
        track_r = []
        critic._v_=[]  # clean critic loss buffer
        actor._loss_=[]  # clean actor loss buffer
        for t in count():
            if RENDER: env.render()
            a = actor.choose_action(s)
            s_, r, done, info = env.step(a)
            if done: r = -20  # Penalty if die
            track_r.append(r)
            # ACER: Critic Actor with Experience Replay
            if not done:
                transition = np.hstack((s, a, r, s_))
                replay_memory.save(transition)  # Save non-final transition
            # Learn only once the replay buffer is full.
            if len(replay_memory) >= MEMORY_CAPACITY:
                tree_idx, batch, ISWeights = replay_memory.sample(BATCH_SIZE)  # Sample from memory
                # NOTE(review): slices assume an 8-dim observation
                # (LunarLander-v2) — confirm for other environments.
                s_b = np.asarray(batch[-1,0:8])  # state
                s_b_n = np.asarray(batch[-1,10:18])  # next state
                a_b = np.asarray(batch[-1,8])  # action
                r_b = np.asarray(batch[-1,9])  # reward
                td_error, abs_error = critic.learn(s_b, r_b, s_b_n, ISWeights)  # Critic Learn
                replay_memory.batch_update(tree_idx, abs_error)  # Update priority
                actor.learn(s_b, a_b, td_error)  # Actor Learn
            s = s_
            print "... in episode (%d) step (%d)" % (i_episode+1,t)
            if is_ipython:
                display.clear_output(wait=True)
                display.display(plt.gcf())
            #env.render()  # display game window
            if done or t >= MAX_EP_STEPS:
                ep_rs_sum = sum(track_r)/float(t)
                # NOTE(review): the exponentially smoothed reward is kept as a
                # module-level global via globals() so it survives across calls.
                if 'running_reward' not in globals():
                    running_reward = ep_rs_sum
                else:
                    running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
                if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True  # rendering
                reward_per_epi.append(running_reward)
                durations_per_epi.append(t)
                l_A.append(np.mean(actor._loss_))
                l_C.append(np.mean(critic._loss_))
                print("episode:", i_episode, " reward:", running_reward)
                #plot(reward_per_epi, durations_per_epi, l_A, l_C)
                break
    return reward_per_epi, durations_per_epi, l_A, l_C
def plot (x,d,la,lc):
fig = plt.figure(figsize=(12,12))
plt.clf()
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
ax4 = fig.add_subplot(2,2,4)
ax1.plot(x)
ax2.plot(d)
ax3.plot(la)
ax4.plot(lc)
ax1.set_title('REWARD - Training LunarLander-v2')
ax1.set_ylabel('Reward per Episode')
ax2.set_ylabel('Durations per Episode')
ax3.set_ylabel('Actor Loss per Episode')
ax4.set_ylabel('Critic Loss(TDerror) per Episode')
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
display.clear_output(wait=True)
display.display(plt.gcf())
fig.savefig('acerp.png')
print "training done and saved to acerp.png"
def _r_percent_(r, t): # calculate reward percentage
n=len(r)
c=0.
for i in r:
if i>=t:
c += 1
return c/n
# ------ Train -----
# Tiny batch/replay sizes so learning starts almost immediately (smoke run).
r3, d3, l_A3, l_C3 = start_p('LunarLander-v2', BATCH_SIZE=1, MEMORY_CAPACITY=5)
plot(r3, d3, l_A3, l_C3)
# Report what fraction of episodes beat each reward threshold.
print "episode, r > 0: %.01f%s" % (float(_r_percent_(r3, 0)*100), "%")
print "episode, r > -1: %.01f%s" % (float(_r_percent_(r3, -1)*100) , "%")
print "episode, r > -2: %.01f%s" % (float(_r_percent_(r3, -2)*100) , "%")
print "Highest score: %.02f" % max(r3)
# Total score = per-step reward times episode duration.
print "Highest total score: %.01f" % max([x*y for x, y in zip(r3, d3)])
print "----"
print r3, d3, l_A3, l_C3
| [
"s1676438@sms.ed.ac.uk"
] | s1676438@sms.ed.ac.uk |
7557b31d5f98ea2c2c7f9df591d067658163f0a1 | 3035e6a2b4e5b5662670c188785ed9fad0e1a315 | /Chapter07/example/python/permissions/can_get_all_acc_txs.py | 18c41042f78d6a276b339039ec7df00cbc8a5bdd | [
"MIT"
] | permissive | mahen92/Hyperledger-Cookbook | 52491da47ea7e4b3d988b1303ad4641d89bd3c0e | c2aaf9f9fd58757110a2a6b3ab7498da11fba254 | refs/heads/master | 2021-01-09T15:36:10.368893 | 2020-04-10T18:17:41 | 2020-04-10T18:17:41 | 242,358,174 | 0 | 0 | MIT | 2020-02-22T14:46:54 | 2020-02-22T14:46:53 | null | UTF-8 | Python | false | false | 1,308 | py | #
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
import iroha
import commons
# Test fixture accounts: admin holds all permissions; alice's role has only
# the permission under test (kGetAllAccTxs).
admin = commons.new_user('admin@first')
alice = commons.new_user('alice@second')


@commons.hex
def genesis_tx():
    """Build and sign the genesis transaction setting up domains, roles and accounts."""
    test_permissions = iroha.RolePermissionSet([iroha.Role_kGetAllAccTxs])
    tx = iroha.ModelTransactionBuilder() \
        .createdTime(commons.now()) \
        .creatorAccountId(admin['id']) \
        .addPeer('0.0.0.0:50541', admin['key'].publicKey()) \
        .createRole('admin_role', commons.all_permissions()) \
        .createRole('test_role', test_permissions) \
        .createDomain('first', 'admin_role') \
        .createDomain('second', 'test_role') \
        .createAccount('admin', 'first', admin['key'].publicKey()) \
        .createAccount('alice', 'second', alice['key'].publicKey()) \
        .build()
    # Sign with the admin key and return the hex-encoded protobuf.
    return iroha.ModelProtoTransaction(tx) \
        .signAndAddSignature(admin['key']).finish()
@commons.hex
def account_transactions_query():
    """Query admin's transactions as alice, exercising the kGetAllAccTxs permission."""
    tx = iroha.ModelQueryBuilder() \
        .createdTime(commons.now()) \
        .queryCounter(1) \
        .creatorAccountId(alice['id']) \
        .getAccountTransactions(admin['id']) \
        .build()
    # Sign with alice's key and return the hex-encoded protobuf.
    return iroha.ModelProtoQuery(tx) \
        .signAndAddSignature(alice['key']).finish()
| [
"packt.suwarnar@gmail.com"
] | packt.suwarnar@gmail.com |
52f0fa10cb0e0b8baf2d5f5691951006d34e52a4 | 131ad725cef62cffbe5e93f05d458ecd620a3a66 | /Round2_exer2.py | 3e2eee03baabc44a8a770b6d831a6a4f3edda7b5 | [] | no_license | timothyakanni/python_exercises | 022df98340f5d332fb2c4cc2fd04a3b340f6c7e5 | afc3243777e7a677fab9c8118297dfd85a20d616 | refs/heads/master | 2023-06-09T10:09:19.655598 | 2017-11-11T18:11:33 | 2017-11-11T18:11:33 | 104,383,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # Soduku --- Done
# Sudoku board printer: fills a fixed 9x9 grid of digits into an ASCII frame.
ch_list = []
for ch in "8239574166748215939516542874862" \
          "15739539768124217349865762583941345192678198476352":
    ch_list.append(ch)

for k in range(3):  # created first structure and duplicate 3 times
    for i in range(37):  # first line of # only
        print("#", end="")
    print("")
    for m in range(5):  # duplicate second line in code below 5 times
        for j in range(37):  # created second line and use range of 5 above to duplicate 5 times
            if (j%12 == 0):
                print("#", end="")  # 3x3 box borders
            elif(j%4 == 0 and m%2 == 0):
                print("|", end="")  # cell separators on digit rows
            elif(j%4 == 0 and m%2 != 0):
                print("+", end="")  # intersections on divider rows
            elif (m % 2 != 0):
                print("-", end="")  # horizontal divider rows
            elif (j%2 == 0):
                # put the first character in the ch_list and remove the
                # character from the list
                print(ch_list[0], end="")
                del ch_list[0]
            else:
                print(" ", end="")
        print("")

for i in range(37):  # last line of # only
    print("#", end="")
print()
"noreply@github.com"
] | timothyakanni.noreply@github.com |
d804c50328a4fba8062db9b0e169945d259daf97 | 086793e114683d6ccacd6b666d0dbde09d6a43b6 | /common/img_stats.py | 1d0630cbba9f9d99a6a3c7814993cf887b885c5b | [] | no_license | weiliangxie/hierarchical_visual_localisation | 77018d4f08b149c5ae055bc298b3fda67bc1fff4 | 50ecd84ab10274b8bfe9091a9a76965af7300310 | refs/heads/master | 2023-03-15T21:46:45.340388 | 2019-12-19T13:22:19 | 2019-12-19T13:22:19 | 533,205,741 | 1 | 0 | null | 2022-09-06T07:15:27 | 2022-09-06T07:15:27 | null | UTF-8 | Python | false | false | 2,117 | py | import numpy as np
import argparse
import time
from models.cirtorch_utils.genericdataset import ImagesFromList
from evaluate import get_files, time_to_str
# CLI configuration: where the database images live and where to write stats.
parser = argparse.ArgumentParser()
parser.add_argument('--database_dir', default='data/AachenDayNight/images_upright/db', help='Specify where images are stored')
parser.add_argument('--img_format', default='.jpg', help='Image format ending')
parser.add_argument('--save_file', default='data/img_stats.txt', help='Where to store results')
parser.add_argument('--print_chunk', type=float, default=10, help='Print after every completed nth chunk of dataset')
parser.add_argument('--overfit', type=int, default=None, help='Reduce num images for testing')
args = parser.parse_args()

t = time.time()
img_names = get_files(args.database_dir, '*' + args.img_format)
images = ImagesFromList('', img_names)

# Per-image pixel counts, channel means and channel standard deviations.
ns, ms, ss = [], [], []
for i, img in enumerate(images):
    if (i % 5) == 0:
        print('\rCompleted {:4d}/{} images'.format(i, len(images)), end='')
    n_pixel = np.multiply(*img.size)
    mean, std = np.mean(img, axis=(0, 1)), np.std(img, axis=(0, 1))
    ns.append(n_pixel)
    ms.append(mean)
    ss.append(std)
    if args.overfit is not None and i > args.overfit:
        break
print('')

ns = np.array(ns)
ms = np.stack(ms)
ss = np.stack(ss)

# Pixel-count-weighted mean over all images.
overall_mean = np.average(ms, axis=0, weights=ns)
# Pooled variance: var = sum_i n_i*(s_i^2 + (m_i - mu)^2) / sum_i n_i, where
# mu is the *combined* mean; formula based on
# https://www.researchgate.net/post/How_to_combine_standard_deviations_for_three_groups
os1 = np.dot(ns, ss**2)
# BUG FIX: per-image means must be centred on the weighted overall mean (the
# original used the unweighted mean of means, which is wrong whenever image
# sizes differ).  Also: np.float was removed from NumPy; use builtin float.
os2 = np.dot(ns, (ms - overall_mean)**2)
overall_var = (os1 + os2) / np.sum(ns).astype(float)
overall_std = np.sqrt(overall_var)

print('Overall mean: {}'.format(overall_mean))
print('Overall std: {}'.format(overall_std))
result_store = np.vstack((overall_mean, overall_std))
if args.save_file != 'None':
    np.savetxt(args.save_file, result_store)

t = time.time() - t
print('Finished in {}\n({}/image)'.format(time_to_str(t), time_to_str(t / float(len(images)))))
| [
"alex_ziller@arcor.de"
] | alex_ziller@arcor.de |
aa3d939fd2f26dbafa9e15674cfdef5c88993f5c | 9fd48b8ea4260517785aac6742a44561963c7bd8 | /cjapp/urls.py | 1c418a85888f5bdfe7f9f5a6c6eca2688b531af1 | [] | no_license | manavchawla2012/test-109-q-6770d32c402944d39af188b2543b6fc2--django-2.1-p-3.7-in-docker-2 | 7d5a9307c9fcfec829c11c791beee8f6f58252fb | 320b154a7ca57aee9521492238c65d47cc6daab7 | refs/heads/master | 2022-11-24T04:07:19.118465 | 2020-08-01T23:37:03 | 2020-08-01T23:37:03 | 284,342,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | """cjapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import include, path

urlpatterns = [
    #url(r'^admin/', admin.site.urls),
    # All REST endpoints are delegated to the restapi app under /api/v1/.
    path("api/v1/", include("restapi.urls"))
]
| [
"manavchawla2012@gmail.com"
] | manavchawla2012@gmail.com |
049e59ba754b68bc1d3f203412ddeb720e1b87c9 | fced5a1cd9f0b7a86cf47dc61dbf4bd15b1ba569 | /admin_api/google_api/migrations/0004_googleapi_limite.py | 136e21b51f07520ec1cd5f1b2ba5848c2483fecb | [] | no_license | Jujulego/admin | b65bc9a85d8b20ddc797f0eae413cd0bdc3bc774 | 1e063094aa86bb4236f7f21e86f98b289dc15a1a | refs/heads/master | 2022-07-11T20:44:59.674797 | 2019-01-21T19:10:32 | 2019-01-21T19:10:32 | 144,492,938 | 0 | 0 | null | 2021-07-02T23:06:05 | 2018-08-12T18:56:17 | Python | UTF-8 | Python | false | false | 417 | py | # Generated by Django 2.1.5 on 2019-01-08 11:44
from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds a non-null BigInteger `limite` column to GoogleAPI, backfilled with 0."""

    dependencies = [
        ('google_api', '0003_maillog'),
    ]

    operations = [
        migrations.AddField(
            model_name='googleapi',
            name='limite',
            field=models.BigIntegerField(default=0),
            # The default only exists to populate existing rows; the field
            # itself keeps no default afterwards.
            preserve_default=False,
        ),
    ]
| [
"julien@capellari.net"
] | julien@capellari.net |
d5b5dda3dd8f8ee6cac006a9226e8a1d0c8f87ab | 604babd949f209506b034fae5c7b28bceabb751c | /source/autoticketsys/serializers.py | 81dd450b16769c944c741706f16acf81da71fa17 | [] | no_license | ksssr801/AutoTicketingSystem | 10434cad920dc2d78dee6bbc72113bb05cf7541e | 8e428fe2ec22f70aea2a36563f60a548d0373dbd | refs/heads/master | 2021-07-13T03:48:38.979174 | 2021-06-22T18:26:45 | 2021-06-22T18:26:45 | 224,496,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from rest_framework import serializers
from autoticketsys.models import ParkingDetails, SlotDetails


class ParkingDetailsSerializer(serializers.ModelSerializer):
    """Serializes every field of ParkingDetails for the REST API."""
    class Meta:
        model = ParkingDetails
        fields = '__all__'


class SlotDetailsSerializer(serializers.ModelSerializer):
    """Serializes every field of SlotDetails for the REST API."""
    class Meta:
        model = SlotDetails
        fields = '__all__'
"krsahas.git@gmail.com"
] | krsahas.git@gmail.com |
84121e7d7b8a8ad3e425e359590d86217a83b2e0 | 4eb5659ea87745215692b863ccc8007e9e6d55dc | /deploy/genbuildnr.py | c33083d8d66b68f64dab4456cfc233fc67d47de8 | [] | no_license | rbianchi66/pyqt_md | a256bbd9f29a519d24398ee215d296780d0e354f | 11d5f06c9e79cc45a0e849fdfedf73004133e792 | refs/heads/master | 2021-01-10T05:40:19.525779 | 2019-01-04T23:13:36 | 2019-01-04T23:13:36 | 44,700,909 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,345 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import sys
import time
from os.path import *
import xml.etree.ElementTree as ET
from subprocess2 import Popen, PIPE
from common import *
def getExternals(path, env=None):
    """Return the paths reported as svn externals under *path*."""
    if env is None:
        env = os.environ.copy()
        env["LC_MESSAGES"] = "C"  # force untranslated svn output
    xml_out = Popen(["svn", "status", path, "--xml"], stdout=PIPE, env=env).stdout.read()
    status_root = ET.fromstring(xml_out)
    externals = []
    for entry in status_root.find("target"):
        if entry.find("wc-status").get('item') == 'external':
            externals.append(entry.get("path"))
    return externals
def checkIsSVNRepository(env=None, path=None):
    """Return True when `svn info` succeeds, i.e. we are in an svn working copy."""
    if env is None:
        env = os.environ.copy()
        env["LC_MESSAGES"] = "C"  # force untranslated svn output
    cmd = ["svn", "info"] + ([path] if path is not None else [])
    proc = Popen(cmd, stdout=PIPE, env=env)
    proc.wait()
    return proc.returncode == 0
def GetBuildNumber(svn_path=None, escludi_externals=False):
    """Return a build-number string for *svn_path* (default: cwd).

    Uses `svnversion` (officially recommended over parsing .svn/entries,
    whose format may change between Subversion releases), falling back to
    `git svn log` for git-svn checkouts.  When the working copy is clean,
    the number is "optimised" to the newest commit inside this directory
    or its externals, which does not change on unrelated commits.
    """
    svn_path = svn_path or "."

    def executeCommand(arg, shell=False):
        """Run *arg* and return its stripped stdout, or None on failure."""
        env = os.environ.copy()
        env["LC_MESSAGES"] = "C"
        execution = Popen(arg, stdout=PIPE, env=env, shell=shell)
        execution.wait()
        if execution.returncode:
            return None
        resp = ""
        for L in execution.stdout:
            resp += L.strip() + "\n"
        return resp.strip()

    def gitsvn_version(path):
        """Last svn revision of *path* according to git-svn, or ""."""
        shell = sys.platform.startswith("win")
        log = executeCommand(['git', 'svn', 'log', '--limit=1', path], shell=shell)
        if log is not None:
            # Second log line looks like "r1234 | author | date"; keep the number.
            rev_string = log.split("\n")[1]
            return rev_string.split(" ")[0][1:]
        return ""

    def svnversion(*args):
        return executeCommand(["svnversion"] + list(args))

    def svn_info(args):
        """Revision of the last commit touching *args* (a file), via svn info --xml."""
        svn_string = executeCommand(['svn', 'info', '--xml', args])
        if svn_string is None:
            return None
        info = ET.fromstring(svn_string)
        commits = info.findall(".//commit")
        assert len(commits) == 1
        return commits[0].attrib['revision']

    if checkIsSVNRepository(path=svn_path):
        # Files and directories need different svn queries.
        if os.path.isfile(svn_path):
            rev = svn_info(svn_path)
        else:
            rev = svnversion(svn_path)
    else:
        return gitsvn_version(svn_path)

    if rev is None:
        return ""
    if escludi_externals:
        return rev

    # Ask the externals too; a locally modified external gets an "X" marker
    # appended so the modification's origin is visible.
    for ext in getExternals("."):
        if "M" in svnversion(ext):
            rev = rev + "X-%s" % basename(ext)
            break

    # If the revision is "clean" (a single revision, no local edits), switch
    # to the newest commit inside *this* project, externals included.
    try:
        int(rev)
    except ValueError:
        return rev
    last_rev = -1
    for directory in getExternals(".") + ["."]:
        out = svnversion("-c", directory)
        # BUG FIX: the original crashed on a failed svnversion (None) and
        # compared revision *strings* lexicographically (so "9" > "10");
        # skip unusable output and compare numerically instead.
        if out is None:
            continue
        if ":" in out:
            out = out.split(":")[1]
        try:
            rev_num = int(out)
        except ValueError:
            continue
        if rev_num > last_rev:
            last_rev = rev_num
    return str(last_rev)
def getRelativePath():
    # Use `svn info` to extract the program's path relative to the root of
    # the repository it lives in.
    env = os.environ.copy()
    env["LC_MESSAGES"] = "C"
    info = {}
    # Try svn first; if this is not an svn working copy, fall back to git-svn.
    if not checkIsSVNRepository(env):
        shell = sys.platform.startswith('win')
        svninfo = Popen(["git", "svn", "info"], stdout=PIPE, env=env, shell=shell)
    else:
        svninfo = Popen(["svn", "info"], stdout=PIPE, env=env)
    svninfo.wait()
    # Parse "Key: value" output lines into a dict.
    for L in svninfo.stdout:
        L = L.strip()
        if L:
            k,v = L.split(": ", 1)
            info[k] = v
    assert info["URL"].startswith(info["Repository Root"])
    # Strip the repository root, leaving the in-repo path (starts with "/").
    return info["URL"][len(info["Repository Root"]):]
def getPathDescription():
    """Describe the checkout location: "trunk", "branch X", "tag X", or the raw relative path."""
    rp = getRelativePath()
    assert rp[0] == "/"
    components = rp[1:].split("/")
    if "trunk" in components:
        return "trunk"
    for marker, label in (("branches", "branch %s"), ("tags", "tag %s")):
        try:
            pos = components.index(marker)
        except ValueError:
            continue
        return label % components[pos + 1]
    # Could not parse: fall back to the full relative path (better than nothing).
    return rp
def isFinal():
    """True when building from a tag.

    For simplicity, final releases are assumed to be built only from tags.
    """
    rp = getRelativePath()
    assert rp[0] == "/"
    return "tags" in rp[1:].split("/")
def _GenerateBuildNrModule(fn, infos={}):
    """Write a Python module with build metadata (revision, date, path, infos).

    *fn* is the output filename, or "-" for stdout.  *infos* supplies extra
    key/value constants to embed.
    NOTE(review): the mutable default `infos={}` is mutated by setdefault
    below and therefore shared across calls — confirm this is intended.
    """
    nr = GetBuildNumber()
    tm = time.time()
    # Source appended verbatim to the generated module: runtime helpers that
    # format the build date/revision (docstrings are deliberately left as-is,
    # they are part of the generated file's content).
    helpers = """
def systemDate():
    '''
    Torna la data e l'ora di build in formato GMT ISO.
    '''
    import time
    return time.strftime("%Y-%m-%d %H:%M", time.gmtime(date))
def localeDate():
    '''
    Torna una stringa contenente la data e l'ora di build, formattata secondo
    il locale corrente. L'ora è anche aggiustata in base al fuso orario.
    '''
    import time
    return time.strftime("%c", time.localtime(date))
def localeYear():
    '''
    Torna una stringa contenente l'anno in cui è stata fatta la build.
    Utile per mostrare il copyright.
    '''
    import time
    return time.strftime("%Y", time.localtime(date))
def strictRevNum():
    '''
    Se la working copy era in uno stato consistente, torna il numero di
    versione. Torna invece None se c'erano modifiche locali, sottodirectory
    non aggiornate, o directory switchate.
    '''
    try:
        return int(revnum)
    except ValueError:
        return None
def versionString():
    '''
    Torna il numero di versione con aggiunto "-pre" se non è una versione
    finale (presa da una tag).
    '''
    if final:
        return version
    else:
        return version + "-pre"
"""
    if fn == "-":
        f = sys.stdout
    else:
        f = file(fn, "w")  # NOTE(review): Python 2 builtin `file`
    f.write("#-*- coding: utf-8 -*-\n")
    infos.setdefault('lts', False)
    # Emit extra constants in sorted order for stable output.
    for k,v in sorted(infos.items()):
        f.write("%s = %r\n" % (k,v))
    f.write("revnum = %r\n" % nr)
    f.write("date = %r\n" % tm)
    f.write("path = %r\n" % getPathDescription())
    f.write("final = %s\n" % isFinal())
    f.write(helpers)
    # NOTE(review): this also closes sys.stdout when fn == "-".
    f.close()
def GenerateBuildNrModule(fn):
    """Read deploy.cfg and write the build-number module to *fn*.

    NOTE(review): `parseConfig` and `root_dir` come from the `common` import
    at the top of the file — confirm.
    """
    cfg = parseConfig(join(root_dir, "deploy.cfg"))
    infos = dict(cfg["infos"])
    infos["name"] = cfg["name"]
    return _GenerateBuildNrModule(fn, infos)
if __name__ == "__main__":
    import sys
    if len(sys.argv) < 2:
        # BUG FIX: parenthesised print is valid on both Python 2 and
        # Python 3 (the original bare print statement breaks under 3.x).
        print("Usage: genbuildnr.py [filename]")
        sys.exit(2)
    GenerateBuildNrModule(sys.argv[1])
| [
"rbianchi66@gmail.com"
] | rbianchi66@gmail.com |
def answer(l, k):
    """Return how many snakes end up with length >= k.

    `l` is a list of snake lengths given as strings (as read from stdin).
    Walking from the longest snake down: a snake already at length k or more
    is counted; otherwise it grows by 1 by eating the next-smaller snake.

    BUG FIXES vs. the original:
    - lengths are converted to int up front (the original sorted the strings
      lexicographically, e.g. "10" < "9", and crashed with TypeError on
      `str + int` when growing a snake);
    - an empty input returns 0 instead of raising IndexError.
    """
    # Work on an int copy so the caller's list is untouched.
    lengths = sorted(int(x) for x in l)
    if not lengths:
        return 0
    num = 0
    index = len(lengths) - 1
    # Traverse from the back (largest) towards the front.
    while index > 0:
        if lengths[index] >= k:
            num += 1
            index -= 1
        else:
            lengths[index] += 1        # grow by eating...
            lengths.pop(index - 1)     # ...the next smaller snake
            index -= 1                 # the grown snake shifted down; re-check it
    # Here index == 0.
    # NOTE(review): the original tests strict equality (== k, not >= k) for
    # the last remaining snake; preserved as-is.
    if lengths[index] == k:
        num += 1
    return num
testcase=int(input())
for i in range(testcase):
    # First line of each test case: number of snakes and number of queries.
    firstLine=input()
    firstLineList=firstLine.split(' ')
    numSnakes=int(firstLineList[0])
    numQueries=int(firstLineList[1])
    # Second line: the snake lengths (kept as strings; answer() copies them).
    snakeLengthString=input()
    snakeLengthList=snakeLengthString.split(' ')
    # snakeLengthList = list(map(int, snakeLengthString.split(' ')))
    # map function is used to map characters to integers in one go
    for j in range(numQueries):
        query=input()
        query=int(query)
        # print('list now:'+str(snakeLengthList))
        print(answer(snakeLengthList, query))
"rajdeepmukhrj@gmail.com"
] | rajdeepmukhrj@gmail.com |
ef464d2028beaa30b26f3bd7318554f2e18e9109 | 7142c3941481e661075154d714a29d5e283a3074 | /Decorator1.py | d4a71bacf012ffb8e07545dfa66863b19ccd5332 | [] | no_license | nirajan5/Demo | 5642a9669fedcca47b0304ac423c0b3e6333b8e2 | 2451875bf5698cd38af69baa117c14099951bc9f | refs/heads/master | 2023-07-27T17:04:03.689673 | 2021-09-15T11:14:25 | 2021-09-15T11:14:25 | 406,732,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | def make_pretty(func):
def inner():
print("I got decorated")
func()
return inner
def simple():
    """Plain function used to demonstrate manual decoration."""
    print("I am simple")

# Call the undecorated version first.
simple()
# let's decorate this ordinary function
pretty = make_pretty(simple)
pretty()
| [
"jhanirajan5@gmail.com"
] | jhanirajan5@gmail.com |
def FirstFactorial(num):
    """Return num! computed recursively; defined as 1 for num <= 1.

    BUG FIX: the original only stopped at num == 1, so 0 (or any value
    below 1) recursed until RecursionError.
    """
    if num <= 1:
        return 1
    return num * FirstFactorial(num - 1)
# Read a number from stdin and print its factorial.
num = int(input ("Input: "))
total = FirstFactorial(num)
print("Output:",total)
"noreply@github.com"
] | Metalbuster.noreply@github.com |
b6068726b044e3a2485222e0f9cb15889b3f1a4b | bdd18ea14c2905ad9f7ac737060a21935768bee3 | /virtual/bin/django-admin | 7cdc7f9a98582cb5f6808c861be74426c8ccd524 | [
"MIT"
] | permissive | AlexWanyoike/Neighborhood-Api | 381433d982036a569710e689ad5eb4f4d93a7e2e | a3ebc72421c0602a44e8817ff2b283683a5ae93d | refs/heads/main | 2023-05-18T15:53:32.967732 | 2021-06-07T20:54:06 | 2021-06-07T20:54:06 | 373,601,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | #!/home/alex/Documents/neighborhood/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line

if __name__ == '__main__':
    # Normalise argv[0]: drop any "-script.pyw" / ".exe" wrapper suffix
    # added by setuptools console-script shims (mainly on Windows).
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    # Delegate to Django's CLI dispatcher and exit with its status code.
    sys.exit(execute_from_command_line())
| [
"alex.wanyoike12@gmail.com"
] | alex.wanyoike12@gmail.com | |
f03d78865aa943a2184426a1b2b98fbe6b5b1c44 | 2d4545d2295ff88c425205af1ea593b85cf3b3c7 | /backend/manage.py | efb841ad080b09e4d08ac5b7802dcf086c4584af | [] | no_license | crowdbotics-apps/square-cloud-27361 | 202cf34b8d05497692ae16087a17dcd214526f36 | fb6441cbbf08d174c39e3d3969ce417fa44caf59 | refs/heads/master | 2023-05-04T12:43:40.597182 | 2021-05-24T23:24:56 | 2021-05-24T23:24:56 | 370,509,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'square_cloud_27361.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1f26110f249939ecb0f0260b32cca636fdea2aee | 0c534f461e9c1e8b9ef442c1bac1d7a1dea851b1 | /new_plotter.py | 0dab04a15268c6cbe758b8500943bf32a14cc5ad | [] | no_license | paulgowdy/nle | bb77e07a02e319775266091e34ad6f669d1034cd | 27c62f443b7ff6fcd3822596b86152ef2f320804 | refs/heads/main | 2023-08-03T16:44:00.607002 | 2021-09-03T04:33:12 | 2021-09-03T04:33:12 | 390,802,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | import matplotlib.pyplot as plt
import pandas as pd

# Run directories (date/time folders) whose logs.csv will be plotted.
runs = [
    #'2021-08-05/09-53-24',
    '2021-08-08/17-38-17',
    '2021-08-09/17-54-30',
]
colors = ['navy','darkred','green','navy','navy','red','aqua','cyan','red','red','darkred']

prefix = "//wsl$/Ubuntu-20.04/home/paulgamble/neurips-2021-the-nethack-challenge/nethack_baselines/torchbeast/outputs/"
#prefix = "//wsl$/Ubuntu-20.04/home/paulgamble/hackbot_transformer/nethack_baselines/torchbeast/outputs/"
suffix = "/logs.csv"

# Smoothing window (in logged rows) for the rolling averages.
roll_window = 100

# Figure 1: rolling mean episode return per run.
plt.figure()
ax = plt.gca()
for r, c in zip(runs, colors):
    log_fn = prefix + r + suffix
    df = pd.read_csv(log_fn)
    df['rolling_score'] = df['mean_episode_return'].rolling(roll_window).mean()
    #df['score_std_low'] = df['rolling_score'] - df['mean_episode_return'].rolling(roll_window).std()
    #df['score_std_high'] = df['rolling_score'] + df['mean_episode_return'].rolling(roll_window).std()
    #ax.fill_between(df['step'], df['score_std_low'], df['score_std_high'], color=c, alpha=0.3)
    df.plot(x='step',y='rolling_score',ax=ax, color=c)
labels = [x.split('/')[-1] for x in runs]
plt.legend(labels)
plt.title("Mean Episode Score")
#plt.ylim(-200,0)

# Figure 2: rolling mean episode length per run.
plt.figure()
ax = plt.gca()
for r, c in zip(runs, colors):
    log_fn = prefix + r + suffix
    df = pd.read_csv(log_fn)
    df['rolling_score'] = df['mean_episode_step'].rolling(roll_window).mean()
    #df['rolling_score'] = df['mean_episode_return'].rolling(roll_window).mean()
    #df['rolling_score'].plot(x='step')
    #df['mean_episode_return'].plot()
    df.plot(x='step',y='rolling_score',ax=ax, color=c)
plt.legend(runs)
#plt.ylim(-200,0)
plt.title("Mean Episode Steps")

plt.show()
| [
"noreply@github.com"
] | paulgowdy.noreply@github.com |
f5cdc5003ce1b282fd8a9e56ffd9182a55ea6b92 | 1cbcc478379544ad03698abdff6ef837852a1d73 | /Crazy Eight Card Game/CrazyEight.py | e2e546ac02c8a9345704140143a1cd098f307a3a | [] | no_license | kta5/Personal-Projects | a82d5ddc36ca6f48d4605debdca409a699ddec2a | a15add09b365a02f8d9e09604353b5065881abde | refs/heads/master | 2020-03-26T22:50:15.513579 | 2018-10-19T01:53:32 | 2018-10-19T01:53:32 | 145,490,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import random
suitChoices = ["Spades", "Hearts", "Clubs", "Diamonds"]
numChoices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
pile = []
deck = []
player1 = []
player2 = []
player = []
def turns ():
    # Intended to alternate the active player by parity of a turn counter.
    # NOTE(review): 'count' is a local read before any assignment, so calling
    # this raises UnboundLocalError; 'playerChoice' is also a local that is
    # never returned or used. Looks unfinished -- confirm intended design.
    count = count + 1
    if count % 2:
        playerChoice = player1
    else:
        playerChoice = player2
def distributeCards():
    # Deal the opening hands: five cards each, alternating player 1 / player 2,
    # drawn from the top of the shared deck (same order as dealing by hand).
    for _ in range(5):
        drawCard(player1, deck)
        drawCard(player2, deck)
def makeDeck():
    # Populate the shared deck with every [suit, rank] combination,
    # suit-major order (matches the original nested loops).
    for suit in suitChoices:
        deck.extend([suit, rank] for rank in numChoices)
def shuffleDeck(y):
    """Shuffle the card list *y* in place, once per element, and return it.

    Bug fix: the original shuffled the module-level ``deck`` no matter which
    list was passed in (the parameter was only used as a loop counter); it
    only worked because the caller happened to pass ``deck`` itself. Now the
    argument is what gets shuffled.
    """
    for _ in range(len(y)):
        random.shuffle(y)
    return y
def drawCard(Recipient, DeckUTakeFrom):
    # Move the top (last) card of the source pile into the recipient's hand.
    Recipient.append(DeckUTakeFrom.pop())
def checkWin():
    """Return True when either player's hand is empty (someone won).

    Bug fix: the original used bitwise ``|`` inside a chained comparison
    (``len(player1) == 0 | len(player2) == 0``), which Python parses as
    ``len(player1) == (0 | len(player2)) == 0`` -- wrong. Use ``or``.
    """
    return len(player1) == 0 or len(player2) == 0
'''
def play():
if checkWin() == False:
while (checkWin() == False):
play()
'''
def start():
    # Begin play: if the face-up card is a Queen (rank 12), announce it,
    # try to recycle it into the deck and recurse until a non-Queen is up.
    if pile[-1][1] == 12:
        print "Oh, we have a Queen! You can put any club or another Queen..."
        # NOTE(review): list.insert() needs (index, item); this one-argument
        # call raises TypeError, and no replacement card is drawn before the
        # recursive call -- confirm the intended recycling logic.
        deck.insert(pile[len(deck)/2])
        pile.pop(-1)
        start()
    turns()  # NOTE(review): turns() itself reads an undefined 'count'
# --- Game setup (Python 2 script) ---
y = raw_input("Let's Play Crazy Eight! Please enter the number of times to shuffle: ")
# NOTE(review): 'y' is read but never used; shuffleDeck derives its shuffle
# count from the deck itself -- confirm whether 'y' was meant to be passed.
makeDeck()
deck = shuffleDeck(deck)
#print "Great! Let's Play!"
#check deck status
for x in range(0, len(deck)):
    print deck[x]
print "Number of Cards: "
print len(deck)
distributeCards()
# Deck status again after dealing five cards to each player.
for x in range(0, len(deck)):
    print deck[x]
print "Number of Cards: "
print len(deck)
print "Player 1 Cards: "
print player1
print "Player2 Cards: "
print player2
print deck [-1][0]
# Flip the starting card onto the discard pile.
drawCard(pile, deck)
print pile[-1]
if pile[-1][1] == 8:
    print pile[-1]
    print "Oh, we have an eight! That hasn't happened in a while. Putting in the middle of the deck"
    # NOTE(review): list.insert() needs (index, item); this single-argument
    # call raises TypeError, so the eight is never actually recycled.
    deck.insert(pile[len(deck)/2])
    pile.pop(-1)
    drawCard(pile, deck)
    start()
else:
    start()
| [
"kta5@ucmerced.edu"
] | kta5@ucmerced.edu |
64ff0b3da04db2adfecb58e8771034b3ad7b2520 | 859093a06bb7b8ff2c00f21d4d3052b9d6b3a580 | /schedule/widgets.py | b7aa89545511619cbebd2f923c9a003ca96d629d | [
"MIT"
] | permissive | fitahol/fitahol | bbf71b695fbacad2d3a1f99a034c041ea6069529 | ce84dc909aa98f2dc7594ef26568e015cbfe0e94 | refs/heads/master | 2021-01-19T20:18:11.677674 | 2017-02-20T14:05:39 | 2017-02-20T14:05:39 | 82,561,065 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | from __future__ import unicode_literals
from django.forms.widgets import TextInput
from django.utils.safestring import mark_safe
class SpectrumColorPicker(TextInput):
    """
    Django TextInput widget wired to a jQuery color picker.

    Based on Brian Grinstead's Spectrum - http://bgrins.github.com/spectrum/
    """
    class Media:
        # CDN assets the Django admin/form media framework will include.
        css = {'all': ("//cdnjs.cloudflare.com/ajax/libs/spectrum/1.7.1/spectrum.css",)}
        js = ('//cdnjs.cloudflare.com/ajax/libs/jquery/1.8.3/jquery.min.js',
              '//cdnjs.cloudflare.com/ajax/libs/spectrum/1.7.1/spectrum.js',)
    def _render_js(self, _id, value):
        # Build the inline <script> that turns the input with this id into a
        # Spectrum picker.
        # NOTE(review): `value` is accepted but never interpolated into the
        # template -- the picker always initialises with color: "". Confirm
        # whether the current field value should seed the widget.
        js = u"""
        <script type="text/javascript">
            $(document).ready(function(){
                $('#%s').spectrum({
                    color: "",
                    allowEmpty: true,
                    showAlpha: true,
                    showInput: true,
                    className: "full-spectrum",
                    showInitial: true,
                    showPalette: true,
                    showSelectionPalette: true,
                    maxSelectionSize: 10,
                    preferredFormat: "hex",
                    localStorageKey: "spectrum.demo",
                    palette: [
                        ["rgb(0, 0, 0)", "rgb(67, 67, 67)", "rgb(102, 102, 102)",
                        "rgb(204, 204, 204)", "rgb(217, 217, 217)","rgb(255, 255, 255)"],
                        ["rgb(152, 0, 0)", "rgb(255, 0, 0)", "rgb(255, 153, 0)", "rgb(255, 255, 0)", "rgb(0, 255, 0)",
                        "rgb(0, 255, 255)", "rgb(74, 134, 232)", "rgb(0, 0, 255)", "rgb(153, 0, 255)", "rgb(255, 0, 255)"],
                        ["rgb(230, 184, 175)", "rgb(244, 204, 204)", "rgb(252, 229, 205)", "rgb(255, 242, 204)", "rgb(217, 234, 211)",
                        "rgb(208, 224, 227)", "rgb(201, 218, 248)", "rgb(207, 226, 243)", "rgb(217, 210, 233)", "rgb(234, 209, 220)",
                        "rgb(221, 126, 107)", "rgb(234, 153, 153)", "rgb(249, 203, 156)", "rgb(255, 229, 153)", "rgb(182, 215, 168)",
                        "rgb(162, 196, 201)", "rgb(164, 194, 244)", "rgb(159, 197, 232)", "rgb(180, 167, 214)", "rgb(213, 166, 189)",
                        "rgb(204, 65, 37)", "rgb(224, 102, 102)", "rgb(246, 178, 107)", "rgb(255, 217, 102)", "rgb(147, 196, 125)",
                        "rgb(118, 165, 175)", "rgb(109, 158, 235)", "rgb(111, 168, 220)", "rgb(142, 124, 195)", "rgb(194, 123, 160)",
                        "rgb(166, 28, 0)", "rgb(204, 0, 0)", "rgb(230, 145, 56)", "rgb(241, 194, 50)", "rgb(106, 168, 79)",
                        "rgb(69, 129, 142)", "rgb(60, 120, 216)", "rgb(61, 133, 198)", "rgb(103, 78, 167)", "rgb(166, 77, 121)",
                        "rgb(91, 15, 0)", "rgb(102, 0, 0)", "rgb(120, 63, 4)", "rgb(127, 96, 0)", "rgb(39, 78, 19)",
                        "rgb(12, 52, 61)", "rgb(28, 69, 135)", "rgb(7, 55, 99)", "rgb(32, 18, 77)", "rgb(76, 17, 48)"]
                        ]
                    });
                });
            </script>""" % (_id)
        return js
    def render(self, name, value, attrs=None):
        # Ensure the <input> has an id so the jQuery selector in the script
        # can find it, then append the initialisation script to the widget.
        # NOTE(review): attrs defaults to None, so calling render() without
        # attrs raises TypeError on the membership test -- confirm callers.
        if 'id' not in attrs:
            attrs['id'] = "id_%s" % name
        rendered = super(SpectrumColorPicker, self).render(name, value, attrs)
        return mark_safe(rendered + self._render_js(attrs['id'], value))
"lingnck@gmail.com"
] | lingnck@gmail.com |
e8112892250644b6fa3180265a68d0efcb3674f7 | 5a072bb59a86bebb5c5c89775320a5518ba0d609 | /devel/lib/python2.7/dist-packages/mavros_msgs/srv/_SetMavFrame.py | fe0e03f60649e74f033de6f2ec0f9dee12038e18 | [] | no_license | michou214/catkin_ws | 33bff88e29159cb7030c7ae9453071f2241fb98e | 19c5a2af82d674a77abafdbe86a7761556c65e84 | refs/heads/master | 2020-04-03T07:34:55.016148 | 2018-10-31T12:30:10 | 2018-10-31T12:30:10 | 155,106,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | /home/mpperret/catkin_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/srv/_SetMavFrame.py | [
"mpperret@lispclab8.epfl.ch"
] | mpperret@lispclab8.epfl.ch |
8fed07c0efc16ac680f9ec6b68ba00e3bfde473a | e5fd7503802bf5e3ff12dfeebb752cab4dfe1a46 | /timeConverter.py | af7627ed54b54cb01ac4d49a0c99b57d8fb64ab4 | [] | no_license | ShiranthaKellum/timeConverter | edf8012340482a2f7a83b365f7bf983c1c123c8d | b4d2095ea5808d01f5a52a8b18fd76f9ae5de262 | refs/heads/main | 2023-07-05T01:46:55.107239 | 2021-08-30T04:03:18 | 2021-08-30T04:03:18 | 401,135,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py |
def convert(fTime):
    """Convert a 12-hour "hh:mm:ssAM"/"hh:mm:ssPM" string to 24-hour time.

    Prints the 24-hour form ("HH:MM:SS") like the original, and additionally
    returns it so callers can use the value programmatically.

    :param str fTime: fixed-format 12-hour timestamp, e.g. '12:05:45AM'.
    :return: the 24-hour timestamp string "HH:MM:SS".
    """
    # Slice each two-digit field at once instead of concatenating characters.
    hour = int(fTime[0:2])
    minutes = int(fTime[3:5])
    seconds = int(fTime[6:8])
    if fTime[-2] == "P":
        # 12:xx PM stays 12; every other PM hour gains 12.
        if hour != 12:
            hour += 12
    elif hour == 12:
        # 12:xx AM is midnight, i.e. hour 00 (the original comment
        # mislabelled this case as PM).
        hour = 0
    result = "%02d:%02d:%02d" % (hour, minutes, seconds)
    print(result)
    return result
#time = '12:05:45AM'  # sample input for manual testing
# Read a 12-hour timestamp from stdin and print its 24-hour equivalent.
time = input ()
convert (time)
| [
"noreply@github.com"
] | ShiranthaKellum.noreply@github.com |
163151df9104f70f4b43557fcb6448775b8fe379 | 2e2dbd9a0fd5c105823a7260225fc7791793e9c2 | /rango/models.py | 732436340b210a893cacbcc32c22087cd70d3fe6 | [] | no_license | YauheniKr/tango-with-django-project | dc46185fcb0e74a3d1f9226fbbeeedf6aaeddbcb | 13576c3084186e08bedd936f834f87f27d9b618b | refs/heads/master | 2020-05-25T18:10:24.740833 | 2019-06-26T20:02:15 | 2019-06-26T20:02:15 | 187,923,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
# Create your models here.
class Category(models.Model):
    """A named category that pages are grouped under."""
    name = models.CharField(max_length=128, unique=True)
    views = models.IntegerField(default=0)
    likes = models.IntegerField(default=0)
    slug = models.SlugField(unique=True)  # URL-safe form of name, set in save()
    def save(self, *args, **kwargs):
        # Keep the slug in sync with the (possibly renamed) category.
        self.slug = slugify(self.name)
        super(Category, self).save(*args, **kwargs)
    class Meta:
        verbose_name_plural = 'Categories'
    def __str__(self):
        return self.name
class Page(models.Model):
    """An external web page filed under exactly one Category."""
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    title = models.CharField(max_length=128)
    url = models.URLField()
    views = models.IntegerField(default=0)
    def __str__(self):
        return self.title
class UserProfile(models.Model):
    """Extra per-user data attached one-to-one to Django's auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    website = models.URLField(blank=True)
    picture = models.ImageField(upload_to='profile_images', blank=True)
    def __str__(self):
        return self.user.username
"smilodon2@yandex.ru"
] | smilodon2@yandex.ru |
acc48682a0220bc482ce43a4544bb1272e4cc883 | 88fae7f498ec504ca6228a7d622ca8e72a2195ae | /bilibili_video_api/biclass.py | 4445832c3961b329bf5db240dbf046b30c777ae1 | [
"MIT"
] | permissive | mo-han/bilibili-appcache-extract | f9a244385a7be708391184a99c9d3abf24b03a01 | c2a504d3d4fe7b05adbb2225339fd86ec6b338bb | refs/heads/master | 2020-04-05T09:23:59.985798 | 2019-05-19T03:57:01 | 2019-05-19T03:57:01 | 156,754,279 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 28 01:22:20 2014
@author: Vespa
"""
class User():
    """A bilibili user account; attributes default to None until populated."""
    def __init__(self,m_mid=None,m_name=None):
        if m_mid:
            self.mid = m_mid
        if m_name:
            if isinstance(m_name,str):
                m_name = m_name.encode('utf8')  # Python 2: store as UTF-8 bytes
            self.name = m_name
    # Return the URL of this user's space (profile) page.
    def GetSpace(self):
        return 'http://space.bilibili.tv/'+str(self.mid)
    mid = None
    name = None
    isApprove = None  # whether this is a verified account
    spaceName = None
    sex = None
    rank = None
    avatar = None
    follow = None  # number of users this user follows
    fans = None  # number of followers
    article = None  # number of submissions
    place = None  # location
    description = None  # verification info for verified users, personal motto otherwise
    followlist = None  # list of followed users
    friend = None
    DisplayRank = None
class Video():
    """A bilibili video entry; attributes default to None until populated."""
    def __init__(self,m_aid=None,m_title=None):
        if m_aid:
            self.aid = m_aid
        if m_title:
            if isinstance(m_title,str):
                m_title = m_title.encode('utf8')  # Python 2: store as UTF-8 bytes
            self.title = m_title
    aid = None
    title = None
    guankan = None
    shoucang = None
    danmu = None
    date = None
    cover = None
    commentNumber = None
    description = None
    tag = None
    author = None
    page = None
    credit = None
    coin = None
    spid = None
    cid = None
    offsite = None  # URL used to invoke the Flash player
    Iscopy = None
    subtitle = None
    duration = None
    episode = None
    arcurl = None  # web page URL
    arcrank = None  # meaning unclear
    tid = None
    typename = None
    # meaning unclear:
    instant_server = None
    src = None
    partname = None
    allow_bp = None
    allow_feed = None
    created = None
    # playback counters:
    play_site = None
    play_forward = None
    play_mobile = None
class Bangumi():
    """A bilibili bangumi (anime series); attributes default to None."""
    def __init__(self):
        pass
    typeid = None
    lastupdate = None
    areaid = None
    bgmcount = None  # current total episode count of the series
    title = None
    lastupdate_at = None
    attention = None  # subscriber count
    cover = None
    priority = None
    area = None
    weekday = None
    spid = None
    new = None
    scover = None
    mcover = None
    click = None
    season_id = None
    click = None  # view count (NOTE(review): duplicate of 'click' above; this one wins)
    video_view = None
class Comment():
    """One comment on a video, together with its posting user."""
    def __init__(self):
        self.post_user = User()
    lv = None  # floor number
    fbid = None  # comment id
    msg = None
    ad_check = None  # status (0: normal, 1: hidden by uploader, 2: deleted by admin, 3: deleted after reports)
    post_user = None
class CommentList():
    """A page of comments plus paging information."""
    def __init__(self):
        pass
    comments = None
    commentLen = None
    page = None
class ZhuantiInfo():
    """A bilibili special-topic (zhuanti) page; attributes default to None."""
    def __init__(self, m_spid,m_title):
        self.spid = m_spid
        if isinstance(m_title,str):
            m_title = m_title.encode('utf8')  # Python 2: store as UTF-8 bytes
        self.title = m_title
    spid = None
    title = None
    author = None
    cover = None
    thumb = None
    ischeck = None  # meaning unclear
    typeurl = None  # always "http://www.bilibili.com"
    tag = None
    description = None
    pubdate = None  # meaning unclear
    postdate = None
    lastupdate = None
    click = None
    favourite = None
    attention = None
    count = None
    bgmcount = None
    spcount = None
    season_id = None
    is_bangumi = None
    arcurl = None
arcurl = None
class Danmu():
    """A single danmaku (on-screen comment) with timing and style metadata."""
    def __init__(self):
        pass
    t_video = None
    t_stamp = None
    mid_crc = None  # value: hex(binascii.crc32(mid))
    danmu_type = None  # 1: scrolling, 5: top-anchored, 4: bottom-anchored
    content = None
    danmu_color = None
    danmu_fontsize = None
"zmhungrown@gmail.com"
] | zmhungrown@gmail.com |
19b2f2abb8a4d87113528cde7973434dfe586d03 | 9b9e61c1dd519d7a6cdb74b73ee42e302d74a1ea | /mc_video/mc_acfun_spider/mc_acfun_spider/items.py | dcd24ae1daa5e81a544267d22e4a45b2e1b27ac2 | [] | no_license | 649435349/scrapy_manyvideowebsite | 29641d9579b9ae017701f34b320255c2f2d5f8af | 90ea5df2a5f50feadb3f7f4937c04e984d8c766b | refs/heads/master | 2021-01-22T22:20:06.113666 | 2017-07-03T15:17:01 | 2017-07-03T15:17:01 | 85,529,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class McAcfunSpiderItem(scrapy.Item):
    """Fields scraped from an AcFun video page (counts, author, metadata)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    crawl_time=scrapy.Field()
    video_url=scrapy.Field()
    page_url=scrapy.Field()
    title=scrapy.Field()
    category=scrapy.Field()
    upload_time=scrapy.Field()
    play_cnt=scrapy.Field()
    comment_cnt=scrapy.Field()
    author=scrapy.Field()
    personal_homepage = scrapy.Field()
    title_p = scrapy.Field()
    duration = scrapy.Field()
    danmu_cnt = scrapy.Field()
    collect_cnt=scrapy.Field()
    banana_cnt=scrapy.Field()
    introduction = scrapy.Field()
    label=scrapy.Field()
    personal_signiture = scrapy.Field()
    post_cnt = scrapy.Field()
    listener_cnt=scrapy.Field()
    video_src=scrapy.Field()
class PublicItem(scrapy.Item):
    """Normalised fields shared across all crawled video sites."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    crawl_time= scrapy.Field()
    website= scrapy.Field()
    video_url= scrapy.Field()
    page_url= scrapy.Field()
    title= scrapy.Field()
    category= scrapy.Field()
    upload_time= scrapy.Field()
    play_cnt= scrapy.Field()
    comment_cnt= scrapy.Field()
    label= scrapy.Field()
    author= scrapy.Field()
    fans_cnt= scrapy.Field()
    post_cnt= scrapy.Field()
    all_play_cnt= scrapy.Field()
    author_url=scrapy.Field()
| [
"hzfengyufei@corp.netease.com"
] | hzfengyufei@corp.netease.com |
b55ca3a9052700ee2904b06e7b11fdccae4a3323 | 18ce885a43f0ea61202cc2e7e870e3a216442b29 | /181CO152_29-01-2021/naive_bayes-181CO152.py | 112a56799fe5723ca78604fc8a7dfe293b1cf305 | [] | no_license | shumbul/Machine-Learning_lab-CS353 | 0b68a5ae06862671733aa0c7787645d824924b64 | 4de632a98ca2db5dc26ba30bf22ee757c72bba1c | refs/heads/main | 2023-04-25T03:35:34.133875 | 2021-04-25T17:53:26 | 2021-04-25T17:53:26 | 361,498,048 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py | #!/usr/bin/env python
# coding: utf-8
# <h1 align='center'>CS353 Machine Learning Lab</h1>
# <h1 align='center'>Naive Bayes (29/01/21)</h1>
# <h2 align='center'>Shumbul Arifa (181CO152)</h2>
# # Introduction
# *Topic: Performing Naive Bayes Classifier on Iris Dataset.*
#
# Naive Bayes methods are a set of supervised learning algorithms based on
# applying Bayes’ theorem with a strong assumption that all the predictors are
# independent of each other i.e. the presence of a feature in a class is independent
# of the presence of any other feature in the same class. This is a naive
# assumption that is why these methods are called Naive Bayes methods. Bayes
# theorem states the following relationship in order to find the posterior probability
# of class i.e. the probability of a label and some observed features, P(Y | features).
#
# **P(Y | features) = P(Y) * P(features | Y) / P(features)**
#
# Here, P(Y| features) is the posterior probability of class. P(Y) is the prior
# probability of class. P(features | Y) is the likelihood which is the probability of the
# predictor given class. P(features) is the prior probability of the predictor.
#
# # Dataset
# Iris Dataset is a standard dataset included in scikit learn standard library.
# ## Importing Python Libraries
# In[1]:
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
# ## Loading dataset
# In[10]:
iris = load_iris()
X, y = load_iris(return_X_y=True)
X[0:5]  # bare expression: shown in a notebook, no effect when run as a script
# ## Splitting the dataset
# The dataset is split in the ratio of 8:2 for training : test data respectfully, and the random state is set to 20.
# In[12]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 20)
# In[16]:
y_train  # notebook display only
# In[20]:
y_test  # notebook display only
# ## Scaling data using StandardScaler
# In[21]:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)  # reuse train-set statistics; never fit on test data
# ## Fitting Model
# In[22]:
from sklearn.naive_bayes import GaussianNB
nvclassifier = GaussianNB()
nvclassifier.fit(X_train, y_train)
# In[24]:
y_pred = nvclassifier.predict(X_test)
y_pred  # notebook display only
# ## Accuracy
# In[25]:
print("Accuracy score of Naive Bayes Model: ", nvclassifier.score(X_test, y_test))
# ## Confusion Matrix
# In[28]:
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test,y_pred))
# In[29]:
print(confusion_matrix(y_test,y_pred))
# <h1 align='center'> Observation </h1>
# **Accuracy score of Naive Bayes Model is 93.33%.**
| [
"shumbul.181co152@nitk.edu.in"
] | shumbul.181co152@nitk.edu.in |
e69f431e65fa8f7d03b5b76e233d7b52ac4ddc55 | dcc2f815674f2d0d105c9d16007f3eaadd65f272 | /backend/app/password_encrypt.py | 455f4bd86cf3e1e3292ef14201f2e0024efc1707 | [] | no_license | renan-77/hiring-dashboard-hackathon | 188dd92e2921ed1f040657f9873b3ec0819c43a4 | 9648121a4d47991550570dffa17a186104dfb0f9 | refs/heads/main | 2023-04-18T14:34:06.138475 | 2021-05-06T15:51:22 | 2021-05-06T15:51:22 | 362,763,045 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | from app import bcrypt # Importing bcrypt instance from app.
def hash_password(password):
    """
    Function that takes password as a parameter and returns the hashed password.
    :param str password: A string password.
    """
    # NOTE(review): on non-string input this returns an English error message
    # instead of raising, so callers cannot tell a hash apart from the
    # failure text -- consider raising TypeError instead.
    try:
        hashed = bcrypt.generate_password_hash(password)
        return hashed
    except TypeError:
        return 'Please insert a string to continue'
def compare_passwords(password, hashed):
    """
    Function to compare password with hash.
    :param str password: A string password.
    :param str hashed: A string hashed password.
    :return: A boolean stating whether the password and hash match or not.
    """
    # NOTE(review): the comment below is stale -- nothing is encoded here;
    # bcrypt.check_password_hash accepts str/bytes itself.
    # Encoding parameters to bytes for the 'checkpw' function.
    return bcrypt.check_password_hash(hashed, password)
if __name__ == '__main__':
    # Manual smoke test: print a bcrypt hash of a sample password.
    print(hash_password('password123'))
| [
"renanmonteiroft@gmail.com"
] | renanmonteiroft@gmail.com |
dc8d100760aa4712f78b1798321abb7dc6030e06 | 0a96a58554e7812977f42781d9be1b7883e55575 | /root/firstsite/firstapp/migrations/0025_auto_20161127_0247.py | 26caf6e9bc856611af54bc57080f0a9ffa13f94e | [] | no_license | miaozaiye/Python-Full-Stack | c7a6cf01dfb734362ec9c62c2ef55b6fffb19e25 | c5978c126f110ff647b814d4b15b35ff42459bef | refs/heads/master | 2020-05-21T07:02:18.625344 | 2017-03-15T15:25:00 | 2017-03-15T15:25:00 | 67,462,905 | 0 | 0 | null | 2017-03-15T15:32:23 | 2016-09-06T01:37:27 | CSS | UTF-8 | Python | false | false | 1,102 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-27 02:47
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds the UserProfile model and alters Comment.time.

    NOTE(review): the AlterField default below is a literal timestamp frozen
    at makemigrations time (2016-11-27) -- the model probably intended a
    callable default; confirm before relying on it.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('firstapp', '0024_auto_20161126_0641'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_image', models.FileField(upload_to='profile_image')),
                ('belong_to', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AlterField(
            model_name='comment',
            name='time',
            field=models.DateField(default=datetime.datetime(2016, 11, 27, 2, 47, 10, 640108)),
        ),
    ]
| [
"fevberkeley@vip.sina.com"
] | fevberkeley@vip.sina.com |
def dig_pow(n, p):
    """Return the integer k such that the digits of *n*, raised to the
    consecutive powers p, p+1, p+2, ..., sum to k * n; return -1 when no
    such integer exists.

    Example: dig_pow(695, 2) == 2 since 6**2 + 9**3 + 5**4 == 1390 == 2*695.
    """
    n = int(n)
    p = int(p)
    total = sum(int(digit) ** (p + offset) for offset, digit in enumerate(str(n)))
    # Use integer division: the original returned a float (result / n), but
    # the multiplier k is an integer by construction whenever it exists.
    if total % n == 0:
        return total // n
    return -1
if __name__ == '__main__':
    # Manual smoke test when run as a script.
    print(dig_pow(3263, 4))
"vitaliivolodin17@gmail.com"
] | vitaliivolodin17@gmail.com |
a24920e7edf1172ef586def47598b305fa1d1d93 | 93e715a9e0423c0a026b57109db51dd9c4bba734 | /RestrictedBoltzmannMachine/Python_codes/test_rbm.py | 8ef1d5422b967457c1eed5154a29d8fc0c45499e | [] | no_license | zhaoyun404/MachineLearning | fbfe4a88a5fca6b016644bbe3e1cd727e8934426 | 6f2e50bdee50f60a3165b9501076a882397a879e | refs/heads/master | 2022-04-24T13:05:12.388223 | 2020-04-27T21:12:13 | 2020-04-27T21:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time
import os
from rbm import rbm, make_binary
print("Downloading dataset...")
mnist = tf.keras.datasets.mnist
(trainX, trainY), (testX, testY) = mnist.load_data()
# Shuffle the train and test splits independently.
shuffled_indices = np.random.permutation(60000)
trainX, trainY = trainX[shuffled_indices], trainY[shuffled_indices]
shuffled_indices = np.random.permutation(10000)
testX, testY = testX[shuffled_indices], testY[shuffled_indices]
size_of_dataset = 60000;
#Initialize the model
my_rbm = rbm(nvisible=28*28,
            nhidden=8*8,
            eta = 0.08,
            momentum = 0.9,
            nCDsteps = 25,
            nepochs = 500,
            batch_size = 100,
            size_of_dataset = size_of_dataset)
print("Preparing data...")
# Binarise each pixel at 0.5 and flatten every 28x28 image to a 784-vector.
training_data = np.zeros((size_of_dataset, 28*28))
trainX = trainX/255.0
trainX = trainX > 0.5
for k in range(size_of_dataset):
    training_data[k] = trainX[k].flat[:]
print("Training model...")
start = time.time()
my_rbm.train_model(training_data)
end = time.time()
elapsed_time = end-start;
print("Model was trained for ", elapsed_time, " seconds")
my_rbm.plot_loss()
#Testing
print("Making predictions...")
test_data = np.zeros(28*28)
# Reconstruct the first nine test digits and show them in a 3x3 grid.
for k in range(1,10):
    example = np.zeros((28,28),float)
    test_data[:] = testX[k-1].flat[:]
    test_data = make_binary(test_data)
    my_rbm.predict(test_data)
    example.flat[:] = my_rbm.visibleprob[:]
    plt.subplot(330 + k)
    plt.imshow(example, cmap = plt.get_cmap("gray"))
# Second figure: the original test digits for visual comparison.
plt.figure()
for k in range(1,10):
    plt.subplot(330 + k)
    plt.imshow(testX[k-1], cmap = plt.get_cmap("gray"))
plt.show()
print("Finished.")
"reneaas@student.matnat.uio.no"
] | reneaas@student.matnat.uio.no |
66be3cadae5fa359a93f11f7ea6e72e0f60f0dfa | 26f17b0fa8ef1fb39ea6818fb48e80ea71a71f48 | /incremental_dataloader.py | d0dbb83669ca8f113ef14edee82b3bca455fc51e | [] | no_license | joeljosephjin/itaml-pytorch | e4a62b39033d0bb52083ef095d79aa3d4248d0ff | e320868277b782bf137b2920867a49c5ab83750d | refs/heads/main | 2023-07-18T08:19:25.441220 | 2021-08-31T16:45:51 | 2021-08-31T16:45:51 | 401,776,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,971 | py | '''
TaICML incremental learning
Copyright (c) Jathushan Rajasegaran, 2019
'''
import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Sampler
from torchvision import datasets, transforms
# datasets.MNIST.resources = [
# ('https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'),
# ('https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'),
# ('https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'),
# ('https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c')
# ]
# from imagenet import ImageNet
# from idatasets.CUB200 import Cub2011
# from idatasets.omniglot import Omniglot
# from idatasets.celeb_1m import MS1M
import collections
class SubsetRandomSampler(Sampler):
    r"""Yields elements of a fixed index list, optionally permuted.

    Arguments:
        indices (sequence): the indices to sample from
        shuffle (bool): when True, yield a fresh random permutation per pass
    """

    def __init__(self, indices, shuffle):
        self.indices = indices
        self.shuffle = shuffle

    def __iter__(self):
        # Choose the visiting order: a random permutation when shuffling,
        # otherwise the natural 0..n-1 order.
        if self.shuffle:
            order = torch.randperm(len(self.indices)).tolist()
        else:
            order = range(len(self.indices))
        return (self.indices[pos] for pos in order)

    def __len__(self):
        return len(self.indices)
class IncrementalDataset:
def __init__(
self,
dataset_name,
args,
random_order=False,
shuffle=True,
workers=10,
batch_size=128,
seed=1,
increment=10,
validation_split=0.
):
self.dataset_name = dataset_name.lower().strip()
datasets = _get_datasets(dataset_name)
self.train_transforms = datasets[0].train_transforms
self.common_transforms = datasets[0].common_transforms
try:
self.meta_transforms = datasets[0].meta_transforms
except:
self.meta_transforms = datasets[0].train_transforms
self.args = args
self._setup_data(
datasets,
args.data_path,
random_order=random_order,
seed=seed,
increment=increment,
validation_split=validation_split
)
self._current_task = 0
self._batch_size = batch_size
self._workers = workers
self._shuffle = shuffle
self.sample_per_task_testing = {}
@property
def n_tasks(self):
return len(self.increments)
def get_same_index(self, target, label, mode="train", memory=None):
label_indices = []
label_targets = []
for i in range(len(target)):
if int(target[i]) in label:
label_indices.append(i)
label_targets.append(target[i])
for_memory = (label_indices.copy(),label_targets.copy())
if memory is not None:
memory_indices, memory_targets = memory
memory_indices2 = np.tile(memory_indices, (self.args.mu,))
all_indices = np.concatenate([memory_indices2,label_indices])
else:
all_indices = label_indices
return all_indices, for_memory
def get_same_index_test_chunk(self, target, label, mode="test", memory=None):
label_indices = []
label_targets = []
np_target = np.array(target, dtype="uint32")
np_indices = np.array(list(range(len(target))), dtype="uint32")
for t in range(len(label)//self.args.class_per_task):
task_idx = []
for class_id in label[t*self.args.class_per_task: (t+1)*self.args.class_per_task]:
idx = np.where(np_target==class_id)[0]
task_idx.extend(list(idx.ravel()))
task_idx = np.array(task_idx, dtype="uint32")
task_idx.ravel()
random.shuffle(task_idx)
label_indices.extend(list(np_indices[task_idx]))
label_targets.extend(list(np_target[task_idx]))
if(t not in self.sample_per_task_testing.keys()):
self.sample_per_task_testing[t] = len(task_idx)
label_indices = np.array(label_indices, dtype="uint32")
label_indices.ravel()
return list(label_indices), label_targets
def new_task(self, memory=None):
print(self._current_task)
print(self.increments)
min_class = sum(self.increments[:self._current_task])
max_class = sum(self.increments[:self._current_task + 1])
train_indices, for_memory = self.get_same_index(self.train_dataset.targets, list(range(min_class, max_class)), mode="train", memory=memory)
test_indices, _ = self.get_same_index_test_chunk(self.test_dataset.targets, list(range(max_class)), mode="test")
self.train_data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self._batch_size,shuffle=False,num_workers=16, sampler=SubsetRandomSampler(train_indices, True))
self.test_data_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=self.args.test_batch,shuffle=False,num_workers=16, sampler=SubsetRandomSampler(test_indices, False))
task_info = {
"min_class": min_class,
"max_class": max_class,
"task": self._current_task,
"max_task": len(self.increments),
"n_train_data": len(train_indices),
"n_test_data": len(test_indices)
}
self._current_task += 1
return task_info, self.train_data_loader, self.test_data_loader, self.test_data_loader, for_memory
# for verification
def get_galary(self, task, batch_size=10):
indexes = []
dict_ind = {}
seen_classes = []
for i, t in enumerate(self.train_dataset.targets):
if not(t in seen_classes) and (t< (task+1)*self.args.class_per_task and (t>= (task)*self.args.class_per_task)):
seen_classes.append(t)
dict_ind[t] = i
od = collections.OrderedDict(sorted(dict_ind.items()))
for k, v in od.items():
indexes.append(v)
data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(indexes, False))
return data_loader
def get_custom_loader_idx(self, indexes, mode="train", batch_size=10, shuffle=True):
if(mode=="train"):
data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(indexes, True))
else:
data_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(indexes, False))
return data_loader
def get_custom_loader_class(self, class_id, mode="train", batch_size=10, shuffle=False):
if(mode=="train"):
train_indices, for_memory = self.get_same_index(self.train_dataset.targets, class_id, mode="train", memory=None)
data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(train_indices, True))
else:
test_indices, _ = self.get_same_index(self.test_dataset.targets, class_id, mode="test")
data_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(test_indices, False))
return data_loader
def _setup_data(self, datasets, path, random_order=False, seed=1, increment=10, validation_split=0.):
self.increments = []
self.class_order = []
trsf_train = transforms.Compose(self.train_transforms)
try:
trsf_mata = transforms.Compose(self.meta_transforms)
except:
trsf_mata = transforms.Compose(self.train_transforms)
trsf_test = transforms.Compose(self.common_transforms)
current_class_idx = 0 # When using multiple datasets
for dataset in datasets:
if(self.dataset_name=="imagenet"):
train_dataset = dataset.base_dataset(root=path, split='train', download=False, transform=trsf_train)
test_dataset = dataset.base_dataset(root=path, split='val', download=False, transform=trsf_test)
elif(self.dataset_name=="cub200" or self.dataset_name=="cifar100" or self.dataset_name=="mnist" or self.dataset_name=="caltech101" or self.dataset_name=="omniglot" or self.dataset_name=="celeb"):
train_dataset = dataset.base_dataset(root=path, train=True, download=True, transform=trsf_train)
test_dataset = dataset.base_dataset(root=path, train=False, download=True, transform=trsf_test)
elif(self.dataset_name=="svhn"):
train_dataset = dataset.base_dataset(root=path, split='train', download=True, transform=trsf_train)
test_dataset = dataset.base_dataset(root=path, split='test', download=True, transform=trsf_test)
train_dataset.targets = train_dataset.labels
test_dataset.targets = test_dataset.labels
order = [i for i in range(self.args.num_class)]
if random_order:
random.seed(seed)
random.shuffle(order)
elif dataset.class_order is not None:
order = dataset.class_order
for i,t in enumerate(train_dataset.targets):
train_dataset.targets[i] = order[t]
for i,t in enumerate(test_dataset.targets):
test_dataset.targets[i] = order[t]
self.class_order.append(order)
self.increments = [increment for _ in range(len(order) // increment)]
self.train_dataset = train_dataset
self.test_dataset = test_dataset
@staticmethod
def _map_new_class_index(y, order):
    """Transforms targets for new class order."""
    # Each original label is replaced by its position inside ``order``;
    # ``list.index`` raises ValueError for labels not present, exactly as
    # the original map/lambda form did.
    remapped = [order.index(label) for label in y]
    return np.array(remapped)
def get_memory(self, memory, for_memory, seed=1):
    """Assemble the rehearsal-memory index lists after a task.

    memory : (data_indices, targets) carried over from the previous task,
        or None on the first task.
    for_memory : (indices, targets) of the current task's candidate samples.
    Returns (data_indices, targets) as plain int lists, each class truncated
    to an equal share of the fixed memory budget.
    """
    random.seed(seed)
    # Equal share of self.args.memory for every class seen so far.
    memory_per_task = self.args.memory // ((self.args.sess+1)*self.args.class_per_task)
    self._data_memory, self._targets_memory = np.array([]), np.array([])
    mu = 1  # replication factor for np.tile (1 => no replication)
    #update old memory
    if(memory is not None):
        data_memory, targets_memory = memory
        data_memory = np.array(data_memory, dtype="int32")
        targets_memory = np.array(targets_memory, dtype="int32")
        for class_idx in range(self.args.class_per_task*(self.args.sess)):
            # Keep at most memory_per_task exemplars of each previous class.
            idx = np.where(targets_memory==class_idx)[0][:memory_per_task]
            self._data_memory = np.concatenate([self._data_memory, np.tile(data_memory[idx], (mu,)) ])
            self._targets_memory = np.concatenate([self._targets_memory, np.tile(targets_memory[idx], (mu,)) ])
    #add new classes to the memory
    new_indices, new_targets = for_memory
    new_indices = np.array(new_indices, dtype="int32")
    new_targets = np.array(new_targets, dtype="int32")
    for class_idx in range(self.args.class_per_task*(self.args.sess),self.args.class_per_task*(1+self.args.sess)):
        idx = np.where(new_targets==class_idx)[0][:memory_per_task]
        self._data_memory = np.concatenate([self._data_memory, np.tile(new_indices[idx],(mu,)) ])
        self._targets_memory = np.concatenate([self._targets_memory, np.tile(new_targets[idx],(mu,)) ])
    print(len(self._data_memory))
    return list(self._data_memory.astype("int32")), list(self._targets_memory.astype("int32"))
def _get_datasets(dataset_names):
    """Resolve a '-'-separated list of dataset names into handler classes."""
    return list(map(_get_dataset, dataset_names.split("-")))
def _get_dataset(dataset_name):
    """Map a dataset name to its handler class.

    The name is normalised but, as written, every name resolves to the
    MNIST handler — presumably other handlers were trimmed from this copy.
    """
    dataset_name = dataset_name.lower().strip()
    return iMNIST
class DataHandler:
    """Base description of a dataset: the underlying torchvision dataset class
    plus the transform pipelines used for training, meta-updates and eval."""
    base_dataset = None                          # torchvision-style dataset class
    train_transforms = []                        # transforms for training samples
    mata_transforms = [transforms.ToTensor()]    # (sic) meta/inner-loop transforms
    common_transforms = [transforms.ToTensor()]  # transforms for evaluation data
    class_order = None                           # optional fixed class ordering
class iMNIST(DataHandler):
    """MNIST handler: tensors normalised with the standard MNIST mean/std."""
    base_dataset = datasets.MNIST
    train_transforms = [ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]
    common_transforms = [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
| [
"joeljosephjin@gmail.com"
] | joeljosephjin@gmail.com |
50d582d1bb0532da784ca17a19fe5ee9faa61c62 | dc066d2f8be45418f1b9c950bb8c03ab7ac0d5b1 | /platform/wb45/wtfconfig-wlan0_powercycle.py | 9e1ae15044bad9c73aab9663da5064c6f9a22d00 | [] | no_license | LairdCP/wtf | b561d98f1638e386a2791eec761f53d2294db885 | ab0229e642010b495daa7bdd1ab1dc26a01e066b | refs/heads/master | 2020-04-06T07:08:20.377277 | 2016-08-23T18:17:21 | 2016-08-30T14:34:14 | 64,800,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | import wtf.node.ap
import wtf.node.sta
import wtf.node.wb
import wtf.comm
import wtf.power
# create AP configurations that match the APs in your vicinity
# ap_serial = wtf.comm.Serial(port="/dev/ttyUSB1",
# prompt="[root@localhost dev]# ")
# ap_serial.name = "AP"
# ap_serial.verbosity = 1
# ap = wtf.node.ap.Hostapd(ap_serial, "libertas_tf_sdio", "wlan0")
# Serial console of the WB45 device under test.
sta_serial = wtf.comm.Serial(port="/dev/ttyUSB5",
                             prompt="# ")
# Networked power switch (host 192.168.0.50, outlet 6) used to power-cycle it.
sta_power = wtf.power.WebPowerSwitch('192.168.0.50', 6)
sta_serial.name = "WB45-2"
sta_serial.verbosity = 2
# Station node: WB45 board driven through ath6kl_sdio on wlan0.
sta = wtf.node.wb.WB45(sta_serial, "ath6kl_sdio", "wlan0", sta_power)
# tell wtf about all of your nodes
nodes = [ sta ]
# tell wtf which test suites you want to run
suites = [ "check_wlan0_powercycle" ]
| [
"steve.derosier@lairdtech.com"
] | steve.derosier@lairdtech.com |
6fb5a5d06cc1639f1235a4e1a88e8ac5cb8298f3 | 4b763d8bb06f03a518f765ef7a4558d18fcb5844 | /test.py | 98ec36987e7e857e3cc5fd417a4dbfc1d7e6fd9f | [] | no_license | blouiecubero/flask_experiment | e4e6be887f5d54db6adf5d3ded95cb7158a7b193 | 8595e44612534b5c737b1002cf9422a4635a6343 | refs/heads/master | 2021-01-15T13:18:49.359907 | 2017-08-09T04:07:18 | 2017-08-09T04:07:18 | 99,667,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
create_table = "CREATE TABLE users (id int, username text, password text)"
cursor.execute(create_table)
user = (1, 'jose', 'asdf')
insert_query = "INSERT INTO users VALUES (?, ?, ?)"
cursor.execute(insert_query, user)
users = [
(2, 'jeff', 'qwe'),
(3, 'erin', 'uio'),
(4, 'anne', 'ghj')
]
cursor.executemany(insert_query, users)
select_query = "SELECT * FROM users"
for row in cursor.execute(select_query):
print(row)
connection.commit()
connection.close() | [
"blouiecubero@gmail.com"
] | blouiecubero@gmail.com |
74d6fadda7ef808691b43ba376c8031125f3fb04 | 83b9eb9cc9570df03b4854cc644543f07e9322c6 | /DeepLearning_Pytorch/MyCode/Cnn.py | 7e6038cd33ea4203be977d39d8a54a8ac45f3cef | [] | no_license | hdhcy/pytorch_learning | fdd09461119b022a873074248dba499acd597fef | 44aaa338dd7a55fa8e76416c4dd94247e99a3b73 | refs/heads/master | 2020-07-06T01:25:12.889082 | 2019-09-09T01:20:04 | 2019-09-09T01:20:04 | 202,844,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | '''
Author: hdhcy
Email: 982768496@qq.com
date: 2019/9/3 15:03
'''
import torch
from torch import nn,optim
from MyModel import SimpleCNN,Lenet,AlexNet,GoogLeNet,Bottleneck
'''
model=SimpleCNN()
# print(model)
# print(model.children())
# print(model.modules())
#
# new_model=nn.Sequential(*list(model.children())[:1])
#print(new_model)
# print(model.named_children())
# print(model.named_modules())
conv_modle=nn.Sequential()
for layer in model.named_modules():
print(layer[0])
if isinstance(layer[1],nn.Conv2d):
print('true')
conv_modle.add_module(layer[0],layer[1])
print(conv_modle)
'''
# Build a Bottleneck block (project-local MyModel import) with 32 in/32
# bottleneck channels and print its class-level channel-expansion factor.
model=Bottleneck(32,32)
print(model.expansion)
| [
"982768496@qq.com"
] | 982768496@qq.com |
09bcac6b49ebc720651e38106a8435ef30728e66 | 2382275cd59aa19cd66a399c1af0355613740348 | /Proc Gen RPG/FileManager.py | 35dcf09daef336570f5ce0ab5b030b1b053085ef | [
"Unlicense"
] | permissive | Rtgher/ProcGen-RPG | 6ea5cc7afe415cc5a0385c86781beac450bb106c | 61f3e15af21ad30233876040eadbc2181e1172b4 | refs/heads/master | 2021-01-21T23:20:41.772426 | 2017-06-23T15:09:28 | 2017-06-23T15:09:28 | 95,230,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | """Procedural-Generated Dungeon RPG
FileHandler.py
this is a prototype for a procedural-generated dungeon RPG
has bare-minimum graphics
created by rtgher 26/11/2014->
"""
__version__=0.2
__status__="Prototype"
__author__="traghera@gmail.com"
#License
""" The software itself is a free product. Anyone can download and install it on their own machine, without
having to pay anything for it. Anyone can dustribute the product and make copies of it as they wish.
The source code is available for view to anyone, wheter it is an individual or corporation.
Anyone can distribute and post the source code on the internet or classrooms.
Anyone can modify the source code as they will however:
#Credit must be given to the original author;
#Any software that uses this source code, or any modified verion of this source code must
be made available for free unrestricted download, unless if otherwise specified by the author.
#Any software that uses this source code, or any portions of it cannot be used in comercial products
except with the aproval of the author. (contact on e-mail).
Anyone can buy rights to use the source code in any commercial products by messaging the author;
"""
#import section
| [
"noreply@github.com"
] | Rtgher.noreply@github.com |
2ddcf7148c7696de359ace2ede7a271758df3cfc | 2118f244be2e09508e3c89dee432d4a75343b430 | /Twitter Projects/twitter_sentiment_basic_with_function_RJ_Keys.py | 981ff629e356b9bde7b1e8186617e488aaf965f0 | [] | no_license | RamiJaloudi/Python-Scripts | 91d139093a95f9498a77b1df8ec2f790c4f4dd4c | 37e740a618ae543a02c38dc04a32ef95202ff613 | refs/heads/master | 2020-04-29T14:55:41.108332 | 2019-03-18T05:42:06 | 2019-03-18T05:42:06 | 176,212,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
#consumer key, consumer secret, access token, access secret.
# NOTE(review): live Twitter API credentials are hard-coded in source — they
# should be rotated and loaded from the environment/config, not committed.
ckey="r2I3FdcFB3WRKpKoxhpb9pkra"
csecret="Snt0LzxPyKIUQphTQmbsf0DKPALKPfCAy4Jjr3g9O3A93AGdHM"
atoken="18894514-JsJsbjRkWF4jgA7nrMyNYfLR3RccNSUlTzrYO5shJ"
asecret="BhFpvR3ZJe46wmA3sEUJ1eStz8y83WtgIlw91jJBU01z6"
##def sentimentAnalysis(text):
##    encoded_text = urllib.quote(text)
class listener(StreamListener):
    """Minimal tweepy stream listener that prints every raw event payload."""

    def on_data(self, data):
        # ``data`` is the raw JSON string for one streaming event. Returning
        # None (nothing) — tweepy treats a non-False return as "keep
        # streaming"; confirm against the tweepy version in use.
        print(data)
        #return(True)
        ## tweet = data.split(',"text:"')[1].split('","source')[0]
        ##
        ## saveMe = tweet+'::'+sentimentRating+'\n'
        ## output = open('output.txt','a')
        ## outpute.write(saveMe)
        ## output.close()
        ## return True

    def on_error(self, status):
        # ``status`` is the error code reported by the streaming API.
        print (status)
# OAuth handshake with the credentials above, then open a filtered stream
# that hands every tweet matching the track term to the listener.
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)

twitterStream = Stream(auth, listener())
twitterStream.filter(track=["#target"])
"rjaloudi@gmail.com"
] | rjaloudi@gmail.com |
65398257cd8f44323e9a0e99c7ed1824e8f632ba | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_09_01/aio/_configuration.py | 38ab1c393bb97968a488f5f477a42303c3b73493 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 3,523 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for PolicyClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2019-09-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        super(PolicyClientConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop('api_version', "2019-09-01")  # type: str

        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        # Defaults to the public-cloud ARM scope unless the caller overrides it.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Standard azure-core pipeline policies; each can be replaced via kwargs.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            # Build a challenge-auth bearer policy from the credential when
            # none was supplied explicitly.
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"noreply@github.com"
] | gaoyp830.noreply@github.com |
692f1ae986c3b78350f42aa832aa27b180633536 | 5b81d836d7ccde2b09b0aea877af8b702374257c | /PL_13RegEx.py | 0e3c64d6fa9b1557fc873ff7c2d49c4db64bfef5 | [] | no_license | Daoshun/Python_Learning | 3347a00f4fbb73fa02d2583507ac8b3ae98e4528 | 5d5bd08d0c72186c099498972fb2974b5b937b62 | refs/heads/master | 2020-04-04T09:58:36.024854 | 2018-11-02T08:41:23 | 2018-11-02T08:41:23 | 155,837,953 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | #RegEx正则表达式 搜索字符串中是否含有所搜寻的词汇
import re
pattern1 = 'cat'
pattern2 = 'bird'
string = 'dog runs to cat'
print(pattern1 in string)
print(pattern2 in string)
#灵活匹配
pattern3 = r'r[au]n' #可以搜索a或u
print(re.search(pattern3,'dogs runs to cat'))
pattern4 = r'r[0-9A-Za-z]n' #可以搜索中间0-9,a-z,A-Z的任意值
print(re.search(pattern4,'dogs runs to cat'))
#\d : 任何数字
#\D : 不是数字
#\s : 任何 white space, 如 [\t\n\r\f\v]
#\S : 不是 white space
#\w : 任何大小写字母, 数字和 “” [a-zA-Z0-9]
#\W : 不是 \w
#\b : 空白字符 (只在某个字的开头或结尾)
#\B : 空白字符 (不在某个字的开头或结尾)
#\\ : 匹配 \
#. : 匹配任何字符 (除了 \n)
#^ : 匹配开头
#$ : 匹配结尾
#? : 前面的字符可有可无
pattern5 = r'r[\w]n$' #\w任何大小写字母,数字,和空格 $从结尾开始匹配,第一个是r4n
print(re.search(pattern5,'run rUn r4n'))
#re.search(r''^I'',string,flags=re.M) flags=re.M 可以做到对每一行做单独处理
string1 = '''
dog runs to cat.
I run to dog.
'''
print(re.search(r'^I',string,flags=re.M))
string2 = """
dog runs to cat.
I run to dog.
"""
print(re.search(r"^I", string)) # None
print(re.search(r"^I", string, flags=re.M)) # <_sre.SRE_Match object; span=(18, 19), match='I'>
| [
"447500628@qq.com"
] | 447500628@qq.com |
cb6b0ff04a9a7b260033eed4e2b098cc4d0a791d | 4357ec3d2d093c3b141505e78590dbabe24956bd | /ckanclient/contrib/resources_csv_dump.py | 9484dd387a25f5f9ff5a635744c95a292167ed7f | [] | no_license | timrdf/ckanclient | 6b1ac2c2e03cbcc73184e52cf200b40ef62b645d | b080e8de26527881c20102770111d41ad373282b | refs/heads/master | 2021-01-15T16:28:48.250476 | 2013-06-12T13:41:14 | 2013-06-12T13:41:14 | 10,643,533 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | '''
Create a CSV dump of a CKAN instance, resource by resource.
Compare with the 'paster db simple-dump-csv' command which is one line per dataset (although it requires access to the CKAN machine).
Author: Friedrich
'''
import argparse
import ckanclient
import csv
# Command-line interface: the CKAN API endpoint and the destination CSV path.
parser = argparse.ArgumentParser(description=
    'Create a CSV dump of a CKAN instance, resource by resource.')
parser.add_argument('url', metavar='API_URL', type=str,
                    help='CKAN API endpoint')
parser.add_argument('outfile', metavar='OUTPUT_FILE', type=str,
                    help='output CSV file name')
def main():
    """Walk every dataset of the CKAN instance and write one CSV row per
    resource; datasets without resources still get a single row.

    NOTE(review): this file is Python 2 (``print`` statement, ``unicode``).
    """
    args = parser.parse_args()
    client = ckanclient.CkanClient(args.url)
    rows = []
    for pkg_name in client.package_register_get():
        pkg = client.package_entity_get(pkg_name)
        # Flatten the 'extras' dict into top-level extras_<key> columns.
        for extra, value in pkg.get('extras', {}).items():
            pkg['extras_' + extra] = value
        if 'extras' in pkg:
            del pkg['extras']
        resources = pkg.get('resources', [])
        for resource in resources:
            # One row per resource: dataset fields + resource_<prop> columns.
            rpkg = pkg.copy()
            for resprop, value in resource.items():
                rpkg['resource_' + resprop] = value
            rows.append(rpkg)
        if not len(resources):
            rows.append(pkg)
        del pkg['resources']
        print pkg_name
    # The union of all keys across rows becomes the CSV header.
    headers = set()
    for row in rows:
        headers.update(row.keys())
    fh = open(args.outfile, 'wb')
    writer = csv.DictWriter(fh, headers)
    writer.writerow(dict(zip(headers, headers)))
    for row in rows:
        row_ = {}
        for column, value in row.items():
            if isinstance(value, unicode):
                value = value.encode('utf-8')  # py2 csv wants bytes
            row_[column] = value
        writer.writerow(row_)
    fh.close()

if __name__ == '__main__':
    main()
| [
"david.read@okfn.org"
] | david.read@okfn.org |
80b48bbdc0ae9c3389798472a0b866f7630a16f5 | 35d8b11721e6a2a4c2b0eb4285bbab4a31501c99 | /NBM_map.py | 7adf53465b9aa847514af99bf481136c9bc2fee5 | [] | no_license | tjturnage/NBM | 7a5e1cf9c07c53c9f6758c80112a69f16b1eaf4a | bd2b2405909e9a6cf580aa60c0e97176332cae00 | refs/heads/master | 2021-07-10T00:52:17.377926 | 2020-12-07T02:02:22 | 2020-12-07T02:02:22 | 217,791,327 | 1 | 0 | null | 2020-04-19T21:43:05 | 2019-10-27T01:42:59 | Python | UTF-8 | Python | false | false | 7,042 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 15:57:27 2019
@author: thomas.turnage
"""
import re
import os
import sys
import numpy as np
import pandas as pd
import requests
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from gis_layers import make_shapes_mi
shape_mini = make_shapes_mi()
from reference_data import nbm_station_dict
station_master = nbm_station_dict()

download = False

# Environment detection via bare try/except: /var/www exists only on the
# linux deployment box; anything else is treated as the Windows workstation.
try:
    os.listdir('/var/www')
    base_gis_dir = '/data/GIS'
except:
    base_gis_dir = 'C:/data/GIS'

try:
    os.listdir('/usr')
    windows = False
    base_dir = '/data'
    sys.path.append('/data/scripts/resources')
    image_dir = os.path.join('/var/www/html/radar','images')
    image_dir = os.path.join('/data','images')  # overrides the line above
    raw_nbm_file = os.path.join(base_dir,'nbm_raw.txt')
    trimmed_nbm_file = os.path.join(base_dir,'nbm_trimmed.txt')
except:
    windows = True
    base_dir = 'C:/data'
    image_dir = os.path.join(base_dir,'images','NBM')
    raw_nbm_file = os.path.join(base_dir,'nbm_raw.txt')
    trimmed_nbm_file = os.path.join(base_dir,'nbm_trimmed.txt')
    sys.path.append('C:/data/scripts/resources')

from my_functions import dtList_nbm, categorize

download = False
bulletin_type = 'nbhtx'  # NBM hourly text bulletin
def download_nbm_bulletin(url, fname, path_check):
    """Return the local path for *fname* under ``base_dir``.

    Downloads the bulletin from *url* first, unless *path_check* is the
    sentinel string ``'just_path'`` (in which case only the path is built).
    """
    dst = os.path.join(base_dir, fname)
    if path_check != 'just_path':
        r = requests.get(url)
        print('downloading ... ' + str(url))
        # Fix: write through a context manager so the file handle is always
        # closed (the original ``open(dst,'wb').write(...)`` leaked it).
        with open(dst, 'wb') as f:
            f.write(r.content)
    return dst
# Use the cycle from 3 hours ago — presumably so the bulletin is already
# published on NOMADS; confirm against the publication schedule.
now = datetime.utcnow()
now2 = now - timedelta(hours=3)
ymd = now2.strftime('%Y%m%d')
hour = now2.strftime('%H')

#url = 'https://para.nomads.ncep.noaa.gov/pub/data/nccf/com/blend/para/blend.20191107/15/text/blend_nbhtx.t15z'
url = 'https://para.nomads.ncep.noaa.gov/pub/data/nccf/com/blend/para/blend.' + ymd + '/' + hour + '/text/blend_' + bulletin_type + '.t' + hour + 'z'

map_plot_stations = {}

# Only Michigan stations get plotted.
mi_stations = []
for key in station_master:
    if station_master[key]['state'] == 'MI':
        mi_stations.append(key)

fname = 'nbm_raw_hourly.txt'
if download:
    raw_file_path = download_nbm_bulletin(url,fname,'hi')
    download = False
else:
    # Reuse a previously downloaded file; only resolve its path.
    raw_file_path = download_nbm_bulletin(url,fname,'just_path')
# For every Michigan station: extract its section of the raw bulletin into a
# trimmed file, parse that into a pandas frame, then categorise the hourly
# snow (S01) values for six 2-hourly plot times.
for key in mi_stations:
    #if s in ['KAZO','KGRR','KMKG','KMOP','KMKG','KBIV']:
    station_id = key
    station_description = station_master[key]['name']
    lat = station_master[key]['lat']
    lon = station_master[key]['lon']
    column_list = []
    station_found = False
    utc_shift = station_master[key]['time_shift']
    # Section markers: the station id opens a section, 'SOL' terminates it,
    # 'DT' marks the date/time header row inside the section.
    p = re.compile(key)
    s = re.compile('SOL')
    dt = re.compile('DT')
    ymdh = re.compile('[0-9]+/[0-9]+/[0-9]+\s+[0-9]+')
    dst = open(trimmed_nbm_file, 'w')
    with open(raw_file_path) as fp:
        for line in fp:
            m = p.search(line)
            sol = s.search(line)
            dt_match = dt.search(line)
            if m is not None:
                # Station header line: parse the model run time and build the
                # pandas time index for this bulletin.
                station_found = True
                dt_line = line
                ymdh_match = ymdh.search(dt_line)
                run_dt = datetime.strptime(ymdh_match[0], '%m/%d/%Y %H%M')
                idx,model_run_local = dtList_nbm(run_dt,bulletin_type,utc_shift)
                start_time = idx[1]
                end_time = idx[-1]
                data_list = idx[1:-1]
            elif station_found and sol is None:
                if dt_match is not None:
                    pass  # skip the DT header row
                else:
                    # Data row: first 3 chars after column 0 name the element.
                    start = str(line[1:4])
                    column_list.append(start)
                    dst.write(line)
            elif sol is not None and station_found:
                # End of this station's section.
                dst.close()
                break
    nbm_old = None
    nbm = None
    # Fixed-width parse of the trimmed section.
    nbm_old = pd.read_fwf(trimmed_nbm_file, widths=(5,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3))
    elements = column_list[1:]
    # flip table so times are rows to align with pandas
    nbm = nbm_old.transpose()
    # after the flip, column names are useless. Use the created column_list before the flip
    # to make a dictionary that replaces bad column names with the original, pre-flip column names
    old_column_names = nbm.columns.tolist()
    col_rename_dict = {i:j for i,j in zip(old_column_names,elements)}
    nbm.rename(columns=col_rename_dict, inplace=True)
    # Hourly bulletins carry an extraneous UTC row; drop it when present.
    try:
        nbm.drop(['UTC'], inplace=True)
    except:
        pass
    nbm.set_index(data_list, inplace=True)
    sn_plot = []
    ts_list = []
    sn_list = nbm.S01.tolist()           # hourly snow amounts
    sn_cat_list = categorize(sn_list,'sn')
    # Sample every other hour for the six map panels.
    for t in (np.arange(0,12,2)):
        ts = nbm.index[t]
        t_str = ts.strftime('%d %h %Y %H')
        t_str = ts.strftime('%B %d, %Y - %I %p')  # overrides the line above
        sn_plot.append(sn_cat_list[t])
        ts_list.append(t_str)
    map_plot_stations[key] = {'snow':sn_plot, 'time_string':ts_list, 'lon':lon,'lat':lat}
# Category -> RGB marker colour. NOTE(review): the first palette is dead
# code — it is immediately overwritten by the second assignment below.
cat_color_dict = {'0':(0.2,0.2,0.2),
                  '1':(0.3,0.3,0.4),
                  '2':(0.4,0.4,0.6),
                  '3':(0.5,0.5,0.8),
                  '4':(0.6,0.6,0.2),
                  '5':(0.6,0.0,0.0),
                  '6':(1,0,0),
                  '7':(0.9,0.0,0.0),
                  }

cat_color_dict = {'0':(0.2,0.4,0.4),
                  '1':(0.6,0.2,0.2),
                  '2':(0.8,0.1,0.1),
                  '3':(1,0,0),
                  '4':(0.6,0.6,0.2),
                  '5':(0.6,0.0,0.0),
                  '6':(1,0,0),
                  '7':(0.9,0.0,0.0),
                  }

# Map bounds: [lon_min, lon_max, lat_min, lat_max] over western Michigan.
extent = [-86.7,-84.3,41.5,44.5]

# 2x3 grid of map panels, one per sampled time step.
fig, axes = plt.subplots(2,3,figsize=(15,12),subplot_kw={'projection': ccrs.PlateCarree()})
for a,n in zip(axes.ravel(),(np.arange(0,6,1))):
    #this_title = plts[y]['title']
    a.set_extent(extent, crs=ccrs.PlateCarree())
    a.tick_params(axis='both', labelsize=8)
    for sh in shape_mini:
        a.add_feature(shape_mini[sh], facecolor='none', edgecolor='gray', linewidth=0.5)
    for key in map_plot_stations:
        #print(lon,lat,dat,c)
        this_lon = map_plot_stations[key]['lon']
        this_lat = map_plot_stations[key]['lat']
        this_dat = map_plot_stations[key]['snow'][n]
        this_c = cat_color_dict[str(this_dat)]
        # Marker size grows with the snow category.
        a.scatter(this_lon,this_lat,s=((this_dat+2)*10),c=[this_c])
        a.set_title(map_plot_stations[key]['time_string'][n])
    #plt.text(lon+.03, lat+.03, key, fontsizs=e=10)

#plt.yticks(np.linspace(0,250,6,endpoint=True))
#plt.xticks(np.linspace(0,6,7,endpoint=True))

## End Test Plotting
"noreply@github.com"
] | tjturnage.noreply@github.com |
75cf58a71f997885665fee89e664486bc085d0f5 | e60179542c28a7ca53df13ae78efc5415c550d76 | /attacks/brendel_bethge.py | b5ee8fbb48214385306c0bb3178a61d5f2b08d7d | [] | no_license | Harry24k/foolbox | 1199ada0872baf5e39c2a5f5362cb20e23870db6 | b5c3acd1588d02f99890321856a4768b3db4588c | refs/heads/main | 2023-05-23T21:48:17.316927 | 2021-06-10T00:34:09 | 2021-06-10T00:34:09 | 375,523,862 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84,836 | py | # mypy: allow-untyped-defs, no-strict-optional
from typing import Union, Optional, Tuple, Any
from typing_extensions import Literal
from abc import ABC
from abc import abstractmethod
import numpy as np
import eagerpy as ep
import logging
import warnings
from ..devutils import flatten
from . import LinearSearchBlendedUniformNoiseAttack
from ..tensorboard import TensorBoard
from .base import Model
from .base import MinimizationAttack
from .base import get_is_adversarial
from .base import get_criterion
from .base import T
from ..criteria import Misclassification, TargetedMisclassification
from .base import raise_if_kwargs
from ..distances import l0, l1, l2, linf
# numba is an optional dependency: if it is missing, install a no-op
# ``jitclass`` decorator and remember the import error so it can be raised
# lazily when the attack is actually instantiated (see __init__ below).
try:
    from numba import jitclass  # type: ignore
except (ModuleNotFoundError, ImportError) as e:  # pragma: no cover
    # delay the error until the attack is initialized
    NUMBA_IMPORT_ERROR = e

    def jitclass(*args, **kwargs):
        # Stand-in decorator that returns the class unchanged.
        def decorator(c):
            return c

        return decorator

else:
    NUMBA_IMPORT_ERROR = None

# Numerical fuzz used by the trust-region solvers below to avoid divide-by-zero.
EPS = 1e-10
class Optimizer(object):  # pragma: no cover
    """ Base class for the trust-region optimization. If feasible, this optimizer solves the problem

        min_delta distance(x0, x + delta) s.t. ||delta||_2 <= r AND delta^T b = c AND min_ <= x + delta <= max_

    where x0 is the original sample, x is the current optimisation state, r is the trust-region radius,
    b is the current estimate of the normal vector of the decision boundary, c is the estimated distance of x
    to the trust region and [min_, max_] are the value constraints of the input. The function distance(.,.)
    is the distance measure to be optimised (e.g. L2, L1, L0).

    Subclasses supply the norm-specific pieces referenced here:
    ``fun_and_jac`` and ``_get_final_delta`` (used by
    ``optimize_distance_s_t_boundary_and_trustregion``). ``BFGSB`` is the
    box-constrained BFGS solver defined elsewhere in this module.
    """

    def __init__(self):
        self.bfgsb = BFGSB()  # a box-constrained BFGS solver

    def solve(self, x0, x, b, min_, max_, c, r):
        # Dispatch to the right sub-problem depending on whether the boundary
        # plane b^T delta = c is reachable within the box and trust region.
        x0, x, b = x0.astype(np.float64), x.astype(np.float64), b.astype(np.float64)
        cmax, cmaxnorm = self._max_logit_diff(x, b, min_, max_, c)

        if np.abs(cmax) < np.abs(c):
            # problem not solvable (boundary cannot be reached)
            if np.sqrt(cmaxnorm) < r:
                # make largest possible step towards boundary while staying within bounds
                _delta = self.optimize_boundary_s_t_trustregion(
                    x0, x, b, min_, max_, c, r
                )
            else:
                # make largest possible step towards boundary while staying within trust region
                _delta = self.optimize_boundary_s_t_trustregion(
                    x0, x, b, min_, max_, c, r
                )
        else:
            if cmaxnorm < r:
                # problem is solvable
                # proceed with standard optimization
                _delta = self.optimize_distance_s_t_boundary_and_trustregion(
                    x0, x, b, min_, max_, c, r
                )
            else:
                # problem might not be solvable
                bnorm = np.linalg.norm(b)
                minnorm = self._minimum_norm_to_boundary(x, b, min_, max_, c, bnorm)

                if minnorm <= r:
                    # problem is solvable, proceed with standard optimization
                    _delta = self.optimize_distance_s_t_boundary_and_trustregion(
                        x0, x, b, min_, max_, c, r
                    )
                else:
                    # problem not solvable (boundary cannot be reached)
                    # make largest step towards boundary within trust region
                    _delta = self.optimize_boundary_s_t_trustregion(
                        x0, x, b, min_, max_, c, r
                    )

        return _delta

    def _max_logit_diff(self, x, b, _ell, _u, c):
        """ Tests whether the (estimated) boundary can be reached within trust region.

        Returns the largest achievable value of b^T delta inside the box (in
        the direction of sign(c)) together with the L2 norm of that extreme step.
        """
        N = x.shape[0]
        cmax = 0.0
        norm = 0.0

        if c > 0:
            for n in range(N):
                if b[n] > 0:
                    cmax += b[n] * (_u - x[n])
                    norm += (_u - x[n]) ** 2
                else:
                    cmax += b[n] * (_ell - x[n])
                    norm += (x[n] - _ell) ** 2
        else:
            for n in range(N):
                if b[n] > 0:
                    cmax += b[n] * (_ell - x[n])
                    norm += (x[n] - _ell) ** 2
                else:
                    cmax += b[n] * (_u - x[n])
                    norm += (_u - x[n]) ** 2

        return cmax, np.sqrt(norm)

    def _minimum_norm_to_boundary(self, x, b, _ell, _u, c, bnorm):
        """ Computes the minimum norm necessary to reach the boundary. More precisely, we aim to solve the
        following optimization problem

            min ||delta||_2^2 s.t. lower <= x + delta <= upper AND b.dot(delta) = c

        Lets forget about the box constraints for a second, i.e.

            min ||delta||_2^2 s.t. b.dot(delta) = c

        The dual of this problem is quite straight-forward to solve,

            g(lambda, delta) = ||delta||_2^2 + lambda * (c - b.dot(delta))

        The minimum of this Lagrangian is delta^* = lambda * b / 2, and so

            inf_delta g(lambda, delta) = lambda^2 / 4 ||b||_2^2 + lambda * c

        and so the optimal lambda, which maximizes inf_delta g(lambda, delta), is given by

            lambda^* = 2c / ||b||_2^2

        which in turn yields the optimal delta:

            delta^* = c * b / ||b||_2^2

        To take into account the box-constraints we perform a binary search over lambda and apply the box
        constraint in each step.
        """
        N = x.shape[0]

        lambda_lower = 2 * c / bnorm ** 2
        lambda_upper = (
            np.sign(c) * np.inf
        )  # optimal initial point (if box-constraints are neglected)
        _lambda = lambda_lower
        k = 0

        # perform a binary search over lambda
        while True:
            # compute _c = b.dot([- _lambda * b / 2]_clip)
            k += 1
            _c = 0
            norm = 0

            if c > 0:
                for n in range(N):
                    lam_step = _lambda * b[n] / 2
                    if b[n] > 0:
                        max_step = _u - x[n]
                        delta_step = min(max_step, lam_step)
                        _c += b[n] * delta_step
                        norm += delta_step ** 2
                    else:
                        max_step = _ell - x[n]
                        delta_step = max(max_step, lam_step)
                        _c += b[n] * delta_step
                        norm += delta_step ** 2
            else:
                for n in range(N):
                    lam_step = _lambda * b[n] / 2
                    if b[n] > 0:
                        max_step = _ell - x[n]
                        delta_step = max(max_step, lam_step)
                        _c += b[n] * delta_step
                        norm += delta_step ** 2
                    else:
                        max_step = _u - x[n]
                        delta_step = min(max_step, lam_step)
                        _c += b[n] * delta_step
                        norm += delta_step ** 2

            # adjust lambda
            if np.abs(_c) < np.abs(c):
                # increase absolute value of lambda
                if np.isinf(lambda_upper):
                    _lambda *= 2
                else:
                    lambda_lower = _lambda
                    _lambda = (lambda_upper - lambda_lower) / 2 + lambda_lower
            else:
                # decrease lambda
                lambda_upper = _lambda
                _lambda = (lambda_upper - lambda_lower) / 2 + lambda_lower

            # stopping condition
            if 0.999 * np.abs(c) - EPS < np.abs(_c) < 1.001 * np.abs(c) + EPS:
                break

        return np.sqrt(norm)

    def optimize_distance_s_t_boundary_and_trustregion(
        self, x0, x, b, min_, max_, c, r
    ):
        """ Find the solution to the optimization problem

            min_delta ||dx - delta||_p^p s.t. ||delta||_2^2 <= r^2 AND b^T delta = c AND min_ <= x + delta <= max_
        """
        # Two dual variables: (lambda for the plane constraint, mu >= 0 for
        # the trust region); the norm-specific fun_and_jac comes from the subclass.
        params0 = np.array([0.0, 0.0])
        bounds = np.array([(-np.inf, np.inf), (0, np.inf)])
        args = (x0, x, b, min_, max_, c, r)

        qk = self.bfgsb.solve(self.fun_and_jac, params0, bounds, args)
        return self._get_final_delta(
            qk[0], qk[1], x0, x, b, min_, max_, c, r, touchup=True
        )

    def optimize_boundary_s_t_trustregion_fun_and_jac(
        self, params, x0, x, b, min_, max_, c, r
    ):
        # Negated dual objective and gradient w.r.t. mu (BFGS-B minimises).
        N = x0.shape[0]
        s = -np.sign(c)
        _mu = params[0]
        t = 1 / (2 * _mu + EPS)

        g = -_mu * r ** 2
        grad_mu = -(r ** 2)

        for n in range(N):
            d = -s * b[n] * t

            if d < min_ - x[n]:
                d = min_ - x[n]
            elif d > max_ - x[n]:
                d = max_ - x[n]
            else:
                grad_mu += (b[n] + 2 * _mu * d) * (b[n] / (2 * _mu ** 2 + EPS))

            grad_mu += d ** 2
            g += (b[n] + _mu * d) * d

        return -g, -np.array([grad_mu])

    def safe_div(self, nominator, denominator):
        # Division that clamps near-zero denominators to +/-EPS.
        if np.abs(denominator) > EPS:
            return nominator / denominator
        elif denominator >= 0:
            return nominator / EPS
        else:
            return -nominator / EPS

    def optimize_boundary_s_t_trustregion(self, x0, x, b, min_, max_, c, r):
        """ Find the solution to the optimization problem

            min_delta sign(c) b^T delta s.t. ||delta||_2^2 <= r^2 AND min_ <= x + delta <= max_

        Note: this optimization problem is independent of the Lp norm being optimized.

        Lagrangian: g(delta) = sign(c) b^T delta + mu * (||delta||_2^2 - r^2)
        Optimal delta: delta = - sign(c) * b / (2 * mu)
        """
        params0 = np.array([1.0])
        args = (x0, x, b, min_, max_, c, r)
        bounds = np.array([(0, np.inf)])

        qk = self.bfgsb.solve(
            self.optimize_boundary_s_t_trustregion_fun_and_jac, params0, bounds, args
        )

        _delta = self.safe_div(-b, 2 * qk[0])

        # Clip the resulting step back into the [min_, max_] box.
        for n in range(x0.shape[0]):
            if _delta[n] < min_ - x[n]:
                _delta[n] = min_ - x[n]
            elif _delta[n] > max_ - x[n]:
                _delta[n] = max_ - x[n]

        return _delta
class BrendelBethgeAttack(MinimizationAttack, ABC):
"""Base class for the Brendel & Bethge adversarial attack [#Bren19]_, a powerful
gradient-based adversarial attack that follows the adversarial boundary
(the boundary between the space of adversarial and non-adversarial images as
defined by the adversarial criterion) to find the minimum distance to the
clean image.
This is the reference implementation of the Brendel & Bethge attack.
Implementation differs from the attack used in the paper in two ways:
* The initial binary search is always using the full 10 steps (for ease of implementation).
* The adaptation of the trust region over the course of optimisation is less
greedy but is more robust, reliable and simpler (decay every K steps)
Args:
init_attack : Attack to use to find a starting points. Defaults to
LinearSearchBlendedUniformNoiseAttack. Only used if starting_points is None.
overshoot : If 1 the attack tries to return exactly to the adversarial boundary
in each iteration. For higher values the attack tries to overshoot
over the boundary to ensure that the perturbed sample in each iteration
is adversarial.
steps : Maximum number of iterations to run. Might converge and stop
before that.
lr : Trust region radius, behaves similar to a learning rate. Smaller values
decrease the step size in each iteration and ensure that the attack
follows the boundary more faithfully.
lr_decay : The trust region lr is multiplied with lr_decay in regular intervals (see
lr_reduction_interval).
lr_num_decay : Number of learning rate decays in regular intervals of
length steps / lr_num_decay.
momentum : Averaging of the boundary estimation over multiple steps. A momentum of
zero would always take the current estimate while values closer to one
average over a larger number of iterations.
tensorboard : The log directory for TensorBoard summaries. If False, TensorBoard
summaries will be disabled (default). If None, the logdir will be
runs/CURRENT_DATETIME_HOSTNAME.
binary_search_steps : Number of binary search steps used to find the adversarial boundary
between the starting point and the clean image.
References:
.. [#Bren19] Wieland Brendel, Jonas Rauber, Matthias Kümmerer,
Ivan Ustyuzhaninov, Matthias Bethge,
"Accurate, reliable and fast robustness evaluation",
33rd Conference on Neural Information Processing Systems (2019)
https://arxiv.org/abs/1907.01003
"""
def __init__(
self,
init_attack: Optional[MinimizationAttack] = None,
overshoot: float = 1.1,
steps: int = 1000,
lr: float = 1e-3,
lr_decay: float = 0.5,
lr_num_decay: int = 20,
momentum: float = 0.8,
tensorboard: Union[Literal[False], None, str] = False,
binary_search_steps: int = 10,
):
if NUMBA_IMPORT_ERROR is not None:
raise NUMBA_IMPORT_ERROR # pragma: no cover
self.init_attack = init_attack
self.overshoot = overshoot
self.steps = steps
self.lr = lr
self.lr_decay = lr_decay
self.lr_num_decay = lr_num_decay
self.momentum = momentum
self.tensorboard = tensorboard
self.binary_search_steps = binary_search_steps
self._optimizer: Optimizer = self.instantiate_optimizer()
def run( # noqa: C901
self,
model: Model,
inputs: T,
criterion: Union[TargetedMisclassification, Misclassification, T],
*,
starting_points: Optional[ep.Tensor] = None,
early_stop: Optional[float] = None,
**kwargs: Any,
) -> T:
"""Applies the Brendel & Bethge attack.
Parameters
----------
inputs : Tensor that matches model type
The original clean inputs.
labels : Integer tensor that matches model type
The reference labels for the inputs.
criterion : Callable
A callable that returns true if the given logits of perturbed
inputs should be considered adversarial w.r.t. to the given labels
and unperturbed inputs.
starting_point : Tensor of same type and shape as inputs
Adversarial inputs to use as a starting points, in particular
for targeted attacks.
"""
raise_if_kwargs(kwargs)
del kwargs
tb = TensorBoard(logdir=self.tensorboard)
originals, restore_type = ep.astensor_(inputs)
del inputs
criterion_ = get_criterion(criterion)
del criterion
is_adversarial = get_is_adversarial(criterion_, model)
if isinstance(criterion_, Misclassification):
targeted = False
classes = criterion_.labels
elif isinstance(criterion_, TargetedMisclassification):
targeted = True
classes = criterion_.target_classes
else:
raise ValueError("unsupported criterion")
if starting_points is None:
init_attack: MinimizationAttack
if self.init_attack is None:
init_attack = LinearSearchBlendedUniformNoiseAttack()
logging.info(
f"Neither starting_points nor init_attack given. Falling"
f" back to {init_attack!r} for initialization."
)
else:
init_attack = self.init_attack
# TODO: use call and support all types of attacks (once early_stop is
# possible in __call__)
starting_points = self.init_attack.run(model, originals, criterion_)
best_advs = ep.astensor(starting_points)
while not is_adversarial(best_advs).all() :
starting_points = self.init_attack.run(model, originals, criterion_)
best_advs = ep.astensor(starting_points)
# perform binary search to find adversarial boundary
# TODO: Implement more efficient search with breaking condition
N = len(originals)
rows = range(N)
bounds = model.bounds
min_, max_ = bounds
x0 = originals
x0_np_flatten = x0.numpy().reshape((N, -1))
x1 = best_advs
lower_bound = ep.zeros(x0, shape=(N,))
upper_bound = ep.ones(x0, shape=(N,))
for _ in range(self.binary_search_steps):
epsilons = (lower_bound + upper_bound) / 2
mid_points = self.mid_points(x0, x1, epsilons, bounds)
is_advs = is_adversarial(mid_points)
lower_bound = ep.where(is_advs, lower_bound, epsilons)
upper_bound = ep.where(is_advs, epsilons, upper_bound)
starting_points = self.mid_points(x0, x1, upper_bound, bounds)
tb.scalar("batchsize", N, 0)
# function to compute logits_diff and gradient
def loss_fun(x):
logits = model(x)
if targeted:
c_minimize = best_other_classes(logits, classes)
c_maximize = classes
else:
c_minimize = classes
c_maximize = best_other_classes(logits, classes)
logits_diffs = logits[rows, c_minimize] - logits[rows, c_maximize]
assert logits_diffs.shape == (N,)
return logits_diffs.sum(), logits_diffs
value_and_grad = ep.value_and_grad_fn(x0, loss_fun, has_aux=True)
def logits_diff_and_grads(x) -> Tuple[Any, Any]:
_, logits_diffs, boundary = value_and_grad(x)
return logits_diffs.numpy(), boundary.numpy().copy()
x = starting_points
lrs = self.lr * np.ones(N)
lr_reduction_interval = min(1, int(self.steps / self.lr_num_decay))
converged = np.zeros(N, dtype=np.bool)
rate_normalization = np.prod(x.shape) * (max_ - min_)
original_shape = x.shape
_best_advs = best_advs.numpy()
for step in range(1, self.steps + 1):
if converged.all():
break # pragma: no cover
# get logits and local boundary geometry
# TODO: only perform forward pass on non-converged samples
logits_diffs, _boundary = logits_diff_and_grads(x)
# record optimal adversarials
distances = self.norms(originals - x)
source_norms = self.norms(originals - best_advs)
closer = distances < source_norms
is_advs = logits_diffs < 0
closer = closer.logical_and(ep.from_numpy(x, is_advs))
x_np_flatten = x.numpy().reshape((N, -1))
if closer.any():
_best_advs = best_advs.numpy().copy()
_closer = closer.numpy().flatten()
for idx in np.arange(N)[_closer]:
_best_advs[idx] = x_np_flatten[idx].reshape(original_shape[1:])
best_advs = ep.from_numpy(x, _best_advs)
# denoise estimate of boundary using a short history of the boundary
if step == 1:
boundary = _boundary
else:
boundary = (1 - self.momentum) * _boundary + self.momentum * boundary
# learning rate adaptation
if (step + 1) % lr_reduction_interval == 0:
lrs *= self.lr_decay
# compute optimal step within trust region depending on metric
x = x.reshape((N, -1))
region = lrs * rate_normalization
# we aim to slight overshoot over the boundary to stay within the adversarial region
corr_logits_diffs = np.where(
-logits_diffs < 0,
-self.overshoot * logits_diffs,
-(2 - self.overshoot) * logits_diffs,
)
# employ solver to find optimal step within trust region
# for each sample
deltas, k = [], 0
for sample in range(N):
if converged[sample]:
# don't perform optimisation on converged samples
deltas.append(
np.zeros_like(x0_np_flatten[sample])
) # pragma: no cover
else:
_x0 = x0_np_flatten[sample]
_x = x_np_flatten[sample]
_b = boundary[k].flatten()
_c = corr_logits_diffs[k]
r = region[sample]
delta = self._optimizer.solve( # type: ignore
_x0, _x, _b, bounds[0], bounds[1], _c, r
)
deltas.append(delta)
k += 1 # idx of masked sample
deltas = np.stack(deltas)
deltas = ep.from_numpy(x, deltas.astype(np.float32)) # type: ignore
# add step to current perturbation
x = (x + ep.astensor(deltas)).reshape(original_shape)
tb.probability("converged", converged, step)
tb.histogram("norms", source_norms, step)
tb.histogram("candidates/distances", distances, step)
tb.close()
return restore_type(best_advs)
    @abstractmethod
    def instantiate_optimizer(self) -> Optimizer:
        """Return the norm-specific trust-region solver (an ``Optimizer``)
        used by :meth:`run`; implemented by each Lp subclass."""
        raise NotImplementedError
    @abstractmethod
    def norms(self, x: ep.Tensor) -> ep.Tensor:
        """Return the per-sample norm of ``x`` (over all non-batch axes) in
        this attack's metric; implemented by each Lp subclass."""
        raise NotImplementedError
    @abstractmethod
    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ) -> ep.Tensor:
        """Return points between ``x0`` and ``x1`` parameterized by
        ``epsilons`` (epsilon=0 yields x0, epsilon=1 yields x1) in this
        attack's metric; implemented by each Lp subclass."""
        raise NotImplementedError
def best_other_classes(logits: ep.Tensor, exclude: ep.Tensor) -> ep.Tensor:
    """Return, per sample, the class index with the highest logit among all
    classes except the excluded one."""
    # push the excluded class to -inf so argmax can never select it
    masked_logits = logits - ep.onehot_like(logits, exclude, value=np.inf)
    return masked_logits.argmax(axis=-1)
class L2BrendelBethgeAttack(BrendelBethgeAttack):
    """L2 variant of the Brendel & Bethge adversarial attack. [#Bren19]_

    A powerful gradient-based attack that follows the adversarial boundary
    (the boundary between the space of adversarial and non-adversarial
    images as defined by the adversarial criterion) to find the minimum
    L2 distance to the clean image.

    This is the reference implementation of the Brendel & Bethge attack.

    References:
        .. [#Bren19] Wieland Brendel, Jonas Rauber, Matthias Kümmerer,
            Ivan Ustyuzhaninov, Matthias Bethge,
            "Accurate, reliable and fast robustness evaluation",
            33rd Conference on Neural Information Processing Systems (2019)
            https://arxiv.org/abs/1907.01003
    """

    distance = l2

    def instantiate_optimizer(self):
        # numba compiles the solver lazily on first construction, which can
        # be slow — warn the user if no compiled signature exists yet
        if not L2Optimizer._ctor.signatures:
            warnings.warn(
                "At the first initialisation the optimizer needs to be compiled. This may take between 20 to 60 seconds."
            )
        return L2Optimizer()

    def norms(self, x: ep.Tensor) -> ep.Tensor:
        # per-sample L2 norm over all non-batch axes
        return flatten(x).norms.l2(axis=-1)

    def mid_points(
        self, x0: ep.Tensor, x1: ep.Tensor, epsilons: ep.Tensor, bounds
    ) -> ep.Tensor:
        """Convex combination of x0 and x1: epsilon=0 gives x0, epsilon=1
        gives x1."""
        # reshape epsilons so it broadcasts over all non-batch dimensions
        broadcast_shape = epsilons.shape + (1,) * (x0.ndim - 1)
        epsilons = epsilons.reshape(broadcast_shape)
        return epsilons * x1 + (1 - epsilons) * x0
class LinfinityBrendelBethgeAttack(BrendelBethgeAttack):
    """L-infinity variant of the Brendel & Bethge adversarial attack. [#Bren19]_

    A powerful gradient-based attack that follows the adversarial boundary
    (the boundary between the space of adversarial and non-adversarial
    images as defined by the adversarial criterion) to find the minimum
    L-infinity distance to the clean image.

    This is the reference implementation of the Brendel & Bethge attack.

    References:
        .. [#Bren19] Wieland Brendel, Jonas Rauber, Matthias Kümmerer,
            Ivan Ustyuzhaninov, Matthias Bethge,
            "Accurate, reliable and fast robustness evaluation",
            33rd Conference on Neural Information Processing Systems (2019)
            https://arxiv.org/abs/1907.01003
    """

    distance = linf

    def instantiate_optimizer(self):
        return LinfOptimizer()

    def norms(self, x: ep.Tensor) -> ep.Tensor:
        # per-sample Linf norm over all non-batch axes
        return flatten(x).norms.linf(axis=-1)

    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ):
        """Project x1 towards x0 so that the Linf distance to x0 is at most
        epsilon * (max_ - min_); epsilon=0 gives x0, epsilon=1 gives x1."""
        min_, max_ = bounds
        s = max_ - min_
        # reshape epsilons so it broadcasts over all non-batch dimensions
        epsilons = epsilons.reshape(epsilons.shape + (1,) * (x0.ndim - 1))
        # clip the perturbation to the scaled epsilon-ball around x0
        hi = epsilons * s
        lo = -hi
        delta = x1 - x0
        delta = ep.where(delta < lo, lo, delta)
        delta = ep.where(delta > hi, hi, delta)
        return x0 + delta
class L1BrendelBethgeAttack(BrendelBethgeAttack):
    """L1 variant of the Brendel & Bethge adversarial attack. [#Bren19]_

    A powerful gradient-based attack that follows the adversarial boundary
    (the boundary between the space of adversarial and non-adversarial
    images as defined by the adversarial criterion) to find the minimum
    L1 distance to the clean image.

    This is the reference implementation of the Brendel & Bethge attack.

    References:
        .. [#Bren19] Wieland Brendel, Jonas Rauber, Matthias Kümmerer,
            Ivan Ustyuzhaninov, Matthias Bethge,
            "Accurate, reliable and fast robustness evaluation",
            33rd Conference on Neural Information Processing Systems (2019)
            https://arxiv.org/abs/1907.01003
    """

    distance = l1

    def instantiate_optimizer(self):
        return L1Optimizer()

    def norms(self, x: ep.Tensor) -> ep.Tensor:
        # per-sample L1 norm over all non-batch axes
        return flatten(x).norms.l1(axis=-1)

    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ) -> ep.Tensor:
        """Soft-threshold x1 towards x0: epsilon=0 gives x0, epsilon=1 gives
        x1 (shrinks each coordinate of x1 - x0 by a per-sample threshold)."""
        # reshape epsilons so it broadcasts over all non-batch dimensions
        epsilons = epsilons.reshape(epsilons.shape + (1,) * (x0.ndim - 1))
        threshold = (bounds[1] - bounds[0]) * (1 - epsilons)
        delta = x1 - x0
        # coordinates with |delta| above the threshold are shrunk towards x0,
        # the rest are reset to x0 exactly
        above = delta.abs() > threshold
        shrunk = x0 + delta.sign() * (delta.abs() - threshold)
        return ep.where(above, shrunk, x0)
class L0BrendelBethgeAttack(BrendelBethgeAttack):
    """L0 variant of the Brendel & Bethge adversarial attack. [#Bren19]_

    A powerful gradient-based attack that follows the adversarial boundary
    (the boundary between the space of adversarial and non-adversarial
    images as defined by the adversarial criterion) to find the minimum
    number of perturbed pixels.

    This is the reference implementation of the Brendel & Bethge attack.

    References:
        .. [#Bren19] Wieland Brendel, Jonas Rauber, Matthias Kümmerer,
            Ivan Ustyuzhaninov, Matthias Bethge,
            "Accurate, reliable and fast robustness evaluation",
            33rd Conference on Neural Information Processing Systems (2019)
            https://arxiv.org/abs/1907.01003
    """

    distance = l0

    def instantiate_optimizer(self):
        return L0Optimizer()

    def norms(self, x: ep.Tensor) -> ep.Tensor:
        # count entries whose magnitude exceeds a small tolerance (1e-4)
        # rather than exact non-zeros, to be robust to float noise
        flat = flatten(x)
        return (flat.abs() > 1e-4).sum(axis=-1)

    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ):
        """Sparse interpolation between x0 and x1: epsilon=0 gives x0,
        epsilon=1 gives x1 (keeps x1 only where the change is small)."""
        # reshape epsilons so it broadcasts over all non-batch dimensions
        epsilons = epsilons.reshape(epsilons.shape + (1,) * (x0.ndim - 1))
        threshold = (bounds[1] - bounds[0]) * epsilons
        small_change = ep.abs(x1 - x0) < threshold
        return ep.where(small_change, x1, x0)
@jitclass(spec=[])
class BFGSB(object):
    # Minimal numba-compiled BFGS-B solver (box-constrained quasi-Newton),
    # converted from the R implementation in PopED (bfgsb_min.R). Used by the
    # Lp Optimizer classes to solve the per-sample trust-region subproblems.
    def __init__(self):
        pass

    def solve(
        self, fun_and_jac, q0, bounds, args, ftol=1e-10, pgtol=-1e-5, maxiter=None
    ):
        # Minimize fun_and_jac(q, *args) subject to bounds[:, 0] <= q <= bounds[:, 1].
        # NOTE(review): the default pgtol is NEGATIVE, so the projected-gradient
        # stopping test `pg_norm < pgtol` below can never trigger (pg_norm >= 0).
        # This may be deliberate (always run to ftol/maxiter) — verify.
        N = q0.shape[0]

        if maxiter is None:
            maxiter = N * 200

        l = bounds[:, 0]  # noqa: E741
        u = bounds[:, 1]

        func_calls = 0

        old_fval, gfk = fun_and_jac(q0, *args)
        func_calls += 1

        k = 0
        Hk = np.eye(N)

        # Sets the initial step guess to dx ~ 1
        qk = q0
        old_old_fval = old_fval + np.linalg.norm(gfk) / 2

        # gnorm = np.amax(np.abs(gfk))
        _gfk = gfk

        # Compare with implementation BFGS-B implementation
        # in https://github.com/andrewhooker/PopED/blob/master/R/bfgsb_min.R
        while k < maxiter:
            # check if projected gradient is still large enough
            pg_norm = 0
            for v in range(N):
                if _gfk[v] < 0:
                    gv = max(qk[v] - u[v], _gfk[v])
                else:
                    gv = min(qk[v] - l[v], _gfk[v])

                if pg_norm < np.abs(gv):
                    pg_norm = np.abs(gv)

            if pg_norm < pgtol:
                break

            # get cauchy point
            x_cp = self._cauchy_point(qk, l, u, _gfk.copy(), Hk)
            qk1 = self._subspace_min(qk, l, u, x_cp, _gfk.copy(), Hk)
            pk = qk1 - qk

            # line search along pk enforcing strong Wolfe conditions
            (
                alpha_k,
                fc,
                gc,
                old_fval,
                old_old_fval,
                gfkp1,
                fnev,
            ) = self._line_search_wolfe(
                fun_and_jac, qk, pk, _gfk, old_fval, old_old_fval, l, u, args
            )
            func_calls += fnev

            if alpha_k is None:
                break

            # relative function-value convergence test
            if np.abs(old_fval - old_old_fval) <= (ftol + ftol * np.abs(old_fval)):
                break

            qkp1 = self._project(qk + alpha_k * pk, l, u)

            if gfkp1 is None:
                _, gfkp1 = fun_and_jac(qkp1, *args)

            sk = qkp1 - qk
            qk = qkp1

            # gradient difference; tiny components are clipped away from zero
            # to keep the curvature update well-conditioned
            yk = np.zeros_like(qk)
            for k3 in range(N):
                yk[k3] = gfkp1[k3] - _gfk[k3]

                if np.abs(yk[k3]) < 1e-4:
                    yk[k3] = -1e-4

            _gfk = gfkp1

            k += 1

            # update inverse Hessian matrix
            Hk_sk = Hk.dot(sk)

            sk_yk = 0
            sk_Hk_sk = 0
            for v in range(N):
                sk_yk += sk[v] * yk[v]
                sk_Hk_sk += sk[v] * Hk_sk[v]

            # guard against division by (near-)zero curvature terms
            if np.abs(sk_yk) >= 1e-8:
                rhok = 1.0 / sk_yk
            else:
                rhok = 100000.0

            if np.abs(sk_Hk_sk) >= 1e-8:
                rsk_Hk_sk = 1.0 / sk_Hk_sk
            else:
                rsk_Hk_sk = 100000.0

            for v in range(N):
                for w in range(N):
                    Hk[v, w] += yk[v] * yk[w] * rhok - Hk_sk[v] * Hk_sk[w] * rsk_Hk_sk

        return qk

    def _cauchy_point(self, x, l, u, g, B):
        # finds the cauchy point for q(x)=x'Gx+x'd s.t. l<=x<=u
        # g=G*x+d #gradient of q(x)
        # converted from r-code: https://github.com/andrewhooker/PopED/blob/master/R/cauchy_point.R
        n = x.shape[0]
        t = np.zeros_like(x)
        d = np.zeros_like(x)

        # per-coordinate breakpoints: step lengths at which each coordinate
        # hits its bound when moving along -g
        for i in range(n):
            if g[i] < 0:
                t[i] = (x[i] - u[i]) / g[i]
            elif g[i] > 0:
                t[i] = (x[i] - l[i]) / g[i]
            elif g[i] == 0:
                t[i] = np.inf

            if t[i] == 0:
                d[i] = 0
            else:
                d[i] = -g[i]

        ts = t.copy()
        ts = ts[ts != 0]
        ts = np.sort(ts)

        df = g.dot(d)
        d2f = d.dot(B.dot(d))

        if d2f < 1e-10:
            return x

        dt_min = -df / d2f
        t_old = 0
        i = 0
        z = np.zeros_like(x)

        # walk through the sorted breakpoints until the 1-D minimizer lies
        # inside the current segment
        while i < ts.shape[0] and dt_min >= (ts[i] - t_old):
            ind = ts[i] < t
            d[~ind] = 0
            z = z + (ts[i] - t_old) * d
            df = g.dot(d) + d.dot(B.dot(z))
            d2f = d.dot(B.dot(d))
            # NOTE(review): the initial minimizer above is -df/d2f but here the
            # minus sign is absent (dt_min = df / ...). Possibly a dropped
            # minus — verify against PopED's cauchy_point.R.
            dt_min = df / (d2f + 1e-8)
            t_old = ts[i]
            i += 1

        dt_min = max(dt_min, 0)
        t_old = t_old + dt_min
        x_cp = x - t_old * g
        temp = x - t * g
        x_cp[t_old > t] = temp[t_old > t]

        return x_cp

    def _subspace_min(self, x, l, u, x_cp, d, G):
        # minimize over the free (non-bound) coordinates starting from the
        # cauchy point x_cp
        # converted from r-code: https://github.com/andrewhooker/PopED/blob/master/R/subspace_min.R
        n = x.shape[0]
        Z = np.eye(n)
        # NOTE(review): `u - 1e8` looks like a typo for `u - 1e-8` (it marks
        # essentially every coordinate as fixed at the upper bound) — verify.
        fixed = (x_cp <= l + 1e-8) + (x_cp >= u - 1e8)

        if np.all(fixed):
            x = x_cp
            return x

        Z = Z[:, ~fixed]
        rgc = Z.T.dot(d + G.dot(x_cp - x))
        rB = Z.T.dot(G.dot(Z)) + 1e-10 * np.eye(Z.shape[1])
        d[~fixed] = np.linalg.solve(rB, rgc)
        d[~fixed] = -d[~fixed]

        # largest step alpha in (0, 1] that keeps x_cp + alpha*d within bounds
        alpha = 1
        temp1 = alpha

        for i in np.arange(n)[~fixed]:
            dk = d[i]
            if dk < 0:
                temp2 = l[i] - x_cp[i]
                if temp2 >= 0:
                    temp1 = 0
                else:
                    if dk * alpha < temp2:
                        temp1 = temp2 / dk
                    else:
                        temp2 = u[i] - x_cp[i]
            else:
                temp2 = u[i] - x_cp[i]
                # NOTE(review): this tests temp1 (the previous step length),
                # not temp2 (the distance to the bound) — possibly a typo for
                # `temp2 <= 0`; verify against PopED's subspace_min.R.
                if temp1 <= 0:
                    temp1 = 0
                else:
                    if dk * alpha > temp2:
                        temp1 = temp2 / dk

            alpha = min(temp1, alpha)

        return x_cp + alpha * Z.dot(d[~fixed])

    def _project(self, q, l, u):
        # clip q into the box [l, u] in place and return it
        N = q.shape[0]
        for k in range(N):
            if q[k] < l[k]:
                q[k] = l[k]
            elif q[k] > u[k]:
                q[k] = u[k]

        return q

    def _line_search_armijo(
        self, fun_and_jac, pt, dpt, func_calls, m, gk, l, u, x0, x, b, min_, max_, c, r
    ):
        # simple backtracking (Armijo) line search along dpt, projecting each
        # trial point back into the box [l, u]
        ls_rho = 0.6
        ls_c = 1e-4
        ls_alpha = 1

        t = m * ls_c

        for k2 in range(100):
            ls_pt = self._project(pt + ls_alpha * dpt, l, u)

            gkp1, dgkp1 = fun_and_jac(ls_pt, x0, x, b, min_, max_, c, r)
            func_calls += 1

            if gk - gkp1 >= ls_alpha * t:
                break
            else:
                ls_alpha *= ls_rho

        return ls_alpha, ls_pt, gkp1, dgkp1, func_calls

    def _line_search_wolfe(  # noqa: C901
        self, fun_and_jac, xk, pk, gfk, old_fval, old_old_fval, l, u, args
    ):
        """Find alpha that satisfies strong Wolfe conditions.
        Uses the line search algorithm to enforce strong Wolfe conditions
        Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-60
        For the zoom phase it uses an algorithm by
        Outputs: (alpha0, gc, fc)
        """
        # c1/c2: sufficient-decrease and curvature constants of the Wolfe
        # conditions
        c1 = 1e-4
        c2 = 0.9
        N = xk.shape[0]
        _ls_fc = 0
        _ls_ingfk = None

        alpha0 = 0
        phi0 = old_fval

        # directional derivative at alpha = 0
        derphi0 = 0
        for v in range(N):
            derphi0 += gfk[v] * pk[v]

        # keep derphi0 away from zero to avoid division blow-ups below
        if derphi0 == 0:
            derphi0 = 1e-8
        elif np.abs(derphi0) < 1e-8:
            derphi0 = np.sign(derphi0) * 1e-8

        alpha1 = min(1.0, 1.01 * 2 * (phi0 - old_old_fval) / derphi0)

        if alpha1 == 0:
            # This shouldn't happen. Perhaps the increment has slipped below
            # machine precision? For now, set the return variables skip the
            # useless while loop, and raise warnflag=2 due to possible imprecision.
            # print("Slipped below machine precision.")
            alpha_star = None
            fval_star = old_fval
            old_fval = old_old_fval
            fprime_star = None

        _xkp1 = self._project(xk + alpha1 * pk, l, u)
        phi_a1, _ls_ingfk = fun_and_jac(_xkp1, *args)
        _ls_fc += 1
        # derphi_a1 = phiprime(alpha1) evaluated below

        phi_a0 = phi0
        derphi_a0 = derphi0

        i = 1
        maxiter = 10
        while 1:  # bracketing phase
            # print(" (ls) in while loop: ", alpha1, alpha0)
            if alpha1 == 0:
                break
            if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or (
                (phi_a1 >= phi_a0) and (i > 1)
            ):
                # inlining zoom for performance reasons
                # alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi0, derphi0, pk, xk
                # zoom signature: (a_lo, a_hi, phi_lo, phi_hi, derphi_lo, phi0, derphi0, pk, xk)
                # INLINE START
                k = 0
                delta1 = 0.2  # cubic interpolant check
                delta2 = 0.1  # quadratic interpolant check
                phi_rec = phi0
                a_rec = 0
                a_hi = alpha1
                a_lo = alpha0
                phi_lo = phi_a0
                phi_hi = phi_a1
                derphi_lo = derphi_a0
                while 1:
                    # interpolate to find a trial step length between a_lo and a_hi
                    # Need to choose interpolation here. Use cubic interpolation and then if the
                    # result is within delta * dalpha or outside of the interval bounded by a_lo or a_hi
                    # then use quadratic interpolation, if the result is still too close, then use bisection
                    dalpha = a_hi - a_lo
                    if dalpha < 0:
                        a, b = a_hi, a_lo
                    else:
                        a, b = a_lo, a_hi

                    # minimizer of cubic interpolant
                    # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
                    # if the result is too close to the end points (or out of the interval)
                    # then use quadratic interpolation with phi_lo, derphi_lo and phi_hi
                    # if the result is stil too close to the end points (or out of the interval)
                    # then use bisection
                    if k > 0:
                        cchk = delta1 * dalpha
                        a_j = self._cubicmin(
                            a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec
                        )
                    if (
                        (k == 0)
                        or (a_j is None)
                        or (a_j > b - cchk)
                        or (a_j < a + cchk)
                    ):
                        qchk = delta2 * dalpha
                        a_j = self._quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
                        if (a_j is None) or (a_j > b - qchk) or (a_j < a + qchk):
                            a_j = a_lo + 0.5 * dalpha

                    # Check new value of a_j
                    _xkp1 = self._project(xk + a_j * pk, l, u)
                    # if _xkp1[1] < 0:
                    #     _xkp1[1] = 0
                    phi_aj, _ls_ingfk = fun_and_jac(_xkp1, *args)

                    derphi_aj = 0
                    for v in range(N):
                        derphi_aj += _ls_ingfk[v] * pk[v]

                    if (phi_aj > phi0 + c1 * a_j * derphi0) or (phi_aj >= phi_lo):
                        phi_rec = phi_hi
                        a_rec = a_hi
                        a_hi = a_j
                        phi_hi = phi_aj
                    else:
                        if abs(derphi_aj) <= -c2 * derphi0:
                            a_star = a_j
                            val_star = phi_aj
                            valprime_star = _ls_ingfk
                            break
                        if derphi_aj * (a_hi - a_lo) >= 0:
                            phi_rec = phi_hi
                            a_rec = a_hi
                            a_hi = a_lo
                            phi_hi = phi_lo
                        else:
                            phi_rec = phi_lo
                            a_rec = a_lo
                        a_lo = a_j
                        phi_lo = phi_aj
                        derphi_lo = derphi_aj
                    k += 1
                    if k > maxiter:
                        a_star = a_j
                        val_star = phi_aj
                        valprime_star = None
                        break

                alpha_star = a_star
                fval_star = val_star
                fprime_star = valprime_star
                fnev = k
                ## INLINE END

                _ls_fc += fnev
                break

            i += 1
            if i > maxiter:
                break

            _xkp1 = self._project(xk + alpha1 * pk, l, u)
            _, _ls_ingfk = fun_and_jac(_xkp1, *args)
            derphi_a1 = 0
            for v in range(N):
                derphi_a1 += _ls_ingfk[v] * pk[v]
            _ls_fc += 1
            if abs(derphi_a1) <= -c2 * derphi0:
                alpha_star = alpha1
                fval_star = phi_a1
                fprime_star = _ls_ingfk
                break

            if derphi_a1 >= 0:
                # alpha_star, fval_star, fprime_star, fnev, _ls_ingfk = _zoom(
                #     alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi0, derphi0, pk, xk
                # )
                #
                # INLINE START
                maxiter = 10
                k = 0
                delta1 = 0.2  # cubic interpolant check
                delta2 = 0.1  # quadratic interpolant check
                phi_rec = phi0
                a_rec = 0
                a_hi = alpha0
                a_lo = alpha1
                phi_lo = phi_a1
                phi_hi = phi_a0
                derphi_lo = derphi_a1
                while 1:
                    # interpolate to find a trial step length between a_lo and a_hi
                    # Need to choose interpolation here. Use cubic interpolation and then if the
                    # result is within delta * dalpha or outside of the interval bounded by a_lo or a_hi
                    # then use quadratic interpolation, if the result is still too close, then use bisection
                    dalpha = a_hi - a_lo
                    if dalpha < 0:
                        a, b = a_hi, a_lo
                    else:
                        a, b = a_lo, a_hi

                    # minimizer of cubic interpolant
                    # (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
                    # if the result is too close to the end points (or out of the interval)
                    # then use quadratic interpolation with phi_lo, derphi_lo and phi_hi
                    # if the result is stil too close to the end points (or out of the interval)
                    # then use bisection
                    if k > 0:
                        cchk = delta1 * dalpha
                        a_j = self._cubicmin(
                            a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec, phi_rec
                        )
                    if (
                        (k == 0)
                        or (a_j is None)
                        or (a_j > b - cchk)
                        or (a_j < a + cchk)
                    ):
                        qchk = delta2 * dalpha
                        a_j = self._quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
                        if (a_j is None) or (a_j > b - qchk) or (a_j < a + qchk):
                            a_j = a_lo + 0.5 * dalpha

                    # Check new value of a_j
                    _xkp1 = self._project(xk + a_j * pk, l, u)
                    phi_aj, _ls_ingfk = fun_and_jac(_xkp1, *args)
                    derphi_aj = 0
                    for v in range(N):
                        derphi_aj += _ls_ingfk[v] * pk[v]
                    if (phi_aj > phi0 + c1 * a_j * derphi0) or (phi_aj >= phi_lo):
                        phi_rec = phi_hi
                        a_rec = a_hi
                        a_hi = a_j
                        phi_hi = phi_aj
                    else:
                        if abs(derphi_aj) <= -c2 * derphi0:
                            a_star = a_j
                            val_star = phi_aj
                            valprime_star = _ls_ingfk
                            break
                        if derphi_aj * (a_hi - a_lo) >= 0:
                            phi_rec = phi_hi
                            a_rec = a_hi
                            a_hi = a_lo
                            phi_hi = phi_lo
                        else:
                            phi_rec = phi_lo
                            a_rec = a_lo
                        a_lo = a_j
                        phi_lo = phi_aj
                        derphi_lo = derphi_aj
                    k += 1
                    if k > maxiter:
                        a_star = a_j
                        val_star = phi_aj
                        valprime_star = None
                        break

                alpha_star = a_star
                fval_star = val_star
                fprime_star = valprime_star
                fnev = k
                ## INLINE END

                _ls_fc += fnev
                break

            alpha2 = 2 * alpha1  # increase by factor of two on each iteration
            i = i + 1
            alpha0 = alpha1
            alpha1 = alpha2
            phi_a0 = phi_a1
            _xkp1 = self._project(xk + alpha1 * pk, l, u)
            phi_a1, _ls_ingfk = fun_and_jac(_xkp1, *args)
            _ls_fc += 1
            derphi_a0 = derphi_a1

            # stopping test if lower function not found
            if i > maxiter:
                alpha_star = alpha1
                fval_star = phi_a1
                fprime_star = None
                break

        # note: fc/gc/fnev slots all return the same counter (_ls_fc)
        return alpha_star, _ls_fc, _ls_fc, fval_star, old_fval, fprime_star, _ls_fc

    def _cubicmin(self, a, fa, fpa, b, fb, c, fc):
        # finds the minimizer for a cubic polynomial that goes through the
        # points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
        #
        # if no minimizer can be found return None
        #
        # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
        C = fpa
        db = b - a
        dc = c - a
        if (db == 0) or (dc == 0) or (b == c):
            return None

        denom = (db * dc) ** 2 * (db - dc)
        A = dc ** 2 * (fb - fa - C * db) - db ** 2 * (fc - fa - C * dc)
        B = -(dc ** 3) * (fb - fa - C * db) + db ** 3 * (fc - fa - C * dc)

        A /= denom
        B /= denom

        radical = B * B - 3 * A * C
        if radical < 0:
            return None
        if A == 0:
            return None

        xmin = a + (-B + np.sqrt(radical)) / (3 * A)
        return xmin

    def _quadmin(self, a, fa, fpa, b, fb):
        # finds the minimizer for a quadratic polynomial that goes through
        # the points (a,fa), (b,fb) with derivative at a of fpa
        # f(x) = B*(x-a)^2 + C*(x-a) + D
        D = fa
        C = fpa
        db = b - a * 1.0

        if db == 0:
            return None

        B = (fb - D - C * db) / (db * db)
        if B <= 0:
            return None

        xmin = a - C / (2.0 * B)
        return xmin
# jitclass member spec for the Optimizer subclasses below: each holds a BFGSB
# solver instance, but only when numba imported successfully (otherwise the
# jitclass decorator is a no-op stub and the spec stays empty).
if NUMBA_IMPORT_ERROR is None:
    spec = [("bfgsb", BFGSB.class_type.instance_type)]  # type: ignore
else:
    spec = []  # pragma: no cover
@jitclass(spec=spec)
class L2Optimizer(Optimizer):
    def optimize_distance_s_t_boundary_and_trustregion(  # noqa: C901
        self, x0, x, b, min_, max_, c, r
    ):
        """ Solves the L2 trust region problem

        min ||x0 - x - delta||_2 s.t. b^top delta = c
                                    & ell <= x + delta <= u
                                    & ||delta||_2 <= r

        This is a specialised solver that does not use the generic BFGS-B solver.
        Instead, this active-set solver computes the active set of indices (those that
        do not hit the bounds) and then computes that optimal step size in the direction
        of the boundary and the direction of the original sample over the active indices.

        Parameters
        ----------
        x0 : `numpy.ndarray`
            The original image against which we minimize the perturbation
            (flattened).
        x : `numpy.ndarray`
            The current perturbation (flattened).
        b : `numpy.ndarray`
            Normal vector of the local decision boundary (flattened).
        min_ : float
            Lower bound on the pixel values.
        max_ : float
            Upper bound on the pixel values.
        c : float
            Logit difference between the ground truth class of x0 and the
            leading class different from the ground truth.
        r : float
            Size of the trust region.
        """
        N = x0.shape[0]

        # running totals for coordinates clamped to the box bounds
        clamp_c = 0
        clamp_norm = 0
        ck = c  # residual boundary constraint over the active set
        rk = r  # residual trust-region radius over the active set
        masked_values = 0
        mask = np.zeros(N, dtype=np.uint8)  # 1 = clamped (inactive) coordinate
        delta = np.empty_like(x0)
        dx = x0 - x

        for k in range(20):
            # inner optimization that solves subproblem
            bnorm = 1e-8
            bdotDx = 0

            for i in range(N):
                if mask[i] == 0:
                    bnorm += b[i] * b[i]
                    bdotDx += b[i] * dx[i]

            bdotDx = bdotDx / bnorm
            ck_bnorm = ck / bnorm
            b_scale = -bdotDx + ck / bnorm
            new_masked_values = 0
            delta_norm = 0
            descent_norm = 0
            boundary_step_norm = 0

            # make optimal step towards boundary AND minimum
            for i in range(N):
                if mask[i] == 0:
                    delta[i] = dx[i] + b[i] * b_scale
                    boundary_step_norm = (
                        boundary_step_norm + b[i] * ck_bnorm * b[i] * ck_bnorm
                    )
                    delta_norm = delta_norm + delta[i] * delta[i]
                    descent_norm = descent_norm + (dx[i] - b[i] * bdotDx) * (
                        dx[i] - b[i] * bdotDx
                    )

            # check of step to boundary is already larger than trust region
            if boundary_step_norm > rk * rk:
                # take the pure boundary step only (scaled to the constraint)
                for i in range(N):
                    if mask[i] == 0:
                        delta[i] = b[i] * ck_bnorm
            else:
                # check if combined step to large and correct step to minimum if necessary
                if delta_norm > rk * rk:
                    region_correct = np.sqrt(rk * rk - boundary_step_norm)
                    region_correct = region_correct / (np.sqrt(descent_norm) + 1e-8)
                    b_scale = -region_correct * bdotDx + ck / bnorm
                    for i in range(N):
                        if mask[i] == 0:
                            delta[i] = region_correct * dx[i] + b[i] * b_scale

            # clamp coordinates that left the box and freeze them (active set)
            for i in range(N):
                if mask[i] == 0:
                    if x[i] + delta[i] <= min_:
                        mask[i] = 1
                        delta[i] = min_ - x[i]
                        new_masked_values = new_masked_values + 1
                        clamp_norm = clamp_norm + delta[i] * delta[i]
                        clamp_c = clamp_c + b[i] * delta[i]

                    if x[i] + delta[i] >= max_:
                        mask[i] = 1
                        delta[i] = max_ - x[i]
                        new_masked_values = new_masked_values + 1
                        clamp_norm = clamp_norm + delta[i] * delta[i]
                        clamp_c = clamp_c + b[i] * delta[i]

            # should no additional variable get out of bounds, stop optimization
            if new_masked_values == 0:
                break

            masked_values = masked_values + new_masked_values

            # shrink the trust region / boundary constraint by the part that
            # is already consumed by the clamped coordinates
            if clamp_norm < r * r:
                rk = np.sqrt(r * r - clamp_norm)
            else:
                rk = 0

            ck = c - clamp_c

            if masked_values == N:
                break

        return delta

    def fun_and_jac(self, params, x0, x, b, min_, max_, c, r):
        # we need to compute the loss function
        # g = distance + mu * (norm_d - r ** 2) + lam * (b_dot_d - c)
        # and its derivative d g / d lam and d g / d mu
        lam, mu = params

        N = x0.shape[0]

        g = 0
        d_g_d_lam = 0
        d_g_d_mu = 0

        distance = 0
        b_dot_d = 0
        d_norm = 0

        t = 1 / (2 * mu + 2)

        for n in range(N):
            dx = x0[n] - x[n]
            bn = b[n]
            xn = x[n]

            # unconstrained minimizer of the per-coordinate Lagrangian
            d = (2 * dx - lam * bn) * t

            if d + xn > max_:
                d = max_ - xn
            elif d + xn < min_:
                d = min_ - xn
            else:
                # gradient contribution only for unclipped coordinates
                prefac = 2 * (d - dx) + 2 * mu * d + lam * bn
                d_g_d_lam -= prefac * bn * t
                d_g_d_mu -= prefac * 2 * d * t

            distance += (d - dx) ** 2
            b_dot_d += bn * d
            d_norm += d ** 2

            g += (dx - d) ** 2 + mu * d ** 2 + lam * bn * d
            d_g_d_lam += bn * d
            d_g_d_mu += d ** 2

        g += -mu * r ** 2 - lam * c
        d_g_d_lam -= c
        d_g_d_mu -= r ** 2

        # negated because the dual is maximized via a minimizer
        return -g, -np.array([d_g_d_lam, d_g_d_mu])

    def _get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True):
        # reconstruct the per-coordinate step for the given dual variables,
        # clipped to the box constraints
        delta = np.empty_like(x0)
        N = x0.shape[0]

        t = 1 / (2 * mu + 2)

        for n in range(N):
            d = (2 * (x0[n] - x[n]) - lam * b[n]) * t

            if d + x[n] > max_:
                d = max_ - x[n]
            elif d + x[n] < min_:
                d = min_ - x[n]

            delta[n] = d

        return delta

    def _distance(self, x0, x):
        # squared L2 distance (the quantity minimized by this optimizer)
        return np.linalg.norm(x0 - x) ** 2
@jitclass(spec=spec)
class L1Optimizer(Optimizer):
    def fun_and_jac(self, params, x0, x, b, min_, max_, c, r):
        # Dual objective (and its gradient w.r.t. lam and mu) of
        # arg min_delta ||delta - dx||_1 + lam * b^T delta + mu * ||delta||_2^2 s.t. min <= delta + x <= max
        lam, mu = params
        N = x0.shape[0]

        g = 0
        d_g_d_lam = 0
        d_g_d_mu = 0

        if mu > 0:
            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                t = 1 / (2 * mu)
                # soft-thresholding argument for the L1 proximal step
                u = -lam * bn * t - dx

                if np.abs(u) - t < 0:
                    # value and grad = 0
                    d = dx
                else:
                    d = np.sign(u) * (np.abs(u) - t) + dx

                    if d + x[n] < min_:
                        d = min_ - x[n]
                    elif d + x[n] > max_:
                        d = max_ - x[n]
                    else:
                        # gradient contribution only for unclipped coordinates
                        prefac = np.sign(d - dx) + 2 * mu * d + lam * bn
                        d_g_d_lam -= prefac * bn * t
                        d_g_d_mu -= prefac * 2 * d * t

                g += np.abs(dx - d) + mu * d ** 2 + lam * bn * d
                d_g_d_lam += bn * d
                d_g_d_mu += d ** 2
        else:  # mu == 0
            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                # with no quadratic term the minimizer is bang-bang:
                # stay at dx or jump to the relevant box bound
                if np.abs(lam * bn) < 1:
                    d = dx
                elif np.sign(lam * bn) < 0:
                    d = max_ - x[n]
                else:
                    d = min_ - x[n]

                g += np.abs(dx - d) + mu * d ** 2 + lam * bn * d
                d_g_d_lam += bn * d
                d_g_d_mu += d ** 2

        g += -mu * r ** 2 - lam * c
        d_g_d_lam -= c
        d_g_d_mu -= r ** 2

        # negated because the dual is maximized via a minimizer
        return -g, -np.array([d_g_d_lam, d_g_d_mu])

    def _get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True):
        # Reconstruct the step for the given dual variables; optionally adjust
        # a single coordinate ("touchup") so the boundary constraint
        # b^T delta = c holds exactly.
        delta = np.empty_like(x0)
        N = x0.shape[0]

        b_dot_d = 0
        norm_d = 0
        distance = 0

        if mu > 0:
            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                t = 1 / (2 * mu)
                u = -lam * bn * t - dx

                if np.abs(u) - t < 0:
                    # value and grad = 0
                    d = dx
                else:
                    d = np.sign(u) * (np.abs(u) - t) + dx

                    if d + x[n] < min_:
                        # grad = 0
                        d = min_ - x[n]
                    elif d + x[n] > max_:
                        # grad = 0
                        d = max_ - x[n]

                delta[n] = d
                b_dot_d += b[n] * d
                norm_d += d ** 2
                distance += np.abs(d - dx)
        else:  # mu == 0
            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                if np.abs(lam * bn) < 1:
                    d = dx
                elif np.sign(lam * bn) < 0:
                    d = max_ - x[n]
                else:
                    d = min_ - x[n]

                delta[n] = d
                b_dot_d += b[n] * d
                norm_d += d ** 2
                distance += np.abs(d - dx)

        if touchup:
            # search for the one index that (a) we can modify to match boundary constraint, (b) stays within our
            # trust region and (c) minimize the distance to the original image
            dc = c - b_dot_d
            k = 0
            min_distance = np.inf
            min_distance_idx = 0
            for n in range(N):
                if np.abs(b[n]) > 0:
                    dx = x0[n] - x[n]
                    old_d = delta[n]
                    new_d = old_d + dc / b[n]

                    if (
                        x[n] + new_d <= max_
                        and x[n] + new_d >= min_
                        and norm_d - old_d ** 2 + new_d ** 2 <= r ** 2
                    ):
                        # conditions (a) and (b) are fulfilled
                        if k == 0:
                            min_distance = (
                                distance - np.abs(old_d - dx) + np.abs(new_d - dx)
                            )
                            min_distance_idx = n
                            k += 1
                        else:
                            new_distance = (
                                distance - np.abs(old_d - dx) + np.abs(new_d - dx)
                            )
                            if min_distance > new_distance:
                                min_distance = new_distance
                                min_distance_idx = n

            if k > 0:
                # touchup successful
                idx = min_distance_idx
                old_d = delta[idx]
                new_d = old_d + dc / b[idx]
                delta[idx] = new_d

        return delta

    def _distance(self, x0, x):
        # L1 distance (the quantity minimized by this optimizer)
        return np.abs(x0 - x).sum()
@jitclass(spec=spec)
class LinfOptimizer(Optimizer):
    def optimize_distance_s_t_boundary_and_trustregion(
        self, x0, x, b, min_, max_, c, r
    ):
        """ Find the solution to the optimization problem

        min_delta ||dx - delta||_p^p s.t. ||delta||_2^2 <= r^2 AND b^T delta = c AND min_ <= x + delta <= max_
        """
        # initial dual variables (lam, mu); mu is constrained to be >= 0
        params0 = np.array([0.0, 0.0])
        bounds = np.array([(-np.inf, np.inf), (0, np.inf)])

        # delegate to the binary search over the Linf radius epsilon
        return self.binary_search(params0, bounds, x0, x, b, min_, max_, c, r)
def binary_search(
self, q0, bounds, x0, x, b, min_, max_, c, r, etol=1e-6, maxiter=1000
):
# perform binary search over epsilon
epsilon = (max_ - min_) / 2.0
eps_low = min_
eps_high = max_
func_calls = 0
bnorm = np.linalg.norm(b)
lambda0 = 2 * c / bnorm ** 2
k = 0
while eps_high - eps_low > etol:
fun, nfev, _lambda0 = self.fun(
epsilon, x0, x, b, min_, max_, c, r, lambda0=lambda0
)
func_calls += nfev
if fun > -np.inf:
# decrease epsilon
eps_high = epsilon
lambda0 = _lambda0
else:
# increase epsilon
eps_low = epsilon
k += 1
epsilon = (eps_high - eps_low) / 2.0 + eps_low
if k > 20:
break
delta = self._get_final_delta(
lambda0, eps_high, x0, x, b, min_, max_, c, r, touchup=True
)
return delta
def _Linf_bounds(self, x0, epsilon, ell, u):
N = x0.shape[0]
_ell = np.empty_like(x0)
_u = np.empty_like(x0)
for i in range(N):
nx, px = x0[i] - epsilon, x0[i] + epsilon
if nx > ell:
_ell[i] = nx
else:
_ell[i] = ell
if px < u:
_u[i] = px
else:
_u[i] = u
return _ell, _u
def fun(self, epsilon, x0, x, b, ell, u, c, r, lambda0=None):
""" Computes the minimum norm necessary to reach the boundary. More precisely, we aim to solve the
following optimization problem
min ||delta||_2^2 s.t. lower <= x + delta <= upper AND b.dot(delta) = c
Lets forget about the box constraints for a second, i.e.
min ||delta||_2^2 s.t. b.dot(delta) = c
The dual of this problem is quite straight-forward to solve,
g(lambda, delta) = ||delta||_2^2 + lambda * (c - b.dot(delta))
The minimum of this Lagrangian is delta^* = lambda * b / 2, and so
inf_delta g(lambda, delta) = lambda^2 / 4 ||b||_2^2 + lambda * c
and so the optimal lambda, which maximizes inf_delta g(lambda, delta), is given by
lambda^* = 2c / ||b||_2^2
which in turn yields the optimal delta:
delta^* = c * b / ||b||_2^2
To take into account the box-constraints we perform a binary search over lambda and apply the box
constraint in each step.
"""
N = x.shape[0]
# new box constraints
_ell, _u = self._Linf_bounds(x0, epsilon, ell, u)
# initialize lambda
_lambda = lambda0
# compute delta and determine active set
k = 0
lambda_max, lambda_min = 1e10, -1e10
# check whether problem is actually solvable (i.e. check whether boundary constraint can be reached)
max_c = 0
min_c = 0
for n in range(N):
if b[n] > 0:
max_c += b[n] * (_u[n] - x[n])
min_c += b[n] * (_ell[n] - x[n])
else:
max_c += b[n] * (_ell[n] - x[n])
min_c += b[n] * (_u[n] - x[n])
if c > max_c or c < min_c:
return -np.inf, k, _lambda
while True:
k += 1
_c = 0
norm = 0
_active_bnorm = 0
for n in range(N):
lam_step = _lambda * b[n] / 2
if lam_step + x[n] < _ell[n]:
delta_step = _ell[n] - x[n]
elif lam_step + x[n] > _u[n]:
delta_step = _u[n] - x[n]
else:
delta_step = lam_step
_active_bnorm += b[n] ** 2
_c += b[n] * delta_step
norm += delta_step ** 2
if 0.9999 * np.abs(c) - EPS < np.abs(_c) < 1.0001 * np.abs(c) + EPS:
if norm > r ** 2:
return -np.inf, k, _lambda
else:
return -epsilon, k, _lambda
else:
# update lambda according to active variables
if _c > c:
lambda_max = _lambda
else:
lambda_min = _lambda
#
if _active_bnorm == 0:
# update is stepping out of feasible region, fallback to binary search
_lambda = (lambda_max - lambda_min) / 2 + lambda_min
else:
_lambda += 2 * (c - _c) / _active_bnorm
dlambda = lambda_max - lambda_min
if (
_lambda > lambda_max - 0.1 * dlambda
or _lambda < lambda_min + 0.1 * dlambda
):
# update is stepping out of feasible region, fallback to binary search
_lambda = (lambda_max - lambda_min) / 2 + lambda_min
def _get_final_delta(self, lam, eps, x0, x, b, min_, max_, c, r, touchup=True):
N = x.shape[0]
delta = np.empty_like(x0)
# new box constraints
_ell, _u = self._Linf_bounds(x0, eps, min_, max_)
for n in range(N):
lam_step = lam * b[n] / 2
if lam_step + x[n] < _ell[n]:
delta[n] = _ell[n] - x[n]
elif lam_step + x[n] > _u[n]:
delta[n] = _u[n] - x[n]
else:
delta[n] = lam_step
return delta
def _distance(self, x0, x):
return np.abs(x0 - x).max()
@jitclass(spec=spec)
class L0Optimizer(Optimizer):
    """Optimizer for the L0 "norm" (number of changed coordinates).

    Strategy (see ``minimize``): first try to satisfy the boundary constraint
    greedily without the trust region; if the resulting step is too large,
    optimize the dual parameters (lam, mu) with a Nelder-Mead search and
    reconstruct delta from the accepted parameters.
    """

    def optimize_distance_s_t_boundary_and_trustregion(
        self, x0, x, b, min_, max_, c, r
    ):
        """ Find the solution to the optimization problem

        min_delta ||dx - delta||_p^p s.t. ||delta||_2^2 <= r^2 AND b^T delta = c AND min_ <= x + delta <= max_
        """
        params0 = np.array([0.0, 0.0])
        bounds = np.array([(-np.inf, np.inf), (0, np.inf)])

        return self.minimize(params0, bounds, x0, x, b, min_, max_, c, r)

    def minimize(
        self,
        q0,
        bounds,
        x0,
        x,
        b,
        min_,
        max_,
        c,
        r,
        ftol=1e-9,
        xtol=-1e-5,
        maxiter=1000,
    ):
        """Entry point: trust-region-free greedy solve, then Nelder-Mead
        over the dual parameters if the greedy step is too long.

        NOTE(review): the default ``xtol`` is negative, which disables the
        simplex-volume termination test in ``_nelder_mead_algorithm`` (the
        LV ratio stays positive) -- presumably intentional; confirm.
        """
        # First check whether solution can be computed without trust region
        delta, delta_norm = self.minimize_without_trustregion(
            x0, x, b, c, r, min_, max_
        )
        if delta_norm <= r:
            return delta
        else:
            # perform Nelder-Mead optimization
            args = (x0, x, b, min_, max_, c, r)

            results = self._nelder_mead_algorithm(
                q0, bounds, args=args, tol_f=ftol, tol_x=xtol, max_iter=maxiter
            )

            delta = self._get_final_delta(
                results[0], results[1], x0, x, b, min_, max_, c, r, touchup=True
            )

        return delta

    def minimize_without_trustregion(self, x0, x, b, c, r, ell, u):
        """Greedy solve of the boundary constraint inside the box, ignoring
        the trust region: start from delta = x0 - x (L0 distance zero) and
        push the coordinates with the largest potential contribution to
        ``b.dot(delta)`` to the box limit until the constraint ``c`` is met.
        """
        # compute maximum direction to b.dot(delta) within box-constraints
        delta = x0 - x
        total = np.empty_like(x0)
        total_b = np.empty_like(x0)
        bdotdelta = b.dot(delta)
        delta_bdotdelta = c - bdotdelta

        for k in range(x0.shape[0]):
            # Select the box corner that moves b.dot(delta) towards c.
            if b[k] > 0 and delta_bdotdelta > 0:
                total_b[k] = (u - x0[k]) * b[k]  # pos
                total[k] = u - x0[k]
            elif b[k] > 0 and delta_bdotdelta < 0:
                total_b[k] = (ell - x0[k]) * b[k]  # neg
                total[k] = ell - x0[k]
            elif b[k] < 0 and delta_bdotdelta > 0:
                total_b[k] = (ell - x0[k]) * b[k]  # pos
                total[k] = ell - x0[k]
            else:
                total_b[k] = (u - x0[k]) * b[k]  # neg
                total[k] = u - x0[k]

        # Process coordinates by decreasing potential contribution.
        b_argsort = np.argsort(np.abs(total_b))[::-1]

        for idx in b_argsort:
            if np.abs(c - bdotdelta) > np.abs(total_b[idx]):
                # Pushing this coordinate all the way is still not enough.
                delta[idx] += total[idx]
                bdotdelta += total_b[idx]
            else:
                # Partial step on the last coordinate closes the remaining gap
                # exactly (1e-20 guards against division by a zero gradient).
                delta[idx] += (c - bdotdelta) / (b[idx] + 1e-20)
                break

        delta_norm = np.linalg.norm(delta)

        return delta, delta_norm

    def _nelder_mead_algorithm(
        self,
        q0,
        bounds,
        args=(),
        ρ=1.0,
        χ=2.0,
        γ=0.5,
        σ=0.5,
        tol_f=1e-8,
        tol_x=1e-8,
        max_iter=1000,
    ):
        """
        Implements the Nelder-Mead algorithm described in Lagarias et al. (1998)
        modified to maximize instead of minimizing.

        Parameters
        ----------
        q0 : ndarray(float, ndim=1)
            Initial guess used to build the starting simplex.
        bounds: ndarray(float, ndim=2)
            Sequence of (min, max) pairs for each element of the search point.
        args : tuple, optional
            Extra arguments passed to the objective function.
        ρ : scalar(float), optional(default=1.)
            Reflection parameter. Must be strictly greater than 0.
        χ : scalar(float), optional(default=2.)
            Expansion parameter. Must be strictly greater than max(1, ρ).
        γ : scalar(float), optional(default=0.5)
            Contraction parameter. Must be strictly between 0 and 1.
        σ : scalar(float), optional(default=0.5)
            Shrinkage parameter. Must be strictly between 0 and 1.
        tol_f : scalar(float), optional(default=1e-8)
            Tolerance to be used for the function value convergence test.
        tol_x : scalar(float), optional(default=1e-8)
            Tolerance to be used for the function domain convergence test.
        max_iter : scalar(float), optional(default=1000)
            The maximum number of allowed iterations.

        Returns
        ----------
        x : Approximate solution
        """
        vertices = self._initialize_simplex(q0)

        n = vertices.shape[1]
        self._check_params(ρ, χ, γ, σ, bounds, n)

        nit = 0

        ργ = ρ * γ
        ρχ = ρ * χ
        σ_n = σ ** n

        f_val = np.empty(n + 1, dtype=np.float64)
        for i in range(n + 1):
            f_val[i] = self._neg_bounded_fun(bounds, vertices[i], args=args)

        # Step 1: Sort
        sort_ind = f_val.argsort()
        LV_ratio = 1

        # Compute centroid
        x_bar = vertices[sort_ind[:n]].sum(axis=0) / n

        while True:
            shrink = False

            # Check termination
            fail = nit >= max_iter

            best_val_idx = sort_ind[0]
            worst_val_idx = sort_ind[n]

            term_f = f_val[worst_val_idx] - f_val[best_val_idx] < tol_f

            # Linearized volume ratio test (see [2])
            term_x = LV_ratio < tol_x

            if term_x or term_f or fail:
                break

            # Step 2: Reflection
            x_r = x_bar + ρ * (x_bar - vertices[worst_val_idx])
            f_r = self._neg_bounded_fun(bounds, x_r, args=args)

            if f_r >= f_val[best_val_idx] and f_r < f_val[sort_ind[n - 1]]:
                # Accept reflection
                vertices[worst_val_idx] = x_r
                LV_ratio *= ρ

            # Step 3: Expansion
            elif f_r < f_val[best_val_idx]:
                x_e = x_bar + χ * (x_r - x_bar)
                f_e = self._neg_bounded_fun(bounds, x_e, args=args)
                if f_e < f_r:  # Greedy minimization
                    vertices[worst_val_idx] = x_e
                    LV_ratio *= ρχ
                else:
                    vertices[worst_val_idx] = x_r
                    LV_ratio *= ρ

            # Step 4 & 5: Contraction and Shrink
            else:
                # Step 4: Contraction
                if f_r < f_val[worst_val_idx]:  # Step 4.a: Outside Contraction
                    x_c = x_bar + γ * (x_r - x_bar)
                    LV_ratio_update = ργ
                else:  # Step 4.b: Inside Contraction
                    x_c = x_bar - γ * (x_r - x_bar)
                    LV_ratio_update = γ

                f_c = self._neg_bounded_fun(bounds, x_c, args=args)
                if f_c < min(f_r, f_val[worst_val_idx]):  # Accept contraction
                    vertices[worst_val_idx] = x_c
                    LV_ratio *= LV_ratio_update

                # Step 5: Shrink
                else:
                    shrink = True
                    for i in sort_ind[1:]:
                        vertices[i] = vertices[best_val_idx] + σ * (
                            vertices[i] - vertices[best_val_idx]
                        )
                        f_val[i] = self._neg_bounded_fun(bounds, vertices[i], args=args)

                    sort_ind[1:] = f_val[sort_ind[1:]].argsort() + 1

                    x_bar = (
                        vertices[best_val_idx]
                        + σ * (x_bar - vertices[best_val_idx])
                        + (vertices[worst_val_idx] - vertices[sort_ind[n]]) / n
                    )

                    LV_ratio *= σ_n

            if not shrink:  # Nonshrink ordering rule
                f_val[worst_val_idx] = self._neg_bounded_fun(
                    bounds, vertices[worst_val_idx], args=args
                )

                # Insert the updated worst vertex at its sorted position.
                for i, j in enumerate(sort_ind):
                    if f_val[worst_val_idx] < f_val[j]:
                        sort_ind[i + 1 :] = sort_ind[i:-1]
                        sort_ind[i] = worst_val_idx
                        break

                # Incrementally update the centroid of the n best vertices.
                x_bar += (vertices[worst_val_idx] - vertices[sort_ind[n]]) / n

            nit += 1

        return vertices[sort_ind[0]]

    def _initialize_simplex(self, x0):
        """
        Generates an initial simplex for the Nelder-Mead method.

        Parameters
        ----------
        x0 : ndarray(float, ndim=1)
            Initial guess. Array of real elements of size (n,), where ‘n’ is the
            number of independent variables.

        Returns
        ----------
        vertices : ndarray(float, ndim=2)
            Initial simplex with shape (n+1, n).
        """
        n = x0.size

        vertices = np.empty((n + 1, n), dtype=np.float64)

        # Broadcast x0 on row dimension
        vertices[:] = x0

        nonzdelt = 0.05
        zdelt = 0.00025

        for i in range(n):
            # Generate candidate coordinate
            if vertices[i + 1, i] != 0.0:
                vertices[i + 1, i] *= 1 + nonzdelt
            else:
                vertices[i + 1, i] = zdelt

        return vertices

    def _check_params(self, ρ, χ, γ, σ, bounds, n):
        """
        Checks whether the parameters for the Nelder-Mead algorithm are valid.
        JIT-compiled in `nopython` mode using Numba.

        Parameters
        ----------
        ρ : scalar(float)
            Reflection parameter. Must be strictly greater than 0.
        χ : scalar(float)
            Expansion parameter. Must be strictly greater than max(1, ρ).
        γ : scalar(float)
            Contraction parameter. Must be strictly between 0 and 1.
        σ : scalar(float)
            Shrinkage parameter. Must be strictly between 0 and 1.
        bounds: ndarray(float, ndim=2)
            Sequence of (min, max) pairs for each element in x.
        n : scalar(int)
            Number of independent variables.
        """
        # NOTE(review): the checks below use strict "<", so the boundary
        # values themselves (e.g. ρ == 0 or χ == 1) are accepted even though
        # the messages demand strict inequalities -- confirm intended.
        if ρ < 0:
            raise ValueError("ρ must be strictly greater than 0.")
        if χ < 1:
            raise ValueError("χ must be strictly greater than 1.")
        if χ < ρ:
            raise ValueError("χ must be strictly greater than ρ.")
        if γ < 0 or γ > 1:
            raise ValueError("γ must be strictly between 0 and 1.")
        if σ < 0 or σ > 1:
            raise ValueError("σ must be strictly between 0 and 1.")

        if not (bounds.shape == (0, 2) or bounds.shape == (n, 2)):
            raise ValueError("The shape of `bounds` is not valid.")
        if (np.atleast_2d(bounds)[:, 0] > np.atleast_2d(bounds)[:, 1]).any():
            # Bug fix: the previous message ("Lower bounds must be greater
            # than upper bounds.") stated the opposite of the failed
            # condition.
            raise ValueError("Lower bounds must not be greater than upper bounds.")

    def _check_bounds(self, x, bounds):
        """
        Checks whether `x` is within `bounds`. JIT-compiled in `nopython` mode
        using Numba.

        Parameters
        ----------
        x : ndarray(float, ndim=1)
            1-D array with shape (n,) of independent variables.
        bounds: ndarray(float, ndim=2)
            Sequence of (min, max) pairs for each element in x.

        Returns
        ----------
        bool
            `True` if `x` is within `bounds`, `False` otherwise.
        """
        if bounds.shape == (0, 2):
            # Empty bounds array means "unbounded".
            return True
        else:
            return (np.atleast_2d(bounds)[:, 0] <= x).all() and (
                x <= np.atleast_2d(bounds)[:, 1]
            ).all()

    def _neg_bounded_fun(self, bounds, x, args=()):
        """
        Wrapper for bounding and taking the negative of `fun` for the
        Nelder-Mead algorithm. JIT-compiled in `nopython` mode using Numba.

        Parameters
        ----------
        bounds: ndarray(float, ndim=2)
            Sequence of (min, max) pairs for each element in x.
        x : ndarray(float, ndim=1)
            1-D array with shape (n,) of independent variables at which `fun` is
            to be evaluated.
        args : tuple, optional
            Extra arguments passed to the objective function.

        Returns
        ----------
        scalar
            `-fun(x, *args)` if x is within `bounds`, `np.inf` otherwise.
        """
        if self._check_bounds(x, bounds):
            return -self.fun(x, *args)
        else:
            return np.inf

    def fun(self, params, x0, x, b, min_, max_, c, r):
        """Dual objective evaluated at params = (lam, mu):

        arg min_delta ||delta - dx||_0 + lam * b^T delta + mu * ||delta||_2^2 s.t. min <= delta + x <= max

        For each coordinate the per-coordinate minimum is taken over two
        candidates: keep delta[n] = dx[n] (L0 cost 0) or move to the clipped
        unconstrained optimum (L0 cost 1).
        """
        lam, mu = params
        N = x0.shape[0]

        g = -mu * r ** 2 - lam * c

        if mu > 0:
            t = 1 / (2 * mu)

            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                case1 = lam * bn * dx + mu * dx ** 2

                optd = -lam * bn * t
                if optd < min_ - x[n]:
                    optd = min_ - x[n]
                elif optd > max_ - x[n]:
                    optd = max_ - x[n]

                case2 = 1 + lam * bn * optd + mu * optd ** 2

                if case1 <= case2:
                    g += mu * dx ** 2 + lam * bn * dx
                else:
                    g += 1 + mu * optd ** 2 + lam * bn * optd
        else:
            # arg min_delta ||delta - dx||_0 + lam * b^T delta
            # case delta[n] = dx[n]: lam * b[n] * dx[n]
            # case delta[n] != dx[n]: lam * b[n] * [min_ - x[n], max_ - x[n]]
            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                case1 = lam * bn * dx
                case2 = 1 + lam * bn * (min_ - x[n])
                case3 = 1 + lam * bn * (max_ - x[n])
                if case1 <= case2 and case1 <= case3:
                    g += mu * dx ** 2 + lam * bn * dx
                elif case2 < case3:
                    g += 1 + mu * (min_ - x[n]) ** 2 + lam * bn * (min_ - x[n])
                else:
                    g += 1 + mu * (max_ - x[n]) ** 2 + lam * bn * (max_ - x[n])

        return g

    def _get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True):
        """Reconstruct delta from the accepted dual parameters.

        When ``touchup`` is set, the exact reconstruction may fail (return
        None); in that case nearby (lam, mu) perturbations are tried before
        falling back to the no-touchup reconstruction.
        """
        if touchup:
            delta = self.__get_final_delta(lam, mu, x0, x, b, min_, max_, c, r)
            if delta is not None:
                return delta
            else:
                # fallback: retry with slightly perturbed dual parameters
                params = [
                    (lam + 1e-5, mu),
                    (lam, mu + 1e-5),
                    (lam - 1e-5, mu),
                    (lam, mu - 1e-5),
                    (lam + 1e-5, mu + 1e-5),
                    (lam - 1e-5, mu - 1e-5),
                    (lam + 1e-5, mu - 1e-5),
                    (lam - 1e-5, mu + 1e-5),
                ]
                for param in params:
                    delta = self.__get_final_delta(
                        param[0], param[1], x0, x, b, min_, max_, c, r
                    )
                    if delta is not None:
                        return delta

                # 2nd fallback: give up on the touchup
                return self.__get_final_delta(
                    lam, mu, x0, x, b, min_, max_, c, r, False
                )
        else:
            return self.__get_final_delta(lam, mu, x0, x, b, min_, max_, c, r, False)

    def __get_final_delta(self, lam, mu, x0, x, b, min_, max_, c, r, touchup=True):
        """Per-coordinate reconstruction of delta (mirrors ``fun``), plus an
        optional single-coordinate "touchup" that adjusts one entry so the
        boundary constraint ``b.dot(delta) = c`` holds exactly.

        ``distance`` counts the coordinates that differ from dx, i.e. the L0
        cost accumulated so far.
        """
        delta = np.empty_like(x0)
        N = x0.shape[0]

        b_dot_d = 0
        norm_d = 0
        distance = 0

        if mu > 0:
            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                t = 1 / (2 * mu)

                case1 = lam * bn * dx + mu * dx ** 2

                optd = -lam * bn * t
                if optd < min_ - x[n]:
                    optd = min_ - x[n]
                elif optd > max_ - x[n]:
                    optd = max_ - x[n]

                case2 = 1 + lam * bn * optd + mu * optd ** 2

                if case1 <= case2:
                    d = dx
                else:
                    d = optd
                    distance += 1

                delta[n] = d
                b_dot_d += bn * d
                norm_d += d ** 2
        else:  # mu == 0
            for n in range(N):
                dx = x0[n] - x[n]
                bn = b[n]
                case1 = lam * bn * dx
                case2 = 1 + lam * bn * (min_ - x[n])
                case3 = 1 + lam * bn * (max_ - x[n])
                if case1 <= case2 and case1 <= case3:
                    d = dx
                elif case2 < case3:
                    d = min_ - x[n]
                    distance += 1
                else:
                    d = max_ - x[n]
                    distance += 1

                delta[n] = d
                norm_d += d ** 2
                b_dot_d += bn * d

        if touchup:
            # search for the one index that
            # (a) we can modify to match boundary constraint
            # (b) stays within our trust region and
            # (c) minimize the distance to the original image.
            dc = c - b_dot_d
            k = 0
            min_distance = np.inf
            min_norm = np.inf
            min_distance_idx = 0
            for n in range(N):
                if np.abs(b[n]) > 0:
                    dx = x0[n] - x[n]
                    old_d = delta[n]
                    new_d = old_d + dc / b[n]

                    if (
                        x[n] + new_d <= max_
                        and x[n] + new_d >= min_
                        and norm_d - old_d ** 2 + new_d ** 2 <= r ** 2
                    ):
                        # conditions (a) and (b) are fulfilled
                        if k == 0:
                            min_distance = (
                                distance
                                - (np.abs(old_d - dx) > 1e-10)
                                + (np.abs(new_d - dx) > 1e-10)
                            )
                            min_distance_idx = n
                            min_norm = norm_d - old_d ** 2 + new_d ** 2
                            k += 1
                        else:
                            new_distance = (
                                distance
                                - (np.abs(old_d - dx) > 1e-10)
                                + (np.abs(new_d - dx) > 1e-10)
                            )
                            # prefer smaller L0 distance; break ties by norm
                            if (
                                min_distance > new_distance
                                or min_distance == new_distance
                                and min_norm > norm_d - old_d ** 2 + new_d ** 2
                            ):
                                min_distance = new_distance
                                min_norm = norm_d - old_d ** 2 + new_d ** 2
                                min_distance_idx = n

            if k > 0:
                # touchup successful
                idx = min_distance_idx
                old_d = delta[idx]
                new_d = old_d + dc / b[idx]
                delta[idx] = new_d
                return delta
            else:
                # no coordinate can absorb the残 residual -> signal failure
                return None

        return delta

    def _distance(self, x0, x):
        # L0 distance: number of coordinates that differ by more than EPS.
        return np.sum(np.abs(x - x0) > EPS)
| [
"24K.Harry@gmail.com"
] | 24K.Harry@gmail.com |
44ebcb657cc982a434f7739be90aaa6d43b76463 | d90c32b954a255b5069eaea31702f5151f89c25a | /supplier/admin.py | a0665a3816e7f57fe1b0be8f768a18754fc00527 | [] | no_license | Lakmal96/Django-ecommerce-project | cd1524a874665b1d9fe4cb69d6985f5952dbebf6 | 7b5f3c4229dc46b4e10adfdd92826f21889727f6 | refs/heads/main | 2023-06-28T22:38:13.997011 | 2021-08-04T13:32:57 | 2021-08-04T13:32:57 | 383,501,349 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from django.contrib import admin
from . models import SupplierOrder
# Register your models here.
# Expose SupplierOrder in the Django admin site with the default ModelAdmin
# options (no custom admin class is configured here).
admin.site.register(SupplierOrder)
| [
"tharindu96221@gmail.com"
] | tharindu96221@gmail.com |
3e9404e4d11e541baa608263d9150e061d42f754 | 7343ece3b82ac87a594865c4074623b45b0297b4 | /tests/push/test_bulk_push_rule_evaluator.py | 7c23b77e0a11be202bca81598c6613e73eb667d8 | [
"Apache-2.0"
] | permissive | matrix-org/synapse | a00111f83310783b78e2996557f8bbae4d9fb229 | d35bed8369514fe727b4fe1afb68f48cc8b2655a | refs/heads/develop | 2023-09-05T05:24:20.808942 | 2023-09-04T16:14:09 | 2023-09-04T16:14:09 | 22,844,864 | 12,215 | 2,869 | Apache-2.0 | 2023-09-14T15:20:48 | 2014-08-11T15:51:42 | Python | UTF-8 | Python | false | false | 16,762 | py | # Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
from unittest.mock import AsyncMock, patch
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventContentFields, RelationTypes
from synapse.api.room_versions import RoomVersions
from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
from synapse.rest import admin
from synapse.rest.client import login, register, room
from synapse.server import HomeServer
from synapse.types import JsonDict, create_requester
from synapse.util import Clock
from tests.unittest import HomeserverTestCase, override_config
class TestBulkPushRuleEvaluator(HomeserverTestCase):
    """Tests for ``BulkPushRuleEvaluator.action_for_events_by_user``.

    Covers: tolerance of malformed ``notifications.room`` power levels, the
    ``push.enabled`` config switch, intentional mentions (``m.mentions``)
    for users and @room, and suppression of notifications for event edits.
    """

    # Servlets required to drive the client-server API in these tests.
    servlets = [
        admin.register_servlets_for_client_rest_resource,
        room.register_servlets,
        login.register_servlets,
        register.register_servlets,
    ]

    def prepare(
        self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
    ) -> None:
        # Create a new user and room.
        self.alice = self.register_user("alice", "pass")
        self.token = self.login(self.alice, "pass")
        self.requester = create_requester(self.alice)

        self.room_id = self.helper.create_room_as(
            # This is deliberately set to V9, because we want to test the logic which
            # handles stringy power levels. Stringy power levels were outlawed in V10.
            self.alice,
            room_version=RoomVersions.V9.identifier,
            tok=self.token,
        )

        self.event_creation_handler = self.hs.get_event_creation_handler()

    @parameterized.expand(
        [
            # The historically-permitted bad values. Alice's notification should be
            # allowed if this threshold is at or below her power level (60)
            ("100", False),
            ("0", True),
            (12.34, True),
            (60.0, True),
            (67.89, False),
            # Values that int(...) would not successfully cast should be ignored.
            # The room notification level should then default to 50, per the spec, so
            # Alice's notification is allowed.
            (None, True),
            # We haven't seen `"room": []` or `"room": {}` in the wild (yet), but
            # let's check them for paranoia's sake.
            ([], True),
            ({}, True),
        ]
    )
    def test_action_for_event_by_user_handles_noninteger_room_power_levels(
        self, bad_room_level: object, should_permit: bool
    ) -> None:
        """We should convert strings in `room` to integers before passing to Rust.

        Test this as follows:
        - Create a room as Alice and invite two other users Bob and Charlie.
        - Set PLs so that Alice has PL 60 and `notifications.room` is set to a bad value.
        - Have Alice create a message notifying @room.
        - Evaluate notification actions for that message. This should not raise.
        - Look in the DB to see if that message triggered a highlight for Bob.

        The test is parameterised with two arguments:
        - the bad power level value for "room", before JSON serisalistion
        - whether Bob should expect the message to be highlighted

        Reproduces #14060.

        A lack of validation: the gift that keeps on giving.
        """
        # Join another user to the room, so that there is someone to see Alice's
        # @room notification.
        bob = self.register_user("bob", "pass")
        bob_token = self.login(bob, "pass")
        self.helper.join(self.room_id, bob, tok=bob_token)

        # Alter the power levels in that room to include the bad @room notification
        # level. We need to suppress
        #
        # - canonicaljson validation, because canonicaljson forbids floats;
        # - the event jsonschema validation, because it will forbid bad values; and
        # - the auth rules checks, because they stop us from creating power levels
        #   with `"room": null`. (We want to test this case, because we have seen it
        #   in the wild.)
        #
        # We have seen stringy and null values for "room" in the wild, so presumably
        # some of this validation was missing in the past.
        with patch("synapse.events.validator.validate_canonicaljson"), patch(
            "synapse.events.validator.jsonschema.validate"
        ), patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"):
            pl_event_id = self.helper.send_state(
                self.room_id,
                "m.room.power_levels",
                {
                    "users": {self.alice: 60},
                    "notifications": {"room": bad_room_level},
                },
                self.token,
                state_key="",
            )["event_id"]

        # Create a new message event, and try to evaluate it under the dodgy
        # power level event.
        event, unpersisted_context = self.get_success(
            self.event_creation_handler.create_event(
                self.requester,
                {
                    "type": "m.room.message",
                    "room_id": self.room_id,
                    "content": {
                        "msgtype": "m.text",
                        "body": "helo @room",
                    },
                    "sender": self.alice,
                },
                prev_event_ids=[pl_event_id],
            )
        )
        context = self.get_success(unpersisted_context.persist(event))

        bulk_evaluator = BulkPushRuleEvaluator(self.hs)
        # should not raise
        self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)]))

        # Did Bob see Alice's @room notification?
        highlighted_actions = self.get_success(
            self.hs.get_datastores().main.db_pool.simple_select_list(
                table="event_push_actions_staging",
                keyvalues={
                    "event_id": event.event_id,
                    "user_id": bob,
                    "highlight": 1,
                },
                retcols=("*",),
                desc="get_event_push_actions_staging",
            )
        )
        self.assertEqual(len(highlighted_actions), int(should_permit))

    @override_config({"push": {"enabled": False}})
    def test_action_for_event_by_user_disabled_by_config(self) -> None:
        """Ensure that push rules are not calculated when disabled in the config"""

        # Create a new message event which should cause a notification.
        event, unpersisted_context = self.get_success(
            self.event_creation_handler.create_event(
                self.requester,
                {
                    "type": "m.room.message",
                    "room_id": self.room_id,
                    "content": {
                        "msgtype": "m.text",
                        "body": "helo",
                    },
                    "sender": self.alice,
                },
            )
        )
        context = self.get_success(unpersisted_context.persist(event))

        bulk_evaluator = BulkPushRuleEvaluator(self.hs)
        # Mock the method which calculates push rules -- we do this instead of
        # e.g. checking the results in the database because we want to ensure
        # that code isn't even running.
        bulk_evaluator._action_for_event_by_user = AsyncMock()  # type: ignore[method-assign]

        # Ensure no actions are generated!
        self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)]))
        bulk_evaluator._action_for_event_by_user.assert_not_called()

    def _create_and_process(
        self, bulk_evaluator: BulkPushRuleEvaluator, content: Optional[JsonDict] = None
    ) -> bool:
        """Returns true iff the `mentions` trigger an event push action."""
        # Create a new message event which should cause a notification.
        event, unpersisted_context = self.get_success(
            self.event_creation_handler.create_event(
                self.requester,
                {
                    "type": "test",
                    "room_id": self.room_id,
                    "content": content or {},
                    "sender": f"@bob:{self.hs.hostname}",
                },
            )
        )
        context = self.get_success(unpersisted_context.persist(event))
        # Execute the push rule machinery.
        self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)]))

        # If any actions are generated for this event, return true.
        result = self.get_success(
            self.hs.get_datastores().main.db_pool.simple_select_list(
                table="event_push_actions_staging",
                keyvalues={"event_id": event.event_id},
                retcols=("*",),
                desc="get_event_push_actions_staging",
            )
        )
        return len(result) > 0

    def test_user_mentions(self) -> None:
        """Test the behavior of an event which includes invalid user mentions."""
        bulk_evaluator = BulkPushRuleEvaluator(self.hs)

        # Not including the mentions field should not notify.
        self.assertFalse(self._create_and_process(bulk_evaluator))
        # An empty mentions field should not notify.
        self.assertFalse(
            self._create_and_process(bulk_evaluator, {EventContentFields.MENTIONS: {}})
        )

        # Non-dict mentions should be ignored.
        #
        # Avoid C-S validation as these aren't expected.
        with patch(
            "synapse.events.validator.EventValidator.validate_new",
            new=lambda s, event, config: True,
        ):
            mentions: Any
            for mentions in (None, True, False, 1, "foo", []):
                self.assertFalse(
                    self._create_and_process(
                        bulk_evaluator, {EventContentFields.MENTIONS: mentions}
                    )
                )

            # A non-list should be ignored.
            for mentions in (None, True, False, 1, "foo", {}):
                self.assertFalse(
                    self._create_and_process(
                        bulk_evaluator,
                        {EventContentFields.MENTIONS: {"user_ids": mentions}},
                    )
                )

        # The Matrix ID appearing anywhere in the list should notify.
        self.assertTrue(
            self._create_and_process(
                bulk_evaluator,
                {EventContentFields.MENTIONS: {"user_ids": [self.alice]}},
            )
        )
        self.assertTrue(
            self._create_and_process(
                bulk_evaluator,
                {
                    EventContentFields.MENTIONS: {
                        "user_ids": ["@another:test", self.alice]
                    }
                },
            )
        )

        # Duplicate user IDs should notify.
        self.assertTrue(
            self._create_and_process(
                bulk_evaluator,
                {EventContentFields.MENTIONS: {"user_ids": [self.alice, self.alice]}},
            )
        )

        # Invalid entries in the list are ignored.
        #
        # Avoid C-S validation as these aren't expected.
        with patch(
            "synapse.events.validator.EventValidator.validate_new",
            new=lambda s, event, config: True,
        ):
            self.assertFalse(
                self._create_and_process(
                    bulk_evaluator,
                    {
                        EventContentFields.MENTIONS: {
                            "user_ids": [None, True, False, {}, []]
                        }
                    },
                )
            )
            self.assertTrue(
                self._create_and_process(
                    bulk_evaluator,
                    {
                        EventContentFields.MENTIONS: {
                            "user_ids": [None, True, False, {}, [], self.alice]
                        }
                    },
                )
            )

        # The legacy push rule should not mention if the mentions field exists.
        self.assertFalse(
            self._create_and_process(
                bulk_evaluator,
                {
                    "body": self.alice,
                    "msgtype": "m.text",
                    EventContentFields.MENTIONS: {},
                },
            )
        )

    def test_room_mentions(self) -> None:
        """Test the behavior of an event which includes invalid room mentions."""
        bulk_evaluator = BulkPushRuleEvaluator(self.hs)

        # Room mentions from those without power should not notify.
        self.assertFalse(
            self._create_and_process(
                bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}}
            )
        )

        # Room mentions from those with power should notify.
        self.helper.send_state(
            self.room_id,
            "m.room.power_levels",
            {"notifications": {"room": 0}},
            self.token,
            state_key="",
        )
        self.assertTrue(
            self._create_and_process(
                bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}}
            )
        )

        # Invalid data should not notify.
        #
        # Avoid C-S validation as these aren't expected.
        with patch(
            "synapse.events.validator.EventValidator.validate_new",
            new=lambda s, event, config: True,
        ):
            mentions: Any
            for mentions in (None, False, 1, "foo", [], {}):
                self.assertFalse(
                    self._create_and_process(
                        bulk_evaluator,
                        {EventContentFields.MENTIONS: {"room": mentions}},
                    )
                )

        # The legacy push rule should not mention if the mentions field exists.
        self.assertFalse(
            self._create_and_process(
                bulk_evaluator,
                {
                    "body": "@room",
                    "msgtype": "m.text",
                    EventContentFields.MENTIONS: {},
                },
            )
        )

    def test_suppress_edits(self) -> None:
        """Under the default push rules, event edits should not generate notifications."""
        bulk_evaluator = BulkPushRuleEvaluator(self.hs)

        # Create & persist an event to use as the parent of the relation.
        event, unpersisted_context = self.get_success(
            self.event_creation_handler.create_event(
                self.requester,
                {
                    "type": "m.room.message",
                    "room_id": self.room_id,
                    "content": {
                        "msgtype": "m.text",
                        "body": "helo",
                    },
                    "sender": self.alice,
                },
            )
        )
        context = self.get_success(unpersisted_context.persist(event))
        self.get_success(
            self.event_creation_handler.handle_new_client_event(
                self.requester, events_and_context=[(event, context)]
            )
        )

        # The edit should not cause a notification.
        self.assertFalse(
            self._create_and_process(
                bulk_evaluator,
                {
                    "body": "Test message",
                    "m.relates_to": {
                        "rel_type": RelationTypes.REPLACE,
                        "event_id": event.event_id,
                    },
                },
            )
        )

        # An edit which is a mention will cause a notification.
        self.assertTrue(
            self._create_and_process(
                bulk_evaluator,
                {
                    "body": "Test message",
                    "m.relates_to": {
                        "rel_type": RelationTypes.REPLACE,
                        "event_id": event.event_id,
                    },
                    "m.mentions": {
                        "user_ids": [self.alice],
                    },
                },
            )
        )
| [
"noreply@github.com"
] | matrix-org.noreply@github.com |
9d93ef35ca6b034ffb730c9f29c242b4de9b3683 | d86b8aa1166ad0fb209c7e5d56f888cd9934ba9f | /example/demo/autocomplete.py | 2074dc521b5e5d63b91547203b7dc777645d4a32 | [] | no_license | KubeljK/django-sweet-autocomplete | c8a76b2ee1000514dcf72526ced8d8a27d454eae | bcde89ed835b28e4fa621d901f86006004d5276a | refs/heads/master | 2022-04-30T17:25:08.192319 | 2022-04-07T09:44:18 | 2022-04-07T09:44:18 | 251,356,301 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from rest_framework import serializers
from sweetautocomplete.autocomplete import autocompletefactory, ModelAutocomplete
from .models import City
class CityAutocomplete(ModelAutocomplete):
model = City
field = "name"
lookup = "__istartswith"
class Serializer(serializers.ModelSerializer):
label = serializers.CharField(source="name")
class Meta:
model = City
fields = ["label"]
autocompletefactory.register("city", CityAutocomplete)
| [
"klemen.kubelj@gmail.com"
] | klemen.kubelj@gmail.com |
10e759db010c8b6b789d7d87ec49bf54509011b0 | e57b5603085c7ed87894465a6d919947c15d1c70 | /run.py | ffc6b42078468b8fe56a10617cd5869fca671030 | [] | no_license | chineseluo/app_auto_frame_v1 | 8cf2910cd18d061a348b31723c3315f3a4e319be | cf9b1eefcb895d6556ef8bc0a743463b0d7bccb6 | refs/heads/master | 2023-03-29T13:20:51.665245 | 2020-08-18T06:33:06 | 2020-08-18T06:33:06 | 269,499,804 | 10 | 4 | null | 2021-03-25T23:49:04 | 2020-06-05T01:11:37 | Python | UTF-8 | Python | false | false | 2,824 | py | # !/user/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/12 21:11
# @Author : chineseluo
# @Email : 848257135@qq.com
# @File : run.py
# @Software: PyCharm
import os
import sys
import json
import logging
import pytest
from Common.publicMethod import PubMethod
# Project root directory (the directory containing this run.py); its last
# path component is used to build the report URL printed after a run.
root_dir = os.path.dirname(__file__)
# Framework configuration loaded once at import time; the
# "allure_environment" section feeds the allure report's environment widget.
config_yaml = PubMethod.read_yaml("./Conf/config.yaml")
def modify_report_environment_file(report_widgets_dir):
    """Write the allure ``environment.json`` widget file.

    Adds the test environment configuration (URL, version, account and
    description read from ``Conf/config.yaml``) so that it is displayed on
    the "Environment" widget of the allure test report.

    :param report_widgets_dir: allure results directory; the file is written
        to ``<report_widgets_dir>/widgets/environment.json``.
    :return: None
    """
    environment_info = [
        {"name": '测试地址', "values": [config_yaml['allure_environment']['URL']]},
        {"name": '测试版本号', "values": [config_yaml['allure_environment']["version"]]},
        {"name": '测试账户', "values": [config_yaml['allure_environment']['username']]},
        {"name": '测试说明', "values": [config_yaml['allure_environment']['description']]}
    ]
    # Make sure the widgets directory exists before writing into it.
    widgets_dir = os.path.join(report_widgets_dir, 'widgets')
    PubMethod.create_dirs(widgets_dir)
    # Bug fix: honour the report_widgets_dir argument instead of the previous
    # hard-coded './Report/allure-results/widgets/environment.json' path, so
    # the file always lands in the directory that was just created above.
    with open(os.path.join(widgets_dir, 'environment.json'), 'w', encoding='utf-8') as f:
        json.dump(environment_info, f, ensure_ascii=False, indent=4)
def run_all_case(mobile_system):
    """Run the whole pytest suite for *mobile_system* ('android' or 'ios'),
    then generate the allure HTML report and print its local URL."""
    report_widgets_dir = os.path.abspath("./Report/allure-results")
    # Run every collected test; pytest options come from pytest.ini/conftest.
    pytest.main()
    # Generate the allure report; --clean wipes previously generated output
    # (including any environment.json written by an earlier run).
    cmd = 'allure generate ./Report/{} -o ./Report/{}/allure-results --clean'.format(mobile_system.replace(" ", "_"),
                                                                                    mobile_system.replace(" ", "_"))
    logging.info("命令行执行cmd:{}".format(cmd))
    try:
        os.system(cmd)
    except Exception as e:
        # NOTE(review): os.system returns the command's exit status instead of
        # raising on failure, so this handler is mostly ineffective -- consider
        # checking the return code (or subprocess.run) instead.
        logging.error('命令【{}】执行失败,错误信息:{}!'.format(cmd, e))
        sys.exit()
    # Write the environment info shown on the allure report.
    modify_report_environment_file(report_widgets_dir)
    # Print the report URL for quick access.
    url = '报告链接:http://127.0.0.1:63342/{}/Report/{}/allure-results/index.html'.format(root_dir.split('/')[-1],
                                                                                     mobile_system.replace(" ", "_"))
    print(url)
# 命令行参数调用
def receive_cmd_arg():
    """Dispatch on the first CLI argument ('android' or 'ios'); with no
    argument, default to an android run."""
    global root_dir
    args = sys.argv
    if len(args) <= 1:
        # No platform given on the command line: default to android.
        run_all_case("android")
        return
    # Normalise Windows path separators before the report URL is built.
    root_dir = root_dir.replace("\\", "/")
    target = args[1]
    if target == "android":
        run_all_case("android")
    elif target == "ios":
        run_all_case("ios")
    else:
        logging.error("参数错误,请重新输入!!!")
if __name__ == "__main__":
    # Entry point: parse the CLI argument and kick off the test run.
    receive_cmd_arg()
| [
"luozw@inhand.com.cn"
] | luozw@inhand.com.cn |
088c5e7a7614b6a823d7f642139b198c4418225c | e48da6ba7b778879627359c175500f01c1831b5a | /nipap-cli/setup.py | 37bc99e7cbeb0c7afe84cf6e5071c746eeb8449f | [
"X11"
] | permissive | tobbakko/NIPAP | 8ab4f4ee27dd165f719900ad75e1724129f56710 | 659547f2f59974fb3180d978a45b478379779bd7 | refs/heads/master | 2021-01-15T23:21:03.990141 | 2012-06-04T05:32:06 | 2012-06-04T05:32:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | #!/usr/bin/env python
from distutils.core import setup
import nipap_cli
# Package metadata is sourced from the nipap_cli module's dunder attributes so
# it stays in sync with the code.
# NOTE(review): distutils is deprecated (removed in Python 3.12, PEP 632);
# consider migrating to setuptools when this package is next touched.
setup(
    name = 'nipap-cli',
    version = nipap_cli.__version__,
    description = "NIPAP shell command",
    long_description = "A shell command to interact with NIPAP.",
    author = nipap_cli.__author__,
    author_email = nipap_cli.__author_email__,
    license = nipap_cli.__license__,
    url = nipap_cli.__url__,
    packages = [ 'nipap_cli', ],
    keywords = ['nipap_cli', ],
    requires = ['pynipap', ],
    # Installed system-wide: executables into /usr/bin, docs/config samples
    # into /usr/share/doc.
    data_files = [
        ('/usr/bin/', ['nipap-helper', 'nipap']),
        ('/usr/share/doc/nipap-cli/', ['bash_complete', 'nipaprc'])
    ],
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Telecommunications Industry',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.6',
        'Topic :: Internet'
    ]
)
| [
"lukas@spritelink.net"
] | lukas@spritelink.net |
d1ae248b2cbfea871b8ed508927b98ef6691486f | 0019bfa7c7eb767a9aba68e4970a7df5feaf8ed7 | /src/parser/lexer.py | 065c2d17bfb31296733e71665b675b0e37b86a23 | [] | no_license | francescoracciatti/py-adele | c6be4fd753650519738ecf8297f811fef1012d28 | c61406bebda9423b97c7d1e406ce4a47d2a00de3 | refs/heads/master | 2020-03-08T12:07:24.064532 | 2018-06-17T15:55:09 | 2018-06-17T15:55:09 | 128,117,801 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,748 | py | # -*- coding: utf-8 -*-
""" This module contains the ADeLe's lexing rules.
Author:
Francesco Racciatti
Copyright 2018 Francesco Racciatti
"""
import logging
from typing import Tuple, Dict
from mypy_extensions import NoReturn
from ply import lex
from ply.lex import LexToken, Lexer
from lexeme import Lexeme
logger = logging.getLogger(__name__)
class Keyword(Lexeme):
    """ The ADeLe's keywords. """
    # NOTE(review): member names presumably double as lexer token names via the
    # project's Lexeme base (see tokens()/reverse_map() below) -- confirm in
    # lexeme.py.
    # Unscoped types
    BOOLEAN = 'boolean'
    CHAR = 'char'
    INTEGER = 'integer'
    FLOAT = 'float'
    STRING = 'string'
    # Scoped types, when the size does really matter
    UINT8 = 'uint8'
    UINT16 = 'uint16'
    UINT32 = 'uint32'
    UINT64 = 'uint64'
    SINT8 = 'sint8'
    SINT16 = 'sint16'
    SINT32 = 'sint32'
    SINT64 = 'sint64'
    FLOAT32 = 'float32'
    FLOAT64 = 'float64'
    # Generic message
    MESSAGE = 'message'
    # Boolean values
    FALSE = 'false'
    TRUE = 'true'
    # Configuration
    SET_UNIT_TIME = 'setUnitTime'
    SET_UNIT_LENGTH = 'setUnitLength'
    SET_UNIT_ANGLE = 'setUnitAngle'
    SET_TIME_START = 'setTimeStart'
    # Actions
    ELEMENT_MISPLACE = 'elementMisplace'
    ELEMENT_ROTATE = 'elementRotate'
    ELEMENT_DECEIVE = 'elementDeceive'
    ELEMENT_DISABLE = 'elementDisable'
    ELEMENT_ENABLE = 'elementEnable'
    ELEMENT_DESTROY = 'elementDestroy'
    MESSAGE_WRITE = 'messageWrite'
    MESSAGE_READ = 'messageRead'
    MESSAGE_FORWARD = 'messageForward'
    MESSAGE_INJECT = 'messageInject'
    MESSAGE_CREATE = 'messageCreate'
    MESSAGE_CLONE = 'messageClone'
    MESSAGE_DROP = 'messageDrop'
    # Compound Statements
    CONFIGURATION = 'configuration'
    ATTACK = 'attack'
    SCENARIO = 'scenario'
    AT = 'at'
    FOREACH = 'foreach'
    FROM = 'from'
    FOR = 'for'
    IF = 'if'
    ELSE = 'else'
    # Containers
    LIST = 'list'
    RANGE = 'range'
    # Accessors
    IN = 'in'
    # WellKnown values
    CAPTURED = 'CAPTURED'
    SELF = 'SELF'
    START = 'START'
    END = 'END'
    TX = 'TX'
    RX = 'RX'
    # Time
    HOUR = 'h'
    MINUTE = 'min'
    SECOND = 's'
    SECOND_MILLI = 'ms'
    SECOND_MICRO = 'us'
class Punctuation(Lexeme):
    """ The ADeLe's punctuation. """
    # Each value is a regex fragment (hence the escaping); it is exposed to ply
    # through the module-level t_<NAME> assignments further down.
    # Basic assignment operator
    ASSIGN = r'='
    # Compound assignment operators
    ASSIGN_ADD = r'\+='
    ASSIGN_SUB = r'-='
    ASSIGN_MUL = r'\*='
    ASSIGN_DIV = r'/='
    ASSIGN_MOD = r'%='
    # Comparison operators
    NOT_EQUAL_TO = r'!='
    EQUAL_TO = r'=='
    GR_EQ_THAN = r'>='
    LS_EQ_THAN = r'<='
    GR_THAN = r'>'
    LS_THAN = r'<'
    # Basic operators
    ADD = r'\+'
    SUB = r'-'
    MUL = r'\*'
    DIV = r'/'
    MOD = r'%'
    EXP = r'\^'
    NEG = r'!'
    # Logical operators
    LOGIC_AND = r'\&\&'
    LOGIC_OR = r'\|\|'
    # Parenthesis
    ROUND_L = r'\('
    ROUND_R = r'\)'
    BRACK_L = r'\['
    BRACK_R = r'\]'
    CURVY_L = r'\{'
    CURVY_R = r'\}'
    # Other punctuation
    SEMICOLON = r'\;'
    COMMA = r'\,'
    COLON = r'\:'
class Literal(Lexeme):
    """ Supports the tokenization of literal values. """
    # Each name below has a matching t_<NAME> rule function in this module.
    LITERAL_IDENTIFIER = 'LITERAL_IDENTIFIER'
    LITERAL_INTEGER = 'LITERAL_INTEGER'
    LITERAL_STRING = 'LITERAL_STRING'
    LITERAL_FLOAT = 'LITERAL_FLOAT'
    LITERAL_CHAR = 'LITERAL_CHAR'
# The tokens used by the tokenizer (ply requires a module-level `tokens` list)
tokens: Tuple[str, ...] = Keyword.tokens() + Punctuation.tokens() + Literal.tokens()
# The reserved keywords: maps source-text keyword -> token name; consulted by
# t_LITERAL_IDENTIFIER so keywords are not tokenized as plain identifiers.
reserved: Dict[str, str] = Keyword.reverse_map()
# Tokenizers for the punctuation
# (ply treats each t_<TOKEN> string as the regex for that token)
t_ASSIGN: str = Punctuation.ASSIGN.lexeme
t_ASSIGN_ADD: str = Punctuation.ASSIGN_ADD.lexeme
t_ASSIGN_SUB: str = Punctuation.ASSIGN_SUB.lexeme
t_ASSIGN_MUL: str = Punctuation.ASSIGN_MUL.lexeme
t_ASSIGN_DIV: str = Punctuation.ASSIGN_DIV.lexeme
t_ASSIGN_MOD: str = Punctuation.ASSIGN_MOD.lexeme
t_NOT_EQUAL_TO: str = Punctuation.NOT_EQUAL_TO.lexeme
t_EQUAL_TO: str = Punctuation.EQUAL_TO.lexeme
t_GR_EQ_THAN: str = Punctuation.GR_EQ_THAN.lexeme
t_LS_EQ_THAN: str = Punctuation.LS_EQ_THAN.lexeme
t_GR_THAN: str = Punctuation.GR_THAN.lexeme
t_LS_THAN: str = Punctuation.LS_THAN.lexeme
t_ADD: str = Punctuation.ADD.lexeme
t_SUB: str = Punctuation.SUB.lexeme
t_MUL: str = Punctuation.MUL.lexeme
t_DIV: str = Punctuation.DIV.lexeme
t_MOD: str = Punctuation.MOD.lexeme
t_EXP: str = Punctuation.EXP.lexeme
t_NEG: str = Punctuation.NEG.lexeme
t_LOGIC_AND: str = Punctuation.LOGIC_AND.lexeme
t_LOGIC_OR: str = Punctuation.LOGIC_OR.lexeme
t_ROUND_L: str = Punctuation.ROUND_L.lexeme
t_ROUND_R: str = Punctuation.ROUND_R.lexeme
t_BRACK_L: str = Punctuation.BRACK_L.lexeme
t_BRACK_R: str = Punctuation.BRACK_R.lexeme
t_CURVY_L: str = Punctuation.CURVY_L.lexeme
t_CURVY_R: str = Punctuation.CURVY_R.lexeme
t_COMMA: str = Punctuation.COMMA.lexeme
t_COLON: str = Punctuation.COLON.lexeme
t_SEMICOLON: str = Punctuation.SEMICOLON.lexeme
# Token parsing rule for chars
def t_LITERAL_CHAR(t: LexToken) -> LexToken:
    # The raw-string docstring below IS the ply token regex -- do not edit it.
    r"\'.\'"
    # Strip the surrounding single quotes, keeping only the character itself.
    t.value = t.value.replace("'", "")
    return t
# Token parsing rule for float numbers
def t_LITERAL_FLOAT(t: LexToken) -> LexToken:
    # The raw-string docstring below IS the ply token regex -- do not edit it.
    # Defined before t_LITERAL_INTEGER: ply tries function rules in definition
    # order, so "3.14" matches here instead of being split into two integers.
    r'-?\d+\.\d+'
    try:
        t.value = float(t.value)
    except ValueError:
        # Should be unreachable given the regex; log loudly and re-raise.
        logger.critical("float number [" + str(t.value) + "] badly defined")
        raise
    return t
# Token parsing rule for signed integer numbers
def t_LITERAL_INTEGER(t: LexToken) -> LexToken:
    # The raw-string docstring below IS the ply token regex -- do not edit it.
    r'-?\d+'
    try:
        t.value = int(t.value)
    except ValueError:
        # Should be unreachable given the regex; log loudly and re-raise.
        logger.critical("integer number [" + str(t.value) + "] badly defined")
        raise
    return t
# Token parsing rule for strings
def t_LITERAL_STRING(t: LexToken) -> LexToken:
    # Ply regex (the docstring): a double-quoted string allowing escapes.
    r'\"([^\\"]|(\\.))*\"'
    # Drop the double quotes, keeping only the string contents.
    t.value = t.value.replace('\"', '')
    return t
# Token parsing rule for identifiers
def t_LITERAL_IDENTIFIER(t: LexToken) -> LexToken:
    # Ply regex (the docstring): a letter followed by letters/digits/underscores.
    r'[a-zA-Z][a-zA-Z_0-9]*'
    # Checks if the identifier is a reserved keyword; if so, retype the token.
    t.type = reserved.get(t.value, Literal.LITERAL_IDENTIFIER.lexeme)
    return t
# Token parsing rule to ignore tab occurrences
t_ignore: str = ' \t'
# Token parsing rule to ignore comments
def t_comment(t: LexToken) -> None:
    # Ply regex (the docstring): '#' to end of line; returning nothing
    # discards the matched text.
    r'\#.*'
    pass
# Token parsing rule to track line numbers
def t_newline(t: LexToken) -> None:
    # Ply regex (the docstring): one or more newlines; bump the line counter
    # so error messages can report accurate positions.
    r'\n+'
    t.lexer.lineno += len(t.value)
# Token parsing rule for wrong statement or characters
def t_error(t: LexToken) -> NoReturn:
    # Ply's error hook: called with the remaining input when no rule matched.
    # Abort lexing instead of skipping the offending character.
    msg = "Illegal character '{}' - line {}".format(t.value[0], t.lexer.lineno)
    logger.critical(msg)
    raise RuntimeError(msg)
# Builds the lexer
lexer: Lexer = lex.lex()
| [
"racciatti.francesco@gmail.com"
] | racciatti.francesco@gmail.com |
248c9152bbb8623c6fc0909ddc639ffa604c646b | 99e4d9226e124215aaf66945cfaa5c42d18cc19f | /questionbot/matchableSentence.py | 08fc5ea5a19bf209ddf7989190890511301aaabe | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | mathieucaroff/oxowlbot | d826423a1a4cca8a38c90383d0a71dbb40052f35 | a10c12b7c94b3e7030cef2f57c567bbd3034c8c9 | refs/heads/master | 2022-04-18T14:06:29.049957 | 2020-04-22T14:44:57 | 2020-04-22T14:44:57 | 255,177,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import logging
from typing import List
from .stanza.pword import PWord
# Arrow glyph encoding a word's signed distance to its dependency head;
# direction of the arrow encodes left/right, the glyph shape the magnitude.
# Keys only cover -6..6 (see the lookup in matchableWord).
symbolMap = {
    -6: "<=-",
    -5: "<-=",
    -4: "<--",
    -3: "<=",
    -2: "<-",
    -1: "<",
    0: "==",
    1: ">",
    2: "->",
    3: "=>",
    4: "-->",
    5: "=->",
    6: "-=>",
}
def matchableSentence(wordList: List[PWord]) -> str:
    """Render every word of *wordList* in its matchable form, space-separated."""
    return " ".join(matchableWord(w) for w in wordList)
def matchableWord(word: PWord) -> str:
    """Encode one parsed word as a single ':..._.' token embedding its id,
    lemma, POS, number, dependency relation, feature string and head
    direction, so rule patterns can match it with plain substring searches.
    """
    # Signed distance to the dependency head; the root (head == 0) is
    # rendered as distance zero.
    diff = word.head - int(word.id)
    if word.head == 0:
        diff = 0
    # NOTE(review): symbolMap only covers -6..6; a longer-range head makes
    # `symbol` None and leaks the text "None" into the output -- confirm
    # whether that is intended.
    symbol = symbolMap.get(diff)
    number = "x"
    hintString = ""
    pastHint = 0
    # Scan the feature string ('Key=Value' pairs joined by '|', CoNLL-U style
    # -- presumably produced by stanza; confirm in stanza/pword.py).
    for piece in word.feats.split("|"):
        if piece == "Number=Plur":
            number = "2"
        if piece == "Number=Sing":
            number = "1"
        if piece == "VerbForm=Part":
            pastHint += 1
        if piece == "Tense=Past":
            pastHint += 1
    # Both participle and past-tense markers present => flag as past form.
    if pastHint >= 2:
        hintString += "_Hpast"
    w = word
    upos = w.upos.lower()
    # Escape the separators so feature text cannot collide with the outer
    # ':' / '.' / '|' delimiters of the encoded token.
    feats = w.feats.replace("|", "_F").replace(":", "+")
    deprel = w.deprel.replace(':', '+')
    result = f":{w.id}_L{w.lemma}_U{upos}_N{number}_R{deprel}{hintString}_F{feats}_{symbol}."
    # ':' and '.' delimit tokens; any stray occurrence inside the body would
    # corrupt matching, so log it and strip as a last resort.
    if "." in result[1:-1] or ":" in result[1:-1]:
        logging.error(f"bad (:.) matchableWord: {result}")
        result = ":" + result.replace(":", "").replace(".", "") + "."
    return result
| [
"mathieu.caroff@free.fr"
] | mathieu.caroff@free.fr |
cadcf68f5c8778c6d37d161ec9f61eb7eea2b5fb | 530fb72805d9ccbf005fa264fcad00f082aa6041 | /246 shelve.py | 299e987106dec1800c1821c89a1a982f8f18e211 | [] | no_license | swapnilk02/Udemy_Python | cb6581a516042250a4e8c7787170e012f467c8fe | 2036d70cd96bfa6fc281cdc79dac067845405e09 | refs/heads/master | 2023-07-20T06:22:45.105896 | 2021-09-04T11:25:39 | 2021-09-04T11:25:39 | 394,700,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | # note: the shelve lecture not done in depth ....need to revisit the lecture for better understanding
# shelve and the dictionary pritty much similar...the only difference is that we are incase of the shelve ..
# ..the key has to be a string and the value will be pickled
import shelve
with shelve.open('ShelfTest') as fruit:
fruit['orange'] = "a sweet, orange, citrus fruit"
fruit['apple'] = "good for making cider"
fruit['lemon'] = "a sour, yellow citrus fruit"
fruit['grape'] = "a small, sweet fruit growing in bunches"
fruit['lime'] = "a sour, green citrus fruit"
print(fruit["lemon"])
print(fruit["grape"])
#when we use the with keyword....it will close the shelve by itseld..but we have to close it manually if we dont use
#with code for that can be somehting like below
fruit=shelve.open('ShelfTest')
fruit['orange'] = "a sweet, orange, citrus fruit"
fruit['apple'] = "good for making cider"
fruit['lemon'] = "a sour, yellow citrus fruit"
fruit['grape'] = "a small, sweet fruit growing in bunches"
fruit['lime'] = "a sour, green citrus fruit"
fruit['lime']="great with tequila" # here we are assigninng new value to a exisitng key
print(fruit["lemon"])
print(fruit["grape"])
print(fruit["lime"])
#print(fruit["limee"]) # on this line we are trying to get the value for the key which is not present in shelve..this will give the error
#to avoid getting the error if we try to get the value for the key which does not exist /.....we can use get method
#trying to get the value for the key that does not exist using get method
description=fruit.get("limee","we dont have key") # second argument is the defalut value to be printed if key is absent n
print(description)
fruit.close() | [
"swapnilkagane1@gnail.com"
] | swapnilkagane1@gnail.com |
2f741834a8d047a3112833662d7cd9490303215d | dae95a676126ae88b244b6383f05a024ca538027 | /sWAP_cASE.py | 9e8fdf97a7716121cb4b96cddefb240b0b3e398f | [] | no_license | jobairBalam/HackerRank-python-problem | a51d551f9e83e711f651bbd9aa6b529c42ea7092 | 7ca7fc29833f9cbd7bea0de2e0a29a1dffac6e42 | refs/heads/master | 2021-12-14T13:38:07.157259 | 2021-11-27T08:31:19 | 2021-11-27T08:31:19 | 185,306,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | def swap_case(s):
a = ""
for let in s:
if let.isupper() == True:
a+=(let.lower())
else:
a+=(let.upper())
return a
if __name__ == '__main__':
    # HackerRank harness: read one line, swap its case and print the result.
    s = input()
    result = swap_case(s)
    print(result)
| [
"noreply@github.com"
] | jobairBalam.noreply@github.com |
d5fc42cc9b5195a035e94c7ff35d169bb69973df | f13f1a996dbeccccb0bd30bdfa6953dcdd9fcf61 | /hub/test.py | dc35250562afdac22fa27f5607736f8fb10da72f | [] | no_license | MajimeHajime/SMKN4BDG_SmartHelper_Flask | d8766ffe4fdb66bdb777aaeec885048a1acf434d | 0e1ab76edfbc62b1c8e3ab95e00b74e7a602e877 | refs/heads/main | 2023-01-23T09:00:05.877789 | 2020-12-03T11:01:22 | 2020-12-03T11:01:22 | 315,264,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | from flask import Flask, render_template, url_for
from PredictionModel import run_prediction
from datetime import date, timedelta
import plotly
import plotly.express as px
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import json
import pickle
'''
today = date.today()
todayish = today - timedelta(days=1)
yesterday = today - timedelta(days=2)
tomorrow = today + timedelta(days=1)
outaded = bool(0)
case = 0
casePast = 0
casePred = 0
change = 0
try:
lastRun = pickle.load(open("day.pickle", "rb"))
except (OSError, IOError) as e:
lastRun = today
pickle.dump(lastRun, open("day.pickle", "wb"))
lastRun = lastRun
print(lastRun)
'''
# Load yesterday's total from the scraped CSV and print it.
df = pd.read_csv('testfile.csv')
# "Yesterday" relative to the current run; reused in the lookup below instead
# of recomputing the identical expression inline (the original duplicated it).
today = date.today() - timedelta(days=1)
# NOTE(review): int() on the .loc selection assumes exactly one matching row
# -- confirm the CSV holds one row per date.
stats = int(df.loc[df['date'] == str(today), 'total'])
print(stats)
| [
"MajimeHajime@users.noreply.github.com"
] | MajimeHajime@users.noreply.github.com |
9e367421bb74b17511012b38e47f3fc511540a62 | f98347c036a98c32a0c72c49bf1e298588d48bab | /MyProjectRest/MyProjectRest/settings.py | bdbb19e0337188845c243b6cae3526de63938721 | [] | no_license | ikki2530/django_isfun | 2de26ceb1e3e2a76063dcd602f8c3afa627713cb | 91615c96b2297005ca3a21edc123466ca7d4ae18 | refs/heads/master | 2022-12-26T21:14:29.824341 | 2020-10-16T16:21:43 | 2020-10-16T16:21:43 | 288,185,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | """
Django settings for MyProjectRest project.
Generated by 'django-admin startproject' using Django 2.2.11.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; move it to an
# environment variable before any real deployment.
SECRET_KEY = 'nt!@6n=-m_klbe=fg7)g0j2hqefw-pcj9t8vb(yl!g8^h*_(d^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Stock Django apps plus DRF (with token auth) and the local api_basic app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'api_basic',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyProjectRest.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'MyProjectRest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development default: a local SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"dagomez2530@gmail.com"
] | dagomez2530@gmail.com |
10f1a2beeb2f92dd6f7c12c073707ab12b23578b | 176497ba1cea7233f249a5f439a65f7c472b267f | /11_polls_part_4/02_refactor_using_django_form/feed/forms.py | b6e2d79cd5ed81d818be9a91bfe921cbf89f9fc8 | [] | no_license | CodingNomads/django-web-dev | 79a3a94707489ca0d5f0bf49193b7ffdf6270f4a | e03b8ed130f100afb0296c0d76a84206fbbf789d | refs/heads/master | 2023-05-02T05:12:21.427462 | 2022-11-06T17:56:14 | 2022-11-06T17:56:14 | 235,174,521 | 1 | 7 | null | 2023-04-21T20:54:10 | 2020-01-20T18:53:31 | Python | UTF-8 | Python | false | false | 489 | py | from django import forms
from django.utils.html import strip_tags
from .models import Post
class PostForm(forms.ModelForm):
    """ModelForm for creating a Post; the user is excluded here and is
    presumably assigned by the view -- confirm against the view code."""
    # Free-text body rendered as a Bootstrap-styled textarea.
    body = forms.CharField(required=True,
                           widget=forms.widgets.Textarea(
                               attrs={
                                   'placeholder': 'Post',
                                   'class': 'form-control'
                               }))
    class Meta:
        model = Post
        exclude = ('user', )
| [
"breuss.martin@gmail.com"
] | breuss.martin@gmail.com |
ed2b24be9e79cc47a29adef832946f1b9008a54f | 3a298c93b67386392d3dee243671f2c101decf01 | /hackerrank/interview-preparation-kit/string-manipulation/02_alternating_characters.py | 4ed83c47964e820fee050e52be5a67ab600cced2 | [] | no_license | Zahidsqldba07/coding-problems-2 | ffbc8408e4408fc846c828af2ec50a9d72e799bc | 020bffbd14ca9993f1e678181ee7df761f1533de | refs/heads/master | 2023-06-26T11:05:34.089697 | 2021-07-21T15:16:10 | 2021-07-21T15:16:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | def alternatingCharacters(s):
min_dels = 0
for i in range(1, len(s)):
if s[i] == s[i-1]:
i += 1
min_dels += 1
return min_dels | [
"alvee.akand@outlook.com"
] | alvee.akand@outlook.com |
0ce6e749597f55c905b7e2424bfbccf1cb5aff1d | ccea7e98c9fdbb956a2e670725a41aef8c7e8437 | /trash/old.py | da80b458e231c75927ee540c74d6efb84d04912c | [] | no_license | ftvalentini/waba-TxExplorer | 1ed73b6d707a893de747022848c63ac480d3af3a | 5b23a436b84ed91112f585794cd046ef197ff0f0 | refs/heads/master | 2021-06-18T03:04:31.877405 | 2019-09-22T13:55:34 | 2019-09-22T13:55:34 | 146,499,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,801 | py | # def get_token_holders(token_id='1.3.1236', account_prefix=r'moneda-par', omit_accounts=[r'gobierno-par']):
# url = 'http://185.208.208.184:5000/get_all_asset_holders?asset_id='+token_id
# response = urllib.request.urlopen(url)
# data = json.loads(response.read())
# accounts = {'name':[i['name'] for i in data],
# 'id': [i['account_id'] for i in data]}
# df = pd.DataFrame(accounts['name'], index=accounts['id'], columns=['name'])
# df['name'] = df['name'].str.replace(account_prefix+r'.','')
# # data1 = [pd.Series(i)[['name','account_id']] for i in data_temp]
# # df1 = pd.DataFrame(data1)
# return df.loc[~df.name.isin(omit_accounts)]
# ### Para agregar usuarios no captados:
# # open transacciones.json de usarios catalogados como holders
# # with open('transacciones.json','r') as f:
# # data = pd.read_json(f, orient='index')
# # data = data.drop(columns=['sender_name','recipient_name'])
# # agrega la cuenta del gobierno para identificar usuarios no captados
# all_accounts = get_token_holders(token_id=tk_id, account_prefix=tk_prefix, omit_accounts=[''])
# gobierno_id = list(all_accounts[all_accounts.name == 'gobierno-par'].index)[0]
# historical_gob = get_user_data(user_id=gobierno_id, max_page_num=999999999, token_id='1.3.1236')
# # historico (inc. gobierno)
# data_2 = pd.concat([data] + [historical_gob]).drop_duplicates()
# # usuarios no incluidos como asset_holders que transaccionaron con gob o con token_holders
# strange_recip = np.array(data_2.loc[~data_2.recipient.isin(users_ids + [gobierno_id]),'recipient'])
# strange_sender = np.array(data_2.loc[~data_2.sender.isin(users_ids + [gobierno_id]),'sender'])
# strange_ids = list(np.union1d(strange_recip,strange_sender))
# # nombres de usarios strange
# strange_names = [get_user_name(i) for i in strange_ids]
# # datos de las cuentas 'strange' (corrido con max_page_num=9999999999)
# historical_strange = [get_user_data(user_id=i, max_page_num=999999999, token_id=tk_id) for i in strange_ids]
# data_strange = pd.concat(historical_strange).drop_duplicates()
# accounts_strange = pd.DataFrame(data=strange_names,index=strange_ids,columns=['name'])
# # datos de cuentas holders + cuentas strange
# accounts_full = pd.concat(accounts, accounts_strange)
# data_full = pd.concat(data, data_strange).drop_duplicates().sort_values('time', ascending=True)
# data_full = pd.merge(data_full, accounts_full, how='left', left_on='sender', right_index=True)
# data_full = data_full.rename(columns={'name':'sender_name'})
# data_full = pd.merge(data_full, accounts_full, how='left', left_on='recipient', right_index=True)
# data_full = data_full.rename(columns = {'name':'recipient_name'})
# # write to json file
# data_full.to_json('transacciones_full.json', orient='index')
| [
"ft.valentini@gmail.com"
] | ft.valentini@gmail.com |
06987b844ae674541272c3184dcb10864d851190 | 1498148e5d0af365cd7fd16197174174a7fa9800 | /t001125.py | fbab733d238bfb3ac8c2b42ba0affa9097b2b6e9 | [] | no_license | feiyanshiren/myAcm | 59a2b80fe7e02787defcb152eee3eae26135322a | 00c7082d5143ddf87aeeafbdb6ce29da46dc8a12 | refs/heads/master | 2023-09-01T12:12:19.866447 | 2023-09-01T09:09:56 | 2023-09-01T09:09:56 | 148,560,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | def iToR(n):
c = [["","I","II","III","IV","V","VI","VII","VIII","IX"],
["","X","XX","XXX","XL","L","LX","LXX","LXXX","XC"],
["","C","CC","CCC","CD","D","DC","DCC","DCCC","CM"],
["","M","MM","MMM"]]
s = ""
s += c[3][n // 1000 % 10]
s += c[2][n // 100 % 10]
s += c[1][n // 10 % 10]
s += c[0][n % 10]
return s
try:
    # Convert every stdin line to its Roman-numeral form until input runs out.
    while 1:
        n = int(input())
        print(iToR(n))
except (EOFError, ValueError):
    # End of input (or a non-numeric line) terminates the loop.  The original
    # bare `except:` also silently swallowed KeyboardInterrupt/SystemExit;
    # narrowing it lets those propagate as they should.
    pass
| [
"feiyanshiren@163.com"
] | feiyanshiren@163.com |
d01096deba1bed148a3d391be023ef29e249f2c9 | 075b14652bfc9b0c86203bc6f0922528f8e8bf4e | /sitereview.py | d541ac07614baf3c56d935c54604ef35ae27cc5c | [] | no_license | javierkos/ArticleClassifier | a4ba01b07d854d46722dcf8af222b186f5e7c37d | e80f7e0fdde0f26c8e4811630068a0dbcd11fab9 | refs/heads/master | 2021-01-25T14:57:17.810084 | 2018-03-03T23:46:04 | 2018-03-03T23:46:04 | 123,740,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | from __future__ import print_function
from argparse import ArgumentParser
from bs4 import BeautifulSoup
import json
import requests
import sys
class SiteReview(object):
    """Thin client for Blue Coat's Site Review categorization endpoint."""

    def __init__(self):
        self.baseurl = "http://sitereview.bluecoat.com/rest/categorization"
        self.useragent = {"User-Agent": "Mozilla/5.0"}

    def sitereview(self, url):
        """POST *url* for categorization and return the decoded JSON body.

        Stores the raw response on self.req for check_response() to inspect.
        Exits the process on connection failure.
        """
        payload = {"url": url}
        try:
            self.req = requests.post(
                self.baseurl,
                headers=self.useragent,
                data=payload
            )
        except requests.ConnectionError:
            sys.exit("[-] ConnectionError: "
                     "A connection error occurred")
        return json.loads(self.req.content.decode("UTF-8"))

    def check_response(self, response):
        """Validate status / payload and extract category, rating date, url."""
        if self.req.status_code != 200:
            # BUG FIX: the original referenced a bare `req`, which is undefined
            # in this scope and raised NameError instead of reporting the
            # HTTP status.
            sys.exit("[-] HTTP {} returned".format(self.req.status_code))
        elif "error" in response:
            sys.exit(response["error"])
        else:
            # The categorization fields arrive as HTML snippets; strip the tags.
            self.category = BeautifulSoup(response["categorization"], "lxml").get_text()
            self.date = BeautifulSoup(response["ratedate"], "lxml").get_text()[0:35]
            self.url = response["url"]
def main(url):
    """Look up *url* on Blue Coat Site Review and print a small report."""
    s = SiteReview()
    response = s.sitereview(url)
    s.check_response(response)
    title = "Blue Coat Site Review"
    # Size the border to the title that is actually printed; the original
    # measured the different string "BlueCoat Site Review", leaving the
    # banner one character too short.
    border = "=" * (len(title) + 2)
    print("\n{0}\n{1}\n{0}\n".format(border, title))
    print("URL: {}\n{}\nCategory: {}\n".format(
        s.url,
        s.date,
        s.category
    ))
if __name__ == "__main__":
    # Single positional argument: the domain/URL to categorize.
    p = ArgumentParser()
    p.add_argument("url", help="Submit domain/URL to Blue Coat's Site Review")
    args = p.parse_args()
    main(args.url)
"zcabpas@ucl.ac.uk"
] | zcabpas@ucl.ac.uk |
f74b5b661edca5ae63994aea588fc714eb0e6127 | 40d6f1aeb9708c9d64208ae6f6762bc00c9cf6f2 | /python_00100_datatypes_calculator/example_00400_operations_bools_exercise_solution.py | d1f21d80b62623f82818c80e8da67d3940a88d38 | [] | no_license | Bara-Ga/SmartninjaCourse | 37110c79deb9731d3ed24e446addaaf852e1cf22 | 300ec397b35819544835263c88be616283aaf165 | refs/heads/master | 2020-08-05T17:32:14.901033 | 2019-10-14T15:14:06 | 2019-10-14T15:14:06 | 212,634,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # coding=utf-8
# Exercise (Python 2 syntax): create two numbers (e.g. 5 and 1000), store
# them in variables, and check whether the first is greater than the second.
a = 5
b = 500
print a > b
# Check whether both numbers are equal.
# NOTE: `is` compares object identity while `==` compares values; the lesson
# shows both side by side.
print a is b
print a == b
# Create two strings (e.g. "Hallo" and "Tschuess") and check whether the two
# words are equal (again contrasting `==` with `is`).
word1 = "hallo"
word2 = "bye"
print word1 == word2
print word1 is word2
| [
"b_gasselsberger@hotmail.com"
] | b_gasselsberger@hotmail.com |
16e2f39d93d44121207666aaed39b10a375cc842 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /bBExn57vLEsXgHC5m_18.py | 3f968eb28deda866f7bf09e4165adcac6ad9b42e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | """
Create a function that returns `True` if three points belong to the same line,
and `False` otherwise. Each point is represented by a list consisting of an x-
and y-coordinate.
### Examples
same_line([[0, 0], [1, 1], [3, 3]]) ➞ True
same_line([[-2, -1], [2, 1], [0, 0]]) ➞ True
same_line([[-2, 0], [-10, 0], [-8, 0]]) ➞ True
same_line([[0, 0], [1, 1], [1, 2]]) ➞ False
same_line([[3, 4], [3, 5], [6, 6]]) ➞ False
### Notes
Note the special case of a vertical line.
"""
def same_line(lst):
    """Return True when the three [x, y] points in *lst* are collinear.

    Uses the cross product of the two vectors from the first point to the
    other two: it is zero exactly when the points lie on one line (this also
    handles vertical lines, where a slope comparison would divide by zero).
    """
    (x0, y0), (x1, y1), (x2, y2) = lst
    return (x1 - x0) * (y2 - y0) == (x2 - x0) * (y1 - y0)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7c806f0bc8aac7f23626a44e450aa60c6fed7f38 | 6ab28db2f6b1e6501c18668f92810618b5dc7847 | /GAN/main.py | c9424aaee44bf425c72b15aa85181571b3b4e0d4 | [] | no_license | GuoxianSong/AuGan | 08c3c29600f38446472481cadd5915b16466fdb3 | d13cb3eca4669a0d8bb9d10e0800d2ce87a788ed | refs/heads/master | 2021-01-02T08:55:08.134492 | 2017-08-11T07:11:23 | 2017-08-11T07:11:23 | 99,098,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import sys
import numpy as np
import tensorflow as tf
from trainer import Trainer
from config import get_config
from utils import prepare_dirs, save_config
config = None
def main():
    """Prepare directories, seed RNGs, then train or test via Trainer."""
    prepare_dirs(config)
    # Seed both numpy and TensorFlow so runs are reproducible.
    rng = np.random.RandomState(config.random_seed)
    tf.set_random_seed(config.random_seed)
    trainer = Trainer(config, rng)
    # Persist the resolved config alongside the model artifacts.
    save_config(config.model_dir, config)
    if config.is_train:
        trainer.train()
    else:
        # Testing requires a checkpoint to restore from.
        if not config.load_path:
            raise Exception("[!] You should specify `load_path` to load a pretrained model")
        trainer.test()
if __name__ == "__main__":
    # Parse CLI flags into the module-level `config` before running.
    config, unparsed = get_config()
    main()
    #tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"songguoxian@icloud.com"
] | songguoxian@icloud.com |
5fe3bf6dbffd3e9118e2fe234b81b3d61e52091e | d1deafeb9a3a9a8536d1c523ecfa1d245243fa6f | /.pythonrc.py | c3367536540d113bef040ff8f5ef19d20ad6deb9 | [] | no_license | riggs/dotfiles | 109f4ec08090f2b8ba2816df843971e0d416237e | bb09d17af7d7863a4d75985d9b9295614b805e56 | refs/heads/main | 2023-04-03T23:18:37.282769 | 2023-03-29T09:01:28 | 2023-03-29T09:01:28 | 5,709,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,424 | py | import os
import readline,rlcompleter
### Indenting
class TabCompleter(rlcompleter.Completer):
    """Completer that inserts an indent when there is nothing to complete,
    and otherwise defers to the stock rlcompleter behaviour."""
    def complete(self, text, state):
        if text:
            return rlcompleter.Completer.complete(self, text, state)
        # Empty text: offer a space as the sole "completion" so that
        # pressing Tab at the start of a line indents instead of listing
        # every name in scope.
        return (' ', None)[state]
readline.set_completer(TabCompleter().complete)
### Add autocompletion
if 'libedit' in readline.__doc__:
readline.parse_and_bind("bind -e")
readline.parse_and_bind("bind '\t' rl_complete")
else:
readline.parse_and_bind("tab: complete")
### Add history
import os
histfile = os.path.join(os.environ["HOME"], ".pyhistory")
try:
readline.read_history_file(histfile)
readline.set_history_length(300)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
del histfile
# Color Support
class TermColors(dict):
    """Gives easy access to ANSI color codes. Attempts to fall back to no
    color for certain TERM values. (Mostly stolen from IPython.)"""

    COLOR_TEMPLATES = (
        ("Black", "0;30"),
        ("Red", "0;31"),
        ("Green", "0;32"),
        ("Brown", "0;33"),
        ("Blue", "0;34"),
        ("Purple", "0;35"),
        ("Cyan", "0;36"),
        ("LightGray", "0;37"),
        ("DarkGray", "1;30"),
        ("LightRed", "1;31"),
        ("LightGreen", "1;32"),
        ("Yellow", "1;33"),
        ("LightBlue", "1;34"),
        ("LightPurple", "1;35"),
        ("LightCyan", "1;36"),
        ("White", "1;37"),
        ("Normal", "0"),
    )
    NoColor = ''
    _base = '\033[%sm'

    def __init__(self):
        # Only emit escape codes for terminals known to render them.
        color_terms = ('xterm-color', 'xterm-256color', 'linux',
                       'screen', 'screen-256color', 'screen-bce')
        colorable = os.environ.get('TERM') in color_terms
        for name, code in self.COLOR_TEMPLATES:
            self[name] = self._base % code if colorable else self.NoColor
# Shared palette for the interactive prompts below.
_c = TermColors()
import sys
# Enable Color Prompts
sys.ps1 = '%s>>> %s' % (_c['LightGreen'], _c['Normal'])
sys.ps2 = '%s... %s' % (_c['Red'], _c['Normal'])
# Enable Pretty Printing for stdout
import pprint
def my_displayhook(value):
    """Replacement sys.displayhook: pretty-print results and stash them in
    the builtin ``_``, mirroring the default hook's contract (None is
    ignored)."""
    if value is not None:
        # Bind `_` on the real builtins module.  The original tried the
        # Python 2 `__builtin__` module first and fell back to assigning on
        # `__builtins__`, which is a plain dict in non-__main__ modules and
        # would raise AttributeError there.
        try:
            import builtins            # Python 3
        except ImportError:            # Python 2 fallback
            import __builtin__ as builtins
        builtins._ = value
        pprint.pprint(value)
sys.displayhook = my_displayhook
| [
"deisum@gmail.com"
] | deisum@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.