blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4abbb28ccc08c6fbdc03c756d15d17e45181a96a | e0980ab62de1b3e49f4eb7e85c58c7361f2af62d | /tqblogscn/extra_apps/haystack/__init__.py | 12362df40224ad32e3b047f17bf71a88316d8ae4 | [] | no_license | Joinpython/tqblogs | 93ebbaf8a68b22fd364e99d5fccc3227ab7b48b9 | 43ae71f1e32d109c33484caec2653b3195f29a05 | refs/heads/master | 2023-01-11T23:34:43.828651 | 2018-03-30T03:04:10 | 2018-03-30T03:04:10 | 117,675,672 | 3 | 0 | null | 2022-12-27T15:24:55 | 2018-01-16T11:34:34 | JavaScript | UTF-8 | Python | false | false | 2,448 | py | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from pkg_resources import DistributionNotFound, get_distribution
from haystack import signals
from haystack.constants import DEFAULT_ALIAS
from haystack.utils import loading
__author__ = 'Daniel Lindsley'
try:
    # Prefer the installed distribution's version; fall back to a dev marker
    # when haystack is vendored (as here) rather than pip-installed.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    __version__ = (0, 0, 'dev0')
default_app_config = 'haystack.apps.HaystackConfig'
# Help people clean up from 1.X.
if hasattr(settings, 'HAYSTACK_SITECONF'):
    raise ImproperlyConfigured('The HAYSTACK_SITECONF setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_SEARCH_ENGINE'):
    raise ImproperlyConfigured('The HAYSTACK_SEARCH_ENGINE setting has been replaced with HAYSTACK_CONNECTIONS.')
if hasattr(settings, 'HAYSTACK_ENABLE_REGISTRATIONS'):
    raise ImproperlyConfigured('The HAYSTACK_ENABLE_REGISTRATIONS setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_INCLUDE_SPELLING'):
    raise ImproperlyConfigured('The HAYSTACK_INCLUDE_SPELLING setting is now a per-backend setting & belongs in HAYSTACK_CONNECTIONS.')
# Check the 2.X+ bits.
if not hasattr(settings, 'HAYSTACK_CONNECTIONS'):
    raise ImproperlyConfigured('The HAYSTACK_CONNECTIONS setting is required.')
if DEFAULT_ALIAS not in settings.HAYSTACK_CONNECTIONS:
    raise ImproperlyConfigured("The default alias '%s' must be included in the HAYSTACK_CONNECTIONS setting." % DEFAULT_ALIAS)
# Load the connections.
connections = loading.ConnectionHandler(settings.HAYSTACK_CONNECTIONS)
# Just check HAYSTACK_ROUTERS setting validity, routers will be loaded lazily
if hasattr(settings, 'HAYSTACK_ROUTERS'):
    if not isinstance(settings.HAYSTACK_ROUTERS, (list, tuple)):
        raise ImproperlyConfigured("The HAYSTACK_ROUTERS setting must be either a list or tuple.")
# Load the router(s).
connection_router = loading.ConnectionRouter()
# Per-request, reset the ghetto query log.
# Probably not extraordinarily thread-safe but should only matter when
# DEBUG = True.
def reset_search_queries(**kwargs):
    """Clear the accumulated query log on every configured backend."""
    for backend in connections.all():
        if not backend:
            # Alias has no live connection yet; nothing to reset.
            continue
        backend.reset_queries()
if settings.DEBUG:
    # Only wire the per-request reset hook in DEBUG mode, where the query
    # logs actually accumulate (see the comment above reset_search_queries).
    from django.core import signals as django_signals
    django_signals.request_started.connect(reset_search_queries)
| [
"3577451043@qq.com"
] | 3577451043@qq.com |
ce7bfaf85f5e55097d06bc2990ecc1757aabd37a | 673f9b85708affe260b892a4eb3b1f6a0bd39d44 | /Botnets/Phases/Phase 2/Algorithms/Algorithms after PDG-2/PDG-2-Fase-2-ENV/lib/python3.6/site-packages/setuptools/sandbox.py | e46dfc8d25e8accf6fb08c13b878da1550e4738f | [
"MIT"
] | permissive | i2tResearch/Ciberseguridad_web | feee3fe299029bef96b158d173ce2d28ef1418e4 | e6cccba69335816442c515d65d9aedea9e7dc58b | refs/heads/master | 2023-07-06T00:43:51.126684 | 2023-06-26T00:53:53 | 2023-06-26T00:53:53 | 94,152,032 | 14 | 0 | MIT | 2023-09-04T02:53:29 | 2017-06-13T00:21:00 | Jupyter Notebook | UTF-8 | Python | false | false | 14,284 | py | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import textwrap
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
import pkg_resources.py31compat
from distutils.errors import DistutilsError
from pkg_resources import working_set
if sys.platform.startswith('java'):
    # Jython: the posix module lives inside a Java package.
    import org.python.modules.posix.PosixModule as _os
else:
    # The real platform os implementation ('posix' or 'nt' module).
    _os = sys.modules[os.name]
try:
    # Python 2 only; the `file` builtin does not exist on Python 3.
    _file = file
except NameError:
    _file = None
# Keep a reference to the untouched builtin open() before sandboxing patches it.
_open = open
__all__ = [
    "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
    """Snapshot ``sys.argv`` (optionally replacing it) and restore on exit."""
    snapshot = sys.argv[:]
    if repl is not None:
        sys.argv[:] = repl
    try:
        yield snapshot
    finally:
        sys.argv[:] = snapshot
@contextlib.contextmanager
def save_path():
    """Preserve ``sys.path`` across the managed block, restoring it on exit."""
    snapshot = sys.path[:]
    try:
        yield snapshot
    finally:
        sys.path[:] = snapshot
@contextlib.contextmanager
def override_temp(replacement):
    """
    Monkey-patch tempfile.tempdir with replacement, ensuring it exists
    """
    # py31compat shim: os.makedirs(exist_ok=True) is unavailable on py2/3.1.
    pkg_resources.py31compat.makedirs(replacement, exist_ok=True)
    saved = tempfile.tempdir
    tempfile.tempdir = replacement
    try:
        yield
    finally:
        # Restore the previous tempdir even if the body raised.
        tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
    """chdir into *target* for the duration of the block, yielding the
    directory that was current beforehand."""
    previous = os.getcwd()
    os.chdir(target)
    try:
        yield previous
    finally:
        os.chdir(previous)
class UnpickleableException(Exception):
    """
    An exception representing another Exception that could not be pickled.
    """
    @staticmethod
    def dump(type, exc):
        """
        Always return a dumped (pickled) type and exc. If exc can't be pickled,
        wrap it in UnpickleableException first.
        """
        try:
            return pickle.dumps(type), pickle.dumps(exc)
        except Exception:
            # get UnpickleableException inside the sandbox
            from setuptools.sandbox import UnpickleableException as cls
            # Recurse on a string-based wrapper; this terminates because an
            # UnpickleableException built from repr(exc) always pickles.
            return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
    """
    A Context Manager that will save an exception, serialized, and restore it
    later.

    Used by save_modules() to carry an exception raised inside the sandboxed
    module state across the restoration of sys.modules.
    """
    def __enter__(self):
        return self
    def __exit__(self, type, exc, tb):
        if not exc:
            # No exception occurred; nothing to capture.
            return
        # dump the exception (pickled, so it survives module-state rollback)
        self._saved = UnpickleableException.dump(type, exc)
        self._tb = tb
        # suppress the exception
        return True
    def resume(self):
        "restore and re-raise any exception"
        if '_saved' not in vars(self):
            # __exit__ never captured anything; nothing to re-raise.
            return
        type, exc = map(pickle.loads, self._saved)
        # Re-raise with the original traceback attached.
        six.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
    """
    Context in which imported modules are saved.
    Translates exceptions internal to the context into the equivalent exception
    outside the context.
    """
    saved = sys.modules.copy()
    # ExceptionSaver suppresses any exception raised in the body so the
    # module-state rollback below always runs; it is re-raised at the end.
    with ExceptionSaver() as saved_exc:
        yield saved
    # Restore the snapshot of previously-imported modules.
    sys.modules.update(saved)
    # remove any modules imported since
    del_modules = (
        mod_name for mod_name in sys.modules
        if mod_name not in saved
        # exclude any encodings modules. See #285
        and not mod_name.startswith('encodings.')
    )
    _clear_modules(del_modules)
    saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
    # Snapshot pkg_resources' global registries (working set, etc.) and
    # restore them on exit, even when the body raises.
    saved = pkg_resources.__getstate__()
    try:
        yield saved
    finally:
        pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
    """Stack every state-saving guard needed to run an untrusted setup.py:
    pkg_resources state, sys.modules, sys.path, sys.argv, tempdir and cwd
    are all snapshotted and restored on exit."""
    temp_dir = os.path.join(setup_dir, 'temp')
    with save_pkg_resources_state():
        with save_modules():
            # Drop cached setuptools/distutils modules so the setup script
            # imports whichever version is appropriate for it.
            hide_setuptools()
            with save_path():
                with save_argv():
                    with override_temp(temp_dir):
                        with pushd(setup_dir):
                            # ensure setuptools commands are available
                            __import__('setuptools')
                            yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
    """
    Remove references to setuptools' modules from sys.modules to allow the
    invocation to import the most appropriate setuptools. This technique is
    necessary to avoid issues such as #315 where setuptools upgrading itself
    would fail to find a function declared in the metadata.
    """
    to_hide = [mod_name for mod_name in sys.modules if _needs_hiding(mod_name)]
    _clear_modules(to_hide)
def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory.

    setup_script -- path to the setup.py to execute.
    args         -- command-line arguments passed to it via sys.argv.

    Raises SystemExit only for a non-zero/meaningful exit code; a normal
    exit is swallowed.  Filesystem writes outside the script's directory
    raise SandboxViolation.
    """
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    with setup_context(setup_dir):
        try:
            sys.argv[:] = [setup_script] + list(args)
            sys.path.insert(0, setup_dir)
            # reset to include setup dir, w/clean callback list
            working_set.__init__()
            working_set.callbacks.append(lambda dist: dist.activate())
            # __file__ should be a byte string on Python 2 (#712)
            dunder_file = (
                setup_script
                if isinstance(setup_script, str) else
                setup_script.encode(sys.getfilesystemencoding())
            )
            with DirectorySandbox(setup_dir):
                ns = dict(__file__=dunder_file, __name__='__main__')
                _execfile(setup_script, ns)
        except SystemExit as v:
            if v.args and v.args[0]:
                raise
            # Normal exit, just return
class AbstractSandbox:
    """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts.

    While active (inside ``with``/``run``), the real ``os`` module's
    functions are replaced with wrapper methods that funnel every path
    through ``_remap_input``/``_remap_output``/``_remap_pair``; subclasses
    override those hooks to validate or rewrite paths.
    """
    # True only while this sandbox is installed over the real os module.
    _active = False
    def __init__(self):
        # Every public os attribute that this class also defines gets
        # swapped in/out by _copy().
        self._attrs = [
            name for name in dir(_os)
            if not name.startswith('_') and hasattr(self, name)
        ]
    def _copy(self, source):
        # Patch the real os module with `source`'s attributes (either this
        # sandbox's wrappers, or the saved originals from _os).
        for name in self._attrs:
            setattr(os, name, getattr(source, name))
    def __enter__(self):
        self._copy(self)
        if _file:
            # Python 2 only: also wrap the `file` builtin.
            builtins.file = self._file
        builtins.open = self._open
        self._active = True
    def __exit__(self, exc_type, exc_value, traceback):
        # Deactivate first so wrapper methods fall through to the originals.
        self._active = False
        if _file:
            builtins.file = _file
        builtins.open = _open
        self._copy(_os)
    def run(self, func):
        """Run 'func' under os sandboxing"""
        with self:
            return func()
    # NOTE: the _mk_* factories below execute at class-definition time (they
    # take `name`, not `self`) and install generated wrapper methods into the
    # class namespace via locals().
    def _mk_dual_path_wrapper(name):
        # Wrapper for os functions taking two paths (rename/link/symlink).
        original = getattr(_os, name)
        def wrap(self, src, dst, *args, **kw):
            if self._active:
                src, dst = self._remap_pair(name, src, dst, *args, **kw)
            return original(src, dst, *args, **kw)
        return wrap
    for name in ["rename", "link", "symlink"]:
        if hasattr(_os, name):
            locals()[name] = _mk_dual_path_wrapper(name)
    def _mk_single_path_wrapper(name, original=None):
        # Wrapper for os functions taking a single path argument.
        original = original or getattr(_os, name)
        def wrap(self, path, *args, **kw):
            if self._active:
                path = self._remap_input(name, path, *args, **kw)
            return original(path, *args, **kw)
        return wrap
    if _file:
        _file = _mk_single_path_wrapper('file', _file)
    _open = _mk_single_path_wrapper('open', _open)
    for name in [
        "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
        "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
        "startfile", "mkfifo", "mknod", "pathconf", "access"
    ]:
        if hasattr(_os, name):
            locals()[name] = _mk_single_path_wrapper(name)
    def _mk_single_with_return(name):
        # Like _mk_single_path_wrapper, but the return value is also a path
        # and therefore gets passed through _remap_output.
        original = getattr(_os, name)
        def wrap(self, path, *args, **kw):
            if self._active:
                path = self._remap_input(name, path, *args, **kw)
                return self._remap_output(name, original(path, *args, **kw))
            return original(path, *args, **kw)
        return wrap
    for name in ['readlink', 'tempnam']:
        if hasattr(_os, name):
            locals()[name] = _mk_single_with_return(name)
    def _mk_query(name):
        # Wrapper for os functions taking no path but returning one.
        original = getattr(_os, name)
        def wrap(self, *args, **kw):
            retval = original(*args, **kw)
            if self._active:
                return self._remap_output(name, retval)
            return retval
        return wrap
    for name in ['getcwd', 'tmpnam']:
        if hasattr(_os, name):
            locals()[name] = _mk_query(name)
    def _validate_path(self, path):
        """Called to remap or validate any path, whether input or output"""
        return path
    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        return self._validate_path(path)
    def _remap_output(self, operation, path):
        """Called for path outputs"""
        return self._validate_path(path)
    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        return (
            self._remap_input(operation + '-from', src, *args, **kw),
            self._remap_input(operation + '-to', dst, *args, **kw)
        )
# Writing to os.devnull is always harmless, so it is exempted by default.
if hasattr(os, 'devnull'):
    _EXCEPTIONS = [os.devnull]
else:
    _EXCEPTIONS = []
class DirectorySandbox(AbstractSandbox):
    """Restrict operations to a single subdirectory - pseudo-chroot"""
    # Operations treated as writes and therefore path-checked; reads pass.
    write_ops = dict.fromkeys([
        "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
        "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
    ])
    _exception_patterns = [
        # Allow lib2to3 to attempt to save a pickled grammar object (#121)
        r'.*lib2to3.*\.pickle$',
    ]
    "exempt writing to paths that match the pattern"
    def __init__(self, sandbox, exceptions=_EXCEPTIONS):
        # Normalize once so later prefix comparisons are case- and
        # symlink-insensitive.
        self._sandbox = os.path.normcase(os.path.realpath(sandbox))
        self._prefix = os.path.join(self._sandbox, '')
        self._exceptions = [
            os.path.normcase(os.path.realpath(path))
            for path in exceptions
        ]
        AbstractSandbox.__init__(self)
    def _violation(self, operation, *args, **kw):
        from setuptools.sandbox import SandboxViolation
        raise SandboxViolation(operation, args, kw)
    if _file:
        def _file(self, path, mode='r', *args, **kw):
            # Only write modes are restricted; any read mode passes through.
            if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
                self._violation("file", path, mode, *args, **kw)
            return _file(path, mode, *args, **kw)
    def _open(self, path, mode='r', *args, **kw):
        # Only write modes are restricted; any read mode passes through.
        if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
            self._violation("open", path, mode, *args, **kw)
        return _open(path, mode, *args, **kw)
    def tmpnam(self):
        # tmpnam would create outside the sandbox; always forbidden.
        self._violation("tmpnam")
    def _ok(self, path):
        # Temporarily deactivate wrapping so realpath() below uses the real
        # os functions rather than recursing into the sandbox.
        active = self._active
        try:
            self._active = False
            realpath = os.path.normcase(os.path.realpath(path))
            return (
                self._exempted(realpath)
                or realpath == self._sandbox
                or realpath.startswith(self._prefix)
            )
        finally:
            self._active = active
    def _exempted(self, filepath):
        # Exempt either a configured path prefix or a regex pattern match.
        start_matches = (
            filepath.startswith(exception)
            for exception in self._exceptions
        )
        pattern_matches = (
            re.match(pattern, filepath)
            for pattern in self._exception_patterns
        )
        candidates = itertools.chain(start_matches, pattern_matches)
        return any(candidates)
    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        if operation in self.write_ops and not self._ok(path):
            self._violation(operation, os.path.realpath(path), *args, **kw)
        return path
    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        if not self._ok(src) or not self._ok(dst):
            self._violation(operation, src, dst, *args, **kw)
        return (src, dst)
    def open(self, file, flags, mode=0o777, *args, **kw):
        """Called for low-level os.open()"""
        if flags & WRITE_FLAGS and not self._ok(file):
            self._violation("os.open", file, flags, mode, *args, **kw)
        return _os.open(file, flags, mode, *args, **kw)
# Bitmask of the os.open() flags that imply write access; flags missing on
# this platform contribute 0 via getattr's default.
WRITE_FLAGS = functools.reduce(
    operator.or_, [
        getattr(_os, a, 0) for a in
        "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
    """A setup script attempted to modify the filesystem outside the sandbox"""
    tmpl = textwrap.dedent("""
        SandboxViolation: {cmd}{args!r} {kwargs}
        The package setup script has attempted to modify files on your system
        that are not within the EasyInstall build area, and has been aborted.
        This package cannot be safely installed by EasyInstall, and may not
        support alternate installation locations even if you run its setup
        script by hand. Please inform the package's author and the EasyInstall
        maintainers to find out if a fix or workaround is available.
        """).lstrip()
    def __str__(self):
        # self.args == (cmd, args, kwargs) as raised by
        # DirectorySandbox._violation; format(**locals()) picks them up.
        cmd, args, kwargs = self.args
        return self.tmpl.format(**locals())
| [
"ulcamilo@gmail.com"
] | ulcamilo@gmail.com |
9a77f1e6c9ad0e39e85484d1e1f04ba613870d62 | a70b482f30bf471a418b2c0996b4fa541e0729ca | /pattern_visualizing.py | a04b998333f43134670eea363a544bb86104879e | [] | no_license | radroof22/Portfolio-Management-Policy-Gradient | 0c88baf61e78e4276d59b04c3525a63b8aff9a70 | 98b6ac64bf48c9842ee98c02ac225fd7294ae1cc | refs/heads/master | 2023-05-10T22:34:46.518104 | 2021-05-25T00:50:25 | 2021-05-25T00:50:25 | 169,523,014 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | py | import torch
from Environment import Environment
from regular_policy import Agent, select_action, format_action
import matplotlib.pyplot as plt
env = Environment()
# CONSTANTS
MODEL_PATH = "models/regular/model_proper_portfolio_value.pt"
N_TESTS = 1
# Restore the trained policy network onto the GPU before evaluation.
agent = Agent().cuda()
agent.load_state_dict(torch.load(MODEL_PATH))
if __name__ == "__main__":
    # Roll out the trained agent on one stock and plot its buy/sell points
    # over the closing-price curve.
    history = []
    ## Graph Setup
    state = env.reset()
    df = env.get_df()
    x_axis = [i for i in range(len(df))]
    # step index -> close price at which the agent bought/sold.
    buy_line = {}
    sell_line = {}
    for episode in range(N_TESTS):
        print("Episode {}".format(episode))
        # NOTE(review): episode_actions and port_value are filled/declared
        # but never used after the loop.
        episode_actions = {
            "hold": 0,
            "buy": 0,
            "sell": 0
        }
        done = False
        # Steps start at 30 -- presumably the environment's warm-up window;
        # verify against Environment.reset().
        step = 30
        port_value = []
        while not done:
            # Step through environment using chosen action
            actions = select_action(state.values.reshape(env.observation_space))
            action_dict = format_action(actions, monitor=False)
            state, reward, done = env.step(action_dict)
            agent.reward_episode.append(reward)
            # Record trade markers at the latest close price.
            if action_dict["sell"] != 0:
                sell_line[step] = state.iloc[-1]["Close"]
            if action_dict["buy"] != 0:
                buy_line[step] = state.iloc[-1]["Close"]
            step += 1
            port_value.append(env.portfolio["balance"])
        env_change = env.net_change()
        # Fractional gain relative to the 100,000 starting balance.
        cash_change = (reward - 100000 ) / 100000
        history.append((reward, env_change, cash_change))
        print(env.stock_list[env.stock_i])
    print("*"*30)
    env_change = env.net_change()
    print("Stock Appreciation: {}".format(env_change))
    print("Final Balance: {}".format(history))
    print(env.portfolio)
    # (Commented-out plots of open/high/low were here.)
    plt.title("Bot Trades")
    plt.plot(list(buy_line.keys()), list(buy_line.values()) , 'go-', label="Buy")
    plt.plot(list(sell_line.keys() ), list(sell_line.values()), 'ro-', label="Sell")
    plt.plot(x_axis, df["Close"] , color="black", label="Close")
    plt.show()
| [
"mehta.rohan@outlook.com"
] | mehta.rohan@outlook.com |
b6c7f4472327222b22711acdd68b5ac54f2469ca | 8ce5b02d02727461763a01578bc7f8a7bdb494f7 | /jennifer & Laith/laith_eatit.py | 7915543e23b3de6a7e6aa705cb744a04c2aded33 | [
"MIT"
] | permissive | idane19-meet/EAT_IT | 7fd679829909981b8b7837759d6ff076d611aad5 | 7f87c0717232775dca449aec1489d7f19fd82c7c | refs/heads/master | 2020-12-02T11:30:01.385957 | 2017-08-10T15:46:51 | 2017-08-10T15:46:51 | 96,643,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,751 | py | import random
import turtle
import time
def menu():
    """Show the start prompt; exit the program on NO/QUIT, continue on YES."""
    choice = input('would you like to start the game? \n (YES/NO) \n would you like to quit the menu bar? \n (QUIT) \n *PLEASE USE CAPITAL LETTERS \n YOUR ANSWER: ')
    if choice in ('NO', 'QUIT'):
        quit()
    if choice == 'YES':
        # Blank line before the game setup continues.
        print('')
menu()
print('are you MALE/FEMALE ? ')
print('*PLEASE USE CAPITAL LETTERS')
gender = input('ANSWER:')
#lists
box_color_list = ["box1.gif", "box2.gif", "box3.gif", "box4.gif", "box5.gif"]
background_list = ["background1.gif", "background2.gif", "background3.gif", "background4.gif"]
randombox = random.randint (0, len(box_color_list)-1)
this_box = box_color_list[randombox]
box = turtle.clone()
turtle.register_shape(this_box)
box.shape(this_box)
background = random.randint (0,4)
screen = turtle.Screen()
randbackground = random.randint (0,len(background_list)-1)
this_background = background_list [randbackground]
turtle.register_shape(this_background)
turtle.bgpic (this_background)
turtle.tracer(1, 0)
turtle2 = turtle.clone()
score = 0
turtle2.write(str(score))
turtle2.ht()
turtle.penup()
#bird = turtle.clone()
#turtle.addshape('bird.gif')
#bird.shape('bird.gif')
turtle.shape('circle')
#turtle.hideturtle()
turtle.Screen()
turtle.fillcolor('white')
screen = turtle.Screen()
screen.bgcolor('light blue')
turtle.goto(0,-200)
good_food_pos= []
boxes_list = []
bad_food_pos = []
good_food_stamps = []
bad_food_stamps = []
box_stamps = []
box_pos=[]
bird_pos=[]
turtles_list = []
SIZE_X = 400
SIZE_Y = 400
turtle.setup(500,500)
player_size = 10
my_pos = turtle.pos()
x_pos = my_pos[0]
y_pos = my_pos[1]
UP_EDGE = 200
DOWN_EDGE = -200
RIGHT_EDGE = 200
LEFT_EDGE = -200
UP_ARROW = 'Up'
LEFT_ARROW = 'Left'
DOWN_ARROW = 'Down'
RIGHT_ARROW = 'Right'
TIME_STEP = 100
TIME_STEP2 = 10000
SPACEBAR = 'space'
def move_player():
    """Wrap the player around the horizontal edges and eat food at its spot.

    Reads/writes module globals: the `turtle` player, `score`, the
    good/bad food position and stamp lists, and the score display turtle2.
    Called after every arrow-key press.
    """
    my_pos = turtle.pos()
    x_pos = my_pos[0]
    y_pos = my_pos[1]
    # NOTE(review): these bounds flags are computed but never used below.
    x_ok = LEFT_EDGE <= x_pos <= RIGHT_EDGE
    y_ok = UP_EDGE >= y_pos >= DOWN_EDGE
    within_bounds = x_ok and y_ok
    # Horizontal wrap-around: landing exactly on one edge teleports the
    # player just inside the opposite edge.
    if turtle.pos()[0] == RIGHT_EDGE:
        turtle.goto (LEFT_EDGE + 20,turtle.pos()[1])
    if turtle.pos()[0] == LEFT_EDGE :
        turtle.goto (RIGHT_EDGE - 20,turtle.pos()[1])
    # (A large block of commented-out edge-clamping/movement code was here.)
    global food,score
    #turtle.ontimer(move_player,TIME_STEP)
    # Exact-position match with a good-food stamp: remove it, score +1,
    # and spawn a replacement.
    if turtle.pos() in good_food_pos:
        good_food_ind = good_food_pos.index(turtle.pos())
        food.clearstamp(good_food_stamps[good_food_ind])
        good_food_stamps.pop(good_food_ind)
        good_food_pos.pop(good_food_ind)
        print('EATEN GOOD FOOD!')
        score = score + 1
        turtle2.clear()
        turtle2.write(str(score))
        good_food()
    # Bad food: remove it, score -1; the game ends when the score hits -5.
    if turtle.pos() in bad_food_pos:
        bad_food_ind = bad_food_pos.index(turtle.pos())
        bad_food.clearstamp(bad_food_stamps[bad_food_ind])
        bad_food_stamps.pop(bad_food_ind)
        bad_food_pos.pop(bad_food_ind)
        print('EATEN BAD FOOD!')
        score = score - 1
        turtle2.clear()
        turtle2.write(str(score))
        if score == -5:
            print('GAME OVER!')
            quit()
        bad_food1()
UP = 0
LEFT = 1
DOWN = 2
RIGHT = 3
direction = DOWN
turtle.register_shape('man_right.gif')
turtle.register_shape('man_left.gif')
turtle.register_shape('woman_right.gif')
turtle.register_shape('woman_left.gif')
if gender == "MALE" :
turtle.shape('man_right.gif')
else:
turtle.shape('woman_right.gif')
def left():
    """Key handler: face the player sprite left and advance the game tick."""
    global direction
    direction = LEFT
    # gender is a module global read from the setup prompt.
    if gender == "MALE" :
        turtle.shape('man_left.gif')
    else:
        turtle.shape('woman_left.gif')
    move_player()
    print('you pressed the left key')
def right():
    """Key handler: face the player sprite right and advance the game tick."""
    global direction
    direction = RIGHT
    # gender is a module global read from the setup prompt.
    if gender == "MALE" :
        turtle.shape('man_right.gif')
    else:
        turtle.shape('woman_right.gif')
    move_player()
    print('you pressed the right key')
turtle.onkeypress(left, LEFT_ARROW)
turtle.onkeypress(right, RIGHT_ARROW)
turtle.listen()
good_pos = (0,0) ##
food = turtle.clone()
food.shape('square')
food.fillcolor('green')
food.hideturtle()
def good_food():
    """Stamp a new good-food square at a random x on the player's row.

    Appends the stamp position/id to good_food_pos/good_food_stamps so
    move_player() can detect and clear it later.
    """
    # Random grid column within the playfield, snapped to player_size steps.
    min_x=-int(SIZE_X/2/player_size)+1
    max_x=int(SIZE_X/2/player_size)-1
    food_x = random.randint(min_x,max_x)*player_size
    food.goto(food_x,turtle.pos()[1])
    good_food_pos.append(food.pos())
    stampnew = food.stamp()
    #stamp_old = food_stamps[-1]
    good_food_stamps.append(stampnew)
def create_box():
    """Spawn one falling red box at a random column at the top of the screen.

    The clone is appended to turtles_list so fall() can animate it.
    """
    global y_pos,box,SIZE_X,player_size
    top_y = 300
    min_x=-int(SIZE_X/2/player_size)+1
    max_x=int(SIZE_X/2/player_size)-1
    x = random.randint(min_x,max_x)*player_size
    turtles_list.append(turtle.clone())
    turtles_list[-1].hideturtle()
    turtles_list[-1].shape("square")
    turtles_list[-1].fillcolor('red')
    turtles_list[-1].goto(x,top_y)
    turtles_list[-1].showturtle()
    # NOTE(review): the position is re-randomized a second time below,
    # duplicating the four lines above.
    min_x=-int(SIZE_X/2/player_size)+1
    max_x=int(SIZE_X/2/player_size)-1
    x = random.randint(min_x,max_x)*player_size
    turtles_list[-1].goto(x,top_y)
    turtles_list[-1].showturtle()
    chose_number()
#box.goto(x,y_pos)
#box.goto(x,260)
#box.addshape('box.gif')
#box.shape('box.gif')
#all_way = 510
count = 0
def fall():
    """Timer callback: drop every box 25px per tick and spawn new waves.

    Boxes stop falling once they reach the player's row.  Every 100 ticks
    an escalating number of boxes (count//100) is created.  Re-arms itself
    via turtle.ontimer.
    """
    global turtles_list,top_y,x_pos,turtle,count
    for my_clone in turtles_list:
        x1 = my_clone.pos()[0]
        y1 = my_clone.pos()[1]
        if y1 > turtle.pos()[1]:
            y1 = y1 -25
            #x1 = x_pos
            my_clone.goto(x1,y1)
    count += 1
    print(count)
    if count%100==0:
        num_box = count//100
        for i in range(num_box):
            create_box()
    #for num_box in :
    #create_box()
    #turtle.ontimer(create_box,TIME_STEP2)
    turtle.ontimer(fall,TIME_STEP)
def jump():
    """Move the player up 20px when facing UP, snapping off overlapping boxes.

    NOTE(review): this function looks unfinished -- the inner condition is
    duplicated, `y1` and `my_clone` are globals last set elsewhere (fall()'s
    loop / module setup), so the landing logic is unreliable.  Confirm
    intended behavior before relying on it.
    """
    global direction,x_pos,y_pos,my_pos,y1
    if direction == UP:
        turtle.goto(turtle.pos()[0],turtle.pos()[1] + 20)
    for my_turtle in turtles_list:
        if turtle.pos() == my_turtle.pos():
            if turtle.pos() == my_turtle.pos():
                turtle.goto(turtle.pos()[0],y1)
    if not turtle.pos() == my_clone.pos():
        turtle.goto(turtle.pos()[0],turtle.pos()[1] - 20)
def chose_number():
    """Add 1-3 extra square clones and scatter boxes along the top row.

    NOTE(review): the second loop repositions EVERY box in boxes_list on
    each call, not just the newly created ones -- confirm that is intended.
    """
    number_of_boxes=random.randint(1,3)
    for i in range (number_of_boxes):
        x5 = turtle.clone()
        x5.shape("square")
        boxes_list.append(x5)
    for g in boxes_list:
        g.goto(random.randint(-200,200),200)
bad_pos = (0,0)
bad_food = turtle.clone()
bad_food.shape('square')
bad_food.fillcolor('black')
bad_food.hideturtle()
def bad_food1():
    """Stamp a new bad-food square at a random x on row y_pos.

    Appends the stamp position/id to bad_food_pos/bad_food_stamps so
    move_player() can detect and clear it later.
    """
    global SIZE_X,player_size,y_pos,bad_food
    # Random grid column within the playfield, snapped to player_size steps.
    min_x=-int(SIZE_X/2/player_size)+1
    max_x=int(SIZE_X/2/player_size)-1
    bad_food_x = random.randint(min_x,max_x)*player_size
    bad_food.goto(bad_food_x,y_pos)
    bad_food_pos.append(bad_food.pos())
    bad_stamp_new = bad_food.stamp()
    #stamp_old = food_stamps[-1]
    bad_food_stamps.append(bad_stamp_new)
my_clone = turtle.clone()
my_clone.ht()
bad_food1()
good_food()
move_player()
create_box()
fall()
if turtle.pos() in box_pos:
print("YOU LOST !")
quit()
| [
"idane19@meet.mit.edu"
] | idane19@meet.mit.edu |
fc24e3042848ca03a74b20f74ca33014dc5208cd | 1ae38180c23e015fb0da9ac7cb00ea72191dc5fe | /cbvcrudapp_prj/wsgi.py | 498cc9c1ca85e79e482115ab66756f1777c34380 | [
"MIT"
] | permissive | prasanthn/django-cbvcrudapp | fa55bd6e1f862ae324985d39e70a99725dced295 | b95d909cd144943340fd49cca6e2ebfb9365b66e | refs/heads/master | 2021-01-16T18:43:56.407286 | 2014-02-17T04:20:03 | 2014-02-17T04:20:03 | 16,858,416 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for cbvcrudapp_prj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cbvcrudapp_prj.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"prasanth.n@outlook.com"
] | prasanth.n@outlook.com |
73c79b907bb4e2885350fb638e84aabe58a21fba | 15586ec0f2ccdb55c28541e6ca12422bcf6f5182 | /acosf.py | 73c1e0dc3225fef4302841778b93d41c7de6d46a | [] | no_license | abdul99ahad/pycalc | 6de64500c583702fa4fd91a6b8bc916b18287817 | 8068f78ce6921bcc0069249c682574cb5d3c200a | refs/heads/main | 2023-03-10T18:45:35.108218 | 2021-02-23T18:09:32 | 2021-02-23T18:09:32 | 341,642,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | from math import *
def arc_cos(display, deg, a, c):
    """Arc-cosine handler for the calculator display.

    display: value whose arc cosine is taken; must lie in [-1, 1].
    deg:     True -> result in degrees, False -> result in radians.
    a:       pending operand; when positive it replaces display and is reset to 0.
    c:       operation code; always set to 5 before returning.

    Returns the updated (display, deg, a, c) tuple.  On any error the
    inputs are returned unchanged apart from c.
    """
    try:
        if a > 0:
            # A pending operand takes precedence over the displayed value.
            display = a
            a = 0
        if deg == True:
            display = float(display)
            if -1 <= display <= 1:
                # math.acos returns radians; convert for degree mode.
                display = degrees(acos(display))
            else:
                # NOTE(review): text_input is not defined in this module;
                # presumably the calculator UI's Tk StringVar -- confirm.
                text_input.set('ERROR')
        elif deg == False:
            display = float(display)
            if -1 <= display <= 1:
                # BUGFIX: acos() already yields radians -- the original code
                # additionally called radians(), scaling the result by pi/180.
                display = acos(display)
            else:
                text_input.set('ERROR')
    except Exception:
        # Swallow conversion/UI errors so the calculator keeps running.
        pass
    c = 5
    return display, deg, a, c
| [
"abdul9ahad@gmail.com"
] | abdul9ahad@gmail.com |
acc92bd6a6b69bf0413a2dbf15c334b74b5576f9 | 8d3a5dd6da1382ab890b657016470e836e0ef135 | /armstrong.py | 237f5126420e9d9d680972bdbb9106053df55827 | [] | no_license | JuhiKumariMahato/level1 | e8edc7ce9103db33df2bd3e234c210e260bca0a7 | 4d9bb78f1f13c63e52c4f6b1b4bb54eae028e48d | refs/heads/main | 2023-06-22T10:38:49.212249 | 2021-07-23T12:08:56 | 2021-07-23T12:08:56 | 388,788,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | n=int(input("enter the no"))
def is_armstrong(x):
    """Return True if x equals the sum of its digits, each raised to the
    power of the digit count (a narcissistic/Armstrong number).

    Generalizes the original script, which hard-coded r**3 and therefore
    only classified 3-digit numbers correctly.
    """
    digits = str(abs(x))
    return x == sum(int(d) ** len(digits) for d in digits)


if __name__ == "__main__":
    # Interactive entry point, preserved from the original script.
    n = int(input("enter the no"))
    if is_armstrong(n):
        print("the number is an armstrong no")
    else:
        print("not an armstrong no")
| [
"noreply@github.com"
] | JuhiKumariMahato.noreply@github.com |
7c68e9555011e76ecb807ab9b5340bbc994a8aca | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_pkg_firewall_policy_vpndstnode.py | cc4c839796dde50386c6787ae3951a868ea8cab2 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 7,919 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Ansible plugin metadata: preview-status module supported by the community.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_pkg_firewall_policy_vpndstnode
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
pkg:
description: the parameter (pkg) in requested url
type: str
required: true
policy:
description: the parameter (policy) in requested url
type: str
required: true
pkg_firewall_policy_vpndstnode:
description: the top level parameters set
required: false
type: dict
suboptions:
host:
type: str
description: no description
seq:
type: int
description: no description
subnet:
type: str
description: no description
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: no description
fmgr_pkg_firewall_policy_vpndstnode:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
pkg: <your own value>
policy: <your own value>
state: <value in [present, absent]>
pkg_firewall_policy_vpndstnode:
host: <value of string>
seq: <value of integer>
subnet: <value of string>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
    """Entry point: declare the FortiManager API endpoints and argument
    schema for the vpn_dst_node table, then delegate CRUD handling to
    NAPIManager over the httpapi connection."""
    # Collection-level URL (create / list requests).
    jrpc_urls = [
        '/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy/{policy}/vpn_dst_node'
    ]
    # Per-object URL (get / update / delete of a single entry, keyed by seq).
    perobject_jrpc_urls = [
        '/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy/{policy}/vpn_dst_node/{vpn_dst_node}'
    ]
    # Placeholders in the URLs that are filled from module parameters.
    url_params = ['adom', 'pkg', 'policy']
    module_primary_key = 'seq'
    # Ansible argument specification; mirrors the options documented above.
    module_arg_spec = {
        'bypass_validation': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'workspace_locking_adom': {
            'type': 'str',
            'required': False
        },
        'workspace_locking_timeout': {
            'type': 'int',
            'required': False,
            'default': 300
        },
        'rc_succeeded': {
            'required': False,
            'type': 'list'
        },
        'rc_failed': {
            'required': False,
            'type': 'list'
        },
        'state': {
            'type': 'str',
            'required': True,
            'choices': [
                'present',
                'absent'
            ]
        },
        'adom': {
            'required': True,
            'type': 'str'
        },
        'pkg': {
            'required': True,
            'type': 'str'
        },
        'policy': {
            'required': True,
            'type': 'str'
        },
        'pkg_firewall_policy_vpndstnode': {
            'required': False,
            'type': 'dict',
            'options': {
                'host': {
                    'required': False,
                    'type': 'str'
                },
                'seq': {
                    'required': True,
                    'type': 'int'
                },
                'subnet': {
                    'required': False,
                    'type': 'str'
                }
            }
        }
    }
    # No extra cross-parameter validation rules for this module.
    params_validation_blob = []
    check_galaxy_version(module_arg_spec)
    module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'pkg_firewall_policy_vpndstnode'),
                           supports_check_mode=False)
    fmgr = None
    # A socket path is only present when running under the httpapi connection
    # plugin - this module cannot work over plain SSH.
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
        fmgr.validate_parameters(params_validation_blob)
        # Perform the create/update/delete dictated by `state`.
        fmgr.process_curd()
    else:
        module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
    module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
02ab6ce4b0a5e3cc8f4857f83855687843f7324c | 29f65ef4059ba04c20558f3be36c06fe3879a8e6 | /c1/func.py | a173c0eafdf8e495d94cfb2dc8c14bfc80c1e2be | [] | no_license | kobe24shou/pythonwebdev | d9c912bd9304802069bc41345b054b065a173272 | c7c6c5af69e7d8783e5c8b15f75e9ca61ed6a03f | refs/heads/master | 2020-03-17T23:01:41.787573 | 2018-06-06T14:11:21 | 2018-06-06T14:11:21 | 134,028,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!/usr/bin/env python
# -*-coding:utf-8-*-
def sum(x, y):
    """Return x plus y. (Shadows the builtin `sum`; name kept for callers.)"""
    combined = x + y
    return combined
def total(x, y, z):
    """Return a pair: (x + y, x + y + z), built via the local sum() helper."""
    pair_sum = sum(x, y)
    triple_sum = sum(pair_sum, z)
    return pair_sum, triple_sum
# Define the main() function, which takes no parameters and returns no value
def main():
    """Demo entry point: exercise sum() and total() and print the results."""
    print("return of sum:", sum(4, 6))
    two_sum, three_sum = total(1, 7, 10)
    print("return of total:", two_sum, ",", three_sum)
if __name__ == '__main__':
main() | [
"aishou24@gmail.com"
] | aishou24@gmail.com |
a8ba8f5cd6428d840bbf8e2ae7702e2812e943d5 | 4a83e3197e0109709261ab476b97b70f06dd2535 | /minker/minker.py | 25e355574b17bec406efa51886d6807650318650 | [
"MIT"
] | permissive | RetepRennelk/minker | e5eea27674530d9070ba9483646ed5d1110d188d | 10eec582d53015e82ca628660451bcd6d3e2df6e | refs/heads/master | 2020-04-05T17:36:51.366472 | 2018-12-08T08:08:11 | 2018-12-08T08:08:11 | 157,068,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtGui import QIcon, QFont
from minker.tablewidget import TableWidget
from pathlib import Path
import minker.config as config
class MainWindow(QMainWindow):
    """Top-level application window hosting the central TableWidget."""

    def __init__(self):
        super().__init__()
        icon_path = Path(__file__).parent / 'list.png'
        self.setWindowIcon(QIcon(str(icon_path)))
        self.setWindowTitle()
        table = TableWidget(self)
        self.setCentralWidget(table)

    def setWindowTitle(self):
        # Base title with no file name attached.
        super().setWindowTitle(config.windowTitle)

    def setExtendedWindowTitle(self, fileName):
        # Title of the form "<app>: <file>".
        super().setWindowTitle(config.windowTitle + ": " + fileName)

    def setModifiedWindowTitle(self, fileName):
        # Title of the form "<app>: <file> (*)" marking unsaved changes;
        # an empty file name leaves the title untouched.
        if fileName == "":
            return
        modified_title = config.windowTitle + ": " + fileName + " (*)"
        super().setWindowTitle(modified_title)
def main():
    """Create the Qt application, apply global styling, and run the event loop."""
    stylesheet = '''
    QTableWidget {
        gridline-color: black;
    }
    QHeaderView::section {
        background-color: black;
        color: white
    }
    '''
    app = QApplication(sys.argv)
    app.setStyleSheet(stylesheet)
    app.setFont(QFont("Ubuntu Mono", config.fontSize))
    window = MainWindow()
    window.setGeometry(200, 200, 600, 809)
    window.show()
    app.exec_()
if __name__ == '__main__':
main()
| [
"peterklenner@gmx.de"
] | peterklenner@gmx.de |
47a7689341aba141ef07a367e7a1f049400f7b03 | 32ad4e49c2e704a8f8e170cbb0b95c9d72797671 | /StartScene.py | 9bef0015f5b0630099fa1fad91fc134b71ecc4d8 | [] | no_license | kruby3/nback-test | f1d1ec1453392414d9b80b909a3cd15df87e1e6b | 3c470447046e9f26c7a49af8ed273158b91b625d | refs/heads/master | 2021-08-11T11:23:01.357240 | 2017-11-13T14:48:38 | 2017-11-13T14:48:38 | 109,038,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | from InstructionScene import InstructionScene
from Scene import Scene
from Button import Button
import Settings
import pygame
class StartScene(Scene):
def __init__(self):
super(StartScene, self).__init__()
self.buttons = []
def render(self, screen):
screen.fill(Settings.white)
title = Settings.statusFont.render("WAIT FOR LAB INSTRUCTOR to select n-back test", True, Settings.black)
screen.blit(title, (Settings.mid[0] - title.get_width() // 2, 10))
b1 = Button((Settings.mid[0] - 150, 90), (300, 80), pygame.Color("blue"), "1", 1)
b2 = Button((Settings.mid[0] - 150, 180), (300, 80), pygame.Color("blue"), "2", 2)
b3 = Button((Settings.mid[0] - 150, 270), (300, 80), pygame.Color("blue"), "3", 3)
b4 = Button((Settings.mid[0] - 150, 360), (300, 80), pygame.Color("blue"), "Control", 0)
self.buttons = [b1, b2, b3, b4]
for button in self.buttons:
button.draw(screen)
def update(self, seconds):
pass
def handle_events(self, events):
for event in events:
for button in self.buttons:
if button.isHit(event):
nBack = button.getInt()
self.manager.go_to(InstructionScene(nBack))
| [
"kruby3@gatech.edu"
] | kruby3@gatech.edu |
5f27a5ab246a0ff4f691918458972be437c390b2 | 900469a5aced73dbe0ca11cb96729202a420cfb1 | /plugins/inventory/terraform.py | c1f2a6fa9356826522ed2464184f5b87063d72e2 | [
"Apache-2.0"
] | permissive | wangqiang8511/customized_mantl | ccc749c9f47faf3bef98fb1f3bc0cf57aeda65e3 | 723009f004c933ad3ec58b8f8fa4041575bb4aea | refs/heads/master | 2021-01-10T01:37:49.358648 | 2015-12-16T05:18:37 | 2015-12-16T05:18:37 | 46,896,454 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,920 | py | #!/usr/bin/env python
#
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
from functools import wraps
import json
import os
import re
VERSION = '0.3.0pre'
def tfstates(root=None):
    """Yield the path of every `.tfstate` file at or below *root*
    (defaults to the current working directory)."""
    base = root or os.getcwd()
    for dirpath, _, filenames in os.walk(base):
        for fname in filenames:
            _, ext = os.path.splitext(fname)
            if ext == '.tfstate':
                yield os.path.join(dirpath, fname)
def iterresources(filenames):
    """Yield (module_name, resource_key, resource) for every resource found
    in the given Terraform state files."""
    for filename in filenames:
        with open(filename, 'r') as json_file:
            state = json.load(json_file)
        for module in state['modules']:
            module_name = module['path'][-1]
            for resource_key, resource in module['resources'].items():
                yield module_name, resource_key, resource
## READ RESOURCES
PARSERS = {}
def _clean_dc(dcname):
# Consul DCs are strictly alphanumeric with underscores and hyphens -
# ensure that the consul_dc attribute meets these requirements.
return re.sub('[^\w_\-]', '-', dcname)
def iterhosts(resources):
'''yield host tuples of (name, attributes, groups)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
try:
parser = PARSERS[resource_type]
except KeyError:
continue
yield parser(resource, module_name)
def parses(prefix):
def inner(func):
PARSERS[prefix] = func
return func
return inner
def calculate_mi_vars(func):
    """calculate microservices-infrastructure vars"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        name, attrs, groups = func(*args, **kwargs)
        # Only hosts with the 'control' role run Consul in server mode.
        attrs['consul_is_server'] = attrs.get('role', '') == 'control'
        # Mirror public routability into the group list.
        if attrs.get('publicly_routable', False):
            groups.append('publicly_routable')
        return name, attrs, groups
    return wrapper
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
continue
if curprefix != prefix or rest == '#':
continue
yield rest, value
def parse_attr_list(source, prefix, sep='.'):
attrs = defaultdict(dict)
for compkey, value in _parse_prefix(source, prefix, sep):
idx, key = compkey.split(sep, 1)
attrs[idx][key] = value
return attrs.values()
def parse_dict(source, prefix, sep='.'):
return dict(_parse_prefix(source, prefix, sep))
def parse_list(source, prefix, sep='.'):
return [value for _, value in _parse_prefix(source, prefix, sep)]
def parse_bool(string_form):
    """Convert a Terraform boolean string (any case of true/false) to bool.

    Raises ValueError for strings starting with anything other than t/f.
    """
    first = string_form.lower()[0]
    if first == 't':
        return True
    if first == 'f':
        return False
    raise ValueError('could not convert %r to a bool' % string_form)
@parses('digitalocean_droplet')
@calculate_mi_vars
def digitalocean_host(resource, tfvars=None):
    """Parse a digitalocean_droplet tfstate resource into the
    (name, attrs, groups) host tuple used by the inventory."""
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []
    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        'locked': parse_bool(raw_attrs['locked']),
        # user_data carries the provisioning metadata as a JSON document.
        'metadata': json.loads(raw_attrs['user_data']),
        'region': raw_attrs['region'],
        'size': raw_attrs['size'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'status': raw_attrs['status'],
        # ansible
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root', # it's always "root" on DO
        # generic
        'public_ipv4': raw_attrs['ipv4_address'],
        'private_ipv4': raw_attrs['ipv4_address'],
        'provider': 'digitalocean',
    }
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none')
    })
    # add groups based on attrs
    groups.append('do_image=' + attrs['image'])
    groups.append('do_locked=%s' % attrs['locked'])
    groups.append('do_region=' + attrs['region'])
    groups.append('do_size=' + attrs['size'])
    groups.append('do_status=' + attrs['status'])
    groups.extend('do_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())
    # groups specific to microservices-infrastructure
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('softlayer_virtualserver')
@calculate_mi_vars
def softlayer_host(resource, module_name):
    """Parse a softlayer_virtualserver tfstate resource into the
    (name, attrs, groups) host tuple used by the inventory."""
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []
    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        # user_data carries the provisioning metadata as a JSON document.
        'metadata': json.loads(raw_attrs['user_data']),
        'region': raw_attrs['region'],
        'ram': raw_attrs['ram'],
        'cpu': raw_attrs['cpu'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'public_ipv4': raw_attrs['ipv4_address'],
        'private_ipv4': raw_attrs['ipv4_address_private'],
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',
        'provider': 'softlayer',
    }
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none')
    })
    # groups specific to microservices-infrastructure
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('openstack_compute_instance_v2')
@calculate_mi_vars
def openstack_host(resource, module_name):
    """Parse an openstack_compute_instance_v2 tfstate resource into the
    (name, attrs, groups) host tuple used by the inventory."""
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []
    attrs = {
        'access_ip_v4': raw_attrs['access_ip_v4'],
        'access_ip_v6': raw_attrs['access_ip_v6'],
        'flavor': parse_dict(raw_attrs, 'flavor',
                             sep='_'),
        'id': raw_attrs['id'],
        'image': parse_dict(raw_attrs, 'image',
                            sep='_'),
        'key_pair': raw_attrs['key_pair'],
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'network': parse_attr_list(raw_attrs, 'network'),
        'region': raw_attrs.get('region', ''),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
        # ansible
        'ansible_ssh_port': 22,
        'ansible_ssh_user': raw_attrs.get('metadata.ssh_user', 'centos'),
        # workaround for an OpenStack bug where hosts have a different domain
        # after they're restarted
        'host_domain': 'novalocal',
        'use_host_domain': True,
        # generic
        'public_ipv4': raw_attrs['access_ip_v4'],
        'private_ipv4': raw_attrs['access_ip_v4'],
        'provider': 'openstack',
    }
    # When a floating IP is attached, access_ip_v4 is the floating address,
    # so the private address comes from the first fixed network interface.
    if 'floating_ip' in raw_attrs:
        attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
    try:
        attrs.update({
            'ansible_ssh_host': raw_attrs['access_ip_v4'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
    })
    # add groups based on attrs
    groups.append('os_image=' + attrs['image']['name'])
    groups.append('os_flavor=' + attrs['flavor']['name'])
    groups.extend('os_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())
    groups.append('os_region=' + attrs['region'])
    # groups specific to microservices-infrastructure
    groups.append('role=' + attrs['metadata'].get('role', 'none'))
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('aws_instance')
@calculate_mi_vars
def aws_host(resource, module_name):
    """Parse an aws_instance tfstate resource into the
    (name, attrs, groups) host tuple used by the inventory."""
    # The inventory hostname comes from the instance's Name tag.
    name = resource['primary']['attributes']['tags.Name']
    raw_attrs = resource['primary']['attributes']
    groups = []
    attrs = {
        'ami': raw_attrs['ami'],
        'availability_zone': raw_attrs['availability_zone'],
        'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
        'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
        'ephemeral_block_device': parse_attr_list(raw_attrs,
                                                  'ephemeral_block_device'),
        'id': raw_attrs['id'],
        'key_name': raw_attrs['key_name'],
        'private': parse_dict(raw_attrs, 'private',
                              sep='_'),
        'public': parse_dict(raw_attrs, 'public',
                             sep='_'),
        'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
        'subnet': parse_dict(raw_attrs, 'subnet',
                             sep='_'),
        'tags': parse_dict(raw_attrs, 'tags'),
        'tenancy': raw_attrs['tenancy'],
        'vpc_security_group_ids': parse_list(raw_attrs,
                                             'vpc_security_group_ids'),
        # ansible-specific
        'ansible_ssh_port': 22,
        'ansible_ssh_user': raw_attrs['tags.sshUser'],
        'ansible_ssh_host': raw_attrs['public_ip'],
        # generic
        'public_ipv4': raw_attrs['public_ip'],
        'private_ipv4': raw_attrs['private_ip'],
        'provider': 'aws',
    }
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)),
        'role': attrs['tags'].get('role', 'none')
    })
    # groups specific to microservices-infrastructure
    groups.extend(['aws_ami=' + attrs['ami'],
                   'aws_az=' + attrs['availability_zone'],
                   'aws_key_name=' + attrs['key_name'],
                   'aws_tenancy=' + attrs['tenancy']])
    groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
    groups.extend('aws_vpc_security_group=' + group
                  for group in attrs['vpc_security_group_ids'])
    groups.extend('aws_subnet_%s=%s' % subnet
                  for subnet in attrs['subnet'].items())
    # groups specific to microservices-infrastructure
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('google_compute_instance')
@calculate_mi_vars
def gce_host(resource, module_name):
    """Parse a google_compute_instance tfstate resource into the
    (name, attrs, groups) host tuple used by the inventory."""
    name = resource['primary']['id']
    raw_attrs = resource['primary']['attributes']
    groups = []
    # network interfaces
    interfaces = parse_attr_list(raw_attrs, 'network_interface')
    for interface in interfaces:
        interface['access_config'] = parse_attr_list(interface,
                                                     'access_config')
        # Bug fix: iterate over a snapshot of the keys. Deleting from a dict
        # while iterating its live .keys() view raises RuntimeError on
        # Python 3 (it only worked on Python 2, where keys() was a list).
        for key in list(interface.keys()):
            if '.' in key:
                del interface[key]
    # general attrs
    attrs = {
        'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
        'disks': parse_attr_list(raw_attrs, 'disk'),
        'machine_type': raw_attrs['machine_type'],
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'network': parse_attr_list(raw_attrs, 'network'),
        'network_interface': interfaces,
        'self_link': raw_attrs['self_link'],
        'service_account': parse_attr_list(raw_attrs, 'service_account'),
        'tags': parse_list(raw_attrs, 'tags'),
        'zone': raw_attrs['zone'],
        # ansible
        'ansible_ssh_port': 22,
        'ansible_ssh_user': raw_attrs.get('metadata.ssh_user', 'centos'),
        'provider': 'gce',
    }
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
    })
    # The public (NAT) address lives on the first interface's access config;
    # hosts without one are treated as not publicly routable.
    try:
        attrs.update({
            'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],
            'public_ipv4': interfaces[0]['access_config'][0]['nat_ip'],
            'private_ipv4': interfaces[0]['address'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
    # add groups based on attrs
    groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
    groups.append('gce_machine_type=' + attrs['machine_type'])
    groups.extend('gce_metadata_%s=%s' % (key, value)
                  for (key, value) in attrs['metadata'].items()
                  if key not in set(['sshKeys']))
    groups.extend('gce_tag=' + tag for tag in attrs['tags'])
    groups.append('gce_zone=' + attrs['zone'])
    if attrs['can_ip_forward']:
        groups.append('gce_ip_forward')
    if attrs['publicly_routable']:
        groups.append('gce_publicly_routable')
    # groups specific to microservices-infrastructure
    groups.append('role=' + attrs['metadata'].get('role', 'none'))
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('vsphere_virtual_machine')
@calculate_mi_vars
def vsphere_host(resource, module_name):
    """Parse a vsphere_virtual_machine tfstate resource into the
    (name, attrs, groups) host tuple used by the inventory."""
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []
    attrs = {
        'id': raw_attrs['id'],
        'ip_address': raw_attrs['ip_address'],
        # vSphere exposes custom metadata as configuration parameters.
        'metadata': parse_dict(raw_attrs, 'configuration_parameters'),
        'ansible_ssh_port': 22,
        'provider': 'vsphere',
    }
    try:
        attrs.update({
            'ansible_ssh_host': raw_attrs['ip_address'],
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', })
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('consul_dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_ssh_user': attrs['metadata'].get('ssh_user', 'centos'),
    })
    # groups specific to microservices-infrastructure
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
## QUERY TYPES
def query_host(hosts, target):
    """Return the attribute dict of the first host named *target*,
    or {} when no host matches."""
    matches = (attrs for name, attrs, _ in hosts if name == target)
    return next(matches, {})
def query_list(hosts):
    """Build the Ansible --list structure: group name -> {'hosts': [...]},
    plus the '_meta' hostvars mapping."""
    groups = defaultdict(dict)
    hostvars = {}
    for name, attrs, hostgroups in hosts:
        hostvars[name] = attrs
        # De-duplicate group names per host before recording membership.
        for group in set(hostgroups):
            groups[group].setdefault('hosts', []).append(name)
    groups['_meta'] = {'hostvars': hostvars}
    return groups
def query_hostfile(hosts):
    """Render the hosts as an /etc/hosts snippet: 'ip<pad>\\tname' lines
    wrapped in begin/end marker comments."""
    lines = ['## begin hosts generated by terraform.py ##']
    for name, attrs, _ in hosts:
        lines.append('{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name))
    lines.append('## end hosts generated by terraform.py ##')
    return '\n'.join(lines)
def main():
    """CLI entry point: parse the dynamic-inventory arguments and print the
    requested view (--list, --host, --hostfile or --version) as required by
    Ansible's dynamic inventory protocol."""
    parser = argparse.ArgumentParser(
        __file__, __doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    # Exactly one output mode must be selected.
    modes = parser.add_mutually_exclusive_group(required=True)
    modes.add_argument('--list',
                       action='store_true',
                       help='list all variables')
    modes.add_argument('--host', help='list variables for a single host')
    modes.add_argument('--version',
                       action='store_true',
                       help='print version and exit')
    modes.add_argument('--hostfile',
                       action='store_true',
                       help='print hosts as a /etc/hosts snippet')
    parser.add_argument('--pretty',
                        action='store_true',
                        help='pretty-print output JSON')
    parser.add_argument('--nometa',
                        action='store_true',
                        help='with --list, exclude hostvars')
    # Default search root: $TERRAFORM_STATE_ROOT, else two directories up
    # from this plugin file.
    default_root = os.environ.get('TERRAFORM_STATE_ROOT',
                                  os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                               '..', '..', )))
    parser.add_argument('--root',
                        default=default_root,
                        help='custom root to search for `.tfstate`s in')
    args = parser.parse_args()
    if args.version:
        print('%s %s' % (__file__, VERSION))
        parser.exit()
    # Lazily chain: find state files -> iterate resources -> parse hosts.
    hosts = iterhosts(iterresources(tfstates(args.root)))
    if args.list:
        output = query_list(hosts)
        if args.nometa:
            del output['_meta']
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.host:
        output = query_host(hosts, args.host)
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.hostfile:
        output = query_hostfile(hosts)
        print(output)
    parser.exit()
if __name__ == '__main__':
main()
| [
"wangqiang8511@gmail.com"
] | wangqiang8511@gmail.com |
043d846664e3ce02332e791f914cb489d23f59f0 | 9a55e24a449d302efd404fdec5fa11dbd80c6e0e | /ProcessImages/batchAverage.py | c56cb3dcaaddb7071d37c9e45a9267ce41d86ea1 | [] | no_license | kristofGovaerts/MRI | 38d115a03925d4a1ccd5676ca21cd4fa99b5681d | 03b15da313b25fcd1f3d4949415f3a1c6b93c56f | refs/heads/master | 2021-03-09T17:11:13.837201 | 2020-07-08T19:53:32 | 2020-07-08T19:53:32 | 246,360,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 12 16:16:08 2014
@author: u0091609
Batch averaging tool for fMRI data. For registration purposes.
"""
import nibabel as nib
import Tkinter, tkFileDialog
root = Tkinter.Tk()
import glob
import numpy as np
import os
while True:
rootdir = tkFileDialog.askdirectory(initialdir="/",title='Please select a directory')
if os.path.isdir(rootdir) is True: #Checks if entered dir exists
os.chdir(rootdir)
root.destroy()
break
else:
print "Pathname invalid. Try again."
continue
os.chdir(rootdir)
filelist = glob.glob("*SC.nii")
for i, f in enumerate(filelist):
print "File %s of %s: %s" %(i+1, len(filelist), f)
img = nib.load(f)
affine = img.get_affine()
data = img.get_data()
data= np.average(data, axis=-1)
nif = nib.Nifti1Image(data, affine)
nib.save(nif, f[:-4] + '_avg')
del img #clear memory, fMRI data is big
| [
"noreply@github.com"
] | kristofGovaerts.noreply@github.com |
63359d67242621399aecfe0cfc6ad62c46e1699c | f0f62ceb4aa53b143f1a579e32bb95265948d7ea | /verilog_for_fpga/spi_ctrl2.py | f74cf6ad37686ddc53811bf31170d3691ae717d2 | [] | no_license | gabesk/xmas | 82ddf76e0fd2d2da5bb9c8fb9b4f65f740ff2e11 | 1897fa8100f433028c8d1c2805af9bfae1331e5c | refs/heads/main | 2023-04-24T01:27:16.989688 | 2021-05-26T03:04:25 | 2021-05-26T03:04:25 | 323,360,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,876 | py | import argparse
import sys
import serial
import struct
import random
import time
# Command-line interface: COM port of the FPGA board and the BIN file to flash.
parser = argparse.ArgumentParser()
parser.add_argument("com_port", help="The com port of the FPGA (ex: 'COM3:')")
parser.add_argument("prom_file", help="The BIN prom file to program")
args = parser.parse_args()
# Serial link to the AVR bridge; the 2 s timeout bounds every ser.read().
ser = serial.Serial(args.com_port, 115200, timeout=2)
def write_byte(num):
    """Send *num* (0-255) to the serial link as a single raw byte."""
    ser.write(struct.pack('B', int(num)))
def lower_cs():
    """Send 'l' to lower the SPI chip-select line (start of a flash command)."""
    ser.write(b'l')
def raise_cs():
    """Send 'r' to raise the SPI chip-select line (end of a flash command)."""
    ser.write(b'r')
def readwrite_spi_byte(to):
    """Clock one byte *to* out over SPI (bridge command 'b') and return the
    byte simultaneously clocked in, as a length-1 bytes object.

    On a serial read timeout, raises chip-select several times to try to
    return the flash to a known state, then raises Exception.
    """
    ser.write(b'b')
    write_byte(to)
    b = ser.read()
    if len(b) != 1:
        # Something unexpected happened. Try to raise chip select to place SPI
        # flash in known state.
        raise_cs()
        raise_cs()
        raise_cs()
        raise_cs()
        raise Exception("Timeout reading data from COM port.")
    return b
def transact(cmd, bytes_to, num_bytes_back):
    """Run one SPI command: select the chip, clock out *cmd* then *bytes_to*,
    clock back *num_bytes_back* bytes, deselect, and return those bytes."""
    lower_cs()
    readwrite_spi_byte(cmd)
    for outgoing in bytes_to:
        readwrite_spi_byte(outgoing)
    received = [readwrite_spi_byte(0) for _ in range(num_bytes_back)]
    raise_cs()
    return b''.join(received)
def validate_id():
    """Read the flash's JEDEC ID (command 0x9F) and verify it is the
    expected 0xc22012; return the 3-byte ID, or raise on mismatch."""
    chip_id = transact(0x9f, b'', 3)
    expected = bytes.fromhex('c22012')
    if chip_id != expected:
        # Bug fix: the old message claimed 0xc22013 while the code compared
        # against 0xc22012 - report the value actually expected. A stray
        # `pass` statement after the raise was also removed.
        raise Exception("ID from chip doesn't match. Expected 0x%s but got 0x%s." % (expected.hex(), chip_id.hex()))
    return chip_id
def make_three_byte_address(addr):
    """Encode *addr* as the 3-byte big-endian address used by SPI flash
    commands; raise if it does not fit in 24 bits."""
    # Note the > in struct.pack because this address must be big endian as it
    # is sent to the flash MSB first.
    packed = struct.pack('>I', addr)
    if packed[0] != 0:
        raise Exception("Invalid address: 0x%x." % (addr))
    return packed[1:]
def read_page(addr):
    """Read one 256-byte page starting at *addr* (READ command, 0x03)."""
    return transact(0x03, make_three_byte_address(addr), 256)
def read_status(write_enable_set, write_in_progress_set = False, raise_if_not = True):
    """Read the flash status register (RDSR, 0x05) and compare the
    write-enable-latch (bit 1) and write-in-progress (bit 0) flags against
    the expected values.

    Returns True when both flags match. On mismatch, raises Exception by
    default, or returns False when raise_if_not is False.
    """
    status = transact(0x05, b'', 1)
    status = struct.unpack('B', status)[0]
    #print('Read status of 0x%x' % (status))
    write_enable_status = status & 2 != 0
    write_in_progress_status = status & 1 != 0
    if write_enable_set != write_enable_status or write_in_progress_set != write_in_progress_status:
        if raise_if_not:
            raise Exception("Status byte not as expected: 0x%x. write_enable_set:%s write_in_progress_set:%s" % (status, write_enable_set, write_in_progress_set))
        return False
    return True
def assert_page_is_blank(addr):
    """Raise if the 256-byte page at *addr* is not fully erased (all 0xFF)."""
    page = read_page(addr)
    if page != b'\xff'*256:
        print(repr(page))
        raise Exception("Page 0x%x isn't blank." % (addr))
def enable_write():
    """Send WRITE ENABLE (0x06) and confirm the write-enable status bit set."""
    transact(0x06, b'', 0)
    read_status(write_enable_set = True)
def disable_write():
    """Send WRITE DISABLE (0x04) and confirm the write-enable bit cleared."""
    transact(0x04, b'', 0)
    status = read_status(write_enable_set = False)
def wait_for_write_complete():
    """Poll the status register until the write-in-progress bit clears,
    then confirm the write-enable bit has also been cleared by the flash."""
    status_reads = 0
    # While a write is in progress, WEL and WIP are both set; keep polling.
    while read_status(write_enable_set = True, write_in_progress_set = True, raise_if_not = False) == True:
        #print('wait_for_write_complete ...')
        status_reads += 1
    read_status(write_enable_set = False)
    #print('Write complete after %d status reads.' % (status_reads))
def erase_sector(addr):
    """Erase the 4 KiB sector containing *addr* (SECTOR ERASE, 0x20) and
    block until the flash reports the operation has finished."""
    # TODO: Check if sector is blank first
    enable_write()
    transact(0x20, make_three_byte_address(addr), 0)
    wait_for_write_complete()
def read_page_fast(addr):
    """Read one 256-byte page via the bridge's bulk-read command ('g'),
    avoiding the per-byte SPI round trips of read_page()."""
    request = b'g' + struct.pack('B', 0x03) + make_three_byte_address(addr)
    ser.write(request)
    page = ser.read(256)
    assert len(page) == 256
    return page
def program_page(addr, data):
    """Program one 256-byte page at *addr* using the bridge's bulk-write
    command ('f' + PAGE PROGRAM 0x02 + address + data).

    The bridge replies 'a' (accepted) and then 'd' (done); both are
    asserted below instead of polling the flash status register.
    """
    #assert_page_is_blank(addr)
    enable_write()
    #print(len(make_three_byte_address(addr) + data))
    #transact(0x02, make_three_byte_address(addr) + data, 0)
    toser = b'f' + struct.pack('B', 0x02) + make_three_byte_address(addr) + data
    #print('will send this much data:')
    #print(len(toser))
    #print('and it is')
    #print(repr(toser))
    ser.write(toser)
    b = ser.read()
    #print(repr(b))
    assert b == b'a'
    b = ser.read()
    #print(repr(b))
    assert b == b'd'
    #wait_for_write_complete()
def test():
    """Sanity check: erase a sector, program one page of random data at
    0x07F000, read it back, and print whether the round trip matched."""
    random_page_of_data = bytes(random.randint(0, 255) for _ in range(256))
    print('Erasing sector')
    erase_sector(0x07F000)
    print('Done')
    print('Am going to program this:')
    print(repr(random_page_of_data))
    print(len(random_page_of_data))
    program_page(0x07F000, random_page_of_data)
    print('Done. Now reading it back.')
    data_back = read_page(0x07F000)
    print('Got back:')
    print(repr(data_back))
    print(random_page_of_data == data_back)
def put_fpga_into_reset():
    """Send 'q' to hold the FPGA in reset; expects the bridge to echo 'q'."""
    ser.write(b'q')
    b = ser.read()
    #print(repr(b))
    assert b == b'q'
def put_fpga_out_of_reset():
    """Send 'w' to release the FPGA from reset; expects the echo 'w'."""
    ser.write(b'w')
    b = ser.read()
    assert b == b'w'
def wait_for_fpga_done():
    """Poll the bridge ('e' command) until the FPGA DONE status byte goes
    nonzero; raise after more than 40 polls. The final status must be 0x01."""
    status_reads = 0
    b = b'\x00'
    while b == b'\x00':
        if status_reads > 40:
            raise Exception('FPGA not done after %s status reads.' % (status_reads))
        status_reads += 1
        ser.write(b'e')
        b = ser.read()
    # Any non-zero, non-0x01 reply would indicate a protocol error.
    assert b == b'\x01'
    print('FPGA done after %d status reads.' % (status_reads))
def release_spi_flash_from_power_down():
    """Wake the SPI flash from deep power-down via the 't' bridge command."""
    ser.write(b't')
    b = ser.read()
    assert b == b't'  # bridge echoes the command byte on success
def put_avr_into_passthrough_mode():
    """Switch the AVR bridge into UART passthrough mode ('z' command)."""
    ser.write(b'z')
    b = ser.read()
    print(repr(b))
    # TODO: Not sure why this doesn't work; probably need a delay
    assert b == b'z'
def program_xilinx_bin_file(binfile, validate_data=True):
    """Program a Xilinx .bin bitstream into the SPI flash and start the FPGA.

    The file is padded with 0xFF to a whole number of 4096-byte sectors,
    then erased/programmed page by page (256 bytes).  Blank pages are
    skipped.  When *validate_data* is true (the default, matching the old
    hard-coded flag) every programmed page is read back and compared.
    """
    with open(binfile, 'rb') as f:
        data = f.read()
    # BUG FIX: the original computed 4096 - (len(data) % 4096), which pads a
    # whole extra sector when the file is already sector-aligned.  (-n) % 4096
    # is 0 in that case.
    pad_amt = (-len(data)) % 4096
    print('Input file contains %d bytes of data' % (len(data)))
    print('Padding with %d bytes' % (pad_amt))
    data += b'\xff' * pad_amt
    # Use floor division so %d receives ints (the old "/" produced floats).
    print('Will program %d pages' % (len(data) // 256))
    print('Will program %d sectors' % (len(data) // 4096))
    print('To address %x' % (len(data)))
    # Regain control from the FPGA in case the AVR is in passthrough mode
    # (it probably isn't, because opening the com port resets the AVR).
    b = ser.read()
    print(repr(b))
    put_fpga_into_reset()
    release_spi_flash_from_power_down()
    validate_id()
    blank_page_data = b'\xff' * 256  # loop-invariant; hoisted out of the loop
    for address in range(0, len(data), 256):
        if (address % 4096) == 0:
            print('Erasing sector %x, programming and verifying' % (address))
            erase_sector(address)
        page_data = data[address:address + 256]
        if page_data == blank_page_data:
            print('Skipping page addr %x because blank' % (address))
            continue
        program_page(address, page_data)
        if validate_data:
            data_back = read_page_fast(address)
            if data_back != page_data:
                print('ERROR: Data received back did not match.')
                print(repr(page_data))
                print(repr(data_back))
    put_fpga_out_of_reset()
    print('Done programming. Waiting for FPGA to be done.')
    wait_for_fpga_done()
    print('Programming complete.')
    # Put the serial port into passthrough mode so, if it wants to, the FPGA
    # can use it.
    put_avr_into_passthrough_mode()
def test2(binfile):
    """Dump *binfile* raw over the serial port (ad-hoc throughput test)."""
    with open(binfile, 'rb') as f:
        data = f.read()
    print('Start')
    ser.write(data)
    print('End')
# Entry point: program the bitstream named on the command line.
# (The commented-out calls below are ad-hoc debugging alternatives.)
#while True:
#    print(repr(validate_id()))
program_xilinx_bin_file(args.prom_file)
#test2(args.prom_file)
#put_fpga_into_reset()
#release_spi_flash_from_power_down()
#validate_id()
#put_fpga_out_of_reset()
#wait_for_fpga_done()
| [
"gabe@knezek.net"
] | gabe@knezek.net |
fc9e2301091cfc527a28dad1853ce70ab6fd9eec | 458511c628df7deb07b6ca3e00c53e33d47d4151 | /19BMC033-tds-week2/my_pkg/scripts/custom_listener.py | 864e981f219d17428881f7188acdf59ab56e3687 | [] | no_license | logesh-77/19BMC033-tds-ros | b408e9ba09e41249cfcd88f4f53f3e3c93e30f86 | ef049dabf9ba237063c2bc79bd160f6676ef23ec | refs/heads/main | 2023-03-29T00:45:34.600869 | 2021-03-23T06:50:31 | 2021-03-23T06:50:31 | 347,921,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | #!/usr/bin/env python
import rospy
from my_pkg.msg import two_ints
c = None
d = None
def callback(data):
    """Cache the incoming pair (data.a, data.b) in module globals and
    trigger the publisher with the derived values."""
    global c,d
    c = data.a
    d = data.b
    publisher()  # republish every time a new message arrives
def mat_op():
    """Return (c + d, (c + d) * 100) from the module-level cached inputs."""
    total = c + d
    return total, total * 100
def publisher():
    """Publish the derived pair from mat_op() on topic "tl" at 1 Hz until
    rospy is shut down."""
    pub = rospy.Publisher("tl",two_ints,queue_size = 1)
    x,y = mat_op()
    r = rospy.Rate(1)  # 1 Hz publish rate
    msg = two_ints()
    while not rospy.is_shutdown():
        msg.a = x
        msg.b = y
        pub.publish(msg)
        #rospy.loginfo(y)
        r.sleep()
# if __name__ =="__main__":
# try:
# publisher()
# except rospy.ROSInterruptException:
# pass
def listener():
    """Initialise the ROS node and subscribe to the "two_ints" topic."""
    rospy.init_node("two_ints_listener")
    rospy.Subscriber("two_ints",two_ints,callback)
    rospy.spin()  # hand control to rospy; callbacks fire until shutdown
# Script entry point: run as a ROS listener node.
if __name__ == '__main__':
    listener()
| [
"noreply@github.com"
] | logesh-77.noreply@github.com |
7901ae9c4187d700b2c4a4039c7e884754694b6d | 403054145b19d92d750a996e1f1cd4e880926842 | /tbay.py | 3927093f59ed10cd402ea907dbf214a9681335d4 | [] | no_license | yueyehm/Tbay | df24f3c5fdd7fcb6d3bd7f14b2e39a1fc27aa7cd | 352af4a084d49cafe2a9b89eea68bd06a128c4aa | refs/heads/master | 2021-01-10T01:08:45.936351 | 2015-12-16T10:03:57 | 2015-12-16T10:03:57 | 48,089,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime, Float, ForeignKey
from sqlalchemy.orm import relationship
# Database plumbing: engine, a bound session factory, one shared session and
# the declarative base the model classes below inherit from.
# NOTE(review): credentials are hard-coded in the connection URL — consider
# moving them to environment variables or a config file.
engine = create_engine('postgresql://ubuntu:thinkful@localhost:5432/tbay')
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
class Item(Base):
    """An auction listing: owned by one User, holding many Bids."""
    __tablename__ = "item"
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    description = Column(String)
    start_time = Column(DateTime, default=datetime.utcnow)  # auction start (UTC)
    owner_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    bids = relationship("Bid", uselist=True, backref="item")
class User(Base):
    """A registered user who can list items and place bids."""
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    # NOTE(review): stored as-is — should be hashed before persisting.
    password = Column(String, nullable=False)
    items = relationship("Item", uselist=True, backref="owner")
    bids = relationship("Bid", backref = "bidder")
class Bid(Base):
    """A single bid: a price offered by one User on one Item."""
    __tablename__ = "bid"
    id = Column(Integer, primary_key=True)
    price = Column(Float, nullable=False)
    item_id = Column(Integer, ForeignKey('item.id'), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
# Create all tables declared above (a no-op for tables that already exist).
Base.metadata.create_all(engine)
| [
"yueyehm@gmail.com"
] | yueyehm@gmail.com |
e8da68398b914a9006839f01febcbe85d2c320e8 | d95e1c84f04b77e4b4af6e9612d9f3a05b3fca8e | /algo/algorithms/tests/test_factorization.py | bd315c00cdb9877c71bcde497c9f0d45100436a2 | [
"BSD-3-Clause"
] | permissive | LeeHadad/refactor | 5c63b588b8eaa04ad8d2f7bf3695625eaf4f3a66 | 8479deb1a47ab72f33dbef855bdb4f43ac734fbd | refs/heads/master | 2020-05-18T17:08:22.634541 | 2019-05-02T08:30:15 | 2019-05-02T08:30:15 | 184,546,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
    """fermat() must return factors whose product reconstructs the input."""

    def test_fermat(self):
        value = random.randint(1, 100000000)
        product = 1
        for factor in fermat(value):
            product *= factor
        self.assertEqual(value, product)
class TestPollardRho(unittest.TestCase):
    """pollard_rho() must return factors whose product reconstructs the input."""

    def _product(self, factors):
        # Multiply all returned factors back together.
        result = 1
        for factor in factors:
            result *= factor
        return result

    def test_pollard_rho(self):
        value = random.randint(1, 100000000000)
        self.assertEqual(value, self._product(pollard_rho(value)))

    def test_pollard_rho_x_is_zero(self):
        self.assertEqual(0, self._product(pollard_rho(0)))
class TestTrialDivision(unittest.TestCase):
    """trial_division() must return factors whose product reconstructs the input."""

    def test_trial_division(self):
        value = random.randint(0, 10000000000)
        product = 1
        for factor in trial_division(value):
            product *= factor
        self.assertEqual(value, product)
| [
"noreply@github.com"
] | LeeHadad.noreply@github.com |
cc2f066e03ede1f54ac46b07dad2bb6621a03d10 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_287/ch149_2020_04_13_19_29_39_088791.py | 5364f78261dc18794532675b8b2199879ae98f9f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | salario_bruto = int(input('Seu salário bruto: '))
n_dependentes=int(input('Quantos dependentes: ' ))
def faixa(sal):
    """Return the INSS contribution rate for gross salary *sal*.

    Brackets (highest first): above 3134.40 -> 14%; above 2089.60 -> 12%;
    above 1045 -> 9%; otherwise 7.5%.
    """
    if sal > 3134.4:
        return 0.14
    if sal > 2089.6:
        return 0.12
    if sal > 1045:
        return 0.09
    return 0.075
# Taxable base: gross salary minus the INSS contribution and the per-dependent
# deduction (R$189.59 each).  Above the INSS ceiling the contribution is the
# fixed cap of R$671.12.
if salario_bruto <= 6101.06:
    # BUG FIX: the original multiplied the rate by the undefined name `sal`
    # (the parameter of faixa), which raised NameError at runtime.
    b = salario_bruto - (faixa(salario_bruto) * salario_bruto) - (n_dependentes * 189.59)
else:
    b = salario_bruto - 671.12 - (n_dependentes * 189.59)
def deducao(c):
    """Return the IRRF deduction amount for taxable base *c* (monthly table)."""
    brackets = (
        (1903.98, 0),
        (2826.65, 142.8),
        (3751.05, 354.8),
        (4664.68, 636.13),
    )
    for limite, valor in brackets:
        if c <= limite:
            return valor
    return 869.36
def aliquota(d):
    """Return the IRRF tax rate for taxable base *d* (monthly table)."""
    brackets = (
        (1903.98, 0),
        (2826.65, 0.075),
        (3751.05, 0.15),
        (4664.68, 0.225),
    )
    for limite, taxa in brackets:
        if d <= limite:
            return taxa
    return 0.275
# BUG FIX: the deduction function is named `deducao`; the original called the
# undefined name `ded`, which raised NameError at runtime.
IRRF = (b * aliquota(b)) - deducao(b)
# NOTE(review): the label mentions INSS but the value computed is the IRRF
# income tax — confirm the intended wording before changing the message.
print("Sua contribuição para o INSS é de: ",IRRF)
"you@example.com"
] | you@example.com |
f7f7a4d68218a4647d68a6369b3aa0625c34c3c1 | 2cfc75eca340b51baef01b2d9bca01547a4a1249 | /ratingcode.py | 2d7a4081b35bafb7ca6822fc7be047ab0919288e | [] | no_license | E-Cell-IITMandi/IPL-Auction-Scripted-Sheets- | 4959962c8354cd241ddf5d737ea3454c1048a44f | b1643df7361d7132a3559546410e0bba8541d6a8 | refs/heads/master | 2022-12-18T15:50:04.273958 | 2020-09-26T18:47:10 | 2020-09-26T18:47:10 | 298,878,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,091 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 00:35:09 2019
@author: KArry
"""
import requests
import csv
from bs4 import BeautifulSoup
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
def normalization(data):
    """Min-max scale columns 1-2 of *data* into the range [0, 500]."""
    subset = data.iloc[:, 1:3]
    # fit followed by transform on the same data is exactly fit_transform.
    return MinMaxScaler(feature_range=(0, 500)).fit_transform(subset)
def standardization(data):
    """Z-score standardise columns 1-2 of *data*."""
    subset = data.iloc[:, 1:3]
    # fit followed by transform on the same data is exactly fit_transform.
    return StandardScaler().fit_transform(subset)
# Fetch the stats pages: IPL 2019 player points plus the ICC T20 batting,
# bowling and all-rounder ranking pages.
page = requests.get('https://www.iplt20.com/stats/2019/player-points')
page1=requests.get('https://www.cricbuzz.com/cricket-stats/icc-rankings/men/batting')
page2=requests.get('https://www.cricbuzz.com/cricket-stats/icc-rankings/men/bowling')
page3=requests.get('https://www.cricbuzz.com/cricket-stats/icc-rankings/men/all-rounder')
# Shortlist of 56 auction players.  The inconsistent casing/spacing is raw
# input; every name is normalised below before matching.
B=['JOS BUTTLER',
'MS DHONI',
'kl RAHUL',
'RISHABH PANT',
'DINESH KARThIK',
'ISHAN KISHAN',
'SANJU SAMSON',
'WRIDDHIMAN SAHA',
'SHANE WATSON',
'DWAYNE BRAVO',
'ANDRE RUSSELL',
'SUNIL NARINE',
'KRUNAL PANDYA',
'HARDIK PANDYA',
'RAVINDRA JADEJA',
'CHRIS MORRIS',
'BEN STOKES',
'GLENN MAXWELL',
'kieron POLLARD',
'vijaySHANKAR',
'DAVID WARNER',
'CHRIS LYNN',
'ABDevilliers',
'chrisGAYLE',
'KANE WILLIAMSON',
'NITISH RANA',
'MAYANK AGARWAL',
'DAVID MILLER',
'viratKOHLI',
'SURYAKUMAR YADAV',
'SHIKHAR DHAWAN',
'SURESH RAINA',
'SHREYAS IYER',
'ajinkyaRAHANE',
'robinUTHAPPA',
'ROHIT SHARMA',
'SHUBAMan GILL',
'RAHUL TRIPATHi',
'ANDREW TYE',
'RASHID KHAN',
'KAGISO RABADA',
'JOFRA ARCHER',
'MUJEEB UR RAHMAN',
'IMRAN TAHIR',
'TIM SOUTHEE',
'SIDdARTH KAUL',
'UMESH YADAV',
'jaspritBUMRAH',
'KULDEEP YADAV',
'SHARDUL THAKUR',
'MAYANK MARKANDE',
'AMIT MISHRA',
'yuzvendraCHAHAL',
'SANDEEP SHARMA',
'JAYDEV UNADKAT',
"DEEPAK CHAHAR"]
M=B.copy()  # keep the original display names for the final report
# Normalise to lowercase with no whitespace so names match across sources.
for i in range(56):
    B[i]=B[i].strip().replace(" ","").lower()
# Scrape the IPL player-points table into A as [normalised_name, points].
soup = BeautifulSoup(page.text, 'html.parser')
article=soup.find("table",{"class":"table table--scroll-on-tablet top-players"})
# NOTE(review): A is seeded with a hard-coded entry — presumably because that
# player is missing from the scraped table; verify it is still needed.
A=[['andrerussell',369]]
Dict={"name":[],"score":[]}
df=pd.DataFrame(Dict)
# NOTE(review): the file handle is opened but never written to; the loop only
# builds A and prints.  The trailing f.close() is a no-op after the with.
with open('cod.csv','w') as f:
    for tr in article.find_all('tr')[2:]:
        tds = tr.find_all('td')
        #print((str(tds[1].text)))
        df2={"name":[str(tds[1].text).strip()],"score":[str(tds[1].text).strip()]}
        print(str(tds[1].text).strip().replace("\n"," ").replace(" ",""))
        A.append([str(tds[1].text).strip().replace("\n"," ").replace(" ","").lower(),float(str(tds[2].text).strip())])
        #df.append(pd.DataFrame(df2),ignore_index=True)
        #print(str(tds))
        # print((tds[1].text))
f.close()
# Scrape the ICC T20 batting rankings into F as [normalised_name, rating].
soup = BeautifulSoup(page1.text, 'html.parser')
#article=soup.find("div",{"class":"cb-col cb-col-100 cb-padding-left0"})
#for tr in article.find_all('tr')[2:]:
article1=soup.find("div",{"class":"cb-col cb-col-100 cb-plyr-tbody"})
#nx=article1.find_all("div")
nx1=article1.find("div",{"ng-show":"'batsmen-t20s' == act_rank_format"})
names= nx1.find_all("div",{"class":"cb-col cb-col-67 cb-rank-plyr"})
rate=nx1.find_all("div",{"class":"cb-col cb-col-17 cb-rank-tbl pull-right"})
n=str(names[2].text)
F=[]
# Only the first two words of each name are kept, then normalised.
for i in range(94):
    v=str(names[i].text).strip().split()[0:2]
    x=v[0]+v[1]
    F.append([x.replace(" ","").lower(),float(rate[i].text)])
# Same scrape for the T20 bowling rankings -> F2.
soup = BeautifulSoup(page2.text, 'html.parser')
#article=soup.find("div",{"class":"cb-col cb-col-100 cb-padding-left0"})
#for tr in article.find_all('tr')[2:]:
article1=soup.find("div",{"class":"cb-col cb-col-100 cb-plyr-tbody"})
#nx=article1.find_all("div")
nx1=article1.find("div",{"ng-show":"'bowlers-t20s' == act_rank_format"})
names= nx1.find_all("div",{"class":"cb-col cb-col-67 cb-rank-plyr"})
rate=nx1.find_all("div",{"class":"cb-col cb-col-17 cb-rank-tbl pull-right"})
n=str(names[2].text)
F2=[]
for i in range(94):
    v=str(names[i].text).strip().split()[0:2]
    x=v[0]+v[1]
    F2.append([x.replace(" ","").lower(),float(rate[i].text)])
# Same scrape for the T20 all-rounder rankings -> F3 (only 9 rows listed).
soup = BeautifulSoup(page3.text, 'html.parser')
#article=soup.find("div",{"class":"cb-col cb-col-100 cb-padding-left0"})
#for tr in article.find_all('tr')[2:]:
article1=soup.find("div",{"class":"cb-col cb-col-100 cb-plyr-tbody"})
#nx=article1.find_all("div")
nx1=article1.find("div",{"ng-show":"'allrounders-t20s' == act_rank_format"})
names= nx1.find_all("div",{"class":"cb-col cb-col-67 cb-rank-plyr"})
rate=nx1.find_all("div",{"class":"cb-col cb-col-17 cb-rank-tbl pull-right"})
n=str(names[2].text)
F3=[]
for i in range(9):
    v=str(names[i].text).strip().split()[0:2]
    x=v[0]+v[1]
    F3.append([x.replace(" ","").lower(),float(rate[i].text)])
"""
df2={"name":[str(tds[1].text).strip()],"score":[str(tds[1].text).strip()]}
print(str(tds[1].text).strip().replace("\n"," ").replace(" ",""))
W.append([str(tds[1].text).strip().replace("\n"," ").replace(" ","").lower(),float(str(tds[2].text).strip())])
"""    #df.append(pd.DataFrame(df2),ignore_index=True)
    #print(str(tds))
# Combine the sources: for each shortlisted player, take IPL points from A and
# (below) the best ICC rating from F/F2/F3.  X rows are
# [name, ipl_points, icc_rating, unused].
X=[]
P=[]
for i in range(56):
    f=0
    for j in range(162):
        if(B[i]==A[j][0]):
            X.append([A[j][0],A[j][1],0,0])
            f=1
    if (f==0):
        # NOTE(review): players missing from the points table get a hard-coded
        # placeholder row ('glennmaxwell', 174) — confirm this is intentional.
        X.append(['glennmaxwell',174,0,0])
print(P)
# Best available ICC rating across batting, bowling and all-rounder lists.
for j in range(56):
    V=[]
    for i in range(94):
        if (X[j][0]==F[i][0]):
            V.append(F[i][1])
    for i in range(94):
        if (X[j][0]==F2[i][0]):
            V.append(F2[i][1])
    for i in range(9):
        if (X[j][0]==F3[i][0]):
            V.append(F3[i][1])
    if(len(V)!=0):
        X[j][2]=max(V)
X=pd.DataFrame(X)
X=normalization(X)  # scale points and ratings into [0, 500]
# Final score: the scaled points alone when no ICC rating was found,
# otherwise a 3:2 weighted mix of points and rating.
Z=[]
for i in range(56):
    if(X[i][1]==0):
        r=float(X[i][0])
        Z.append([M[i],X[i][0],X[i][1],r])
    else:
        r=(float(X[i][0])*3 + float(X[i][1])*2)/5
        Z.append([M[i],X[i][0],X[i][1],r])
Z=pd.DataFrame(Z)
Z.to_csv("data.csv")
#article1=article.find_all('td')
#for i in article1:
#    print(i.text)
| [
"noreply@github.com"
] | E-Cell-IITMandi.noreply@github.com |
d9a97a7cbbe72cc551778e9e39bb5738403241e1 | 30301266eb9624cb399cd262711d12c299165b4f | /Hackerrank/Implementation/22. Taum and Bday.py | ea68e419a7ce51a568822f431dc18e5f973ba333 | [] | no_license | manoj2509/Python-Practice | 8178cddd2a23066859fbf1365010ffcebdf5f803 | d8658a119bd62c1fbf45c2bcebd62ac9f7c2e071 | refs/heads/master | 2021-01-18T23:44:23.869711 | 2016-07-11T19:49:57 | 2016-07-11T19:49:57 | 49,173,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | __author__ = 'Mj'
t = int(input())
text = list()
text2 = list()
for i in range(t):
temp = input().split(' ')
text.append(temp)
temp2 = input().split(' ')
text2.append(temp2)
for i in range(t):
if ((int(text2[i][0])+int(text2[i][2])) < int(text2[i][1])):
cost = int(text[i][0])*int(text2[i][0]) + int(text[i][1])*(int(text2[i][0]) + int(text2[i][2]))
elif ((int(text2[i][1])+int(text2[i][2])) < int(text2[i][0])):
cost = int(text[i][1])*int(text2[i][1]) + int(text[i][0])*(int(text2[i][1]) + int(text2[i][2]))
else:
cost = int(text[i][1])*int(text2[i][1]) + int(text[i][0])*int(text2[i][0])
print(cost) | [
"manojparihar09@gmail.com"
] | manojparihar09@gmail.com |
81797b0fe613fc298400a50eedc510f3b5b5d963 | 4414fd709105c53f332e438ce885f1d9ca04f712 | /send_sock.py | a0b4958818ee644ca8edadce3c22eead5c8035a7 | [] | no_license | pratikbitmesra/DataStructures-from-Undergrad | a8eba8430dd2ed175ec550095b4e6f15df7020b4 | 608a9a7f518368fd7db89fd8e34ea931811c6d64 | refs/heads/master | 2021-01-19T14:04:33.035885 | 2017-07-06T01:45:40 | 2017-07-06T01:45:40 | 88,123,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | # Send and Receive Data using socket
# http://www.binarytides.com/python-socket-programming-tutorial/
import sys
import socket
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:
print 'Failed to create socket with Error Code: ' + str(msg[0]) + ', Error message ' + msg[1]
sys.exit()
print 'Socket Created'
host = 'www.google.com'
port = 80
try:
remote_ip = socket.gethostbyname( host )
except socket.gaierror:
print 'HostName couldnt be rsolved.'
sys.exit()
print 'IP address of ' + host + ' is ' + remote_ip
s.connect((remote_ip, port))
print 'Socket Connected to ' +host + ' on IP ' + remote_ip
# Send data to google
message = "GET /HTTP/1.1.\r\n\r\n"
try:
s.sendall(message)
except socket.error:
print 'Failed to Send data'
sys.exit()
print 'Successfully sent'
reply = s.recv(4096)
print reply
| [
"noreply@github.com"
] | pratikbitmesra.noreply@github.com |
12d74f7c8a6aa4c9b01ba885f2a963dda379adb4 | 77c471124fb4ac4a7fe0a19cf47b666ed0eccb79 | /relational/student_projects/2020_karkkainen/models/cognitive/prism/build/lib/spatialreasoner/utilities.py | e7f6bac4b1a164a793105e439dd0b19aae24cedb | [
"MIT"
] | permissive | CognitiveComputationLab/cogmods | f8286d7aa7917a87fd4df27d0c6db666aec88c92 | ac73fb60387aad37d3b3fb823f9b2c205c6cb458 | refs/heads/master | 2023-07-26T10:15:48.647877 | 2023-07-14T08:38:23 | 2023-07-14T08:38:23 | 178,379,369 | 1 | 12 | MIT | 2021-09-27T10:30:47 | 2019-03-29T09:55:02 | Python | UTF-8 | Python | false | false | 7,640 | py | #-------------------------------------------------------------------------------
# Name: Spatial Reasoning Utility Functions
# Purpose: Module of utility functions, used by the Spatial Reasoning
# class, as well as the spatial_array module.
#
# Author: Ashwath Sampath
# Based on: http://mentalmodels.princeton.edu/programs/space-6.lisp
# Created: 29-04-2018
# Copyright: (c) Ashwath Sampath 2018
#-------------------------------------------------------------------------------
""" Module which contains functions which are called by the
SpatialReasoning class in spatial_reasoning.py. Based on LISP
code developed by PN Johnson-Laird and R.Byrne as part of their
1991 book 'Deduction' and their 1989 paper 'Spatial Reasoning'. """
def convert(rel):
    """Flip the sign of every component of *rel*; zeros stay zero."""
    return tuple(-component for component in rel)
def extract(mod, models):
    """Return *models* with every occurrence of *mod* removed.

    An empty *mod* ({}) means there is nothing to delete, so *models* is
    returned unchanged.
    """
    if mod == {}:
        return models
    remaining = []
    for candidate in models:
        if candidate != mod:
            remaining.append(candidate)
    return remaining
def outside(coords, dims):
    """Return (new_dims, new_origin) if *coords* lies outside *dims*, else None.

    new_dims gets +1 on every axis because callers treat each dimension as a
    count of models rather than a maximum index.
    """
    adjusted = out(coords, dims)
    if adjusted == dims:
        return None  # coords already fit within the current dimensions
    return (list_add(adjusted, (1, 1, 1)), new_orig(coords))
def out(coords, dims):
    """Adjust each coordinate against its dimension.

    A coordinate larger than its dimension is kept; a negative coordinate
    becomes abs(coord) + dim; anything in range collapses to the dim itself.
    E.g. out((1, 3, -2), (2, 2, 2)) == (2, 3, 4).
    """
    def adjust(coord, dim):
        if coord > dim:
            return coord
        if coord < 0:
            return abs(coord) + dim
        return dim

    return tuple(adjust(coord, dim) for coord, dim in zip(coords, dims))
def new_orig(coords):
    """Map each coordinate to 0 if non-negative, else to its absolute value."""
    origin = []
    for value in coords:
        origin.append(0 if value >= 0 else -value)
    return tuple(origin)
def find_index_neg_num(lis):
    """Return the position (0-based) of the first negative number in *lis*,
    or None when there is no negative number."""
    for position, value in enumerate(lis):
        if value < 0:
            return position
    return None
def find_item(item, mods):
    """Search each model in *mods* for *item*.

    Returns (coords, model) for the first model containing the item, or
    None when *mods* is empty or no model contains it.
    """
    if mods == []:
        return None
    for candidate in mods:
        location = finds(item, candidate)
        if location is not None:
            return (location, candidate)
    return None
def finds(item, mod):
    """Return the coordinate key at which *item* occurs in *mod*, else None.

    Iterates in insertion order, so the first matching cell wins (the same
    cell the original list-then-index version returned).
    """
    for key, cell in mod.items():
        if contains(item, cell):
            return key
    return None
def contains(item, cell_value):
    """True iff *cell_value* is non-None and includes *item* as a substring
    (a cell may hold more than one token)."""
    if cell_value is None:
        return False
    return cell_value.find(item) != -1
def list_add(lis1, lis2):
    """Element-wise sum of *lis1* and *lis2*, returned as a tuple.

    Indexed by the length of *lis1*, matching the original's behaviour when
    the sequences differ in length.
    """
    summed = [lis1[i] + lis2[i] for i in range(len(lis1))]
    return tuple(summed)
def update_coords(coords, reln):
    """Return the coordinates of an item after moving it one step along the
    relation vector *reln* (element-wise addition)."""
    return list_add(coords, reln)
def subjfn(prop):
    """Return the subject token of an intensional proposition *prop*.

    The parser produces prop as [relation, [subject], [object]], so the
    subject is the first element of prop[1].
    """
    subject_list = prop[1]
    return subject_list[0]
def relfn(prop):
    """Return the relation of *prop* as a tuple.

    The parser yields the relation as a list; a tuple is hashable, which the
    spatial-array dictionary keys require.
    """
    relation = prop[0]
    return tuple(relation)
def objfn(prop):
    """Return the object token of an intensional proposition *prop*
    (the first element of prop[2])."""
    object_list = prop[2]
    return object_list[0]
def list_ints(number):
    """Return the list [number, number-1, ..., 0]; empty for negative input."""
    return [number - offset for offset in range(number + 1)]
def rem_num(num, lis):
    """Return a copy of *lis* with every occurrence of *num* removed."""
    filtered = []
    for element in lis:
        if element != num:
            filtered.append(element)
    return filtered
def print_premises(premises):
    """Print a "Premises:" heading, then each premise's sentence (element 0)."""
    print("Premises:")
    for premise in premises:
        sentence = premise[0]
        print(sentence)
def ortho(subj_coord, obj_coord, subj_dim, obj_dim):
    """Combine one axis of two arrays.

    Returns a 3-tuple: the combined dimension, the subject origin component,
    and the object origin component in the combined array.
    """
    if subj_coord > obj_coord:
        offset = subj_coord - obj_coord
        return (subj_coord + (obj_dim - obj_coord), 0, offset)
    if subj_coord < obj_coord:
        offset = obj_coord - subj_coord
        return (obj_coord + (subj_dim - subj_coord), offset, 0)
    # Equal coordinates: the larger array already has room for the other, so
    # no dimension growth and both origins stay at 0.
    bigger = subj_dim if subj_dim > obj_dim else obj_dim
    return (bigger, 0, 0)
def get_coordinates_from_token(token, mod):
    """Return the coordinate tuple at which *token* is stored in *mod*,
    or None if it does not occur (tokens are assumed unique)."""
    return next(
        (coords for coords, stored in mod.items() if stored == token),
        None,
    )
def same_dir_movement(rel1, rel2):
    """True iff *rel1* and *rel2* move along the same axis, i.e. both have a
    non-zero component in one of the first three dimensions.

    Assumes movement is along a single axis, in line with the rest of the
    module.
    """
    return any(rel1[axis] != 0 and rel2[axis] != 0 for axis in range(3))
def not_none_dim(rel):
    """Return the index of the dimension of *rel* carrying the movement.

    Index is 0-based: 0 is right/left, 1 is front/back, 2 is above/below.
    Returns None when no component is 1 or -1 (error case).
    """
    for index, element in enumerate(rel):
        # BUG FIX: the docstring promises 1/-1 but the original only matched
        # element == 1, so relations pointing in the negative direction
        # (e.g. (-1, 0, 0)) fell through and returned None.
        if element in (1, -1):
            return index
    # Should never reach here: None only on error
    return None
| [
"sakukark@gmail.com"
] | sakukark@gmail.com |
643d38c0512e082e8c9a7018af157e92220e51da | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fabric/rsnodetechsupp.py | 700fdc371962997c14a296087490fee59112e394 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 8,389 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsNodeTechSupP(Mo):
"""
A source relation to a utility that creates a summary report containing configuration information, logs, and diagnostic data that will help TAC in troubleshooting and resolving a technical issue.
"""
meta = NamedSourceRelationMeta("cobra.model.fabric.RsNodeTechSupP", "cobra.model.dbgexp.TechSupP")
meta.targetNameProps["name"] = "tnDbgexpTechSupPName"
meta.cardinality = SourceRelationMeta.N_TO_ONE
meta.moClassName = "fabricRsNodeTechSupP"
meta.rnFormat = "rsnodeTechSupP"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Tech Support Policy"
meta.writeAccessMask = 0x80000000001
meta.readAccessMask = 0x80000000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.fabric.SpNodePGrp")
meta.parentClasses.add("cobra.model.fabric.LeNodePGrp")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.superClasses.add("cobra.model.pol.NToRef")
meta.rnPrefixes = [
('rsnodeTechSupP', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14000, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 11574, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4112
prop.defaultValueStr = "dbgexpTechSupP"
prop._addConstant("dbgexpTechSupP", None, 4112)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
prop.label = "Target-context"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tContextDn", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
prop.label = "Target-rn"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("tRn", prop)
prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "name"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "tnDbgexpTechSupPName", "tnDbgexpTechSupPName", 11573, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("tnDbgexpTechSupPName", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
    """Create the MO under the given parent DN (or parent MO object).

    This class has no naming properties, so no naming values are passed
    to the base Mo constructor.
    """
    Mo.__init__(self, parentMoOrDn, markDirty, *(), **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
84e2c0ca3d2e11c40051b1c6165f7cf0c5039665 | 188436fcc2311d42ea4374b21564076c8681699a | /tests/test_grading.py | bc2f2952d7e5d09381109db377607f3b6f815f94 | [
"MIT"
] | permissive | wisnercelucus/relate | e3f5fb9e751fbc8b1944399e9be4066188496aec | 833f269c258a70c28e4ba0a4c8cf4da9790e5eac | refs/heads/master | 2022-07-04T17:15:36.981566 | 2020-05-07T20:26:05 | 2020-05-07T20:26:05 | 262,824,648 | 1 | 0 | null | 2020-05-10T16:06:23 | 2020-05-10T16:06:22 | null | UTF-8 | Python | false | false | 27,009 | py | from __future__ import division
__copyright__ = "Copyright (C) 2018 Dong Zhuang"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from random import shuffle
from django.utils.timezone import now, timedelta
from django.core import mail
from django.test import TestCase, override_settings
from unittest import skipUnless
from course.models import ParticipationPermission
from course.constants import participation_permission as pperm
from tests.base_test_mixins import (
SingleCourseQuizPageTestMixin, MockAddMessageMixing)
from tests import factories
from tests.utils import mock, may_run_expensive_tests, SKIP_EXPENSIVE_TESTS_REASON
class SingleCourseQuizPageGradeInterfaceTestMixin(SingleCourseQuizPageTestMixin):
# Shared fixture for the grading-interface tests: starts a flow session for
# the student and submits one answer to the file-upload page "anyup".
# NOTE(review): original indentation was lost in this extract; code lines
# are kept byte-identical.
page_id = "anyup"
@classmethod
def setUpTestData(cls): # noqa
super(SingleCourseQuizPageGradeInterfaceTestMixin, cls).setUpTestData()
cls.start_flow(cls.flow_id)
cls.this_flow_session_id = cls.default_flow_params["flow_session_id"]
cls.submit_page_answer_by_page_id_and_test(cls.page_id)
@skipUnless(may_run_expensive_tests(), SKIP_EXPENSIVE_TESTS_REASON)
class SingleCourseQuizPageGradeInterfaceTest(
SingleCourseQuizPageGradeInterfaceTestMixin, MockAddMessageMixing, TestCase):
# End-to-end tests for the per-page human grading interface: posting grades,
# feedback/notification emails, grade history, and the permission and error
# paths of course.grading.grade_flow_page.
# NOTE(review): original indentation was lost in this extract; the code
# lines below are kept byte-identical to the source.
@classmethod
def setUpTestData(cls): # noqa
super(SingleCourseQuizPageGradeInterfaceTest, cls).setUpTestData()
with cls.temporarily_switch_to_user(cls.student_participation.user):
# a failing submission (empty file upload)
cls.submit_page_answer_by_page_id_and_test(
cls.page_id, answer_data={"uploaded_file": []})
# a successful submission, grading deferred to the individual tests
cls.submit_page_answer_by_page_id_and_test(
cls.page_id,
do_grading=False)
cls.end_flow()
# A grade without "released" must stay unpublished; with "released" it
# becomes the visible grade.
def test_post_grades(self):
self.submit_page_human_grading_by_page_id_and_test(self.page_id)
grade_data = {
"grade_points": "4",
"released": []
}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data=grade_data, expected_grades=None)
grade_data = {
"grade_points": "4",
"released": "on"
}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data=grade_data, expected_grades=4)
def test_post_grades_huge_points_failure(self):
grade_data = {
"grade_percent": "2000",
"released": 'on'
}
resp = self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data=grade_data, expected_grades=None)
# value exceeded allowed
self.assertResponseContextContains(
resp, "grading_form_html",
"Ensure this value is less than or equal to")
def test_post_grades_forbidden(self):
# with self.student_participation.user logged in
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, expected_grades=None,
force_login_instructor=False, expected_post_grading_status_code=403)
# Feedback email is only sent when "notify" is checked; it goes to the
# student (by appellation, not masked) and CCs the course notify address.
def test_feedback_and_notify(self):
grade_data_extra_kwargs = {
"feedback_text": 'test feedback'
}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs)
self.assertEqual(len(mail.outbox), 0)
grade_data_extra_kwargs["notify"] = "on"
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].reply_to, [])
# Instructor also get the feedback email
self.assertIn(self.course.notify_email, mail.outbox[0].recipients())
# make sure the name (appellation) is in the email body, not the masked one
self.assertIn(
self.student_participation.user.get_email_appellation(),
mail.outbox[0].body)
self.assertNotIn(
self.student_participation.user.get_masked_profile(),
mail.outbox[0].body)
self.assertNotIn(
"Dear user",
mail.outbox[0].body)
# With the view_participant_masked_profile permission the email must not
# leak the student's real name.
def test_feedback_and_notify_instructor_pperm_masked_profile(self):
# add view_participant_masked_profile pperm to instructor
pp = ParticipationPermission(
participation=self.instructor_participation,
permission=pperm.view_participant_masked_profile
)
pp.save()
self.instructor_participation.individual_permissions.set([pp])
grade_data_extra_kwargs = {
"feedback_text": 'test feedback',
"notify": "on"}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].reply_to, [])
# Instructor also get the feedback email
self.assertIn(self.course.notify_email, mail.outbox[0].recipients())
# make sure the name (appellation) not in the email body, not the masked one
self.assertNotIn(
self.student_participation.user.get_email_appellation(),
mail.outbox[0].body)
self.assertNotIn(
self.student_participation.user.get_masked_profile(),
mail.outbox[0].body)
self.assertIn("Dear user", mail.outbox[0].body)
# The dedicated "grader_feedback" email connection and from-address must
# be used when configured.
@override_settings(
EMAIL_CONNECTIONS={
"grader_feedback": {
'backend': 'tests.resource.MyFakeEmailBackend',
},
},
GRADER_FEEDBACK_EMAIL_FROM="my_feedback_from_email@example.com"
)
def test_feedback_notify_with_grader_feedback_connection(self):
grade_data_extra_kwargs = {
"feedback_text": 'test feedback',
"notify": "on"
}
from django.core.mail import get_connection
connection = get_connection(
backend='django.core.mail.backends.locmem.EmailBackend')
with mock.patch("django.core.mail.get_connection") as mock_get_connection:
mock_get_connection.return_value = connection
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].from_email,
"my_feedback_from_email@example.com")
self.assertEqual(
mock_get_connection.call_args[1]["backend"],
"tests.resource.MyFakeEmailBackend"
)
# make sure the name (appellation) is in the email body, not the masked one
self.assertIn(
self.student_participation.user.get_email_appellation(),
mail.outbox[0].body)
self.assertNotIn(
self.student_participation.user.get_masked_profile(),
mail.outbox[0].body)
self.assertNotIn(
"Dear user",
mail.outbox[0].body)
# "may_reply" puts the grader's address into Reply-To.
def test_feedback_email_may_reply(self):
grade_data_extra_kwargs = {
"feedback_text": 'test feedback',
"may_reply": "on",
"notify": "on"
}
with self.temporarily_switch_to_user(self.ta_participation.user):
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs,
force_login_instructor=False)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].reply_to,
[self.ta_participation.user.email])
# Instructor also get the feedback email
self.assertIn(self.course.notify_email, mail.outbox[0].recipients())
# make sure the name (appellation) is in the email body, not the masked one
self.assertIn(
self.student_participation.user.get_email_appellation(),
mail.outbox[0].body)
self.assertNotIn(
self.student_participation.user.get_masked_profile(),
mail.outbox[0].body)
self.assertNotIn(
"Dear user",
mail.outbox[0].body)
# Internal grading notes notify the instructor only when requested.
def test_notes_and_notify(self):
grade_data_extra_kwargs = {
"notes": 'test notes'
}
with self.temporarily_switch_to_user(self.ta_participation.user):
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs,
force_login_instructor=False)
self.assertEqual(len(mail.outbox), 0)
grade_data_extra_kwargs["notify_instructor"] = "on"
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs,
force_login_instructor=False)
self.assertEqual(len(mail.outbox), 1)
self.assertIn(self.course.notify_email, mail.outbox[0].recipients())
# make sure the name (appellation) is in the email body, not the masked one
self.assertIn(
self.student_participation.user.get_email_appellation(),
mail.outbox[0].body)
self.assertNotIn(
self.student_participation.user.get_masked_profile(),
mail.outbox[0].body)
def test_notes_and_notify_ta_pperm_masked_profile(self):
# add view_participant_masked_profile pperm to ta
pp = ParticipationPermission(
participation=self.ta_participation,
permission=pperm.view_participant_masked_profile
)
pp.save()
self.ta_participation.individual_permissions.set([pp])
grade_data_extra_kwargs = {
"notes": 'test notes',
"notify_instructor": "on"}
with self.temporarily_switch_to_user(self.ta_participation.user):
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs,
force_login_instructor=False)
self.assertEqual(len(mail.outbox), 1)
self.assertIn(self.course.notify_email, mail.outbox[0].recipients())
# make sure the name (appellation) not in the email body,
# the masked one is used instead
self.assertNotIn(
self.student_participation.user.get_email_appellation(),
mail.outbox[0].body)
self.assertIn(
self.student_participation.user.get_masked_profile(),
mail.outbox[0].body)
@override_settings(
EMAIL_CONNECTIONS={
"grader_feedback": {
'backend': 'tests.resource.MyFakeEmailBackend',
},
},
GRADER_FEEDBACK_EMAIL_FROM="my_feedback_from_email@example.com"
)
def test_notes_and_notify_with_grader_feedback_connection(self):
grade_data_extra_kwargs = {
"notes": 'test notes',
"notify_instructor": "on"
}
from django.core.mail import get_connection
connection = get_connection(
backend='django.core.mail.backends.locmem.EmailBackend')
with mock.patch("django.core.mail.get_connection") as mock_get_connection:
mock_get_connection.return_value = connection
with self.temporarily_switch_to_user(self.ta_participation.user):
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data_extra_kwargs=grade_data_extra_kwargs,
force_login_instructor=False)
self.assertEqual(len(mail.outbox), 1)
self.assertIn(self.course.notify_email, mail.outbox[0].recipients())
self.assertEqual(mail.outbox[0].from_email,
"my_feedback_from_email@example.com")
self.assertEqual(
mock_get_connection.call_args[1]["backend"],
"tests.resource.MyFakeEmailBackend"
)
# make sure the name (appellation) is in the email body, not the masked one
self.assertIn(
self.student_participation.user.get_email_appellation(),
mail.outbox[0].body)
self.assertNotIn(
self.student_participation.user.get_masked_profile(),
mail.outbox[0].body)
# {{{ test grading.get_prev_grades_dropdown_content
# The grade-history endpoint is AJAX-GET-only and permission-protected.
def test_grade_history_failure_no_perm(self):
resp = self.c.get(
self.get_page_grade_history_url_by_ordinal(
page_ordinal=1), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 403)
def test_grade_history_failure_not_ajax(self):
resp = self.c.get(
self.get_page_grade_history_url_by_ordinal(
page_ordinal=1))
self.assertEqual(resp.status_code, 403)
def test_submit_history_failure_not_get(self):
resp = self.c.post(
self.get_page_grade_history_url_by_ordinal(
page_ordinal=1), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 403)
def test_grade_history_failure_not_authenticated(self):
with self.temporarily_switch_to_user(None):
resp = self.c.get(
self.get_page_grade_history_url_by_ordinal(
page_ordinal=1), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 403)
# Each grading POST appends one entry to the grade history.
def test_grades_history_after_graded(self):
self.submit_page_human_grading_by_page_id_and_test(self.page_id)
ordinal = self.get_page_ordinal_via_page_id(self.page_id)
self.assertGradeHistoryItemsCount(page_ordinal=ordinal, expected_count=3)
grade_data = {
"grade_points": "4",
"released": []
}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data=grade_data, expected_grades=None)
self.assertGradeHistoryItemsCount(page_ordinal=ordinal, expected_count=4)
grade_data = {
"grade_points": "4",
"released": "on"
}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data=grade_data, expected_grades=4)
self.assertGradeHistoryItemsCount(page_ordinal=ordinal,
expected_count=5)
# }}}
# {{{ test grade_flow_page (for cases not covered by other tests)
# {{{ prev_grade
def test_viewing_prev_grade_id_not_exist(self):
with self.temporarily_switch_to_user(self.instructor_participation.user):
resp = self.c.get(
self.get_page_grading_url_by_page_id(self.page_id)
+ "?grade_id=1000")
self.assertEqual(resp.status_code, 404)
def test_viewing_prev_grade_id_not_int(self):
with self.temporarily_switch_to_user(self.instructor_participation.user):
resp = self.c.get(
self.get_page_grading_url_by_page_id(self.page_id)
+ "?grade_id=my_id")
self.assertEqual(resp.status_code, 400)
def test_viewing_prev_grade(self):
grade_data = {
"grade_points": "4",
"released": "on"
}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data=grade_data, expected_grades=4)
with self.temporarily_switch_to_user(
self.instructor_participation.user), mock.patch(
"course.grading.get_feedback_for_grade") as mock_get_feedback:
resp = self.c.get(
self.get_page_grading_url_by_page_id(self.page_id)
+ "?grade_id=1")
self.assertEqual(resp.status_code, 200)
self.assertEqual(mock_get_feedback.call_count, 1)
# Viewing an old grade must be read-only: a POST there may not add history.
def test_viewing_prev_grade_may_not_post_grade(self):
grade_data = {
"grade_points": "4",
"released": "on"
}
self.submit_page_human_grading_by_page_id_and_test(
self.page_id, grade_data=grade_data, expected_grades=4)
ordinal = self.get_page_ordinal_via_page_id(self.page_id)
self.assertGradeHistoryItemsCount(page_ordinal=ordinal, expected_count=3)
with self.temporarily_switch_to_user(self.instructor_participation.user):
resp = self.c.post(
self.get_page_grading_url_by_page_id(self.page_id) + "?grade_id=1",
data=grade_data
)
self.assertEqual(resp.status_code, 200)
self.assertGradeHistoryItemsCount(page_ordinal=ordinal, expected_count=3)
# }}}
# Sanity checks: the flow session must belong to the course in the URL
# and must have a participation attached.
def test_flow_session_course_not_matching(self):
another_course = factories.CourseFactory(identifier="another-course")
some_user = factories.UserFactory()
his_participation = factories.ParticipationFactory(
course=another_course, user=some_user)
his_flow_session = factories.FlowSessionFactory(
course=another_course, participation=his_participation)
url = self.get_page_grading_url_by_ordinal(
page_ordinal=1, course_identifier=self.course.identifier,
flow_session_id=his_flow_session.pk)
with self.temporarily_switch_to_user(self.instructor_participation.user):
resp = self.c.get(url)
self.assertEqual(resp.status_code, 400)
def test_flow_session_has_no_participation(self):
null_participation_flow_session = factories.FlowSessionFactory(
course=self.course, participation=None, user=None)
url = self.get_page_grading_url_by_ordinal(
page_ordinal=1,
flow_session_id=null_participation_flow_session.pk,
)
with self.temporarily_switch_to_user(self.instructor_participation.user):
resp = self.c.get(url)
self.assertEqual(resp.status_code, 400)
def test_page_desc_none(self):
with mock.patch(
"course.content.get_flow_page_desc") as mock_get_flow_page_desc:
from django.core.exceptions import ObjectDoesNotExist
mock_get_flow_page_desc.side_effect = ObjectDoesNotExist
with self.temporarily_switch_to_user(self.instructor_participation.user):
resp = self.c.get(
self.get_page_grading_url_by_page_id(self.page_id))
self.assertEqual(resp.status_code, 404)
# InvalidPageData from the page's form factory is surfaced to the grader
# as an explanatory message rather than a server error.
def test_invalid_page_data(self):
with mock.patch(
"course.page.upload.FileUploadQuestion.make_form"
) as mock_make_form:
from course.grading import InvalidPageData
error_msg = "your file is broken."
mock_make_form.side_effect = InvalidPageData(error_msg)
expected_error_msg = (
"The page data stored in the database was found "
"to be invalid for the page as given in the "
"course content. Likely the course content was "
"changed in an incompatible way (say, by adding "
"an option to a choice question) without changing "
"the question ID. The precise error encountered "
"was the following: %s" % error_msg)
with self.temporarily_switch_to_user(
self.instructor_participation.user):
resp = self.c.get(
self.get_page_grading_url_by_page_id(self.page_id))
self.assertEqual(resp.status_code, 200)
self.assertAddMessageCalledWith(expected_error_msg)
# view_gradebook alone allows viewing the grading page but not posting.
def test_no_perm_to_post_grade(self):
some_user = factories.UserFactory()
his_participation = factories.ParticipationFactory(
user=some_user, course=self.course)
from course.models import ParticipationPermission
pp = ParticipationPermission(
participation=his_participation,
permission=pperm.view_gradebook
)
pp.save()
his_participation.individual_permissions.set([pp])
with self.temporarily_switch_to_user(some_user):
resp = self.c.get(
self.get_page_grading_url_by_page_id(self.page_id))
self.assertEqual(resp.status_code, 200)
grade_data = {
"grade_points": "4",
"released": "on"
}
resp = self.post_grade_by_page_id(
self.page_id, grade_data, force_login_instructor=False)
self.assertEqual(resp.status_code, 403)
# When the grading rule has no grade_identifier, the page still renders
# and accepts grades, but no grading opportunity is shown.
def test_flow_session_grading_opportunity_is_none(self):
grade_data = {
"grade_points": "4",
"released": "on"
}
def get_session_grading_rule_side_effect(session, flow_desc, now_datetime):
from course.utils import (
get_session_grading_rule, FlowSessionGradingRule)
true_g_rule = get_session_grading_rule(
session, flow_desc, now_datetime)
fake_grading_rule = FlowSessionGradingRule(
# make grade_identifier None
grade_identifier=None,
grade_aggregation_strategy=true_g_rule.grade_aggregation_strategy,
due=true_g_rule.due,
generates_grade=true_g_rule.generates_grade,
description=true_g_rule.description,
credit_percent=true_g_rule.credit_percent,
use_last_activity_as_completion_time=(
true_g_rule.use_last_activity_as_completion_time),
bonus_points=true_g_rule.bonus_points,
max_points=true_g_rule.max_points,
max_points_enforced_cap=true_g_rule.max_points_enforced_cap)
return fake_grading_rule
with mock.patch(
"course.grading.get_session_grading_rule"
) as mock_get_grading_rule:
mock_get_grading_rule.side_effect = get_session_grading_rule_side_effect
with self.temporarily_switch_to_user(
self.instructor_participation.user):
# get success
resp = self.c.get(
self.get_page_grading_url_by_page_id(self.page_id))
self.assertEqual(resp.status_code, 200)
self.assertResponseContextIsNone(
resp, "grading_opportunity")
# post success
resp = self.post_grade_by_page_id(
self.page_id, grade_data)
self.assertEqual(resp.status_code, 200)
self.assertResponseContextIsNone(
resp, "grading_opportunity")
class GraderSetUpMixin(object):
# Test-data factory: creates participations, flow sessions, answered page
# visits, and one grade per visit (by one of two random graders), and
# returns the number of non-null-answer visits created.
# NOTE(review): original indentation was lost in this extract; code lines
# are kept byte-identical.
@classmethod
def create_flow_page_visit_grade(cls, course=None,
n_participations_per_course=1,
n_sessions_per_participation=1,
n_non_null_answer_visits_per_session=3):
if course is None:
# NOTE(review): bug — when course is None, course.identifier raises
# AttributeError; this branch likely should read
# factories.CourseFactory() (or use an explicit default identifier).
# Callers in this file always pass a course, so the branch never runs.
course = factories.CourseFactory(identifier=course.identifier)
participations = factories.ParticipationFactory.create_batch(
size=n_participations_per_course, course=course)
grader1 = factories.UserFactory()
grader2 = factories.UserFactory()
graders = [grader1, grader2]
# Visits are time-stamped 10 seconds apart, starting a day in the past.
visit_time = now() - timedelta(days=1)
for participation in participations:
flow_sessions = factories.FlowSessionFactory.create_batch(
size=n_sessions_per_participation, participation=participation)
for flow_session in flow_sessions:
non_null_anaswer_fpds = factories.FlowPageDataFactory.create_batch(
size=n_non_null_answer_visits_per_session,
flow_session=flow_session
)
for fpd in non_null_anaswer_fpds:
visit_time = visit_time + timedelta(seconds=10)
factories.FlowPageVisitFactory.create(
visit_time=visit_time,
page_data=fpd,
answer={"answer": "abcd"})
# Pick a random grader for each grade by shuffling the pair.
shuffle(graders)
grade_time = visit_time + timedelta(seconds=10)
factories.FlowPageVisitGradeFactory.create(
grader=graders[0],
grade_time=grade_time)
n_non_null_answer_fpv = (
n_participations_per_course
* n_sessions_per_participation
* n_non_null_answer_visits_per_session)
#print(n_non_null_answer_fpv)
return n_non_null_answer_fpv
class ShowGraderStatisticsTest(
SingleCourseQuizPageTestMixin, GraderSetUpMixin, TestCase):
# test grading.show_grader_statistics
# Students are forbidden (403); instructors may view the statistics page.
@classmethod
def setUpTestData(cls): # noqa
super(ShowGraderStatisticsTest, cls).setUpTestData()
cls.create_flow_page_visit_grade(cls.course)
def get_show_grader_statistics_url(self, flow_id, course_identifier=None):
# Build the relate-show_grader_statistics URL for the given flow.
course_identifier = (
course_identifier or self.get_default_course_identifier())
from tests.base_test_mixins import reverse
params = {"course_identifier": course_identifier,
"flow_id": flow_id}
return reverse("relate-show_grader_statistics", kwargs=params)
def test_no_permission(self):
with self.temporarily_switch_to_user(self.student_participation.user):
resp = self.c.get(self.get_show_grader_statistics_url(self.flow_id))
self.assertEqual(resp.status_code, 403)
def test_success(self):
with self.temporarily_switch_to_user(self.instructor_participation.user):
resp = self.c.get(self.get_show_grader_statistics_url(self.flow_id))
self.assertEqual(resp.status_code, 200)
# vim: fdm=marker
| [
"dzhuang.scut@gmail.com"
] | dzhuang.scut@gmail.com |
19a2316dd43d791b31a99574ad397dbf30c345ca | f999933a217e191dc494a521448ad99ee0af2168 | /userprofile/urls.py | b8c2a21ce62c6a2641d2adc3d57b42a7e9e48427 | [] | no_license | chiliexe/django-blog-project | 54bff1e0ed22bbb471704a665695116d3f43d0d4 | d937e9b4138965f73c78b805c3f2330de90dc905 | refs/heads/master | 2023-06-17T06:42:47.678037 | 2021-07-17T04:09:48 | 2021-07-17T04:09:48 | 386,830,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from django.urls import path
from .views import ProfileListView, ProfileUpdateView
# URL namespace for the userprofile app (used as "profile:index" etc.).
app_name = 'profile'
urlpatterns = [
path('perfil/', ProfileListView.as_view(), name='index'),
path('perfil/edit/<int:pk>/', ProfileUpdateView.as_view(), name='update')
]
| [
"chiliexe_2015@hotmail.com"
] | chiliexe_2015@hotmail.com |
6ba1ddbd15336b49276923cbea4615bfec75b5ae | f129ec404172c40244a8d93159730615805eaaab | /src/game.py | b746b327e7e04e850b5e564d8ac0a9809128df44 | [
"MIT"
] | permissive | RoboticsAndCloud/keras-image-room-clasification | 0205b334cebbaa25acf7919cfbb7428abc39fd16 | 284924cef6983e6fe8805b4e07d5306d5396b5b5 | refs/heads/master | 2022-02-22T08:41:19.149141 | 2018-01-11T04:46:41 | 2018-01-11T04:46:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,804 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, pygame, time, os
from pygame.locals import *
# Constants
# Window dimensions and RGB colours used throughout the labelling UI.
WIDTH = 840
HEIGHT = 580
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
# Images start in UNLABELED_DIR and are moved into per-class subfolders
# of LABELED_DIR as the user clicks the category buttons.
UNLABELED_DIR = 'unlabelled'
LABELED_DIR = 'labelled'
# ---------------------------------------------------------------------
def load_image(filename, transparent=False):
    """Load an image file and scale it to the window size (WIDTH x HEIGHT).

    If ``transparent`` is True, the colour of the top-left pixel becomes the
    transparent colour key.  Raises pygame.error when the file cannot be
    loaded as an image.
    """
    try:
        image = pygame.image.load(filename)
        image = pygame.transform.scale(image, (WIDTH, HEIGHT))
    except pygame.error:
        print("Error in file: " + filename)
        # Re-raise with a bare raise so the original traceback is preserved
        # (the old `raise message` re-raised the caught object explicitly).
        raise
    image = image.convert()
    if transparent:
        color = image.get_at((0, 0))
        image.set_colorkey(color, RLEACCEL)
    return image
def text_objects(text, text_color=black):
    """Render *text* in a 20pt font; return (surface, bounding rect).

    The previous version also loaded an unused 115pt font on every call;
    that dead local (`largeText`) has been removed.
    """
    font = pygame.font.Font("freesansbold.ttf", 20)
    textSurface = font.render(text, True, text_color)
    return textSurface, textSurface.get_rect()
def button(screen, msg, x, y, w, h, ic, ac, destination_folder):
# Draw one labelling button; when hovered it uses colour `ac`, otherwise
# `ic`.  A left click moves the first unlabelled image into
# destination_folder and loads the next image as the new background.
# NOTE(review): indentation was lost in this extract; code lines are kept
# byte-identical.  `background_image` is a module-level global shared with
# main().  NOTE(review): callers pass ac=20 (an int) — pygame maps ints to
# colours, but this looks unintended; confirm against the hover colour.
global background_image
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse[0] > x and y+h > mouse[1] > y:
pygame.draw.rect(screen, ac,(x,y,w,h))
textSurf, textRect = text_objects(msg, white)
if click[0]==1:
org = get_first_file(UNLABELED_DIR)
dest = destination_folder + '/' + org.split('/')[-1]
move_file(org, dest)
background_image = get_first_unlabeled_photo(UNLABELED_DIR)
screen.blit(background_image, (0, 0))
print("apreto -> " + destination_folder)
# Crude debounce: sleep so one click does not label several images.
time.sleep(0.5)
else:
pygame.draw.rect(screen, ic,(x,y,w,h))
textSurf, textRect = text_objects(msg, black)
textRect.center = ( (x+(w/2)), (y+(h/2)) )
screen.blit(textSurf, textRect)
def get_first_file(path):
    """Return '<path>/<name>' for the first directory entry in *path*."""
    first_entry = os.listdir(path)[0]
    return path + '/' + first_entry
def get_first_unlabeled_photo(path):
    """Load the first image in *path*; if it is unreadable, delete it and retry.

    Only one retry is attempted; a second failure propagates to the caller.
    """
    try:
        return load_image(get_first_file(path))
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the intended best-effort
        # behaviour (drop the broken file, try the next one).
        os.remove(get_first_file(path))
        return load_image(get_first_file(path))
def move_file(org, dest):
# Move (rename) a file; os.rename fails across filesystems — both dirs
# here live under the working directory, so that is fine.
os.rename(org, dest)
# ---------------------------------------------------------------------
def main():
# Event loop of the labelling tool: show the first unlabelled image and a
# row of category buttons; each button click files the image and advances.
# NOTE(review): indentation was lost in this extract; code lines are kept
# byte-identical.  NOTE(review): this local `background_image` shadows the
# module global that button() updates — after the first click only
# button()'s own blit refreshes the screen; confirm intent.
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("BrotherEngine with Pygame")
background_image = get_first_unlabeled_photo(UNLABELED_DIR)
screen.blit(background_image, (0, 0))
while True:
for eventos in pygame.event.get():
if eventos.type == QUIT:
sys.exit(0)
# One button per target class; ac=20 is passed where a hover colour is
# expected (see NOTE in button()).
button(screen, msg='bathroom', x=40, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/bathroom')
button(screen, msg='living', x=140, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/living')
button(screen, msg='exterior', x=240, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/exterior')
button(screen, msg='kitchen', x=340, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/kitchen')
button(screen, msg='bedroom', x=440, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/bedroom')
button(screen, msg='plane', x=540, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/plane')
button(screen, msg='publicity', x=640, y=500, w=90, h=30, ic=red, ac=20, destination_folder=LABELED_DIR + '/publicity')
button(screen, msg='other', x=740, y=500, w=90, h=30, ic=red, ac=20, destination_folder='./other') # This wont be classified
pygame.display.flip()
return 0
# Script entry point: initialise pygame before running the UI loop.
if __name__ == '__main__':
pygame.init()
main()
| [
"collinetjorge@gmail.com"
] | collinetjorge@gmail.com |
59a6a52bde79a739e7ba1e930327e7cfcb12aec2 | 9bc5491e8d3b7d5b10542d64679c36f4ea6c5fa6 | /opay_dw_code/ods/ods_sqoop_base_bd_agent_status_change_log_di.py | 6491271932e4404e9a17ad9e81fda199eb159818 | [] | no_license | socratestu/Opay_datalake-script | 6829f8788a283126cff583d268c53c361ca82f8e | 1c7796f4c0eeb00438908e61e95d84f14f4bdbec | refs/heads/master | 2022-04-17T05:31:10.423151 | 2020-04-16T13:30:03 | 2020-04-16T13:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,419 | py | # -*- coding: utf-8 -*-
import airflow
from datetime import datetime, timedelta
from airflow.operators.hive_operator import HiveOperator
from airflow.operators.impala_plugin import ImpalaOperator
from utils.connection_helper import get_hive_cursor
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.hooks.redis_hook import RedisHook
from airflow.hooks.hive_hooks import HiveCliHook, HiveServer2Hook
from airflow.operators.hive_to_mysql import HiveToMySqlTransfer
from airflow.operators.mysql_operator import MySqlOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from airflow.operators.bash_operator import BashOperator
from airflow.sensors.named_hive_partition_sensor import NamedHivePartitionSensor
from airflow.sensors.hive_partition_sensor import HivePartitionSensor
from airflow.sensors import UFileSensor
from plugins.TaskTimeoutMonitor import TaskTimeoutMonitor
from airflow.sensors import OssSensor
from plugins.TaskTouchzSuccess import TaskTouchzSuccess
import json
import logging
from airflow.models import Variable
from plugins.CountriesAppFrame import CountriesAppFrame
import requests
import os
# Default operator arguments: 3 retries, 2-minute backoff, failure emails.
args = {
'owner': 'xiedong',
'start_date': datetime(2020, 3, 31),
'depends_on_past': False,
'retries': 3,
'retry_delay': timedelta(minutes=2),
'email': ['bigdata_dw@opay-inc.com'],
'email_on_failure': True,
'email_on_retry': False,
}
# Daily DAG, scheduled at 00:30.
dag = airflow.DAG('ods_sqoop_base_bd_agent_status_change_log_di',
schedule_interval="30 00 * * *",
default_args=args,
)
##----------------------------------------- upstream dependencies ---------------------------------------##
# Wait for the hour-22 binlog partition's _SUCCESS marker on OSS before
# running the daily load.
ods_binlog_bd_agent_status_change_log_hi_check_task = OssSensor(
task_id='ods_binlog_bd_agent_status_change_log_hi_check_task',
bucket_key='{hdfs_path_str}/dt={pt}/hour=22/_SUCCESS'.format(
hdfs_path_str="opay_binlog/opay_agent_crm_binlog.opay_agent_crm.bd_agent_status_change_log",
pt='{{ds}}'
),
bucket_name='opay-datalake',
# Re-check the dependency once a minute while it is unsatisfied.
poke_interval=60,
dag=dag
)
##----------------------------------------- task timeout monitoring ---------------------------------------##
def fun_task_timeout_monitor(ds, dag, **op_kwargs):
    """Register an SLA check: alert if the day's partition of this DAG's
    target table is not ready within 3000 seconds."""
    monitor_spec = {
        "dag": dag,
        "db": "opay_dw_ods",
        "table": "{dag_name}".format(dag_name=dag.dag_id),
        "partition": "dt={pt}".format(pt=ds),
        "timeout": "3000",
    }
    TaskTimeoutMonitor().set_task_monitor([monitor_spec])
# Operator wrapping the timeout-monitor registration above.
task_timeout_monitor = PythonOperator(
task_id='task_timeout_monitor',
python_callable=fun_task_timeout_monitor,
provide_context=True,
dag=dag
)
##----------------------------------------- variables ---------------------------------------##
# Target Hive database/table and the OSS location backing its partitions.
db_name = "opay_dw_ods"
table_name = "ods_sqoop_base_bd_agent_status_change_log_di"
hdfs_path = "oss://opay-datalake/opay_dw_sqoop_di/opay_agent_crm/bd_agent_status_change_log"
# Time-zone configuration shared by Opay DAGs (Airflow Variable).
config = eval(Variable.get("opay_time_zone_config"))
def ods_sqoop_base_bd_agent_status_change_log_di_sql_task(ds):
# Build the daily-dedup HQL: take the binlog rows from [pt-1 23h, pt 22h],
# keep the latest image per id (by __ts_ms/__file/__pos), drop deletes,
# and overwrite the dt={pt} partition.
# NOTE(review): `config` is passed to .format() but the template has no
# {config} placeholder — it is unused; confirm before removing.
HQL = '''
set hive.exec.dynamic.partition.mode=nonstrict;
set hive.exec.parallel=true;
insert overwrite table {db}.{table} partition (dt)
SELECT
id,
agent_id,
opay_account,
opay_id,
concat(substr(created_at, 1, 10), ' ', substr(created_at, 12, 8)) as created_at,
from_agent_status,
to_agent_status,
'{pt}'
from
(select *,row_number() over(partition by id order by `__ts_ms` desc,`__file` desc,cast(`__pos` as int) desc) rn
FROM opay_dw_ods.ods_binlog_base_bd_agent_status_change_log_hi
where concat(dt,' ',hour) between '{pt_y} 23' and '{pt} 22' and `__deleted` = 'false'
) m
where rn=1
'''.format(
pt=ds,
pt_y=airflow.macros.ds_add(ds, -1),
table=table_name,
db=db_name,
config=config
)
return HQL
# Main flow
def execution_data_task_id(ds, dag, **kwargs):
# Daily load: alter partitions, run the dedup HQL via the Hive CLI, then
# write the _SUCCESS marker through CountriesAppFrame.
v_execution_time = kwargs.get('v_execution_time')
hive_hook = HiveCliHook()
args = [
{
"dag": dag,
"is_countries_online": "false",
"db_name": db_name,
"table_name": table_name,
"data_oss_path": hdfs_path,
"is_country_partition": "false",
"is_result_force_exist": "false",
"execute_time": v_execution_time,
"is_hour_task": "false",
"frame_type": "utc",
"business_key": "opay"
}
]
cf = CountriesAppFrame(args)
# Assemble the SQL (partition DDL + insert statement)
_sql = "\n" + cf.alter_partition() + "\n" + ods_sqoop_base_bd_agent_status_change_log_di_sql_task(ds)
logging.info('Executing: %s', _sql)
# Run it through Hive
hive_hook.run_cli(_sql)
# Emit the _SUCCESS marker
cf.touchz_success()
# Operator for the daily load; runs only after the binlog sensor succeeds.
ods_sqoop_base_bd_agent_status_change_log_di_task = PythonOperator(
task_id='ods_sqoop_base_bd_agent_status_change_log_di_task',
python_callable=execution_data_task_id,
provide_context=True,
op_kwargs={
'v_execution_time': '{{execution_date.strftime("%Y-%m-%d %H:%M:%S")}}',
'owner': '{{owner}}'
},
dag=dag
)
ods_binlog_bd_agent_status_change_log_hi_check_task >> ods_sqoop_base_bd_agent_status_change_log_di_task
| [
"dong.xie@opay-inc.com"
] | dong.xie@opay-inc.com |
c28eafbe6e6da6a31548cc4969bead562598846e | 1e423afef7817938ed333d862835a39071800ebf | /extract/location_info.py | 037aeeeecbfd3da34713c40a3cd247416bd567b4 | [
"MIT"
] | permissive | guyrt/court-reminder | 9680e0d9ba7d4654999641d3741e363d3e872044 | 0494e7f864c3922d1ac0bc41c6e255cd88e021a8 | refs/heads/master | 2020-04-05T11:43:51.824055 | 2019-01-21T18:11:45 | 2019-01-21T18:11:45 | 81,161,230 | 2 | 2 | MIT | 2018-12-31T19:55:23 | 2017-02-07T03:18:05 | Python | UTF-8 | Python | false | false | 1,268 | py | import re
from uszipcode import ZipcodeSearchEngine
import utils
def create_digits_for_location_parsing(s):
s = utils.wordnums_to_nums(s)
s = re.sub(r'(\d)\s+(\d)\s+(\d)\s+(\d)\s+(\d)',r'\1\2\3\4\5', s)
return s
def get_re_for_location_parsing():
    """Return a regex matching '<state> <5-digit zip>' with two capture groups."""
    state_alternation = '|'.join(utils.states.keys())
    return r"(%s) (\d{5})" % state_alternation
def find_possible_locations(s):
    """Return the unique (state, zipcode) pairs found in *s*.

    Order is not guaranteed (deduplicated via a set).
    """
    normalized = create_digits_for_location_parsing(s)
    pattern = get_re_for_location_parsing()
    unique_pairs = {match for match in re.findall(pattern, normalized)}
    return list(unique_pairs)
def extract_location(s):
    """Return {'State', 'City', 'Zipcode'} for the first candidate location in *s*
    whose state abbreviation agrees with its ZIP code; all-None dict otherwise.
    """
    wanted = ('State', 'City', 'Zipcode')
    searcher = ZipcodeSearchEngine()
    for state_token, zip_code in find_possible_locations(s):
        info = searcher.by_zipcode(zip_code)
        # Keep only candidates whose ZIP actually belongs to the named state.
        if info['State'] == utils.states[state_token]:
            return {field: info[field] for field in wanted}
    return dict.fromkeys(wanted)
if __name__ == "__main__":
    # Quick manual smoke test for extract_location.
    samples = (
        'washington nine eight one zero two',
        'massachusetts zero two one three eight',
        'texas seven five two four two',  # uszipcode fails
    )
    for sample in samples:
        print('\n' + sample)
        print(extract_location(sample))
| [
"david.geraghty@gamalon.com"
] | david.geraghty@gamalon.com |
d44a7c7a413401c9a7a918133e9bf5ee6845134e | f8acaa3210a5eb392d5cd2567a000f26e812cfb9 | /2020/python/day14/main.py | 8e92acdbecd40ce5fa87757a38adce198055220c | [] | no_license | andrewpickett/advent-of-code | aeb105f7e50cfe5b7ebc80e10bc58451b5a00ac6 | ab3fa828257fa0e9f64f07397d3012274b8bebaa | refs/heads/master | 2023-01-22T23:28:46.989329 | 2022-12-28T18:06:50 | 2022-12-28T18:06:50 | 224,327,063 | 2 | 0 | null | 2023-01-08T11:09:58 | 2019-11-27T02:27:23 | Python | UTF-8 | Python | false | false | 1,763 | py | from aoc_utils import run_with_timer
# One stripped line per entry from the puzzle input file.
data = [x.strip() for x in open("input.txt").readlines()]
def apply_mask(binary_string, repls, c):
    """Return *binary_string* with the character at every index in *repls* set to *c*.

    Used to force mask bits ('0'/'1') or mark floating bits ('X') in a
    36-character binary string.

    :param binary_string: the string to edit (not modified in place)
    :param repls: iterable of 0-based indices to overwrite
    :param c: single replacement character
    """
    # Work on a list of characters: each replacement is O(1) instead of the
    # O(len) string re-slicing the original did per index.
    chars = list(binary_string)
    for idx in repls:
        chars[idx] = c
    return ''.join(chars)
def part_one():
    """Mask each written value (v1 rules) and return the sum of all memory cells."""
    memory = {}
    force_zero, force_one = [], []
    for line in data:
        if 'mask' in line:
            mask = line.split(' = ')[1]
            force_zero = [i for i, ch in enumerate(mask) if ch == '0']
            force_one = [i for i, ch in enumerate(mask) if ch == '1']
        else:
            address, value = line.split(' = ')
            bits = f'{int(value):036b}'
            bits = apply_mask(bits, force_one, '1')
            bits = apply_mask(bits, force_zero, '0')
            memory[address] = int(bits, 2)
    return sum(memory.values())
def part_two():
    """Decode each write address with the mask (v2 floating bits) and sum memory."""
    memory = {}
    force_one, floating = [], []
    for line in data:
        if 'mask' in line:
            mask = line.split(' = ')[1]
            force_one = [i for i, ch in enumerate(mask) if ch == '1']
            floating = [i for i, ch in enumerate(mask) if ch == 'X']
        else:
            target, value = line.split(' = ')
            # target looks like 'mem[123]'; slice out the address number.
            template = f'{int(target[4:-1]):036b}'
            template = apply_mask(template, force_one, '1')
            template = apply_mask(template, floating, 'X')
            # Enumerate every 0/1 assignment of the floating ('X') positions.
            for combo in range(2 ** len(floating)):
                address = template
                for bit in format(combo, '0' + str(len(floating)) + 'b'):
                    address = address.replace('X', bit, 1)
                memory[int(address, 2)] = int(value)
    return sum(memory.values())
if __name__ == '__main__':
    # Time each part via aoc_utils.run_with_timer; known answers noted inline.
    run_with_timer(part_one)  # 15919415426101 -- took 6 ms
    run_with_timer(part_two)  # 3443997590975 -- took 198 ms
| [
"picketta@gmail.com"
] | picketta@gmail.com |
b0826b61172af755ffa20b7f6f225ad6dbdce743 | 0baf1a7e93dfd02957ab6ff001f01dc3dd692550 | /2.py | 93c8d21409ae44906c64227dfda8b4b8b6e401a4 | [] | no_license | prakhar1508/Hackerearth | f8b47ffe358242dfce3bb885f280ecc0cef72d64 | 4ce7753ced6b4c25490ee64671702b08e07e2369 | refs/heads/master | 2022-12-17T08:22:21.566632 | 2020-09-03T21:56:45 | 2020-09-03T21:56:45 | 292,557,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | print(sum(ord(c) for c in 'Jado&'))
// 420
| [
"noreply@github.com"
] | prakhar1508.noreply@github.com |
24d167c3c3c7e410b5e1252dc4a5f6d0ba0ffdda | 8528c7b7343624959bdef983c26071d990a4766a | /model.py | 4f6f1717d2eee3812fdb8eeab6e11da686be7cc1 | [] | no_license | Glasiermedic/Ojbect_detection | 3933ebddc030e23148f695aa3aac502fef656f09 | e2623423530166c2ae1d55505a3167c9acb32c94 | refs/heads/master | 2022-11-09T17:58:24.101737 | 2020-06-25T12:46:11 | 2020-06-25T12:46:11 | 274,912,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,850 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 14:10:45 2020
@author: sewmo
"""
import torch
from torch import nn
from utils import *
import torch.nn.functional as F
from math import sqrt
from itertools import product as product
import torchvision
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class VGGBase(nn.Module):
    """
    VGG base convolutions to produce lower-level feature maps.
    """
    def __init__(self):
        super(VGGBase, self).__init__()
        # Standard convolutional layers in VGG16
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)  # stride = 1, by default
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)  # ceiling (not floor) here for even dims
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)  # retains size because stride is 1 (and padding)
        # Replacements for FC6 and FC7 in VGG16
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)  # atrous convolution
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
        # Load pretrained layers
        self.load_pretrained_layers()
    def forward(self, image):
        """
        Forward propagation.
        :param image: images, a tensor of dimensions (N, 3, 300, 300)
        :return: lower-level feature maps conv4_3 and conv7
        """
        out = F.relu(self.conv1_1(image))  # (N, 64, 300, 300)
        out = F.relu(self.conv1_2(out))  # (N, 64, 300, 300)
        out = self.pool1(out)  # (N, 64, 150, 150)
        out = F.relu(self.conv2_1(out))  # (N, 128, 150, 150)
        out = F.relu(self.conv2_2(out))  # (N, 128, 150, 150)
        out = self.pool2(out)  # (N, 128, 75, 75)
        out = F.relu(self.conv3_1(out))  # (N, 256, 75, 75)
        out = F.relu(self.conv3_2(out))  # (N, 256, 75, 75)
        out = F.relu(self.conv3_3(out))  # (N, 256, 75, 75)
        out = self.pool3(out)  # (N, 256, 38, 38), it would have been 37 if not for ceil_mode = True
        out = F.relu(self.conv4_1(out))  # (N, 512, 38, 38)
        out = F.relu(self.conv4_2(out))  # (N, 512, 38, 38)
        out = F.relu(self.conv4_3(out))  # (N, 512, 38, 38)
        conv4_3_feats = out  # (N, 512, 38, 38)
        out = self.pool4(out)  # (N, 512, 19, 19)
        out = F.relu(self.conv5_1(out))  # (N, 512, 19, 19)
        out = F.relu(self.conv5_2(out))  # (N, 512, 19, 19)
        out = F.relu(self.conv5_3(out))  # (N, 512, 19, 19)
        out = self.pool5(out)  # (N, 512, 19, 19), pool5 does not reduce dimensions
        out = F.relu(self.conv6(out))  # (N, 1024, 19, 19)
        conv7_feats = F.relu(self.conv7(out))  # (N, 1024, 19, 19)
        # Lower-level feature maps
        return conv4_3_feats, conv7_feats
    def load_pretrained_layers(self):
        """
        As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
        There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
        We copy these parameters into our network. It's straightforward for conv1 to conv5.
        However, the original VGG-16 does not contain the conv6 and con7 layers.
        Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py.
        """
        # Current state of base
        state_dict = self.state_dict()
        param_names = list(state_dict.keys())
        # Pretrained VGG base
        # NOTE(review): downloads VGG-16 weights on first use -- requires network
        # access or a populated torchvision cache.
        pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()
        pretrained_param_names = list(pretrained_state_dict.keys())
        # Transfer conv. parameters from pretrained model to current model
        # (relies on both state dicts listing conv layers in the same order)
        for i, param in enumerate(param_names[:-4]):  # excluding conv6 and conv7 parameters
            state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]
        # Convert fc6, fc7 to convolutional layers, and subsample (by decimation) to sizes of conv6 and conv7
        # fc6
        conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7)  # (4096, 512, 7, 7)
        conv_fc6_bias = pretrained_state_dict['classifier.0.bias']  # (4096)
        state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3])  # (1024, 512, 3, 3)
        state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])  # (1024)
        # fc7
        conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1)  # (4096, 4096, 1, 1)
        conv_fc7_bias = pretrained_state_dict['classifier.3.bias']  # (4096)
        state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None])  # (1024, 1024, 1, 1)
        state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])  # (1024)
        # Note: an FC layer of size (K) operating on a flattened version (C*H*W) of a 2D image of size (C, H, W)...
        # ...is equivalent to a convolutional layer with kernel size (H, W), input channels C, output channels K...
        # ...operating on the 2D image of size (C, H, W) without padding
        self.load_state_dict(state_dict)
        print("\nLoaded base model.\n")
class AuxiliaryConvolutions(nn.Module):
    """
    Additional convolutions to produce higher-level feature maps.
    """
    def __init__(self):
        super(AuxiliaryConvolutions, self).__init__()
        # Auxiliary/additional convolutions on top of the VGG base
        self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)  # stride = 1, by default
        self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)  # dim. reduction because stride > 1
        self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
        self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)  # dim. reduction because stride > 1
        self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
        self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # dim. reduction because padding = 0
        self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
        self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # dim. reduction because padding = 0
        # Initialize convolutions' parameters
        self.init_conv2d()
    def init_conv2d(self):
        """
        Initialize convolution parameters: Xavier-uniform weights, zero biases.
        """
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.)
    def forward(self, conv7_feats):
        """
        Forward propagation.
        :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
        :return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
        """
        out = F.relu(self.conv8_1(conv7_feats))  # (N, 256, 19, 19)
        out = F.relu(self.conv8_2(out))  # (N, 512, 10, 10)
        conv8_2_feats = out  # (N, 512, 10, 10)
        out = F.relu(self.conv9_1(out))  # (N, 128, 10, 10)
        out = F.relu(self.conv9_2(out))  # (N, 256, 5, 5)
        conv9_2_feats = out  # (N, 256, 5, 5)
        out = F.relu(self.conv10_1(out))  # (N, 128, 5, 5)
        out = F.relu(self.conv10_2(out))  # (N, 256, 3, 3)
        conv10_2_feats = out  # (N, 256, 3, 3)
        out = F.relu(self.conv11_1(out))  # (N, 128, 3, 3)
        conv11_2_feats = F.relu(self.conv11_2(out))  # (N, 256, 1, 1)
        # Higher-level feature maps
        return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats
class PredictionConvolutions(nn.Module):
    """
    Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
    The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
    See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
    The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
    A high score for 'background' = no object.
    """
    def __init__(self, n_classes):
        """
        :param n_classes: number of different types of objects
        """
        super(PredictionConvolutions, self).__init__()
        self.n_classes = n_classes
        # Number of prior-boxes we are considering per position in each feature map
        # (must agree with the aspect-ratio lists in SSD300.create_prior_boxes)
        n_boxes = {'conv4_3': 4,
                   'conv7': 6,
                   'conv8_2': 6,
                   'conv9_2': 6,
                   'conv10_2': 4,
                   'conv11_2': 4}
        # 4 prior-boxes implies we use 4 different aspect ratios, etc.
        # Localization prediction convolutions (predict offsets w.r.t prior-boxes)
        self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1)
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size=3, padding=1)
        self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1)
        # Class prediction convolutions (predict classes in localization boxes)
        self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1)
        # Initialize convolutions' parameters
        self.init_conv2d()
    def init_conv2d(self):
        """
        Initialize convolution parameters: Xavier-uniform weights, zero biases.
        """
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.)
    def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats):
        """
        Forward propagation.
        :param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38)
        :param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
        :param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10)
        :param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5)
        :param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3)
        :param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1)
        :return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
        """
        batch_size = conv4_3_feats.size(0)
        # Predict localization boxes' bounds (as offsets w.r.t prior-boxes)
        l_conv4_3 = self.loc_conv4_3(conv4_3_feats)  # (N, 16, 38, 38)
        l_conv4_3 = l_conv4_3.permute(0, 2, 3,
                                      1).contiguous()  # (N, 38, 38, 16), to match prior-box order (after .view())
        # (.contiguous() ensures it is stored in a contiguous chunk of memory, needed for .view() below)
        l_conv4_3 = l_conv4_3.view(batch_size, -1, 4)  # (N, 5776, 4), there are a total 5776 boxes on this feature map
        l_conv7 = self.loc_conv7(conv7_feats)  # (N, 24, 19, 19)
        l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous()  # (N, 19, 19, 24)
        l_conv7 = l_conv7.view(batch_size, -1, 4)  # (N, 2166, 4), there are a total 2116 boxes on this feature map
        l_conv8_2 = self.loc_conv8_2(conv8_2_feats)  # (N, 24, 10, 10)
        l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous()  # (N, 10, 10, 24)
        l_conv8_2 = l_conv8_2.view(batch_size, -1, 4)  # (N, 600, 4)
        l_conv9_2 = self.loc_conv9_2(conv9_2_feats)  # (N, 24, 5, 5)
        l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous()  # (N, 5, 5, 24)
        l_conv9_2 = l_conv9_2.view(batch_size, -1, 4)  # (N, 150, 4)
        l_conv10_2 = self.loc_conv10_2(conv10_2_feats)  # (N, 16, 3, 3)
        l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous()  # (N, 3, 3, 16)
        l_conv10_2 = l_conv10_2.view(batch_size, -1, 4)  # (N, 36, 4)
        l_conv11_2 = self.loc_conv11_2(conv11_2_feats)  # (N, 16, 1, 1)
        l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous()  # (N, 1, 1, 16)
        l_conv11_2 = l_conv11_2.view(batch_size, -1, 4)  # (N, 4, 4)
        # Predict classes in localization boxes
        c_conv4_3 = self.cl_conv4_3(conv4_3_feats)  # (N, 4 * n_classes, 38, 38)
        c_conv4_3 = c_conv4_3.permute(0, 2, 3,
                                      1).contiguous()  # (N, 38, 38, 4 * n_classes), to match prior-box order (after .view())
        c_conv4_3 = c_conv4_3.view(batch_size, -1,
                                   self.n_classes)  # (N, 5776, n_classes), there are a total 5776 boxes on this feature map
        c_conv7 = self.cl_conv7(conv7_feats)  # (N, 6 * n_classes, 19, 19)
        c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous()  # (N, 19, 19, 6 * n_classes)
        c_conv7 = c_conv7.view(batch_size, -1,
                               self.n_classes)  # (N, 2166, n_classes), there are a total 2116 boxes on this feature map
        c_conv8_2 = self.cl_conv8_2(conv8_2_feats)  # (N, 6 * n_classes, 10, 10)
        c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous()  # (N, 10, 10, 6 * n_classes)
        c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes)  # (N, 600, n_classes)
        c_conv9_2 = self.cl_conv9_2(conv9_2_feats)  # (N, 6 * n_classes, 5, 5)
        c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous()  # (N, 5, 5, 6 * n_classes)
        c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes)  # (N, 150, n_classes)
        c_conv10_2 = self.cl_conv10_2(conv10_2_feats)  # (N, 4 * n_classes, 3, 3)
        c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous()  # (N, 3, 3, 4 * n_classes)
        c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes)  # (N, 36, n_classes)
        c_conv11_2 = self.cl_conv11_2(conv11_2_feats)  # (N, 4 * n_classes, 1, 1)
        c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous()  # (N, 1, 1, 4 * n_classes)
        c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes)  # (N, 4, n_classes)
        # A total of 8732 boxes
        # Concatenate in this specific order (i.e. must match the order of the prior-boxes)
        locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2], dim=1)  # (N, 8732, 4)
        classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2],
                                   dim=1)  # (N, 8732, n_classes)
        return locs, classes_scores
class SSD300(nn.Module):
    """
    The SSD300 network - encapsulates the base VGG network, auxiliary, and prediction convolutions.
    """
    def __init__(self, n_classes):
        # n_classes includes the 'background' class at index 0.
        super(SSD300, self).__init__()
        self.n_classes = n_classes
        self.base = VGGBase()
        self.aux_convs = AuxiliaryConvolutions()
        self.pred_convs = PredictionConvolutions(n_classes)
        # Since lower level features (conv4_3_feats) have considerably larger scales, we take the L2 norm and rescale
        # Rescale factor is initially set at 20, but is learned for each channel during back-prop
        self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1))  # there are 512 channels in conv4_3_feats
        nn.init.constant_(self.rescale_factors, 20)
        # Prior boxes
        self.priors_cxcy = self.create_prior_boxes()
    def forward(self, image):
        """
        Forward propagation.
        :param image: images, a tensor of dimensions (N, 3, 300, 300)
        :return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
        """
        # Run VGG base network convolutions (lower level feature map generators)
        conv4_3_feats, conv7_feats = self.base(image)  # (N, 512, 38, 38), (N, 1024, 19, 19)
        # Rescale conv4_3 after L2 norm
        norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt()  # (N, 1, 38, 38)
        conv4_3_feats = conv4_3_feats / norm  # (N, 512, 38, 38)
        conv4_3_feats = conv4_3_feats * self.rescale_factors  # (N, 512, 38, 38)
        # (PyTorch autobroadcasts singleton dimensions during arithmetic)
        # Run auxiliary convolutions (higher level feature map generators)
        conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = \
            self.aux_convs(conv7_feats)  # (N, 512, 10, 10),  (N, 256, 5, 5), (N, 256, 3, 3), (N, 256, 1, 1)
        # Run prediction convolutions (predict offsets w.r.t prior-boxes and classes in each resulting localization box)
        locs, classes_scores = self.pred_convs(conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats,
                                               conv11_2_feats)  # (N, 8732, 4), (N, 8732, n_classes)
        return locs, classes_scores
    def create_prior_boxes(self):
        """
        Create the 8732 prior (default) boxes for the SSD300, as defined in the paper.
        :return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4)
        """
        fmap_dims = {'conv4_3': 38,
                     'conv7': 19,
                     'conv8_2': 10,
                     'conv9_2': 5,
                     'conv10_2': 3,
                     'conv11_2': 1}
        obj_scales = {'conv4_3': 0.1,
                      'conv7': 0.2,
                      'conv8_2': 0.375,
                      'conv9_2': 0.55,
                      'conv10_2': 0.725,
                      'conv11_2': 0.9}
        aspect_ratios = {'conv4_3': [1., 2., 0.5],
                         'conv7': [1., 2., 3., 0.5, .333],
                         'conv8_2': [1., 2., 3., 0.5, .333],
                         'conv9_2': [1., 2., 3., 0.5, .333],
                         'conv10_2': [1., 2., 0.5],
                         'conv11_2': [1., 2., 0.5]}
        fmaps = list(fmap_dims.keys())
        prior_boxes = []
        for k, fmap in enumerate(fmaps):
            for i in range(fmap_dims[fmap]):
                for j in range(fmap_dims[fmap]):
                    # Center of this cell, as a fraction of the image size.
                    cx = (j + 0.5) / fmap_dims[fmap]
                    cy = (i + 0.5) / fmap_dims[fmap]
                    for ratio in aspect_ratios[fmap]:
                        prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])
                        # For an aspect ratio of 1, use an additional prior whose scale is the geometric mean of the
                        # scale of the current feature map and the scale of the next feature map
                        if ratio == 1.:
                            try:
                                additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]])
                            # For the last feature map, there is no "next" feature map
                            except IndexError:
                                additional_scale = 1.
                            prior_boxes.append([cx, cy, additional_scale, additional_scale])
        prior_boxes = torch.FloatTensor(prior_boxes).to(device)  # (8732, 4)
        prior_boxes.clamp_(0, 1)  # (8732, 4)
        return prior_boxes
    def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
        """
        Decipher the 8732 locations and class scores (output of ths SSD300) to detect objects.
        For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.
        :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
        :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
        :param min_score: minimum threshold for a box to be considered a match for a certain class
        :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
        :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k'
        :return: detections (boxes, labels, and scores), lists of length batch_size
        """
        batch_size = predicted_locs.size(0)
        n_priors = self.priors_cxcy.size(0)
        predicted_scores = F.softmax(predicted_scores, dim=2)  # (N, 8732, n_classes)
        # Lists to store final predicted boxes, labels, and scores for all images
        all_images_boxes = list()
        all_images_labels = list()
        all_images_scores = list()
        assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
        for i in range(batch_size):
            # Decode object coordinates from the form we regressed predicted boxes to
            decoded_locs = cxcy_to_xy(
                gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy))  # (8732, 4), these are fractional pt. coordinates
            # Lists to store boxes and scores for this image
            image_boxes = list()
            image_labels = list()
            image_scores = list()
            max_scores, best_label = predicted_scores[i].max(dim=1)  # (8732)
            # Check for each class
            for c in range(1, self.n_classes):
                # Keep only predicted boxes and scores where scores for this class are above the minimum score
                class_scores = predicted_scores[i][:, c]  # (8732)
                score_above_min_score = class_scores > min_score  # torch.uint8 (byte) tensor, for indexing
                n_above_min_score = score_above_min_score.sum().item()
                if n_above_min_score == 0:
                    continue
                class_scores = class_scores[score_above_min_score]  # (n_qualified), n_min_score <= 8732
                class_decoded_locs = decoded_locs[score_above_min_score]  # (n_qualified, 4)
                # Sort predicted boxes and scores by scores
                class_scores, sort_ind = class_scores.sort(dim=0, descending=True)  # (n_qualified), (n_min_score)
                class_decoded_locs = class_decoded_locs[sort_ind]  # (n_min_score, 4)
                # Find the overlap between predicted boxes
                # (O(n^2) in qualified boxes: full pairwise overlap matrix)
                overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs)  # (n_qualified, n_min_score)
                # Non-Maximum Suppression (NMS)
                # A torch.uint8 (byte) tensor to keep track of which predicted boxes to suppress
                # 1 implies suppress, 0 implies don't suppress
                suppress = torch.zeros((n_above_min_score)).bool().to(device)  # (n_qualified)
                # Consider each box in order of decreasing scores
                for box in range(class_decoded_locs.size(0)):
                    # If this box is already marked for suppression
                    if suppress[box] == 1:
                        continue
                    # Suppress boxes whose overlaps (with this box) are greater than maximum overlap
                    # Find such boxes and update suppress indices
                    suppress = suppress | (overlap[box] > max_overlap)
                    # The max operation retains previously suppressed boxes, like an 'OR' operation
                    # Don't suppress this box, even though it has an overlap of 1 with itself
                    suppress[box] = 0
                # Store only unsuppressed boxes for this class
                image_boxes.append(class_decoded_locs[~suppress])
                image_labels.append(
                    torch.LongTensor(
                        (~suppress).sum().item() * [c]).to(device)
                )
                image_scores.append(class_scores[~suppress])
            # If no object in any class is found, store a placeholder for 'background'
            if len(image_boxes) == 0:
                image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
                image_labels.append(torch.LongTensor([0]).to(device))
                image_scores.append(torch.FloatTensor([0.]).to(device))
            # Concatenate into single tensors
            image_boxes = torch.cat(image_boxes, dim=0)  # (n_objects, 4)
            image_labels = torch.cat(image_labels, dim=0)  # (n_objects)
            image_scores = torch.cat(image_scores, dim=0)  # (n_objects)
            n_objects = image_scores.size(0)
            # Keep only the top k objects
            if n_objects > top_k:
                image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
                image_scores = image_scores[:top_k]  # (top_k)
                image_boxes = image_boxes[sort_ind][:top_k]  # (top_k, 4)
                image_labels = image_labels[sort_ind][:top_k]  # (top_k)
            # Append to lists that store predicted boxes and scores for all images
            all_images_boxes.append(image_boxes)
            all_images_labels.append(image_labels)
            all_images_scores.append(image_scores)
        return all_images_boxes, all_images_labels, all_images_scores  # lists of length batch_size
class MultiBoxLoss(nn.Module):
    """
    The MultiBox loss, a loss function for object detection.
    This is a combination of:
    (1) a localization loss for the predicted locations of the boxes, and
    (2) a confidence loss for the predicted class scores.
    """
    def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
        """
        :param priors_cxcy: prior boxes in center-size form, a tensor of dimensions (8732, 4)
        :param threshold: minimum Jaccard overlap for a prior to be matched to an object
        :param neg_pos_ratio: number of hard negatives kept per positive prior
        :param alpha: weight of the localization loss relative to the confidence loss
        """
        super(MultiBoxLoss, self).__init__()
        self.priors_cxcy = priors_cxcy
        self.priors_xy = cxcy_to_xy(priors_cxcy)
        self.threshold = threshold
        self.neg_pos_ratio = neg_pos_ratio
        self.alpha = alpha
        # NOTE(review): named "smooth_l1" but nn.L1Loss is plain L1; the SSD paper
        # uses Smooth L1 -- confirm whether nn.SmoothL1Loss was intended.
        self.smooth_l1 = nn.L1Loss()
        # FIX: `reduce=False` is deprecated; `reduction='none'` is the equivalent
        # modern spelling (per-element losses, no averaging).
        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')
    def forward(self, predicted_locs, predicted_scores, boxes, labels):
        """
        Forward propagation.
        :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
        :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
        :param boxes: true object bounding boxes in boundary coordinates, a list of N tensors
        :param labels: true object labels, a list of N tensors
        :return: multibox loss, a scalar
        """
        batch_size = predicted_locs.size(0)
        n_priors = self.priors_cxcy.size(0)
        n_classes = predicted_scores.size(2)
        assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
        true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(device)  # (N, 8732, 4)
        true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(device)  # (N, 8732)
        # For each image
        for i in range(batch_size):
            n_objects = boxes[i].size(0)
            overlap = find_jaccard_overlap(boxes[i],
                                           self.priors_xy)  # (n_objects, 8732)
            # For each prior, find the object that has the maximum overlap
            overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0)  # (8732)
            # We don't want a situation where an object is not represented in our positive (non-background) priors -
            # 1. An object might not be the best object for all priors, and is therefore not in object_for_each_prior.
            # 2. All priors with the object may be assigned as background based on the threshold (0.5).
            # To remedy this -
            # First, find the prior that has the maximum overlap for each object.
            _, prior_for_each_object = overlap.max(dim=1)  # (N_o)
            # Then, assign each object to the corresponding maximum-overlap-prior. (This fixes 1.)
            object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to(device)
            # To ensure these priors qualify, artificially give them an overlap of greater than 0.5. (This fixes 2.)
            overlap_for_each_prior[prior_for_each_object] = 1.
            # Labels for each prior
            label_for_each_prior = labels[i][object_for_each_prior]  # (8732)
            # Set priors whose overlaps with objects are less than the threshold to be background (no object)
            label_for_each_prior[overlap_for_each_prior < self.threshold] = 0  # (8732)
            # Store
            true_classes[i] = label_for_each_prior
            # Encode center-size object coordinates into the form we regressed predicted boxes to
            true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy)  # (8732, 4)
        # Identify priors that are positive (object/non-background)
        positive_priors = true_classes != 0  # (N, 8732)
        # LOCALIZATION LOSS
        # Localization loss is computed only over positive (non-background) priors
        loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors])  # (), scalar
        # Note: indexing with a torch.uint8 (byte) tensor flattens the tensor when indexing is across multiple dimensions (N & 8732)
        # So, if predicted_locs has the shape (N, 8732, 4), predicted_locs[positive_priors] will have (total positives, 4)
        # CONFIDENCE LOSS
        # Confidence loss is computed over positive priors and the most difficult (hardest) negative priors in each image
        # That is, FOR EACH IMAGE,
        # we will take the hardest (neg_pos_ratio * n_positives) negative priors, i.e where there is maximum loss
        # This is called Hard Negative Mining - it concentrates on hardest negatives in each image, and also minimizes pos/neg imbalance
        # Number of positive and hard-negative priors per image
        n_positives = positive_priors.sum(dim=1)  # (N)
        n_hard_negatives = self.neg_pos_ratio * n_positives  # (N)
        # First, find the loss for all priors
        conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1))  # (N * 8732)
        conf_loss_all = conf_loss_all.view(batch_size, n_priors)  # (N, 8732)
        # We already know which priors are positive
        conf_loss_pos = conf_loss_all[positive_priors]  # (sum(n_positives))
        # Next, find which priors are hard-negative
        # To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives
        conf_loss_neg = conf_loss_all.clone()  # (N, 8732)
        conf_loss_neg[positive_priors] = 0.  # (N, 8732), positive priors are ignored (never in top n_hard_negatives)
        conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True)  # (N, 8732), sorted by decreasing hardness
        hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to(device)  # (N, 8732)
        hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1)  # (N, 8732)
        conf_loss_hard_neg = conf_loss_neg[hard_negatives]  # (sum(n_hard_negatives))
        # As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors
        conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float()  # (), scalar
        # TOTAL LOSS
        return conf_loss + self.alpha * loc_loss
"wrolfson@glasierstudios.com"
] | wrolfson@glasierstudios.com |
462725ad54b66a67d7cdbd3aed796e114c557f90 | 950564a291a67e1dede14dce5d6858a6c2801423 | /DBcm.py | 2fac00a65e9380ae0e90b3049cf24ad87d2b068f | [] | no_license | YarSml/Flaks_1 | 0831497f96d6cb310c37d0f2ca7b893487364cfa | 78db4c852eee2b7a35d9ae9007708fc6184ebf65 | refs/heads/master | 2023-03-31T04:26:55.859170 | 2021-04-12T14:48:22 | 2021-04-12T14:48:22 | 353,724,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | import mysql.connector
class ConnectionError(Exception):
    """Raised when the MySQL server cannot be reached (interface failure).
    NOTE(review): shadows Python 3's builtin ConnectionError.
    """
    pass
class CredentialsError(Exception):
    """Raised when connect() rejects the parameters (e.g. bad credentials)."""
    pass
class SQLError(Exception):
    """Raised when the executed SQL causes a programming error."""
    pass
class UseDatabase:
    """Context manager wrapping a MySQL connection.
    ``with UseDatabase(config) as cursor:`` connects with the supplied
    parameters, yields a cursor, and on exit commits and closes both the
    cursor and the connection, translating driver errors into this
    module's exception types.
    """
    def __init__(self, config: dict) -> None:
        # config: keyword arguments passed to mysql.connector.connect()
        self.configuration = config
    def __enter__(self) -> 'cursor':
        try:
            self.conn = mysql.connector.connect(**self.configuration)
            self.cursor = self.conn.cursor()
            return self.cursor
        except mysql.connector.errors.InterfaceError as err:
            # server unreachable / interface-level failure
            raise ConnectionError(err)
        except mysql.connector.errors.ProgrammingError as err:
            # connect() rejected the parameters (e.g. bad credentials)
            raise CredentialsError(err)
    def __exit__(self, exc_type, exc_value, exc_trace) -> None:
        # Always flush and release resources, even if the body raised.
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
        if exc_type is mysql.connector.errors.ProgrammingError:
            raise SQLError(exc_value)
        elif exc_trace:
            # NOTE(review): exc_trace is a traceback object, not an
            # exception class — calling it on the next line raises
            # TypeError instead of re-raising the original error.
            # Returning False (or simply falling through) would let the
            # original exception propagate correctly.
raise exc_trace(exc_value) | [
"manowa.87.03.20@gmail.com"
] | manowa.87.03.20@gmail.com |
9ff0d1cf9114d2d21e298a7b7bf8c9fe3e26d965 | 2c8d97fee18ecce7046a834c4a50c9a5b94cc40e | /src/pyamf/amf0.py | 9b0dc4c4b6051370fcdaaa32370e91e71c58b922 | [] | no_license | psalty/photolog_gae | 4a7e534e2b4717e4c25719c458b7a7012af6365f | e46d576a5d900a920dff574989400086c61ed33c | refs/heads/master | 2016-09-01T17:15:50.715533 | 2011-01-09T23:30:11 | 2011-01-09T23:30:11 | 1,221,969 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,027 | py | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
AMF0 implementation.
C{AMF0} supports the basic data types used for the NetConnection, NetStream,
LocalConnection, SharedObjects and other classes in the Adobe Flash Player.
@since: 0.1
@see: U{Official AMF0 Specification in English (external)
<http://opensource.adobe.com/wiki/download/attachments/1114283/amf0_spec_121207.pdf>}
@see: U{Official AMF0 Specification in Japanese (external)
<http://opensource.adobe.com/wiki/download/attachments/1114283/JP_amf0_spec_121207.pdf>}
@see: U{AMF documentation on OSFlash (external)
<http://osflash.org/documentation/amf>}
"""
import datetime
import pyamf
from pyamf import util, codec, xml
#: Represented as 9 bytes: 1 byte for C{0x00} and 8 bytes a double
#: representing the value of the number.
TYPE_NUMBER = '\x00'
#: Represented as 2 bytes: 1 byte for C{0x01} and a second, C{0x00}
#: for C{False}, C{0x01} for C{True}.
TYPE_BOOL = '\x01'
#: Represented as 3 bytes + len(String): 1 byte C{0x02}, then a UTF8 string,
#: including the top two bytes representing string length as a C{int}.
TYPE_STRING = '\x02'
#: Represented as 1 byte, C{0x03}, then pairs of UTF8 string, the key, and
#: an AMF element, ended by three bytes, C{0x00} C{0x00} C{0x09}.
TYPE_OBJECT = '\x03'
#: MovieClip does not seem to be supported by Remoting.
#: It may be used by other AMF clients such as SharedObjects.
TYPE_MOVIECLIP = '\x04'
#: 1 single byte, C{0x05} indicates null.
TYPE_NULL = '\x05'
#: 1 single byte, C{0x06} indicates null.
TYPE_UNDEFINED = '\x06'
#: When an ActionScript object refers to itself, such C{this.self = this},
#: or when objects are repeated within the same scope (for example, as the
#: two parameters of the same function called), a code of C{0x07} and an
#: C{int}, the reference number, are written.
TYPE_REFERENCE = '\x07'
#: A MixedArray is indicated by code C{0x08}, then a Long representing the
#: highest numeric index in the array, or 0 if there are none or they are
#: all negative. After that follow the elements in key : value pairs.
TYPE_MIXEDARRAY = '\x08'
#: @see: L{TYPE_OBJECT}
TYPE_OBJECTTERM = '\x09'
#: An array is indicated by C{0x0A}, then a Long for array length, then the
#: array elements themselves. Arrays are always sparse; values for
#: inexistant keys are set to null (C{0x06}) to maintain sparsity.
TYPE_ARRAY = '\x0A'
#: Date is represented as C{0x0B}, then a double, then an C{int}. The double
#: represents the number of milliseconds since 01/01/1970. The C{int} represents
#: the timezone offset in minutes between GMT. Note for the latter than values
#: greater than 720 (12 hours) are represented as M{2^16} - the value. Thus GMT+1
#: is 60 while GMT-5 is 65236.
TYPE_DATE = '\x0B'
#: LongString is reserved for strings larger then M{2^16} characters long. It
#: is represented as C{0x0C} then a LongUTF.
TYPE_LONGSTRING = '\x0C'
#: Trying to send values which don't make sense, such as prototypes, functions,
#: built-in objects, etc. will be indicated by a single C{00x0D} byte.
TYPE_UNSUPPORTED = '\x0D'
#: Remoting Server -> Client only.
#: @see: L{RecordSet}
#: @see: U{RecordSet structure on OSFlash
#: <http://osflash.org/documentation/amf/recordset>}
TYPE_RECORDSET = '\x0E'
#: The XML element is indicated by C{0x0F} and followed by a LongUTF containing
#: the string representation of the XML object. The receiving gateway may which
#: to wrap this string inside a language-specific standard XML object, or simply
#: pass as a string.
TYPE_XML = '\x0F'
#: A typed object is indicated by C{0x10}, then a UTF string indicating class
#: name, and then the same structure as a normal C{0x03} Object. The receiving
#: gateway may use a mapping scheme, or send back as a vanilla object or
#: associative array.
TYPE_TYPEDOBJECT = '\x10'
#: An AMF message sent from an AVM+ client such as the Flash Player 9 may break
#: out into L{AMF3<pyamf.amf3>} mode. In this case the next byte will be the
#: AMF3 type code and the data will be in AMF3 format until the decoded object
#: reaches it's logical conclusion (for example, an object has no more keys).
TYPE_AMF3 = '\x11'
class Context(codec.Context):
    """
    AMF0 codec context.
    Lazily creates and caches companion AMF3 codecs (kept in C{extra}) so
    that C{TYPE_AMF3} payloads embedded in an AMF0 stream share reference
    tables for the lifetime of this context.
    """
    def clear(self):
        codec.Context.clear(self)
        # Also reset any cached AMF3 codec contexts so stale references
        # do not leak into the next message.
        for cache_key in ('amf3_encoder', 'amf3_decoder'):
            amf3_codec = self.extra.get(cache_key, None)
            if amf3_codec:
                amf3_codec.context.clear()
    def getAMF3Encoder(self, amf0_encoder):
        """Return the cached AMF3 encoder, creating it on first use."""
        try:
            return self.extra['amf3_encoder']
        except KeyError:
            pass
        encoder = pyamf.get_encoder(pyamf.AMF3, stream=amf0_encoder.stream,
            timezone_offset=amf0_encoder.timezone_offset)
        self.extra['amf3_encoder'] = encoder
        return encoder
    def getAMF3Decoder(self, amf0_decoder):
        """Return the cached AMF3 decoder, creating it on first use."""
        try:
            return self.extra['amf3_decoder']
        except KeyError:
            pass
        decoder = pyamf.get_decoder(pyamf.AMF3, stream=amf0_decoder.stream,
            timezone_offset=amf0_decoder.timezone_offset)
        self.extra['amf3_decoder'] = decoder
        return decoder
class Decoder(codec.Decoder):
    """
    Decodes an AMF0 stream.
    Each AMF0 type-marker byte is mapped (via L{getTypeFunc}) to the
    C{read*} method that decodes the payload following that marker.
    """
    def buildContext(self):
        # AMF0 needs its own Context subclass so TYPE_AMF3 payloads can
        # share a cached AMF3 decoder (see L{Context.getAMF3Decoder}).
        return Context()
    def getTypeFunc(self, data):
        # great for coverage, sucks for readability
        # Falls through (returning None) for unrecognised type markers.
        if data == TYPE_NUMBER:
            return self.readNumber
        elif data == TYPE_BOOL:
            return self.readBoolean
        elif data == TYPE_STRING:
            return self.readString
        elif data == TYPE_OBJECT:
            return self.readObject
        elif data == TYPE_NULL:
            return self.readNull
        elif data == TYPE_UNDEFINED:
            return self.readUndefined
        elif data == TYPE_REFERENCE:
            return self.readReference
        elif data == TYPE_MIXEDARRAY:
            return self.readMixedArray
        elif data == TYPE_ARRAY:
            return self.readList
        elif data == TYPE_DATE:
            return self.readDate
        elif data == TYPE_LONGSTRING:
            return self.readLongString
        elif data == TYPE_UNSUPPORTED:
            # unsupported values decode to None, same as TYPE_NULL
            return self.readNull
        elif data == TYPE_XML:
            return self.readXML
        elif data == TYPE_TYPEDOBJECT:
            return self.readTypedObject
        elif data == TYPE_AMF3:
            return self.readAMF3
    def readNumber(self):
        """
        Reads a ActionScript C{Number} value.
        In ActionScript 1 and 2 the C{NumberASTypes} type represents all numbers,
        both floats and integers.
        @rtype: C{int} or C{float}
        """
        # All AMF0 numbers are doubles on the wire; integral values are
        # normalised back to ints by L{_check_for_int}.
        return _check_for_int(self.stream.read_double())
    def readBoolean(self):
        """
        Reads a ActionScript C{Boolean} value.
        @rtype: C{bool}
        @return: Boolean.
        """
        return bool(self.stream.read_uchar())
    def readString(self, bytes=False):
        """
        Reads a C{string} from the stream. If bytes is C{True} then you will get
        the raw data read from the stream, otherwise a string that has been
        B{utf-8} decoded.
        """
        # 16-bit length prefix followed by the raw bytes
        l = self.stream.read_ushort()
        b = self.stream.read(l)
        if bytes:
            return b
        return self.context.getStringForBytes(b)
    def readNull(self):
        """
        Reads a ActionScript C{null} value.
        """
        return None
    def readUndefined(self):
        """
        Reads an ActionScript C{undefined} value.
        @return: L{Undefined<pyamf.Undefined>}
        """
        return pyamf.Undefined
    def readMixedArray(self):
        """
        Read mixed array.
        @rtype: L{pyamf.MixedArray}
        """
        # TODO: something with the length/strict
        self.stream.read_ulong() # length
        obj = pyamf.MixedArray()
        self.context.addObject(obj)
        attrs = self.readObjectAttributes(obj)
        for key in attrs.keys():
            try:
                # keys that parse as integers become int keys so numeric
                # indices round-trip (obj[0] rather than obj['0'])
                key = int(key)
            except ValueError:
                pass
            obj[key] = attrs[key]
        return obj
    def readList(self):
        """
        Read a C{list} from the data stream.
        """
        obj = []
        # register before reading elements so self-references resolve
        self.context.addObject(obj)
        l = self.stream.read_ulong()
        for i in xrange(l):
            obj.append(self.readElement())
        return obj
    def readTypedObject(self):
        """
        Reads an aliased ActionScript object from the stream and attempts to
        'cast' it into a python class.
        @see: L{pyamf.register_class}
        """
        class_alias = self.readString()
        try:
            alias = self.context.getClassAlias(class_alias)
        except pyamf.UnknownClassAlias:
            if self.strict:
                raise
            # non-strict mode: fall back to a generic typed wrapper
            alias = pyamf.TypedObjectClassAlias(class_alias)
        obj = alias.createInstance(codec=self)
        self.context.addObject(obj)
        attrs = self.readObjectAttributes(obj)
        alias.applyAttributes(obj, attrs, codec=self)
        return obj
    def readAMF3(self):
        """
        Read AMF3 elements from the data stream.
        @return: The AMF3 element read from the stream
        """
        return self.context.getAMF3Decoder(self).readElement()
    def readObjectAttributes(self, obj):
        """
        Reads C{name: value} pairs until the 3-byte object terminator
        (C{0x00 0x00 0x09}) is reached. C{obj} is accepted but not used.
        """
        obj_attrs = {}
        key = self.readString(True)
        while self.stream.peek() != TYPE_OBJECTTERM:
            obj_attrs[key] = self.readElement()
            key = self.readString(True)
        # discard the end marker (TYPE_OBJECTTERM)
        self.stream.read(1)
        return obj_attrs
    def readObject(self):
        """
        Reads an anonymous object from the data stream.
        @rtype: L{ASObject<pyamf.ASObject>}
        """
        obj = pyamf.ASObject()
        self.context.addObject(obj)
        obj.update(self.readObjectAttributes(obj))
        return obj
    def readReference(self):
        """
        Reads a reference from the data stream.
        @raise pyamf.ReferenceError: Unknown reference.
        """
        idx = self.stream.read_ushort()
        o = self.context.getObject(idx)
        if o is None:
            raise pyamf.ReferenceError('Unknown reference %d' % (idx,))
        return o
    def readDate(self):
        """
        Reads a UTC date from the data stream. Client and servers are
        responsible for applying their own timezones.
        Date: C{0x0B T7 T6} .. C{T0 Z1 Z2 T7} to C{T0} form a 64 bit
        Big Endian number that specifies the number of nanoseconds
        that have passed since 1/1/1970 0:00 to the specified time.
        This format is UTC 1970. C{Z1} and C{Z0} for a 16 bit Big
        Endian number indicating the indicated time's timezone in
        minutes.
        """
        ms = self.stream.read_double() / 1000.0
        self.stream.read_short() # tz
        # Timezones are ignored
        d = util.get_datetime(ms)
        if self.timezone_offset:
            d = d + self.timezone_offset
        self.context.addObject(d)
        return d
    def readLongString(self):
        """
        Read UTF8 string.
        """
        # like readString, but with a 32-bit length prefix
        l = self.stream.read_ulong()
        bytes = self.stream.read(l)
        return self.context.getStringForBytes(bytes)
    def readXML(self):
        """
        Read XML.
        """
        data = self.readLongString()
        root = xml.fromstring(data)
        self.context.addObject(root)
        return root
class Encoder(codec.Encoder):
    """
    Encodes an AMF0 stream.
    @ivar use_amf3: A flag to determine whether this encoder should default to
        using AMF3. Defaults to C{False}
    @type use_amf3: C{bool}
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): C{use_amf3} is popped *after* the base __init__
        # call, so the base class also receives it in **kwargs —
        # presumably it tolerates unknown keywords; verify.
        codec.Encoder.__init__(self, *args, **kwargs)
        self.use_amf3 = kwargs.pop('use_amf3', False)
    def buildContext(self):
        # AMF0-specific Context that can cache a companion AMF3 encoder.
        return Context()
    def getTypeFunc(self, data):
        # Dispatch to the write* method appropriate for C{data}'s type;
        # everything goes through AMF3 when use_amf3 is set.
        if self.use_amf3:
            return self.writeAMF3
        t = type(data)
        if t is pyamf.MixedArray:
            return self.writeMixedArray
        return codec.Encoder.getTypeFunc(self, data)
    def writeType(self, t):
        """
        Writes the type to the stream.
        @type t: C{str}
        @param t: ActionScript type.
        """
        self.stream.write(t)
    def writeUndefined(self, data):
        """
        Writes the L{undefined<TYPE_UNDEFINED>} data type to the stream.
        @param data: Ignored, here for the sake of interface.
        """
        self.writeType(TYPE_UNDEFINED)
    def writeNull(self, n):
        """
        Write null type to data stream.
        """
        self.writeType(TYPE_NULL)
    def writeList(self, a):
        """
        Write array to the stream.
        @param a: The array data to be encoded to the AMF0 data stream.
        """
        # previously seen lists are written as a 16-bit reference instead
        if self.writeReference(a) != -1:
            return
        self.context.addObject(a)
        self.writeType(TYPE_ARRAY)
        self.stream.write_ulong(len(a))
        for data in a:
            self.writeElement(data)
    def writeNumber(self, n):
        """
        Write number to the data stream .
        @param n: The number data to be encoded to the AMF0 data stream.
        """
        # AMF0 has a single Number type: everything is sent as a double
        self.writeType(TYPE_NUMBER)
        self.stream.write_double(float(n))
    def writeBoolean(self, b):
        """
        Write boolean to the data stream.
        @param b: The boolean data to be encoded to the AMF0 data stream.
        """
        self.writeType(TYPE_BOOL)
        if b:
            self.stream.write_uchar(1)
        else:
            self.stream.write_uchar(0)
    def serialiseString(self, s):
        """
        Similar to L{writeString} but does not encode a type byte.
        """
        if type(s) is unicode:
            s = self.context.getBytesForString(s)
        # length prefix is 32-bit for long strings, 16-bit otherwise
        l = len(s)
        if l > 0xffff:
            self.stream.write_ulong(l)
        else:
            self.stream.write_ushort(l)
        self.stream.write(s)
    def writeBytes(self, s):
        """
        Write a string of bytes to the data stream.
        """
        l = len(s)
        # marker choice and length width must agree: long strings carry a
        # 32-bit length, ordinary strings a 16-bit one
        if l > 0xffff:
            self.writeType(TYPE_LONGSTRING)
        else:
            self.writeType(TYPE_STRING)
        if l > 0xffff:
            self.stream.write_ulong(l)
        else:
            self.stream.write_ushort(l)
        self.stream.write(s)
    def writeString(self, u):
        """
        Write a unicode to the data stream.
        """
        s = self.context.getBytesForString(u)
        self.writeBytes(s)
    def writeReference(self, o):
        """
        Write reference to the data stream.
        @param o: The reference data to be encoded to the AMF0 datastream.
        """
        idx = self.context.getObjectReference(o)
        # Reference indices are written as an unsigned short, so objects
        # beyond index 65535 cannot be referenced; returning -1 tells the
        # caller to encode the object inline instead.
        if idx == -1 or idx > 65535:
            return -1
        self.writeType(TYPE_REFERENCE)
        self.stream.write_ushort(idx)
        return idx
    def _writeDict(self, o):
        """
        Write C{dict} to the data stream.
        @param o: The C{dict} data to be encoded to the AMF0 data stream.
        """
        for key, val in o.iteritems():
            self.serialiseString(key)
            self.writeElement(val)
    def writeMixedArray(self, o):
        """
        Write mixed array to the data stream.
        @type o: L{pyamf.MixedArray}
        """
        if self.writeReference(o) != -1:
            return
        self.context.addObject(o)
        self.writeType(TYPE_MIXEDARRAY)
        # TODO: optimise this
        # work out the highest integer index
        try:
            # list comprehensions to save the day
            max_index = max([y[0] for y in o.items()
                if isinstance(y[0], (int, long))])
            if max_index < 0:
                max_index = 0
        except ValueError:
            # no integer keys at all
            max_index = 0
        self.stream.write_ulong(max_index)
        self._writeDict(o)
        self._writeEndObject()
    def _writeEndObject(self):
        # 3-byte object terminator: empty key + TYPE_OBJECTTERM marker
        self.stream.write('\x00\x00' + TYPE_OBJECTTERM)
    def writeObject(self, o):
        """
        Write a Python object to the stream.
        @param o: The object data to be encoded to the AMF0 data stream.
        """
        if self.writeReference(o) != -1:
            return
        self.context.addObject(o)
        alias = self.context.getClassAlias(o.__class__)
        alias.compile()
        # classes registered as AMF3-only break out into AMF3 encoding
        if alias.amf3:
            self.writeAMF3(o)
            return
        if alias.anonymous:
            self.writeType(TYPE_OBJECT)
        else:
            self.writeType(TYPE_TYPEDOBJECT)
            self.serialiseString(alias.alias)
        attrs = alias.getEncodableAttributes(o, codec=self)
        # static attributes are written first, in declaration order
        if alias.static_attrs and attrs:
            for key in alias.static_attrs:
                value = attrs.pop(key)
                self.serialiseString(key)
                self.writeElement(value)
        if attrs:
            self._writeDict(attrs)
        self._writeEndObject()
    def writeDate(self, d):
        """
        Writes a date to the data stream.
        @type d: Instance of C{datetime.datetime}
        @param d: The date to be encoded to the AMF0 data stream.
        """
        if isinstance(d, datetime.time):
            raise pyamf.EncodeError('A datetime.time instance was found but '
                'AMF0 has no way to encode time objects. Please use '
                'datetime.datetime instead (got:%r)' % (d,))
        # According to the Red5 implementation of AMF0, dates references are
        # created, but not used.
        if self.timezone_offset is not None:
            d -= self.timezone_offset
        secs = util.get_timestamp(d)
        # timezone offset field is always written as 0 (UTC)
        tz = 0
        self.writeType(TYPE_DATE)
        self.stream.write_double(secs * 1000.0)
        self.stream.write_short(tz)
    def writeXML(self, e):
        """
        Writes an XML instance.
        """
        self.writeType(TYPE_XML)
        data = xml.tostring(e)
        if isinstance(data, unicode):
            data = data.encode('utf-8')
        self.stream.write_ulong(len(data))
        self.stream.write(data)
    def writeAMF3(self, data):
        """
        Writes an element in L{AMF3<pyamf.amf3>} format.
        """
        self.writeType(TYPE_AMF3)
        self.context.getAMF3Encoder(self).writeElement(data)
class RecordSet(object):
    """
    I represent the C{RecordSet} class used in Adobe Flash Remoting to hold
    (amongst other things) SQL records.
    @ivar columns: The columns to send.
    @type columns: List of strings.
    @ivar items: The C{RecordSet} data.
    @type items: List of lists, the order of the data corresponds to the order
        of the columns.
    @ivar service: Service linked to the C{RecordSet}.
    @type service:
    @ivar id: The id of the C{RecordSet}.
    @type id: C{str}
    @see: U{RecordSet on OSFlash (external)
        <http://osflash.org/documentation/amf/recordset>}
    """
    class __amf__:
        alias = 'RecordSet'
        static = ('serverInfo',)
        dynamic = False
    def __init__(self, columns=None, items=None, service=None, id=None):
        # `None` sentinels instead of mutable `[]` defaults: a literal
        # list default is shared between all calls, so two RecordSets
        # created without arguments would mutate the same lists.
        self.columns = [] if columns is None else columns
        self.items = [] if items is None else items
        self.service = service
        self.id = id
    def _get_server_info(self):
        # Build the serverInfo payload consumed by the Flash client.
        ret = pyamf.ASObject(totalCount=len(self.items), cursor=1, version=1,
            initialData=self.items, columnNames=self.columns)
        if self.service is not None:
            ret.update({'serviceName': str(self.service['name'])})
        if self.id is not None:
            ret.update({'id':str(self.id)})
        return ret
    def _set_server_info(self, val):
        # Unpack a serverInfo payload received from the client/gateway.
        self.columns = val['columnNames']
        self.items = val['initialData']
        try:
            # TODO nick: find relevant service and link in here.
            self.service = dict(name=val['serviceName'])
        except KeyError:
            self.service = None
        try:
            self.id = val['id']
        except KeyError:
            self.id = None
    serverInfo = property(_get_server_info, _set_server_info)
    def __repr__(self):
        ret = '<%s.%s' % (self.__module__, self.__class__.__name__)
        if self.id is not None:
            ret += ' id=%s' % self.id
        if self.service is not None:
            ret += ' service=%s' % self.service
        ret += ' at 0x%x>' % id(self)
        return ret
pyamf.register_class(RecordSet)
def _check_for_int(x):
"""
This is a compatibility function that takes a C{float} and converts it to an
C{int} if the values are equal.
"""
try:
y = int(x)
except (OverflowError, ValueError):
pass
else:
# There is no way in AMF0 to distinguish between integers and floats
if x == x and y == x:
return y
return x
| [
"psalty@gmail.com"
] | psalty@gmail.com |
785e732e61c0a564ca137b93bd2e9670e0e8a6e1 | 2acb2af5e93bab0168ece7cb403b90355c9ccb9d | /W2/Questions/q2.py | 340566dfc274e9a8f5a3eacb79120ac53426a120 | [] | no_license | dmathews98/MLPR | 0ebb3a6841545add2716dfb853eac3c8f2111e55 | e04fc6b079e6abd2586f1563614e644d72bf87b3 | refs/heads/main | 2023-01-13T12:41:53.509799 | 2020-11-17T13:20:50 | 2020-11-17T13:20:50 | 303,121,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | import numpy as np
import matplotlib.pyplot as plt
def phi_linear(Xin):
    """Linear design matrix: prepend a bias column of ones, giving [1, x]."""
    bias = np.ones((Xin.shape[0], 1))
    return np.hstack((bias, Xin))
def phi_quartic(Xin):
    """Quartic polynomial design matrix: [1, x, x**2, x**3, x**4]."""
    columns = [np.ones((Xin.shape[0], 1))] + [Xin ** p for p in range(1, 5)]
    return np.hstack(columns)
def fit_and_plot(phi_fn, T, X):
    """Least-squares fit of the basis expansion phi_fn to (T, X), then plot.
    Fits weights w minimising ||phi_fn(T) @ w - X||^2 and draws the fitted
    curve over a dense grid on [0, 1.0], marking the prediction at the
    final grid point t = 1.0.
    """
    w_fit = np.linalg.lstsq(phi_fn(T), X, rcond=None)[0]
    X_grid = np.tile(np.arange(0, 1.01, 0.01)[:,None], (1,1))
    f_grid = np.dot(phi_fn(X_grid), w_fit)
    plt.plot(X_grid, f_grid, linewidth=1)
    # the last grid point is t = 1.0: highlight that prediction
    plt.plot([1.0], f_grid[len(f_grid)-1, 0], '.')
np.random.seed(0)
amp_data = np.load('amp_data.npz')['amp_data']
'''
1 a
'''
plt.figure('Line_graph')
plt.xlabel('Time /arb')
plt.ylabel('Amplitude /arb')
plt.plot(amp_data, 'r')
plt.figure('Hist_graph')
plt.xlabel('Amplitude /arb')
plt.ylabel('Freq')
plt.hist(amp_data, bins=1000)
'''
1 b
'''
# Trim trailing samples so the series reshapes into rows of 21.
dis = np.shape(amp_data)[0] % 21
# Integer division: C is a row count and must be an int — true division
# yields a float here, which np.reshape rejects on Python 3.
C = np.shape(amp_data)[0] // 21
cut_data = amp_data[0:np.shape(amp_data)[0]-dis]
wrap_data = np.reshape(cut_data, (C, 21))
shuffle_data = np.random.permutation(wrap_data)
# 70% train / 15% validation / 15% test split; the first 20 columns are
# the inputs, column 20 is the prediction target.
train_share = int(0.7*C)
val_share = int(0.15*C)
X_shuff_train = np.copy(shuffle_data[:train_share, :20])
Y_shuff_train = np.copy(shuffle_data[:train_share, 20])
X_shuff_val = np.copy(shuffle_data[train_share:train_share+val_share, :20])
Y_shuff_val = np.copy(shuffle_data[train_share:train_share+val_share, 20])
X_shuff_test = np.copy(shuffle_data[train_share+val_share:, :20])
Y_shuff_test = np.copy(shuffle_data[train_share+val_share:, 20])
X_shuff_train_copy = np.copy(X_shuff_train)
Y_shuff_train_copy = np.copy(Y_shuff_train)
X_shuff_val_copy = np.copy(X_shuff_val)
Y_shuff_val_copy = np.copy(Y_shuff_val)
X_shuff_test_copy = np.copy(X_shuff_test)
Y_shuff_test_copy = np.copy(Y_shuff_test)
'''
2 a
'''
# Fit and plot linear and quartic models to one training row.
T = np.linspace(0, 1, 20, endpoint=False)
row_index = 15000
plt.figure('2a')
plt.plot(T, X_shuff_train[row_index], 'y.')
fit_and_plot(phi_linear, T[:,None], X_shuff_train[row_index][:,None])
fit_and_plot(phi_quartic, T[:,None], X_shuff_train[row_index][:,None])
plt.show()
| [
"declanmathews@macbook-pro.home"
] | declanmathews@macbook-pro.home |
b10e258db445f8856472e47a3f2cdef86d99960d | ec6191ecb222f0b0f4036fd4a46aa4e1f8a55dd4 | /gcastle/castle/algorithms/__init__.py | 7d577a01a388c28bb66e67cf9a219fd4c209adf4 | [
"Apache-2.0",
"MIT"
] | permissive | WFP1998/trustworthyAI | a2ab63674e9d25ef888d95428502512ee44a7a9d | c13e167070876eb6dac4458f9f39e7429cac1a41 | refs/heads/master | 2023-05-12T16:22:24.121996 | 2021-05-29T03:26:00 | 2021-05-29T03:26:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | # coding=utf-8
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ttpm import TTPM
from .pc import PC
from .lingam import DirectLiNGAM
from .lingam import ICALiNGAM
from .gradient import Notears
from .gradient import NotearsMLP
from .gradient import NotearsSob
from .gradient import NotearsLowRank
from .gradient import GOLEM
from .gradient import GraN_DAG, Parameters
from .gradient import GAE
from .gradient import MCSL
from .gradient import RL
from .gradient import CORL1
from .gradient import CORL2 | [
"zhangkeli1@huawei.com"
] | zhangkeli1@huawei.com |
4303149eb6a397ad75b3abcc45eda3d56466a31f | c36452a7f824e7f5afc7dbc34fddbcd98b4a35e5 | /.c9/metadata/workspace/pset6/mario.py | b7dff3705f02e6bf72a62e9675232ccef1dca296 | [] | no_license | jccantu8/CS50-Exercises | 56ed8f2236571b9f2e5ad433efcb6e425b50ba00 | 81c8301ef94299304aba31b39caf34d89ab1b934 | refs/heads/master | 2020-12-15T06:37:05.763303 | 2020-01-20T05:02:53 | 2020-01-20T05:02:53 | 235,021,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | {"filter":false,"title":"mario.py","tooltip":"~/workspace/pset6/mario.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":10,"column":17},"end":{"row":10,"column":17},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1553012464325,"hash":"c5a8e95f174ad5e4d8fe57d77db24c3bef9286e0"} | [
"julio.c.cantu03@utrgv.edu"
] | julio.c.cantu03@utrgv.edu |
b5be03bae05e2c31bc7c6b3158b111ca8d5fc886 | 791ce6452fb555f953ed3adb1966b59abc7c2dbb | /arxiv_net/dashboard/assets/style.py | c359a7598a7dde951f38e4ceb3c9d495568f0370 | [] | no_license | mirandrom/arxiv-net | d63b76006d7cde62a4ba4e623ffa3971436455f5 | 86bdc7a878c8d1d4a0135ddd2785cb59ca638937 | refs/heads/master | 2023-03-21T13:37:30.567726 | 2019-12-05T23:25:24 | 2019-12-05T23:25:24 | 222,019,331 | 2 | 0 | null | 2021-03-20T02:10:29 | 2019-11-15T23:26:34 | Python | UTF-8 | Python | false | false | 1,468 | py | card_style = {
"box-shadow": "0 4px 5px 0 rgba(0,0,0,0.14), 0 1px 10px 0 rgba(0,0,0,0.12), 0 2px 4px -1px rgba(0,0,0,0.3)"
}
BLUES = ["rgb(210, 218, 255)", "rgb(86, 117, 255)", "rgb(8, 31, 139)",
"rgb(105, 125, 215)", "rgb(84, 107, 208)",
"rgb(210, 210, 210)", "rgb(102, 103, 107)", "rgb(19, 23, 37)", ]
gradients = ['rgb(115, 132, 212)', 'rgb(169, 120, 219)', 'rgb(211, 107, 218)',
'rgb(237, 84, 199)',
'rgb(244, 70, 157)', 'rgb(240, 90, 127)', 'rgb(238, 117, 124)',
'rgb(230, 193, 119)']
tab_style = {
'borderLeft' : 'thin lightgrey solid',
'borderRight': 'thin lightgrey solid',
'borderTop' : '2px white solid',
'boxShadow' : 'inset 0px -1px 0px 0px lightgrey',
'fontSize' : '0.7vw',
'color' : 'black',
}
selected_style = {
'borderLeft' : 'thin lightgrey solid',
'borderRight' : 'thin lightgrey solid',
'background-image': f"linear-gradient(to top left, {','.join(gradients[:4])})",
'color' : 'white',
'fontSize' : '0.7vw',
}
container_style = {
# 'width' : '100%',
'verticalAlign': 'middle',
# 'display' : 'inlineBlock',
# 'boxShadow': 'inset 0px -1px 0px 0px lightgrey',
'alignItems' : 'center',
'padding' : '20px ',
}
# EXTERNAL CSS / JS
# app.css.config.serve_locally = True
# app.scripts.config.serve_locally = True
# app.config['suppress_callback_exceptions'] = True
| [
"arialinvlad@gmail.com"
] | arialinvlad@gmail.com |
2aa7d7541d47bf6cbc5349b3cb975f5eb6b55412 | 29145db13229d311269f317bf2819af6cba7d356 | /may easy/maxVal.py | 91313d4b8983c93bfc3cfa232fbdb5c36ee8edff | [] | no_license | rocket3989/hackerEarth2019 | 802d1ca6fd03e80657cbe07a3f123e087679af4d | 42c0a7005e52c3762496220136cc5c1ee93571bb | refs/heads/master | 2021-07-05T01:32:42.203964 | 2020-12-22T03:40:20 | 2020-12-22T03:40:20 | 211,607,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py |
fib = [1, 1]
while True:
fib.append(fib[-1] + fib[-2])
if fib[-1] > 10 ** 18:
break
N = int(input())
for val in fib:
if val <= N:
continue
print(val)
break | [
"rocket3989@gmail.com"
] | rocket3989@gmail.com |
969d2be266219f2b062ad7111a43f44275354f4d | 13b2f7ca4bbad32b0ce7d547399e6097580ae097 | /bfs+dfs/1260_DFS와 BFS.py | f69616d0dba433892b0d30f2d1628280ae3b9b5c | [] | no_license | hksoftcorn/review | dadbd3a4ee7961282bfefd697a97f6ccf78dbe83 | 474aef3747c135c54322ff28261d2a6812a3d9a0 | refs/heads/master | 2023-06-17T05:41:50.178831 | 2021-07-11T23:30:27 | 2021-07-11T23:30:27 | 385,072,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | def dfs(v):
visited[v] = 1
for w in sorted(G[v]):
if not visited[w]:
dfs_path.append(w)
dfs(w)
def bfs(v):
    # Breadth-first traversal from v over the global adjacency lists G
    # (vertices 1..N); every newly discovered vertex is appended to the
    # global bfs_path. Neighbours are visited in ascending order.
    seen = [0] * (N + 1)
    seen[v] = 1
    frontier = [v]
    while frontier:
        node = frontier.pop(0)
        for neighbour in sorted(G[node]):
            if not seen[neighbour]:
                seen[neighbour] = 1
                bfs_path.append(neighbour)
                frontier.append(neighbour)
# Input: N nodes, E edges, start vertex V, then E undirected edges.
N, E, V = map(int, input().split())
G = [[] for _ in range(N + 1)]  # adjacency lists, vertices are 1-indexed
visited = [0] * (N + 1)  # global visited flags consumed by dfs()
for _ in range(E):
    u, v = map(int, input().split())
    G[u].append(v)
    G[v].append(u)  # undirected graph: store both directions
dfs_path = [V]  # dfs() appends each newly visited vertex
dfs(V)
print(' '.join(map(str, dfs_path)))
bfs_path = [V]  # bfs() appends each newly discovered vertex
bfs(V)
print(' '.join(map(str, bfs_path))) | [
"hksoftcorn.dev@gmail.com"
] | hksoftcorn.dev@gmail.com |
c47929c48cfd0153ae0e3bfc265fc281115c667f | 67e33d937d4d0cfebd120a162791e06593752a51 | /src/monte_carlo_analysis.py | 0a9423112c6ebbff28c0b426f284133f0b387235 | [] | no_license | MaxKelsen/BQNS | 4435c022b1faec07bea6fa9dd1684301ada48234 | acdb17524d8c3d8a59ae355b1e4486e7e13b16f4 | refs/heads/master | 2023-03-19T15:01:17.754473 | 2021-01-21T06:05:46 | 2021-01-21T06:05:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,747 | py | """
This module implements the anaylsis of the Monte Carlo method to specify the
suitable number of noise realizations needed
"""
# preample
import numpy as np
from simulator import NoisyQubitSimulator
import pickle
###############################################################################
if __name__ == '__main__':
    # Monte Carlo study: simulate a noisy driven qubit for K noise
    # realizations and pickle the per-realization Pauli expectation
    # values, to judge how many realizations are needed for convergence.
    # define Paulis
    initial_states = [np.array([[0.5,0.5],[0.5,0.5]]), np.array([[0.5,-0.5],[-0.5,0.5]]),
                      np.array([[0.5,-0.5j],[0.5j,0.5]]),np.array([[0.5,0.5j],[-0.5j,0.5]]),
                      np.array([[1,0],[0,0]]), np.array([[0,0],[0,1]]) ]
    measurement_operators = [np.array([[0,1],[1,0]]), np.array([[0,-1j],[1j,0]]), np.array([[1,0],[0,-1]])]
    # define the simulation parameters
    T = 1 # total time
    M = 4096 # number of discrete time steps
    Omega = 10 # energy gap of the qubit
    # define pulse sequence parameters
    n_x = 5 # number of pulses in x-direction
    n_y = 7 # number of pulses in y-direction
    sigma = 6*T/M # standard deviation of the Gaussian pulses
    tau_x = np.array([(k-0.5)/n_x for k in range(1,n_x+1)])*T # ideal CPMG pulse locations for x-axis
    tau_y = np.array([(k-0.5)/n_y for k in range(1,n_y+1)])*T # ideal CPMG pulse locations for y-axis
    A_x = np.pi*np.ones(tau_x.shape)/(np.sqrt(2*np.pi*sigma*sigma)) # ideal amplitude of the CPMG pi pulses for x-axis
    A_y = np.pi*np.ones(tau_y.shape)/(np.sqrt(2*np.pi*sigma*sigma)) # ideal amplitude of the CPMG pi pulses for y-axis
    # define noise random process parameters
    K = 10000 # number of realizations of the noise random process
    f = np.fft.fftfreq(M)*M/T # vector of discrteized frequencies
    alpha = 1
    S_Z = 1*np.array([(1/(fq+1)**alpha)*(fq<50) + (1/40)*(fq>50) + 0.8*np.exp(-((fq-20)**2)/10) for fq in f[f>=0]]) # desired single side band PSD
    alpha = 1.5
    S_X = 1*np.array([(1/(fq+1)**alpha)*(fq<20) + (1/96)*(fq>20) + 0.5*np.exp(-((fq-15)**2)/10) for fq in f[f>=0]]) # desired single side band PSD
    # define qubit simulator
    qubit_simulator = NoisyQubitSimulator(T = T , M = M, tau = [tau_x, tau_y, [0]], sigma = sigma, Omega = Omega, K = K, Type = "Gaussian", P_desired = [S_X, None, S_Z])
    # apply the pulse sequence
    qubit_simulator.set_pulses([A_x, A_y, [0]])
    # simulate measurements
    # only three of the six initial states (+x, +y, 0) are simulated
    for idx_state, initial_state in enumerate([initial_states[0] ,initial_states[2], initial_states[4]]):
        # initialize an array to store the expectations
        expectations = np.zeros((K,3))
        # loop over all realizations
        for idx_U, U in enumerate(qubit_simulator.U):
            # calculate the final state
            final_state = U @ initial_state @ U.conj().T
            # calculate the probability of the outcome
            expectations[idx_U, :] = ( np.real( np.trace(final_state @ measurement_operators[0]) ),
                                       np.real( np.trace(final_state @ measurement_operators[1]) ),
                                       np.real( np.trace(final_state @ measurement_operators[2]) )
                                     )
        # NOTE(review): this rebinds `f`, previously the frequency vector;
        # harmless here since the vector is no longer used, but confusing.
        f = open("./../datasets/montecarlo_%d.ds"%idx_state, 'wb')
        pickle.dump({"expectations":expectations}, f, -1)
        f.close()
    # save the applied pulse waveforms and time axis alongside the results
    f = open("./../datasets/montecarlo_pulses.ds", 'wb')
    pickle.dump({"h_x":qubit_simulator.h_x, "h_y":qubit_simulator.h_y, "time_range": qubit_simulator.time_range}, f, -1)
f.close() | [
"akram.youssry@eng.asu.edu.eg"
] | akram.youssry@eng.asu.edu.eg |
1b03a8531d7533b57236f251b0c713bced9b5f50 | f2befaae3840bafd181cc712108e3b64caf2696f | /app/portal/horizon/openstack_dashboard/dashboards/admin/routers/tests.py | 557966985c38691b0549627c5fe8ece11b815e77 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-adcaas-openstack | 17d5c408d421dcfe542002e1f850b2d9f29f1663 | 02bd8a606215c0fa08b926bac1b092b5e8b278df | refs/heads/master | 2023-08-28T12:09:54.972191 | 2022-08-12T02:03:43 | 2022-08-12T02:03:43 | 164,592,273 | 4 | 23 | Apache-2.0 | 2022-08-12T02:03:44 | 2019-01-08T07:40:35 | Python | UTF-8 | Python | false | false | 10,869 | py | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg
from mox3.mox import IsA
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers import tests as r_test
from openstack_dashboard.test import helpers as test
INDEX_TEMPLATE = 'horizon/common/_data_table_view.html'
class RouterTests(test.BaseAdminViewTests, r_test.RouterTests):
    """Admin-dashboard variant of the project-level router panel tests.

    Re-runs the inherited router tests against the admin URLs.  The mox
    stubs below are recorded and then replayed in order, so the sequence
    of API-expectation statements inside each test is significant — do
    not reorder them.
    """
    DASHBOARD = 'admin'
    INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
    DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
    def _get_detail(self, router, extraroute=True):
        """Fetch the router detail view with the L3-agent lookup enabled."""
        res = super(RouterTests, self)._get_detail(router, extraroute,
                                                   lookup_l3=True)
        return res
    @test.create_stubs({api.neutron: ('router_list', 'network_list',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_index(self):
        """Index view lists every router across all tenants."""
        tenants = self.tenants.list()
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn(self.routers.list())
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           "router_availability_zone")\
            .AndReturn(True)
        self._mock_external_network_list()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        routers = res.context['table'].data
        self.assertItemsEqual(routers, self.routers.list())
    @test.create_stubs({api.neutron: ('router_list',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_index_router_list_exception(self):
        """A neutron failure renders an empty table plus one error message."""
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           "router_availability_zone")\
            .AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        self.assertEqual(len(res.context['table'].data), 0)
        self.assertMessageCount(res, error=1)
    @test.create_stubs({api.neutron: ('agent_list',
                                      'router_list_on_l3_agent',
                                      'network_list',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_list_by_l3_agent(self):
        """Routers hosted on a specific L3 agent are listed on its page."""
        tenants = self.tenants.list()
        agent = self.agents.list()[1]
        api.neutron.agent_list(
            IsA(http.HttpRequest),
            id=agent.id).AndReturn([agent])
        api.neutron.router_list_on_l3_agent(
            IsA(http.HttpRequest),
            agent.id,
            search_opts=None).AndReturn(self.routers.list())
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           "router_availability_zone")\
            .AndReturn(True)
        self._mock_external_network_list()
        self.mox.ReplayAll()
        l3_list_url = reverse('horizon:admin:routers:l3_agent_list',
                              args=[agent.id])
        res = self.client.get(l3_list_url)
        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        routers = res.context['table'].data
        self.assertItemsEqual(routers, self.routers.list())
    @test.create_stubs({api.neutron: ('router_list', 'network_list',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_set_external_network_empty(self):
        """A gateway pointing at a missing network shows '(Not Found)'."""
        router = self.routers.first()
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn([router])
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           "router_availability_zone")\
            .AndReturn(True)
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([self.tenants.list(), False])
        # alter_ids makes the mocked network ids not match the gateway.
        self._mock_external_network_list(alter_ids=True)
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        table_data = res.context['table'].data
        self.assertEqual(len(table_data), 1)
        self.assertIn('(Not Found)',
                      table_data[0]['external_gateway_info']['network'])
        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        self.assertMessageCount(res, error=1)
    @test.create_stubs({api.neutron: ('router_list', 'network_list',
                                      'port_list', 'router_delete',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_router_delete(self):
        """Deleting a router with no interfaces succeeds.

        Three router_list/tenant_list rounds are expected: the initial GET,
        the POST's table reload, and the redirect after deletion.
        """
        router = self.routers.first()
        tenants = self.tenants.list()
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn(self.routers.list())
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        self._mock_external_network_list()
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn(self.routers.list())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           "router_availability_zone")\
            .MultipleTimes().AndReturn(True)
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        self._mock_external_network_list()
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=router.id, device_owner=IgnoreArg())\
            .AndReturn([])
        api.neutron.router_delete(IsA(http.HttpRequest), router.id)
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn(self.routers.list())
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        self._mock_external_network_list()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        formData = {'action': 'routers__delete__' + router.id}
        res = self.client.post(self.INDEX_URL, formData, follow=True)
        self.assertNoFormErrors(res)
        self.assertMessageCount(response=res, success=1)
        self.assertIn('Deleted Router: ' + router.name,
                      res.content.decode('utf-8'))
    @test.create_stubs({api.neutron: ('router_list', 'network_list',
                                      'port_list', 'router_remove_interface',
                                      'router_delete',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_router_with_interface_delete(self):
        """Deleting a router first detaches each of its interface ports."""
        router = self.routers.first()
        ports = self.ports.list()
        tenants = self.tenants.list()
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn(self.routers.list())
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        self._mock_external_network_list()
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn(self.routers.list())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           "router_availability_zone")\
            .MultipleTimes().AndReturn(True)
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        self._mock_external_network_list()
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=router.id, device_owner=IgnoreArg())\
            .AndReturn(ports)
        # One remove_interface expectation per attached port, then delete.
        for port in ports:
            api.neutron.router_remove_interface(IsA(http.HttpRequest),
                                                router.id, port_id=port.id)
        api.neutron.router_delete(IsA(http.HttpRequest), router.id)
        api.neutron.router_list(
            IsA(http.HttpRequest)).AndReturn(self.routers.list())
        api.keystone.tenant_list(IsA(http.HttpRequest))\
            .AndReturn([tenants, False])
        self._mock_external_network_list()
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        formData = {'action': 'routers__delete__' + router.id}
        res = self.client.post(self.INDEX_URL, formData, follow=True)
        self.assertNoFormErrors(res)
        self.assertMessageCount(response=res, success=1)
        self.assertIn('Deleted Router: ' + router.name,
                      res.content.decode('utf-8'))
    @test.create_stubs({api.neutron: ('is_extension_supported',)})
    @test.update_settings(FILTER_DATA_FIRST={'admin.routers': True})
    def test_routers_list_with_admin_filter_first(self):
        """With FILTER_DATA_FIRST, the table stays empty until filtered."""
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           "router_availability_zone")\
            .MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(self.INDEX_URL)
        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        routers = res.context['table'].data
        self.assertItemsEqual(routers, [])
class RouterTestsNoL3Agent(RouterTests):
    """Re-runs all RouterTests with L3-agent support disabled."""
    def _get_detail(self, router, extraroute=True):
        # Deliberately super(RouterTests, ...) — skips RouterTests._get_detail
        # so support_l3_agent=False reaches the project-level helper directly.
        return super(RouterTests, self)._get_detail(router, extraroute,
                                                    lookup_l3=True,
                                                    support_l3_agent=False)
class RouterRouteTest(test.BaseAdminViewTests, r_test.RouterRouteTests):
    """Admin-dashboard variant of the router extra-route tests."""
    DASHBOARD = 'admin'
    INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
    DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
    def _get_detail(self, router, extraroute=True):
        """Fetch the router detail view with the L3-agent lookup enabled."""
        return super(RouterRouteTest, self)._get_detail(router, extraroute,
                                                        lookup_l3=True)
| [
"a.zong@f5.com"
] | a.zong@f5.com |
51add329e85e11a40d5d9d4042b8f3e462628776 | d2d3978088f4294d554dd20e647327165c966888 | /src/seodeploy/modules/headless/functions.py | 9f28bd1e283179829117e740d7303eb824187d68 | [
"MIT",
"Apache-2.0"
] | permissive | derekperkins/SEODeploy | a99d48d66fc2adef27caef849cbd025a99143e7b | 4202a98b51464d70ba63de3c6f924951c11fb1b3 | refs/heads/master | 2022-10-29T10:16:55.776280 | 2020-06-18T12:59:57 | 2020-06-18T12:59:57 | 273,305,561 | 0 | 0 | MIT | 2020-06-18T17:56:18 | 2020-06-18T17:56:17 | null | UTF-8 | Python | false | false | 3,293 | py | #! /usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2020 JR Oakes
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from urllib.parse import urljoin
from tqdm import tqdm
from seodeploy.lib.logging import get_logger
from seodeploy.lib.helpers import group_batcher, mp_list_map, process_page_data
from seodeploy.modules.headless.render import HeadlessChrome # noqa
from seodeploy.modules.headless.exceptions import HeadlessException # noqa
_LOG = get_logger(__name__)
def _render_paths(paths, config=None, host=None):
    """Render each path in headless Chrome and collect per-path results.

    Parameters
    ----------
    paths: list
        Paths to render.
    config: class
        Module configuration object passed through to HeadlessChrome.
    host: str
        Host joined with each path to build the full URL.

    Returns
    -------
    list
        One dict per path with ``path``, ``page_data`` and ``error`` keys;
        exactly one of ``page_data`` / ``error`` is non-None.
    """
    browser = HeadlessChrome(config=config)
    rendered = []
    for path in paths:
        outcome = browser.render(urljoin(host, path))
        if outcome["error"]:
            entry = {"path": path, "page_data": None, "error": outcome["error"]}
        else:
            entry = {"path": path, "page_data": outcome["page_data"], "error": None}
        rendered.append(entry)
    return rendered
def run_render(sample_paths, config):
    """Kick off headless rendering of the sampled paths on both hosts.

    Parameters
    ----------
    sample_paths: list
        Paths to render on the production and staging hosts.
    config: class
        Configuration class (uses ``config.headless`` settings).

    Returns
    -------
    dict
        Page-data dict built by ``process_page_data`` from both runs.
    """
    path_batches = group_batcher(sample_paths, list, config.headless.BATCH_SIZE, fill=None)
    collected = {"prod": [], "stage": []}
    hosts = (("prod", config.headless.PROD_HOST),
             ("stage", config.headless.STAGE_HOST))
    # Each batch is rendered on production first, then on staging.
    for path_batch in tqdm(path_batches, desc="Rendering URLs"):
        for key, host in hosts:
            collected[key].extend(
                mp_list_map(path_batch, _render_paths, config=config, host=host)
            )
    # Review for errors and fold both runs into the comparison dictionary.
    return process_page_data(
        sample_paths, collected["prod"], collected["stage"], config.headless
    )
| [
"jroakes@gmail.com"
] | jroakes@gmail.com |
54f686efc0815a540a7a9c1b958cc435f2334efd | 811ea53778535ac140e2bdfd4acf3c9521e0e05c | /examples/gen_funcs/persistent_sampling.py | 539123e8be0c7c75d66177220327056a4c893ba2 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Libensemble/libensemble | 41aaec8cfc8bc1b70708938305abdfdf7bcafca3 | 0ffcc5f88f693ebf60ba125a7cae6a44d2c98c6f | refs/heads/main | 2023-09-04T02:44:06.067312 | 2023-07-24T22:24:13 | 2023-07-24T22:24:13 | 76,305,338 | 69 | 28 | BSD-3-Clause | 2023-09-14T16:12:21 | 2016-12-12T23:43:44 | Python | UTF-8 | Python | false | false | 50 | py | ../../libensemble/gen_funcs/persistent_sampling.py | [
"jnavarro@anl.gov"
] | jnavarro@anl.gov |
783cee8d4602ce0769c0052c184f1d68cae1529e | 40885aae85fff95d923cfc71f8283e60242f5ab2 | /dataset.py | e795e397c6c8e44826f412335e407a0a01605963 | [] | no_license | wdwang09/MNIST-Learning | 4cb1ffa00cb7dd5219a2e4e9ecca7aff31532900 | 94c42bf56d04d3738dd45269c9e61ee5813f95dd | refs/heads/master | 2022-11-15T06:08:27.090297 | 2020-07-11T12:04:17 | 2020-07-11T12:04:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,553 | py | import numpy as np
import os
class BaseDataset:
    """Common MNIST container: raw arrays plus PCA/LDA/t-SNE utilities.

    Subclasses are expected to fill ``trainImg``/``testImg`` as uint8 arrays
    of shape (N, 784) and ``trainLabel``/``testLabel`` as (N, 10) one-hot
    arrays (this is what the accessor methods below assume).
    """
    def __init__(self):
        self.trainImg = None     # (N_train, rows*cols) uint8, set by subclass
        self.trainLabel = None   # (N_train, 10) one-hot, set by subclass
        self.testImg = None      # (N_test, rows*cols) uint8, set by subclass
        self.testLabel = None    # (N_test, 10) one-hot, set by subclass
        self.channels, self.rows, self.cols = 1, 28, 28
    def get_vector_train_img(self, is_normalized=True):
        """Return flattened train images, optionally scaled to roughly [-1, 1)."""
        if is_normalized:
            return (self.trainImg.astype(np.float64) - 128) / 128
        else:
            return self.trainImg
    def get_matrix_train_img_with_channel(self, is_normalized=True):
        """Return train images reshaped to (N, channels, rows, cols)."""
        tmp = self.get_vector_train_img(is_normalized)
        return tmp.reshape((tmp.shape[0], self.channels, self.rows, self.cols))
    def get_vector_test_img(self, is_normalized=True):
        """Return flattened test images, optionally scaled to roughly [-1, 1)."""
        if is_normalized:
            return (self.testImg.astype(np.float64) - 128) / 128
        else:
            return self.testImg
    def get_matrix_test_img_with_channel(self, is_normalized=True):
        """Return test images reshaped to (N, channels, rows, cols)."""
        tmp = self.get_vector_test_img(is_normalized)
        return tmp.reshape((tmp.shape[0], self.channels, self.rows, self.cols))
    def get_matrix_train_label(self):
        """Return one-hot train labels, shape (N, 10)."""
        return self.trainLabel
    def get_vector_train_label(self):
        """Return integer train labels (argmax over the one-hot rows)."""
        return np.argmax(self.trainLabel, axis=1)
    def get_matrix_test_label(self):
        """Return one-hot test labels, shape (N, 10)."""
        return self.testLabel
    def get_vector_test_label(self):
        """Return integer test labels (argmax over the one-hot rows)."""
        return np.argmax(self.testLabel, axis=1)
    def pca_projection(self, dim=100):
        """Return the top-`dim` principal directions of the train images.

        Computes the SVD of X^T X (unnormalized scatter of the normalized
        train images) and returns the first `dim` left-singular vectors as a
        (784, dim) projection matrix.
        """
        origin = self.get_vector_train_img(True)
        sigma = np.dot(origin.transpose(), origin) # / origin.shape[1] # You can divide it or not.
        U, _, _ = np.linalg.svd(sigma)
        return U[:, :dim]
    def lda_projection(self, dim=100):
        """Return a (784, dim) Fisher-LDA projection fit on the train set.

        Builds the within-class scatter Sw and between-class scatter Sb,
        then takes the top singular vectors of pinv(Sw) @ Sb.
        NOTE(review): with 10 classes, Sb has rank <= 9, so components
        beyond 9 carry little discriminative information.
        """
        origin_imgs = self.get_vector_train_img(is_normalized=True)
        origin_labels = self.get_vector_train_label()
        labels = np.unique(origin_labels)
        images_dict = dict()
        mean_dict = dict()
        for label in labels:
            images_dict[label] = origin_imgs[origin_labels == label]
            mean_dict[label] = np.mean(images_dict[label], axis=0)
        mean_all = np.mean(origin_imgs, axis=0)
        origin_feature_size = origin_imgs.shape[1]
        Sw = np.zeros((origin_feature_size, origin_feature_size), dtype=np.float64)
        for label in labels:
            Sw += np.dot((images_dict[label]-mean_dict[label]).T,
                         images_dict[label]-mean_dict[label])
        Sb = np.zeros((origin_feature_size, origin_feature_size), dtype=np.float64)
        for label in labels:
            Sb += images_dict[label].shape[0] * np.dot((mean_dict[label]-mean_all).reshape(-1, 1),
                                                       (mean_dict[label]-mean_all).reshape(1, -1))
        # use pinv rather than inv to avoid singularity
        Sw_Sb = np.linalg.pinv(Sw).dot(Sb)
        U, _, _ = np.linalg.svd(Sw_Sb)
        return U[:, :dim]
    def get_pca_vector_train_img(self, dim=100, is_normalized=True):
        """Project train images onto `dim` PCA components, optionally standardized."""
        projection = self.pca_projection(dim)
        res = np.dot(self.get_vector_train_img(is_normalized=True), projection)
        if is_normalized:
            # Standardize per component; epsilon 0.001 guards against /0.
            mean = np.mean(res, axis=0)
            std = np.std(res, axis=0)
            # mean = 0
            # std = 1
            # x_min, x_max = np.min(res, 0), np.max(res, 0)
            # res = (res - x_min) / (x_max - x_min)
            return (res - mean) / np.sqrt(std**2 + 0.001)
        return res
    def get_pca_vector_test_img(self, dim=100, is_normalized=True):
        """Project test images onto the train-fit PCA components.

        NOTE(review): the mean/std used here are computed from the *test*
        projections rather than reused from the train set — confirm this
        is intended before relying on cross-set comparability.
        """
        projection = self.pca_projection(dim)
        res = np.dot(self.get_vector_test_img(is_normalized=True), projection)
        if is_normalized:
            mean = np.mean(res, axis=0)
            std = np.std(res, axis=0)
            # mean = 0
            # std = 1
            return (res - mean) / np.sqrt(std**2 + 0.001)
        return res
    def get_lda_vector_train_img(self, dim=100, is_normalized=True):
        """Project train images onto `dim` LDA components, optionally standardized."""
        projection = self.lda_projection(dim)
        res = np.dot(self.get_vector_train_img(is_normalized=True), projection)
        if is_normalized:
            mean = np.mean(res, axis=0)
            std = np.std(res, axis=0)
            return (res - mean) / np.sqrt(std**2 + 0.0001)
        return res
    def get_lda_vector_test_img(self, dim=100, is_normalized=True):
        """Project test images onto the train-fit LDA components."""
        projection = self.lda_projection(dim)  # same as train img
        res = np.dot(self.get_vector_test_img(is_normalized=True), projection)
        if is_normalized:
            # NOTE(review): like the PCA variant, normalization statistics
            # come from the test projections themselves.
            mean = np.mean(res, axis=0)
            std = np.std(res, axis=0)
            return (res - mean) / np.sqrt(std**2 + 0.0001)
        return res
    def t_sne(self, visual_part, labels=None, title=None):
        """Plot a 2-D t-SNE embedding of the first 1000 rows of `visual_part`.

        If `labels` is given, each point is drawn as its digit label,
        colored per class; otherwise a plain scatter plot is shown.
        Imports sklearn/matplotlib lazily so they are only required here.
        """
        from sklearn import manifold
        import matplotlib.pyplot as plt
        def plot_embedding(X, y=None, title=None):
            # Rescale each axis to [0, 1] before plotting.
            x_min, x_max = np.min(X, 0), np.max(X, 0)
            X = (X - x_min) / (x_max - x_min)
            # plt.figure()
            # ax = plt.subplot(111)
            if y is None:
                plt.scatter(X[:, 0], X[:, 1], marker='.')
            else:
                for i in range(X.shape[0]):
                    plt.text(X[i, 0], X[i, 1], str(y[i]),
                             color=plt.cm.Set1(y[i] / 10.),
                             fontdict={'weight': 'bold', 'size': 9})
            plt.xticks([]), plt.yticks([])
            if title is not None:
                plt.title(title)
            # plt.savefig(fname="graphs/{}.pdf".format(title.replace(' ', '_')), format="pdf")
            plt.show()
        print("Do T-SNE...")
        tsne = manifold.TSNE(n_components=2)  # , init='random', random_state=None)
        X_tsne = tsne.fit_transform(visual_part[:1000])
        print("Plotting...")
        Y = None
        if labels is not None:
            Y = labels[:1000]
        plot_embedding(X_tsne, Y, title)
        # plot_embedding(X_tsne)
class ManualDataset(BaseDataset):
    """MNIST dataset parsed directly from the raw idx files.

    File format: http://yann.lecun.com/exdb/mnist/ — big-endian 32-bit
    header fields followed by a raw uint8 payload.  Parsed arrays can be
    cached as .npy files in ``output_numpy_dir`` and reloaded on later
    runs when ``is_read_from_file`` is True.

    Changes from the previous revision: removed ~120 lines of
    commented-out code duplicating BaseDataset methods, replaced the
    per-byte hex-string parsing with an equivalent (and far faster)
    ``np.frombuffer`` read, and split the monolithic loader into small
    helpers.  Produced attributes and values are unchanged.
    """
    def __init__(self, is_read_from_file=False, dataset_path="MNIST", output_numpy_dir=""):
        """Load the four MNIST files from ``dataset_path``.

        Parameters
        ----------
        is_read_from_file: bool
            If True, prefer previously saved .npy caches in ``dataset_path``.
        dataset_path: str
            Directory holding the decompressed idx files (and any caches).
        output_numpy_dir: str
            Directory to save .npy caches into; "" disables saving.
        """
        self.isReadFromFile = is_read_from_file
        self.path = dataset_path
        self.outputPath = output_numpy_dir
        self.trainImg = None
        self.trainLabel = None
        self.trainNum = 0
        self.testImg = None
        self.testLabel = None
        self.testNum = 0
        self.rows = 28
        self.cols = 28
        self.channels = 1
        self.__load()
    def _cache_path(self, raw_name):
        # A cached array sits next to the raw file with numpy's suffix added.
        return os.path.join(self.path, raw_name + ".npy")
    def _save_cache(self, raw_name, array):
        # Persist the parsed array so later runs can skip the idx parse
        # (np.save appends the ".npy" suffix, matching _cache_path).
        if self.outputPath != "":
            if not os.path.exists(self.outputPath):
                os.mkdir(self.outputPath)
            np.save(os.path.join(self.outputPath, raw_name), array)
    def _load_images(self, raw_name):
        """Return an (N, rows*cols) uint8 array for one idx3 image file."""
        cache = self._cache_path(raw_name)
        if self.isReadFromFile and os.path.exists(cache):
            return np.load(cache)
        with open(os.path.join(self.path, raw_name), 'rb') as idx:
            idx.read(4)  # magic number 2051
            num = int.from_bytes(idx.read(4), "big")   # number of images
            rows = int.from_bytes(idx.read(4), "big")  # rows per image
            cols = int.from_bytes(idx.read(4), "big")  # cols per image
            # frombuffer yields a read-only view; copy keeps it writable.
            images = np.frombuffer(idx.read(num * rows * cols),
                                   dtype=np.uint8).reshape(num, rows * cols).copy()
        self._save_cache(raw_name, images)
        return images
    def _load_labels(self, raw_name, count):
        """Return a (count, 10) one-hot uint8 array for one idx1 label file."""
        cache = self._cache_path(raw_name)
        if self.isReadFromFile and os.path.exists(cache):
            return np.load(cache)
        with open(os.path.join(self.path, raw_name), 'rb') as idx:
            idx.read(8)  # magic number 2049 + item count (count is passed in)
            digits = np.frombuffer(idx.read(count), dtype=np.uint8)
        one_hot = np.zeros((count, 10), dtype=np.uint8)
        one_hot[np.arange(count), digits] = 1
        self._save_cache(raw_name, one_hot)
        return one_hot
    def __load(self):
        """Populate train/test images and one-hot labels from disk."""
        self.trainImg = self._load_images("train-images.idx3-ubyte")
        self.trainNum = self.trainImg.shape[0]
        self.testImg = self._load_images("t10k-images.idx3-ubyte")
        self.testNum = self.testImg.shape[0]
        self.trainLabel = self._load_labels("train-labels.idx1-ubyte", self.trainNum)
        self.testLabel = self._load_labels("t10k-labels.idx1-ubyte", self.testNum)
class TorchReaderDataset(BaseDataset):
    """MNIST dataset loaded through torchvision's built-in downloader.

    Produces the same attribute layout as ManualDataset: flattened uint8
    images of shape (N, 784) and (N, 10) one-hot uint8 labels.
    (Removed the large blocks of commented-out DataLoader/transform code
    from the previous revision; behavior is unchanged.)
    """
    def __init__(self, dataset_path="./data"):
        """Download (if needed) and load MNIST.

        Parameters
        ----------
        dataset_path: str
            Root directory torchvision downloads/caches MNIST into.
        """
        self.path = dataset_path
        self.trainImg = None
        self.trainLabel = None
        self.testImg = None
        self.testLabel = None
        self.rows = 28
        self.cols = 28
        self.channels = 1
        self.__load()
    def __load(self):
        """Fetch MNIST via torchvision and convert tensors to numpy arrays."""
        # Lazy imports so torch is only required when this loader is used.
        import torch
        import torchvision
        with torch.no_grad():
            self.trainset = torchvision.datasets.MNIST(root=self.path, train=True,
                                                       download=True)
            self.testset = torchvision.datasets.MNIST(root=self.path, train=False,
                                                      download=True)
            # Flatten each 28x28 image to a 784-vector (matches ManualDataset).
            self.trainImg = self.trainset.data.view(-1, self.channels * self.rows * self.cols).cpu().numpy()
            train_digits = self.trainset.targets.cpu().numpy()
            self.testImg = self.testset.data.view(-1, self.channels * self.rows * self.cols).cpu().numpy()
            test_digits = self.testset.targets.cpu().numpy()
            # One-hot encode the integer class labels.
            self.trainLabel = np.zeros((self.trainImg.shape[0], 10), dtype=np.uint8)
            self.testLabel = np.zeros((self.testImg.shape[0], 10), dtype=np.uint8)
            self.trainLabel[range(len(train_digits)), train_digits] = 1
            self.testLabel[range(len(test_digits)), test_digits] = 1
if __name__ == '__main__':
    # First decompress all the MNIST files and put them in the folder named
    # by the second argument ("MNIST").
    # The first read is slow, so when the third argument is not "" the
    # generated matrices are saved under it (here also "MNIST").
    # First argument: whether to load the cached matrices from file (if they
    # were saved under the second argument) instead of regenerating them
    # (for testing; it can simply be left as True for now).
    # ds = Dataset(True, "MNIST", "MNIST")
    ds = TorchReaderDataset("./data")
    # img_id = 9999
    # for i in range(28):
    #     for j in range(28):
    #         if ds.testImg[img_id, i * 28 + j] > 0:
    #             print('#', end='')
    #         else:
    #             print(' ', end='')
    #     print()
    # print(ds.testLabel[img_id])
    # print()
    # print(ds.get_vector_train_img(True)[0])
    # from sklearn.decomposition import PCA
    # pca = PCA(n_components=5)
    # ds.t_sne(pca.fit_transform(ds.get_vector_train_img()), ds.get_vector_train_label())
    # ds.t_sne(ds.get_pca_vector_train_img(15), ds.get_vector_train_label(), "PCA with 15 Dimensions")
    # ds.t_sne(ds.get_pca_vector_train_img(30), ds.get_vector_train_label(), "PCA with 30 Dimensions")
    # ds.t_sne(ds.get_vector_train_img(), ds.get_vector_train_label(), "without PCA")
    # from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
    # lda = LDA(n_components=9)
    # ds.t_sne(lda.fit_transform(ds.get_vector_train_img(), ds.get_vector_train_label()), ds.get_vector_train_label())
    # Visualize the learned LDA and PCA embeddings with t-SNE.
    ds.t_sne(ds.get_lda_vector_train_img(9), ds.get_vector_train_label(), "LDA with 9 Dimensions")
    ds.t_sne(ds.get_pca_vector_train_img(9), ds.get_vector_train_label(), "PCA with 9 Dimensions")
| [
"wedonsjtu@outlook.com"
] | wedonsjtu@outlook.com |
1136a7c21fad840e85b384f1c66c998e887ef10b | a991bc47d645194c2e028cb70ff4a76d7b444a84 | /predict.py | 0539503a2b5bf18408c5aa429641d04f64a20180 | [] | no_license | YYYangup/yolo3-keras | 322b553d77f3501fcfe7c0cc6501f5d294e0a403 | 2756ff9819b1922fc6aec49b547a5f4906a7cd13 | refs/heads/master | 2020-09-21T22:31:28.365742 | 2019-11-27T09:49:21 | 2019-11-27T09:49:21 | 224,955,803 | 0 | 1 | null | 2019-11-30T03:48:30 | 2019-11-30T03:48:30 | null | UTF-8 | Python | false | false | 416 | py | from nets.yolo3 import yolo_body
from keras.layers import Input
from yolo import YOLO
from PIL import Image
# Interactive detection loop: ask for an image path, run YOLO on it,
# and show the annotated result.
yolo = YOLO()
while True:
    img = input('Input image filename:')
    try:
        # Bug fix: open the file the user actually typed.  Previously this
        # opened a hard-coded path with a stray trailing dot
        # ("img/street.jpg."), so the prompt input was silently ignored.
        image = Image.open(img)
    except Exception:
        print('Open Error! Try again!')
        continue
    else:
        r_image = yolo.detect_image(image)
        r_image.show()
# NOTE(review): unreachable — the loop above never breaks, so the session
# is never closed; kept as-is to preserve the original structure.
yolo.close_session()
"noreply@github.com"
] | YYYangup.noreply@github.com |
73a10c7a266db9e84fa1733e854c9dff20de0492 | c7404bc844161191de145f3d3c1ebe086c69a7b0 | /项目二模型评估和验证/code 答案/splitdata.py | ca233e03a6b74cf30cf8a05684641ca12103af03 | [] | no_license | yanbin92/AI | d574bbdb4829874afa968ed31677c84dffbd6546 | 1b26bdbfd106aa246288db9a0b12ded27a858fa0 | refs/heads/master | 2021-09-26T10:33:50.244640 | 2018-10-29T11:58:08 | 2018-10-29T11:58:08 | 114,837,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | #!/usr/bin/python
""" this example borrows heavily from the example
shown on the sklearn documentation:
http://scikit-learn.org/stable/modules/cross_validation.html
"""
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
iris = datasets.load_iris()
features = iris.data
labels = iris.target
###############################################################
### YOUR CODE HERE
###############################################################
### import the relevant code and make your train/test split
### name the output datasets features_train, features_test,
### labels_train, and labels_test
### set the random_state to 0 and the test_size to 0.4 so
### we can exactly check your result
features_train,features_test,labels_train,labels_test=train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)
###############################################################
clf = SVC(kernel="linear", C=1.)
clf.fit(features_train, labels_train)
print (clf.score(features_test, labels_test))
##############################################################
def submitAcc():
return clf.score(features_test, labels_test) | [
"ybinbin@Outlook.com"
] | ybinbin@Outlook.com |
b84323025761648473661b72434d79d1824b1ac2 | 0eb464ce1d0b4a4bd063d5dd5f4822a6efc443fd | /env/bin/easy_install | d7123822641002a57bd96b86186f85362845226b | [] | no_license | sahajap/Sahanjana | 9a651855df468b8ccc4dd5e4fb97d325c6b5526b | 73dbf9ac218cbaa0657daafdcdc92cb7621d743e | refs/heads/main | 2023-01-30T07:10:32.805960 | 2020-12-15T04:01:51 | 2020-12-15T04:01:51 | 318,038,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/Users/sahaja/Documents/si-206/Sahanjana/env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script shim for easy_install.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a "-script.pyw"/".exe" suffix so argv[0] matches the command
    # name, then delegate to setuptools' easy_install entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"sahajap@umich.edu"
] | sahajap@umich.edu | |
2a33fbb3c1830065fa8e3ab389d476b27445f188 | 747a41fbf075291c0ad49457cbbdf4ba04c078e4 | /Receiving data Bluetooth with data writing.py | 9dbf105cd42e44cb50383824e7f0f867b0fe0bb8 | [] | no_license | OsirisDevelopment/Harnes | 01537fd587acd9155c1918c8b6445e6555ea23c7 | c1a24eb55eedbb758c806dd1c1adf3829d6b1c2f | refs/heads/master | 2021-01-10T15:55:19.802058 | 2016-09-23T19:09:17 | 2016-09-23T19:09:17 | 54,903,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,239 | py | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import os
import time
import serial
import datetime
from zeus import client
import json
archivo=open("metricas.txt","w")
#for linea in archivo:
# continue
#prueba=open("probando.txt","w")
#conexion
#conexion = client.ZeusClient('7caecc05', 'api.ciscozeus.io')
#Para uso interno de while
flag=True
# Iniciando conexion serial
arduinoPort = serial.Serial('COM8', 9600, timeout=1)
flagCharacter = 'k'
# Retardo para establecer la conexion serial
time.sleep(1.8)
arduinoPort.write(flagCharacter)
#archivo.write('timestamp ; id ; enganche : gas : ruido : presion : temperatura : altura : oxigenacion\n')
while flag:
archivo=open("metricas.txt","r+")
for linea in archivo:
continue
metrica = '[{"timestamp": %d,"point":{"id":%d,"seguro": %d ,"colgado":%d ,"gas": %d ,"ruido": %d ,"presion":%d,"temperatura":%d,"altura":%d,"oxigenacion":%d}}]'
#timestamp de la hora del sistema
timestamp= time.time()
getSerialValue = arduinoPort.readline()
listaSenales=getSerialValue.strip().split("*")
# ["id","E1 (seguro)","E2 (Colgado o no)","Gas","Ruido","Presion","Temperatura","Altura","Oxigenacion"]
identificador=int(listaSenales[0])
seguro=int(listaSenales[1])
colgado=int(listaSenales[2])
gas=int(listaSenales[3])
ruido=int(listaSenales[4])
presion=int(listaSenales[5])
temperatura=float(listaSenales[6])
altimetro=float(listaSenales[7])
oxigenacion=float(listaSenales[8])
metrica=metrica % (timestamp ,identificador ,seguro ,colgado , gas , ruido , presion , temperatura , altimetro ,oxigenacion )
#data_string= json.dumps(eval(metrica))
#metrica2=eval(metrica)
cadena=str(int(timestamp))+';'+str(identificador)+';'+str(seguro)+';'+str(colgado)+';'+str(gas)+';'+str(ruido)+';'+str(presion)+';'+str(temperatura)+';'+str(altimetro)+';'+str(oxigenacion)+'\n'
#print type(str(eval(metrica)))
#prueba.write(str(eval(metrica)))
archivo.write(cadena)
archivo.close()
#conexion.sendMetric("Device",eval(metrica))
#print conexion.getMetricNames(metric_name="Device", limit=10)
print (metrica)
time.sleep(1)
# Cerrando puerto serial
arduinoPort.close() | [
"paolo.caviedes@gmail.com"
] | paolo.caviedes@gmail.com |
8e1117685899d2bf068c219a6f66312448e008ff | 9131dd03ff2880fca2a5883572784f8e51046e41 | /env/lib/python3.6/site-packages/clicksend_client/models/delivery_issue.py | 84f1503041f46cfe49989d1ade2142787157ff54 | [] | no_license | aviadm24/coronaap | fe10619ae42a8c839cd0a2c2c522187c5f21fbc7 | 5608c2d77cb3441b48ba51da04c06a187fb09488 | refs/heads/master | 2022-12-09T21:35:17.179422 | 2021-01-28T08:21:49 | 2021-01-28T08:21:49 | 249,938,200 | 0 | 0 | null | 2021-09-22T18:47:51 | 2020-03-25T09:36:10 | JavaScript | UTF-8 | Python | false | false | 7,502 | py | # coding: utf-8
"""
ClickSend v3 API
This is an official SDK for [ClickSend](https://clicksend.com) Below you will find a current list of the available methods for clicksend. *NOTE: You will need to create a free account to use the API. You can register [here](https://dashboard.clicksend.com/#/signup/step1/)..* # noqa: E501
OpenAPI spec version: 3.1
Contact: support@clicksend.com
Generated by: https://github.com/clicksend-api/clicksend-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeliveryIssue(object):
"""NOTE: This class is auto generated by the clicksend code generator program.
Do not edit the class manually.
"""
"""
Attributes:
clicksend_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
clicksend_types = {
'message_id': 'str',
'type': 'str',
'description': 'str',
'client_comments': 'str',
'email_address': 'str'
}
attribute_map = {
'message_id': 'message_id',
'type': 'type',
'description': 'description',
'client_comments': 'client_comments',
'email_address': 'email_address'
}
discriminator_value_class_map = {
}
def __init__(self, message_id=None, type=None, description=None, client_comments=None, email_address=None): # noqa: E501
"""DeliveryIssue - a model defined in Swagger""" # noqa: E501
self._message_id = None
self._type = None
self._description = None
self._client_comments = None
self._email_address = None
self.discriminator = 'classType'
if message_id is not None:
self.message_id = message_id
self.type = type
self.description = description
if client_comments is not None:
self.client_comments = client_comments
self.email_address = email_address
@property
def message_id(self):
"""Gets the message_id of this DeliveryIssue. # noqa: E501
The message id of the message. # noqa: E501
:return: The message_id of this DeliveryIssue. # noqa: E501
:rtype: str
"""
return self._message_id
@message_id.setter
def message_id(self, message_id):
"""Sets the message_id of this DeliveryIssue.
The message id of the message. # noqa: E501
:param message_id: The message_id of this DeliveryIssue. # noqa: E501
:type: str
"""
self._message_id = message_id
@property
def type(self):
"""Gets the type of this DeliveryIssue. # noqa: E501
The type of message, must be one of the following values SMS, MMS, VOICE, EMAIL_MARKETING, EMAIL_TRANSACTIONAL, FAX, POST. # noqa: E501
:return: The type of this DeliveryIssue. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this DeliveryIssue.
The type of message, must be one of the following values SMS, MMS, VOICE, EMAIL_MARKETING, EMAIL_TRANSACTIONAL, FAX, POST. # noqa: E501
:param type: The type of this DeliveryIssue. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def description(self):
"""Gets the description of this DeliveryIssue. # noqa: E501
The description of the message. # noqa: E501
:return: The description of this DeliveryIssue. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this DeliveryIssue.
The description of the message. # noqa: E501
:param description: The description of this DeliveryIssue. # noqa: E501
:type: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def client_comments(self):
"""Gets the client_comments of this DeliveryIssue. # noqa: E501
The user's comments. # noqa: E501
:return: The client_comments of this DeliveryIssue. # noqa: E501
:rtype: str
"""
return self._client_comments
@client_comments.setter
def client_comments(self, client_comments):
"""Sets the client_comments of this DeliveryIssue.
The user's comments. # noqa: E501
:param client_comments: The client_comments of this DeliveryIssue. # noqa: E501
:type: str
"""
self._client_comments = client_comments
@property
def email_address(self):
"""Gets the email_address of this DeliveryIssue. # noqa: E501
The user's email address. # noqa: E501
:return: The email_address of this DeliveryIssue. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this DeliveryIssue.
The user's email address. # noqa: E501
:param email_address: The email_address of this DeliveryIssue. # noqa: E501
:type: str
"""
if email_address is None:
raise ValueError("Invalid value for `email_address`, must not be `None`") # noqa: E501
self._email_address = email_address
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_value = data[self.discriminator].lower()
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.clicksend_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeliveryIssue, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeliveryIssue):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"aviadm24@gmail.com"
] | aviadm24@gmail.com |
5db5f18acdac869283999bee98d20d94a341c7df | 23f8e15047e2303bbc0ef6d7db93286f629f139a | /src/PipelineConfigurator/IPipelineConfigurator.py | b9928eb35bb45388b0b2b101041a271ddd63b835 | [] | no_license | russian-national-corpus/preprocessing | 988a0616f3082ae3eeb63f573635f4888c4e5eb1 | 4b6a5a9f822d0d7a1d16baaf72795f888add5f23 | refs/heads/master | 2021-06-13T00:14:44.104132 | 2019-10-03T20:38:34 | 2019-10-03T20:38:34 | 165,403,637 | 2 | 0 | null | 2021-03-26T13:45:33 | 2019-01-12T15:32:47 | Python | UTF-8 | Python | false | false | 320 | py | from abc import ABC, abstractmethod
from pathlib import Path
from src.Corpus.CorpusSource import CorpusSource
from src.Pipeline.Pipeline import Pipeline
class IPipelineConfigurator(ABC):
@abstractmethod
def get_pipeline(self, corpus_source: CorpusSource, destination_directory: Path) -> Pipeline:
pass
| [
"vyshkant@gmail.com"
] | vyshkant@gmail.com |
605a9276d6ff00dfeb1d7cf79fc65b18bf25f15b | 7535929cf1b873a89e50c0e4b6bdc3ec80ad39e8 | /api/migrations/0004_auto_20200604_1338.py | cca338ac9fddbb8b0920fbea783c045bdc0f677c | [] | no_license | Nikolai586/api_final_yatube | 4bdd89212d2e9a0fcd99ba81e669ada283265edc | a7b33e889053bd21a9544d6c8d81440d1266db47 | refs/heads/master | 2022-10-07T11:55:32.846633 | 2020-06-09T13:47:28 | 2020-06-09T13:47:28 | 268,489,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.0.7 on 2020-06-04 10:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_post_group'),
]
operations = [
migrations.AlterField(
model_name='group',
name='slug',
field=models.SlugField(blank=True, null=True, unique=True),
),
]
| [
"n.chistoprudov@yandex.ru"
] | n.chistoprudov@yandex.ru |
098dc0033ac1cd59031b9b30c07319f62122441e | b338c7ec821196ad12e1fda5395723dafd06ebb8 | /MyRangoApp/forms.py | 46201aaabddb0878ccc64d7b7edc5265a516a037 | [
"Apache-2.0"
] | permissive | octobertech/MyRangoApp | 96156be35a4f5fa0ac990e9193880c950526674b | e86c3853b9836a07ccc1ed1910bb75c1596ac4cf | refs/heads/master | 2021-01-10T21:08:00.017445 | 2014-04-03T08:22:33 | 2014-04-03T08:22:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | #__author__ = 'Admin'
from django import forms
from MyRangoApp.models import Page, Category
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text="Please enter the category name.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
# An inline class to provide additional information on the form.
class Meta:
# Provide an association between the ModelForm and a model
model = Category
class PageForm(forms.ModelForm):
title = forms.CharField(max_length=128, help_text="Please enter the title of the page.")
url = forms.URLField(max_length=200, help_text="Please enter the URL of the page.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
class Meta:
# Provide an association between the ModelForm and a model
model = Page
# What fields do we want to include in our form?
# This way we don't need every field in the model present.
# Some fields may allow NULL values, so we may not want to include them...
# Here, we are hiding the foreign key.
fields = ('title', 'url', 'views')
def clean(self):
cleaned_data = self.cleaned_data
url = cleaned_data.get('url')
#if url is not empty and doesn't start with 'http://', prepend 'http://'.
if url and not url.startwith('url'):
url = 'http://' + url
cleaned_data['url'] = url
return cleaned_data
| [
"octobrtech@gmail.com"
] | octobrtech@gmail.com |
e6176b6cfd8dd4d5cfb4d30bed137a3e940c72c8 | 5b26ef34f0d59c0c6b21f847b825e419ba7b5ec2 | /Part1_Question4_b.py | fe1bbe1e9b7e8c4968b56d36e3b90371c2f2043f | [] | no_license | aniketanvit/prob_hunting | 5da1670ab464c8e7b03a5c359c9b3bc578954fe3 | 7512ce02dd9f0c55f258dd60b852a52f0c1de44d | refs/heads/master | 2021-05-07T03:09:39.352732 | 2017-11-21T02:28:44 | 2017-11-21T02:28:44 | 110,617,457 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | from probabilistic_hunting_target_present import ProbabilisticHunting_TargetPresent
from probabilistic_hunting_target_found import ProbabilisticHunting_TargetFound
from probabilistic_hunting_move_cost import ProbabilisticHunting_WithCost_Rule2
import copy
import matplotlib.pyplot as plt
import random
def main():
bot_for_moving_cost = ProbabilisticHunting_WithCost_Rule2 (20)
bot_for_target_present = ProbabilisticHunting_TargetPresent (20)
bot_for_moving_cost.Map = copy.deepcopy (bot_for_target_present.getMap ())
iteration_count = 30
MOVECOST_PERFORMANCE = []
PRESENT_PERFORMANCE = []
for i in range (0, iteration_count, 1):
r_ind = random.randint (0, bot_for_moving_cost.dimension - 1)
c_ind = random.randint (0, bot_for_target_present.dimension - 1)
bot_for_moving_cost.target_i = r_ind
bot_for_moving_cost.target_i = c_ind
bot_for_target_present.target_i = r_ind
bot_for_target_present.target_i = c_ind
time_taken = bot_for_moving_cost.startHunt ()
MOVECOST_PERFORMANCE.append (time_taken)
time_taken = bot_for_target_present.startHunt ()
PRESENT_PERFORMANCE.append (time_taken)
bot_for_target_present.reset()
bot_for_moving_cost.reset()
plt.plot (PRESENT_PERFORMANCE, MOVECOST_PERFORMANCE, linestyle='', marker='o', color='b')
plt.ylabel ('Searches taken using Rule 2')
plt.xlabel ('Searches taken with cost for moving')
plt.title ('Performance measure - Rule 2 vs Rule 2 with Cost for moving')
plt.show ()
if __name__ == '__main__':
main() | [
"aniket.anvit@rutgers.edu"
] | aniket.anvit@rutgers.edu |
e97223c7c3bab2eb2db5d8c5f144f8c68c385a69 | d8a20107896c8ed2cb050a2e9ef55b4149a4f360 | /python/0061. Rotate List.py | f8b816ded895011c5f65425aabfef5da19851cb4 | [] | no_license | dyaroshevych/leetcode | cf7993c85f095363750f625aabbab607750854dc | 6329eaefa6698163f7e7fda66e53360760afc20f | refs/heads/master | 2021-07-16T21:55:55.057090 | 2021-03-14T10:51:41 | 2021-03-14T10:51:41 | 243,357,363 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def rotateRight(self, head: ListNode, shift: int) -> ListNode:
if (head is None):
return None
list_length = self.get_list_length(head)
shift %= list_length
if shift == 0:
return head
curr_node = head
for _ in range(list_length - shift - 1):
curr_node = curr_node.next
new_head = curr_node.next
curr_node.next = None
curr_node = new_head
while curr_node.next:
curr_node = curr_node.next
curr_node.next = head
return new_head
def get_list_length(self, head: ListNode) -> int:
list_length = 0
while head:
head = head.next
list_length += 1
return list_length
| [
"dyaroshevych@gmail.com"
] | dyaroshevych@gmail.com |
037d41e6744bc9acb9db4aa05fe1fa4f27a33ac0 | 1405f47a6e0715f163439b034987e6e298f74429 | /skee_t/wx/services/accessToken.py | 33c42997379ed0c2cf1129da9e40ca61928493da | [] | no_license | skee-t/backend | 5dd7064c62615de16c3fefba34edc19e598df00d | 941976d99245486790ca91e134b0cbae1a003f1e | refs/heads/master | 2021-05-03T20:13:35.559147 | 2016-12-26T10:45:09 | 2016-12-26T10:45:09 | 69,564,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | #! -*- coding: UTF-8 -*-
import logging
from sqlalchemy.orm.exc import NoResultFound
from skee_t.db import DbEngine
from skee_t.db.models import WxAccessToken
from skee_t.services import BaseService
__author__ = 'rensikun'
LOG = logging.getLogger(__name__)
class wxAccessTokenService(BaseService):
"""
"""
def __init__(self):
pass
def add(self, uuid, access_token, expires_in):
"""
创建用户方法
:param dict_args:Map类型的参数,封装了由前端传来的用户信息
:return:
"""
wxAccessToken = WxAccessToken(
uuid=uuid,
access_token=access_token,
expires_in=expires_in
)
session = None
rst_code = 0
rst_desc = 'success'
try:
session = DbEngine.get_session_simple()
session.add(wxAccessToken)
session.commit()
except Exception as e:
LOG.exception("Create user information error.")
rst_code = '999999'
rst_desc = e.message
if session is not None:
session.rollback()
finally:
session.close()
return {'rst_code':rst_code, 'rst_desc':rst_desc}
def query(self, state):
"""
创建用户方法
:param dict_args:Map类型的参数,封装了由前端传来的用户信息
:return:
"""
session = None
rst_code = 0
rst_desc = 'success'
try:
session = DbEngine.get_session_simple()
return session.query(WxAccessToken) \
.filter(WxAccessToken.state == state)\
.order_by(WxAccessToken.entry_time.desc()).first()
except NoResultFound as e:
LOG.exception("access token error.")
return None
except (TypeError, Exception) as e:
LOG.exception("List SkiResort information error.")
# 数据库异常
rst_code = '999999'
rst_desc = e.message
finally:
session.close()
return {'rst_code': rst_code, 'rst_desc': rst_desc}
def update(self, uuid, state):
"""
创建用户方法
:param dict_args:Map类型的参数,封装了由前端传来的用户信息
:return:
"""
session = None
rst_code = 0
rst_desc = 'success'
try:
session = DbEngine.get_session_simple()
session.query(WxAccessToken)\
.filter(WxAccessToken.uuid == uuid)\
.update({WxAccessToken.state:state}
,synchronize_session=False
)
session.commit()
except NoResultFound as e:
LOG.exception("get_user_auth_info error.")
return None
except (TypeError, Exception) as e:
LOG.exception("List SkiResort information error.")
# 数据库异常
rst_code = '999999'
rst_desc = e.message
finally:
session.close()
return {'rst_code': rst_code, 'rst_desc': rst_desc} | [
"rensikun@paypalm.cn"
] | rensikun@paypalm.cn |
f2e1a8bc0a3be1175a538b75a5bd6dbdf53f40ae | a0bce06fa04eb353ef6751848ddbfdc46af26671 | /Filtering_steps/Filter_step_6/Filter-stage-2.py | ad10be27d2ba97dd7b2e44f2e53802089834ec8e | [] | no_license | LeebanY/avian-comparative-genomics | 0e7c39901cd87f6b5417d2393e7d185bb652b86d | 5f6e129b44ff2693ec2ebd87e409be8c06abf79e | refs/heads/master | 2021-09-25T13:34:19.370955 | 2018-10-22T17:01:14 | 2018-10-22T17:01:14 | 115,000,027 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | import os
import sys
#locations
#reference_files = '/data/bop17lhy/masters_scripts/bad_vals_zorro_PIPE3.txt'
reference_files = '/data/bop17lhy/masters_scripts/removing_stop_codons_faslist.txt'
# reference_files = '/data/bop17lhy/masters_scripts/bad_vals_zorro_rd2.txt'
# protein_alignments = '/data/bop17lhy/sequences/filters_attempt2/filter1'
#processing reference files from txt file
ref_contents = open(reference_files).readlines()
passed_fastas = []
for line in ref_contents:
split_ref = line.split('/')
new_contents = split_ref[7]
#print(new_contents)
new_ref = new_contents.rsplit('\n')[0]
passed_fastas.append(new_ref)
print(new_ref)
#print(passed_fastas)
for fasta in passed_fastas:
# tries to run file moving
try:
os.rename('/data/bop17lhy/sequences/TEST_RUN_3/Filter_stages/filtered_prot/'.format(fasta),
'/data/bop17lhy/sequences/TEST_RUN_3/Filter_stages/final_alignment/'.format(fasta))
# os.rename('/data/bop17lhy/sequences/filters/filter1.1/{}.fai'.format(fasta),
# '/data/bop17lhy/sequences/filters/filter1.2/{}.fai'.format(fasta))
# if this raise the below error
except FileNotFoundError:
continue | [
"noreply@github.com"
] | LeebanY.noreply@github.com |
0825c9ac27b0826f2ff3906dcc0f88b232c4e70b | 17c139214d59cd07bba636ba166c0f0a92fd42fa | /test_scaling.py | c7566263b622c42bf1b3922345f6ec95d0d47bae | [] | no_license | piotr-teterwak/CS655MiniProject | cec7346b40432d25d39a9e8a19946a8ddf0a7da2 | 0f5783355b5fa40be565501581cbc332ca7be956 | refs/heads/main | 2023-01-27T16:04:23.562441 | 2020-12-10T03:54:31 | 2020-12-10T03:54:31 | 319,696,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | import test_script
if __name__ == "__main__":
rtt_size_map = { '128': 'test_images/test_128.jpg',
}
for size in rtt_size_map.keys():
for wait_time in [0.5,0.2,0.1, 0.01]:
for num_servers in [1,2,4]:
average_time = test_script.run_test_loop(wait_time,100,num_servers,500,rtt_size_map[size])
print('average RTT for wait time {} with {} servers is {} seconds'.format(wait_time, num_servers, average_time))
| [
"piotrt@client.miniprojectimgclass.ch-geni-net.instageni.colorado.edu"
] | piotrt@client.miniprojectimgclass.ch-geni-net.instageni.colorado.edu |
712ebb3e8e9c6daab9c2cd3b469cecab96797c6e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_godfather.py | 813d71c96e7c700883fb63b1932814bc31f99141 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py |
#calss header
class _GODFATHER():
def __init__(self,):
self.name = "GODFATHER"
self.definitions = [u'a male godparent', u'the leader of a criminal group, especially a mafia family']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2de7e59933c2bb81749a01df0dea8c3a5f82a350 | 77d88999f858984e531f1a575b2f46843b2be40b | /src/parser.py | 432463aa296424aac3a2850902de96abe6f9ca1d | [] | no_license | carverdamien/plot.ipanema | 70a243a1aa8a2979f0554ea416623aad0acb0574 | 94f11e9e5fbd0012055ac23321680d39875e3435 | refs/heads/master | 2022-01-18T06:11:41.337365 | 2019-06-20T09:32:38 | 2019-06-20T09:32:38 | 174,350,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,722 | py | import os, logging, json, parse
from common import *
class ProcStat:
    """Parser for a pair of /proc/stat CPU-time snapshots.

    Reads the aggregate ``cpu`` line from a ``stat.begin``/``stat.end``
    dump pair and returns the per-field deltas (time spent in each CPU
    state over the measured interval).
    """

    # Layout of the aggregate "cpu" line, see
    # https://elixir.bootlin.com/linux/v4.19/source/fs/proc/stat.c
    PROC_STAT_EXPECTED_OUTPUT = """cpu {procstat_user} {procstat_nice} {procstat_system} {procstat_idle} {procstat_iowait} {procstat_irq} {procstat_softirq} {procstat_steal} {procstat_guest} {procstat_guest_nice}"""

    def _parse_path(self, path):
        """Parse one /proc/stat dump at `path`.

        Returns a dict with the raw ``procstat_*`` counters (as ints), a
        derived ``procstat_total``, plus bookkeeping keys ``file`` and
        ``st_mtime``.  Raises FileNotFoundError if `path` is missing.
        """
        st_mtime = os.stat(path).st_mtime
        data = {
            'file' : path,
            'st_mtime' : st_mtime,
        }
        with open(path, 'r') as fp:
            r = parse.search(self.PROC_STAT_EXPECTED_OUTPUT, fp.read())
            data.update({
                k: int(r.named[k])
                for k in r.named
            })
        # Normalization mirrors htop's accounting, see
        # https://github.com/hishamhm/htop/blob/master/linux/LinuxProcessList.c
        usertime = data['procstat_user']
        guest = data['procstat_guest']
        guestnice = data['procstat_guest_nice']
        idletime = data['procstat_idle']
        ioWait = data['procstat_iowait']
        systemtime = data['procstat_system']
        irq = data['procstat_irq']
        softIrq = data['procstat_softirq']
        nicetime = data['procstat_nice']
        steal = data['procstat_steal']
        # Guest time is already included in user/nice; subtract it so the
        # components below contribute to the total exactly once.
        usertime = usertime - guest
        nicetime = nicetime - guestnice
        idlealltime = idletime + ioWait
        systemalltime = systemtime + irq + softIrq
        virtalltime = guest + guestnice
        totaltime = usertime + nicetime + systemalltime + idlealltime + steal + virtalltime
        data['procstat_total'] = totaltime
        # NOTE: an unreachable `raise ParsingError()` used to follow the
        # return below; removed as dead code.
        return data

    def parse(self, dirPath):
        """Return the counter deltas between `dirPath`/stat.begin and .end.

        A missing dump file is logged as a warning and an empty dict is
        returned instead of raising.
        """
        try:
            begin = self._parse_path(dirPath + '/stat.begin')
            end = self._parse_path(dirPath + '/stat.end')
            return {k: end[k] - begin[k] for k in begin if k not in ['file', 'st_mtime']}
        except FileNotFoundError as e:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning('In ProcStat: {}'.format(e))
        return {}
class SchedMonitor:
    """Parser for per-CPU sched_monitor dump directories.

    Aggregates the per-CPU counters of each scheduler subsystem
    (sched, idle, fair, ipanema) into whole-machine totals.
    """

    # Expected text layout of one per-CPU file, keyed by subsystem.
    SCHED_MONITOR_EXPECTED_OUTPUT = {
        'sched' : """{total_ns:d}""",
        'idle' : """Idle: {total_ns} ns ({total_hits} hits)""",
        'fair' : """{:s}{event_name}: {event_ns} ns ({event_hits} hits)""",
        'ipanema' : """{:s}{event_name}: {event_ns} ns ({event_hits} hits)""",
    }

    def _parse_one_cpu(self, path, subsystem):
        """Parse one CPU's dump file for `subsystem`.

        Returns {'total_ns': ..., 'total_hits': ...} plus, for the
        event-based subsystems (fair/ipanema), one '<event>_ns' and
        '<event>_hits' entry per event found.
        """
        expected_output = self.SCHED_MONITOR_EXPECTED_OUTPUT[subsystem]
        with open(path, 'r') as fp:
            data = {}
            for r in parse.findall(expected_output, fp.read()):
                if subsystem == 'sched':
                    # Single global counter: the first match is the answer.
                    return {
                        'total_ns': int(r.named['total_ns'])
                    }
                if subsystem == 'idle':
                    return {
                        'total_ns': int(r.named['total_ns']),
                        'total_hits': int(r.named['total_hits'])
                    }
                event_name = r.named['event_name']
                event_ns = int(r.named['event_ns'])
                event_hits = int(r.named['event_hits'])
                data['total_ns'] = data.get('total_ns', 0) + event_ns
                data['total_hits'] = data.get('total_hits', 0) + event_hits
                data.update({
                    '{}_ns'.format(event_name): event_ns,
                    '{}_hits'.format(event_name): event_hits,
                })
            # NOTE: an unreachable `raise ParsingError()` used to follow
            # this return; removed as dead code.
            return data

    def parse(self, dirPath):
        """Sum every CPU's counters for each subsystem under `dirPath`.

        Result keys are prefixed with the subsystem name (e.g.
        'fair_total_ns').  Missing subsystem directories are logged as a
        warning and skipped.
        """
        all_data = {}
        for subsystem in self.SCHED_MONITOR_EXPECTED_OUTPUT:
            path = "/".join([dirPath, subsystem])
            if not os.path.isdir(path):
                # logging.warn is a deprecated alias of logging.warning
                logging.warning('SchedMonitor parser did not find {}'.format(path))
                continue
            data = {}
            for cpu in os.listdir(path):
                filepath = "/".join([path, cpu])
                # renamed from `cpu` to avoid rebinding the loop variable
                per_cpu = self._parse_one_cpu(filepath, subsystem)
                for k in per_cpu:
                    data[k] = data.get(k, 0) + per_cpu[k]
            all_data.update({
                '{}_{}'.format(subsystem, k): data[k]
                for k in data
            })
        return all_data
class SchedDebug:
    """Parser for a pair of sched_debug snapshots.

    Extracts the clock fields and a selected set of per-CPU
    enqueue/dequeue counters from each dump, then reports the
    newer-minus-older delta for every extracted value.
    """

    SCHED_DEBUG_CLK_EXPECTED_OUTPUT = """
ktime : {ktime}
sched_clk : {sched_clk}
cpu_clk : {cpu_clk}
"""
    SCHED_DEBUG_CPU_EXPECTED_OUTPUT = """ .{key:S}{:s}: {value:S}"""
    SCHED_DEBUG_CPU_EXPECTED_KEYS = [
        'enQ.no_reason',
        'enQ.new',
        'enQ.wakeup',
        'enQ.wakeup_mig_l0',
        'enQ.wakeup_mig_l1',
        'enQ.wakeup_mig_l2',
        'enQ.lb_mig_l0',
        'enQ.lb_mig_l1',
        'enQ.lb_mig_l2',
        'deQ.no_reason',
        'deQ.sleep',
        'enQ.wc.no_reason',
        'enQ.wc.new',
        'enQ.wc.wakeup',
        'enQ.wc.wakeup_mig_l0',
        'enQ.wc.wakeup_mig_l1',
        'enQ.wc.wakeup_mig_l2',
        'enQ.wc.lb_mig_l0',
        'enQ.wc.lb_mig_l1',
        'enQ.wc.lb_mig_l2',
        'deQ.wc.no_reason',
        'deQ.wc.sleep',
    ]

    def _parse_path(self, path):
        """Parse one sched_debug dump into a flat {name: float} dict.

        Also records bookkeeping keys ``file`` and ``st_mtime`` so the
        caller can order the dumps chronologically.
        """
        result = {
            'file': path,
            'st_mtime': os.stat(path).st_mtime,
        }
        # First pass: the global clock values.
        with open(path, 'r') as fp:
            clocks = parse.search(self.SCHED_DEBUG_CLK_EXPECTED_OUTPUT, fp.read())
        for name in clocks.named:
            result[name] = float(clocks.named[name])
        # Second pass: the whitelisted per-CPU counters, summed over CPUs.
        with open(path, 'r') as fp:
            text = fp.read()
        for match in parse.findall(self.SCHED_DEBUG_CPU_EXPECTED_OUTPUT, text):
            counter = match.named['key']
            if counter not in self.SCHED_DEBUG_CPU_EXPECTED_KEYS:
                continue
            result[counter] = result.get(counter, 0) + float(match.named['value'])
        return result

    def parse(self, dirPath):
        """Return newer-minus-older deltas of the two sched_debug dumps
        found in `dirPath`; an empty dict unless exactly two exist."""
        dumps = [
            self._parse_path(dirPath + "/" + name)
            for name in os.listdir(dirPath)
            if 'sched_debug' in name
        ]
        if len(dumps) == 0:
            logging.warn('SchedDebug parser did not find sched_debug files in {}.'.format(dirPath))
            return {}
        if len(dumps) != 2:
            logging.error('SchedDebug did not find exactly two dumps in {}.'.format(dirPath))
            return {}
        first, second = dumps
        # Order the pair by modification time before differencing.
        old, new = (first, second) if first['st_mtime'] < second['st_mtime'] else (second, first)
        return {key: new[key] - old[key] for key in new if key not in ('file', 'st_mtime')}
class Batch:
    """Parser for one batch-benchmark result directory.

    Combines the JSON result of every run found under ``data/`` with the
    matching per-run statistics collected under ``log/``.
    """

    def _parse_path(self, path):
        """Load one JSON result file, tagging it with its path and mtime."""
        mtime = os.stat(path).st_mtime
        with open(path, 'r') as fp:
            record = json.load(fp)
        record['st_mtime'] = mtime
        record['file'] = path
        return record

    def parse(self, dirPath):
        """Parse the whole result directory.

        Raises ParsingError when one of the mandatory metadata files
        (machine/batch/scheduler/kernel) is missing; runs whose JSON is
        malformed are logged as errors and skipped.
        """
        def read_meta(name):
            # Each metadata file holds a single token; strip whitespace.
            with open(dirPath + "/" + name) as fp:
                return fp.read().strip()

        try:
            runs = []
            machine = read_meta("machine")
            batch = read_meta("batch")
            scheduler = read_meta("scheduler")
            kernel = read_meta("kernel")
            for f in os.listdir(dirPath + "/data/"):
                p = dirPath + "/data/" + f
                try:
                    record = self._parse_path(path=p)
                    log_dir = dirPath + "/log/" + f
                    record.update(ProcStat().parse(log_dir))
                    record.update(SchedDebug().parse(log_dir))
                    record.update(SchedMonitor().parse(log_dir + "/sched_monitor"))
                    runs.append(record)
                except json.decoder.JSONDecodeError:
                    logging.error("Batch parser failed on file {}.".format(p))
            return {
                'machine': machine,
                'batch': batch,
                'scheduler': scheduler,
                'kernel': kernel,
                'data': runs,
            }
        except FileNotFoundError as e:
            logging.error("Batch parser did not find {} file.".format(e.filename))
            raise ParsingError()
class Sysbench:
    """Parser for one sysbench result directory (engine/client pair)."""

    # Expected layout of a sysbench report; named fields are extracted.
    SYSBENCH_EXPECTED_OUTPUT="""
Number of threads:{:s}{clients:d}
{}
Throughput:
events/s (eps):{:s}{throughput}
time elapsed:{:s}{duration}s
total number of events:{:s}{events}
Latency (ms):
min:{:s}{min_latency}
avg:{:s}{avg_latency}
max:{:s}{max_latency}
95th percentile:{:s}{p95th_latency}
sum:{:s}{sum_latency}
"""

    def _parse_path(self, path):
        """Parse one sysbench output file into a dict of named fields.

        Raises AttributeError when the file does not match the expected
        layout (parse.search returns None and ``.named`` fails).
        """
        mtime = os.stat(path).st_mtime
        with open(path, 'r') as fp:
            matched = parse.search(self.SYSBENCH_EXPECTED_OUTPUT, fp.read())
        record = matched.named
        record['file'] = path
        record['st_mtime'] = mtime
        return record

    def parse(self, dirPath):
        """Parse the whole result directory.

        Raises ParsingError when one of the mandatory metadata files is
        missing; files that do not match the sysbench layout are logged
        as errors and skipped.
        """
        def read_meta(name):
            # Each metadata file holds a single token; strip whitespace.
            with open(dirPath + "/" + name) as fp:
                return fp.read().strip()

        try:
            runs = []
            machine = read_meta("machine")
            engine = read_meta("engine")
            engine_scheduler = read_meta("engine_scheduler")
            client = read_meta("client")
            client_scheduler = read_meta("client_scheduler")
            kernel = read_meta("kernel")
            for f in os.listdir(dirPath + "/data/"):
                p = dirPath + "/data/" + f
                try:
                    record = self._parse_path(path=p)
                    log_dir = dirPath + "/log/" + f
                    record.update(ProcStat().parse(log_dir))
                    record.update(SchedDebug().parse(log_dir))
                    record.update(SchedMonitor().parse(log_dir + "/sched_monitor"))
                    runs.append(record)
                except AttributeError:
                    # Raised by _parse_path when the report did not match.
                    logging.error("Sysbench parser failed on file {}.".format(p))
            return {
                'machine': machine,
                'engine': engine,
                'engine_sched': engine_scheduler,
                'client': client,
                'client_sched': client_scheduler,
                'kernel': kernel,
                'data': runs
            }
        except FileNotFoundError as e:
            logging.error("Sysbench parser did not find {} file.".format(e.filename))
            raise ParsingError()
"carverdamien@gmail.com"
] | carverdamien@gmail.com |
18a33e58c756d5caf0520e47dc00132286e798bf | 6e1a879f997080c11d67e92e0fcae323ae689e8e | /Gurobi/Project2Task3-3.py | 01c234421f72c00a75eb1e3314293eddc6061617 | [] | no_license | davidhamblin/Networking-Projects | d1c2aa6852bd836ceb2f6686d731d572db9f1431 | a96d28df01a675178c09dd5c77205b3ad23df53d | refs/heads/master | 2020-07-05T17:07:26.565524 | 2016-12-07T03:46:25 | 2016-12-07T03:46:25 | 73,989,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,483 | py | from gurobipy import *
import matplotlib.pyplot as plt
import networkx as nx
# Minimum-cost capacitated network design with server placement:
# decide link capacities, link installations and server locations so that
# every node's demand of 100 Mbps is served, minimizing capacity cost +
# link setup cost + server cost.
m = Model('Project2Task3-3')

nodes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']

# Capacity-dependent cost is 5 MU per 10 Mbps. Links are 1 Gbps.
cap_dep_cost = 5
# Setup cost is 100 MU * cost multiplier
setup_cost = 100

# Demand sunk at each node (A has none); sources are decision variables below.
DemandNode, DemandSink = multidict({
    ('A'): 0,
    ('B'): 100,
    ('C'): 100,
    ('D'): 100,
    ('E'): 100,
    ('F'): 100,
    ('G'): 100,
    ('H'): 100
})

# Directed link list with [per-capacity cost, installation cost]; the
# integer multiplier reflects link length. Each physical link appears twice,
# once per direction.
links, cost, install_cost = multidict({
    ('A', 'B'): [2*cap_dep_cost, 2*setup_cost],
    ('B', 'A'): [2*cap_dep_cost, 2*setup_cost],
    ('A', 'C'): [2*cap_dep_cost, 2*setup_cost],
    ('C', 'A'): [2*cap_dep_cost, 2*setup_cost],
    ('B', 'D'): [2*cap_dep_cost, 2*setup_cost],
    ('D', 'B'): [2*cap_dep_cost, 2*setup_cost],
    ('D', 'F'): [2*cap_dep_cost, 2*setup_cost],
    ('F', 'D'): [2*cap_dep_cost, 2*setup_cost],
    ('C', 'F'): [3*cap_dep_cost, 3*setup_cost],
    ('F', 'C'): [3*cap_dep_cost, 3*setup_cost],
    ('C', 'E'): [2*cap_dep_cost, 2*setup_cost],
    ('E', 'C'): [2*cap_dep_cost, 2*setup_cost],
    ('E', 'G'): [1*cap_dep_cost, 1*setup_cost],
    ('G', 'E'): [1*cap_dep_cost, 1*setup_cost],
    ('G', 'H'): [2*cap_dep_cost, 2*setup_cost],
    ('H', 'G'): [2*cap_dep_cost, 2*setup_cost],
    ('F', 'H'): [1*cap_dep_cost, 1*setup_cost],
    ('H', 'F'): [1*cap_dep_cost, 1*setup_cost]
})

links = tuplelist(links)

# Continuous flow on each directed link.
flow = {}
for i,j in links:
    flow[i,j] = m.addVar(name='flow_%s_%s' % (i, j))

# Continuous installed capacity per link and integer count of link installs.
capacity = {}
install = {}
for i,j in links:
    capacity[i, j] = m.addVar(name='capacity_%s_%s' % (i, j))
    install[i, j] = m.addVar(vtype=GRB.INTEGER, name='install_%s_%s' % (i, j))

# Traffic injected at each node (nonzero only where a server is installed).
DemandSource = {}
for i in nodes:
    DemandSource[i] = m.addVar(name='demand-source_%s' % i)

# Integer count of servers installed per node.
server_install = {}
for i in nodes:
    server_install[i] = m.addVar(vtype=GRB.INTEGER, name='server-install_%s' % i)

m.update()

# Flow balance at source, output, and interior nodes
for i in nodes:
    m.addConstr(
        quicksum(flow[i,j] for i,j in links.select(i,'*')) -
        quicksum(flow[k,i] for k,i in links.select('*',i))
        == DemandSource[i] - DemandSink[i], 'node_%s' % (i))

# Capacity constraints
for i,j in links:
    m.addConstr(flow[i,j] <= capacity[i,j],
                'cap_%s_%s' % (i, j))
    # Each installed link provides up to 2000 units of capacity.
    m.addConstr(capacity[i,j] <= 2000*install[i,j],
                'install_%s_%s' % (i, j))

# Each installed server can source at most 2000 units of demand.
for i in nodes:
    m.addConstr(DemandSource[i] <= 2000 * server_install[i],
                'server-install-max_%s' % (i))

m.update()

# Objective: capacity-dependent cost + link installation cost + server cost.
server_cost = 5000
totalCost = quicksum((capacity[i, j]*cost[i, j] + install[i, j]*install_cost[i, j]) for i, j in links) + quicksum(server_install[i]*server_cost for i in nodes)
m.setObjective(totalCost, GRB.MINIMIZE)
m.update()

m.optimize()

# Build a drawing of the solution; node coordinates are fixed for layout.
G = nx.DiGraph()
G.add_node('A', x=0, y=0)
G.add_node('B', x=0, y=-10)
G.add_node('C', x=2.5, y=10)
G.add_node('D', x=2.5, y=-20)
G.add_node('E', x=7.5, y=10)
G.add_node('F', x=7.5, y=-20)
G.add_node('G', x=10, y=0)
G.add_node('H', x=10, y=-10)
server_nodes = []

if m.status == GRB.Status.OPTIMAL:
    print()
    solutionCap = m.getAttr('x', capacity)
    for i, j in links:
        print('Optimal capacity on link %s: cap: %s' % ((i, j), solutionCap[i, j]))
    print()
    solutionServer = m.getAttr('x', server_install)
    print(solutionServer)
    for i in nodes:
        if solutionServer[i]:
            print('Server in %s' % i)
            server_nodes.append(i)
    solutionFlow = m.getAttr('x', flow)
    for i,j in links:
        if solutionFlow[i,j] > 0:
            print('Link %s -> %s: %g' % (i, j, solutionFlow[i,j]))
            # Add the solution flow edges to the graph
            G.add_edge(i, j, weight=solutionCap[i, j])
    print('\nTotal cost of the network: %s' % m.ObjVal)

# Draw the graph from the optimal flow edges in G
el=[(u,v) for (u,v,d) in G.edges(data=True)]
edge_labels = dict([((u, v,), d['weight'])
                    for u, v, d in G.edges(data=True)])
pos = {}
for v in nx.nodes(G):
    pos[v] = [G.node[v]['x'], G.node[v]['y']]
nx.draw(G, pos, with_labels=True)
# nodes
nx.draw_networkx_nodes(G,pos,node_size=400,node_color='r')
# Server nodes are drawn in blue over the red base nodes.
nx.draw_networkx_nodes(G,pos,nodelist=server_nodes,node_size=400,node_color='b')
# labels
nx.draw_networkx_labels(G,pos,font_size=12,font_family='sans-serif',font_color='w')
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)
plt.figure(1)
plt.axis('off')
plt.savefig("capacity_graph_task33.png") # save as png
plt.show() # display
| [
"david.hamblin.12@cnu.edu"
] | david.hamblin.12@cnu.edu |
10754f53ae595dce53c6057cd07313f7d340e288 | fd9a1c1d4223f54751c40cc42ce67292aae0565a | /graspologic/align/seedless_procrustes.py | f7a8d6651d1fc5703d0ae4fc6c33c0a3f70ac3dc | [
"MIT"
] | permissive | tliu68/graspologic | 296cf7b160aa53b13c82473cb201431a6561823b | d1cf7678bc63ab9769828a82a90f66bf1dfa0eff | refs/heads/dev | 2023-02-23T17:52:53.176782 | 2021-01-21T06:51:00 | 2021-01-21T06:51:00 | 322,512,481 | 0 | 0 | MIT | 2021-01-21T03:51:35 | 2020-12-18T06:44:22 | Python | UTF-8 | Python | false | false | 16,406 | py | # Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import ot
import numpy as np
from sklearn.utils import check_array
from .base import BaseAlign
from .sign_flips import SignFlips
from .orthogonal_procrustes import OrthogonalProcrustes
class SeedlessProcrustes(BaseAlign):
    r"""
    Matches two datasets using an orthogonal matrix. Unlike
    :class:`~graspologic.align.OrthogonalProcrustes`, this does not require a
    matching between entries. It can even be used in the settings where the two
    datasets do not have the same number of entries.

    In graph setting, it is used to align the embeddings of two different
    graphs, when it requires some simultaneous inference task and no 1-1
    matching between the vertices of the two graphs can be established, for
    example, inside of the test for the equivalence of the latent distributions
    (see: :class:`~graspologic.inference.LatentDistributionTest`).

    Parameters
    ----------
    optimal_transport_lambda : float (default=0.1), optional
        Regularization term of the Sinkhorn optimal transport algorithm.

    optimal_transport_eps : float (default=0.01), optional
        Tolerance parameter for the each Sinkhorn optimal transport algorithm.
        I.e. tolerance for each "E-step".

    optimal_transport_num_reps : int (default=1000), optional
        Number of repetitions in each iteration of the iterative optimal
        transport problem. I.e. maximum number of repetitions in each "E-step".

    iterative_num_reps : int (default=100), optional
        Number of reps in each iteration of the iterative optimal transport
        problem. I.e. maximum number of total iterations the whole "EM"
        algorithm.

    init : string, {'2d' (default), 'sign_flips', 'custom'}, optional

        - '2d'
            Uses :math:`2^d` different restarts, where :math:`d` is the
            dimension of the datasets. In particular, tries all matrices that
            are simultaneously diagonal and orthogonal. In other words, these
            are diagonal matrices with all entries on the diagonal being either
            +1 or -1. This is motivated by the fact that spectral graph
            embeddings have two types of orthogonal non-identifiability, one of
            which is captured by the orthogonal diagonal matrices. The final
            result is picked based on the final values of the objective
            function. For more on this, see [2]_.
        - 'sign_flips'
            Initial alignment done by making the median value in each dimension
            have the same sign. The motivation is similar to that in '2d',
            except this is a heuristic that can save time, but can sometimes
            yield suboptimal results.
        - 'custom'
            Expects either an initial guess for :attr:`Q_` or an initial guess
            for :attr:`P_`, but not both. See ``initial_Q`` and ``initial_P``,
            respectively. If neither is provided, initializes ``initial_Q`` to an
            identity with an appropriate number of dimensions.

    initial_Q : np.ndarray, shape (d, d) or None, optional (default=None)
        An initial guess for the alignment matrix, :attr:`Q_`, if such exists.
        Only one of ``initial_Q``, ``initial_P`` can be provided at the same time,
        and only if ``init`` argument is set to 'custom'. If None, and
        ``initial_P`` is also None - initializes ``initial_Q`` to identity matrix.
        Must be an orthogonal matrix, if provided.

    initial_P : np.ndarray, shape (n, m) or None, optional (default=None)
        Initial guess for the optimal transport matrix, :attr:`P_`, if such
        exists. Only one of ``initial_Q``, ``initial_P`` can be provided at the
        same time, and only if ``init`` argument is set to 'custom'. If None, and
        ``initial_Q`` is also None - initializes ``initial_Q`` to identity matrix.
        Must be a soft assignment matrix if provided (rows sum up to 1/n, cols
        sum up to 1/m.)

    Attributes
    ----------
    Q_ : array, size (d, d)
        Final orthogonal matrix, used to modify ``X``.

    P_ : array, size (n, m) where n and m are the sizes of two datasets
        Final matrix of optimal transports, represent soft matching weights
        from points in one dataset to the other, normalized such that all rows
        sum to 1/n and all columns sum to 1/m.

    score_ : float
        Final value of the objective function: :math:`|| X Q - P Y ||_F`
        Lower means the datasets have been matched together better.

    selected_initial_Q_ : array, size (d, d)
        Initial orthogonal matrix which was used as the initialization.
        If ``init`` was set to '2d' or 'sign_flips', then it is the adaptively
        selected matrix.
        If ``init`` was set to 'custom', and ``initial_Q`` was provided, then equal
        to that. If it was not provided, but ``initial_P`` was, then it is the
        matrix after the first procrustes performed. If neither was provided,
        then it is the identity matrix.

    References
    ----------
    .. [1] Agterberg, J.
        # TODO Cite the Seedless Procrustes preprint whenever available.

    .. [2] Agterberg, J., Tang, M., Priebe., C. E. (2020).
        "On Two Distinct Sources of Nonidentifiability in Latent Position Random Graph Models"
        arXiv:2003.14250

    Notes
    -----
    In essence, the goal of this procedure is to simultaneously obtain a, not
    necessarily 1-to-1, correspondence between the vertices of the two data
    sets, and an orthogonal alignment between two datasets. If the two datasets
    are represented with matrices :math:`X \in M_{n, d}` and
    :math:`Y \in M_{m, d}`, then the correspondence is a matrix
    :math:`P \in M_{n, m}` that is soft assignment matrix (that is, its rows
    sum to :math:`1/n`, and columns sum to :math:`1/m`) and the orthogonal
    alignment is an orthogonal matrix :math:`Q \in M_{d, d}` (an orthogonal
    matrix is any matrix that satisfies :math:`Q^T Q = Q Q^T = I`). The global
    objective function is :math:`|| X Q - P Y ||_F`.

    Note that both :math:`X` and :math:`PY` are matrices in :math:`M_{n, d}`.
    Thus, if one knew :math:`P`, it would be simple to obtain an estimate for
    :math:`Q`, using the regular orthogonal procrustes. On the other hand, if
    :math:`Q` was known, then :math:`XQ` and :math:`Y` could be thought of
    distributions over a finite number of masses, each with weight :math:`1/n`
    or :math:`1/m`, respectively. These distributions could be "matched" via
    solving an optimal transport problem.

    However, both :math:`Q` and :math:`P` are simultaneously unknown here. So
    the algorithm performs a sequence of alternating steps, obtaining
    iteratively improving estimates of :math:`Q` and :math:`P`, similarly to an
    expectation-maximization (EM) procedure. It is not known whether this
    procedure is formally an EM, but the analogy can be drawn as follows: after
    obtaining an initial guess of of :math:`\hat{Q}_{0}`, obtaining an
    assignment matrix :math:`\hat{P}_{i+1} | \hat{Q}_{i}` ("E-step") is done by
    solving an optimal transport problem via Sinkhorn algorithm, whereas
    obtaining an orthogonal alignment matrix :math:`Q_{i+1} | P_{i}` ("M-step")
    is done via regular orthogonal procurstes. These alternating steps are
    performed until ``iterative_num_reps`` is reached.

    For more on how the initial guess can be performed, see ``init``.
    """

    def __init__(
        self,
        optimal_transport_lambda=0.1,
        optimal_transport_eps=0.01,
        optimal_transport_num_reps=1000,
        iterative_num_reps=100,
        init="2d",
        initial_Q=None,
        initial_P=None,
    ):
        # check optimal_transport_lambda argument
        if type(optimal_transport_lambda) is not float:
            msg = "Optimal_transport_lambda must be a float, not {}".format(
                type(optimal_transport_lambda)
            )
            raise TypeError(msg)
        if optimal_transport_lambda < 0:
            # BUGFIX: message previously read "invalud".
            msg = "{} is an invalid value of the optimal_transport_lambda, must be non-negative".format(
                optimal_transport_lambda
            )
            raise ValueError(msg)
        # check optimal_transport_eps argument
        if type(optimal_transport_eps) is not float:
            msg = "Optimal_transport_eps must be a float, not {}".format(
                type(optimal_transport_eps)
            )
            raise TypeError(msg)
        if optimal_transport_eps <= 0:
            # BUGFIX: message previously read "postitive".
            msg = "{} is an invalid value of the optimal transport eps, must be positive".format(
                optimal_transport_eps
            )
            raise ValueError(msg)
        # check optimal_transport_num_reps argument
        if type(optimal_transport_num_reps) is not int:
            msg = "Optimal_transport_num_reps must be a int, not {}".format(
                type(optimal_transport_num_reps)
            )
            raise TypeError(msg)
        if optimal_transport_num_reps < 1:
            # BUGFIX: this message previously interpolated iterative_num_reps
            # (the wrong variable) and claimed "non-negative" although the
            # condition requires at least 1.
            msg = "{} is an invalid number of repetitions, must be positive".format(
                optimal_transport_num_reps
            )
            raise ValueError(msg)
        # check iterative_num_reps argument
        if type(iterative_num_reps) is not int:
            msg = "Iterative_num_reps must be a int, not {}".format(
                type(iterative_num_reps)
            )
            raise TypeError(msg)
        if iterative_num_reps < 0:
            msg = "{} is an invalid number of repetitions, must be non-negative".format(
                iterative_num_reps
            )
            raise ValueError(msg)
        # check init argument
        if type(init) is not str:
            msg = "Init must be a str, not {}".format(type(init))
            raise TypeError(msg)
        inits_supported = ["2d", "sign_flips", "custom"]
        if init not in inits_supported:
            msg = "Supported inits are {}".format(inits_supported)
            raise ValueError(msg)
        # check that initial_Q and intial_P aren't provided when shouldn't be
        if initial_Q is not None and init != "custom":
            msg = "Initial_Q can only be provided if init is set to custom"
            raise ValueError(msg)
        if initial_P is not None and init != "custom":
            msg = "Initial_P can only be provided if init is set to custom"
            raise ValueError(msg)
        if initial_Q is not None and initial_P is not None:
            msg = "Initial_Q and initial_P cannot be provided simultaneously"
            raise ValueError(msg)
        # check initial_Q argument
        if initial_Q is not None:
            if not isinstance(initial_Q, np.ndarray):
                msg = f"Initial_Q must be np.ndarray or None, not {type(initial_Q)}"
                raise TypeError(msg)
            initial_Q = check_array(initial_Q, copy=True)
            if initial_Q.shape[0] != initial_Q.shape[1]:
                msg = "Initial_Q must be a square orthogonal matrix"
                raise ValueError(msg)
            if not np.allclose(initial_Q.T @ initial_Q, np.eye(initial_Q.shape[0])):
                msg = "Initial_Q must be a square orthogonal matrix"
                raise ValueError(msg)
        # check initial_P argument
        if initial_P is not None:
            if not isinstance(initial_P, np.ndarray):
                msg = f"Initial_P must be np.ndarray or None, not {type(initial_P)}"
                raise TypeError(msg)
            initial_P = check_array(initial_P, copy=True)
            n, m = initial_P.shape
            if not (
                np.allclose(initial_P.sum(axis=0), np.ones(m) / m)
                and np.allclose(initial_P.sum(axis=1), np.ones(n) / n)
            ):
                msg = (
                    "Initial_P must be a soft assignment matrix "
                    "(rows add up to (1/number of cols) "
                    "and columns add up to (1/number of rows))"
                )
                raise ValueError(msg)

        super().__init__()

        self.optimal_transport_eps = optimal_transport_eps
        self.optimal_transport_num_reps = optimal_transport_num_reps
        self.optimal_transport_lambda = optimal_transport_lambda
        self.iterative_num_reps = iterative_num_reps
        self.init = init
        self.initial_Q = initial_Q
        self.initial_P = initial_P

    def _optimal_transport(self, X, Y, Q):
        # "E step" of the SeedlessProcrustes: given the current alignment Q,
        # solve the entropically-regularized optimal transport between X @ Q
        # and Y via Sinkhorn iterations.
        n, d = X.shape
        m, _ = Y.shape

        # initialize probability mass arrays & the cost matrix ; run sinkhorn
        probability_mass_X = np.ones(n) / n
        probability_mass_Y = np.ones(m) / m
        cost_matrix = (
            np.linalg.norm((X @ Q).reshape(n, 1, d) - Y.reshape(1, m, d), axis=2) ** 2
        )
        P = ot.sinkhorn(
            a=probability_mass_X,
            b=probability_mass_Y,
            M=cost_matrix,
            reg=self.optimal_transport_lambda,
            # BUGFIX: numItermax was previously set to optimal_transport_eps
            # (a float tolerance), so the validated optimal_transport_num_reps
            # parameter was silently never used.
            numItermax=self.optimal_transport_num_reps,
            stopThr=self.optimal_transport_eps,
        )
        return P

    def _procrustes(self, X, Y, P):
        # "M step" of the SeedlessProcurstes: given the soft assignment P,
        # solve the regular orthogonal procrustes between X and P @ Y.
        aligner = OrthogonalProcrustes()
        Q = aligner.fit(X, P @ Y).Q_
        return Q

    def _iterative_ot(self, X, Y, Q):
        # Alternate E-steps and M-steps, starting from the given Q.
        # this P is not used. it is set to default in case numreps=0
        P = np.ones((X.shape[0], Y.shape[0])) / (X.shape[0] * Y.shape[0])
        for i in range(self.iterative_num_reps):
            P = self._optimal_transport(X, Y, Q)
            Q = self._procrustes(X, Y, P)
        return P, Q

    def _compute_objective(self, X, Y, Q=None, P=None):
        # Frobenius-norm objective || X Q - P Y ||_F; defaults to the
        # fitted Q_ / P_ attributes.
        if Q is None:
            Q = self.Q_
        if P is None:
            P = self.P_
        return np.linalg.norm(X @ Q - P @ Y, ord="fro")

    def fit(self, X, Y):
        """
        Uses the two datasets to learn the matrix `self.Q_` that aligns the
        first dataset with the second.

        Parameters
        ----------
        X : np.ndarray, shape (n, d)
            Dataset to be mapped to ``Y``, must have same number of dimensions
            (axis 1) as ``Y``.

        Y : np.ndarray, shape (m, d)
            Target dataset, must have same number of dimensions (axis 1) as ``X``.

        Returns
        -------
        self : returns an instance of self
        """
        X, Y = self._check_datasets(X, Y)
        n, d = X.shape
        m, _ = Y.shape

        if self.init == "2d":
            P_matrices = np.zeros((2 ** d, n, m))
            Q_matrices = np.zeros((2 ** d, d, d))
            objectives = np.zeros(2 ** d)
            # try 2^d different initializations
            for i in range(2 ** d):
                initial_Q = _sign_flip_matrix_from_int(i, d)
                P_matrices[i], Q_matrices[i] = P, Q = self._iterative_ot(
                    X, Y, initial_Q
                )
                objectives[i] = self._compute_objective(X, Y, Q, P)
            # pick the best one, using the objective function value
            best = np.argmin(objectives)
            self.selected_initial_Q_ = _sign_flip_matrix_from_int(best, d)
            self.P_, self.Q_ = P_matrices[best], Q_matrices[best]
        elif self.init == "sign_flips":
            aligner = SignFlips()
            self.selected_initial_Q_ = aligner.fit(X, Y).Q_
            self.P_, self.Q_ = self._iterative_ot(X, Y, self.selected_initial_Q_)
        else:
            # determine initial Q if "custom"
            if self.initial_Q is not None:
                self.selected_initial_Q_ = self.initial_Q
            elif self.initial_P is not None:
                # use initial P, if provided
                self.selected_initial_Q_ = self._procrustes(X, Y, self.initial_P)
            else:
                # set to initial Q to identity if neither Q nor P provided
                self.selected_initial_Q_ = np.eye(d)
            self.P_, self.Q_ = self._iterative_ot(X, Y, self.selected_initial_Q_)

        self.score_ = self._compute_objective(X, Y)

        return self
def _sign_flip_matrix_from_int(val_int, d):
val_bin = bin(val_int)[2:]
val_bin = "0" * (d - len(val_bin)) + val_bin
return np.diag(np.array([(float(i) - 0.5) * -2 for i in val_bin]))
| [
"noreply@github.com"
] | tliu68.noreply@github.com |
d3a6f87d6075d7414fa1b088cd9b7fdb3d66c1d8 | 7f7e98e9947aad5d95b38dbf89f5b9d2b775c9c1 | /TrinityAssemble.py | 9996aecd8df701cd313cc53212dc53b0264606df | [] | no_license | cacampbell/pythonmisc | 27aab7b7c2edb6b53a662c4f065ad07a383cb92e | d70edf4c695bbe55cf52ae82d025def285f4a1dc | refs/heads/master | 2021-01-24T08:08:57.767324 | 2017-01-18T22:44:30 | 2017-01-18T22:44:30 | 50,535,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,574 | py | #!/usr/bin/env python3
from sys import stderr
from os.path import isfile
from os.path import join
from shutil import copyfileobj
from Bash import bash
from Bash import mkdir_p
from PairedEndCommand import PairedEndCommand
class TrinityAssemble(PairedEndCommand):
    """Run Trinity transcriptome assembly on a batch of read files.

    Supports two modes: genome-guided assembly from merged BAM alignments,
    and de novo assembly from combined (merged + unmerged) read files.
    Builds a single Trinity command and dispatches it via the
    PairedEndCommand cluster machinery.
    """

    def __init__(self, *args, **kwargs):
        super(TrinityAssemble, self).__init__(*args, **kwargs)
        # Defaults; each can be overridden through set_default kwargs.
        self.set_default("max_intron", "50000")
        self.set_default("genome_guided", False)
        self.set_default("contig_len", "250")
        self.modules = ['java']
        self.set_default("all_reads_name", "all_reads.bam")
        self.set_default("kmer_len", "25")

    def make_command(self, filename):
        # Intentionally a no-op: unlike other PairedEndCommand subclasses,
        # this class builds one global command in format_commands() rather
        # than one command per input file.
        pass

    def __merge_bam_files(self):
        """Merge all input BAM files into a single BAM for genome-guided mode.

        Returns the path to the merged BAM; reuses an existing merged file.
        """
        bam_files = [f for f in self.files if f.endswith(".bam")]
        output_bam = join(self.input_root, self.all_reads_name)

        def __samtools_threads():
            # Cap samtools threads at 8 regardless of the cluster allocation.
            return (str(min(self.get_threads(), 8)))

        if isfile(output_bam):
            print("Output BAM already exists, using it...", file=stderr)
            return (output_bam)

        try:
            # BUGFIX: `.format()` was previously called on the *return value*
            # of bash() (an (out, err) tuple), so the command ran with literal
            # "{t}" placeholders and then crashed with AttributeError.  The
            # invocation was also missing the `merge` subcommand (and the
            # sort-only `-m` memory flag, which samtools merge rejects, has
            # been dropped).
            merge_command = "samtools merge -@ {t} {o_bam} {i_bams}".format(
                t=__samtools_threads(),
                o_bam=output_bam,
                i_bams=" ".join(bam_files))
            (out, err) = bash(merge_command)

            if err:
                print(err, file=stderr)
        except Exception as error:
            print("Error while merging bam files: {}".format(error),
                  file=stderr)
            raise (error)

        return (output_bam)

    def __genome_guided_assembly(self):
        """Queue a genome-guided Trinity run over the merged alignment BAM."""
        merged_bam = ""
        if not self.dry_run:
            merged_bam = self.__merge_bam_files()

        job_name = "{}".format(self.cluster_options["job_name"])
        command = ("Trinity --genome_guided_bam {mb} --genome_guided_max_intron"
                   " {mi} --max_memory {mem} --CPU {t} --output {o} "
                   "--KMER_SIZE={kmer_len} --min_contig_length {contiglen} "
                   "--full_cleanup").format(
            mb=merged_bam,
            mi=self.max_intron,
            mem=self.get_mem(fraction=0.95),
            t=self.get_threads(),
            o=self.output_root,
            contiglen=self.contig_len,
            kmer_len=self.kmer_len
        )
        self.commands[job_name] = command

        if self.verbose:
            print(command, file=stderr)

    def __merge_files(self):
        """Concatenate each sample's merged reads with its unmerged reads.

        Produces ``.combined<ext>`` files (reused when already present) and
        returns their paths for de novo assembly.
        """
        files = [f for f in self.files if "unmerged" not in f]

        if self.verbose:
            print("Merging single and paired end reads...", file=stderr)

        merged_files = []

        for merged in files:
            unmerged = self.replace_extension_with(".unmerged{}".format(
                self.extension), merged)
            combined = self.replace_extension_with(".combined{}".format(
                self.extension), merged)
            merged_files += [combined]

            try:
                if not isfile(combined):
                    # Renamed output handle: the original rebound `combined`
                    # to the file object, shadowing the path string.
                    with open(combined, 'wb') as out_handle:
                        with open(merged, 'rb') as merge_handle:
                            # 10 MiB copy buffer.
                            copyfileobj(merge_handle, out_handle,
                                        1024 * 1024 * 10)
                        if isfile(unmerged):
                            with open(unmerged, 'rb') as unmerge_handle:
                                copyfileobj(unmerge_handle, out_handle,
                                            1024 * 1024 * 10)
                else:
                    print("{} already exists, using it...".format(combined),
                          file=stderr)
            except (IOError, OSError) as err:
                print("Error while combining files: {}".format(err),
                      file=stderr)
                raise (err)

        return (merged_files)

    def __de_novo_assembly(self):
        """Queue a de novo Trinity run over the combined read files."""
        merged_files = []
        if not self.dry_run:
            merged_files = self.__merge_files()

        job_name = "{}".format(self.cluster_options["job_name"])
        command = ("Trinity --seqType {type} --single {filelist} "
                   "--KMER_SIZE={kmer_len} --run_as_paired --max_memory {mem} "
                   "--CPU {t} --output {o} --min_contig_length "
                   "{contiglen}").format(
            type=self.extension.lstrip("."),
            filelist=",".join(merged_files),
            mem=self.get_mem(fraction=0.95),
            t=self.get_threads(),
            o=self.output_root,
            contiglen=self.contig_len,
            kmer_len=self.kmer_len
        )
        self.commands[job_name] = command

        if self.verbose:
            print(command, file=stderr)

    def format_commands(self):
        # Choose the assembly strategy selected at construction time.
        if self.genome_guided:
            self.__genome_guided_assembly()
        else:
            self.__de_novo_assembly()

    def run(self):
        """
        Run the Parallel Command from start to finish
        1) Load Environment Modules
        2) Gather input files
        3) Remove exclusions
        4) Make Directories
        5) Format Commands
        6) Dispatch Scripts to Cluster Scheduler
        7) Unload the modules
        :return: list<str>: a list of job IDs returned by cluster scheduler
        """
        if self.verbose:
            print('Loading environment modules...', file=stderr)
        if self.modules is not None:
            self.module_cmd(['load'])

        if self.verbose:
            print('Gathering input files...', file=stderr)
        self.get_files()

        if self.verbose:
            print('Removing exclusions...', file=stderr)

        if self.verbose:
            print("Making output directories...", file=stderr)
        mkdir_p(self.output_root)

        if self.exclusions_paths:
            self.exclude_files_below(self.exclusions_paths)

        self.exclude_files_below(self.output_root)

        if self.exclusions:
            self.remove_regex_from_input(self.exclusions)
        # Never re-consume our own intermediate outputs as inputs.
        self.remove_regex_from_input(r".combine.{}".format(self.extension))
        self.remove_regex_from_input(r"{}".format(self.all_reads_name))

        if self.verbose:
            print('Formatting commands...', file=stderr)
        self.format_commands()

        if self.verbose:
            print('Dispatching to cluster...', file=stderr)
        jobs = self.dispatch()  # Return the job IDs from the dispatched cmds

        return (jobs)
| [
"cacampbell@ucdavis.edu"
] | cacampbell@ucdavis.edu |
5c2cc88aff82e06149ca9c765f28aa627b60312d | 949cb096f6f19f6b41d3a0444269ea6ccf2397ef | /course_search.py | a782c997d3e560d97d8510cdb3449b771b4657da | [
"MIT"
] | permissive | bharathc346-zz/CRNNotifications | c5c56f78479ec25e5b010f6726092aeea1ae49ac | ab50080a847733bf23cdd77e50a995b19f0b89d1 | refs/heads/master | 2021-07-02T02:31:12.116785 | 2017-09-04T22:03:09 | 2017-09-04T22:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | from bs4 import BeautifulSoup
from models.course import Course
import requests
# Base POST payload for the Virginia Tech Banner timetable search.
# Per-query fields (subj_code, CRSE_NUMBER, crn) are filled in by the
# lookup helpers below; 'open_only': 'on' restricts results to open
# sections.  NOTE(review): 'AR%' looks like a pre-encoded wildcard for
# "all areas" -- confirm against the live form before changing.
default_postdata = {
    'CAMPUS': '0',
    'TERMYEAR': '201709',
    'CORE_CODE': 'AR%',
    'subj_code': '',
    'CRSE_NUMBER': '',
    'crn': '',
    'open_only': 'on',
    'BTN_PRESSED': 'FIND class sections',
}

# Banner timetable request endpoint.
url = 'https://banweb.banner.vt.edu/ssb/prod/HZSKVTSC.P_ProcRequest'
def _get_open_courses(data):
    """POST the search form and scrape the results table into Course objects."""
    response = requests.post(url, data=data)
    soup = BeautifulSoup(response.content, 'html5lib')
    rows = soup.select('table.dataentrytable tbody tr')
    courses = []
    # Row 0 is the column-label header; a header-only table means no results.
    if len(rows) > 1:
        for row in rows[1:]:
            texts = [cell.get_text() for cell in row.select('td')]
            courses.append(Course(texts[0].strip(),   # CRN
                                  texts[1].strip(),   # course label
                                  texts[2].strip(),   # title
                                  texts[6].strip()))  # professor
    return courses
def get_open_courses_by_course(subj, num):
    """ Get the open courses that match the course subject and number passed in

    :param subj: The subject abbreviation
    :param num: The course number
    :return: Returns a list of the open courses that are matched
    """
    payload = dict(default_postdata,
                   subj_code=subj.strip().upper(),
                   CRSE_NUMBER=num.strip())
    return _get_open_courses(payload)
def get_open_courses_by_crn(crn):
    """ Get the open course that matches the crn passed in

    :param crn: The course request number of the course section
    :return: Returns a list of the open courses that are matched
    """
    payload = dict(default_postdata, crn=crn.strip())
    return _get_open_courses(payload)
| [
"bharathc@gmail.com"
] | bharathc@gmail.com |
24f9f3aa83df652d60500d390c8b1ec71ab31efd | 5c72906b45827f6f468c10383e511ca98fc72cc0 | /pytorch/DCCComputation.py | bdded433ba1a6167b974f51b87c42dd1a443f5de | [] | no_license | camillarhodes/Disentanglement_Clustering | b714d7f36bbe2a729d31b18e70c990f0e1058098 | 67c939fab421fe8144970bbb2183b27516edd34c | refs/heads/master | 2020-03-30T20:30:54.972980 | 2019-07-27T14:53:26 | 2019-07-27T14:53:26 | 151,591,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,301 | py | import os
import numpy as np
import scipy.io as sio
from scipy.optimize import linear_sum_assignment
from scipy.sparse import csr_matrix, diags
import scipy.sparse as sparse
from scipy.sparse.csgraph import connected_components
from sklearn import metrics
from config import cfg, get_data_dir
def makeDCCinp(args):
    """Load the pretrained embedding data and build DCC finetuning inputs.

    Reads `pretrained(.mat|.h5)` (or `pretrained-fake` for step 2) from the
    dataset directory and returns ``(data, labels, pairs, Z, nconn)`` where
    ``pairs`` is the edge list augmented with a per-pair weight column.
    """
    # pretrained.mat or pretrained.h5 must be placed under the ../data/"db"/ directory. "db" stands for dataset
    datadir = get_data_dir(args.db)
    datafile = 'pretrained'
    if args.step == 2:
        datafile += '-fake'
    if args.h5:
        datafile = os.path.join(datadir, datafile+'.h5')
    else:
        datafile = os.path.join(datadir, datafile+'.mat')

    assert os.path.exists(datafile), 'Training data not found at `{:s}`'.format(datafile)

    # Both HDF5 and MATLAB containers expose the same dataset keys.
    if args.h5:
        import h5py
        raw_data = h5py.File(datafile, 'r')
    else:
        raw_data = sio.loadmat(datafile, mat_dtype=True)

    # X: raw features, Z: pretrained embedding, w: pairwise edges (first two
    # columns are sample indices).
    data = raw_data['X'][:].astype(np.float32)
    Z = raw_data['Z'][:].astype(np.float32)
    labels = np.squeeze(raw_data['gtlabels'][:])
    pairs = raw_data['w'][:, :2].astype(int)

    if args.h5:
        raw_data.close()

    print('\n Loaded `{:s}` dataset for finetuning'.format(args.db))

    numpairs = pairs.shape[0]
    numsamples = data.shape[0]

    # Creating pairwise weights and individual sample sample for reconstruction loss term
    # nconn[i] is the (symmetrized) degree of sample i; each pair is weighted
    # by mean degree over the geometric mean of its endpoint degrees.
    R = csr_matrix((np.ones(numpairs, dtype=np.float32), (pairs[:, 0], pairs[:, 1])), shape=(numsamples, numsamples))
    R = R + R.transpose()
    nconn = np.squeeze(np.array(np.sum(R, 1)))
    weights = np.average(nconn) / np.sqrt(nconn[pairs[:, 0]] * nconn[pairs[:, 1]])
    # Append the weight as a third column of `pairs`.
    pairs = np.hstack((pairs, np.atleast_2d(weights).transpose()))

    return data, labels, pairs, Z, nconn
def computeHyperParams(pairs, Z, step):
    """Derive the RCC/DCC continuation hyper-parameters from the embedding.

    Returns ``(_sigma1, _sigma2, _lambda, _delta, _delta1, _delta2, lmdb,
    lmdb_data)`` where the sigmas are graduated-nonconvexity start points,
    the deltas are robust distance thresholds, and the lmdb arrays are
    initial per-pair / per-sample multipliers (all ones).
    """
    numpairs = len(pairs)
    numsamples = len(Z)

    # Pairwise embedding distances along each edge.
    epsilon = np.linalg.norm(Z[pairs[:, 0].astype(int)] - Z[pairs[:, 1].astype(int)], axis=1)
    if step == 1:
        # Keep and sort only above-noise distances.  NOTE(review): for
        # step != 1, epsilon stays unsorted, so the epsilon[:robsamp]
        # averages below are over arbitrary pairs -- confirm intended.
        epsilon = np.sort(epsilon[np.where(epsilon / np.sqrt(cfg.DIM) > cfg.RCC.NOISE_THRESHOLD)])

    # threshold for finding connected components
    robsamp = int(numpairs * cfg.RCC.MIN_RATIO_SAMPLES_DELTA)
    _delta = np.average(epsilon[:robsamp])
    print("_delta is ",_delta)
    robsamp = min(cfg.RCC.MAX_NUM_SAMPLES_DELTA, robsamp)
    # Pairwise threshold: half of the mean over the smallest distances.
    _delta2 = float(np.average(epsilon[:robsamp]) / 2)
    # GNC start for the pairwise term: scaled largest squared distance.
    _sigma2 = float(3 * (epsilon[-1] ** 2))
    # Data-term scale: mean squared distance from the embedding centroid.
    _delta1 = float(np.average(np.linalg.norm(Z - np.average(Z, axis=0)[np.newaxis, :], axis=1) ** 2))
    _sigma1 = float(max(cfg.RCC.GNC_DATA_START_POINT, 16 * _delta1))
    print('The endpoints are Delta1: {:.3f}, Delta2: {:.3f}'.format(_delta1, _delta2))

    # Initial multipliers are uniform; compute_lambda balances the terms.
    lmdb = np.ones(numpairs, dtype=np.float32)
    lmdb_data = np.ones(numsamples, dtype=np.float32)
    _lambda = compute_lambda(pairs, Z, lmdb, lmdb_data)

    return _sigma1, _sigma2, _lambda, _delta, _delta1, _delta2, lmdb, lmdb_data
def compute_lambda(pairs, Z, lmdb, lmdb_data):
    """Balancing weight lambda = ||I Z||_2 / (lmax(I) + lmax(D - R)).

    R is the weighted, symmetrized pair graph, D its degree matrix, and
    I the diagonal of per-sample multipliers.
    """
    n = len(Z)
    rows = pairs[:, 0].astype(int)
    cols = pairs[:, 1].astype(int)
    R = csr_matrix((lmdb * pairs[:, 2], (rows, cols)), shape=(n, n))
    R = R + R.transpose()
    degree = diags(np.squeeze(np.array(R.sum(1))), 0)
    I = diags(lmdb_data, 0)

    # Spectral norm of the weighted data matrix vs. the extreme eigenvalues
    # of the diagonal term and the graph Laplacian-like term.
    data_norm = np.linalg.norm(I * Z, ord=2)
    eig_data, _ = sparse.linalg.eigsh(I, k=1)
    eig_graph, _ = sparse.linalg.eigsh(degree - R, k=1)

    return float(data_norm / (eig_data[0] + eig_graph[0]))
def computeObj(U, pairs, _delta, gtlabels, numeval):
    """Cluster by thresholding pairwise distances and score the result.

    Pairs whose representatives in U are closer than ``_delta`` form edges;
    connected components of that graph are the clusters.  Returns the
    per-pair same-cluster mask, the clustering metrics over the first
    ``numeval`` points, the component count, and the labels.
    """
    n = len(U)
    left = pairs[:, 0].astype(int)
    right = pairs[:, 1].astype(int)
    diff = np.linalg.norm(U[left] - U[right], axis=1) ** 2

    # computing clustering measures
    index1 = np.sqrt(diff) < _delta
    kept = np.where(index1)[0]
    adjacency = csr_matrix((np.ones(len(kept)), (left[kept], right[kept])),
                           shape=(n, n))
    adjacency = adjacency + adjacency.transpose()
    n_components, labels = connected_components(adjacency, directed=False)

    index2 = labels[left] == labels[right]
    ari, ami, nmi, acc = benchmarking(gtlabels[:numeval], labels[:numeval])

    return index2, ari, ami, nmi, acc, n_components, labels
def benchmarking(gtlabels, labels):
    """Return (ARI, AMI, NMI, accuracy) over the first len(gtlabels) points."""
    # TODO: Please note that the AMI definition used in the paper differs from
    # that in the sklearn python package.  Please modify it accordingly.
    numeval = len(gtlabels)
    truth = gtlabels[:numeval]
    pred = labels[:numeval]

    ari = metrics.adjusted_rand_score(truth, pred)
    ami = metrics.adjusted_mutual_info_score(truth, pred)
    nmi = metrics.normalized_mutual_info_score(truth, pred)
    acc = clustering_accuracy(truth, pred)

    return ari, ami, nmi, acc
def clustering_accuracy(gtlabels, labels):
    """Best-match clustering accuracy.

    Optimally assigns predicted clusters to ground-truth categories with
    the Hungarian algorithm and returns the fraction of samples that land
    in their assigned cluster.
    """
    categories = np.unique(gtlabels)
    n_pred = np.amax(labels) + 1
    # contingency[j, i]: samples of true category i placed in predicted
    # cluster j (built per-category, then transposed).
    contingency = np.asarray(
        [np.bincount(labels[gtlabels == c], minlength=n_pred)
         for c in categories]).T
    row_ind, col_ind = linear_sum_assignment(np.max(contingency) - contingency)
    return float(contingency[row_ind, col_ind].sum()) / len(gtlabels)
| [
"camillarhodes@yandex.com"
] | camillarhodes@yandex.com |
bfdfdee00088f6ef34d722f1722ffddac8b10390 | 78c70249766e45c8bc2792259b98ecfab4d2d538 | /hw3/code/dicttests.py | 57682eb13177753e5e7060eed20aaa7a7a9a13d6 | [] | no_license | schuberm/6.867 | a52baccaac5fba26dcf986041fbf91c9e965cb28 | 29f4ad7619528db32eb41fb27b02ce9fca41f131 | refs/heads/master | 2021-01-09T06:37:11.935935 | 2017-02-06T00:31:54 | 2017-02-06T00:31:54 | 81,029,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | import tensorflow as tf
import numpy as np
from six.moves import cPickle as pickle
import sys
import math
import time
if __name__ == '__main__':
params = {}
params['batch_size'] = 10
params['learning_rate'] = 0.01
# params['layer1_filter_size'] = 5
# params['layer1_depth'] = 16
# params['layer1_stride'] = 2
# params['layer2_filter_size'] = 5
# params['layer2_depth'] = 16
# params['layer2_stride'] = 2
# params['layer3_num_hidden'] = 64
# params['layer4_num_hidden'] = 64
params['num_training_steps'] = 1501
layer1 = {}
layer2 = {}
layer3 = {}
layer4 = {}
layer1['filter_size'] = 5
layer1['depth'] = 16
layer1['stride'] = 2
layer1['pooling'] = False
layer1['hidden'] = False
layer2['filter_size'] = 5
layer2['depth'] = 16
layer2['stride'] = 2
layer2['pooling'] = False
layer2['hidden'] = False
layer3['hidden'] = True
layer3['num_hidden'] = 64
layer4['hidden'] = True
layer4['num_hidden'] = 64
params['layers'] = [layer1, layer2, layer3, layer4]
for count, l in enumerate(params['layers']):
# print l
# l['weights'] = 3
#print l
print l
print count
if count > 0:
print params['layers'][count-1]
#print params['layers'][1]['pooling']
| [
"schuberm@mit.edu"
] | schuberm@mit.edu |
45bd51b285b82aa9a54a49c04f1a1b5500c42905 | 5721d04752cdc912ee867d83a9e615a04b9d1e33 | /kotti_component/apis/entity.py | 36df9c99e33490d45fe2d9ba345bfbe6eb9655e0 | [
"BSD-3-Clause-Modification"
] | permissive | quyetnd-parlayz/kotti_component | 4e507d1748afc9733a6b6d568d026c0618f93bf6 | fa3064bab6f5884e2fe3a277158b45412a2ae781 | refs/heads/master | 2021-01-23T00:52:49.200722 | 2015-11-16T04:00:00 | 2015-11-16T04:00:00 | 43,043,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | # -*- coding: utf-8 -*-
from pyramid.view import view_config, view_defaults
from kotti.util import title_to_name
from kotti_component.interfaces import IEntity
from kotti_component.resources import Entity
@view_defaults(renderer='json', name="xxx")
class EntityCRUDView(object):
def __init__(self, context, request):
self.context = context
self.request = request
# @view_config(request_method="POST", permission='add')
@view_config(request_method="POST")
def post(self):
# import pdb; pdb.set_trace()
cstruct = self.request.json
if 'name' in cstruct:
name = cstruct['name']
else:
title = cstruct.get('title', None)
if not title:
self.request.response.status = 400
return {'error': "Object construction require either 'name' or 'title'"}
name = title_to_name(title)
#TODO: Check name occopuied or not
#TODO: Component validation
obj = Entity(name=name)
for key in cstruct:
setattr(obj, key, cstruct[key])
self.context[name] = obj
self.request.response.status = 201
return {}
@view_config(request_method="GET", permission='view')
def get(self):
return self.context.__json__()
@view_config(request_method="DELETE", permission='delete')
def delete(self):
del self.parent[self.name]
return {}
@view_config(request_method="PUT", permission='edit')
def put(self):
cstruct = self.request.json
@view_config(request_method="PATCH", permission='edit')
def patch(self):
cstruct = self.request.json
#TODO: Component validation
self.update(cstruct)
return self.__json__
@view_config(name="upload", request_method="POST", permission="add")
def upload(self):
pass | [
"quyet@parlayz.com"
] | quyet@parlayz.com |
7a7a911233f0651a9d14ce84ce0c7661326b6ce5 | 6b557cb17f07d28a0eafc22ce6e95683ff75da16 | /2_script/pre_wechat.py | 0e773c33705c341c9d6e0a35680c4197d3106ab5 | [] | no_license | ThianeX/seg_memoire | fa0796dd1f6394f0e93aed8b3d74947db90c1a2b | 7a73aebd3a656f9af509bf2a787ef2b778e2a93b | refs/heads/main | 2023-03-30T11:34:13.719815 | 2021-04-06T03:28:38 | 2021-04-06T03:28:38 | 319,254,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 28 16:27:33 2021
@author: Carol
"""
import re
import pypinyin
def pinyin(phrase):
"""
Input = str phrase
Output = str pinyin correspond
"""
pyin = ''
for i in pypinyin.pinyin(phrase):
pyin = pyin + ''.join(i) + " "
return pyin
text = []
with open('../1_data/1_original/chat.txt', "r") as c:
file_read = c.readlines()
for i in range(len(file_read)):
if i%5 == 2:
text.append(file_read[i])
with open('../1_data/2_corpus/corpus_chat.txt', "w") as c:
c.writelines(text)
with open('../1_data/2_corpus/corpus_chat.txt', "r", encoding="utf-8") as \
file_opened:
file_opened = file_opened.read()
pattern = r'?|。|!|\n'
result_list = re.split(pattern, file_opened)
corpus_humain = ''
c = 0
for i in result_list:
if i != '' and i[0] != '&':
c += 1
corpus_humain += str(c)+'\t'+str(i)+'\n'+('#'+pinyin(i))+'\n#\n#\n'
elif i != '' and i[0] == '&':
corpus_humain +=str(i)+'\n'
with open('../1_data/2_corpus/corpus_chat_aseg.txt', "w", encoding="utf-8") as\
file_opened:
file_opened.write(corpus_humain)
| [
"72928225+ThianeX@users.noreply.github.com"
] | 72928225+ThianeX@users.noreply.github.com |
69d13e0df84285ec17a9d7b103da20bdb16baf1a | 1432b7cdeadf80cce90802881de32482547e2897 | /Seq2seq/BLEU.py | 749599e1ae9ffa8b9a83e80e6cd09b3f5b37a689 | [] | no_license | huhanGitHub/bert-Seq2Seq | d201df3627f4c3f166ea633fcb6e4a3925a20af3 | 3aae8e8ac828fe0c398a94a689a3448da8f623bd | refs/heads/master | 2020-05-18T18:19:01.610783 | 2019-05-29T06:11:20 | 2019-05-29T06:11:20 | 184,581,588 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | from nltk.translate.bleu_score import sentence_bleu
import argparse
def argparser():
Argparser = argparse.ArgumentParser()
Argparser.add_argument('--reference', type=str, default='summaries.txt', help='Reference File')
Argparser.add_argument('--candidate', type=str, default='candidates.txt', help='Candidate file')
args = Argparser.parse_args()
return args
args = argparser()
reference = open(args.reference, 'r').readlines()
candidate = open(args.candidate, 'r').readlines()
if len(reference) != len(candidate):
raise ValueError('The number of sentences in both files do not match.')
score = 0.
for i in range(len(reference)):
score += sentence_bleu([reference[i].strip().split()], candidate[i].strip().split())
score /= len(reference)
print("The bleu score is: "+str(score)) | [
"543160303@qq.com"
] | 543160303@qq.com |
20f19c3f9f69fbff03cfb2e3f5355089e81e8278 | 6d66057b7efeb993976705080a11e5be2f2cfa11 | /mysite/settings.py | 19cf83091d7f92707984793836aff819b3565166 | [] | no_license | klt19991115/my-first-blog | fc4edd89eb1126307e61ef9ffa651c80f7f5597c | fcc7137a7644e1c488d630a1c1d1b3fe70f18c83 | refs/heads/master | 2021-08-08T21:38:39.335994 | 2017-11-11T09:26:19 | 2017-11-11T09:26:19 | 108,621,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e!b$&jk4p$7zh9c#pu!748bua=@bgg-&ngd(3&vk#2k3@*1+lu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['klt19991115.pythonanywhere.com']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"1540718150@qq.com"
] | 1540718150@qq.com |
6b0e8bbafdf04a7054fd2155d8724009ba943443 | e7441ca8b77e818a18edbe9a4cf65395b1318611 | /style_vae/model/layers.py | 7c40035198dd314cb484af3260f3ce32b25f7be4 | [
"MIT"
] | permissive | orgoro/style-vae | 18adfbd85d7c6293d8c8ff5680cbb5e8937a3a64 | e437d606b0336d793f75658eee84ebb8f387e84c | refs/heads/master | 2023-03-31T00:03:01.063640 | 2020-09-29T07:56:55 | 2020-09-29T07:56:55 | 164,336,591 | 30 | 4 | MIT | 2023-03-24T23:41:51 | 2019-01-06T19:01:57 | Python | UTF-8 | Python | false | false | 5,590 | py | # 3rd party:
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
# same category:
class VaeLayers(object):
@staticmethod
def normalize(x, epsilon=1e-8):
"""Pixelwise feature vector normalization"""
with tf.variable_scope('Normalize'):
x -= tf.reduce_mean(x, axis=[1, 2], keepdims=True)
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[1, 2], keepdims=True) + epsilon)
return x
@staticmethod
def blur_2d(x):
with tf.variable_scope('Blur2d'):
kernel = np.array([[1., 2., 1.], [2., 4., 2.], [1., 2., 1.]])
kernel /= np.sum(kernel)
blur_filter = tf.constant(kernel, dtype=tf.float32)
blur_filter = tf.tile(blur_filter[:, :, None, None], [1, 1, int(x.shape[3]), 1])
x = tf.nn.depthwise_conv2d(x, blur_filter, strides=[1, 1, 1, 1], padding='SAME')
return x
@staticmethod
def add_bias(x):
with tf.variable_scope('Bias'):
b = tf.Variable(initial_value=tf.zeros(shape=(1, 1, 1, x.shape[3])), name='b')
return x + b
@staticmethod
def additive_noise(x):
"""scaling noise bi-cubic to fit x and adding it"""
with tf.variable_scope('Additive-Noise'):
n = tf.random_normal([tf.shape(x)[0], x.shape[1], x.shape[2], 1], dtype=x.dtype)
b = tf.Variable(initial_value=tf.zeros(shape=(1, 1, 1, x.shape[3])), name='b')
return x + b * n
@staticmethod
def stylize(x, style):
"""stylize the feature maps AdaIN in paper"""
with tf.variable_scope('Stylize'):
style_affined = layers.Dense(units=x.shape[3] * 2, name='dense')(style)
style_scale, style_bias = tf.split(style_affined, 2, axis=1)
return x * (style_scale[:, None, None, :] + 1) + style_bias[:, None, None, :]
@staticmethod
def from_image(x, f_maps):
activation = layers.LeakyReLU(0.2)
name = 'conv1'
return layers.Conv2D(filters=f_maps,
kernel_size=1,
padding='same',
activation=activation,
name=name)(x)
@staticmethod
def conv3(x, f_maps, activation, blur=False, add_noise=True):
with tf.variable_scope('Cell'):
name = 'conv3'
x = layers.Conv2D(filters=f_maps,
kernel_size=3,
padding='same',
activation=None,
use_bias=False,
name=name)(x)
if blur:
x = VaeLayers.blur_2d(x)
if add_noise:
x = VaeLayers.additive_noise(x)
x = VaeLayers.add_bias(x)
x = activation(x)
return x
@staticmethod
def conv3_stride2(x, f_maps, activation):
name = 'conv3s2'
x = layers.Conv2D(filters=f_maps,
kernel_size=3,
padding='same',
strides=2,
activation=activation,
name=name)(x)
return x
@staticmethod
def cell_up(x, f_maps, style, activation=layers.LeakyReLU(0.2)):
with tf.variable_scope('Cell-up'):
size = [2 * int(x.shape[1]), 2 * int(x.shape[2])]
x = tf.image.resize_nearest_neighbor(x, size, align_corners=True)
x = VaeLayers.conv3(x, f_maps, activation, blur=True)
x = VaeLayers.normalize(x)
x = VaeLayers.stylize(x, style)
x = VaeLayers.conv3(x, f_maps, activation)
x = VaeLayers.normalize(x)
x = VaeLayers.stylize(x, style)
return x
@staticmethod
def to_rgb(x, activation=layers.LeakyReLU(0.2)):
name = 'to_rgb'
x = layers.Conv2D(filters=3,
kernel_size=3,
padding='same',
strides=1,
activation=activation,
kernel_initializer='glorot_normal',
name=name)(x)
return x
@staticmethod
def to_gray(x, activation=layers.LeakyReLU(0.2)):
name = 'to_gray'
x = layers.Conv2D(filters=1,
kernel_size=3,
padding='same',
strides=1,
activation=activation,
kernel_initializer='glorot_normal',
name=name)(x)
return x
@staticmethod
def first_cell_up(var, style, f_maps, activation=layers.LeakyReLU(0.2)):
with tf.variable_scope('First-cell-up'):
x = tf.identity(var)
x = VaeLayers.additive_noise(x)
x = VaeLayers.add_bias(x)
x = activation(x)
x = VaeLayers.normalize(x)
x = VaeLayers.stylize(x, style)
x = VaeLayers.conv3(x, f_maps, activation)
x = VaeLayers.normalize(x)
x = VaeLayers.stylize(x, style)
return x
@staticmethod
def cell_down(x, f_maps, activation=layers.LeakyReLU(0.2)):
with tf.variable_scope('Cell-down'):
x = VaeLayers.conv3(x, f_maps[0], activation, add_noise=False)
x = VaeLayers.conv3_stride2(x, f_maps[1], activation)
return x
@staticmethod
def map_cell(x):
mapper = layers.Dense(x.shape[1], name='map-cell')
return mapper(x)
| [
"or.gorodissky@d-id.com"
] | or.gorodissky@d-id.com |
c064647cd1304d7aff89c6683cd29e2b315cfa1e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2863/60673/273869.py | 625083ae06703fe4379f18234384daf60c110ffb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | # 围墙高h 第i个人高a[i] 平常走路宽度为1 弯腰2
n, h = input().split(" ")
a = input().split(" ")
n = int(n)
h = int(n)
for i in range(n):
a[i] = int(a[i])
walkNum = 0
bendNum = 0
for i in range(n):
if (a[i] <= h):
walkNum += 1
else:
bendNum += 1
print(walkNum + bendNum * 2)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
36b2bd74da83b41b74bc60e1f3a342898a87393f | c8121dda040f4483eb67e1cb9e77377d951f8c09 | /utils/evaluation.py | 4390629b23c3ef4ad18b7c05426890961ea999bd | [] | no_license | futureseadev/mofc-demand-forecasting-with-time-series-analysis | 0c9d7f4d2935a30430903153398e2bd517985ded | f644483befb20e79d32de190a0a5cfc4a9315f6d | refs/heads/main | 2023-07-12T19:31:14.308418 | 2021-08-28T01:03:28 | 2021-08-28T01:03:28 | 418,940,351 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,988 | py | from typing import Union
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
class WRMSSEEvaluator(object):
def __init__(
self,
df_train: pd.DataFrame,
df_test: pd.DataFrame,
calendar: pd.DataFrame,
selling_prices: pd.DataFrame,
test_steps: int,
):
train = df_train.copy()
test = df_test.copy()
target = train.loc[:, train.columns.str.startswith("d_")]
train_target_columns = target.columns.tolist()
weight_columns = target.iloc[:, -test_steps:].columns.tolist()
train["all_id"] = 0
key_columns = train.loc[:, ~train.columns.str.startswith("d_")].columns.tolist()
test_target_columns = test.loc[
:, test.columns.str.startswith("d_")
].columns.tolist()
if not all([column in test.columns for column in key_columns]):
test = pd.concat([train[key_columns], test], axis=1, sort=False)
self.train = train
self.test = test
self.calendar = calendar
self.selling_prices = selling_prices
self.weight_columns = weight_columns
self.key_columns = key_columns
self.test_target_columns = test_target_columns
sales_weights = self.get_sales_weight()
self.group_ids = (
"all_id",
"state_id",
"store_id",
"cat_id",
"dept_id",
["state_id", "cat_id"],
["state_id", "dept_id"],
["store_id", "cat_id"],
["store_id", "dept_id"],
"item_id",
["item_id", "state_id"],
["item_id", "store_id"],
)
for i, group_id in enumerate(tqdm(self.group_ids)):
train_total_quantities = train.groupby(group_id)[train_target_columns].sum()
scale = []
for _, row in train_total_quantities.iterrows():
series = row.values[np.argmax(row.values != 0) :]
scale.append(((series[1:] - series[:-1]) ** 2).mean())
setattr(self, f"level-{i + 1}_scale", np.array(scale))
setattr(
self, f"level-{i + 1}_train_total_quantities", train_total_quantities
)
setattr(
self,
f"level-{i + 1}_test_total_quantities",
test.groupby(group_id)[test_target_columns].sum(),
)
level_weight = (
sales_weights.groupby(group_id)[weight_columns].sum().sum(axis=1)
)
setattr(self, f"level-{i + 1}_weight", level_weight / level_weight.sum())
def get_sales_weight(self) -> pd.DataFrame:
day_to_week = self.calendar.set_index("d")["wm_yr_wk"].to_dict()
sales_weights = self.train[
["item_id", "store_id"] + self.weight_columns
].set_index(["item_id", "store_id"])
sales_weights = (
sales_weights.stack()
.reset_index()
.rename(columns={"level_2": "d", 0: "value"})
)
sales_weights["wm_yr_wk"] = sales_weights["d"].map(day_to_week)
sales_weights = sales_weights.merge(
self.selling_prices, how="left", on=["item_id", "store_id", "wm_yr_wk"]
)
sales_weights["value"] = sales_weights["value"] * sales_weights["sell_price"]
sales_weights = sales_weights.set_index(["item_id", "store_id", "d"]).unstack(
level=2
)["value"]
sales_weights = sales_weights.loc[
zip(self.train["item_id"], self.train["store_id"]), :
].reset_index(drop=True)
sales_weights = pd.concat(
[self.train[self.key_columns], sales_weights], axis=1, sort=False
)
return sales_weights
def rmsse(self, prediction: pd.DataFrame, level: int) -> pd.Series:
test_total_quantities = getattr(self, f"level-{level}_test_total_quantities")
score = ((test_total_quantities - prediction) ** 2).mean(axis=1)
scale = getattr(self, f"level-{level}_scale")
return (score / scale).map(np.sqrt)
def score(self, predictions: Union[pd.DataFrame, np.ndarray]) -> float:
assert self.test[self.test_target_columns].shape == predictions.shape
if isinstance(predictions, np.ndarray):
predictions = pd.DataFrame(predictions, columns=self.test_target_columns)
predictions = pd.concat(
[self.test[self.key_columns], predictions], axis=1, sort=False
)
all_scores = []
for i, group_id in enumerate(self.group_ids):
level_scores = self.rmsse(
predictions.groupby(group_id)[self.test_target_columns].sum(), i + 1
)
weight = getattr(self, f"level-{i + 1}_weight")
level_scores = pd.concat([weight, level_scores], axis=1, sort=False).prod(
axis=1
)
all_scores.append(level_scores.sum())
return np.mean(all_scores)
| [
"aldente0630@gim-yeongmin-ui-iMac.local"
] | aldente0630@gim-yeongmin-ui-iMac.local |
4c46fd7948d332eec39878268397f7b64b9f3b83 | fea4c2733b81d7af0f8f1ef4a7a2e2c5969691fa | /PyQt4/hufsgrade_ver1.31(test).py | 648897ec15fa96cace1c650c68e6b9a8303e99e3 | [] | no_license | datalater/hufsgrade | 8345e451690d00db57ff11cc5a315377d48a4d79 | e410607116fe428ea23959d3b3d4c8316db2f65b | refs/heads/master | 2020-05-21T23:36:00.552913 | 2016-09-28T13:30:39 | 2016-09-28T13:30:39 | 64,480,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,114 | py | import os
import sys
from PyQt4 import QtGui, QtCore
import logging
import requests
from bs4 import BeautifulSoup
import re
import time
#from os.path import join, abspath
# Browser-like User-Agent so the university web server serves us normal pages.
head={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
# HUFS portal endpoints: login form, post-login main page, student-info page,
# and the credit-summary pages (tab_lang=K selects the Korean tab).
login_url = "https://webs.hufs.ac.kr/src08/jsp/login/LOGIN1011M.jsp"
main_page = "http://webs.hufs.ac.kr:8989/src08/jsp/main.jsp?"
studentinfo_url = "http://webs.hufs.ac.kr:8989/src08/jsp/stuinfo_10/STUINFO1000C_myinfo.jsp"
credits_url = "http://webs.hufs.ac.kr:8989/src08/jsp/grade/GRADE1030L_Top.jsp?tab_lang=K"
credits_list_url = "http://webs.hufs.ac.kr:8989/src08/jsp/grade/GRADE1030L_List.jsp?tab_lang=K"
#requests.utils.DEFAULT_CA_BUNDLE_PATH = join(abspath('.'), 'cacert.pem')
# Local resources: CA bundle passed as requests' verify= argument, and the
# window-icon image file.
cafile = 'cacert.pem'
hufsfile = 'hufslogo.png'
# Default main-window size as [width, height] in pixels.
full_size = [660, 500]
class Window(QtGui.QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.setGeometry(100, 100, full_size[0], full_size[1])
self.setWindowTitle("HUFSGrade_ver1.3")
self.setWindowIcon(QtGui.QIcon(hufsfile))
self.label = QtGui.QLabel("ID", self)
self.label.setGeometry(200, 20, 90, 20)
self.lineEdit = QtGui.QLineEdit(self)
self.lineEdit.setGeometry(300, 20, 150, 20)
self.lineEdit.returnPressed.connect(self.login)
self.label_2 = QtGui.QLabel("PASSWORD", self)
self.label_2.setGeometry(200, 50, 90, 20)
self.lineEdit_2 = QtGui.QLineEdit(self)
self.lineEdit_2.setGeometry(300, 50, 150, 20)
self.lineEdit_2.setEchoMode(QtGui.QLineEdit.Password)
self.lineEdit_2.returnPressed.connect(self.login)
self.pushButton = QtGui.QPushButton("로그인", self)
self.pushButton.setGeometry(200, 80, 250, 28)
self.pushButton.clicked.connect(self.login)
self.pushButton.setAutoDefault(True)
#self.pushButton.returnPressed.connect(self.login)
self.progress = QtGui.QProgressBar(self)
self.progress.setGeometry(430, 475, 220, 20)
self.pushButton_2 = QtGui.QPushButton("뒤로가기", self)
self.pushButton_2.setGeometry(552, 440, 80, 28)
self.pushButton_2.clicked.connect(self.goback)
self.label_3 = QtGui.QLabel("학번", self)
self.label_3.setGeometry(10, 10, 280, 20)
self.label_5 = QtGui.QLabel("이름", self)
self.label_5.setGeometry(370, 10, 280, 20)
self.label_5.setAlignment(QtCore.Qt.AlignRight)
self.line = QtGui.QFrame(self)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setGeometry(0, 95, 660, 60)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2 = QtGui.QFrame(self)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setGeometry(0, 25, 660, 20)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.label_6 = QtGui.QLabel("영역별 취득 학점", self)
self.label_6.setGeometry(20, 65, 240, 20)
self.label_7 = QtGui.QLabel("(전공평점: )", self)
self.label_7.setGeometry(535, 65, 120, 20)
self.label_4 = QtGui.QLabel("한국외국대학교 종합정보시스템 ID와 PWD를 입력해주세요.", self)
self.label_4.setGeometry(5, 475, 500, 20)
self.tableWidget = QtGui.QTableWidget(self)
self.tableWidget.setGeometry(20, 90, 615, 130)
self.tableWidget.setColumnCount(10)
self.tableWidget.setRowCount(3)
# table cell[start]
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setHorizontalHeaderItem(9, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 4, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 5, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 6, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 7, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 8, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(0, 9, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 3, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 4, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 5, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 6, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 7, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 8, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.tableWidget.setItem(1, 9, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 1, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 2, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 3, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 4, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 5, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 6, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 7, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 8, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
self.tableWidget.setItem(2, 9, item)
self.tableWidget.horizontalHeaderItem(0).setText("1전공")
self.tableWidget.horizontalHeaderItem(1).setText("이중")
self.tableWidget.horizontalHeaderItem(2).setText("2전공")
self.tableWidget.horizontalHeaderItem(3).setText("실외")
self.tableWidget.horizontalHeaderItem(4).setText("교양")
self.tableWidget.horizontalHeaderItem(5).setText("부전공")
self.tableWidget.horizontalHeaderItem(6).setText("교직")
self.tableWidget.horizontalHeaderItem(7).setText("자선")
self.tableWidget.horizontalHeaderItem(8).setText("총취득")
self.tableWidget.horizontalHeaderItem(9).setText("총평점")
self.tableWidget.verticalHeaderItem(0).setText("전체")
self.tableWidget.verticalHeaderItem(1).setText("취득")
self.tableWidget.verticalHeaderItem(2).setText("차분")
self.tableWidget.horizontalHeader().setVisible(True)
self.tableWidget.horizontalHeader().setDefaultSectionSize(57)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.verticalHeader().setVisible(True)
self.tableWidget.verticalHeader().setDefaultSectionSize(35)
self.tableWidget.verticalHeader().setStretchLastSection(True)
# table cell[end]
self.show()
self.line_2.hide()
self.label_3.hide()
self.label_5.hide()
self.label_6.hide()
self.label_7.hide()
self.tableWidget.hide()
self.pushButton_2.hide()
def login(self):
self.current_session = requests.session()
params = {'user_id': self.lineEdit.text(),'password': self.lineEdit_2.text(),'gubun': 'o','reurl': '','SSL_Login': 1}
self.current_session.post(login_url, data=params, headers=head, verify=cafile)
#-------------------------학생정보--------------------------#
self.current_session.get(main_page, headers=head, verify=cafile)
self.studentinfo = self.current_session.get(studentinfo_url, headers=head, verify=cafile)
html = BeautifulSoup(self.studentinfo.text, "html.parser")
student_college = html.find(string=re.compile('소속')).parent.next_sibling.next_sibling.next_element.next_element.string
try:
student_major = student_college.next_element.next_element.next_element.next_element.string
self.completed = 0
while self.completed < 100:
self.completed += 0.00008
self.progress.setValue(self.completed)
except AttributeError:
self.label_4.setText("잘못된 로그인입니다.")
student_id= html.find(string=re.compile('학번')).parent.next_sibling.next_sibling.string
student_name = html.find(string=re.compile('성명')).parent.parent.next_sibling.next_element.next_element.next_element.next_sibling.next_sibling.string
student_name = student_name.replace("\r\n\t\t\t\t","")
student_name_ko = html.find(string=re.compile('성명')).parent.next_sibling.next_sibling.next_sibling.next_sibling.string
print(student_id)
print(student_name)
print(student_name_ko)
# 입학연도
student_id_year = int(str(student_id)[:4])
print(student_id_year)
#-------------------------성적정보(영역별취득학점)--------------------------#
self.graduateinfo=self.current_session.get(credits_url,headers=head, verify=cafile)
html = BeautifulSoup(self.graduateinfo.text, "html.parser")
# 이중전공자 전공심화자 구분 및 각 전공 과목 parsing
major_state = ""
if html.find(string=re.compile('\[이중전공\]')) is not None:
major_state ="이중전공"
student_other_major = html.find(string=re.compile('\[이중전공\]')).next_element
student_other_major = student_other_major.replace(u'\xa0', u' ').replace("(","").replace(" ","")
student_other_major = "이중: " + student_other_major
elif html.find(string=re.compile('전공심화')) is not None:
major_state = "전공심화(부전공)"
student_other_major = html.find(string=re.compile('전공심화')).next_element
student_other_major = student_other_major.replace(u'\xa0', u' ').replace("(","").replace(" ","")
student_other_major = "부:" + student_other_major
else:
major_state = "not yet decided"
# 1전공 parsing
student_first_major = html.find(string=re.compile('\[1전공\]')).next_element
student_first_major = student_first_major.replace(u'\xa0', u' ').replace("(","").replace(" ","")
grade_data = [i.string for i in html.find("tr",class_="table_w").find_all("td")]
credits_completed = grade_data[1:-2]
grade_per_average = grade_data[-2:]
graduateinfo = credits_completed + grade_per_average
#2015~학번(사범대 제외)
dual_major_required_15 = [54, 42, 0, 6, 26, 0, 0, 6, 134, 4.5]
minor_required_15 = [70, 0, 21, 6, 26, 0, 0, 11, 134, 4.5]
#2007~2014학번(사범대 제외)
dual_major_required = [54, 54, 0, 4, 22, 0, 0, 0, 134, 4.5]
minor_required = [75, 0, 0, 4, 22, 21, 0, 12, 134, 4.5]
dual_major_required = list(map(str, dual_major_required))
minor_required = list(map(str, minor_required))
#-------------------------성적정보(전공평점)--------------------------#
self.creditsinfo=self.current_session.get(credits_list_url,headers=head)
html = BeautifulSoup(self.creditsinfo.text, "html.parser")
grade_dic = {'A+':4.5, 'A0':4.0, 'B+':3.5, 'B0':3.0, 'C+':2.5, 'C0':2.0, 'D+':1.5, 'D0':1.0, 'F':0}
# 전공 평점 구하기 시작
first_major_credit = [] # credit: 학점(e.g. 3)
first_major_grade = [] # grade: 등급(e.g. A+)
first_major_grade_float = [] # grade_float: 등급 환산 점수(e.g. A+ -> 4.5)
first_major_multiply = []
for td in html.find_all("tr",class_="table_w"):
for td_first_major in td.find_all(string=re.compile('1전공|이중')):
for td_credits in td_first_major.parent.next_sibling.next_sibling:
first_major_credit.append(float(td_credits))
for td_grades in td_first_major.parent.next_sibling.next_sibling.next_sibling.next_sibling:
first_major_grade.append(td_grades)
# 등급 점수로 환산하기(e.g. A+ -> 4.5)
for element in first_major_grade:
first_major_grade_float.append(grade_dic[element])
# 학점 곱하기 등급
for i in range(len(first_major_credit)):
first_major_multiply.append(first_major_credit[i] * first_major_grade_float[i])
# 전공 평점 구하기 끝
first_major_gpa = round(sum(first_major_multiply)/sum(first_major_credit),2)
#-------------------------학생정보 나타내기--------------------------#
self.label_3.setText(student_id + " " + student_first_major + "(" + student_other_major + ")")
self.label_6.setText("영역별 취득학점: "+major_state+" 기준")
self.label_5.setText(student_name_ko+"("+student_name+")"+"님, 반갑습니다.")
# 상태bar
self.label_4.setText("졸업 심사 시 전체를 넘는 취득학점은 자선으로 처리됩니다.")
#-------------------------성적정보 테이블 위젯에 나타내기--------------------------#
if student_id_year < 2015 or student_id_year > 2007:
if major_state == "이중전공":
for i in range(len(dual_major_required)):
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(dual_major_required[i])
self.tableWidget.setItem(0, i, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(graduateinfo[i])
self.tableWidget.setItem(1, i, item)
if i <9:
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
item.setText(str(int(dual_major_required[i])-int(graduateinfo[i])))
self.tableWidget.setItem(2, i, item)
elif major_state == "전공심화(부전공)":
for i in range(len(minor_required)):
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(minor_required[i])
self.tableWidget.setItem(0, i, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(graduateinfo[i])
self.tableWidget.setItem(1, i, item)
if i <9:
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
item.setText(str(int(minor_required[i])-int(graduateinfo[i])))
self.tableWidget.setItem(2, i, item)
else:
for i in range(len(dual_major_required)):
self.tableWidget.setItem(1, i, QtGui.QTableWidgetItem(graduateinfo[i]))
elif student_id_year >= 2015:
if major_state == "이중전공":
for i in range(len(dual_major_required)):
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(dual_major_required_15[i])
self.tableWidget.setItem(0, i, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(graduateinfo[i])
self.tableWidget.setItem(1, i, item)
if i <9:
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
item.setText(str(int(dual_major_required_15[i])-int(graduateinfo[i])))
self.tableWidget.setItem(2, i, item)
elif major_state == "전공심화(부전공)":
for i in range(len(minor_required)):
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(minor_required_15[i])
self.tableWidget.setItem(0, i, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(graduateinfo[i])
self.tableWidget.setItem(1, i, item)
if i <9:
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
brush = QtGui.QBrush(QtGui.QColor(220, 255, 217))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setBackground(brush)
item.setText(str(int(minor_required_15[i])-int(graduateinfo[i])))
self.tableWidget.setItem(2, i, item)
else:
for i in range(len(dual_major_required)):
self.tableWidget.setItem(1, i, QtGui.QTableWidgetItem(graduateinfo[i]))
else:
for i in range(len(dual_major_required)):
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
item.setText(graduateinfo[i])
self.tableWidget.setItem(1, i, item)
self.label_4.setText("07년도 이전 입학 학번에 대해서는 필수학점을 제공하지 않습니다.")
self.label_7.setText("(전공평점: "+str(first_major_gpa)+")")
#-----------------------------------------------------------------------------------#
#-------------------------로그인 위젯 hide--------------------------#
self.label.hide()
self.label_2.hide()
self.lineEdit.hide()
self.lineEdit_2.hide()
self.pushButton.hide()
self.line.hide()
self.progress.hide()
#-------------------------학생/성적정보 위젯 show--------------------------#
self.line_2.show()
self.label_3.show()
self.label_5.show()
self.label_6.show()
self.label_7.show()
self.tableWidget.show()
self.pushButton_2.show()
def goback(self):
self.progress.reset()
#-------------------------학생/성적정보 위젯 hide--------------------------#
self.line_2.hide()
self.label_3.hide()
self.label_5.hide()
self.label_6.hide()
self.label_7.hide()
self.tableWidget.hide()
self.pushButton_2.hide()
#-------------------------로그인 위젯 show--------------------------#
self.label.show()
self.label_2.show()
self.lineEdit.show()
self.lineEdit_2.show()
self.pushButton.show()
self.line.show()
self.progress.show()
self.label_4.setText("한국외국대학교 종합정보시스템 ID와 PWD를 입력해주세요")
#def close_application(self):
# print("whooaaaa so custom!!!")
# sys.exit()
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Message',
"종료하시겠습니까?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
    # NOTE(review): everything below is unreachable -- sys.exit() raises
    # SystemExit before these lines can run.  If the logging/stderr tweaks
    # are wanted they must happen before app.exec_().
    logger = logging.getLogger('my-logger')
    logger.propagate = False
    sys.stderr = sys.stdout
    sys.tracebacklimit = 0
    # NOTE(review): os.devnull is a *path string*; assigning it to
    # sys.stdout/sys.stderr would not silence output -- an actual file
    # object (open(os.devnull, 'w')) would be needed.
    sys.stderr = sys.stdout = os.devnull
| [
"the7mincheol@gmail.com"
] | the7mincheol@gmail.com |
fd3b1ab4cf6018b45bfc446da9fe3335c9a279f0 | ca14dcafbcc5ab752a1f7d798fcb4bbc48f17bca | /main.py | b135f8468887cb800285fe3ac3af7467fc838405 | [] | no_license | arthurmgo/nerd_bot | b55e230a15650bcd92ce82eab1da3ff4485314b8 | 732a642bf383bcfd57a967292d6286502fac51af | refs/heads/master | 2020-04-19T04:45:31.021581 | 2019-01-28T13:52:22 | 2019-01-28T13:52:22 | 167,970,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | import config
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from image_recognition import PredictionThread
import wikipedia
import logging
import os
execution_path = os.getcwd()
wikipedia.set_lang("pt")
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
cont = 0
phrases = [None, None, None, None]
# Create a new instance of a ChatBot
chat_bot = ChatBot(
'Terminal',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=[
'chatterbot.logic.BestMatch'
],
database_uri='sqlite:///database.db'
)
trainer = ListTrainer(chat_bot)
trainer.train([
"Oi",
"Olá",
"Tudo bem?",
"Tudo bem sim, e com você?"
])
def start(bot, update):
    """Handle the /start command with a greeting."""
    greeting = 'Olá!'
    update.message.reply_text(greeting)
def help(bot, update):
    """Handle the /help command."""
    reply = 'Help!'
    update.message.reply_text(reply)
def echo(bot, update):
    """Buffer plain chat messages; retrain the bot on every 4th one."""
    global phrases, cont
    slot = cont % 4
    phrases[slot] = update.message.text
    cont = cont + 1
    if not cont % 4:
        # A full window of 4 messages is available -- use it as training data.
        trainer.train(phrases)
    print(cont)
def nerd(bot, update):
    """Reply to ``/nerd <text>`` with the chatterbot's answer."""
    question = update.message.text.replace("/nerd", "")
    answer = chat_bot.get_response(question)
    update.message.reply_text(str(answer))
def wiki(bot, update):
    """Reply to ``/wiki <topic>`` with the Wikipedia summary."""
    topic = update.message.text.replace("/wiki", "")
    summary = wikipedia.summary(topic)
    update.message.reply_text(str(summary))
def image(bot, update):
    """Download the largest photo in the message and reply with the
    image-recognition result."""
    photo = bot.get_file(update.message.photo[-1].file_id)
    photo.download("image.jpeg")
    worker = PredictionThread()
    worker.start()
    worker.join()  # block until the prediction is ready
    update.message.reply_text(worker.result)
def error(bot, update, error):
    """Log Errors caused by Updates."""
    # Lazy %-style args keep formatting out of the hot path.
    logger.warning('Update "%s" caused error "%s"', update, error)
def main():
    """Wire up all handlers and start polling Telegram for updates."""
    updater = Updater(config.TELEGRAM_TOKEN)
    dispatcher = updater.dispatcher
    # Register command handlers first, preserving the original order.
    for command, callback in (("start", start), ("help", help),
                              ("nerd", nerd), ("wiki", wiki)):
        dispatcher.add_handler(CommandHandler(command, callback))
    dispatcher.add_handler(MessageHandler(Filters.text, echo))
    dispatcher.add_handler(MessageHandler(Filters.photo, image))
    dispatcher.add_error_handler(error)
    # Start the Bot
    updater.start_polling()
    updater.idle()
if __name__ == '__main__':
    # Run the bot only when executed as a script, not on import.
    main()
| [
"arthur.mirandag@gmail.com"
] | arthur.mirandag@gmail.com |
bd47e77a4d7318b7365b45e8afb112bc3b04155b | 1332790228c2fc88c19846a1d6d13db8274d01cb | /lab_3/__init__.py | ed8387b57cb3c63a1b0435b8fbdb23b51df5fafe | [] | no_license | pawelbubak/KCK | 4a60b74d3cea8a04adaaed7fcc6e835c0ba10f3c | 03736a3311e0b75978fdb67645916cd1d14a14c2 | refs/heads/master | 2020-03-31T02:47:40.322594 | 2018-11-05T23:42:03 | 2018-11-05T23:42:03 | 151,839,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | from .lab_3 import run
| [
"pawelbubak97@gmail.com"
] | pawelbubak97@gmail.com |
ec2472bd4dab4e5bf77b2f95018bb11b43bc09d2 | a528509d32e45dce63efcd45d57eb79eed1a1aa2 | /apitest/abcmeta.py | d9f7159938c14d051638549f3eddaef9161a70f0 | [] | no_license | Rex-Du/redis- | 10853ad55cf06bcfa7c9f9fa1d0a8a0b17186e3f | 9fcecc9628969bf207f2da8435d15c606622d9cf | refs/heads/master | 2022-06-14T19:41:44.325124 | 2020-05-05T15:36:55 | 2020-05-05T15:36:55 | 261,512,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,145 | py | from _weakrefset import WeakSet
class ABCMeta(type):
    """Re-implementation of the stdlib ``abc.ABCMeta``.

    Tracks abstract method names per class, supports registering
    "virtual" subclasses via :meth:`register`, and caches the results of
    ``isinstance``/``issubclass`` checks in WeakSets so entries disappear
    when the classes themselves are garbage-collected.
    """

    # Global counter bumped by register(); per-class negative caches that
    # were built before the bump are considered stale and rebuilt.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace, **kwargs):
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
        # Compute set of abstract method names
        abstracts = {name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False)}
        # Inherit abstract names from bases unless overridden by a
        # concrete (non-abstract) attribute on this class.
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC.

        Returns the subclass, to allow usage as a class decorator.
        """
        if not isinstance(subclass, type):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return subclass  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
        return subclass

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print("Class: %s.%s" % (cls.__module__, cls.__qualname__), file=file)
        print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
        for name in sorted(cls.__dict__):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                if isinstance(value, WeakSet):
                    value = set(value)
                print("%s: %r" % (name, value), file=file)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking
        subclass = instance.__class__
        if subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        if subtype is subclass:
            if (cls._abc_negative_cache_version ==
                    ABCMeta._abc_invalidation_counter and
                    subclass in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subclass)
        # __class__ and type() disagree (e.g. proxies): accept either.
        return any(cls.__subclasscheck__(c) for c in {subclass, subtype})

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
def abstractmethod(funcobj):
    """Mark *funcobj* as abstract and return it unchanged.

    ABCMeta collects every attribute flagged this way into the class's
    ``__abstractmethods__`` set.
    """
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
class Base(object, metaclass=ABCMeta):
    """Example abstract base: concrete subclasses must provide ``func_a``
    and ``func_b`` before they can be instantiated."""
    @abstractmethod
    def func_a(self, data):
        '''
        Abstract: process *data*.  Must be overridden by subclasses.
        :param data:
        :return:
        '''
    @abstractmethod
    def func_b(self, data, out):
        '''
        Abstract: process *data* into *out*.  Must be overridden.
        :param data:
        :param out:
        :return:
        '''
    def func_d(self):
        # Concrete helper inherited as-is by subclasses.
        print('func_d in base')
class Person(Base):
    """Concrete implementation of Base.

    BUGFIX: the original defined ``fun_a`` (a typo that did not override
    the abstract ``func_a``) and never implemented ``func_b``, so
    ``Person()`` raised TypeError -- both abstract methods were still
    unimplemented.
    """

    def func_a(self, data):
        """Minimal concrete implementation: return *data* unchanged."""
        return data

    def func_b(self, data, out):
        """Minimal concrete implementation: return the pair (*data*, *out*)."""
        return (data, out)
p = Person()
| [
"860435288@qq.com"
] | 860435288@qq.com |
33928601def468cab1a90517544dac9fff4e0f60 | d088f067d441d69efea110c8e9fea3ccc5094367 | /distance.py | 630b78f1364744d6805c6a9e99cab0a5f39b82e1 | [] | no_license | hemanth9516/pythonlab | b6eb53235e3f806c2ef5e61f0db2c41b933b9cf8 | 0ec2a34ecb97cbcfe9c7aa9248f688426533476f | refs/heads/master | 2022-05-21T12:48:10.961616 | 2022-04-01T18:28:04 | 2022-04-01T18:28:04 | 172,656,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | 2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
#Libraries
import RPi.GPIO as GPIO
import time
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER = 18  # output: drives the ultrasonic sensor's trigger line
GPIO_ECHO = 24     # input: reads the sensor's echo line
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
def distance():
    """Measure the distance with the ultrasonic sensor and return it in cm.

    NOTE(review): the two wait loops below spin forever if the echo line
    never changes (e.g. sensor unplugged) -- confirm a watchdog isn't
    needed for this deployment.
    """
    # Fire a 10 microsecond trigger pulse.
    GPIO.output(GPIO_TRIGGER, True)
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    pulse_start = time.time()
    pulse_end = time.time()
    # Timestamp the rising edge of the echo line...
    while GPIO.input(GPIO_ECHO) == 0:
        pulse_start = time.time()
    # ...and the falling edge.
    while GPIO.input(GPIO_ECHO) == 1:
        pulse_end = time.time()
    elapsed = pulse_end - pulse_start
    # Sound travels ~34300 cm/s; halve it because the pulse goes out and back.
    return (elapsed * 34300) / 2
if __name__ == '__main__':
    try:
        # Print one measurement per second until interrupted.
        while True:
            dist = distance()
            print ("Measured Distance = %.1f cm" % dist)
            time.sleep(1)
        # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Measurement stopped by User")
        GPIO.cleanup()
"noreply@github.com"
] | hemanth9516.noreply@github.com |
a0e1134d1d99ab8ae5a2419cc619d5f1639b5b52 | e7eafa9e62f6df05452099c7bc3bbefc325684d9 | /planeswalkers/planeswalkers/migrations/0002_auto_20150330_0328.py | 188eca29cb20c2458eeae666a3de898091424856 | [] | no_license | mekhami/planeswalkers-of-denton | 4945097f4c9a2cc47c708278804cd35652a587f6 | b132a65dd85e4c431b6e6feff80515660cd88e45 | refs/heads/master | 2016-09-05T18:41:49.570059 | 2015-03-30T03:36:52 | 2015-03-30T03:36:52 | 33,052,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds the two nullable player foreign keys to the Match model."""

    dependencies = [
        ('planeswalkers', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='match',
            name='player1',
            field=models.ForeignKey(related_name='player1_matches', to='planeswalkers.Player', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='match',
            name='player2',
            field=models.ForeignKey(related_name='player2_matches', to='planeswalkers.Player', null=True),
            preserve_default=True,
        ),
    ]
| [
"lawrence.vanderpool@gmail.com"
] | lawrence.vanderpool@gmail.com |
6f88b6e6753e5a969b188f9669df7aae3df620ef | 1389024bb2f8a4a80846a1d7c205646d8b75373a | /Clas.py | 873b4843fc6b412068edf705d25fce20e9e4384e | [] | no_license | diegoscr/Desafio2id3450 | 8a52f95a85f7b0d9117e6bdcc2ca446b1581f79e | 52fb3b9663b16f16e16d760fe0685bbb75b19456 | refs/heads/master | 2020-09-20T09:38:32.458382 | 2016-08-28T00:50:52 | 2016-08-28T00:50:52 | 66,742,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,829 | py | #Classificação de sinais utilizando matriz euclidiana
#autor Diego Silva Caldeira Rocha
import numpy as np
a= np.array([[1, 3, 5], [7, 9, 11], [13, 15, 17]])
#c= np.array([[5,2,9]])
c=np.zeros((3))
#print(a)
#print (c)
#np.insert(a,c)
#a=np.vstack((a,c))
#a=np.concatenate((a, c),axis=0)
#print(a)
AtSig=np.zeros((21))
MS0=np.empty((21))#matriz com amostra dos atributos do sinal tipo 0
MS1=np.empty((21))#MS1#matriz com amostra dos atributos do sinal tipo 1
MS2=np.empty((21)) #MS2#matriz com amostra dos atributos do sinal tipo 2
ts0=0
ts1=0
ts2=0
ref_arquivo = open("waveform.data", "r")
ref_arqsaid = open("result.data", "w")
i=-1;
for line in range(1,2201):
value=ref_arquivo.readline()
i=i+1
AtSig[i] =float(value)
if (i==20):
id=int(ref_arquivo.readline())
i=-1;
if (id==0):
ts0=ts0+1
MS0=np.vstack((MS0,AtSig))
elif(id==1):
ts1=ts1+1
MS1=np.vstack((MS1,AtSig))
elif(id==2):
ts2=ts2+1
MS2=np.vstack((MS2,AtSig))
MS0=np.delete(MS0, 0, 0)
MS1=np.delete(MS1, 0, 0)
MS2=np.delete(MS2, 0, 0)
erro=0;
acerto=0;
#print(MS1)
while (value!=''):
CtSig=np.zeros((21))
i=-1;
for line in range(0,21):
value=ref_arquivo.readline()
i=i+1
if (value==''):
break
CtSig[i] =float(value)
if (i==21):
id=int(ref_arquivo.readline())
i=-1;
best=float('inf')
classe=3
#comparando com sinal da classe 0
for aux in range(0,ts0):
R=MS0[aux]-CtSig #distancia euclidiana entre os atributos do sinal
R=R*R
R=R**0.5
if(best>R.mean()):
best=R.mean()#calculando a média das distancia eucldiana
classe=0
#comparando com sinal da classe 1
for aux in range(0,ts1):
R=MS1[aux]-CtSig #distancia euclidiana entre os atributos do sinal
R=R*R
R=R**0.5
if(best>R.mean()):
best=R.mean()#calculando a média das distancia eucldiana
classe=1
#comparando com sinal da classe 2
for aux in range(0,ts2):
R=MS2[aux]-CtSig #distancia euclidiana entre os atributos do sinal
R=R*R
R=R**0.5
if(best>R.mean()):
best=R.mean()#calculando a média das distancia eucldiana
classe=2
if (classe==id):
acerto=acerto+1
else:
erro=erro+1
ref_arqsaid.write('classe de sinal:'+str(classe)+'atributos:'+str(CtSig)+'\n')
value=ref_arquivo.readline()
print(value)
##
##
##print(AtSig)
##print(CtSig)
##
##
##R=AtSig-CtSig
##
##
##print(R)
##
##print(R.mean())
##
ref_arqsaid.write('acertos:'+str(acerto)+'erro:'+str(erro)+'total:'+str((acerto/(acerto+erro)*100)))
ref_arquivo.close()
ref_arqsaid.close()
| [
"sddiegorocha@gmail.com"
] | sddiegorocha@gmail.com |
9dbb60b6eadba133b5ac9e024654e41453f4c5eb | e83f3d46517270a65dc678056351a255479ee761 | /examen/colegio/views/users.py | 17538e2d4d76426ae7ee69bb4ffed078841ddc84 | [] | no_license | albertdepaz/examen | 41627bdfe2628855b2a85bee258d9a2763018f56 | f7102914ea58dfc839b4c4bb16f3fe3110bb7f23 | refs/heads/master | 2021-03-02T14:51:41.477403 | 2020-03-08T19:42:03 | 2020-03-08T19:42:03 | 245,876,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from colegio.serializers.users import (
UserModelSerializer,
UserLoginSerializer,
UserSignUpSerializer,
)
from colegio.models import User
class UserViewSet(mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  mixins.ListModelMixin,
                  viewsets.GenericViewSet):
    """User retrieve/update/list endpoints plus login/signup actions.

    NOTE(review): no permission_classes are set here, so the DRF default
    applies -- confirm login/signup are meant to be reachable that way.
    """

    queryset = User.objects.all()
    serializer_class = UserModelSerializer
    # Users are addressed by username in URLs, not by primary key.
    lookup_field = 'username'

    @action(detail=False, methods=['post'])
    def login(self, request):
        """Validate credentials and return the serialized user."""
        serializer = UserLoginSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        data = {
            'user': UserModelSerializer(user).data,
        }
        return Response(data, status=status.HTTP_201_CREATED)

    @action(detail=False, methods=['post'])
    def signup(self, request):
        """Register a new user and return the serialized user."""
        serializer = UserSignUpSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        data = UserModelSerializer(user).data
        return Response(data, status=status.HTTP_201_CREATED)
"albertcompu5@gmail.com"
] | albertcompu5@gmail.com |
00256e1c2a75d6e2643d1a889bf9b296376e09eb | 6be845bf70a8efaf390da28c811c52b35bf9e475 | /windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/put/type_Params.py | 7be73ef22b4325cbd9bcac9c3611c066cc82f983 | [] | no_license | kyeremalprime/ms | 228194910bf2ed314d0492bc423cc687144bb459 | 47eea098ec735b2173ff0d4e5c493cb8f04e705d | refs/heads/master | 2020-12-30T15:54:17.843982 | 2017-05-14T07:32:01 | 2017-05-14T07:32:01 | 91,180,709 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,221 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
import array
PARAMS_CREATE_FLAG_PERMANENT = 1
class CreateParams:
    """Parameter bag for a file-create request.

    Attribute access is funnelled through ``__getattr__``/``__setattr__``
    so that only the four known fields can ever be read or written; any
    other attribute name raises AttributeError.
    """

    def __init__(self):
        self.__dict__['flags'] = 0
        self.__dict__['writeOffset'] = 0
        self.__dict__['filePath'] = ''
        self.__dict__['provider'] = 0

    def __getattr__(self, name):
        if name in ('flags', 'writeOffset', 'filePath', 'provider'):
            return self.__dict__[name]
        raise AttributeError("Attribute '%s' not found" % name)

    def __setattr__(self, name, value):
        if name in ('flags', 'writeOffset', 'filePath', 'provider'):
            self.__dict__[name] = value
        else:
            raise AttributeError("Attribute '%s' not found" % name)

    def Marshal(self, mmsg):
        """Serialize all fields into *mmsg* as a nested message."""
        from mcl.object.Message import MarshalMessage
        nested = MarshalMessage()
        nested.AddU16(MSG_KEY_PARAMS_CREATE_FLAGS, self.__dict__['flags'])
        nested.AddU64(MSG_KEY_PARAMS_CREATE_WRITE_OFFSET, self.__dict__['writeOffset'])
        nested.AddStringUtf8(MSG_KEY_PARAMS_CREATE_FILE_PATH, self.__dict__['filePath'])
        nested.AddU32(MSG_KEY_PARAMS_CREATE_PROVIDER, self.__dict__['provider'])
        mmsg.AddMessage(MSG_KEY_PARAMS_CREATE, nested)

    def Demarshal(self, dmsg, instance=-1):
        """Populate fields from *dmsg*.

        ``writeOffset`` and ``provider`` are optional in the wire format;
        when absent they keep their current values.
        """
        import mcl.object.Message
        raw = dmsg.FindData(MSG_KEY_PARAMS_CREATE, mcl.object.Message.MSG_TYPE_MSG, instance)
        nested = mcl.object.Message.DemarshalMessage(raw)
        self.__dict__['flags'] = nested.FindU16(MSG_KEY_PARAMS_CREATE_FLAGS)
        try:
            self.__dict__['writeOffset'] = nested.FindU64(MSG_KEY_PARAMS_CREATE_WRITE_OFFSET)
        except:
            pass
        self.__dict__['filePath'] = nested.FindString(MSG_KEY_PARAMS_CREATE_FILE_PATH)
        try:
            self.__dict__['provider'] = nested.FindU32(MSG_KEY_PARAMS_CREATE_PROVIDER)
        except:
            pass
class WriteParams:
    """Parameter bag for one chunk of a file-write request.

    Same restricted-attribute pattern as CreateParams: only the three
    known fields can be read or written.
    """

    def __init__(self):
        self.__dict__['lastData'] = False
        self.__dict__['chunkIndex'] = 0
        self.__dict__['data'] = array.array('B')

    def __getattr__(self, name):
        if name in ('lastData', 'chunkIndex', 'data'):
            return self.__dict__[name]
        raise AttributeError("Attribute '%s' not found" % name)

    def __setattr__(self, name, value):
        if name in ('lastData', 'chunkIndex', 'data'):
            self.__dict__[name] = value
        else:
            raise AttributeError("Attribute '%s' not found" % name)

    def Marshal(self, mmsg):
        """Serialize all fields into *mmsg* as a nested message."""
        from mcl.object.Message import MarshalMessage
        nested = MarshalMessage()
        nested.AddBool(MSG_KEY_PARAMS_WRITE_LAST_DATA, self.__dict__['lastData'])
        nested.AddU32(MSG_KEY_PARAMS_WRITE_CHUNK_INDEX, self.__dict__['chunkIndex'])
        nested.AddData(MSG_KEY_PARAMS_WRITE_DATA, self.__dict__['data'])
        mmsg.AddMessage(MSG_KEY_PARAMS_WRITE, nested)

    def Demarshal(self, dmsg, instance=-1):
        """Populate all fields from *dmsg* (none are optional here)."""
        import mcl.object.Message
        raw = dmsg.FindData(MSG_KEY_PARAMS_WRITE, mcl.object.Message.MSG_TYPE_MSG, instance)
        nested = mcl.object.Message.DemarshalMessage(raw)
        self.__dict__['lastData'] = nested.FindBool(MSG_KEY_PARAMS_WRITE_LAST_DATA)
        self.__dict__['chunkIndex'] = nested.FindU32(MSG_KEY_PARAMS_WRITE_CHUNK_INDEX)
        self.__dict__['data'] = nested.FindData(MSG_KEY_PARAMS_WRITE_DATA)
"kyeremalprime@gmail.com"
] | kyeremalprime@gmail.com |
728e8a2674cf18ba93482cd096615755580e35f0 | c499bf9accc0cf02e025ca8ef524db5c3766c25b | /SN_Fitness/settings.py | 291049bf6a50a751e60233debd94a486666c1879 | [] | no_license | shadman25/SN_FItness | c5819706f60c398b32d607d7dce8324e0a4c64ad | 69bdccedb1665deabb4f2e1ec4f85ea0ba8af8ae | refs/heads/master | 2022-12-11T05:20:23.668358 | 2020-09-02T16:58:25 | 2020-09-02T16:58:25 | 292,338,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | """
Django settings for SN_Fitness project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b)%4--*2kgfu*2-+5(77bvtex^4!6d+0auh*ug1s6!2f8ayk7o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'shah_gym',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SN_Fitness.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'SN_Fitness.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"shadman21178@gmail.com"
] | shadman21178@gmail.com |
4d74cde1dc9d7086b62bd8f6906126a67188d9c9 | 63271db556e4de0b2b8c3782ca6e083290e9c01d | /SHgold/SHgold/middlewares.py | e3bdb4a0c7a4ed6708b3caf3f8b2efb3bdbbf7c7 | [] | no_license | nuoyi-618/scrapy | 09068b37b77a3528f229c8ba368d3c652f651401 | e40f486a6fddbebd27ee1de97c7ff5a4c550f8ce | refs/heads/master | 2020-05-03T14:59:32.492113 | 2019-03-31T13:49:32 | 2019-03-31T13:49:32 | 178,693,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,878 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
import random
class ShgoldSpiderMiddleware(object):
    """Spider middleware scaffold (Scrapy template).

    Bug fix: every ``process_*`` hook below was missing the ``self``
    parameter.  Scrapy invokes these as bound methods, so the response
    would have been bound to ``self`` and every call would have raised
    a TypeError on the remaining arguments.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class my_useragent(object):
    """Downloader middleware that attaches a random User-Agent to every
    outgoing request so crawls are less likely to be blocked by UA
    filtering."""

    # Pool of User-Agent strings to rotate through.  Built once at class
    # definition time instead of being rebuilt on every request.
    USER_AGENT_LIST = [
        'MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23',
        'Opera/9.20 (Macintosh; Intel Mac OS X; U; en)',
        'Opera/9.0 (Macintosh; PPC Mac OS X; U; en)',
        'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
        'Mozilla/4.76 [en_jp] (X11; U; SunOS 5.8 sun4u)',
        'iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20120813 Firefox/16.0',
        'Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)',
        'Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)'
    ]

    def process_request(self, request, spider):
        """Pick a random UA string and set it on the request.

        Bug fix: the header key was 'User_Agent' (underscore), which is a
        different header than the real HTTP 'User-Agent', so the random
        agent was never actually applied.
        """
        agent = random.choice(self.USER_AGENT_LIST)
        request.headers['User-Agent'] = agent
| [
"noreply@github.com"
] | nuoyi-618.noreply@github.com |
2d4409a3fedab8973a82770461cb460e061902fa | 61be51575c0e4a49ca03d12c70b6b616baa934f3 | /Kopanev_Roman_DZ-2/d_z_2_2.py | 93726fe729894b1a0f1bd9d56fb0bbfbad21412a | [] | no_license | RombosK/GB_1824 | 03ebbd6a0ff7f28e6a1fd275d4882c71ccbcbed2 | 502cae8ab75b76183060b9482c7107970983fb34 | refs/heads/main | 2023-09-05T16:12:15.809378 | 2021-11-08T17:55:27 | 2021-11-08T17:55:27 | 417,275,469 | 0 | 0 | null | 2021-11-08T17:57:40 | 2021-10-14T20:37:58 | Python | UTF-8 | Python | false | false | 2,028 | py | # Дан список:
# ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']
# Необходимо его обработать — обособить каждое целое число (вещественные не трогаем) кавычками
# (добавить кавычку до и кавычку после элемента списка, являющегося числом) и дополнить нулём до двух целочисленных разрядов:
# ['в', '"', '05', '"', 'часов', '"', '17', '"', 'минут', 'температура', 'воздуха', 'была', '"', '+05', '"', 'градусов']
# Сформировать из обработанного списка строку:
# в "05" часов "17" минут температура воздуха была "+05" градусов
# Подумать, какое условие записать, чтобы выявить числа среди элементов списка? Как модифицировать это условие для чисел со знаком?
# Примечание: если обособление чисел кавычками не будет получаться - можете вернуться к его реализации позже. Главное: дополнить числа до двух разрядов нулём!
# Tokens of the source sentence; numeric tokens may carry a leading sign.
my_list = ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']
new_list = []
# Quote every integer token and zero-pad its digits to two places.
for i in my_list:
    if i[0] in ('+', '-'):
        # Signed number: pad to width 3 (sign + two digits) so that
        # '+5' becomes '+05'.  Bug fix: the previous zfill(2) left '+5'
        # unchanged because the sign already counts towards the width.
        i = f'"{i.zfill(3)}"'
    elif i[0].isdigit():
        # Unsigned number: pad to two digits ('5' -> '05').
        i = f'"{i.zfill(2)}"'
    new_list.append(i)
print(' '.join(new_list))
| [
"rombos1978@gmail.com"
] | rombos1978@gmail.com |
c81cc8893ba9e4b702c53c3aaab0befb9e7ddad8 | 13ec4cf9351ba12ff51e4c7b35aa0fef6efb2413 | /transform_command.py | 878576d770a16b67534644791e9e239cb23acd0a | [] | no_license | chriswycoff/image_transformer | a43fa274894dc7b01476d27571b505b128770813 | 011dba3584c7dd44dd45fc1d3de750062997d30d | refs/heads/main | 2023-07-14T14:35:50.543125 | 2021-08-18T01:34:36 | 2021-08-18T01:34:36 | 370,458,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,412 | py | # import subprocess
import uuid
from numpy import invert
import moviefy
import makestl
import add_effects
import uuid
import os
import sys
import shutil
import time
from PIL import Image
def transform_image(image_path,double=False,frames=10, cleanup=True, make_mov=True):
    """Convert a 2-D image into a 3-D STL model, render frames of it, and
    optionally assemble them into a movie.

    :param image_path: path of the source image; also used to texture the
        rendered model.
    :param double: if True use the two-sided renderer (create_movie4),
        otherwise the single-sided one (create_movie2).
    :param frames: number of frames to render.
    :param cleanup: delete the intermediate STL file and the per-run frame
        directory when finished.
    :param make_mov: assemble the rendered frames into a movie via moviefy.
    """
    # print("hello world")
    # Unique id names the intermediate STL file and the frame directory.
    unique_id = uuid.uuid4()
    img_file = image_path
    just_name = os.path.basename(img_file).split(".")[0]
    # NOTE(review): this resizes the hard-coded ./test/imageToSave.jpeg,
    # not `image_path` -- looks like a leftover from testing (the command
    # variant below uses ./images/imageToSave.jpeg); confirm intent.
    image = Image.open("./test/imageToSave.jpeg")
    new_image = image.resize((800, 800))
    image.close()
    new_image.save("./test/imageToSave.jpeg")
    new_image.close()
    new_file = "./images/images_3d/"+ str(unique_id) + ".stl"
    makestl.make_stl(img_file, new_file)
    time.sleep(0.1) #helps make sure program does not bug out
    if double:
        # add_effects.create_movie3(img_file,new_file,unique_id=unique_id,frames=frames)
        add_effects.create_movie4(img_file,new_file,unique_id=unique_id,frames=frames)
    else:
        add_effects.create_movie2(img_file,new_file,unique_id=unique_id,frames=frames)
    time.sleep(0.1)
    if make_mov:
        moviefy.moviefy(unique_id,just_name,frames)
    if cleanup:
        # Remove the STL and the per-run rendered-frame directory.
        os.remove(new_file)
        shutil.rmtree('./images/images_effected/'+ str(unique_id))
def transform_image_command(image_path,double=False,frames=10, cleanup=True, make_mov=False,just_see=True,invert=False,filter=2,\
    background_image = None,audio=False):
    """Command-line variant of transform_image: image -> STL -> interactive
    preview, or rendered frames / a movie.

    :param image_path: path of the source image.
    :param double: render both sides of the model (create_movie3/4).
    :param frames: number of frames to render.
    :param cleanup: delete the intermediate STL (and the frame directory,
        unless only previewing) when done.
    :param make_mov: assemble rendered frames into a movie via moviefy.
    :param just_see: open an interactive pyvista preview instead of
        rendering frames.
    :param invert: forwarded to makestl.make_stl -- presumably inverts the
        heightmap; confirm against makestl.
    :param filter: forwarded to makestl.make_stl -- TODO confirm semantics
        (smoothing level?).  NOTE(review): shadows the builtin `filter`.
    :param background_image: optional backdrop used by the two-sided
        renderer; None for no background.
    :param audio: forwarded to moviefy.moviefy when a movie is produced.
    """
    # print("hello world")
    unique_id = uuid.uuid4()
    img_file = image_path
    just_name = os.path.basename(img_file).split(".")[0]
    # filename = "./test/imageToSave.jpeg"
    # with open(filename, 'wb') as f:
    #     f.write(imgdata)
    # Work on a fixed-name copy of the input, resized to 800x800.
    shutil.copyfile(image_path, "./images/imageToSave.jpeg")
    image = Image.open("./images/imageToSave.jpeg")
    new_image = image.resize((800, 800))
    image.close()
    new_image.save("./images/imageToSave.jpeg")
    new_image.close()
    new_file = "./images/images_3d/"+ str(unique_id) + ".stl"
    makestl.make_stl(img_file, new_file,invert=invert,filter=filter)
    time.sleep(0.2) #helps make sure program does not bug out
    if just_see:
        # add_effects.pyvista_command(img_file,new_file,unique_id=unique_id,frames=frames)
        add_effects.pyvista_command(new_file,"./images/imageToSave.jpeg", None)
    else:
        if double:
            if background_image != None:
                add_effects.create_movie4(img_file,new_file,unique_id=unique_id,frames=frames,\
                    background_image=background_image)
                # add_effects.create_movie5(img_file,new_file,unique_id=unique_id,frames=frames,\
                #     background_image=background_image)
                # add_effects.create_movie6(img_file,new_file,unique_id=unique_id,frames=frames,\
                #     background_image=background_image)
            else:
                add_effects.create_movie3(img_file,new_file,unique_id=unique_id,frames=frames)
        else:
            add_effects.create_movie2(img_file,new_file,unique_id=unique_id,frames=frames)
    time.sleep(0.1)
    if make_mov:
        moviefy.moviefy(unique_id,just_name,frames,audio=audio)
    if cleanup:
        os.remove(new_file)
        if not just_see:
            # The frame directory only exists when frames were rendered.
            shutil.rmtree('./images/images_effected/'+ str(unique_id))
# TESTS
if __name__ == "__main__":
    # Usage: python transform_command.py <image_path>
    # try:
    #     image_path = "./images/images_2d/cool_lion.jpeg"
    #     transform_image(image_path,frames=1,cleanup=True)
    #     image_path = "./images/images_2d/snake.jpeg"
    #     transform_image(image_path,frames=1,cleanup=True,make_mov=True)
    #     image_path = "./images/images_2d/nora.jpeg"
    #     transform_image(image_path,frames=1,cleanup=True,make_mov=True)
    # except:
    #     print("something went wrong likely test files are missing")
    # image_path = "./test/imageToSave.jpeg"
    # Backdrop used by the two-sided renderer.
    background_image = "./images/images_2d/backgrounds/background_blobs.jpeg" # set to None if no background wanted
    # background_image = None
    image_path = sys.argv[1]
    # transform_image_command(image_path,frames=360,cleanup=True,make_mov=True,double=True,\
    #     filter=2,invert=False,just_see=False,background_image=background_image,audio=None)
transform_image_command(image_path,frames=360,cleanup=False,make_mov=False,double=True,\
filter=8,invert=False,just_see=True,background_image=background_image,audio=None) | [
"wycoffc@air-weigh.com"
] | wycoffc@air-weigh.com |
b40cba513f23b058e0c54a3aead569ab2266b80f | f411224bf83c7b8c372bd693b61f6baa5a8cc121 | /chapter09/simple_button.py | 3ca0ce8327f985523f7994ffc7a13b90952fb829 | [
"MIT"
] | permissive | SpinStabilized/bbb-primer | c34da0bcbb06e16247e1690d66644c7d3cf061b7 | c8af2c488e2b0923f1fcce9010f82b5feefd926a | refs/heads/master | 2021-01-10T15:45:38.942615 | 2015-11-16T17:20:05 | 2015-11-16T17:20:05 | 46,019,191 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | import Adafruit_BBIO.GPIO as GPIO
import time
# Define program constants
BUTTON_PIN = 'P9_11'  # BeagleBone header pin wired to the push button
OFF = 0
ON = 1

# Configure the GPIO pin and set the initial state of variables to track the
# state of the button.
GPIO.setup(BUTTON_PIN, GPIO.IN)
button_state_old = OFF
button_state_new = OFF

# print out a nice message to let the user know how to quit.
print('Starting, press <control>-c to quit.\n')

# Execute until a keyboard interrupt
try:
    while True:
        # Check the state of the pin. If it is different than the last state,
        # print a message.
        button_state_new = GPIO.input(BUTTON_PIN)
        if button_state_new != button_state_old:
            # NOTE(review): the messages assume a press reads as 0 (OFF),
            # i.e. active-low wiring -- confirm against the button circuit.
            if button_state_new == OFF:
                print('Button transitioned from off to on.')
            else:
                print('Button transitioned from on to off.')
        # Update the stored button state and then wait a tenth of a second.
        # (The 0.1 s poll interval also provides crude debouncing.)
        button_state_old = button_state_new
        time.sleep(0.1)
except KeyboardInterrupt:
    # Release the GPIO configuration so the pin is left in a clean state.
    GPIO.cleanup()
| [
"bjmclaughlin@gmail.com"
] | bjmclaughlin@gmail.com |
80bc187797f793f00432aa1bc05b7ebc6ccac457 | 84caa57bf6068e5077a303d4f80a26db50bfdfdd | /app.py | 40e21978a6ac18c96623faa338cf55d225b33551 | [] | no_license | akhilliker/HW15_BellyButtonBio_Hilliker | 037608579cd3cd3140b8e446747a56d1d67f6993 | b704cd1cea1bfa546dfbd7205a6d69bf098afcab | refs/heads/master | 2020-05-22T01:02:13.120225 | 2019-05-13T00:54:48 | 2019-05-13T00:54:48 | 186,184,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,901 | py | import os
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)

#################################################
# Database Setup
#################################################
# SQLite belly-button biodiversity database bundled with the app.
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db/bellybutton.sqlite"
db = SQLAlchemy(app)

# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(db.engine, reflect=True)

# Save references to each table (automap classes are named after tables)
Samples_Metadata = Base.classes.sample_metadata
Samples = Base.classes.samples
@app.route("/")
def index():
    """Return the homepage."""
    # All plotting happens client-side; the server only ships the template.
    return render_template("index.html")
@app.route("/names")
def names():
    """Return a list of sample names."""
    # Pull the whole samples table into a DataFrame via SQLAlchemy.
    query = db.session.query(Samples).statement
    samples_df = pd.read_sql_query(query, db.session.bind)
    # The first two columns (otu_id, otu_label) are not sample names.
    sample_columns = list(samples_df.columns)[2:]
    return jsonify(sample_columns)
@app.route("/metadata/<sample>")
def sample_metadata(sample):
    """Return the MetaData for a given sample."""
    fields = (
        Samples_Metadata.sample,
        Samples_Metadata.ETHNICITY,
        Samples_Metadata.GENDER,
        Samples_Metadata.AGE,
        Samples_Metadata.LOCATION,
        Samples_Metadata.BBTYPE,
        Samples_Metadata.WFREQ,
    )
    results = db.session.query(*fields).filter(Samples_Metadata.sample == sample).all()

    # Build a dict from the matching row(s); like the column order above,
    # the key order is fixed, and the last row wins if several match.
    keys = ("sample", "ETHNICITY", "GENDER", "AGE", "LOCATION", "BBTYPE", "WFREQ")
    sample_metadata = {}
    for row in results:
        sample_metadata = dict(zip(keys, row))

    print(sample_metadata)
    return jsonify(sample_metadata)
@app.route("/samples/<sample>")
def samples(sample):
    """Return `otu_ids`, `otu_labels`,and `sample_values`."""
    query = db.session.query(Samples).statement
    samples_df = pd.read_sql_query(query, db.session.bind)

    # Keep only the rows where this sample's count is above 1.
    row_mask = samples_df[sample] > 1
    filtered = samples_df.loc[row_mask, ["otu_id", "otu_label", sample]]

    # Shape the response for the front-end charts.
    payload = {
        "otu_ids": filtered.otu_id.values.tolist(),
        "sample_values": filtered[sample].values.tolist(),
        "otu_labels": filtered.otu_label.tolist(),
    }
    return jsonify(payload)
if __name__ == "__main__":
    # Honor the PORT environment variable (e.g. on Heroku); default to 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
    # app.run()
| [
"ahillike@richmond.edu"
] | ahillike@richmond.edu |
6140b4c90f7393bc23a66c9441590e412679a9c9 | 50039604efee0eb882b11ad67f8e92282d4d684d | /geomet/geopackage.py | 1346375fb80441085058cf68092b877c1d45fe87 | [
"Apache-2.0"
] | permissive | tomplex/geomet | 0074f8c15073730638326f116ecf7b0a8a541ede | f57a2302d738ef8af694c8dde09e95d419457d9e | refs/heads/master | 2021-07-14T08:31:22.586384 | 2020-06-27T09:30:28 | 2020-06-27T09:30:28 | 158,293,097 | 0 | 0 | Apache-2.0 | 2020-03-10T00:59:33 | 2018-11-19T21:35:40 | Python | UTF-8 | Python | false | false | 12,041 | py | # Copyright 2020 Tom Caruso & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct as _struct
from geomet.util import (
as_bin_str as _as_bin_str,
take as _take,
endian_token as _endian_token
)
from geomet import wkb as _wkb
def dump(obj, dest_file, big_endian=True):
    """
    Serialize a GeoJSON-like `dict` to GeoPackage binary and write the
    result to `dest_file`.

    :param dict obj:
        A GeoJSON-like dictionary containing at least the keys 'type'
        and 'coordinates'.
    :param dest_file:
        Open and writable file-like object.
    :param bool big_endian:
        specify endianess of the dumped object.
    :return:
    """
    encoded = dumps(obj, big_endian)
    dest_file.write(encoded)
def load(source_file):
    """
    Read GeoPackage binary (a byte string) from ``source_file`` and
    parse it into a GeoJSON `dict`.

    :param source_file:
        Open and readable file-like object.
    :return:
        A GeoJSON `dict` representing the geometry read from the file.
    """
    raw = source_file.read()
    return loads(raw)
def dumps(obj, big_endian=True):
    """
    Dump a GeoJSON-like dict to a GeoPackage bytestring.

    If the dict contains a top-level 'meta' key like so:

    ```
    'meta': {'srid': 4326}
    ```

    then the srid will be added to the geopackage header, but *not*
    to the WKB geometry header.

    If the dict contains a top-level 'bbox' key like so:

    ```
    'bbox': [0, 0, 3, 3]
    ```

    Then an envelope will be added to the geopackage header
    with this information.

    If the geometry's coordinates are empty (an empty list)
    then the geopackage header's "empty" flag will be set,
    denoting that this geometry has no coordinates.

    Please note that while this library can parse geopackages
    with a mixed byte-order in the header, it will only produce
    blobs with consistent byte order (albeit properly marked as such).
    That means you cannot product a geopackage with e.g. little-endian
    header and big-endian WKB geometry.

    :param dict obj:
        The geojson geometry to dump
    :param bool big_endian:
        if True, the geopackage binary will use big-endian
        byte order, little-endian otherwise.
    :return bytes:
        bytestring representing the geometry in geopackage
        format.
    """
    # Header (plus optional envelope) first...
    header = _build_geopackage_header(obj, not big_endian)
    # ...then the geometry as plain WKB with no SRID/meta of its own:
    # the geopackage header is the single source of truth for the SRID.
    result = _wkb._dumps(obj, big_endian, include_meta=False)
    return header + result
def loads(string):
    """
    Construct a GeoJSON `dict` from geopackage (string).

    This function strips the geopackage header from the
    string and passes the remaining WKB geometry to the
    `geomet.wkb.loads` function.

    The envelope, if present, is added to the GeoJSON as
    a key called 'bbox' as per the GeoJSON spec, [1].

    If an SRID is specified in the geopackage header
    AND the wkb header, the SRID in the geopackage header
    will take precedence and will replace that SRID
    in the returned dict.

    [1] https://tools.ietf.org/html/rfc7946#section-5

    :param bytes string:
        geopackage byte string.
    :return dict:
        GeoJSON represented the parsed geopackage binary.
    """
    string = iter(string)

    # Consume and validate the fixed 8-byte header (magic/version/flags/srid).
    header = _as_bin_str(_take(_GeoPackage.HEADER_LEN, string))
    _check_is_valid(header)
    g, p, version, empty, envelope_indicator, is_little_endian, srid = _parse_header(header)

    # Anything between the fixed header and the WKB body is the envelope.
    wkb_offset = _get_wkb_offset(envelope_indicator)
    left_to_take = (wkb_offset - _GeoPackage.HEADER_LEN)
    envelope_data = _as_bin_str(_take(left_to_take, string))

    if envelope_data:
        envelope = _parse_envelope(envelope_indicator, envelope_data, is_little_endian)

    # The remainder of the byte stream is standard WKB.
    result = _wkb.loads(string)
    if srid:
        # The geopackage header's SRID overrides any SRID from the WKB.
        result['meta'] = {'srid': int(srid)}
        result['crs'] = {
            'type': 'name',
            'properties': {'name': 'EPSG%s' % srid},
        }

    if envelope_data:
        result['bbox'] = envelope

    return result
class _GeoPackage:
    """
    Much more information on geopackage structure
    can be found here: http://www.geopackage.org/spec/#gpb_format
    """
    # The ascii letter 'G'
    MAGIC1 = 0x47
    # The ascii letter 'P'
    MAGIC2 = 0x50
    # Geopackage binary version byte; always 0 in this format version.
    VERSION1 = 0x00
    # Fixed header size: magic (2) + version (1) + flags (1) + srid (4).
    HEADER_LEN = 8
    # struct format of the fixed header: four unsigned bytes + one uint32.
    HEADER_PACK_FMT = "BBBBI"
    # Envelope sizes in bytes: 4 / 6 / 8 doubles of 8 bytes each.
    ENVELOPE_2D_LEN = 32
    ENVELOPE_3D_LEN = 48
    ENVELOPE_4D_LEN = 64
    # Masks applied to the "flags" byte (see _parse_flags / _build_flags):
    ENVELOPE_MASK = 0b00001111    # bits 3-1: envelope indicator
    EMPTY_GEOM_MASK = 0b00011111  # bit 4: empty-geometry flag
    ENDIANNESS_MASK = 0b00000001  # bit 0: little-endian flag
# map the "envelope indicator" integer we get out of the geopackage header
# to the dimensionality of the envelope.
# more info here: http://www.geopackage.org/spec/#gpb_format
# in the "flags" section, bits 3, 2, 1.
# (Values are the number of doubles stored: 2*n for an n-dimensional bbox.
# Indicators 2 and 3 both carry six doubles -- XYZ vs. XYM envelopes.)
_indicator_to_dim = {
    0: 0,
    1: 4,
    2: 6,
    3: 6,
    4: 8,
}

# Map the dimensionality of our envelope to the indicator
# integer we will use in the geopackage binary header.
# because we have no way to tell between Z and M values,
# if the geometry has 3 dimensions we default to assume Z.
_dim_to_indicator = {
    0: 0,
    4: 1,
    6: 2,
    8: 4
}
def is_valid(data):
    """
    Check whether ``data`` looks like a valid geopackage geometry.
    Input can be either the full geometry or just the header.

    :param bytes data:
        bytes representing the geopackage binary.
    :return (bool, str):
        Is the geopackage valid; if not, a string describing why.
    """
    g, p, version, _, envelope_indicator, _, _ = _parse_header(data[:8])
    if g != _GeoPackage.MAGIC1 or p != _GeoPackage.MAGIC2:
        return False, "Missing Geopackage header magic bytes"
    if version != _GeoPackage.VERSION1:
        return False, "Geopackage version must be 0"
    if not 0 <= envelope_indicator <= 4:
        return False, "Envelope indicator must be between 0-4"
    return True, ""
def _header_is_little_endian(header):
    """
    Report whether the geopackage header declares little-endian byte
    order.  Either the whole binary blob or just the header may be
    passed in.

    :param bytes header:
        geopackage header or binary blob
    :return bool: is the header little endian
    """
    # The flags byte is always the fourth byte of the header.
    flags = _struct.unpack("B", header[3:4])[0]
    return flags & _GeoPackage.ENDIANNESS_MASK
def _parse_header(header):
    """
    Unpack every field stored in the geopackage header, including the
    "magic" GP bytes, so callers can confirm the blob is validly
    formed.  The full binary blob may also be passed in.

    :param header:
        the header or the full geometry.
    :return 7-tuple:
        (magic1, magic2, version, empty, envelope_indicator,
         is_little_endian, srid)
    """
    is_little_endian = _header_is_little_endian(header)
    fmt = _endian_token(is_little_endian) + _GeoPackage.HEADER_PACK_FMT
    magic1, magic2, version, flags, srid = _struct.unpack(
        fmt, header[:_GeoPackage.HEADER_LEN])
    empty, envelope_indicator, endianness = _parse_flags(flags)
    return (magic1, magic2, version, empty,
            envelope_indicator, endianness, srid)
def _parse_flags(flags):
    """
    Pull the empty-geometry bit, the envelope indicator and the
    endianness bit out of the header's "flags" byte.

    Much more info can be found in the documentation [1].

    [1] http://www.geopackage.org/spec/#gpb_format

    :param byte flags:
        The "flags" byte of a geopackage header.
    :return tuple: (empty, envelope_indicator, endianness)
    """
    endian_bit = flags & _GeoPackage.ENDIANNESS_MASK
    envelope_bits = (flags & _GeoPackage.ENVELOPE_MASK) >> 1
    empty_bit = (flags & _GeoPackage.EMPTY_GEOM_MASK) >> 4
    return empty_bit, envelope_bits, endian_bit
def _build_flags(empty, envelope_indicator, is_little_endian=1):
"""
Create the "flags" byte which goes into
the geopackage header. Much more info
can be found in the documentation [1].
[1] http://www.geopackage.org/spec/#gpb_format
:param int empty:
0 or 1 indicating whether the geometry is empty.
True and False also work as expected.
:param int envelope_indicator:
indicates the dimensionality of the envelope.
:param int is_little_endian:
0 or 1 (or False / True) indicating
whether the header should be
little-endian encoded.
:return byte:
geopackage header flags
"""
flags = 0b0
if empty:
flags = (flags | 1) << 3
if envelope_indicator:
flags = flags | envelope_indicator
return (flags << 1) | is_little_endian
def _build_geopackage_header(obj, is_little_endian):
    """
    Create the geopackage header for the input object.

    Looks for a 'bbox' key on the geometry to use
    for an envelope, and a 'meta' key with an
    SRID to encode into the header.

    :param dict obj:
        a geojson object
    :param bool is_little_endian:
        which endianness to use when
        encoding the data.
    :return bytes: geopackage header.
    """
    # Collect geometry metadata.
    empty = 1 if len(obj['coordinates']) == 0 else 0
    envelope = obj.get('bbox', [])
    srid = obj.get('meta', {}).get('srid', 0)

    # The envelope length (2*n doubles) determines the indicator value;
    # any other length is malformed.
    try:
        envelope_indicator = _dim_to_indicator[len(envelope)]
    except KeyError:
        raise ValueError("Bounding box must be of length 2*n where "
                         "n is the number of dimensions represented "
                         "in the contained geometries.")

    pack_args = [
        _GeoPackage.MAGIC1,
        _GeoPackage.MAGIC2,
        _GeoPackage.VERSION1,
        # This looks funny, but _build_flags wants a 1 or 0 for
        # "little endian" because it uses it to `or` with the bits.
        # Conveniently, in Python, False == 0 and True == 1, so
        # we can pass the boolean right in and it works as expected.
        _build_flags(empty, envelope_indicator, is_little_endian),
        srid
    ]
    pack_fmt = _endian_token(is_little_endian) + _GeoPackage.HEADER_PACK_FMT

    # This has no effect if we have a 0 envelope indicator.
    pack_fmt += ('d' * _indicator_to_dim[envelope_indicator])
    pack_args.extend(envelope)

    return _struct.pack(pack_fmt, *pack_args)
def _check_is_valid(data):
    """
    Raise ``ValueError`` unless ``data`` is a valid geopackage
    header/blob.

    :param bytes data: Geopackage data or header.
    :return None:
    """
    ok, reason = is_valid(data)
    if ok:
        return
    raise ValueError("Could not read Geopackage geometry "
                     "because of errors: " + reason)
def _get_wkb_offset(envelope_indicator):
    """
    Get the full byte offset at which the WKB geometry lies
    in the geopackage geometry.

    :param int envelope_indicator:
        indicates the dimensionality of the envelope.
    :return int:
        number of bytes until the beginning of the
        WKB geometry.
    """
    # Each envelope value is an 8-byte double, which happens to be the
    # same size as the fixed header (HEADER_LEN == 8), so the offset is
    # header + 8 * number_of_envelope_values.
    base_len = _GeoPackage.HEADER_LEN
    return (base_len * _indicator_to_dim[envelope_indicator]) + base_len
def _parse_envelope(envelope_indicator, envelope, is_little_endian):
    """
    Parse a geopackage envelope bytestring into an n-tuple
    of floats.

    :param int envelope_indicator:
        indicates the dimensionality of the envelope.
    :param bytes envelope:
        Bytestring of the envelope values.
    :param bool is_little_endian:
        how to pack the bytes in the envelope.
    :return tuple[float]: Geometry envelope.
    """
    # One 'd' (8-byte double) per stored envelope value, read in the byte
    # order the header declared.
    pack_fmt = _endian_token(is_little_endian)
    pack_fmt += ('d' * _indicator_to_dim[envelope_indicator])
    return _struct.unpack(pack_fmt, envelope)
| [
"carusot42@gmail.com"
] | carusot42@gmail.com |
637cbf3861b3eca01ecc60be273a349a2a1888c1 | 038df75bcb74d2118cf7b22f348239847045824f | /geometryCalculatorTest.py | 04ab01b187656f3a1673a416d55d8015fbfaa3f0 | [] | no_license | keegangunkel/SoftwareTestingLab2 | bbf22feae2ec0da415ed6be6636d031a8413d103 | 45e2284061c6d07916360724f664f10326053f4a | refs/heads/master | 2023-08-04T19:47:08.622823 | 2021-10-04T14:51:37 | 2021-10-04T14:51:37 | 413,226,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | import unittest
import geometryCalculator
class geometryCalculatorTest(unittest.TestCase):
    """Unit tests for the geometryCalculator module."""

    # passing tests
    def test_volume1(self):
        # Use unittest's assertion helper instead of a bare `assert`:
        # bare asserts are stripped when Python runs with -O and give
        # poorer failure messages.
        self.assertTrue(geometryCalculator.pick(8))
if __name__ == '__main__':
unittest.main() | [
"gunkeec@dunwoody.edu"
] | gunkeec@dunwoody.edu |
13915155f7c20e488e358ce9a8fc7c78b8049d80 | 299fe2ca879e509798e95c00b7ba33914031f4a7 | /eruditio/shared_apps/django_userhistory/userhistory.py | 10aad86e7ff44123a9ea653ae8ca81813915a013 | [
"MIT"
] | permissive | genghisu/eruditio | dcf2390c98d5d1a7c1044a9221bf319cb7d1f0f6 | 5f8f3b682ac28fd3f464e7a993c3988c1a49eb02 | refs/heads/master | 2021-01-10T11:15:28.230527 | 2010-04-23T21:13:01 | 2010-04-23T21:13:01 | 50,865,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | from django_userhistory.models import UserTrackedContent
class UserHistoryRegistry(object):
    """
    Registry for UserHistory handlers. Necessary so that only one
    receiver is registered for each UserTrackedContent object.

    The constructor eagerly registers a handler for every
    UserTrackedContent row in the database, so instantiating this class
    touches the database.
    """
    def __init__(self):
        # content_name -> ContentType of everything registered so far.
        self._registry = {}
        # content_name -> handler instance.
        self._handlers = {}
        user_tracked_contents = UserTrackedContent.objects.all()
        for content in user_tracked_contents:
            self.register(content.content_type, content.action)

    def get_handler(self, content_name):
        """
        Attempt to get a handler for target content type, based
        on the following naming convention.

        content_type.model_class()._meta.db_table as StudlyCaps + Handler

        Falls back to handlers.BaseUserHistoryHandler when no class with
        that name exists.
        """
        # Imported inside the method -- presumably to avoid a circular
        # import at module load time; confirm against
        # django_userhistory.handlers.
        import django_userhistory.handlers as handlers

        def to_studly(x):
            # e.g. "user_profile" -> "UserProfile"
            return "".join([token.capitalize() for token in x.split("_")])

        handler_class = getattr(handlers,
                                "%sHandler" % (to_studly(content_name)),
                                handlers.BaseUserHistoryHandler)
        return handler_class

    def register(self, content_type, action):
        """
        Registers a handler from django_userhistory.handlers with the target
        content type.  Registering the same content type twice is a no-op.
        """
        content_name = content_type.model_class()._meta.db_table
        if not content_name in self._registry.keys():
            HandlerClass = self.get_handler(content_name)
            handler = HandlerClass(content_type, action)
            self._registry[content_name] = content_type
            self._handlers[content_name] = handler
user_history_registry = UserHistoryRegistry() | [
"genghisu@6a795458-236b-11df-a5e4-cb4ff25536bb"
] | genghisu@6a795458-236b-11df-a5e4-cb4ff25536bb |
0faf97e1dd08feeca7040dd8255d1e5a40957c76 | 0fe4e9e590177a9ea3cd87b58cd1a49b38563695 | /recursion/two_number_product.py | a465f9628beb83cacbaf192b7ef1d844ac04788f | [] | no_license | Neveon/python-algorithms | 55423404982b4e6410906c677ab823407f311062 | 2a5722cebc84fb96793eb87e1d84bbe18a9da171 | refs/heads/master | 2020-07-15T13:26:28.391543 | 2019-09-04T18:32:12 | 2019-09-04T18:32:12 | 205,572,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # Given two numbers, find their product using recursion
# Demo operands multiplied by the call at the bottom of the file.
y = 3000
x = 500
def product_recursion(x, y):
    """Return x * y via repeated addition, implemented recursively.

    Recursion depth is bounded by the smaller operand's magnitude.
    Bug fix: negative operands previously recursed forever, because a
    negative y is decremented but can never reach 0; the sign is now
    normalized onto the result first.
    """
    # Normalize the sign so the countdown below terminates.
    if y < 0:
        return -product_recursion(x, -y)
    # Cut down on total number of recursive calls: count on the smaller operand.
    if x < y:
        return product_recursion(y, x)
    if y == 0:
        return 0
    else:
        return x + product_recursion(x, y - 1)
print(product_recursion(y,x)) | [
"neil.tellez@gmail.com"
] | neil.tellez@gmail.com |
cf7594d16439bef485cf4bb9c072a01bf8bafede | 3ccd609f68016aad24829b8dd3cdbb535fb0ff6d | /python/bpy/types/LineStyleColorModifier_Curvature_3D.py | 92d99c6cbf2195f90c35af6beed12322fa7454ae | [] | no_license | katharostech/blender_externs | 79b2eed064fd927e3555aced3e2eb8a45840508e | fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d | refs/heads/master | 2020-04-11T14:00:29.393478 | 2018-10-01T00:40:51 | 2018-10-01T00:40:51 | 161,838,212 | 1 | 1 | null | 2018-12-14T20:41:32 | 2018-12-14T20:41:32 | null | UTF-8 | Python | false | false | 50 | py | LineStyleColorModifier_Curvature_3D.type = None
| [
"troyedwardsjr@gmail.com"
] | troyedwardsjr@gmail.com |
e589e9b8d3a9feebdb918b5bc6c69646e2a2bba0 | 911d3ffa7f6687b7b2d5609f4d7bb1f907f1703a | /Conditional Statements - More Exercises/06. Pets.py | d518cc7b89d6286ab3fc57f9402ad4d4aa37db01 | [] | no_license | ivan-yosifov88/python_basics | 923e5ba5dcdc5f2288f012eeb544176d1eb964e9 | ee02f1b7566e49566f15c4285d92b04f8fa6a986 | refs/heads/master | 2023-03-05T21:49:24.191904 | 2021-02-24T12:36:03 | 2021-02-24T12:36:03 | 341,581,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from math import floor, ceil
# Days the owner is away, and the food available (in kilograms).
number_of_days = int(input())
left_food = int(input())
# Daily portions: the turtle's food is in grams (converted below);
# dog and cat food are presumably already in kilograms -- only the
# turtle portion is divided by 1000.  TODO confirm against the task.
dog_food = float(input())
cat_food = float(input())
turtle_food = float(input())

# Total consumption over the whole period, in kilograms.
kilogram_food_eaten = number_of_days * (dog_food + cat_food + turtle_food / 1000)
difference = abs(left_food - kilogram_food_eaten)
if left_food >= kilogram_food_eaten:
    # Leftover is rounded down: partial kilos don't count as "left".
    print(f"{floor(difference)} kilos of food left.")
else:
    # Shortfall is rounded up: any fraction requires a whole extra kilo.
    print(f"{ceil(difference)} more kilos of food are needed.")
| [
"ivan.yosifov88gmail.com"
] | ivan.yosifov88gmail.com |
4f1b561082a6ab9c1a9ce424e98e3692546d12a0 | 1bec07c5175cc8011245c09b9db619423b0cc295 | /main.py | 43d48b62a1680b351f7e18ea127ecf102b1c977c | [] | no_license | saferq/fill_docs | 2d858df00af9ebd82785e378303a7b9713c6f94d | 989de69cc4c24da5e8bc9277023be112e3c145dc | refs/heads/main | 2023-05-05T23:10:32.182954 | 2021-05-20T16:19:21 | 2021-05-20T16:19:21 | 366,701,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | # import os
from pprint import pprint
from backend import afd_json, afd_ggl, afd_pandas, afd_docx, afd_helps
class Main():
    """Interactive console tool: reads rows from a Google spreadsheet and
    fills a docx template for each requested row."""

    def __init__(self):
        # os.system("mode con cols=80 lines=30")
        print("0 - для выхода")
        # Load the configuration from the JSON file.
        self.config = afd_json.WorkWithJson().get_data()
        # Open the Google Sheets client with that configuration.
        self.ggl = afd_ggl.GoogleSheet(self.config)
        self.help = afd_helps.HelpsMetods()

    def main(self):
        """Prompt loop: '0' exits, 'help' prints usage, anything else is
        parsed as a list of row numbers to process."""
        while True:
            print("""\nВвести номера строк:""")
            a = input("")
            if a == '0':
                # os.system("cls")
                break
            # Download the spreadsheet.
            table = self.ggl.get_values(self.config['table_name'])
            # Wrap it in a pandas DataFrame.
            pd = afd_pandas.WorkWithPandas()
            df_vk = pd.create_df(date_df=table, row_name=self.config['row_tags']-1)
            if a == 'help':
                print('''\nПомощь в программе \nтаки так \nи вот так вот''')
            else:
                list_rows = self.help.convet_text_in_numbers(a)
                for row in list_rows:
                    vk_dict = pd.convert_row_to_dict(df_vk, row)
                    # pprint(vk_dict)
                    # Fill the docx template for this row.
                    doc = afd_docx.WorkWithDocx()
                    doc.fill_docx(row, vk_dict)
# Run the interactive filler when executed as a script.
if __name__ == '__main__':
    go = Main()
    go.main()
| [
"safer88q@gmail.com"
] | safer88q@gmail.com |
0b198859239f6fee3b3ed973552e4df2c76aa7fb | 2de1b7395c59a3b3fdc65fe4b3cb2c52c5e7ecb9 | /Arcade/Intro/isLucky.py | 133c3163afbe666761d12d79318dd9f845ba9e65 | [] | no_license | sree-varma/CodeSignal | 1a5fbcfd8aa0a37d909838b4eba40143921ebc92 | 6cf84d3749e09d132ce93ae9bb675d9882017521 | refs/heads/master | 2023-07-29T19:46:57.385738 | 2021-09-17T11:57:22 | 2021-09-17T11:57:22 | 403,336,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | """
Ticket numbers usually consist of an even number of digits.
A ticket number is considered lucky if the sum of the first half of the digits is equal to the sum of the second half.
Given a ticket number n, determine if it's lucky or not.
"""
def isLucky(n):
    """Return True when the ticket number has an even digit count and the
    first half of its digits sums to the same value as the second half."""
    digits = [int(ch) for ch in str(n)]
    if len(digits) % 2 != 0:
        # An odd number of digits cannot be split into two equal halves.
        return False
    half = len(digits) // 2
    return sum(digits[:half]) == sum(digits[half:])
| [
"noreply@github.com"
] | sree-varma.noreply@github.com |
1a746154d6b61411fc15a455190d549aa0d207bb | 14ac71c2e0028e21c8930ec3d4186535f41b31c5 | /bid_tender/bid_tender/provinces/base/task_schedule.py | 39562055f0e4478bcab2bc0e5e7afb8d2880f60e | [] | no_license | gaoming-123/myspiders | 042aaa7a5058324c23720237147803606f84db69 | e3c31729f43a7f4aff2e57f588d9de31fa694ee2 | refs/heads/master | 2020-06-14T00:16:48.424610 | 2019-10-16T15:04:55 | 2019-10-16T15:04:55 | 194,833,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | __all__=['province_weight']
from bid_tender.task.task_config import PROVINCES_DICT

# ====== Full-crawl switch ======
# Read from the global config; default to a full crawl when the setting is
# missing (handy for development and testing).
try:
    from bid_tender.task.task_config import base_FIRST_CRAWL
except ImportError:  # narrowed from a bare `except:` — only a missing setting triggers the default
    base_FIRST_CRAWL = True
# =====================
# Province abbreviation and request weight.
# PROVINCES_DICT.get() returns None for an unknown key, so the tuple
# unpacking raises TypeError; fall back to neutral values in that case.
try:
    province_name, province_weight = PROVINCES_DICT.get('省份名')
except TypeError:
    province_name, province_weight = 0, 0

# Crawl-control parameters for the handler tasks.
# Daily tasks are limited by (page count, day window, or both); the control
# logic itself lives in the list-page handler functions.
base_task_config = {
    # Public resource trading site ("jy")
    'jy_list_parse': {
        # max number of list pages to crawl
        'page': 5,
        # crawl window, in days
        'period': 2,
    },
    # Government procurement site ("cg")
    'cg_list_parse': {
        # max number of list pages to crawl
        'page': 5,
        # crawl window, in days
        'period': 2,
    },
}
| [
"451574449@qq.com"
] | 451574449@qq.com |
fff3ef323fc12270990af0fd90b8270c1f829fdd | 3f77f513a9dd7072b4d03b29bfbee882e4c5fdbe | /面向对象.py | f62867810d27b31fda60ceab0b0424149affced2 | [] | no_license | zhongyusheng/store | 1a39c12bd69641206fee02460f1bbf5fb317ff41 | 153dee71687df770bcfb24243065e98236fdd363 | refs/heads/main | 2023-08-24T05:45:56.587624 | 2021-10-22T01:13:50 | 2021-10-22T01:13:50 | 405,902,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | import time
# class cup():
# high=0
# colour=""
# volume=0
# texture=""
#
# def run(self) -> None:
# print("高度为:",self.high,"厘米的",self.colour,self.texture,"杯能够装",self.volume,"升的液体")
#
# c1=cup()
# c1.high=66
# c1.colour="红色"
# c1.volume="88"
# c1.texture="玻璃"
# c1.run()
#
# class cumputer():
# name=""
# screen_size=0
# price=0
# cpu=""
# memory=0
# wait_time=0
#
# def show(self):
# print("这是一台价值",self.price,"元的",self.name,"电脑","屏幕尺寸为:",self.screen_size,"寸","cpu为:",self.cpu,"内存为:",self.memory,"G","待机时间为:",self.wait_time,"小时")
# def write(self,hour):
# print("我用这台电脑练习打字",hour,"小时")
#
# def play(self,hour,game):
# print("我用这台电脑玩",game,"游戏",hour,"小时")
#
# def watch(self,hour,name):
# print("我用这台电脑看",name, hour, "小时")
#
# c2=cumputer()
# c2. name="联想"
# c2.screen_size=33
# c2.price=32210
# c2.cpu="晓龙"
# c2.memory=1000
# c2.wait_time=9
#
# c2.show()
# c2.write(5)
# c2.play(2,"超级玛丽")
# c2.watch(5,"大清王朝")
# #题目二:
# class air():
# __brand=""
# __price=0
# __time=0
# #品牌
# def setbrand(self,brand):
# self.__brand=brand
# def getbrand(self):
# return self.__brand
# #价格
# def setprice(self,price):
# if price>0:
# self.__price=price
# else:
# print("输入非法!")
# def getprice(self):
# return self.__price
# #定时时间
# def settime(self,time):
# if time>0:
# self.__time=time
# else:
# print("输入非法!")
# def gettime(self):
# return self.__time
# #展示
# def show(self):
# print("这是一台",self.__brand,"空调!","价格为:",self.__price)
#
# #开机
# def open(self):
# print("空调正在开机")
# for i in range(3):
# print(".",end="")
# time.sleep(1)
# print("空调开机了!")
# #定时关机
# def close(self):
# if self.__time<=0:
# print("输入错误!")
# else:
# print("空调将在",self.__time,"分钟后自动关闭")
#
# class test(air):
# def run(self):
# super().show()
# super().open()
# super().close()
#
# a1 = test()
# a1.setbrand("海er兄弟")
# a1.setprice(3000)
# a1.settime(20)
# a1.run()
# # 题目二:
# class student():
# __name=""
# __age=0
# #姓名
# def setname(self,name):
# self.__name=name
# def getname(self):
# return self.__name
# #年龄
# def setage(self,age):
# if age>0 and age<120:
# self.__age=age
# else:
# print("输入非法!")
# def getage(self):
# return self.__age
#
# #展示
# def show(self):
# print("大家好,我叫:",self.__name,"今年",self.__age,"岁了!")
#
# #比较
# def compare(self,student):
# if self.__age>student.getage():
# a=self.__age-student.getage()
# print("我是",self.__name,"我比我同桌大",a,"岁!")
# elif self.__age==student.getage():
# print("我是",self.__name,"我和同桌一样大!")
# elif self.__age<student.getage():
# a=student.getage()-self.__age
# print("我是",self.__name,"我比我同桌小",a,"岁!")
# s=student()
# s.setname("在噶是的")
# s.setage(32)
#
# s1=student()
# s1.setname("阿萨德")
# s1.setage(56)
#
# s.show()
# s1.show()
# s1.compare(s)
| [
"noreply@github.com"
] | zhongyusheng.noreply@github.com |
01f1570f9e4bcdc4f3aed77c516a9705bfea0924 | efc99c0c8c51a43ec33ebc0995de55bc7dd7336d | /BinarySearch/solution.py | 813d6092d95b5f053833f31a105db6b344c2bf25 | [] | no_license | hukkelas/CrackingTheCodingInterview | ad398a8dd71ae03b68dd5e75a4744faf9ad5f927 | 4b5be46ff98b46940c88edf49476a3e6e7bcf111 | refs/heads/master | 2021-01-23T20:30:19.365631 | 2017-09-08T13:23:27 | 2017-09-08T13:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | #!/bin/python3
# pylint: skip-file
# https://www.hackerrank.com/challenges/ctci-ice-cream-parlor/problem
def binary_search(elements, x):
    """Return the index of x in the sorted sequence `elements`, or -1 if absent."""
    lo, hi = 0, len(elements) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = elements[mid]
        if value == x:
            return mid
        if value < x:
            lo = mid + 1      # target lies in the upper half
        else:
            hi = mid - 1      # target lies in the lower half
    return -1                 # not found
# Single-pass hash-map solution (the approach the original comment suggested):
# O(n) time instead of the previous O(n^2) list scan, and it avoids the
# fragile duplicate handling that mixed `flavors.index` with `sublis.index`.
def solve(flavors, money, n):
    """Return the 1-based indices (i, j), i < j, of two distinct flavors
    whose costs sum exactly to `money`.

    flavors -- list of flavor costs
    money   -- amount of money to spend
    n       -- number of flavors (kept for interface compatibility)

    Returns None when no such pair exists. The HackerRank problem
    guarantees a unique solution, matching the original's first-hit answer.
    """
    seen = {}  # cost -> earliest 1-based index where it occurred
    for j, cost in enumerate(flavors, start=1):
        complement = money - cost
        if complement in seen:
            return seen[complement], j
        # keep only the first occurrence so the lower index is reported
        if cost not in seen:
            seen[cost] = j
    return None
# Driver: read the trip count, then for each trip read the money, the number
# of flavors and the cost list, and print the two 1-based flavor indices.
t = int(input().strip())
for _ in range(t):
    money = int(input().strip())
    n = int(input().strip())
    flavors = list(map(int, input().strip().split(' ')))
    first, second = solve(flavors, money, n)
    print("{0} {1}".format(first, second))
| [
"haak-hu@hotmail.com"
] | haak-hu@hotmail.com |
507bb5c1686964fa97d8c7f16f97c1cd50618106 | f6c6ca7ecf61f0e588d39857e7e4fcea54253710 | /venv/bin/django-admin | 431a293ae347bc322453f86b1a045b7505c7d479 | [] | no_license | azizkarimi67/portfolio_app | 0ab062cf1c2d65ed7cbf818950332073df9d68e4 | dd825df72883c2258c9745f60d9e583c913243a2 | refs/heads/master | 2021-05-20T20:06:08.805801 | 2020-04-02T08:37:16 | 2020-04-02T08:37:16 | 252,400,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | #!/home/pamir/Documents/personal_portfolio/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"azizullah.karimy91@gmail.com"
] | azizullah.karimy91@gmail.com | |
41d1fec108b439c016bb2d337c5dba092c547b90 | f8005ef646f6d0eb1fb1588434f5a0e03c3446ba | /MainGame.py | 54207b0ea9ea956c599d18382d8bb11ff3ae63bd | [] | no_license | Raymundo1999/Final-Project-CSS-225 | 13918ef241ad45deaf0dda8251c551ee894e8c5b | a7d3ec9aef7b9aae670a1d2b1c1a3e2a0cdddbaf | refs/heads/main | 2023-01-28T10:21:36.906256 | 2020-12-09T21:54:51 | 2020-12-09T21:54:51 | 320,088,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | #Raymundo Sanchez
#Nov 1,2020
import mainCharacter as MC
import section1
import section2
import section3
#Main Character will go here
player = MC.mainCharacter()
#this will show the intro to the game
#the empty imput was included to be able to click a button to go forward
print("Hey I see your awake!")
input()
print("This is your house and the country you are in is")
input()
print("actually I don't know so don't ask me")
input()
#the players name wil go here.
#MC has a name already so it will be "Mac"
#this is just the person getting introduced to the game and section1 will start.
print("you might want to know if you have died or if you were summoned to this location")
input()
print("But in truth I'm just hungry so you are going to collect ingredients")
input()
print("that way we can all come together and eat in the end of the day")
input()
print("your no hero just a person that is hungry and wants to eat")
print("Have a good day and have fun and try not to get lost I'm hungry")
print("I'm not going to go rescue you, I'll just get food and eat")
print("Anyways have fun PS: It is not my fault if you die")
#this will send you and strt you in Section1 then after it is done it will return you with a message at the end.
section1.start(player)
print ("you are back to the main game file")
section2.start(player)
print ("you are back")
| [
"noreply@github.com"
] | Raymundo1999.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.